repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
gwastro/pycbc
pycbc/results/metadata.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/results/metadata.py#L57-L67
def load_html_metadata(filename):
    """Parse pycbc metadata out of an html file into a ConfigParser.

    Parameters
    ----------
    filename : str
        Path to the html file to read.

    Returns
    -------
    ConfigParser.ConfigParser
        Parser seeded with any metadata found in the file as defaults,
        plus an empty section named after the file's basename.
    """
    parser = MetaParser()
    # Fix: use a context manager so the file handle is closed promptly
    # instead of leaking until garbage collection.
    with open(filename, 'r') as html_file:
        data = html_file.read()
    # Only parse files that actually carry pycbc metadata markers.
    if 'pycbc-meta' in data:
        print("LOADING HTML FILE %s" % filename)
        parser.feed(data)
    cp = ConfigParser.ConfigParser(parser.metadata)
    cp.add_section(os.path.basename(filename))
    return cp
[ "def", "load_html_metadata", "(", "filename", ")", ":", "parser", "=", "MetaParser", "(", ")", "data", "=", "open", "(", "filename", ",", "'r'", ")", ".", "read", "(", ")", "if", "'pycbc-meta'", "in", "data", ":", "print", "(", "\"LOADING HTML FILE %s\"", ...
Get metadata from html file
[ "Get", "metadata", "from", "html", "file" ]
python
train
vint21h/nagios-notification-google-calendar
notification_google_calendar.py
https://github.com/vint21h/nagios-notification-google-calendar/blob/ef2b58c939d9d55a69a54b4e6a3fd9b61bde50d4/notification_google_calendar.py#L192-L220
def create_event(options, config, credentials):
    """
    Create event in calendar with sms reminder.

    :param options: parsed command-line options (message, calendar, quiet, ...)
    :param config: configuration mapping; config["message"] is the reminder
        lead time in minutes
    :param credentials: OAuth2 credentials used to authorize the API client
    """
    try:
        http = credentials.authorize(httplib2.Http())
        service = build("calendar", "v3", http=http)
        event = {
            "summary": options.message,
            "location": "",
            "reminders": {
                "useDefault": False,
                "overrides": [
                    {
                        "method": "sms",
                        "minutes": config["message"],
                    },
                ],
            }
        }
        event.update(create_event_datetimes(options, config))
        service.events().insert(calendarId=options.calendar,
                                sendNotifications=True,
                                body=event).execute()
    # Fix: `except Exception, err` is Python-2-only and a SyntaxError on
    # Python 3; the `as` form (PEP 3110) works on Python 2.6+ and 3.
    except Exception as err:
        if not options.quiet:
            sys.stderr.write(
                "ERROR: Creating google calendar event error. {err}\n".format(
                    err=err))
        sys.exit(-1)
[ "def", "create_event", "(", "options", ",", "config", ",", "credentials", ")", ":", "try", ":", "http", "=", "credentials", ".", "authorize", "(", "httplib2", ".", "Http", "(", ")", ")", "service", "=", "build", "(", "\"calendar\"", ",", "\"v3\"", ",", ...
Create event in calendar with sms reminder.
[ "Create", "event", "in", "calendar", "with", "sms", "reminder", "." ]
python
test
python-wink/python-wink
src/pywink/devices/binary_switch.py
https://github.com/python-wink/python-wink/blob/cf8bdce8c6518f30b91b23aa7aa32e89c2ce48da/src/pywink/devices/binary_switch.py#L15-L23
def set_state(self, state):
    """
    :param state: a boolean of true (on) or false ('off')
    :return: nothing
    """
    state_field = self.binary_state_name()
    desired = {"desired_state": {state_field: state}}
    # NOTE(review): "binary_switche" (no trailing 's') appears to be the
    # exact string the local API layer expects — preserved verbatim.
    response = self.api_interface.local_set_state(
        self, desired, type_override="binary_switche")
    self._update_state_from_response(response)
[ "def", "set_state", "(", "self", ",", "state", ")", ":", "_field", "=", "self", ".", "binary_state_name", "(", ")", "values", "=", "{", "\"desired_state\"", ":", "{", "_field", ":", "state", "}", "}", "response", "=", "self", ".", "api_interface", ".", ...
:param state: a boolean of true (on) or false ('off') :return: nothing
[ ":", "param", "state", ":", "a", "boolean", "of", "true", "(", "on", ")", "or", "false", "(", "off", ")", ":", "return", ":", "nothing" ]
python
train
AkihikoITOH/capybara
capybara/virtualenv/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.py
https://github.com/AkihikoITOH/capybara/blob/e86c2173ea386654f4ae061148e8fbe3f25e715c/capybara/virtualenv/lib/python2.7/site-packages/pip/_vendor/html5lib/html5parser.py#L404-L419
def parseRCDataRawtext(self, token, contentType):
    """Generic RCDATA/RAWTEXT Parsing algorithm
    contentType - RCDATA or RAWTEXT
    """
    assert contentType in ("RAWTEXT", "RCDATA")
    self.tree.insertElement(token)
    # Switch the tokenizer into the matching raw-content state.
    tokenizer = self.tokenizer
    tokenizer.state = (tokenizer.rawtextState if contentType == "RAWTEXT"
                       else tokenizer.rcdataState)
    # Remember the current phase so the "text" phase can return to it.
    self.originalPhase = self.phase
    self.phase = self.phases["text"]
[ "def", "parseRCDataRawtext", "(", "self", ",", "token", ",", "contentType", ")", ":", "assert", "contentType", "in", "(", "\"RAWTEXT\"", ",", "\"RCDATA\"", ")", "self", ".", "tree", ".", "insertElement", "(", "token", ")", "if", "contentType", "==", "\"RAWTE...
Generic RCDATA/RAWTEXT Parsing algorithm contentType - RCDATA or RAWTEXT
[ "Generic", "RCDATA", "/", "RAWTEXT", "Parsing", "algorithm", "contentType", "-", "RCDATA", "or", "RAWTEXT" ]
python
test
sonyxperiadev/pygerrit
pygerrit/ssh.py
https://github.com/sonyxperiadev/pygerrit/blob/756300120b0f4f4af19e0f985566d82bc80b4359/pygerrit/ssh.py#L84-L111
def _configure(self):
    """ Configure the ssh parameters from the config file.

    Reads ~/.ssh/config, looks up the entry for ``self.hostname`` and fills
    in ``hostname``, ``username``, ``port`` and optionally ``key_filename``
    and ``proxy``.

    Raises
    ------
    GerritError
        If the config file is missing, has no entry for the host, lacks
        required fields, names a non-existent identity file, or has a
        non-integer port.
    """
    configfile = expanduser("~/.ssh/config")
    if not isfile(configfile):
        raise GerritError("ssh config file '%s' does not exist" %
                          configfile)
    config = SSHConfig()
    # Fix: close the config file deterministically instead of leaking
    # the handle opened inline in the parse() call.
    with open(configfile) as config_handle:
        config.parse(config_handle)
    data = config.lookup(self.hostname)
    if not data:
        raise GerritError("No ssh config for host %s" % self.hostname)
    if 'hostname' not in data or 'port' not in data or 'user' not in data:
        raise GerritError("Missing configuration data in %s" % configfile)
    self.hostname = data['hostname']
    self.username = data['user']
    if 'identityfile' in data:
        key_filename = abspath(expanduser(data['identityfile'][0]))
        if not isfile(key_filename):
            raise GerritError("Identity file '%s' does not exist" %
                              key_filename)
        self.key_filename = key_filename
    try:
        self.port = int(data['port'])
    except ValueError:
        raise GerritError("Invalid port: %s" % data['port'])
    if 'proxycommand' in data:
        self.proxy = ProxyCommand(data['proxycommand'])
[ "def", "_configure", "(", "self", ")", ":", "configfile", "=", "expanduser", "(", "\"~/.ssh/config\"", ")", "if", "not", "isfile", "(", "configfile", ")", ":", "raise", "GerritError", "(", "\"ssh config file '%s' does not exist\"", "%", "configfile", ")", "config"...
Configure the ssh parameters from the config file.
[ "Configure", "the", "ssh", "parameters", "from", "the", "config", "file", "." ]
python
train
DsixTools/python-smeftrunner
smeftrunner/classes.py
https://github.com/DsixTools/python-smeftrunner/blob/4c9130e53ad4f7bbb526657a82150ca9d57c4b37/smeftrunner/classes.py#L134-L156
def get_wcxf(self, C_out, scale_out):
    """Return the Wilson coefficients `C_out` as a wcxf.WC instance.

    Note that the Wilson coefficients are rotated into the Warsaw basis
    as defined in WCxf, i.e. to the basis where the down-type and charged
    lepton mass matrices are diagonal."""
    import wcxf
    C = self.rotate_defaultbasis(C_out)
    wc_dict = wcxf.translators.smeft.arrays2wcxf(C)
    basis = wcxf.Basis['SMEFT', 'Warsaw']
    # Keep only coefficients the basis knows about, dropping exact zeros.
    wc_dict = {name: value for name, value in wc_dict.items()
               if name in basis.all_wcs and value != 0}
    keys_dim5 = ['llphiphi']
    keys_dim6 = list(set(definitions.WC_keys_0f + definitions.WC_keys_2f
                         + definitions.WC_keys_4f) - set(keys_dim5))
    # Rescale by powers of the high scale: one power for the dimension-5
    # key, two for dimension-6 keys.  The key sets are disjoint, so a
    # single pass is equivalent to the two separate loops it replaces.
    for name in wc_dict:
        prefix = name.split('_')[0]
        if prefix in keys_dim5:
            wc_dict[name] = wc_dict[name] / self.scale_high
        elif prefix in keys_dim6:
            wc_dict[name] = wc_dict[name] / self.scale_high**2
    values = wcxf.WC.dict2values(wc_dict)
    return wcxf.WC('SMEFT', 'Warsaw', scale_out, values)
[ "def", "get_wcxf", "(", "self", ",", "C_out", ",", "scale_out", ")", ":", "import", "wcxf", "C", "=", "self", ".", "rotate_defaultbasis", "(", "C_out", ")", "d", "=", "wcxf", ".", "translators", ".", "smeft", ".", "arrays2wcxf", "(", "C", ")", "basis",...
Return the Wilson coefficients `C_out` as a wcxf.WC instance. Note that the Wilson coefficients are rotated into the Warsaw basis as defined in WCxf, i.e. to the basis where the down-type and charged lepton mass matrices are diagonal.
[ "Return", "the", "Wilson", "coefficients", "C_out", "as", "a", "wcxf", ".", "WC", "instance", "." ]
python
train
basho/riak-python-client
riak/transports/http/transport.py
https://github.com/basho/riak-python-client/blob/91de13a16607cdf553d1a194e762734e3bec4231/riak/transports/http/transport.py#L379-L413
def get_index(self, bucket, index, startkey, endkey=None, return_terms=None,
              max_results=None, continuation=None, timeout=None,
              term_regex=None):
    """
    Performs a secondary index query.
    """
    if term_regex and not self.index_term_regex():
        raise NotImplementedError("Secondary index term_regex is not "
                                  "supported on %s" %
                                  self.server_version.vstring)

    # Riak expresses "no timeout" as 0.
    if timeout == 'infinity':
        timeout = 0

    params = {'return_terms': return_terms,
              'max_results': max_results,
              'continuation': continuation,
              'timeout': timeout,
              'term_regex': term_regex}
    bucket_type = self._get_bucket_type(bucket.bucket_type)
    url = self.index_path(bucket.name, index, startkey, endkey,
                          bucket_type=bucket_type, **params)
    status, headers, body = self._request('GET', url)
    self.check_http_code(status, [200])
    json_data = json.loads(bytes_to_str(body))

    if return_terms and u'results' in json_data:
        # Each entry is a single-pair mapping of {term: key}.
        results = []
        for entry in json_data[u'results']:
            term, key = list(entry.items())[0]
            results.append((decode_index_value(index, term), key))
    else:
        results = list(json_data[u'keys'])

    if max_results and u'continuation' in json_data:
        return (results, json_data[u'continuation'])
    return (results, None)
[ "def", "get_index", "(", "self", ",", "bucket", ",", "index", ",", "startkey", ",", "endkey", "=", "None", ",", "return_terms", "=", "None", ",", "max_results", "=", "None", ",", "continuation", "=", "None", ",", "timeout", "=", "None", ",", "term_regex"...
Performs a secondary index query.
[ "Performs", "a", "secondary", "index", "query", "." ]
python
train
tensorflow/hub
tensorflow_hub/feature_column.py
https://github.com/tensorflow/hub/blob/09f45963f6787322967b6fec61459f3ac56fbb27/tensorflow_hub/feature_column.py#L83-L124
def _check_module_is_text_embedding(module_spec):
  """Raises ValueError if `module_spec` is not a text-embedding module.

  Args:
    module_spec: A `ModuleSpec` to test.

  Raises:
    ValueError: if `module_spec` default signature is not compatible with
        Tensor(string, shape=(?,)) -> Tensor(float32, shape=(?,K)).
  """
  issues = []

  # Validate the default signature's single string-vector input.
  input_info_dict = module_spec.get_input_info_dict()
  if len(input_info_dict) != 1:
    issues.append("Module default signature must require only one input")
  else:
    (input_info,) = input_info_dict.values()
    input_shape = input_info.get_shape()
    input_ok = (input_info.dtype == tf.string and
                input_shape.ndims == 1 and
                input_shape.as_list() == [None])
    if not input_ok:
      issues.append("Module default signature must have only one input "
                    "tf.Tensor(shape=(?,), dtype=string)")

  # Validate the 'default' output: float32 matrix with unknown batch
  # dimension and a known embedding dimension K.
  output_info_dict = module_spec.get_output_info_dict()
  if "default" not in output_info_dict:
    issues.append("Module default signature must have a 'default' output.")
  else:
    output_info = output_info_dict["default"]
    output_shape = output_info.get_shape()
    output_ok = (output_info.dtype == tf.float32 and
                 output_shape.ndims == 2 and
                 not output_shape.as_list()[0] and
                 output_shape.as_list()[1])
    if not output_ok:
      issues.append("Module default signature must have a 'default' output of "
                    "tf.Tensor(shape=(?,K), dtype=float32).")

  if issues:
    raise ValueError("Module is not a text-embedding: %r" % issues)
[ "def", "_check_module_is_text_embedding", "(", "module_spec", ")", ":", "issues", "=", "[", "]", "# Find issues with signature inputs.", "input_info_dict", "=", "module_spec", ".", "get_input_info_dict", "(", ")", "if", "len", "(", "input_info_dict", ")", "!=", "1", ...
Raises ValueError if `module_spec` is not a text-embedding module. Args: module_spec: A `ModuleSpec` to test. Raises: ValueError: if `module_spec` default signature is not compatible with Tensor(string, shape=(?,)) -> Tensor(float32, shape=(?,K)).
[ "Raises", "ValueError", "if", "module_spec", "is", "not", "a", "text", "-", "embedding", "module", "." ]
python
train
adamalton/django-csp-reports
cspreports/summary.py
https://github.com/adamalton/django-csp-reports/blob/867992c6f535cf6afbf911f92af7eea4c61e4b73/cspreports/summary.py#L35-L40
def append(self, report):
    """Append a new CSP report."""
    # Duplicates among the retained examples violate an internal invariant.
    assert report not in self.examples
    self.count += 1
    # Keep at most `top` example reports; the count still tracks them all.
    has_room = len(self.examples) < self.top
    if has_room:
        self.examples.append(report)
[ "def", "append", "(", "self", ",", "report", ")", ":", "assert", "report", "not", "in", "self", ".", "examples", "self", ".", "count", "+=", "1", "if", "len", "(", "self", ".", "examples", ")", "<", "self", ".", "top", ":", "self", ".", "examples",...
Append a new CSP report.
[ "Append", "a", "new", "CSP", "report", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/gui/shortcut_manager.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/shortcut_manager.py#L72-L106
def add_callback_for_action(self, action, callback):
    """Adds a callback function to an action

    The method checks whether both action and callback are valid. If so, the
    callback is added to the list of functions called when the action is
    triggered.

    :param str action: An action like 'add', 'copy', 'info'
    :param callback: A callback function, which is called when action is
      triggered. It retrieves the event as parameter
    :return: True is the parameters are valid and the callback is registered,
      False else
    :rtype: bool
    """
    if hasattr(callback, '__call__'):  # Is the callback really a function?
        if action not in self.__action_to_callbacks:
            self.__action_to_callbacks[action] = []
        self.__action_to_callbacks[action].append(callback)

        # Also index the callback by its owning controller, if any, so all
        # of a controller's callbacks can later be removed together.
        controller = None
        try:
            controller = callback.__self__
        except AttributeError:
            try:
                # Needed when callback was wrapped using functools.partial
                controller = callback.func.__self__
            except AttributeError:
                pass
        if controller:
            if controller not in self.__controller_action_callbacks:
                self.__controller_action_callbacks[controller] = {}
            if action not in self.__controller_action_callbacks[controller]:
                self.__controller_action_callbacks[controller][action] = []
            self.__controller_action_callbacks[controller][action].append(
                callback)
        return True
    # Fix: the docstring promises a bool, but the original implicitly
    # returned None for non-callable callbacks.  None is also falsy, so an
    # explicit False is backward compatible for truthiness checks.
    return False
[ "def", "add_callback_for_action", "(", "self", ",", "action", ",", "callback", ")", ":", "if", "hasattr", "(", "callback", ",", "'__call__'", ")", ":", "# Is the callback really a function?", "if", "action", "not", "in", "self", ".", "__action_to_callbacks", ":", ...
Adds a callback function to an action The method checks whether both action and callback are valid. If so, the callback is added to the list of functions called when the action is triggered. :param str action: An action like 'add', 'copy', 'info' :param callback: A callback function, which is called when action is triggered. It retrieves the event as parameter :return: True is the parameters are valid and the callback is registered, False else :rtype: bool
[ "Adds", "a", "callback", "function", "to", "an", "action" ]
python
train
jotacor/ComunioPy
ComunioPy/__init__.py
https://github.com/jotacor/ComunioPy/blob/2dd71e3e197b497980ea7b9cfbec1da64dca3ed0/ComunioPy/__init__.py#L100-L108
def standings(self):
    '''Get standings from the community's account'''
    headers = {"Content-type": "application/x-www-form-urlencoded",
               "Accept": "text/plain",
               "User-Agent": user_agent}
    req = self.session.get('http://' + self.domain + '/standings.phtml',
                           headers=headers).content
    soup = BeautifulSoup(req)
    table = soup.find('table', {'id': 'tablestandings'}).find_all('tr')
    clasificacion = []
    # Fix: build the list with a plain loop instead of abusing a list
    # comprehension for its side effects (the comprehension allocated and
    # threw away a list of Nones).  Skip the header row.
    for tablas in table[1:]:
        cells = tablas.find_all('td')
        clasificacion.append('%s\t%s\t%s\t%s\t%s' % (
            tablas.find('td').text,
            tablas.find('div')['id'],
            tablas.a.text,
            cells[3].text,
            cells[4].text))
    return clasificacion
[ "def", "standings", "(", "self", ")", ":", "headers", "=", "{", "\"Content-type\"", ":", "\"application/x-www-form-urlencoded\"", ",", "\"Accept\"", ":", "\"text/plain\"", ",", "\"User-Agent\"", ":", "user_agent", "}", "req", "=", "self", ".", "session", ".", "g...
Get standings from the community's account
[ "Get", "standings", "from", "the", "community", "s", "account" ]
python
train
exosite-labs/pyonep
pyonep/portals/__init__.py
https://github.com/exosite-labs/pyonep/blob/d27b621b00688a542e0adcc01f3e3354c05238a1/pyonep/portals/__init__.py#L506-L516
def add_dplist_permission_for_user_on_portal(self, user_email, portal_id):
    """ Adds the 'd_p_list' permission to a user object when
        provided a user_email and portal_id."""
    user_id = self.get_user_id_from_email(user_email)
    # Debug aid: dump the user's permissions before and after the update.
    print(self.get_user_permission_from_email(user_email))
    permission = [{'access': 'd_p_list',
                   'oid': {'id': portal_id, 'type': 'Portal'}}]
    retval = self.add_user_permission(user_id, json.dumps(permission))
    print(self.get_user_permission_from_email(user_email))
    return retval
[ "def", "add_dplist_permission_for_user_on_portal", "(", "self", ",", "user_email", ",", "portal_id", ")", ":", "_id", "=", "self", ".", "get_user_id_from_email", "(", "user_email", ")", "print", "(", "self", ".", "get_user_permission_from_email", "(", "user_email", ...
Adds the 'd_p_list' permission to a user object when provided a user_email and portal_id.
[ "Adds", "the", "d_p_list", "permission", "to", "a", "user", "object", "when", "provided", "a", "user_email", "and", "portal_id", "." ]
python
train
MycroftAI/adapt
adapt/tools/text/trie.py
https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/tools/text/trie.py#L86-L104
def insert(self, iterable, index=0, data=None, weight=1.0):
    """Insert new node into tree

    Args:
        iterable(hashable): key used to find in the future.
        data(object): data associated with the key
        index(int): an index used for insertion.
        weight(float): the weight given for the item added.
    """
    if index == len(iterable):
        # Reached the end of the key: mark this node terminal and record
        # the key, weight and (optional) payload.
        self.is_terminal = True
        self.key = iterable
        self.weight = weight
        if data:
            self.data.add(data)
    else:
        if iterable[index] not in self.children:
            self.children[iterable[index]] = TrieNode()
        # Bug fix: propagate the caller's weight down the recursion.  The
        # original recursive call omitted it, so every terminal node for a
        # non-empty key was stored with the default weight of 1.0
        # regardless of what the caller passed.
        self.children[iterable[index]].insert(iterable, index + 1, data,
                                              weight)
[ "def", "insert", "(", "self", ",", "iterable", ",", "index", "=", "0", ",", "data", "=", "None", ",", "weight", "=", "1.0", ")", ":", "if", "index", "==", "len", "(", "iterable", ")", ":", "self", ".", "is_terminal", "=", "True", "self", ".", "ke...
Insert new node into tree Args: iterable(hashable): key used to find in the future. data(object): data associated with the key index(int): an index used for insertion. weight(float): the wait given for the item added.
[ "Insert", "new", "node", "into", "tree" ]
python
train
refenv/cijoe
deprecated/modules/cij/liblight.py
https://github.com/refenv/cijoe/blob/21d7b2ed4ff68e0a1457e7df2db27f6334f1a379/deprecated/modules/cij/liblight.py#L177-L184
def scalar_write(self, address, block_count, data_file, meta_file):
    """nvme write"""
    envs = self.envs
    cmd = [
        "nvme", "write", envs["DEV_PATH"],
        "-s 0x{:x}".format(address),
        "-c {}".format(block_count - 1),  # block count is zero-based
        "-d {}".format(data_file),
        "-M {}".format(meta_file),
        "-z 0x{:x}".format(block_count * envs["NBYTES"]),
        "-y 0x{:x}".format(block_count * envs["NBYTES_OOB"]),
    ]
    status, _, _ = cij.ssh.command(cmd, shell=True)
    return status
[ "def", "scalar_write", "(", "self", ",", "address", ",", "block_count", ",", "data_file", ",", "meta_file", ")", ":", "cmd", "=", "[", "\"nvme\"", ",", "\"write\"", ",", "self", ".", "envs", "[", "\"DEV_PATH\"", "]", ",", "\"-s 0x{:x}\"", ".", "format", ...
nvme write
[ "nvme", "write" ]
python
valid
codelv/enaml-native
src/enamlnative/core/app.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/core/app.py#L375-L411
def handle_event(self, event):
    """ When we get an 'event' type from the bridge
    handle it by invoking the handler and if needed
    sending back the result.
    """
    # event[1] carries (result_id, object pointer, method name, typed args).
    result_id, ptr, method, args = event[1]
    # Pre-bind so the finally-block can reference them even if lookup or
    # invocation raised before assignment.
    obj = None
    result = None
    try:
        obj, handler = bridge.get_handler(ptr, method)
        # args is a list of (type, value) pairs; only the values are passed.
        result = handler(*[v for t, v in args])
    except bridge.BridgeReferenceError as e:
        #: Log the event, don't blow up here
        msg = "Error processing event: {} - {}".format(
            event, e).encode("utf-8")
        print(msg)
        self.show_error(msg)
    except:
        #: Log the event, blow up in user's face
        msg = "Error processing event: {} - {}".format(
            event, traceback.format_exc()).encode("utf-8")
        print(msg)
        self.show_error(msg)
        raise
    finally:
        # A nonzero result_id means the other side is waiting on a reply,
        # so one is sent even on failure (result stays None then).
        if result_id:
            if hasattr(obj, '__nativeclass__'):
                # Native proxies declare the return signature on the method.
                sig = getattr(type(obj), method).__returns__
            else:
                sig = type(result).__name__
            self.send_event(
                bridge.Command.RESULT,  #: method
                result_id,
                bridge.msgpack_encoder(sig, result)  #: args
            )
[ "def", "handle_event", "(", "self", ",", "event", ")", ":", "result_id", ",", "ptr", ",", "method", ",", "args", "=", "event", "[", "1", "]", "obj", "=", "None", "result", "=", "None", "try", ":", "obj", ",", "handler", "=", "bridge", ".", "get_han...
When we get an 'event' type from the bridge handle it by invoking the handler and if needed sending back the result.
[ "When", "we", "get", "an", "event", "type", "from", "the", "bridge", "handle", "it", "by", "invoking", "the", "handler", "and", "if", "needed", "sending", "back", "the", "result", "." ]
python
train
Kortemme-Lab/pull_into_place
pull_into_place/pipeline.py
https://github.com/Kortemme-Lab/pull_into_place/blob/247f303100a612cc90cf31c86e4fe5052eb28c8d/pull_into_place/pipeline.py#L164-L179
def largest_loop(self):
    """ Return the boundaries for the largest loop segment.

    This is just meant to be a reasonable default for various selectors
    and filters to work with, in the case that more than one loop is
    being modeled.  If you want to be more precise, you'll have to
    override the selectors and filters in question.
    """
    from collections import namedtuple
    Loop = namedtuple('Loop', 'start end')
    # Sort ascending by span length and take the last entry; on ties this
    # deliberately picks the segment appearing latest in the list.
    by_length = sorted(self.loop_segments,
                       key=lambda seg: abs(seg[1] - seg[0]))
    return Loop(*by_length[-1])
[ "def", "largest_loop", "(", "self", ")", ":", "from", "collections", "import", "namedtuple", "Loop", "=", "namedtuple", "(", "'Loop'", ",", "'start end'", ")", "largest_segment", "=", "sorted", "(", "self", ".", "loop_segments", ",", "key", "=", "lambda", "x...
Return the boundaries for the largest loop segment. This is just meant to be a reasonable default for various selectors and filters to work with, in the case that more than one loop is being modeled. If you want to be more precise, you'll have to override the selectors and filters in question.
[ "Return", "the", "boundaries", "for", "the", "largest", "loop", "segment", ".", "This", "is", "just", "meant", "to", "be", "a", "reasonable", "default", "for", "various", "selectors", "and", "filters", "to", "work", "with", "in", "the", "case", "that", "mo...
python
train
mdgart/sentrylogs
sentrylogs/daemonize.py
https://github.com/mdgart/sentrylogs/blob/1bff3f2c8e37265430269cdf1ed8f860ce2dd72a/sentrylogs/daemonize.py#L44-L179
def create_daemon():
    """Detach a process from the controlling terminal and run it in the
    background as a daemon.

    Classic double-fork daemonization: fork, setsid, fork again, then
    close and redirect the standard file descriptors.  Returns 0 in the
    daemon; the two parent processes call _exit() and never return.
    """
    try:
        # First fork: returns control to the shell and guarantees the
        # child is not a process group leader, which is required for the
        # os.setsid() call below to succeed.
        pid = os.fork()
    except OSError as err:
        raise Exception("%s [%d]" % (err.strerror, err.errno))

    if pid == 0:  # The first child.
        # Become session leader of a new session (and process group leader
        # of a new process group) with no controlling terminal.
        os.setsid()

        try:
            # Second fork: prevents zombies and guarantees the daemon is
            # not a session leader, so it can never acquire a controlling
            # terminal (System V semantics).
            pid = os.fork()
        except OSError as err:
            raise Exception("%s [%d]" % (err.strerror, err.errno))

        if pid == 0:  # The second child: the actual daemon.
            # Change to a safe working directory so the daemon never pins
            # a mounted filesystem, and take full control of the file mode
            # creation mask rather than inheriting the parent's.
            os.chdir(WORKDIR)
            os.umask(UMASK)
        else:
            # Exit the first child.  _exit() (not exit()) skips atexit
            # handlers and avoids double-flushing stdio buffers shared
            # with the parent.
            _exit(0)
    else:
        # Exit the original parent of the first child.
        _exit(0)

    # Close every file descriptor inherited from the parent.  Use the
    # RLIMIT_NOFILE hard limit as the upper bound, falling back to MAXFD
    # when the limit is unbounded.
    import resource  # Resource usage information.
    maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
    if maxfd == resource.RLIM_INFINITY:
        maxfd = MAXFD

    for file_desc in range(0, maxfd):
        try:
            os.close(file_desc)
        except OSError:
            # file descriptor wasn't open to begin with (ignored)
            pass

    # Redirect the standard I/O descriptors to REDIRECT_TO (typically
    # /dev/null).  open() returns the lowest free descriptor, which is 0
    # (stdin) since everything was closed above.
    os.open(REDIRECT_TO, os.O_RDWR)  # standard input (0)

    # Duplicate standard input to standard output and standard error.
    os.dup2(0, 1)  # standard output (1)
    os.dup2(0, 2)  # standard error (2)

    return 0
[ "def", "create_daemon", "(", ")", ":", "try", ":", "# Fork a child process so the parent can exit. This returns control to", "# the command-line or shell. It also guarantees that the child will not", "# be a process group leader, since the child receives a new process ID", "# and inherits the ...
Detach a process from the controlling terminal and run it in the background as a daemon.
[ "Detach", "a", "process", "from", "the", "controlling", "terminal", "and", "run", "it", "in", "the", "background", "as", "a", "daemon", "." ]
python
train
elastic/elasticsearch-py
elasticsearch/client/xpack/rollup.py
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/xpack/rollup.py#L31-L40
def get_rollup_caps(self, id=None, params=None):
    """
    `<>`_

    :arg id: The ID of the index to check rollup capabilities on, or left
        blank for all jobs
    """
    # Build the _rollup/data/<id> path, then delegate to the transport.
    path = _make_path("_rollup", "data", id)
    return self.transport.perform_request("GET", path, params=params)
[ "def", "get_rollup_caps", "(", "self", ",", "id", "=", "None", ",", "params", "=", "None", ")", ":", "return", "self", ".", "transport", ".", "perform_request", "(", "\"GET\"", ",", "_make_path", "(", "\"_rollup\"", ",", "\"data\"", ",", "id", ")", ",", ...
`<>`_ :arg id: The ID of the index to check rollup capabilities on, or left blank for all jobs
[ "<", ">", "_" ]
python
train
BerkeleyAutomation/perception
perception/image.py
https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/image.py#L685-L723
def focus(self, height, width, center_i=None, center_j=None):
    """Zero out all of the image outside of a crop box.

    Parameters
    ----------
    height : int
        The height of the desired crop box.
    width : int
        The width of the desired crop box.
    center_i : int
        The center height point of the crop box. If not specified, the
        center of the image is used.
    center_j : int
        The center width point of the crop box. If not specified, the
        center of the image is used.

    Returns
    -------
    :obj:`Image`
        A new Image of the same type and size that is zeroed out except
        within the crop box.
    """
    # Default the box center to the image center.
    if center_i is None:
        center_i = self.height / 2
    if center_j is None:
        center_j = self.width / 2

    # Clamp the box to the image bounds before converting to indices.
    half_h = height / 2
    half_w = width / 2
    start_row = int(max(0, center_i - half_h))
    end_row = int(min(self.height - 1, center_i + half_h))
    start_col = int(max(0, center_j - half_w))
    end_col = int(min(self.width - 1, center_j + half_w))

    focus_data = np.zeros(self._data.shape)
    rows = slice(start_row, end_row + 1)
    cols = slice(start_col, end_col + 1)
    focus_data[rows, cols] = self._data[rows, cols]
    return type(self)(focus_data.astype(self._data.dtype), self._frame)
[ "def", "focus", "(", "self", ",", "height", ",", "width", ",", "center_i", "=", "None", ",", "center_j", "=", "None", ")", ":", "if", "center_i", "is", "None", ":", "center_i", "=", "self", ".", "height", "/", "2", "if", "center_j", "is", "None", "...
Zero out all of the image outside of a crop box. Parameters ---------- height : int The height of the desired crop box. width : int The width of the desired crop box. center_i : int The center height point of the crop box. If not specified, the center of the image is used. center_j : int The center width point of the crop box. If not specified, the center of the image is used. Returns ------- :obj:`Image` A new Image of the same type and size that is zeroed out except within the crop box.
[ "Zero", "out", "all", "of", "the", "image", "outside", "of", "a", "crop", "box", "." ]
python
train
ibelie/typy
typy/google/protobuf/descriptor.py
https://github.com/ibelie/typy/blob/3616845fb91459aacd8df6bf82c5d91f4542bee7/typy/google/protobuf/descriptor.py#L174-L179
def GetTopLevelContainingType(self): """Returns the root if this is a nested type, or itself if its the root.""" desc = self while desc.containing_type is not None: desc = desc.containing_type return desc
[ "def", "GetTopLevelContainingType", "(", "self", ")", ":", "desc", "=", "self", "while", "desc", ".", "containing_type", "is", "not", "None", ":", "desc", "=", "desc", ".", "containing_type", "return", "desc" ]
Returns the root if this is a nested type, or itself if its the root.
[ "Returns", "the", "root", "if", "this", "is", "a", "nested", "type", "or", "itself", "if", "its", "the", "root", "." ]
python
valid
docker/docker-py
docker/models/images.py
https://github.com/docker/docker-py/blob/613d6aad83acc9931ff2ecfd6a6c7bd8061dc125/docker/models/images.py#L300-L316
def get(self, name): """ Gets an image. Args: name (str): The name of the image. Returns: (:py:class:`Image`): The image. Raises: :py:class:`docker.errors.ImageNotFound` If the image does not exist. :py:class:`docker.errors.APIError` If the server returns an error. """ return self.prepare_model(self.client.api.inspect_image(name))
[ "def", "get", "(", "self", ",", "name", ")", ":", "return", "self", ".", "prepare_model", "(", "self", ".", "client", ".", "api", ".", "inspect_image", "(", "name", ")", ")" ]
Gets an image. Args: name (str): The name of the image. Returns: (:py:class:`Image`): The image. Raises: :py:class:`docker.errors.ImageNotFound` If the image does not exist. :py:class:`docker.errors.APIError` If the server returns an error.
[ "Gets", "an", "image", "." ]
python
train
opendatateam/udata
udata/harvest/actions.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L45-L49
def paginate_sources(owner=None, page=1, page_size=DEFAULT_PAGE_SIZE): '''Paginate harvest sources''' sources = _sources_queryset(owner=owner) page = max(page or 1, 1) return sources.paginate(page, page_size)
[ "def", "paginate_sources", "(", "owner", "=", "None", ",", "page", "=", "1", ",", "page_size", "=", "DEFAULT_PAGE_SIZE", ")", ":", "sources", "=", "_sources_queryset", "(", "owner", "=", "owner", ")", "page", "=", "max", "(", "page", "or", "1", ",", "1...
Paginate harvest sources
[ "Paginate", "harvest", "sources" ]
python
train
mikedh/trimesh
trimesh/base.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/base.py#L1423-L1440
def is_volume(self): """ Check if a mesh has all the properties required to represent a valid volume, rather than just a surface. These properties include being watertight, having consistent winding and outward facing normals. Returns --------- valid : bool Does the mesh represent a volume """ valid = bool(self.is_watertight and self.is_winding_consistent and np.isfinite(self.center_mass).all() and self.volume > 0.0) return valid
[ "def", "is_volume", "(", "self", ")", ":", "valid", "=", "bool", "(", "self", ".", "is_watertight", "and", "self", ".", "is_winding_consistent", "and", "np", ".", "isfinite", "(", "self", ".", "center_mass", ")", ".", "all", "(", ")", "and", "self", "....
Check if a mesh has all the properties required to represent a valid volume, rather than just a surface. These properties include being watertight, having consistent winding and outward facing normals. Returns --------- valid : bool Does the mesh represent a volume
[ "Check", "if", "a", "mesh", "has", "all", "the", "properties", "required", "to", "represent", "a", "valid", "volume", "rather", "than", "just", "a", "surface", "." ]
python
train
QunarOPS/qg.core
qg/core/importutils.py
https://github.com/QunarOPS/qg.core/blob/d5d7e36ea140cfe73e1b1850e8c96960b02a1ed3/qg/core/importutils.py#L41-L52
def import_object_ns(name_space, import_str, *args, **kwargs): """Tries to import object from default namespace. Imports a class and return an instance of it, first by trying to find the class in a default namespace, then failing back to a full path if not found in the default namespace. """ import_value = "%s.%s" % (name_space, import_str) try: return import_class(import_value)(*args, **kwargs) except ImportError: return import_class(import_str)(*args, **kwargs)
[ "def", "import_object_ns", "(", "name_space", ",", "import_str", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "import_value", "=", "\"%s.%s\"", "%", "(", "name_space", ",", "import_str", ")", "try", ":", "return", "import_class", "(", "import_value",...
Tries to import object from default namespace. Imports a class and return an instance of it, first by trying to find the class in a default namespace, then failing back to a full path if not found in the default namespace.
[ "Tries", "to", "import", "object", "from", "default", "namespace", "." ]
python
train
opendatateam/udata
udata/frontend/helpers.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/frontend/helpers.py#L254-L264
def tooltip_ellipsis(source, length=0): ''' return the plain text representation of markdown encoded text. That is the texted without any html tags. If ``length`` is 0 then it will not be truncated.''' try: length = int(length) except ValueError: # invalid literal for int() return source # Fail silently. ellipsis = '<a href v-tooltip title="{0}">...</a>'.format(source) return Markup((source[:length] + ellipsis) if len(source) > length and length > 0 else source)
[ "def", "tooltip_ellipsis", "(", "source", ",", "length", "=", "0", ")", ":", "try", ":", "length", "=", "int", "(", "length", ")", "except", "ValueError", ":", "# invalid literal for int()", "return", "source", "# Fail silently.", "ellipsis", "=", "'<a href v-to...
return the plain text representation of markdown encoded text. That is the texted without any html tags. If ``length`` is 0 then it will not be truncated.
[ "return", "the", "plain", "text", "representation", "of", "markdown", "encoded", "text", ".", "That", "is", "the", "texted", "without", "any", "html", "tags", ".", "If", "length", "is", "0", "then", "it", "will", "not", "be", "truncated", "." ]
python
train
cloudtools/stacker
stacker/lookups/handlers/dynamodb.py
https://github.com/cloudtools/stacker/blob/ad6013a03a560c46ba3c63c4d153336273e6da5d/stacker/lookups/handlers/dynamodb.py#L146-L177
def _get_val_from_ddb_data(data, keylist): """Given a dictionary of dynamodb data (including the datatypes) and a properly structured keylist, it will return the value of the lookup Args: data (dict): the raw dynamodb data keylist(list): a list of keys to lookup. This must include the datatype Returns: various: It returns the value from the dynamodb record, and casts it to a matching python datatype """ next_type = None # iterate through the keylist to find the matching key/datatype for k in keylist: for k1 in k: if next_type is None: data = data[k[k1]] else: temp_dict = data[next_type] data = temp_dict[k[k1]] next_type = k1 if next_type == 'L': # if type is list, convert it to a list and return return _convert_ddb_list_to_list(data[next_type]) if next_type == 'N': # TODO: handle various types of 'number' datatypes, (e.g. int, double) # if a number, convert to an int and return return int(data[next_type]) # else, just assume its a string and return return str(data[next_type])
[ "def", "_get_val_from_ddb_data", "(", "data", ",", "keylist", ")", ":", "next_type", "=", "None", "# iterate through the keylist to find the matching key/datatype", "for", "k", "in", "keylist", ":", "for", "k1", "in", "k", ":", "if", "next_type", "is", "None", ":"...
Given a dictionary of dynamodb data (including the datatypes) and a properly structured keylist, it will return the value of the lookup Args: data (dict): the raw dynamodb data keylist(list): a list of keys to lookup. This must include the datatype Returns: various: It returns the value from the dynamodb record, and casts it to a matching python datatype
[ "Given", "a", "dictionary", "of", "dynamodb", "data", "(", "including", "the", "datatypes", ")", "and", "a", "properly", "structured", "keylist", "it", "will", "return", "the", "value", "of", "the", "lookup" ]
python
train
Nukesor/pueue
pueue/client/displaying.py
https://github.com/Nukesor/pueue/blob/f1d276360454d4dd2738658a13df1e20caa4b926/pueue/client/displaying.py#L159-L228
def execute_show(args, root_dir): """Print stderr and stdout of the current running process. Args: args['watch'] (bool): If True, we open a curses session and tail the output live in the console. root_dir (string): The path to the root directory the daemon is running in. """ key = None if args.get('key'): key = args['key'] status = command_factory('status')({}, root_dir=root_dir) if key not in status['data'] or status['data'][key]['status'] != 'running': print('No running process with this key, use `log` to show finished processes.') return # In case no key provided, we take the oldest running process else: status = command_factory('status')({}, root_dir=root_dir) if isinstance(status['data'], str): print(status['data']) return for k in sorted(status['data'].keys()): if status['data'][k]['status'] == 'running': key = k break if key is None: print('No running process, use `log` to show finished processes.') return config_dir = os.path.join(root_dir, '.config/pueue') # Get current pueueSTDout file from tmp stdoutFile = os.path.join(config_dir, 'pueue_process_{}.stdout'.format(key)) stderrFile = os.path.join(config_dir, 'pueue_process_{}.stderr'.format(key)) stdoutDescriptor = open(stdoutFile, 'r') stderrDescriptor = open(stderrFile, 'r') running = True # Continually print output with curses or just print once if args['watch']: # Initialize curses stdscr = curses.initscr() curses.noecho() curses.cbreak() curses.curs_set(2) stdscr.keypad(True) stdscr.refresh() try: # Update output every two seconds while running: stdscr.clear() stdoutDescriptor.seek(0) message = stdoutDescriptor.read() stdscr.addstr(0, 0, message) stdscr.refresh() time.sleep(2) except Exception: # Curses cleanup curses.nocbreak() stdscr.keypad(False) curses.echo() curses.endwin() else: print('Stdout output:\n') stdoutDescriptor.seek(0) print(get_descriptor_output(stdoutDescriptor, key)) print('\n\nStderr output:\n') stderrDescriptor.seek(0) print(get_descriptor_output(stderrDescriptor, key))
[ "def", "execute_show", "(", "args", ",", "root_dir", ")", ":", "key", "=", "None", "if", "args", ".", "get", "(", "'key'", ")", ":", "key", "=", "args", "[", "'key'", "]", "status", "=", "command_factory", "(", "'status'", ")", "(", "{", "}", ",", ...
Print stderr and stdout of the current running process. Args: args['watch'] (bool): If True, we open a curses session and tail the output live in the console. root_dir (string): The path to the root directory the daemon is running in.
[ "Print", "stderr", "and", "stdout", "of", "the", "current", "running", "process", "." ]
python
train
gmr/queries
queries/session.py
https://github.com/gmr/queries/blob/a68855013dc6aaf9ed7b6909a4701f8da8796a0a/queries/session.py#L368-L382
def _status(self): """Return the current connection status as an integer value. The status should match one of the following constants: - queries.Session.INTRANS: Connection established, in transaction - queries.Session.PREPARED: Prepared for second phase of transaction - queries.Session.READY: Connected, no active transaction :rtype: int """ if self._conn.status == psycopg2.extensions.STATUS_BEGIN: return self.READY return self._conn.status
[ "def", "_status", "(", "self", ")", ":", "if", "self", ".", "_conn", ".", "status", "==", "psycopg2", ".", "extensions", ".", "STATUS_BEGIN", ":", "return", "self", ".", "READY", "return", "self", ".", "_conn", ".", "status" ]
Return the current connection status as an integer value. The status should match one of the following constants: - queries.Session.INTRANS: Connection established, in transaction - queries.Session.PREPARED: Prepared for second phase of transaction - queries.Session.READY: Connected, no active transaction :rtype: int
[ "Return", "the", "current", "connection", "status", "as", "an", "integer", "value", "." ]
python
train
StackStorm/pybind
pybind/slxos/v17s_1_02/acl_mirror/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/acl_mirror/__init__.py#L92-L113
def _set_source(self, v, load=False): """ Setter method for source, mapped from YANG variable /acl_mirror/source (list) If this variable is read-only (config: false) in the source YANG file, then _set_source is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_source() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("src_interface_type src_interface_name destination dst_interface_type dst_interface_name",source.source, yang_name="source", rest_name="source", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='src-interface-type src-interface-name destination dst-interface-type dst-interface-name', extensions={u'tailf-common': {u'info': u'Source interface for ACL Mirroring', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None}}), is_container='list', yang_name="source", rest_name="source", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Source interface for ACL Mirroring', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-access-list', defining_module='brocade-ip-access-list', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """source must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("src_interface_type src_interface_name destination dst_interface_type dst_interface_name",source.source, yang_name="source", rest_name="source", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='src-interface-type src-interface-name destination dst-interface-type dst-interface-name', 
extensions={u'tailf-common': {u'info': u'Source interface for ACL Mirroring', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None}}), is_container='list', yang_name="source", rest_name="source", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Source interface for ACL Mirroring', u'cli-suppress-list-no': None, u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-key-abbreviation': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-access-list', defining_module='brocade-ip-access-list', yang_type='list', is_config=True)""", }) self.__source = t if hasattr(self, '_set'): self._set()
[ "def", "_set_source", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", "base", ...
Setter method for source, mapped from YANG variable /acl_mirror/source (list) If this variable is read-only (config: false) in the source YANG file, then _set_source is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_source() directly.
[ "Setter", "method", "for", "source", "mapped", "from", "YANG", "variable", "/", "acl_mirror", "/", "source", "(", "list", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "source", "YANG", "file", ...
python
train
numenta/htmresearch
htmresearch/frameworks/poirazi_neuron_model/neuron_model.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/poirazi_neuron_model/neuron_model.py#L95-L102
def calculate_activation(self, datapoint): """ Only for a single datapoint """ activations = datapoint * self.dendrites activations = self.nonlinearity(activations) return activations.sum()
[ "def", "calculate_activation", "(", "self", ",", "datapoint", ")", ":", "activations", "=", "datapoint", "*", "self", ".", "dendrites", "activations", "=", "self", ".", "nonlinearity", "(", "activations", ")", "return", "activations", ".", "sum", "(", ")" ]
Only for a single datapoint
[ "Only", "for", "a", "single", "datapoint" ]
python
train
wummel/patool
patoolib/programs/xdms.py
https://github.com/wummel/patool/blob/d7e64d9fd60faaa4b3f824bd97c43ce59b185c40/patoolib/programs/xdms.py#L30-L33
def list_dms (archive, compression, cmd, verbosity, interactive): """List a DMS archive.""" check_archive_ext(archive) return [cmd, 'v', archive]
[ "def", "list_dms", "(", "archive", ",", "compression", ",", "cmd", ",", "verbosity", ",", "interactive", ")", ":", "check_archive_ext", "(", "archive", ")", "return", "[", "cmd", ",", "'v'", ",", "archive", "]" ]
List a DMS archive.
[ "List", "a", "DMS", "archive", "." ]
python
train
pycontribs/jira
jira/client.py
https://github.com/pycontribs/jira/blob/397db5d78441ed6a680a9b7db4c62030ade1fd8a/jira/client.py#L3782-L3826
def create_board(self, name, project_ids, preset="scrum", location_type='user', location_id=None): """Create a new board for the ``project_ids``. :param name: name of the board :type name: str :param project_ids: the projects to create the board in :type project_ids: str :param preset: What preset to use for this board. (Default: "scrum") :type preset: 'kanban', 'scrum', 'diy' :param location_type: the location type. Available in cloud. (Default: "user") :type location_type: 'user', 'project' :param location_id: the id of project that the board should be located under. Omit this for a 'user' location_type. Available in cloud. :type location_id: Optional[str] :return: The newly created board :rtype: Board """ if self._options['agile_rest_path'] != GreenHopperResource.GREENHOPPER_REST_PATH: raise NotImplementedError('JIRA Agile Public API does not support this request') payload = {} if isinstance(project_ids, string_types): ids = [] for p in project_ids.split(','): ids.append(self.project(p).id) project_ids = ','.join(ids) if location_id is not None: location_id = self.project(location_id).id payload['name'] = name if isinstance(project_ids, string_types): project_ids = project_ids.split(',') payload['projectIds'] = project_ids payload['preset'] = preset if self.deploymentType == 'Cloud': payload['locationType'] = location_type payload['locationId'] = location_id url = self._get_url( 'rapidview/create/presets', base=self.AGILE_BASE_URL) r = self._session.post( url, data=json.dumps(payload)) raw_issue_json = json_loads(r) return Board(self._options, self._session, raw=raw_issue_json)
[ "def", "create_board", "(", "self", ",", "name", ",", "project_ids", ",", "preset", "=", "\"scrum\"", ",", "location_type", "=", "'user'", ",", "location_id", "=", "None", ")", ":", "if", "self", ".", "_options", "[", "'agile_rest_path'", "]", "!=", "Green...
Create a new board for the ``project_ids``. :param name: name of the board :type name: str :param project_ids: the projects to create the board in :type project_ids: str :param preset: What preset to use for this board. (Default: "scrum") :type preset: 'kanban', 'scrum', 'diy' :param location_type: the location type. Available in cloud. (Default: "user") :type location_type: 'user', 'project' :param location_id: the id of project that the board should be located under. Omit this for a 'user' location_type. Available in cloud. :type location_id: Optional[str] :return: The newly created board :rtype: Board
[ "Create", "a", "new", "board", "for", "the", "project_ids", "." ]
python
train
blockstack/blockstack-core
blockstack/blockstackd.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/blockstackd.py#L1552-L1568
def rpc_get_all_names_cumulative( self, offset, count, **con_info ): """ Get all names that have ever existed, paginated Return {'status': true, 'names': [...]} on success Return {'error': ...} on error """ if not check_offset(offset): return {'error': 'invalid offset', 'http_status': 400} if not check_count(count, 100): return {'error': 'invalid count', 'http_status': 400} db = get_db_state(self.working_dir) all_names = db.get_all_names( offset=offset, count=count, include_expired=True ) db.close() return self.success_response( {'names': all_names} )
[ "def", "rpc_get_all_names_cumulative", "(", "self", ",", "offset", ",", "count", ",", "*", "*", "con_info", ")", ":", "if", "not", "check_offset", "(", "offset", ")", ":", "return", "{", "'error'", ":", "'invalid offset'", ",", "'http_status'", ":", "400", ...
Get all names that have ever existed, paginated Return {'status': true, 'names': [...]} on success Return {'error': ...} on error
[ "Get", "all", "names", "that", "have", "ever", "existed", "paginated", "Return", "{", "status", ":", "true", "names", ":", "[", "...", "]", "}", "on", "success", "Return", "{", "error", ":", "...", "}", "on", "error" ]
python
train
daler/trackhub
trackhub/helpers.py
https://github.com/daler/trackhub/blob/e4655f79177822529f80b923df117e38e28df702/trackhub/helpers.py#L134-L150
def print_rendered_results(results_dict): """ Pretty-prints the rendered results dictionary. Rendered results can be multiply-nested dictionaries; this uses JSON serialization to print a nice representation. """ class _HubComponentEncoder(json.JSONEncoder): def default(self, o): if isinstance(o, base.HubComponent): return repr(o) return json.JSONEncoder.default(self, o) formatted = json.dumps(results_dict, indent=4, cls=_HubComponentEncoder) # the returned string contains lines with trailing spaces, which causes # doctests to fail. So fix that here. for s in formatted.splitlines(): print(s.rstrip())
[ "def", "print_rendered_results", "(", "results_dict", ")", ":", "class", "_HubComponentEncoder", "(", "json", ".", "JSONEncoder", ")", ":", "def", "default", "(", "self", ",", "o", ")", ":", "if", "isinstance", "(", "o", ",", "base", ".", "HubComponent", "...
Pretty-prints the rendered results dictionary. Rendered results can be multiply-nested dictionaries; this uses JSON serialization to print a nice representation.
[ "Pretty", "-", "prints", "the", "rendered", "results", "dictionary", "." ]
python
train
linuxsoftware/ls.joyous
ls/joyous/models/events.py
https://github.com/linuxsoftware/ls.joyous/blob/316283140ca5171a68ad3170a5964fdc89be0b56/ls/joyous/models/events.py#L951-L958
def next_date(self): """ Date when this event is next scheduled to occur in the local time zone (Does not include postponements, but does exclude cancellations) """ nextDt = self.__localAfter(timezone.localtime(), dt.time.min) if nextDt is not None: return nextDt.date()
[ "def", "next_date", "(", "self", ")", ":", "nextDt", "=", "self", ".", "__localAfter", "(", "timezone", ".", "localtime", "(", ")", ",", "dt", ".", "time", ".", "min", ")", "if", "nextDt", "is", "not", "None", ":", "return", "nextDt", ".", "date", ...
Date when this event is next scheduled to occur in the local time zone (Does not include postponements, but does exclude cancellations)
[ "Date", "when", "this", "event", "is", "next", "scheduled", "to", "occur", "in", "the", "local", "time", "zone", "(", "Does", "not", "include", "postponements", "but", "does", "exclude", "cancellations", ")" ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L12053-L12087
def spkltc(targ, et, ref, abcorr, stobs): """ Return the state (position and velocity) of a target body relative to an observer, optionally corrected for light time, expressed relative to an inertial reference frame. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkltc_c.html :param targ: Target body. :type targ: int :param et: Observer epoch. :type et: float :param ref: Inertial reference frame of output state. :type ref: str :param abcorr: Aberration correction flag. :type abcorr: str :param stobs: State of the observer relative to the SSB. :type stobs: 6-Element Array of floats :return: One way light time between observer and target, Derivative of light time with respect to time :rtype: tuple """ assert len(stobs) == 6 targ = stypes.c_int(targ) et = ctypes.c_double(et) ref = stypes.stringToCharP(ref) abcorr = stypes.stringToCharP(abcorr) stobs = stypes.toDoubleVector(stobs) starg = stypes.emptyDoubleVector(6) lt = ctypes.c_double() dlt = ctypes.c_double() libspice.spkltc_c(targ, et, ref, abcorr, stobs, starg, ctypes.byref(lt), ctypes.byref(dlt)) return stypes.cVectorToPython(starg), lt.value, dlt.value
[ "def", "spkltc", "(", "targ", ",", "et", ",", "ref", ",", "abcorr", ",", "stobs", ")", ":", "assert", "len", "(", "stobs", ")", "==", "6", "targ", "=", "stypes", ".", "c_int", "(", "targ", ")", "et", "=", "ctypes", ".", "c_double", "(", "et", "...
Return the state (position and velocity) of a target body relative to an observer, optionally corrected for light time, expressed relative to an inertial reference frame. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/spkltc_c.html :param targ: Target body. :type targ: int :param et: Observer epoch. :type et: float :param ref: Inertial reference frame of output state. :type ref: str :param abcorr: Aberration correction flag. :type abcorr: str :param stobs: State of the observer relative to the SSB. :type stobs: 6-Element Array of floats :return: One way light time between observer and target, Derivative of light time with respect to time :rtype: tuple
[ "Return", "the", "state", "(", "position", "and", "velocity", ")", "of", "a", "target", "body", "relative", "to", "an", "observer", "optionally", "corrected", "for", "light", "time", "expressed", "relative", "to", "an", "inertial", "reference", "frame", "." ]
python
train
alevinval/scheduling
scheduling/graph.py
https://github.com/alevinval/scheduling/blob/127239712c0b73b929ca19b4b5c2855eebb7fcf0/scheduling/graph.py#L88-L110
def dfs(node, expand=expansion_all, callback=None, silent=True): """ Perform a depth-first search on the node graph :param node: GraphNode :param expand: Returns the list of Nodes to explore from a Node :param callback: Callback to run in each node :param silent: Don't throw exception on circular dependency :return: """ nodes = deque() for n in expand(node): nodes.append(n) while nodes: n = nodes.pop() n.visits += 1 if callback: callback(n) for k in expand(n): if k.visits < 1: nodes.append(k) else: if not silent: raise CircularDependency('Circular Dependency')
[ "def", "dfs", "(", "node", ",", "expand", "=", "expansion_all", ",", "callback", "=", "None", ",", "silent", "=", "True", ")", ":", "nodes", "=", "deque", "(", ")", "for", "n", "in", "expand", "(", "node", ")", ":", "nodes", ".", "append", "(", "...
Perform a depth-first search on the node graph :param node: GraphNode :param expand: Returns the list of Nodes to explore from a Node :param callback: Callback to run in each node :param silent: Don't throw exception on circular dependency :return:
[ "Perform", "a", "depth", "-", "first", "search", "on", "the", "node", "graph", ":", "param", "node", ":", "GraphNode", ":", "param", "expand", ":", "Returns", "the", "list", "of", "Nodes", "to", "explore", "from", "a", "Node", ":", "param", "callback", ...
python
train
numenta/nupic
src/nupic/data/stream_reader.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/stream_reader.py#L375-L388
def getDataRowCount(self): """ Iterates through stream to calculate total records after aggregation. This will alter the bookmark state. """ inputRowCountAfterAggregation = 0 while True: record = self.getNextRecord() if record is None: return inputRowCountAfterAggregation inputRowCountAfterAggregation += 1 if inputRowCountAfterAggregation > 10000: raise RuntimeError('No end of datastream found.')
[ "def", "getDataRowCount", "(", "self", ")", ":", "inputRowCountAfterAggregation", "=", "0", "while", "True", ":", "record", "=", "self", ".", "getNextRecord", "(", ")", "if", "record", "is", "None", ":", "return", "inputRowCountAfterAggregation", "inputRowCountAft...
Iterates through stream to calculate total records after aggregation. This will alter the bookmark state.
[ "Iterates", "through", "stream", "to", "calculate", "total", "records", "after", "aggregation", ".", "This", "will", "alter", "the", "bookmark", "state", "." ]
python
valid
CalebBell/fluids
fluids/particle_size_distribution.py
https://github.com/CalebBell/fluids/blob/57f556752e039f1d3e5a822f408c184783db2828/fluids/particle_size_distribution.py#L1913-L1952
def di_power(self, i, power=1): r'''Method to calculate a power of a particle class/bin in a generic way so as to support when there are as many `ds` as `fractions`, or one more diameter spec than `fractions`. When each bin has a lower and upper bound, the formula is as follows [1]_. .. math:: D_i^r = \frac{D_{i, ub}^{(r+1)} - D_{i, lb}^{(r+1)}} {(D_{i, ub} - D_{i, lb})(r+1)} Where `ub` represents the upper bound, and `lb` represents the lower bound. Otherwise, the standard definition is used: .. math:: D_i^r = D_i^r Parameters ---------- i : int The index of the diameter for the calculation, [-] power : int The exponent, [-] Returns ------- di_power : float The representative bin diameter raised to `power`, [m^power] References ---------- .. [1] ASTM E799 - 03(2015) - Standard Practice for Determining Data Criteria and Processing for Liquid Drop Size Analysis. ''' if self.size_classes: rt = power + 1 return ((self.ds[i+1]**rt - self.ds[i]**rt)/((self.ds[i+1] - self.ds[i])*rt)) else: return self.ds[i]**power
[ "def", "di_power", "(", "self", ",", "i", ",", "power", "=", "1", ")", ":", "if", "self", ".", "size_classes", ":", "rt", "=", "power", "+", "1", "return", "(", "(", "self", ".", "ds", "[", "i", "+", "1", "]", "**", "rt", "-", "self", ".", ...
r'''Method to calculate a power of a particle class/bin in a generic way so as to support when there are as many `ds` as `fractions`, or one more diameter spec than `fractions`. When each bin has a lower and upper bound, the formula is as follows [1]_. .. math:: D_i^r = \frac{D_{i, ub}^{(r+1)} - D_{i, lb}^{(r+1)}} {(D_{i, ub} - D_{i, lb})(r+1)} Where `ub` represents the upper bound, and `lb` represents the lower bound. Otherwise, the standard definition is used: .. math:: D_i^r = D_i^r Parameters ---------- i : int The index of the diameter for the calculation, [-] power : int The exponent, [-] Returns ------- di_power : float The representative bin diameter raised to `power`, [m^power] References ---------- .. [1] ASTM E799 - 03(2015) - Standard Practice for Determining Data Criteria and Processing for Liquid Drop Size Analysis.
[ "r", "Method", "to", "calculate", "a", "power", "of", "a", "particle", "class", "/", "bin", "in", "a", "generic", "way", "so", "as", "to", "support", "when", "there", "are", "as", "many", "ds", "as", "fractions", "or", "one", "more", "diameter", "spec"...
python
train
uber/tchannel-python
tchannel/tornado/peer.py
https://github.com/uber/tchannel-python/blob/ee08cce6234f24fd2373774988186dd374306c43/tchannel/tornado/peer.py#L352-L442
def send( self, arg1, arg2, arg3, headers=None, retry_limit=None, ttl=None, ): """Make a request to the Peer. :param arg1: String or Stream containing the contents of arg1. If None, an empty stream is used. :param arg2: String or Stream containing the contents of arg2. If None, an empty stream is used. :param arg3: String or Stream containing the contents of arg3. If None, an empty stream is used. :param headers: Headers will be put in the message as protocol header. :param retry_limit: Maximum number of retries will perform on the message. If the number is 0, it means no retry. :param ttl: Timeout for each request (second). :return: Future that contains the response from the peer. """ # find a peer connection # If we can't find available peer at the first time, we throw # NoAvailablePeerError. Later during retry, if we can't find available # peer, we throw exceptions from retry not NoAvailablePeerError. peer, connection = yield self._get_peer_connection() arg1, arg2, arg3 = ( maybe_stream(arg1), maybe_stream(arg2), maybe_stream(arg3) ) if retry_limit is None: retry_limit = DEFAULT_RETRY_LIMIT ttl = ttl or DEFAULT_TIMEOUT # hack to get endpoint from arg_1 for trace name arg1.close() endpoint = yield read_full(arg1) # set default transport headers headers = headers or {} for k, v in self.headers.iteritems(): headers.setdefault(k, v) if self.tracing_span is None: tracer = ClientTracer(channel=self.tchannel) self.tracing_span, _ = tracer.start_span( service=self.service, endpoint=endpoint, hostport=self._hostport, encoding=self.headers.get('as') ) request = Request( service=self.service, argstreams=[InMemStream(endpoint), arg2, arg3], id=connection.writer.next_message_id(), headers=headers, endpoint=endpoint, ttl=ttl, tracing=tracing.span_to_tracing_field(self.tracing_span) ) # only retry on non-stream request if request.is_streaming_request or self._hostport: retry_limit = 0 if request.is_streaming_request: request.ttl = 0 try: with self.tracing_span: # to ensure 
span is finished response = yield self.send_with_retry( request, peer, retry_limit, connection ) except Exception as e: # event: on_exception exc_info = sys.exc_info() yield self.tchannel.event_emitter.fire( EventType.on_exception, request, e, ) six.reraise(*exc_info) log.debug("Got response %s", response) raise gen.Return(response)
[ "def", "send", "(", "self", ",", "arg1", ",", "arg2", ",", "arg3", ",", "headers", "=", "None", ",", "retry_limit", "=", "None", ",", "ttl", "=", "None", ",", ")", ":", "# find a peer connection", "# If we can't find available peer at the first time, we throw", ...
Make a request to the Peer. :param arg1: String or Stream containing the contents of arg1. If None, an empty stream is used. :param arg2: String or Stream containing the contents of arg2. If None, an empty stream is used. :param arg3: String or Stream containing the contents of arg3. If None, an empty stream is used. :param headers: Headers will be put in the message as protocol header. :param retry_limit: Maximum number of retries will perform on the message. If the number is 0, it means no retry. :param ttl: Timeout for each request (second). :return: Future that contains the response from the peer.
[ "Make", "a", "request", "to", "the", "Peer", "." ]
python
train
nephila/djangocms-apphook-setup
djangocms_apphook_setup/base.py
https://github.com/nephila/djangocms-apphook-setup/blob/e82c0afdf966f859fe13dc80fcd417b44080f460/djangocms_apphook_setup/base.py#L60-L71
def _create_config(cls): """ Creates an ApphookConfig instance ``AutoCMSAppMixin.auto_setup['config_fields']`` is used to fill in the data of the instance. :return: ApphookConfig instance """ return cls.app_config.objects.create( namespace=cls.auto_setup['namespace'], **cls.auto_setup['config_fields'] )
[ "def", "_create_config", "(", "cls", ")", ":", "return", "cls", ".", "app_config", ".", "objects", ".", "create", "(", "namespace", "=", "cls", ".", "auto_setup", "[", "'namespace'", "]", ",", "*", "*", "cls", ".", "auto_setup", "[", "'config_fields'", "...
Creates an ApphookConfig instance ``AutoCMSAppMixin.auto_setup['config_fields']`` is used to fill in the data of the instance. :return: ApphookConfig instance
[ "Creates", "an", "ApphookConfig", "instance" ]
python
train
DLR-RM/RAFCON
source/rafcon/core/id_generator.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/id_generator.py#L143-L156
def global_variable_id_generator(size=10, chars=string.ascii_uppercase): """ Create a new and unique global variable id Generates an id for a global variable. It randomly samples from random ascii uppercase letters size times and concatenates them. If the id already exists it draws a new one. :param size: the length of the generated keys :param chars: the set of characters a sample draws from """ new_global_variable_id = ''.join(random.choice(chars) for x in range(size)) while new_global_variable_id in used_global_variable_ids: new_global_variable_id = ''.join(random.choice(chars) for x in range(size)) used_global_variable_ids.append(new_global_variable_id) return new_global_variable_id
[ "def", "global_variable_id_generator", "(", "size", "=", "10", ",", "chars", "=", "string", ".", "ascii_uppercase", ")", ":", "new_global_variable_id", "=", "''", ".", "join", "(", "random", ".", "choice", "(", "chars", ")", "for", "x", "in", "range", "(",...
Create a new and unique global variable id Generates an id for a global variable. It randomly samples from random ascii uppercase letters size times and concatenates them. If the id already exists it draws a new one. :param size: the length of the generated keys :param chars: the set of characters a sample draws from
[ "Create", "a", "new", "and", "unique", "global", "variable", "id" ]
python
train
lbusoni/pysilico
pysilico/gui/image_show_widget/image_show_basic_widget.py
https://github.com/lbusoni/pysilico/blob/44872c8c202bedc8af5d7ac0cd2971912a59a365/pysilico/gui/image_show_widget/image_show_basic_widget.py#L504-L513
def _getProcessedImage(self): """Returns the image data after it has been processed by any normalization options in use. This method also sets the attributes self.levelMin and self.levelMax to indicate the range of data in the image.""" if self.imageDisp is None: self.imageDisp = self.image self.levelMin, self.levelMax = self._quickLevels( self.imageDisp) #list( map(float, self._quickLevels(self.imageDisp))) return self.imageDisp
[ "def", "_getProcessedImage", "(", "self", ")", ":", "if", "self", ".", "imageDisp", "is", "None", ":", "self", ".", "imageDisp", "=", "self", ".", "image", "self", ".", "levelMin", ",", "self", ".", "levelMax", "=", "self", ".", "_quickLevels", "(", "s...
Returns the image data after it has been processed by any normalization options in use. This method also sets the attributes self.levelMin and self.levelMax to indicate the range of data in the image.
[ "Returns", "the", "image", "data", "after", "it", "has", "been", "processed", "by", "any", "normalization", "options", "in", "use", ".", "This", "method", "also", "sets", "the", "attributes", "self", ".", "levelMin", "and", "self", ".", "levelMax", "to", "...
python
train
django-danceschool/django-danceschool
danceschool/guestlist/models.py
https://github.com/django-danceschool/django-danceschool/blob/bb08cbf39017a812a5a94bdb4ea34170bf1a30ba/danceschool/guestlist/models.py#L33-L45
def recentEvents(self): ''' Get the set of recent and upcoming events to which this list applies. ''' return Event.objects.filter( Q(pk__in=self.individualEvents.values_list('pk',flat=True)) | Q(session__in=self.eventSessions.all()) | Q(publicevent__category__in=self.eventCategories.all()) | Q(series__category__in=self.seriesCategories.all()) ).filter( Q(startTime__lte=timezone.now() + timedelta(days=60)) & Q(endTime__gte=timezone.now() - timedelta(days=60)) )
[ "def", "recentEvents", "(", "self", ")", ":", "return", "Event", ".", "objects", ".", "filter", "(", "Q", "(", "pk__in", "=", "self", ".", "individualEvents", ".", "values_list", "(", "'pk'", ",", "flat", "=", "True", ")", ")", "|", "Q", "(", "sessio...
Get the set of recent and upcoming events to which this list applies.
[ "Get", "the", "set", "of", "recent", "and", "upcoming", "events", "to", "which", "this", "list", "applies", "." ]
python
train
GuiltyTargets/ppi-network-annotation
src/ppi_network_annotation/parsers.py
https://github.com/GuiltyTargets/ppi-network-annotation/blob/4d7b6713485f2d0a0957e6457edc1b1b5a237460/src/ppi_network_annotation/parsers.py#L89-L129
def handle_dataframe( df: pd.DataFrame, entrez_id_name, log2_fold_change_name, adjusted_p_value_name, entrez_delimiter, base_mean=None, ) -> List[Gene]: """Convert data frame on differential expression values as Gene objects. :param df: Data frame with columns showing values on differential expression. :param cfp: An object that includes paths, cutoffs and other information. :return list: A list of Gene objects. """ logger.info("In _handle_df()") if base_mean is not None and base_mean in df.columns: df = df[pd.notnull(df[base_mean])] df = df[pd.notnull(df[entrez_id_name])] df = df[pd.notnull(df[log2_fold_change_name])] df = df[pd.notnull(df[adjusted_p_value_name])] # try: # import bio2bel_hgnc # except ImportError: # logger.debug('skipping mapping') # else: # manager = bio2bel_hgnc.Manager() # # TODO @cthoyt return [ Gene( entrez_id=entrez_id, log2_fold_change=data[log2_fold_change_name], padj=data[adjusted_p_value_name] ) for _, data in df.iterrows() for entrez_id in str(data[entrez_id_name]).split(entrez_delimiter) ]
[ "def", "handle_dataframe", "(", "df", ":", "pd", ".", "DataFrame", ",", "entrez_id_name", ",", "log2_fold_change_name", ",", "adjusted_p_value_name", ",", "entrez_delimiter", ",", "base_mean", "=", "None", ",", ")", "->", "List", "[", "Gene", "]", ":", "logger...
Convert data frame on differential expression values as Gene objects. :param df: Data frame with columns showing values on differential expression. :param cfp: An object that includes paths, cutoffs and other information. :return list: A list of Gene objects.
[ "Convert", "data", "frame", "on", "differential", "expression", "values", "as", "Gene", "objects", "." ]
python
train
hcpl/xkbgroup
xkbgroup/core.py
https://github.com/hcpl/xkbgroup/blob/fcf4709a3c8221e0cdf62c09e5cccda232b0104c/xkbgroup/core.py#L261-L267
def groups_names(self): """Names of all groups (get-only). :getter: Returns names of all groups :type: list of str """ return _ListProxy(self._get_group_name_by_num(i) for i in range(self.groups_count))
[ "def", "groups_names", "(", "self", ")", ":", "return", "_ListProxy", "(", "self", ".", "_get_group_name_by_num", "(", "i", ")", "for", "i", "in", "range", "(", "self", ".", "groups_count", ")", ")" ]
Names of all groups (get-only). :getter: Returns names of all groups :type: list of str
[ "Names", "of", "all", "groups", "(", "get", "-", "only", ")", "." ]
python
train
Tinche/django-bower-cache
registry/models.py
https://github.com/Tinche/django-bower-cache/blob/5245b2ee80c33c09d85ce0bf8f047825d9df2118/registry/models.py#L40-L43
def pull(self): """Pull from the origin.""" repo_root = settings.REPO_ROOT pull_from_origin(join(repo_root, self.name))
[ "def", "pull", "(", "self", ")", ":", "repo_root", "=", "settings", ".", "REPO_ROOT", "pull_from_origin", "(", "join", "(", "repo_root", ",", "self", ".", "name", ")", ")" ]
Pull from the origin.
[ "Pull", "from", "the", "origin", "." ]
python
train
genialis/resolwe
resolwe/flow/migration_ops.py
https://github.com/genialis/resolwe/blob/f7bb54932c81ec0cfc5b5e80d238fceaeaa48d86/resolwe/flow/migration_ops.py#L396-L406
def deconstruct(self): """Deconstruct operation.""" return ( self.__class__.__name__, [], { 'process': self.process, 'field': self._raw_field, 'new_field': self.new_field, } )
[ "def", "deconstruct", "(", "self", ")", ":", "return", "(", "self", ".", "__class__", ".", "__name__", ",", "[", "]", ",", "{", "'process'", ":", "self", ".", "process", ",", "'field'", ":", "self", ".", "_raw_field", ",", "'new_field'", ":", "self", ...
Deconstruct operation.
[ "Deconstruct", "operation", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/work_item_tracking/work_item_tracking_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/work_item_tracking/work_item_tracking_client.py#L1159-L1178
def get_update(self, id, update_number, project=None): """GetUpdate. [Preview API] Returns a single update for a work item :param int id: :param int update_number: :param str project: Project ID or project name :rtype: :class:`<WorkItemUpdate> <azure.devops.v5_1.work-item-tracking.models.WorkItemUpdate>` """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if id is not None: route_values['id'] = self._serialize.url('id', id, 'int') if update_number is not None: route_values['updateNumber'] = self._serialize.url('update_number', update_number, 'int') response = self._send(http_method='GET', location_id='6570bf97-d02c-4a91-8d93-3abe9895b1a9', version='5.1-preview.3', route_values=route_values) return self._deserialize('WorkItemUpdate', response)
[ "def", "get_update", "(", "self", ",", "id", ",", "update_number", ",", "project", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", "...
GetUpdate. [Preview API] Returns a single update for a work item :param int id: :param int update_number: :param str project: Project ID or project name :rtype: :class:`<WorkItemUpdate> <azure.devops.v5_1.work-item-tracking.models.WorkItemUpdate>`
[ "GetUpdate", ".", "[", "Preview", "API", "]", "Returns", "a", "single", "update", "for", "a", "work", "item", ":", "param", "int", "id", ":", ":", "param", "int", "update_number", ":", ":", "param", "str", "project", ":", "Project", "ID", "or", "projec...
python
train
redhat-cip/python-dciclient
dciclient/v1/shell_commands/remoteci.py
https://github.com/redhat-cip/python-dciclient/blob/a4aa5899062802bbe4c30a075d8447f8d222d214/dciclient/v1/shell_commands/remoteci.py#L264-L275
def refresh_keys(context, id, etag): """refresh_keys(context, id, etag) Refresh a remoteci key pair. >>> dcictl remoteci-refresh-keys [OPTIONS] :param string id: ID of the remote CI [required] :param string etag: Entity tag of the remote CI resource [required] """ result = remoteci.refresh_keys(context, id=id, etag=etag) utils.format_output(result, context.format)
[ "def", "refresh_keys", "(", "context", ",", "id", ",", "etag", ")", ":", "result", "=", "remoteci", ".", "refresh_keys", "(", "context", ",", "id", "=", "id", ",", "etag", "=", "etag", ")", "utils", ".", "format_output", "(", "result", ",", "context", ...
refresh_keys(context, id, etag) Refresh a remoteci key pair. >>> dcictl remoteci-refresh-keys [OPTIONS] :param string id: ID of the remote CI [required] :param string etag: Entity tag of the remote CI resource [required]
[ "refresh_keys", "(", "context", "id", "etag", ")" ]
python
train
miguelgrinberg/python-engineio
engineio/client.py
https://github.com/miguelgrinberg/python-engineio/blob/261fd67103cb5d9a44369415748e66fdf62de6fb/engineio/client.py#L446-L456
def _trigger_event(self, event, *args, **kwargs): """Invoke an event handler.""" run_async = kwargs.pop('run_async', False) if event in self.handlers: if run_async: return self.start_background_task(self.handlers[event], *args) else: try: return self.handlers[event](*args) except: self.logger.exception(event + ' handler error')
[ "def", "_trigger_event", "(", "self", ",", "event", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "run_async", "=", "kwargs", ".", "pop", "(", "'run_async'", ",", "False", ")", "if", "event", "in", "self", ".", "handlers", ":", "if", "run_asyn...
Invoke an event handler.
[ "Invoke", "an", "event", "handler", "." ]
python
train
boriel/zxbasic
asmparse.py
https://github.com/boriel/zxbasic/blob/23b28db10e41117805bdb3c0f78543590853b132/asmparse.py#L624-L629
def p_line_label_asm(p): """ line : LABEL asms NEWLINE """ p[0] = p[2] __DEBUG__("Declaring '%s%s' (value %04Xh) in %i" % (NAMESPACE, p[1], MEMORY.org, p.lineno(1))) MEMORY.declare_label(p[1], p.lineno(1))
[ "def", "p_line_label_asm", "(", "p", ")", ":", "p", "[", "0", "]", "=", "p", "[", "2", "]", "__DEBUG__", "(", "\"Declaring '%s%s' (value %04Xh) in %i\"", "%", "(", "NAMESPACE", ",", "p", "[", "1", "]", ",", "MEMORY", ".", "org", ",", "p", ".", "linen...
line : LABEL asms NEWLINE
[ "line", ":", "LABEL", "asms", "NEWLINE" ]
python
train
mkorpela/pabot
pabot/pabotlib.py
https://github.com/mkorpela/pabot/blob/b7d85546a58e398d579bb14fd9135858ec08a031/pabot/pabotlib.py#L231-L242
def acquire_value_set(self, *tags): """ Reserve a set of values for this execution. No other process can reserve the same set of values while the set is reserved. Acquired value set needs to be released after use to allow other processes to access it. Add tags to limit the possible value sets that this returns. """ setname = self._acquire_value_set(*tags) if setname is None: raise ValueError("Could not aquire a value set") return setname
[ "def", "acquire_value_set", "(", "self", ",", "*", "tags", ")", ":", "setname", "=", "self", ".", "_acquire_value_set", "(", "*", "tags", ")", "if", "setname", "is", "None", ":", "raise", "ValueError", "(", "\"Could not aquire a value set\"", ")", "return", ...
Reserve a set of values for this execution. No other process can reserve the same set of values while the set is reserved. Acquired value set needs to be released after use to allow other processes to access it. Add tags to limit the possible value sets that this returns.
[ "Reserve", "a", "set", "of", "values", "for", "this", "execution", ".", "No", "other", "process", "can", "reserve", "the", "same", "set", "of", "values", "while", "the", "set", "is", "reserved", ".", "Acquired", "value", "set", "needs", "to", "be", "rele...
python
train
metric-learn/metric-learn
metric_learn/base_metric.py
https://github.com/metric-learn/metric-learn/blob/d945df1342c69012608bb70b92520392a0853de6/metric_learn/base_metric.py#L573-L596
def predict(self, quadruplets): """Predicts the ordering between sample distances in input quadruplets. For each quadruplet, returns 1 if the quadruplet is in the right order ( first pair is more similar than second pair), and -1 if not. Parameters ---------- quadruplets : array-like, shape=(n_quadruplets, 4, n_features) or (n_quadruplets, 4) 3D Array of quadruplets to predict, with each row corresponding to four points, or 2D array of indices of quadruplets if the metric learner uses a preprocessor. Returns ------- prediction : `numpy.ndarray` of floats, shape=(n_constraints,) Predictions of the ordering of pairs, for each quadruplet. """ check_is_fitted(self, 'transformer_') quadruplets = check_input(quadruplets, type_of_inputs='tuples', preprocessor=self.preprocessor_, estimator=self, tuple_size=self._tuple_size) return np.sign(self.decision_function(quadruplets))
[ "def", "predict", "(", "self", ",", "quadruplets", ")", ":", "check_is_fitted", "(", "self", ",", "'transformer_'", ")", "quadruplets", "=", "check_input", "(", "quadruplets", ",", "type_of_inputs", "=", "'tuples'", ",", "preprocessor", "=", "self", ".", "prep...
Predicts the ordering between sample distances in input quadruplets. For each quadruplet, returns 1 if the quadruplet is in the right order ( first pair is more similar than second pair), and -1 if not. Parameters ---------- quadruplets : array-like, shape=(n_quadruplets, 4, n_features) or (n_quadruplets, 4) 3D Array of quadruplets to predict, with each row corresponding to four points, or 2D array of indices of quadruplets if the metric learner uses a preprocessor. Returns ------- prediction : `numpy.ndarray` of floats, shape=(n_constraints,) Predictions of the ordering of pairs, for each quadruplet.
[ "Predicts", "the", "ordering", "between", "sample", "distances", "in", "input", "quadruplets", "." ]
python
train
wilson-eft/wilson
wilson/translate/wet.py
https://github.com/wilson-eft/wilson/blob/4164f55ff663d4f668c6e2b4575fd41562662cc9/wilson/translate/wet.py#L269-L415
def _Fierz_to_JMS_III_IV_V(Fqqqq, qqqq): """From 4-quark Fierz to JMS basis for Classes III, IV and V. `qqqq` should be of the form 'sbuc', 'sdcc', 'ucuu' etc.""" F = Fqqqq.copy() #case dduu classIII = ['sbuc', 'sbcu', 'dbuc', 'dbcu', 'dsuc', 'dscu'] classVdduu = ['sbuu' , 'dbuu', 'dsuu', 'sbcc' , 'dbcc', 'dscc'] if qqqq in classIII + classVdduu: f1 = str(dflav[qqqq[0]] + 1) f2 = str(dflav[qqqq[1]] + 1) f3 = str(uflav[qqqq[2]] + 1) f4 = str(uflav[qqqq[3]] + 1) d = {'V1udLL_' + f3 + f4 + f1 + f2: F['F' + qqqq + '1'] + F['F' + qqqq + '2'] / Nc, 'V8udLL_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '2'], 'V1duLR_' + f1 + f2 + f3 + f4: F['F' + qqqq + '3'] + F['F' + qqqq + '4'] / Nc, 'V8duLR_' + f1 + f2 + f3 + f4: 2 * F['F' + qqqq + '4'], 'S1udRR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '5'] + F['F' + qqqq + '6'] / Nc - 4 * F['F' + qqqq + '9'] - (4 * F['F' + qqqq + '10']) / Nc, 'S8udRR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '6'] - 8 * F['F' + qqqq + '10'], 'S1udduRR_' + f3 + f2 + f1 + f4: -((8 * F['F' + qqqq + '9']) / Nc) - 8 * F['F' + qqqq + '10'], 'V8udduLR_' + f4 + f1 + f2 + f3: -F['F' + qqqq + '7'].conjugate(), 'V1udduLR_' + f4 + f1 + f2 + f3: -(F['F' + qqqq + '7'].conjugate() / (2 * Nc)) - F['F' + qqqq + '8'].conjugate() / 2, 'S8udduRR_' + f3 + f2 + f1 + f4: -16 * F['F' + qqqq + '9'], 'V1udRR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '1p'] + F['F' + qqqq + '2p'] / Nc, 'V8udRR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '2p'], 'V1udLR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '3p'] + F['F' + qqqq + '4p'] / Nc, 'V8udLR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '4p'], 'S1udRR_' + f4 + f3 + f2 + f1: F['F' + qqqq + '5p'].conjugate() + F['F' + qqqq + '6p'].conjugate() / Nc - 4 * F['F' + qqqq + '9p'].conjugate() - (4 * F['F' + qqqq + '10p'].conjugate()) / Nc, 'S8udRR_' + f4 + f3 + f2 + f1: 2 * F['F' + qqqq + '6p'].conjugate() - 8 * F['F' + qqqq + '10p'].conjugate(), 'S1udduRR_' + f4 + f1 + f2 + f3: -((8 * F['F' + qqqq + '9p'].conjugate()) / Nc) - 8 * F['F' + qqqq + '10p'].conjugate(), 
'V8udduLR_' + f3 + f2 + f1 + f4: -F['F' + qqqq + '7p'], 'V1udduLR_' + f3 + f2 + f1 + f4: -(F['F' + qqqq + '7p'] / (2 * Nc)) - F['F' + qqqq + '8p'] / 2, 'S8udduRR_' + f4 + f1 + f2 + f3: -16 * F['F' + qqqq + '9p'].conjugate(), } return symmetrize_JMS_dict(d) #case uudd classVuudd = ['ucdd', 'ucss','ucbb'] if qqqq in classVuudd: f3 = str(uflav[qqqq[0]] + 1) f4 = str(uflav[qqqq[1]] + 1) f1 = str(dflav[qqqq[2]] + 1) f2 = str(dflav[qqqq[3]] + 1) d = {'V1udLL_' + f3 + f4 + f1 + f2: F['F' + qqqq + '1'] + F['F' + qqqq + '2'] / Nc, 'V8udLL_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '2'], 'V1duLR_' + f1 + f2 + f3 + f4: F['F' + qqqq + '3p'] + F['F' + qqqq + '4p'] / Nc, 'V8duLR_' + f1 + f2 + f3 + f4: 2 * F['F' + qqqq + '4p'], 'S1udRR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '5'] + F['F' + qqqq + '6'] / Nc - 4 * F['F' + qqqq + '9'] - (4 * F['F' + qqqq + '10']) / Nc, 'S8udRR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '6'] - 8 * F['F' + qqqq + '10'], 'S1udduRR_' + f3 + f2 + f1 + f4: -((8 * F['F' + qqqq + '9']) / Nc) - 8 * F['F' + qqqq + '10'], 'V8udduLR_' + f4 + f1 + f2 + f3: -F['F' + qqqq + '7p'].conjugate(), 'V1udduLR_' + f4 + f1 + f2 + f3: -(F['F' + qqqq + '7p'].conjugate() / (2 * Nc)) - F['F' + qqqq + '8p'].conjugate() / 2, 'S8udduRR_' + f3 + f2 + f1 + f4: -16 * F['F' + qqqq + '9'], 'V1udRR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '1p'] + F['F' + qqqq + '2p'] / Nc, 'V8udRR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '2p'], 'V1udLR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '3'] + F['F' + qqqq + '4'] / Nc, 'V8udLR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '4'], 'S1udRR_' + f4 + f3 + f2 + f1: F['F' + qqqq + '5p'].conjugate() + F['F' + qqqq + '6p'].conjugate() / Nc - 4 * F['F' + qqqq + '9p'].conjugate() - (4 * F['F' + qqqq + '10p'].conjugate()) / Nc, 'S8udRR_' + f4 + f3 + f2 + f1: 2 * F['F' + qqqq + '6p'].conjugate() - 8 * F['F' + qqqq + '10p'].conjugate(), 'S1udduRR_' + f4 + f1 + f2 + f3: -((8 * F['F' + qqqq + '9p'].conjugate()) / Nc) - 8 * F['F' + qqqq + '10p'].conjugate(), 'V8udduLR_' + f3 + 
f2 + f1 + f4: -F['F' + qqqq + '7'], 'V1udduLR_' + f3 + f2 + f1 + f4: -(F['F' + qqqq + '7'] / (2 * Nc)) - F['F' + qqqq + '8'] / 2, 'S8udduRR_' + f4 + f1 + f2 + f3: -16 * F['F' + qqqq + '9p'].conjugate(), } return symmetrize_JMS_dict(d) #case dddd classIV = ['sbsd', 'dbds', 'bsbd'] classVdddd = ['sbss', 'dbdd', 'dsdd', 'sbbb', 'dbbb', 'dsss'] classVddddind = ['sbdd', 'dsbb', 'dbss'] classVuuuu = ['ucuu', 'cucc', 'uccc', 'cuuu'] if qqqq in classVdddd + classIV + classVuuuu: # if 2nd and 4th or 1st and 3rd fields are the same, Fierz can be used # to express the even coeffs in terms of the odd ones for key in F: # to make sure we're not screwing things up, check that none # of the even WCs is actually present assert int(key[5:].replace('p', '')) % 2 == 1, "Unexpected key in Fierz basis: " + key for p in ['', 'p']: if qqqq in ['sbbb', 'dbbb', 'dsss', 'uccc']: F['F' + qqqq + '2' + p] = F['F' + qqqq + '1' + p] F['F' + qqqq + '4' + p] = -1 / 2 * F['F' + qqqq + '7' + p] F['F' + qqqq + '6' + p] = -1 / 2 * F['F' + qqqq + '5' + p] - 6 * F['F' + qqqq + '9' + p] F['F' + qqqq + '8' + p] = -2 * F['F' + qqqq + '3' + p] F['F' + qqqq + '10' + p] = -1 / 8 * F['F' + qqqq + '5' + p] + 1 / 2 * F['F' + qqqq + '9' + p] elif qqqq in ['sbss', 'dbdd', 'dsdd', 'sbsd', 'dbds', 'bsbd', 'ucuu']: notp = 'p' if p == '' else '' F['F' + qqqq + '2' + p] = F['F' + qqqq + '1' + p] F['F' + qqqq + '4' + p] = -1 / 2 * F['F' + qqqq + '7' + notp] F['F' + qqqq + '6' + notp] = -1 / 2 * F['F' + qqqq + '5' + notp] - 6 * F['F' + qqqq + '9' + notp] F['F' + qqqq + '8' + notp] = -2 * F['F' + qqqq + '3' + p] F['F' + qqqq + '10' + notp] = -1 / 8 * F['F' + qqqq + '5' + notp] + 1 / 2 * F['F' + qqqq + '9' + notp] if qqqq in classIV + classVdddd + classVddddind: f1 = str(dflav[qqqq[0]] + 1) f2 = str(dflav[qqqq[1]] + 1) f3 = str(dflav[qqqq[2]] + 1) f4 = str(dflav[qqqq[3]] + 1) d = { 'VddLL_' + f3 + f4 + f1 + f2: F['F' + qqqq + '1'], 'VddLL_' + f1 + f4 + f3 + f2: F['F' + qqqq + '2'], 'V1ddLR_' + f1 + f2 + f3 + f4: F['F' + 
qqqq + '3'] + F['F' + qqqq + '4'] / Nc, 'V8ddLR_' + f1 + f2 + f3 + f4: 2 * F['F' + qqqq + '4'], 'S1ddRR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '5'] + F['F' + qqqq + '6'] / Nc - 4 * F['F' + qqqq + '9'] - (4 * F['F' + qqqq + '10']) / Nc, 'S8ddRR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '6'] - 8 * F['F' + qqqq + '10'], 'V8ddLR_' + f1 + f4 + f3 + f2: -F['F' + qqqq + '7'], 'V1ddLR_' + f1 + f4 + f3 + f2: -(F['F' + qqqq + '7'] / (2 * Nc)) - F['F' + qqqq + '8'] / 2, 'S1ddRR_' + f1 + f4 + f3 + f2: -((8 * F['F' + qqqq + '9']) / Nc) - 8 * F['F' + qqqq + '10'], 'S8ddRR_' + f3 + f2 + f1 + f4: -16 * F['F' + qqqq + '9'], 'VddRR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '1p'], 'VddRR_' + f1 + f4 + f3 + f2: F['F' + qqqq + '2p'], 'V1ddLR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '3p'] + F['F' + qqqq + '4p'] / Nc, 'V8ddLR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '4p'], 'S1ddRR_' + f4 + f3 + f2 + f1: F['F' + qqqq + '5p'].conjugate() + F['F' + qqqq + '6p'].conjugate() / Nc - 4 * F['F' + qqqq + '9p'].conjugate() - (4 * F['F' + qqqq + '10p'].conjugate()) / Nc, 'S8ddRR_' + f4 + f3 + f2 + f1: 2 * F['F' + qqqq + '6p'].conjugate() - 8 * F['F' + qqqq + '10p'].conjugate(), 'V8ddLR_' + f3 + f2 + f1 + f4: -F['F' + qqqq + '7p'], 'V1ddLR_' + f3 + f2 + f1 + f4: -(F['F' + qqqq + '7p'] / (2 * Nc)) - F['F' + qqqq + '8p'] / 2, 'S1ddRR_' + f4 + f1 + f2 + f3: -((8 * F['F' + qqqq + '9p'].conjugate()) / Nc) - 8 * F['F' + qqqq + '10p'].conjugate(), 'S8ddRR_' + f4 + f1 + f2 + f3: -16 * F['F' + qqqq + '9p'].conjugate(), } return symmetrize_JMS_dict(d) #case uuuu if qqqq in classVuuuu: f1 = str(uflav[qqqq[0]] + 1) f2 = str(uflav[qqqq[1]] + 1) f3 = str(uflav[qqqq[2]] + 1) f4 = str(uflav[qqqq[3]] + 1) d = { 'VuuLL_' + f3 + f4 + f1 + f2: F['F' + qqqq + '1'], 'VuuLL_' + f1 + f4 + f3 + f2: F['F' + qqqq + '2'], 'V1uuLR_' + f1 + f2 + f3 + f4: F['F' + qqqq + '3'] + F['F' + qqqq + '4'] / Nc, 'V8uuLR_' + f1 + f2 + f3 + f4: 2 * F['F' + qqqq + '4'], 'S1uuRR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '5'] + F['F' + qqqq + '6'] / Nc - 
4 * F['F' + qqqq + '9'] - (4 * F['F' + qqqq + '10']) / Nc, 'S8uuRR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '6'] - 8 * F['F' + qqqq + '10'], 'V8uuLR_' + f1 + f4 + f3 + f2: -F['F' + qqqq + '7'], 'V1uuLR_' + f1 + f4 + f3 + f2: -(F['F' + qqqq + '7'] / (2 * Nc)) - F['F' + qqqq + '8'] / 2, 'S1uuRR_' + f1 + f4 + f3 + f2: -((8 * F['F' + qqqq + '9']) / Nc) - 8 * F['F' + qqqq + '10'], 'S8uuRR_' + f3 + f2 + f1 + f4: -16 * F['F' + qqqq + '9'], 'VuuRR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '1p'], 'VuuRR_' + f1 + f4 + f3 + f2: F['F' + qqqq + '2p'], 'V1uuLR_' + f3 + f4 + f1 + f2: F['F' + qqqq + '3p'] + F['F' + qqqq + '4p'] / Nc, 'V8uuLR_' + f3 + f4 + f1 + f2: 2 * F['F' + qqqq + '4p'], 'S1uuRR_' + f4 + f3 + f2 + f1: F['F' + qqqq + '5p'].conjugate() + F['F' + qqqq + '6p'].conjugate() / Nc - 4 * F['F' + qqqq + '9p'].conjugate() - (4 * F['F' + qqqq + '10p'].conjugate()) / Nc, 'S8uuRR_' + f4 + f3 + f2 + f1: 2 * F['F' + qqqq + '6p'].conjugate() - 8 * F['F' + qqqq + '10p'].conjugate(), 'V8uuLR_' + f3 + f2 + f1 + f4: -F['F' + qqqq + '7p'], 'V1uuLR_' + f3 + f2 + f1 + f4: -(F['F' + qqqq + '7p'] / (2 * Nc)) - F['F' + qqqq + '8p'] / 2, 'S1uuRR_' + f4 + f1 + f2 + f3: -((8 * F['F' + qqqq + '9p'].conjugate()) / Nc) - 8 * F['F' + qqqq + '10p'].conjugate(), 'S8uuRR_' + f4 + f1 + f2 + f3: -16 * F['F' + qqqq + '9p'].conjugate() } return symmetrize_JMS_dict(d) raise ValueError("Case not implemented: {}".format(qqqq))
[ "def", "_Fierz_to_JMS_III_IV_V", "(", "Fqqqq", ",", "qqqq", ")", ":", "F", "=", "Fqqqq", ".", "copy", "(", ")", "#case dduu", "classIII", "=", "[", "'sbuc'", ",", "'sbcu'", ",", "'dbuc'", ",", "'dbcu'", ",", "'dsuc'", ",", "'dscu'", "]", "classVdduu", ...
From 4-quark Fierz to JMS basis for Classes III, IV and V. `qqqq` should be of the form 'sbuc', 'sdcc', 'ucuu' etc.
[ "From", "4", "-", "quark", "Fierz", "to", "JMS", "basis", "for", "Classes", "III", "IV", "and", "V", ".", "qqqq", "should", "be", "of", "the", "form", "sbuc", "sdcc", "ucuu", "etc", "." ]
python
train
smarie/python-parsyfiles
parsyfiles/parsing_combining_parsers.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_combining_parsers.py#L239-L302
def add_parser_to_cascade(self, parser: AnyParser, typ: Type = None): """ Adds the provided parser to this cascade. If this is the first parser, it will configure the cascade according to the parser capabilities (single and multifile support, extensions). Subsequent parsers will have to support the same capabilities at least, to be added. :param parser: :param typ: :return: """ # the first parser added will configure the cascade if not self.configured: self.supported_exts = parser.supported_exts self.supported_types = parser.supported_types # check if new parser is compliant with previous ones if self.supports_singlefile(): if not parser.supports_singlefile(): raise ValueError( 'Cannot add this parser to this parsing cascade : it does not match the rest of the cascades ' 'configuration (singlefile support)') if self.supports_multifile(): if not parser.supports_multifile(): raise ValueError( 'Cannot add this parser to this parsing cascade : it does not match the rest of the cascades ' 'configuration (multifile support)') if AnyObject not in parser.supported_types: if typ is None: # in that case the expected types for this parser will be self.supported_types if AnyObject in self.supported_types: raise ValueError( 'Cannot add this parser to this parsing cascade : it does not match the rest of the cascades ' 'configuration (the cascade supports any type while the parser only supports ' + str(parser.supported_types) + ')') else: missing_types = set(self.supported_types) - set(parser.supported_types) if len(missing_types) > 0: raise ValueError( 'Cannot add this parser to this parsing cascade : it does not match the rest of the ' 'cascades configuration (supported types should at least contain the supported types ' 'already in place. 
The parser misses type(s) ' + str(missing_types) + ')') else: # a parser is added but with a specific type target (parallel cascade) if typ == AnyObject: raise ValueError( 'Cannot add this parser to this parsing cascade : it does not match the expected type "Any", ' 'it only supports ' + str(parser.supported_types)) # else: # if get_base_generic_type(typ) not in parser.supported_types: # raise ValueError( # 'Cannot add this parser to this parsing cascade : it does not match the expected type ' + # str(typ) + ', it only supports ' + str(parser.supported_types)) missing_exts = set(self.supported_exts) - set(parser.supported_exts) if len(missing_exts) > 0: raise ValueError( 'Cannot add this parser to this parsing cascade : it does not match the rest of the cascades ' 'configuration (supported extensions should at least contain the supported extensions already in ' 'place. The parser misses extension(s) ' + str(missing_exts) + ')') # finally add it self._parsers_list.append((typ, parser))
[ "def", "add_parser_to_cascade", "(", "self", ",", "parser", ":", "AnyParser", ",", "typ", ":", "Type", "=", "None", ")", ":", "# the first parser added will configure the cascade", "if", "not", "self", ".", "configured", ":", "self", ".", "supported_exts", "=", ...
Adds the provided parser to this cascade. If this is the first parser, it will configure the cascade according to the parser capabilities (single and multifile support, extensions). Subsequent parsers will have to support the same capabilities at least, to be added. :param parser: :param typ: :return:
[ "Adds", "the", "provided", "parser", "to", "this", "cascade", ".", "If", "this", "is", "the", "first", "parser", "it", "will", "configure", "the", "cascade", "according", "to", "the", "parser", "capabilities", "(", "single", "and", "multifile", "support", "e...
python
train
toumorokoshi/sprinter
sprinter/environment.py
https://github.com/toumorokoshi/sprinter/blob/846697a7a087e69c61d075232e754d6975a64152/sprinter/environment.py#L335-L349
def write_debug_log(self, file_path): """ Write the debug log to a file """ with open(file_path, "wb+") as fh: fh.write(system.get_system_info().encode('utf-8')) # writing to debug stream self._debug_stream.seek(0) fh.write(self._debug_stream.read().encode('utf-8')) fh.write("The following errors occured:\n".encode('utf-8')) for error in self._errors: fh.write((error + "\n").encode('utf-8')) for k, v in self._error_dict.items(): if len(v) > 0: fh.write(("Error(s) in %s with formula %s:\n" % k).encode('utf-8')) for error in v: fh.write((error + "\n").encode('utf-8'))
[ "def", "write_debug_log", "(", "self", ",", "file_path", ")", ":", "with", "open", "(", "file_path", ",", "\"wb+\"", ")", "as", "fh", ":", "fh", ".", "write", "(", "system", ".", "get_system_info", "(", ")", ".", "encode", "(", "'utf-8'", ")", ")", "...
Write the debug log to a file
[ "Write", "the", "debug", "log", "to", "a", "file" ]
python
train
joferkington/mpldatacursor
mpldatacursor/datacursor.py
https://github.com/joferkington/mpldatacursor/blob/7dabc589ed02c35ac5d89de5931f91e0323aa795/mpldatacursor/datacursor.py#L277-L310
def event_info(self, event): """Get a dict of info for the artist selected by "event".""" def default_func(event): return {} registry = { AxesImage : [pick_info.image_props], PathCollection : [pick_info.scatter_props, self._contour_info, pick_info.collection_props], Line2D : [pick_info.line_props, pick_info.errorbar_props], LineCollection : [pick_info.collection_props, self._contour_info, pick_info.errorbar_props], PatchCollection : [pick_info.collection_props, self._contour_info], PolyCollection : [pick_info.collection_props, pick_info.scatter_props], QuadMesh : [pick_info.collection_props], Rectangle : [pick_info.rectangle_props], } x, y = event.mouseevent.xdata, event.mouseevent.ydata props = dict(x=x, y=y, label=event.artist.get_label(), event=event) props['ind'] = getattr(event, 'ind', None) props['point_label'] = self._point_label(event) funcs = registry.get(type(event.artist), [default_func]) # 3D artist don't share inheritance. Fall back to naming convention. if '3D' in type(event.artist).__name__: funcs += [pick_info.three_dim_props] for func in funcs: props.update(func(event)) return props
[ "def", "event_info", "(", "self", ",", "event", ")", ":", "def", "default_func", "(", "event", ")", ":", "return", "{", "}", "registry", "=", "{", "AxesImage", ":", "[", "pick_info", ".", "image_props", "]", ",", "PathCollection", ":", "[", "pick_info", ...
Get a dict of info for the artist selected by "event".
[ "Get", "a", "dict", "of", "info", "for", "the", "artist", "selected", "by", "event", "." ]
python
train
ReFirmLabs/binwalk
src/binwalk/core/magic.py
https://github.com/ReFirmLabs/binwalk/blob/a0c5315fd2bae167e5c3d8469ce95d5defc743c2/src/binwalk/core/magic.py#L473-L546
def _do_math(self, offset, expression): ''' Parses and evaluates complex expressions, e.g., "(4.l+12)", "(6*32)", etc. @offset - The offset inside self.data that the current signature starts at. @expressions - The expression to evaluate. Returns an integer value that is the result of the evaluated expression. ''' # Does the expression contain an offset (e.g., "(4.l+12)")? if '.' in expression and '(' in expression: replacements = {} for period in [match.start() for match in self.period.finditer(expression)]: # Separate the offset field into the integer offset and type # values (o and t respsectively) s = expression[:period].rfind('(') + 1 # The offset address may be an evaluatable expression, such as '(4+0.L)', typically the result # of the original offset being something like '(&0.L)'. o = binwalk.core.common.MathExpression(expression[s:period]).value t = expression[period + 1] # Re-build just the parsed offset portion of the expression text = "%s.%c" % (expression[s:period], t) # Have we already evaluated this offset expression? If so, skip # it. 
if binwalk.core.common.has_key(replacements, text): continue # The offset specified in the expression is relative to the # starting offset inside self.data o += offset # Read the value from self.data at the specified offset try: # Big and little endian byte format if t in ['b', 'B']: v = struct.unpack('b', binwalk.core.compat.str2bytes(self.data[o:o + 1]))[0] # Little endian short format elif t == 's': v = struct.unpack('<h', binwalk.core.compat.str2bytes(self.data[o:o + 2]))[0] # Little endian long format elif t == 'l': v = struct.unpack('<i', binwalk.core.compat.str2bytes(self.data[o:o + 4]))[0] # Big endian short format elif t == 'S': v = struct.unpack('>h', binwalk.core.compat.str2bytes(self.data[o:o + 2]))[0] # Bit endian long format elif t == 'L': v = struct.unpack('>i', binwalk.core.compat.str2bytes(self.data[o:o + 4]))[0] # struct.error is thrown if there is not enough bytes in # self.data for the specified format type except struct.error as e: v = 0 # Keep track of all the recovered values from self.data replacements[text] = v # Finally, replace all offset expressions with their corresponding # text value v = expression for (text, value) in binwalk.core.common.iterator(replacements): v = v.replace(text, "%d" % value) # If no offset, then it's just an evaluatable math expression (e.g., # "(32+0x20)") else: v = expression # Evaluate the final expression value = binwalk.core.common.MathExpression(v).value return value
[ "def", "_do_math", "(", "self", ",", "offset", ",", "expression", ")", ":", "# Does the expression contain an offset (e.g., \"(4.l+12)\")?", "if", "'.'", "in", "expression", "and", "'('", "in", "expression", ":", "replacements", "=", "{", "}", "for", "period", "in...
Parses and evaluates complex expressions, e.g., "(4.l+12)", "(6*32)", etc. @offset - The offset inside self.data that the current signature starts at. @expressions - The expression to evaluate. Returns an integer value that is the result of the evaluated expression.
[ "Parses", "and", "evaluates", "complex", "expressions", "e", ".", "g", ".", "(", "4", ".", "l", "+", "12", ")", "(", "6", "*", "32", ")", "etc", "." ]
python
train
odlgroup/odl
odl/util/utility.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/util/utility.py#L1309-L1430
def method_repr_string(inst_str, meth_str, arg_strs=None, allow_mixed_seps=True): r"""Return a repr string for a method that respects line width. This function is useful to generate a ``repr`` string for a derived class that is created through a method, for instance :: functional.translated(x) as a better way of representing :: FunctionalTranslation(functional, x) Parameters ---------- inst_str : str Stringification of a class instance. meth_str : str Name of the method (not including the ``'.'``). arg_strs : sequence of str, optional Stringification of the arguments to the method. allow_mixed_seps : bool, optional If ``False`` and the argument strings do not fit on one line, use ``',\n'`` to separate all strings. By default, a mixture of ``', '`` and ``',\n'`` is used to fit as much on one line as possible. In case some of the ``arg_strs`` span multiple lines, it is usually advisable to set ``allow_mixed_seps`` to ``False`` since the result tends to be more readable that way. Returns ------- meth_repr_str : str Concatenation of all strings in a way that the line width is respected. Examples -------- >>> inst_str = 'MyClass' >>> meth_str = 'empty' >>> arg_strs = [] >>> print(method_repr_string(inst_str, meth_str, arg_strs)) MyClass.empty() >>> inst_str = 'MyClass' >>> meth_str = 'fromfile' >>> arg_strs = ["'tmpfile.txt'"] >>> print(method_repr_string(inst_str, meth_str, arg_strs)) MyClass.fromfile('tmpfile.txt') >>> inst_str = "MyClass('init string')" >>> meth_str = 'method' >>> arg_strs = ['2.0'] >>> print(method_repr_string(inst_str, meth_str, arg_strs)) MyClass('init string').method(2.0) >>> long_inst_str = ( ... "MyClass('long string that will definitely trigger a line break')" ... 
) >>> meth_str = 'method' >>> long_arg1 = "'long argument string that should come on the next line'" >>> arg2 = 'param1=1' >>> arg3 = 'param2=2.0' >>> arg_strs = [long_arg1, arg2, arg3] >>> print(method_repr_string(long_inst_str, meth_str, arg_strs)) MyClass( 'long string that will definitely trigger a line break' ).method( 'long argument string that should come on the next line', param1=1, param2=2.0 ) >>> print(method_repr_string(long_inst_str, meth_str, arg_strs, ... allow_mixed_seps=False)) MyClass( 'long string that will definitely trigger a line break' ).method( 'long argument string that should come on the next line', param1=1, param2=2.0 ) """ linewidth = np.get_printoptions()['linewidth'] # Part up to the method name if (len(inst_str) + 1 + len(meth_str) + 1 <= linewidth or '(' not in inst_str): init_parts = [inst_str, meth_str] # Length of the line to the end of the method name meth_line_start_len = len(inst_str) + 1 + len(meth_str) else: # TODO(kohr-h): use `maxsplit=1` kwarg, not supported in Py 2 left, rest = inst_str.split('(', 1) right, middle = rest[::-1].split(')', 1) middle, right = middle[::-1], right[::-1] if middle.startswith('\n') and middle.endswith('\n'): # Already on multiple lines new_inst_str = inst_str else: new_inst_str = '(\n'.join([left, indent(middle)]) + '\n)' + right # Length of the line to the end of the method name, consisting of # ')' + '.' 
+ <method name> meth_line_start_len = 1 + 1 + len(meth_str) init_parts = [new_inst_str, meth_str] # Method call part arg_str_oneline = ', '.join(arg_strs) if meth_line_start_len + 1 + len(arg_str_oneline) + 1 <= linewidth: meth_call_str = '(' + arg_str_oneline + ')' elif not arg_str_oneline: meth_call_str = '(\n)' else: if allow_mixed_seps: arg_seps = _separators(arg_strs, linewidth - 4) # indented else: arg_seps = [',\n'] * (len(arg_strs) - 1) full_arg_str = '' for arg_str, sep in zip_longest(arg_strs, arg_seps, fillvalue=''): full_arg_str += arg_str + sep meth_call_str = '(\n' + indent(full_arg_str) + '\n)' return '.'.join(init_parts) + meth_call_str
[ "def", "method_repr_string", "(", "inst_str", ",", "meth_str", ",", "arg_strs", "=", "None", ",", "allow_mixed_seps", "=", "True", ")", ":", "linewidth", "=", "np", ".", "get_printoptions", "(", ")", "[", "'linewidth'", "]", "# Part up to the method name", "if",...
r"""Return a repr string for a method that respects line width. This function is useful to generate a ``repr`` string for a derived class that is created through a method, for instance :: functional.translated(x) as a better way of representing :: FunctionalTranslation(functional, x) Parameters ---------- inst_str : str Stringification of a class instance. meth_str : str Name of the method (not including the ``'.'``). arg_strs : sequence of str, optional Stringification of the arguments to the method. allow_mixed_seps : bool, optional If ``False`` and the argument strings do not fit on one line, use ``',\n'`` to separate all strings. By default, a mixture of ``', '`` and ``',\n'`` is used to fit as much on one line as possible. In case some of the ``arg_strs`` span multiple lines, it is usually advisable to set ``allow_mixed_seps`` to ``False`` since the result tends to be more readable that way. Returns ------- meth_repr_str : str Concatenation of all strings in a way that the line width is respected. Examples -------- >>> inst_str = 'MyClass' >>> meth_str = 'empty' >>> arg_strs = [] >>> print(method_repr_string(inst_str, meth_str, arg_strs)) MyClass.empty() >>> inst_str = 'MyClass' >>> meth_str = 'fromfile' >>> arg_strs = ["'tmpfile.txt'"] >>> print(method_repr_string(inst_str, meth_str, arg_strs)) MyClass.fromfile('tmpfile.txt') >>> inst_str = "MyClass('init string')" >>> meth_str = 'method' >>> arg_strs = ['2.0'] >>> print(method_repr_string(inst_str, meth_str, arg_strs)) MyClass('init string').method(2.0) >>> long_inst_str = ( ... "MyClass('long string that will definitely trigger a line break')" ... 
) >>> meth_str = 'method' >>> long_arg1 = "'long argument string that should come on the next line'" >>> arg2 = 'param1=1' >>> arg3 = 'param2=2.0' >>> arg_strs = [long_arg1, arg2, arg3] >>> print(method_repr_string(long_inst_str, meth_str, arg_strs)) MyClass( 'long string that will definitely trigger a line break' ).method( 'long argument string that should come on the next line', param1=1, param2=2.0 ) >>> print(method_repr_string(long_inst_str, meth_str, arg_strs, ... allow_mixed_seps=False)) MyClass( 'long string that will definitely trigger a line break' ).method( 'long argument string that should come on the next line', param1=1, param2=2.0 )
[ "r", "Return", "a", "repr", "string", "for", "a", "method", "that", "respects", "line", "width", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/html/notebook/handlers.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/html/notebook/handlers.py#L170-L180
def ws_url(self): """websocket url matching the current request turns http[s]://host[:port] into ws[s]://host[:port] """ proto = self.request.protocol.replace('http', 'ws') host = self.application.ipython_app.websocket_host # default to config value if host == '': host = self.request.host # get from request return "%s://%s" % (proto, host)
[ "def", "ws_url", "(", "self", ")", ":", "proto", "=", "self", ".", "request", ".", "protocol", ".", "replace", "(", "'http'", ",", "'ws'", ")", "host", "=", "self", ".", "application", ".", "ipython_app", ".", "websocket_host", "# default to config value", ...
websocket url matching the current request turns http[s]://host[:port] into ws[s]://host[:port]
[ "websocket", "url", "matching", "the", "current", "request" ]
python
test
ska-sa/katcp-python
katcp/server.py
https://github.com/ska-sa/katcp-python/blob/9127c826a1d030c53b84d0e95743e20e5c5ea153/katcp/server.py#L810-L833
def on_message(self, client_conn, msg): """Handle message. Returns ------- ready : Future A future that will resolve once we're ready, else None. Notes ----- *on_message* should not be called again until *ready* has resolved. """ MAX_QUEUE_SIZE = 30 if len(self._msg_queue) >= MAX_QUEUE_SIZE: # This should never happen if callers to handle_message wait # for its futures to resolve before sending another message. # NM 2014-10-06: Except when there are multiple clients. Oops. raise RuntimeError('MessageHandlerThread unhandled ' 'message queue full, not handling message') ready_future = Future() self._msg_queue.append((ready_future, client_conn, msg)) self._wake.set() return ready_future
[ "def", "on_message", "(", "self", ",", "client_conn", ",", "msg", ")", ":", "MAX_QUEUE_SIZE", "=", "30", "if", "len", "(", "self", ".", "_msg_queue", ")", ">=", "MAX_QUEUE_SIZE", ":", "# This should never happen if callers to handle_message wait", "# for its futures t...
Handle message. Returns ------- ready : Future A future that will resolve once we're ready, else None. Notes ----- *on_message* should not be called again until *ready* has resolved.
[ "Handle", "message", "." ]
python
train
mitsei/dlkit
dlkit/json_/commenting/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/commenting/sessions.py#L2572-L2593
def get_child_books(self, book_id): """Gets the child books of the given ``id``. arg: book_id (osid.id.Id): the ``Id`` of the ``Book`` to query return: (osid.commenting.BookList) - the child books of the ``id`` raise: NotFound - a ``Book`` identified by ``Id is`` not found raise: NullArgument - ``book_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.BinHierarchySession.get_child_bins if self._catalog_session is not None: return self._catalog_session.get_child_catalogs(catalog_id=book_id) return BookLookupSession( self._proxy, self._runtime).get_books_by_ids( list(self.get_child_book_ids(book_id)))
[ "def", "get_child_books", "(", "self", ",", "book_id", ")", ":", "# Implemented from template for", "# osid.resource.BinHierarchySession.get_child_bins", "if", "self", ".", "_catalog_session", "is", "not", "None", ":", "return", "self", ".", "_catalog_session", ".", "ge...
Gets the child books of the given ``id``. arg: book_id (osid.id.Id): the ``Id`` of the ``Book`` to query return: (osid.commenting.BookList) - the child books of the ``id`` raise: NotFound - a ``Book`` identified by ``Id is`` not found raise: NullArgument - ``book_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "child", "books", "of", "the", "given", "id", "." ]
python
train
pypa/pipenv
pipenv/vendor/distlib/manifest.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/manifest.py#L297-L315
def _exclude_pattern(self, pattern, anchor=True, prefix=None, is_regex=False): """Remove strings (presumably filenames) from 'files' that match 'pattern'. Other parameters are the same as for 'include_pattern()', above. The list 'self.files' is modified in place. Return True if files are found. This API is public to allow e.g. exclusion of SCM subdirs, e.g. when packaging source distributions """ found = False pattern_re = self._translate_pattern(pattern, anchor, prefix, is_regex) for f in list(self.files): if pattern_re.search(f): self.files.remove(f) found = True return found
[ "def", "_exclude_pattern", "(", "self", ",", "pattern", ",", "anchor", "=", "True", ",", "prefix", "=", "None", ",", "is_regex", "=", "False", ")", ":", "found", "=", "False", "pattern_re", "=", "self", ".", "_translate_pattern", "(", "pattern", ",", "an...
Remove strings (presumably filenames) from 'files' that match 'pattern'. Other parameters are the same as for 'include_pattern()', above. The list 'self.files' is modified in place. Return True if files are found. This API is public to allow e.g. exclusion of SCM subdirs, e.g. when packaging source distributions
[ "Remove", "strings", "(", "presumably", "filenames", ")", "from", "files", "that", "match", "pattern", "." ]
python
train
ofek/depq
run_performance_check.py
https://github.com/ofek/depq/blob/370e3ad503d3e9cedc3c49dc64add393ba945764/run_performance_check.py#L63-L114
def binary_insert(self, item, priority): """Traditional binary search. Performance: O(n log n)""" with self.lock: self_data = self.data rotate = self_data.rotate maxlen = self._maxlen length = len(self_data) index = 0 min = 0 max = length - 1 while max - min > 10: mid = (min + max) // 2 # If index in 1st half of list if priority > self_data[mid][1]: max = mid - 1 # If index in 2nd half of list else: min = mid + 1 for i in range(min, max + 1): if priority > self_data[i][1]: index = i break elif i == max: index = max + 1 shift = length - index # Never shift more than half length of depq if shift > length // 2: shift = length % shift rotate(-shift) self_data.appendleft((item, priority)) rotate(shift) else: rotate(shift) self_data.append((item, priority)) rotate(-shift) try: self.items[item] += 1 except TypeError: self.items[repr(item)] += 1 if maxlen is not None and maxlen < len(self_data): self._poplast()
[ "def", "binary_insert", "(", "self", ",", "item", ",", "priority", ")", ":", "with", "self", ".", "lock", ":", "self_data", "=", "self", ".", "data", "rotate", "=", "self_data", ".", "rotate", "maxlen", "=", "self", ".", "_maxlen", "length", "=", "len"...
Traditional binary search. Performance: O(n log n)
[ "Traditional", "binary", "search", ".", "Performance", ":", "O", "(", "n", "log", "n", ")" ]
python
train
raymontag/kppy
kppy/database.py
https://github.com/raymontag/kppy/blob/a43f1fff7d49da1da4b3d8628a1b3ebbaf47f43a/kppy/database.py#L861-L871
def _cbc_encrypt(self, content, final_key): """This method encrypts the content.""" aes = AES.new(final_key, AES.MODE_CBC, self._enc_iv) padding = (16 - len(content) % AES.block_size) for _ in range(padding): content += chr(padding).encode() temp = bytes(content) return aes.encrypt(temp)
[ "def", "_cbc_encrypt", "(", "self", ",", "content", ",", "final_key", ")", ":", "aes", "=", "AES", ".", "new", "(", "final_key", ",", "AES", ".", "MODE_CBC", ",", "self", ".", "_enc_iv", ")", "padding", "=", "(", "16", "-", "len", "(", "content", "...
This method encrypts the content.
[ "This", "method", "encrypts", "the", "content", "." ]
python
train
pgmpy/pgmpy
pgmpy/readwrite/ProbModelXML.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/readwrite/ProbModelXML.py#L768-L787
def add_probnet_additionalconstraints(self, constraint): """ Adds Additional Constraints to the probnet dict. Parameters ---------- criterion: <Element Constraint at AdditionalConstraints Node in XML> etree Element consisting Constraint tag. Examples ------- >>> reader = ProbModelXMLReader() >>> reader.add_additionalconstraints(constraint) """ constraint_name = constraint.attrib['name'] self.probnet['AdditionalConstraints'][constraint_name] = {} for argument in constraint.findall('Argument'): argument_name = argument.attrib['name'] argument_value = argument.attrib['value'] self.probnet['AdditionalConstraints'][constraint_name][argument_name] = argument_value
[ "def", "add_probnet_additionalconstraints", "(", "self", ",", "constraint", ")", ":", "constraint_name", "=", "constraint", ".", "attrib", "[", "'name'", "]", "self", ".", "probnet", "[", "'AdditionalConstraints'", "]", "[", "constraint_name", "]", "=", "{", "}"...
Adds Additional Constraints to the probnet dict. Parameters ---------- criterion: <Element Constraint at AdditionalConstraints Node in XML> etree Element consisting Constraint tag. Examples ------- >>> reader = ProbModelXMLReader() >>> reader.add_additionalconstraints(constraint)
[ "Adds", "Additional", "Constraints", "to", "the", "probnet", "dict", "." ]
python
train
OnroerendErfgoed/crabpy_pyramid
crabpy_pyramid/__init__.py
https://github.com/OnroerendErfgoed/crabpy_pyramid/blob/b727ea55838d71575db96e987b536a0bac9f6a7a/crabpy_pyramid/__init__.py#L181-L213
def conditional_http_tween_factory(handler, registry): """ Tween that adds ETag headers and tells Pyramid to enable conditional responses where appropriate. """ settings = registry.settings if hasattr(registry, 'settings') else {} if 'generate_etag_for.list' in settings: route_names = settings.get('generate_etag_for.list').split() GENERATE_ETAG_ROUTE_NAMES.update(route_names) def conditional_http_tween(request): response = handler(request) if request.matched_route.name in GENERATE_ETAG_ROUTE_NAMES: # If the Last-Modified header has been set, we want to enable the # conditional response processing. if response.last_modified is not None: response.conditional_response = True # We want to only enable the conditional machinery if either we # were given an explicit ETag header by the view or we have a # buffered response and can generate the ETag header ourself. if response.etag is not None: response.conditional_response = True elif (isinstance(response.app_iter, Sequence) and len(response.app_iter) == 1) and response.body is not None: response.conditional_response = True response.md5_etag() return response return conditional_http_tween
[ "def", "conditional_http_tween_factory", "(", "handler", ",", "registry", ")", ":", "settings", "=", "registry", ".", "settings", "if", "hasattr", "(", "registry", ",", "'settings'", ")", "else", "{", "}", "if", "'generate_etag_for.list'", "in", "settings", ":",...
Tween that adds ETag headers and tells Pyramid to enable conditional responses where appropriate.
[ "Tween", "that", "adds", "ETag", "headers", "and", "tells", "Pyramid", "to", "enable", "conditional", "responses", "where", "appropriate", "." ]
python
train
matousc89/padasip
padasip/ann/mlp.py
https://github.com/matousc89/padasip/blob/c969eadd7fa181a84da0554d737fc13c6450d16f/padasip/ann/mlp.py#L172-L198
def update(self, w, e): """ This function make update according provided target and the last used input vector. **Args:** * `d` : target (float or 1-dimensional array). Size depends on number of MLP outputs. **Returns:** * `w` : weights of the layers (2-dimensional layer). Every row represents one node. * `e` : error used for update (float or 1-diemnsional array). Size correspond to size of input `d`. """ if len(w.shape) == 1: e = self.activation(self.y, f=self.f, der=True) * e * w dw = self.mu * np.outer(e, self.x) else: e = self.activation(self.y, f=self.f, der=True) * (1 - self.y) * np.dot(e, w) dw = self.mu * np.outer(e, self.x) w = self.w[:,1:] self.w += dw return w, e
[ "def", "update", "(", "self", ",", "w", ",", "e", ")", ":", "if", "len", "(", "w", ".", "shape", ")", "==", "1", ":", "e", "=", "self", ".", "activation", "(", "self", ".", "y", ",", "f", "=", "self", ".", "f", ",", "der", "=", "True", ")...
This function make update according provided target and the last used input vector. **Args:** * `d` : target (float or 1-dimensional array). Size depends on number of MLP outputs. **Returns:** * `w` : weights of the layers (2-dimensional layer). Every row represents one node. * `e` : error used for update (float or 1-diemnsional array). Size correspond to size of input `d`.
[ "This", "function", "make", "update", "according", "provided", "target", "and", "the", "last", "used", "input", "vector", "." ]
python
train
ericpruitt/cronex
cronex/__init__.py
https://github.com/ericpruitt/cronex/blob/ff48a3a71bbcdf01cff46c0bf9376e69492c9224/cronex/__init__.py#L302-L362
def parse_atom(parse, minmax): """ Returns a set containing valid values for a given cron-style range of numbers. The 'minmax' arguments is a two element iterable containing the inclusive upper and lower limits of the expression. Examples: >>> parse_atom("1-5",(0,6)) set([1, 2, 3, 4, 5]) >>> parse_atom("*/6",(0,23)) set([0, 6, 12, 18]) >>> parse_atom("18-6/4",(0,23)) set([18, 22, 0, 4]) >>> parse_atom("*/9",(0,23)) set([0, 9, 18]) """ parse = parse.strip() increment = 1 if parse == '*': return set(xrange(minmax[0], minmax[1] + 1)) elif parse.isdigit(): # A single number still needs to be returned as a set value = int(parse) if value >= minmax[0] and value <= minmax[1]: return set((value,)) else: raise ValueError("\"%s\" is not within valid range." % parse) elif '-' in parse or '/' in parse: divide = parse.split('/') subrange = divide[0] if len(divide) == 2: # Example: 1-3/5 or */7 increment should be 5 and 7 respectively increment = int(divide[1]) if '-' in subrange: # Example: a-b prefix, suffix = [int(n) for n in subrange.split('-')] if prefix < minmax[0] or suffix > minmax[1]: raise ValueError("\"%s\" is not within valid range." % parse) elif subrange.isdigit(): # Handle offset increments e.g. 5/15 to run at :05, :20, :35, and :50 return set(xrange(int(subrange), minmax[1] + 1, increment)) elif subrange == '*': # Include all values with the given range prefix, suffix = minmax else: raise ValueError("Unrecognized symbol \"%s\"" % subrange) if prefix < suffix: # Example: 7-10 return set(xrange(prefix, suffix + 1, increment)) else: # Example: 12-4/2; (12, 12 + n, ..., 12 + m*n) U (n_0, ..., 4) noskips = list(xrange(prefix, minmax[1] + 1)) noskips += list(xrange(minmax[0], suffix + 1)) return set(noskips[::increment]) else: raise ValueError("Atom \"%s\" not in a recognized format." % parse)
[ "def", "parse_atom", "(", "parse", ",", "minmax", ")", ":", "parse", "=", "parse", ".", "strip", "(", ")", "increment", "=", "1", "if", "parse", "==", "'*'", ":", "return", "set", "(", "xrange", "(", "minmax", "[", "0", "]", ",", "minmax", "[", "...
Returns a set containing valid values for a given cron-style range of numbers. The 'minmax' arguments is a two element iterable containing the inclusive upper and lower limits of the expression. Examples: >>> parse_atom("1-5",(0,6)) set([1, 2, 3, 4, 5]) >>> parse_atom("*/6",(0,23)) set([0, 6, 12, 18]) >>> parse_atom("18-6/4",(0,23)) set([18, 22, 0, 4]) >>> parse_atom("*/9",(0,23)) set([0, 9, 18])
[ "Returns", "a", "set", "containing", "valid", "values", "for", "a", "given", "cron", "-", "style", "range", "of", "numbers", ".", "The", "minmax", "arguments", "is", "a", "two", "element", "iterable", "containing", "the", "inclusive", "upper", "and", "lower"...
python
train
bwhite/hadoopy
hadoopy/thirdparty/pyinstaller/PyInstaller/utils/winresource.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/thirdparty/pyinstaller/PyInstaller/utils/winresource.py#L243-L264
def UpdateResourcesFromDict(dstpath, res, types=None, names=None, languages=None): """ Update or add resources from resource dict in dll/exe file dstpath. types = a list of resource types to update (None = all) names = a list of resource names to update (None = all) languages = a list of resource languages to update (None = all) """ if types: types = set(types) if names: names = set(names) if langauges: languages = set(languages) for type_ in res: if not types or type_ in types: for name in res[type_]: if not names or name in names: for language in res[type_][name]: if not languages or language in languages: UpdateResources(dstpath, res[type_][name][language], [type_], [name], [language])
[ "def", "UpdateResourcesFromDict", "(", "dstpath", ",", "res", ",", "types", "=", "None", ",", "names", "=", "None", ",", "languages", "=", "None", ")", ":", "if", "types", ":", "types", "=", "set", "(", "types", ")", "if", "names", ":", "names", "=",...
Update or add resources from resource dict in dll/exe file dstpath. types = a list of resource types to update (None = all) names = a list of resource names to update (None = all) languages = a list of resource languages to update (None = all)
[ "Update", "or", "add", "resources", "from", "resource", "dict", "in", "dll", "/", "exe", "file", "dstpath", ".", "types", "=", "a", "list", "of", "resource", "types", "to", "update", "(", "None", "=", "all", ")", "names", "=", "a", "list", "of", "res...
python
train
digidotcom/python-wvalib
wva/core.py
https://github.com/digidotcom/python-wvalib/blob/4252735e2775f80ebaffd813fbe84046d26906b3/wva/core.py#L102-L112
def get_event_stream(self): """Get the event stream associated with this WVA Note that this event stream is shared across all users of this WVA device as the WVA only supports a single event stream. :return: a new :class:`WVAEventStream` instance """ if self._event_stream is None: self._event_stream = WVAEventStream(self._http_client) return self._event_stream
[ "def", "get_event_stream", "(", "self", ")", ":", "if", "self", ".", "_event_stream", "is", "None", ":", "self", ".", "_event_stream", "=", "WVAEventStream", "(", "self", ".", "_http_client", ")", "return", "self", ".", "_event_stream" ]
Get the event stream associated with this WVA Note that this event stream is shared across all users of this WVA device as the WVA only supports a single event stream. :return: a new :class:`WVAEventStream` instance
[ "Get", "the", "event", "stream", "associated", "with", "this", "WVA" ]
python
train
vertexproject/synapse
synapse/lib/syntax.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/syntax.py#L614-L630
def formpivotin(self): ''' <- * / <- prop ''' self.ignore(whitespace) self.nextmust('<-') self.ignore(whitespace) if self.nextchar() == '*': self.offs += 1 return s_ast.PivotIn() prop = self.absprop() return s_ast.PivotInFrom(kids=(prop,))
[ "def", "formpivotin", "(", "self", ")", ":", "self", ".", "ignore", "(", "whitespace", ")", "self", ".", "nextmust", "(", "'<-'", ")", "self", ".", "ignore", "(", "whitespace", ")", "if", "self", ".", "nextchar", "(", ")", "==", "'*'", ":", "self", ...
<- * / <- prop
[ "<", "-", "*", "/", "<", "-", "prop" ]
python
train
brainiak/brainiak
brainiak/funcalign/sssrm.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/funcalign/sssrm.py#L299-L383
def _sssrm(self, data_align, data_sup, labels): """Block-Coordinate Descent algorithm for fitting SS-SRM. Parameters ---------- data_align : list of 2D arrays, element i has shape=[voxels_i, n_align] Each element in the list contains the fMRI data for alignment of one subject. There are n_align samples for each subject. data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i] Each element in the list contains the fMRI data of one subject for the classification task. labels : list of arrays of int, element i has shape=[samples_i] Each element in the list contains the labels for the data samples in data_sup. Returns ------- w : list of array, element i has shape=[voxels_i, features] The orthogonal transforms (mappings) :math:`W_i` for each subject. s : array, shape=[features, samples] The shared response. """ classes = self.classes_.size # Initialization: self.random_state_ = np.random.RandomState(self.rand_seed) random_states = [ np.random.RandomState(self.random_state_.randint(2**32)) for i in range(len(data_align))] # Set Wi's to a random orthogonal voxels by TRs w, _ = srm._init_w_transforms(data_align, self.features, random_states) # Initialize the shared response S s = SSSRM._compute_shared_response(data_align, w) # Initialize theta and bias theta, bias = self._update_classifier(data_sup, labels, w, classes) # calculate and print the objective function if logger.isEnabledFor(logging.INFO): objective = self._objective_function(data_align, data_sup, labels, w, s, theta, bias) logger.info('Objective function %f' % objective) # Main loop: for iteration in range(self.n_iter): logger.info('Iteration %d' % (iteration + 1)) # Update the mappings Wi w = self._update_w(data_align, data_sup, labels, w, s, theta, bias) # Output the objective function if logger.isEnabledFor(logging.INFO): objective = self._objective_function(data_align, data_sup, labels, w, s, theta, bias) logger.info('Objective function after updating Wi %f' % objective) # Update the shared 
response S s = SSSRM._compute_shared_response(data_align, w) # Output the objective function if logger.isEnabledFor(logging.INFO): objective = self._objective_function(data_align, data_sup, labels, w, s, theta, bias) logger.info('Objective function after updating S %f' % objective) # Update the MLR classifier, theta and bias theta, bias = self._update_classifier(data_sup, labels, w, classes) # Output the objective function if logger.isEnabledFor(logging.INFO): objective = self._objective_function(data_align, data_sup, labels, w, s, theta, bias) logger.info('Objective function after updating MLR %f' % objective) return w, s, theta, bias
[ "def", "_sssrm", "(", "self", ",", "data_align", ",", "data_sup", ",", "labels", ")", ":", "classes", "=", "self", ".", "classes_", ".", "size", "# Initialization:", "self", ".", "random_state_", "=", "np", ".", "random", ".", "RandomState", "(", "self", ...
Block-Coordinate Descent algorithm for fitting SS-SRM. Parameters ---------- data_align : list of 2D arrays, element i has shape=[voxels_i, n_align] Each element in the list contains the fMRI data for alignment of one subject. There are n_align samples for each subject. data_sup : list of 2D arrays, element i has shape=[voxels_i, samples_i] Each element in the list contains the fMRI data of one subject for the classification task. labels : list of arrays of int, element i has shape=[samples_i] Each element in the list contains the labels for the data samples in data_sup. Returns ------- w : list of array, element i has shape=[voxels_i, features] The orthogonal transforms (mappings) :math:`W_i` for each subject. s : array, shape=[features, samples] The shared response.
[ "Block", "-", "Coordinate", "Descent", "algorithm", "for", "fitting", "SS", "-", "SRM", "." ]
python
train
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v10/matrixpilot.py#L10725-L10749
def position_target_global_int_encode(self, time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate): ''' Reports the current commanded vehicle position, velocity, and acceleration as specified by the autopilot. This should match the commands sent in SET_POSITION_TARGET_GLOBAL_INT if the vehicle is being controlled this way. time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t) coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t) type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. 
Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t) lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t) lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t) alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float) vx : X velocity in NED frame in meter / s (float) vy : Y velocity in NED frame in meter / s (float) vz : Z velocity in NED frame in meter / s (float) afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) yaw : yaw setpoint in rad (float) yaw_rate : yaw rate setpoint in rad/s (float) ''' return MAVLink_position_target_global_int_message(time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate)
[ "def", "position_target_global_int_encode", "(", "self", ",", "time_boot_ms", ",", "coordinate_frame", ",", "type_mask", ",", "lat_int", ",", "lon_int", ",", "alt", ",", "vx", ",", "vy", ",", "vz", ",", "afx", ",", "afy", ",", "afz", ",", "yaw", ",", "ya...
Reports the current commanded vehicle position, velocity, and acceleration as specified by the autopilot. This should match the commands sent in SET_POSITION_TARGET_GLOBAL_INT if the vehicle is being controlled this way. time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t) coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t) type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t) lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t) lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t) alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float) vx : X velocity in NED frame in meter / s (float) vy : Y velocity in NED frame in meter / s (float) vz : Z velocity in NED frame in meter / s (float) afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float) yaw : yaw setpoint in rad (float) yaw_rate : yaw rate setpoint in rad/s (float)
[ "Reports", "the", "current", "commanded", "vehicle", "position", "velocity", "and", "acceleration", "as", "specified", "by", "the", "autopilot", ".", "This", "should", "match", "the", "commands", "sent", "in", "SET_POSITION_TARGET_GLOBAL_INT", "if", "the", "vehicle"...
python
train
donovan-duplessis/pwnurl
pwnurl/models/base.py
https://github.com/donovan-duplessis/pwnurl/blob/a13e27694f738228d186ea437b4d15ef5a925a87/pwnurl/models/base.py#L60-L66
def save(self, commit=True): """ Save model to database """ db.session.add(self) if commit: db.session.commit() return self
[ "def", "save", "(", "self", ",", "commit", "=", "True", ")", ":", "db", ".", "session", ".", "add", "(", "self", ")", "if", "commit", ":", "db", ".", "session", ".", "commit", "(", ")", "return", "self" ]
Save model to database
[ "Save", "model", "to", "database" ]
python
train
bwhite/hadoopy
hadoopy/_reporter.py
https://github.com/bwhite/hadoopy/blob/ff39b4e6d4e6efaf1f571cf0f2c0e0d7ab28c2d6/hadoopy/_reporter.py#L27-L40
def counter(group, counter, amount=1, err=None): """Output a counter update that is displayed in the Hadoop web interface Counters are useful for quickly identifying the number of times an error occurred, current progress, or coarse statistics. :param group: Counter group :param counter: Counter name :param amount: Value to add (default 1) :param err: Func that outputs a string, if None then sys.stderr.write is used (default None) """ if not err: err = _err err("reporter:counter:%s,%s,%s\n" % (group, counter, str(amount)))
[ "def", "counter", "(", "group", ",", "counter", ",", "amount", "=", "1", ",", "err", "=", "None", ")", ":", "if", "not", "err", ":", "err", "=", "_err", "err", "(", "\"reporter:counter:%s,%s,%s\\n\"", "%", "(", "group", ",", "counter", ",", "str", "(...
Output a counter update that is displayed in the Hadoop web interface Counters are useful for quickly identifying the number of times an error occurred, current progress, or coarse statistics. :param group: Counter group :param counter: Counter name :param amount: Value to add (default 1) :param err: Func that outputs a string, if None then sys.stderr.write is used (default None)
[ "Output", "a", "counter", "update", "that", "is", "displayed", "in", "the", "Hadoop", "web", "interface" ]
python
train
softlayer/softlayer-python
SoftLayer/managers/block.py
https://github.com/softlayer/softlayer-python/blob/9f181be08cc3668353b05a6de0cb324f52cff6fa/SoftLayer/managers/block.py#L343-L369
def order_block_volume(self, storage_type, location, size, os_type, iops=None, tier_level=None, snapshot_size=None, service_offering='storage_as_a_service', hourly_billing_flag=False): """Places an order for a block volume. :param storage_type: 'performance' or 'endurance' :param location: Datacenter in which to order iSCSI volume :param size: Size of the desired volume, in GB :param os_type: OS Type to use for volume alignment, see help for list :param iops: Number of IOPs for a "Performance" order :param tier_level: Tier level to use for an "Endurance" order :param snapshot_size: The size of optional snapshot space, if snapshot space should also be ordered (None if not ordered) :param service_offering: Requested offering package to use in the order ('storage_as_a_service', 'enterprise', or 'performance') :param hourly_billing_flag: Billing type, monthly (False) or hourly (True), default to monthly. """ order = storage_utils.prepare_volume_order_object( self, storage_type, location, size, iops, tier_level, snapshot_size, service_offering, 'block', hourly_billing_flag ) order['osFormatType'] = {'keyName': os_type} return self.client.call('Product_Order', 'placeOrder', order)
[ "def", "order_block_volume", "(", "self", ",", "storage_type", ",", "location", ",", "size", ",", "os_type", ",", "iops", "=", "None", ",", "tier_level", "=", "None", ",", "snapshot_size", "=", "None", ",", "service_offering", "=", "'storage_as_a_service'", ",...
Places an order for a block volume. :param storage_type: 'performance' or 'endurance' :param location: Datacenter in which to order iSCSI volume :param size: Size of the desired volume, in GB :param os_type: OS Type to use for volume alignment, see help for list :param iops: Number of IOPs for a "Performance" order :param tier_level: Tier level to use for an "Endurance" order :param snapshot_size: The size of optional snapshot space, if snapshot space should also be ordered (None if not ordered) :param service_offering: Requested offering package to use in the order ('storage_as_a_service', 'enterprise', or 'performance') :param hourly_billing_flag: Billing type, monthly (False) or hourly (True), default to monthly.
[ "Places", "an", "order", "for", "a", "block", "volume", "." ]
python
train
aconrad/pycobertura
pycobertura/cobertura.py
https://github.com/aconrad/pycobertura/blob/26e472f1424f5cd499c42232dc5ee12e4042806f/pycobertura/cobertura.py#L420-L429
def file_source_hunks(self, filename): """ Like `CoberturaDiff.file_source`, but returns a list of line hunks of the lines that have changed for the given file `filename`. An empty list means that the file has no lines that have a change in coverage status. """ lines = self.file_source(filename) hunks = hunkify_lines(lines) return hunks
[ "def", "file_source_hunks", "(", "self", ",", "filename", ")", ":", "lines", "=", "self", ".", "file_source", "(", "filename", ")", "hunks", "=", "hunkify_lines", "(", "lines", ")", "return", "hunks" ]
Like `CoberturaDiff.file_source`, but returns a list of line hunks of the lines that have changed for the given file `filename`. An empty list means that the file has no lines that have a change in coverage status.
[ "Like", "CoberturaDiff", ".", "file_source", "but", "returns", "a", "list", "of", "line", "hunks", "of", "the", "lines", "that", "have", "changed", "for", "the", "given", "file", "filename", ".", "An", "empty", "list", "means", "that", "the", "file", "has"...
python
train
google/grr
grr/server/grr_response_server/flows/general/filetypes.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flows/general/filetypes.py#L59-L64
def Start(self): """Issue a request to list the directory.""" self.CallClient( server_stubs.PlistQuery, request=self.args.request, next_state="Receive")
[ "def", "Start", "(", "self", ")", ":", "self", ".", "CallClient", "(", "server_stubs", ".", "PlistQuery", ",", "request", "=", "self", ".", "args", ".", "request", ",", "next_state", "=", "\"Receive\"", ")" ]
Issue a request to list the directory.
[ "Issue", "a", "request", "to", "list", "the", "directory", "." ]
python
train
Fantomas42/django-blog-zinnia
zinnia/admin/widgets.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/admin/widgets.py#L105-L115
def media(self): """ TagAutoComplete's Media. """ def static(path): return staticfiles_storage.url( 'zinnia/admin/select2/%s' % path) return Media( css={'all': (static('css/select2.css'),)}, js=(static('js/select2.js'),) )
[ "def", "media", "(", "self", ")", ":", "def", "static", "(", "path", ")", ":", "return", "staticfiles_storage", ".", "url", "(", "'zinnia/admin/select2/%s'", "%", "path", ")", "return", "Media", "(", "css", "=", "{", "'all'", ":", "(", "static", "(", "...
TagAutoComplete's Media.
[ "TagAutoComplete", "s", "Media", "." ]
python
train
earwig/mwparserfromhell
mwparserfromhell/nodes/template.py
https://github.com/earwig/mwparserfromhell/blob/98dc30902d35c714a70aca8e6616f49d71cb24cc/mwparserfromhell/nodes/template.py#L80-L90
def _surface_escape(code, char): """Return *code* with *char* escaped as an HTML entity. The main use of this is to escape pipes (``|``) or equal signs (``=``) in parameter names or values so they are not mistaken for new parameters. """ replacement = str(HTMLEntity(value=ord(char))) for node in code.filter_text(recursive=False): if char in node: code.replace(node, node.replace(char, replacement), False)
[ "def", "_surface_escape", "(", "code", ",", "char", ")", ":", "replacement", "=", "str", "(", "HTMLEntity", "(", "value", "=", "ord", "(", "char", ")", ")", ")", "for", "node", "in", "code", ".", "filter_text", "(", "recursive", "=", "False", ")", ":...
Return *code* with *char* escaped as an HTML entity. The main use of this is to escape pipes (``|``) or equal signs (``=``) in parameter names or values so they are not mistaken for new parameters.
[ "Return", "*", "code", "*", "with", "*", "char", "*", "escaped", "as", "an", "HTML", "entity", "." ]
python
train
saltstack/salt
salt/modules/solaris_system.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solaris_system.py#L111-L131
def reboot(delay=0, message=None): ''' Reboot the system delay : int Optional wait time in seconds before the system will be rebooted. message : string Optional message to broadcast before rebooting. CLI Example: .. code-block:: bash salt '*' system.reboot salt '*' system.reboot 60 "=== system upgraded ===" ''' cmd = ['shutdown', '-i', '6', '-g', delay, '-y'] if message: cmd.append(message) ret = __salt__['cmd.run'](cmd, python_shell=False) return ret
[ "def", "reboot", "(", "delay", "=", "0", ",", "message", "=", "None", ")", ":", "cmd", "=", "[", "'shutdown'", ",", "'-i'", ",", "'6'", ",", "'-g'", ",", "delay", ",", "'-y'", "]", "if", "message", ":", "cmd", ".", "append", "(", "message", ")", ...
Reboot the system delay : int Optional wait time in seconds before the system will be rebooted. message : string Optional message to broadcast before rebooting. CLI Example: .. code-block:: bash salt '*' system.reboot salt '*' system.reboot 60 "=== system upgraded ==="
[ "Reboot", "the", "system" ]
python
train
vilmibm/done
parsedatetime/parsedatetime.py
https://github.com/vilmibm/done/blob/7e5b60d2900ceddefa49de352a19b794199b51a8/parsedatetime/parsedatetime.py#L1479-L1540
def inc(self, source, month=None, year=None): """ Takes the given C{source} date, or current date if none is passed, and increments it according to the values passed in by month and/or year. This routine is needed because Python's C{timedelta()} function does not allow for month or year increments. @type source: struct_time @param source: C{struct_time} value to increment @type month: integer @param month: optional number of months to increment @type year: integer @param year: optional number of years to increment @rtype: datetime @return: C{source} incremented by the number of months and/or years """ yr = source.year mth = source.month dy = source.day if year: try: yi = int(year) except ValueError: yi = 0 yr += yi if month: try: mi = int(month) except ValueError: mi = 0 m = abs(mi) y = m / 12 # how many years are in month increment m = m % 12 # get remaining months if mi < 0: mth = mth - m # sub months from start month if mth < 1: # cross start-of-year? y -= 1 # yes - decrement year mth += 12 # and fix month else: mth = mth + m # add months to start month if mth > 12: # cross end-of-year? y += 1 # yes - increment year mth -= 12 # and fix month yr += y # if the day ends up past the last day of # the new month, set it to the last day if dy > self.ptc.daysInMonth(mth, yr): dy = self.ptc.daysInMonth(mth, yr) d = source.replace(year=yr, month=mth, day=dy) return source + (d - source)
[ "def", "inc", "(", "self", ",", "source", ",", "month", "=", "None", ",", "year", "=", "None", ")", ":", "yr", "=", "source", ".", "year", "mth", "=", "source", ".", "month", "dy", "=", "source", ".", "day", "if", "year", ":", "try", ":", "yi",...
Takes the given C{source} date, or current date if none is passed, and increments it according to the values passed in by month and/or year. This routine is needed because Python's C{timedelta()} function does not allow for month or year increments. @type source: struct_time @param source: C{struct_time} value to increment @type month: integer @param month: optional number of months to increment @type year: integer @param year: optional number of years to increment @rtype: datetime @return: C{source} incremented by the number of months and/or years
[ "Takes", "the", "given", "C", "{", "source", "}", "date", "or", "current", "date", "if", "none", "is", "passed", "and", "increments", "it", "according", "to", "the", "values", "passed", "in", "by", "month", "and", "/", "or", "year", ".", "This", "routi...
python
train
cloudboss/friend
friend/strings.py
https://github.com/cloudboss/friend/blob/3357e6ec849552e3ae9ed28017ff0926e4006e4e/friend/strings.py#L394-L434
def format_obj_keys(obj, formatter): """ Take a dictionary with string keys and recursively convert all keys from one form to another using the formatting function. The dictionary may contain lists as values, and any nested dictionaries within those lists will also be converted. :param object obj: The object to convert :param function formatter: The formatting function for keys, which takes and returns a string :returns: A new object with keys converted :rtype: object :Example: :: >>> obj = { ... 'dict-list': [ ... {'one-key': 123, 'two-key': 456}, ... {'threeKey': 789, 'four-key': 456}, ... ], ... 'some-other-key': 'some-unconverted-value' ... } >>> format_obj_keys(obj, lambda s: s.upper()) { 'DICT-LIST': [ {'ONE-KEY': 123, 'TWO-KEY': 456}, {'FOUR-KEY': 456, 'THREE-KEY': 789} ], 'SOME-OTHER-KEY': 'some-unconverted-value' } """ if type(obj) == list: return [format_obj_keys(o, formatter) for o in obj] elif type(obj) == dict: return {formatter(k): format_obj_keys(v, formatter) for k, v in obj.items()} else: return obj
[ "def", "format_obj_keys", "(", "obj", ",", "formatter", ")", ":", "if", "type", "(", "obj", ")", "==", "list", ":", "return", "[", "format_obj_keys", "(", "o", ",", "formatter", ")", "for", "o", "in", "obj", "]", "elif", "type", "(", "obj", ")", "=...
Take a dictionary with string keys and recursively convert all keys from one form to another using the formatting function. The dictionary may contain lists as values, and any nested dictionaries within those lists will also be converted. :param object obj: The object to convert :param function formatter: The formatting function for keys, which takes and returns a string :returns: A new object with keys converted :rtype: object :Example: :: >>> obj = { ... 'dict-list': [ ... {'one-key': 123, 'two-key': 456}, ... {'threeKey': 789, 'four-key': 456}, ... ], ... 'some-other-key': 'some-unconverted-value' ... } >>> format_obj_keys(obj, lambda s: s.upper()) { 'DICT-LIST': [ {'ONE-KEY': 123, 'TWO-KEY': 456}, {'FOUR-KEY': 456, 'THREE-KEY': 789} ], 'SOME-OTHER-KEY': 'some-unconverted-value' }
[ "Take", "a", "dictionary", "with", "string", "keys", "and", "recursively", "convert", "all", "keys", "from", "one", "form", "to", "another", "using", "the", "formatting", "function", "." ]
python
train
Microsoft/nni
src/sdk/pynni/nni/hyperband_advisor/hyperband_advisor.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/src/sdk/pynni/nni/hyperband_advisor/hyperband_advisor.py#L356-L365
def handle_update_search_space(self, data): """data: JSON object, which is search space Parameters ---------- data: int number of trial jobs """ self.searchspace_json = data self.random_state = np.random.RandomState()
[ "def", "handle_update_search_space", "(", "self", ",", "data", ")", ":", "self", ".", "searchspace_json", "=", "data", "self", ".", "random_state", "=", "np", ".", "random", ".", "RandomState", "(", ")" ]
data: JSON object, which is search space Parameters ---------- data: int number of trial jobs
[ "data", ":", "JSON", "object", "which", "is", "search", "space", "Parameters", "----------", "data", ":", "int", "number", "of", "trial", "jobs" ]
python
train
openfisca/openfisca-core
openfisca_core/indexed_enums.py
https://github.com/openfisca/openfisca-core/blob/92ce9396e29ae5d9bac5ea604cfce88517c6b35c/openfisca_core/indexed_enums.py#L118-L128
def decode_to_str(self): """ Return the array of string identifiers corresponding to self >>> enum_array = household('housing_occupancy_status', period) >>> enum_array[0] >>> 2 # Encoded value >>> enum_array.decode_to_str()[0] >>> 'free_lodger' # String identifier """ return np.select([self == item.index for item in self.possible_values], [item.name for item in self.possible_values])
[ "def", "decode_to_str", "(", "self", ")", ":", "return", "np", ".", "select", "(", "[", "self", "==", "item", ".", "index", "for", "item", "in", "self", ".", "possible_values", "]", ",", "[", "item", ".", "name", "for", "item", "in", "self", ".", "...
Return the array of string identifiers corresponding to self >>> enum_array = household('housing_occupancy_status', period) >>> enum_array[0] >>> 2 # Encoded value >>> enum_array.decode_to_str()[0] >>> 'free_lodger' # String identifier
[ "Return", "the", "array", "of", "string", "identifiers", "corresponding", "to", "self" ]
python
train
dw/mitogen
ansible_mitogen/connection.py
https://github.com/dw/mitogen/blob/a7fdb55e1300a7e0a5e404b09eb730cf9a525da7/ansible_mitogen/connection.py#L240-L256
def _connect_setns(spec, kind=None): """ Return ContextService arguments for a mitogen_setns connection. """ return { 'method': 'setns', 'kwargs': { 'container': spec.remote_addr(), 'username': spec.remote_user(), 'python_path': spec.python_path(), 'kind': kind or spec.mitogen_kind(), 'docker_path': spec.mitogen_docker_path(), 'lxc_path': spec.mitogen_lxc_path(), 'lxc_info_path': spec.mitogen_lxc_info_path(), 'machinectl_path': spec.mitogen_machinectl_path(), } }
[ "def", "_connect_setns", "(", "spec", ",", "kind", "=", "None", ")", ":", "return", "{", "'method'", ":", "'setns'", ",", "'kwargs'", ":", "{", "'container'", ":", "spec", ".", "remote_addr", "(", ")", ",", "'username'", ":", "spec", ".", "remote_user", ...
Return ContextService arguments for a mitogen_setns connection.
[ "Return", "ContextService", "arguments", "for", "a", "mitogen_setns", "connection", "." ]
python
train
RJT1990/pyflux
pyflux/gpnarx/kernels.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/gpnarx/kernels.py#L79-L91
def K(self, parm): """ Returns the Gram Matrix Parameters ---------- parm : np.ndarray Parameters for the Gram Matrix Returns ---------- - Gram Matrix (np.ndarray) """ return OU_K_matrix(self.X, parm) + np.identity(self.X.shape[0])*(10**-10)
[ "def", "K", "(", "self", ",", "parm", ")", ":", "return", "OU_K_matrix", "(", "self", ".", "X", ",", "parm", ")", "+", "np", ".", "identity", "(", "self", ".", "X", ".", "shape", "[", "0", "]", ")", "*", "(", "10", "**", "-", "10", ")" ]
Returns the Gram Matrix Parameters ---------- parm : np.ndarray Parameters for the Gram Matrix Returns ---------- - Gram Matrix (np.ndarray)
[ "Returns", "the", "Gram", "Matrix" ]
python
train
pypa/setuptools
setuptools/msvc.py
https://github.com/pypa/setuptools/blob/83c667e0b2a98193851c07115d1af65011ed0fb6/setuptools/msvc.py#L204-L237
def _augment_exception(exc, version, arch=''): """ Add details to the exception message to help guide the user as to what action will resolve it. """ # Error if MSVC++ directory not found or environment not set message = exc.args[0] if "vcvarsall" in message.lower() or "visual c" in message.lower(): # Special error message if MSVC++ not installed tmpl = 'Microsoft Visual C++ {version:0.1f} is required.' message = tmpl.format(**locals()) msdownload = 'www.microsoft.com/download/details.aspx?id=%d' if version == 9.0: if arch.lower().find('ia64') > -1: # For VC++ 9.0, if IA64 support is needed, redirect user # to Windows SDK 7.0 message += ' Get it with "Microsoft Windows SDK 7.0": ' message += msdownload % 3138 else: # For VC++ 9.0 redirect user to Vc++ for Python 2.7 : # This redirection link is maintained by Microsoft. # Contact vspython@microsoft.com if it needs updating. message += ' Get it from http://aka.ms/vcpython27' elif version == 10.0: # For VC++ 10.0 Redirect user to Windows SDK 7.1 message += ' Get it with "Microsoft Windows SDK 7.1": ' message += msdownload % 8279 elif version >= 14.0: # For VC++ 14.0 Redirect user to Visual C++ Build Tools message += (' Get it with "Microsoft Visual C++ Build Tools": ' r'https://visualstudio.microsoft.com/downloads/') exc.args = (message, )
[ "def", "_augment_exception", "(", "exc", ",", "version", ",", "arch", "=", "''", ")", ":", "# Error if MSVC++ directory not found or environment not set", "message", "=", "exc", ".", "args", "[", "0", "]", "if", "\"vcvarsall\"", "in", "message", ".", "lower", "(...
Add details to the exception message to help guide the user as to what action will resolve it.
[ "Add", "details", "to", "the", "exception", "message", "to", "help", "guide", "the", "user", "as", "to", "what", "action", "will", "resolve", "it", "." ]
python
train
datastax/python-driver
cassandra/cqlengine/columns.py
https://github.com/datastax/python-driver/blob/30a80d0b798b1f45f8cb77163b1fa791f3e3ca29/cassandra/cqlengine/columns.py#L44-L63
def changed(self): """ Indicates whether or not this value has changed. :rtype: boolean """ if self.explicit: return self.value != self.previous_value if isinstance(self.column, BaseContainerColumn): default_value = self.column.get_default() if self.column._val_is_null(default_value): return not self.column._val_is_null(self.value) and self.value != self.previous_value elif self.previous_value is None: return self.value != default_value return self.value != self.previous_value return False
[ "def", "changed", "(", "self", ")", ":", "if", "self", ".", "explicit", ":", "return", "self", ".", "value", "!=", "self", ".", "previous_value", "if", "isinstance", "(", "self", ".", "column", ",", "BaseContainerColumn", ")", ":", "default_value", "=", ...
Indicates whether or not this value has changed. :rtype: boolean
[ "Indicates", "whether", "or", "not", "this", "value", "has", "changed", "." ]
python
train
lcgong/redbean
redbean/secure/secure.py
https://github.com/lcgong/redbean/blob/45df9ff1e807e742771c752808d7fdac4007c919/redbean/secure/secure.py#L54-L71
async def identify(self, request): """ 从request中得到登录身份identity """ if hasattr(request, '_session_identity'): return request._session_identity token = request.cookies.get(self._cookie_name) if token is None: token = getAuthorizationTokenFromHeader(request) if token is None: raise Unauthorized('无认证身份') identity = await self.decode_jwt(token) setattr(request, '_session_identity', identity) # if identity.client_id.startsWith('spa|'): # checkCRSFToken(request) return identity
[ "async", "def", "identify", "(", "self", ",", "request", ")", ":", "if", "hasattr", "(", "request", ",", "'_session_identity'", ")", ":", "return", "request", ".", "_session_identity", "token", "=", "request", ".", "cookies", ".", "get", "(", "self", ".", ...
从request中得到登录身份identity
[ "从request中得到登录身份identity" ]
python
train
mwouts/jupytext
jupytext/jupytext.py
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/jupytext.py#L181-L209
def reads(text, fmt, as_version=4, **kwargs): """Read a notebook from a string""" fmt = copy(fmt) fmt = long_form_one_format(fmt) ext = fmt['extension'] if ext == '.ipynb': return nbformat.reads(text, as_version, **kwargs) format_name = read_format_from_metadata(text, ext) or fmt.get('format_name') if format_name: format_options = {} else: format_name, format_options = guess_format(text, ext) if format_name: fmt['format_name'] = format_name fmt.update(format_options) reader = TextNotebookConverter(fmt) notebook = reader.reads(text, **kwargs) rearrange_jupytext_metadata(notebook.metadata) if format_name and insert_or_test_version_number(): notebook.metadata.setdefault('jupytext', {}).setdefault('text_representation', {}).update( {'extension': ext, 'format_name': format_name}) return notebook
[ "def", "reads", "(", "text", ",", "fmt", ",", "as_version", "=", "4", ",", "*", "*", "kwargs", ")", ":", "fmt", "=", "copy", "(", "fmt", ")", "fmt", "=", "long_form_one_format", "(", "fmt", ")", "ext", "=", "fmt", "[", "'extension'", "]", "if", "...
Read a notebook from a string
[ "Read", "a", "notebook", "from", "a", "string" ]
python
train
polyaxon/polyaxon
polyaxon/docker_images/image_info.py
https://github.com/polyaxon/polyaxon/blob/e1724f0756b1a42f9e7aa08a976584a84ef7f016/polyaxon/docker_images/image_info.py#L22-L32
def get_job_image_info(project: 'Project', job: Any) -> Tuple[str, str]: """Return the image name and image tag for a job""" project_name = project.name repo_name = project_name image_name = '{}/{}'.format(conf.get('REGISTRY_URI'), repo_name) try: last_commit = project.repo.last_commit except ValueError: raise ValueError('Repo was not found for project `{}`.'.format(project)) return image_name, last_commit[0]
[ "def", "get_job_image_info", "(", "project", ":", "'Project'", ",", "job", ":", "Any", ")", "->", "Tuple", "[", "str", ",", "str", "]", ":", "project_name", "=", "project", ".", "name", "repo_name", "=", "project_name", "image_name", "=", "'{}/{}'", ".", ...
Return the image name and image tag for a job
[ "Return", "the", "image", "name", "and", "image", "tag", "for", "a", "job" ]
python
train
Metatab/metatab
metatab/terms.py
https://github.com/Metatab/metatab/blob/8336ec3e4bd8da84a9a5cb86de1c1086e14b8b22/metatab/terms.py#L921-L937
def rows(self): """Yield rows for the section""" for t in self.terms: for row in t.rows: term, value = row # Value can either be a string, or a dict if isinstance(value, dict): # Dict is for properties, which might be arg-children term, args, remain = self._args(term, value) yield term, args # 'remain' is all of the children that didn't have an arg-child column -- the # section didn't have a column heder for that ther. for k, v in remain.items(): yield term.split('.')[-1] + '.' + k, v else: yield row
[ "def", "rows", "(", "self", ")", ":", "for", "t", "in", "self", ".", "terms", ":", "for", "row", "in", "t", ".", "rows", ":", "term", ",", "value", "=", "row", "# Value can either be a string, or a dict", "if", "isinstance", "(", "value", ",", "dict", ...
Yield rows for the section
[ "Yield", "rows", "for", "the", "section" ]
python
train
chrisrink10/basilisp
src/basilisp/lang/compiler/generator.py
https://github.com/chrisrink10/basilisp/blob/3d82670ee218ec64eb066289c82766d14d18cc92/src/basilisp/lang/compiler/generator.py#L1823-L1878
def _var_sym_to_py_ast( ctx: GeneratorContext, node: VarRef, is_assigning: bool = False ) -> GeneratedPyAST: """Generate a Python AST node for accessing a Var. If the Var is marked as :dynamic or :redef or the compiler option USE_VAR_INDIRECTION is active, do not compile to a direct access. If the corresponding function name is not defined in a Python module, no direct variable access is possible and Var.find indirection must be used.""" assert node.op == NodeOp.VAR var = node.var ns = var.ns ns_name = ns.name ns_module = ns.module safe_ns = munge(ns_name) var_name = var.name.name py_var_ctx = ast.Store() if is_assigning else ast.Load() # Return the actual var, rather than its value if requested if node.return_var: return GeneratedPyAST( node=ast.Call( func=_FIND_VAR_FN_NAME, args=[ ast.Call( func=_NEW_SYM_FN_NAME, args=[ast.Str(var_name)], keywords=[ast.keyword(arg="ns", value=ast.Str(ns_name))], ) ], keywords=[], ) ) # Check if we should use Var indirection if ctx.use_var_indirection or _is_dynamic(var) or _is_redefable(var): return __var_find_to_py_ast(var_name, ns_name, py_var_ctx) # Otherwise, try to direct-link it like a Python variable # Try without allowing builtins first safe_name = munge(var_name) if safe_name not in ns_module.__dict__: # Try allowing builtins safe_name = munge(var_name, allow_builtins=True) if safe_name in ns_module.__dict__: if ns is ctx.current_ns: return GeneratedPyAST(node=ast.Name(id=safe_name, ctx=py_var_ctx)) return GeneratedPyAST(node=_load_attr(f"{safe_ns}.{safe_name}", ctx=py_var_ctx)) if ctx.warn_on_var_indirection: logger.warning(f"could not resolve a direct link to Var '{var_name}'") return __var_find_to_py_ast(var_name, ns_name, py_var_ctx)
[ "def", "_var_sym_to_py_ast", "(", "ctx", ":", "GeneratorContext", ",", "node", ":", "VarRef", ",", "is_assigning", ":", "bool", "=", "False", ")", "->", "GeneratedPyAST", ":", "assert", "node", ".", "op", "==", "NodeOp", ".", "VAR", "var", "=", "node", "...
Generate a Python AST node for accessing a Var. If the Var is marked as :dynamic or :redef or the compiler option USE_VAR_INDIRECTION is active, do not compile to a direct access. If the corresponding function name is not defined in a Python module, no direct variable access is possible and Var.find indirection must be used.
[ "Generate", "a", "Python", "AST", "node", "for", "accessing", "a", "Var", "." ]
python
test
hardbyte/python-can
can/notifier.py
https://github.com/hardbyte/python-can/blob/cdc5254d96072df7739263623f3e920628a7d214/can/notifier.py#L70-L90
def stop(self, timeout=5): """Stop notifying Listeners when new :class:`~can.Message` objects arrive and call :meth:`~can.Listener.stop` on each Listener. :param float timeout: Max time in seconds to wait for receive threads to finish. Should be longer than timeout given at instantiation. """ self._running = False end_time = time.time() + timeout for reader in self._readers: if isinstance(reader, threading.Thread): now = time.time() if now < end_time: reader.join(end_time - now) else: # reader is a file descriptor self._loop.remove_reader(reader) for listener in self.listeners: if hasattr(listener, 'stop'): listener.stop()
[ "def", "stop", "(", "self", ",", "timeout", "=", "5", ")", ":", "self", ".", "_running", "=", "False", "end_time", "=", "time", ".", "time", "(", ")", "+", "timeout", "for", "reader", "in", "self", ".", "_readers", ":", "if", "isinstance", "(", "re...
Stop notifying Listeners when new :class:`~can.Message` objects arrive and call :meth:`~can.Listener.stop` on each Listener. :param float timeout: Max time in seconds to wait for receive threads to finish. Should be longer than timeout given at instantiation.
[ "Stop", "notifying", "Listeners", "when", "new", ":", "class", ":", "~can", ".", "Message", "objects", "arrive", "and", "call", ":", "meth", ":", "~can", ".", "Listener", ".", "stop", "on", "each", "Listener", "." ]
python
train
calmjs/calmjs
src/calmjs/base.py
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/base.py#L508-L518
def which(self): """ Figure out which binary this will execute. Returns None if the binary is not found. """ if self.binary is None: return None return which(self.binary, path=self.env_path)
[ "def", "which", "(", "self", ")", ":", "if", "self", ".", "binary", "is", "None", ":", "return", "None", "return", "which", "(", "self", ".", "binary", ",", "path", "=", "self", ".", "env_path", ")" ]
Figure out which binary this will execute. Returns None if the binary is not found.
[ "Figure", "out", "which", "binary", "this", "will", "execute", "." ]
python
train
edoburu/sphinxcontrib-django
sphinxcontrib_django/docstrings.py
https://github.com/edoburu/sphinxcontrib-django/blob/5116ac7f1510a76b1ff58cf7f8d2fab7d8bbe2a9/sphinxcontrib_django/docstrings.py#L43-L59
def setup(app): """Allow this package to be used as Sphinx extension. This is also called from the top-level ``__init__.py``. :type app: sphinx.application.Sphinx """ from .patches import patch_django_for_autodoc # When running, make sure Django doesn't execute querysets patch_django_for_autodoc() # Generate docstrings for Django model fields # Register the docstring processor with sphinx app.connect('autodoc-process-docstring', improve_model_docstring) # influence skip rules app.connect("autodoc-skip-member", autodoc_skip)
[ "def", "setup", "(", "app", ")", ":", "from", ".", "patches", "import", "patch_django_for_autodoc", "# When running, make sure Django doesn't execute querysets", "patch_django_for_autodoc", "(", ")", "# Generate docstrings for Django model fields", "# Register the docstring processor...
Allow this package to be used as Sphinx extension. This is also called from the top-level ``__init__.py``. :type app: sphinx.application.Sphinx
[ "Allow", "this", "package", "to", "be", "used", "as", "Sphinx", "extension", ".", "This", "is", "also", "called", "from", "the", "top", "-", "level", "__init__", ".", "py", "." ]
python
train
neptune-ml/steppy-toolkit
toolkit/preprocessing/misc.py
https://github.com/neptune-ml/steppy-toolkit/blob/bf3f48cfcc65dffc46e65ddd5d6cfec6bb9f9132/toolkit/preprocessing/misc.py#L287-L309
def transform(self, numerical_feature_list, categorical_feature_list): """ Args: numerical_feature_list: list of numerical features categorical_feature_list: list of categorical features Returns: Dictionary with following keys: features: DataFrame with concatenated features feature_names: list of features names categorical_features: list of categorical feature names """ features = numerical_feature_list + categorical_feature_list for feature in features: feature = self._format_target(feature) feature.set_index(self.id_column, drop=True, inplace=True) features = pd.concat(features, axis=1).astype(np.float32).reset_index() outputs = dict() outputs['features'] = features outputs['feature_names'] = list(features.columns) outputs['categorical_features'] = self._get_feature_names(categorical_feature_list) return outputs
[ "def", "transform", "(", "self", ",", "numerical_feature_list", ",", "categorical_feature_list", ")", ":", "features", "=", "numerical_feature_list", "+", "categorical_feature_list", "for", "feature", "in", "features", ":", "feature", "=", "self", ".", "_format_target...
Args: numerical_feature_list: list of numerical features categorical_feature_list: list of categorical features Returns: Dictionary with following keys: features: DataFrame with concatenated features feature_names: list of features names categorical_features: list of categorical feature names
[ "Args", ":", "numerical_feature_list", ":", "list", "of", "numerical", "features", "categorical_feature_list", ":", "list", "of", "categorical", "features" ]
python
train