repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
osrg/ryu
ryu/services/protocols/bgp/peer.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/services/protocols/bgp/peer.py#L1256-L1312
def _connect_loop(self, client_factory):
    """In the current greenlet we try to establish connection with peer.

    This greenlet will spin another greenlet to handle incoming data
    from the peer once connection is established.
    """
    # If current configuration allow, enable active session establishment.
    if self._neigh_conf.enabled:
        self._connect_retry_event.set()

    while True:
        self._connect_retry_event.wait()

        # Reconnecting immediately after closing connection may be not very
        # well seen by some peers (ALU?)
        self.pause(1.0)
        if self.state.bgp_state in \
                (const.BGP_FSM_IDLE, const.BGP_FSM_ACTIVE):

            # Check if we have to stop or retry
            self.state.bgp_state = const.BGP_FSM_CONNECT
            # If we have specific host interface to bind to, we will do so
            # else we will bind to system default.
            if self._neigh_conf.host_bind_ip and \
                    self._neigh_conf.host_bind_port:
                bind_addr = (self._neigh_conf.host_bind_ip,
                             self._neigh_conf.host_bind_port)
            else:
                bind_addr = None
            peer_address = (self._neigh_conf.ip_address,
                            self._neigh_conf.port)

            if bind_addr:
                # BUG FIX: the original used adjacent literals
                # 'from' '%s' with no separating space, rendering as
                # "trying to connect from10.0.0.1..." in the log.
                LOG.debug('%s trying to connect from %s to %s',
                          self, bind_addr, peer_address)
            else:
                LOG.debug('%s trying to connect to %s', self, peer_address)
            tcp_conn_timeout = self._common_conf.tcp_conn_timeout
            try:
                password = self._neigh_conf.password
                self._connect_tcp(peer_address,
                                  client_factory,
                                  time_out=tcp_conn_timeout,
                                  bind_address=bind_addr,
                                  password=password)
            except socket.error:
                self.state.bgp_state = const.BGP_FSM_ACTIVE
                if LOG.isEnabledFor(logging.DEBUG):
                    LOG.debug('Socket could not be created in time'
                              ' (%s secs), reason %s',
                              tcp_conn_timeout, traceback.format_exc())
                LOG.info('Will try to reconnect to %s after %s secs: %s',
                         self._neigh_conf.ip_address,
                         self._common_conf.bgp_conn_retry_time,
                         self._connect_retry_event.is_set())

        self.pause(self._common_conf.bgp_conn_retry_time)
[ "def", "_connect_loop", "(", "self", ",", "client_factory", ")", ":", "# If current configuration allow, enable active session establishment.", "if", "self", ".", "_neigh_conf", ".", "enabled", ":", "self", ".", "_connect_retry_event", ".", "set", "(", ")", "while", "...
In the current greenlet we try to establish connection with peer. This greenlet will spin another greenlet to handle incoming data from the peer once connection is established.
[ "In", "the", "current", "greenlet", "we", "try", "to", "establish", "connection", "with", "peer", "." ]
python
train
zetaops/zengine
zengine/management_commands.py
https://github.com/zetaops/zengine/blob/b5bc32d3b37bca799f8985be916f04528ac79e4a/zengine/management_commands.py#L619-L639
def check_mq_connection(self):
    """
    RabbitMQ checks the connection
    It displays on the screen whether or not you have a connection.
    """
    import pika
    from zengine.client_queue import BLOCKING_MQ_PARAMS
    from pika.exceptions import ProbableAuthenticationError, ConnectionClosed
    try:
        connection = pika.BlockingConnection(BLOCKING_MQ_PARAMS)
        channel = connection.channel()
        if channel.is_open:
            print(__(u"{0}RabbitMQ is working{1}").format(CheckList.OKGREEN, CheckList.ENDC))
        elif channel.is_closed or channel.is_closing:
            # BUG FIX: original read ``self.channel``, which is never set on
            # this command object and would raise AttributeError; the channel
            # is the local variable created above.
            print(__(u"{0}RabbitMQ is not working!{1}").format(CheckList.FAIL, CheckList.ENDC))
    except ConnectionClosed as e:
        print(__(u"{0}RabbitMQ is not working!{1}").format(CheckList.FAIL, CheckList.ENDC), e)
    except ProbableAuthenticationError as e:
        print(__(u"{0}RabbitMQ username and password wrong{1}").format(CheckList.FAIL, CheckList.ENDC))
[ "def", "check_mq_connection", "(", "self", ")", ":", "import", "pika", "from", "zengine", ".", "client_queue", "import", "BLOCKING_MQ_PARAMS", "from", "pika", ".", "exceptions", "import", "ProbableAuthenticationError", ",", "ConnectionClosed", "try", ":", "connection"...
RabbitMQ checks the connection It displays on the screen whether or not you have a connection.
[ "RabbitMQ", "checks", "the", "connection", "It", "displays", "on", "the", "screen", "whether", "or", "not", "you", "have", "a", "connection", "." ]
python
train
tadashi-aikawa/owlmixin
owlmixin/__init__.py
https://github.com/tadashi-aikawa/owlmixin/blob/7c4a042c3008abddc56a8e8e55ae930d276071f5/owlmixin/__init__.py#L455-L480
def from_json_to_list(cls, data: str, force_snake_case=True, force_cast: bool=False, restrict: bool=False) -> TList[T]:
    """From json string to list of instance

    :param data: Json string
    :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True
    :param force_cast: Cast forcibly if True
    :param restrict: Prohibit extra parameters if True
    :return: List of instance

    Usage:

        >>> from owlmixin.samples import Human
        >>> humans: TList[Human] = Human.from_json_to_list('''[
        ...    {"id": 1, "name": "Tom", "favorites": [{"name": "Apple"}]},
        ...    {"id": 2, "name": "John", "favorites": [{"name": "Orange"}]}
        ... ]''')
        >>> humans[0].name
        'Tom'
        >>> humans[1].name
        'John'
    """
    # Decode the JSON payload first, then delegate to the dict-based loader.
    parsed = util.load_json(data)
    return cls.from_dicts(parsed,
                          force_snake_case=force_snake_case,
                          force_cast=force_cast,
                          restrict=restrict)
[ "def", "from_json_to_list", "(", "cls", ",", "data", ":", "str", ",", "force_snake_case", "=", "True", ",", "force_cast", ":", "bool", "=", "False", ",", "restrict", ":", "bool", "=", "False", ")", "->", "TList", "[", "T", "]", ":", "return", "cls", ...
From json string to list of instance :param data: Json string :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True :param force_cast: Cast forcibly if True :param restrict: Prohibit extra parameters if True :return: List of instance Usage: >>> from owlmixin.samples import Human >>> humans: TList[Human] = Human.from_json_to_list('''[ ... {"id": 1, "name": "Tom", "favorites": [{"name": "Apple"}]}, ... {"id": 2, "name": "John", "favorites": [{"name": "Orange"}]} ... ]''') >>> humans[0].name 'Tom' >>> humans[1].name 'John'
[ "From", "json", "string", "to", "list", "of", "instance" ]
python
train
OpenHydrology/floodestimation
floodestimation/loaders.py
https://github.com/OpenHydrology/floodestimation/blob/782da7c5abd1348923129efe89fb70003ebb088c/floodestimation/loaders.py#L151-L176
def userdata_to_db(session, method='update', autocommit=False):
    """
    Add catchments from a user folder to the database.

    The user folder is specified in the ``config.ini`` file like this::

        [import]
        folder = path/to/import/folder

    If this configuration key does not exist this will be silently ignored.

    :param session: database session to use, typically `floodestimation.db.Session()`
    :type session: :class:`sqlalchemy.orm.session.Session`
    :param method: - ``create``: only new catchments will be loaded, it must not already exist in the database.
                   - ``update``: any existing catchment in the database will be updated. Otherwise it will be created.
    :type method: str
    :param autocommit: Whether to commit the database session immediately. Default: ``False``.
    :type autocommit: bool
    """
    try:
        folder = config['import']['folder']
    except KeyError:
        # No [import] section/key configured: nothing to do, by design.
        return
    if not folder:
        return
    folder_to_db(folder, session, method=method, autocommit=autocommit)
[ "def", "userdata_to_db", "(", "session", ",", "method", "=", "'update'", ",", "autocommit", "=", "False", ")", ":", "try", ":", "folder", "=", "config", "[", "'import'", "]", "[", "'folder'", "]", "except", "KeyError", ":", "return", "if", "folder", ":",...
Add catchments from a user folder to the database. The user folder is specified in the ``config.ini`` file like this:: [import] folder = path/to/import/folder If this configuration key does not exist this will be silently ignored. :param session: database session to use, typically `floodestimation.db.Session()` :type session: :class:`sqlalchemy.orm.session.Session` :param method: - ``create``: only new catchments will be loaded, it must not already exist in the database. - ``update``: any existing catchment in the database will be updated. Otherwise it will be created. :type method: str :param autocommit: Whether to commit the database session immediately. Default: ``False``. :type autocommit: bool
[ "Add", "catchments", "from", "a", "user", "folder", "to", "the", "database", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_threshold_monitor.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_threshold_monitor.py#L610-L627
def threshold_monitor_hidden_threshold_monitor_interface_policy_area_area_value(self, **kwargs):
    """Auto Generated Code
    """
    # Build the NETCONF config tree top-down:
    # config / threshold-monitor-hidden / threshold-monitor / interface / policy / area
    config = ET.Element("config")
    hidden = ET.SubElement(
        config, "threshold-monitor-hidden",
        xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor")
    monitor = ET.SubElement(hidden, "threshold-monitor")
    interface = ET.SubElement(monitor, "interface")
    policy = ET.SubElement(interface, "policy")
    # Keys identifying the policy and area entries.
    ET.SubElement(policy, "policy_name").text = kwargs.pop('policy_name')
    area = ET.SubElement(policy, "area")
    ET.SubElement(area, "type").text = kwargs.pop('type')
    # The leaf value being configured.
    ET.SubElement(area, "area_value").text = kwargs.pop('area_value')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "threshold_monitor_hidden_threshold_monitor_interface_policy_area_area_value", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "threshold_monitor_hidden", "=", "ET", ".", "SubElement", "(", "config", ...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
atlassian-api/atlassian-python-api
atlassian/bitbucket.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/atlassian/bitbucket.py#L81-L92
def project_users_with_administrator_permissions(self, key):
    """
    Get project administrators for project
    :param key: project key
    :return: project administrators
    """
    # Direct user grants first...
    admins = []
    for entry in self.project_users(key):
        if entry['permission'] == 'PROJECT_ADMIN':
            admins.append(entry['user'])
    # ...then expand every admin group into its members.
    for group in self.project_groups_with_administrator_permissions(key):
        admins.extend(self.group_members(group))
    return admins
[ "def", "project_users_with_administrator_permissions", "(", "self", ",", "key", ")", ":", "project_administrators", "=", "[", "user", "[", "'user'", "]", "for", "user", "in", "self", ".", "project_users", "(", "key", ")", "if", "user", "[", "'permission'", "]"...
Get project administrators for project :param key: project key :return: project administrators
[ "Get", "project", "administrators", "for", "project", ":", "param", "key", ":", "project", "key", ":", "return", ":", "project", "administrators" ]
python
train
NoneGG/aredis
aredis/lock.py
https://github.com/NoneGG/aredis/blob/204caad740ac13e5760d46444a2ba7632982a046/aredis/lock.py#L132-L138
async def release(self):
    "Releases the already acquired lock"
    token = self.local.token
    if token is None:
        raise LockError("Cannot release an unlocked lock")
    # Clear the stored token before awaiting so a second release attempt
    # fails fast instead of releasing twice.
    self.local.token = None
    await self.do_release(token)
[ "async", "def", "release", "(", "self", ")", ":", "expected_token", "=", "self", ".", "local", ".", "token", "if", "expected_token", "is", "None", ":", "raise", "LockError", "(", "\"Cannot release an unlocked lock\"", ")", "self", ".", "local", ".", "token", ...
Releases the already acquired lock
[ "Releases", "the", "already", "acquired", "lock" ]
python
train
nion-software/nionswift
nion/swift/model/Utility.py
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/Utility.py#L182-L216
def clean_item_no_list(i):
    """ Return a json-clean item or None. Will log info message for failure. """
    itype = type(i)
    # Exact type checks (not isinstance) are deliberate throughout:
    # subclasses fall through to the failure branch.
    if itype == dict:
        return clean_dict(i, clean_item_no_list)
    if itype in (list, tuple):
        # Lists are normalized the same way as tuples.
        return clean_tuple(i, clean_item_no_list)
    if itype in (numpy.float32, numpy.float64):
        return float(i)
    if itype in (numpy.int16, numpy.uint16, numpy.int32, numpy.uint32):
        return int(i)
    if itype in (float, str, int, bool, type(None)):
        # Already JSON-clean; pass through unchanged.
        return i
    logging.info("[2] Unable to handle type %s", itype)
    return None
[ "def", "clean_item_no_list", "(", "i", ")", ":", "itype", "=", "type", "(", "i", ")", "if", "itype", "==", "dict", ":", "return", "clean_dict", "(", "i", ",", "clean_item_no_list", ")", "elif", "itype", "==", "list", ":", "return", "clean_tuple", "(", ...
Return a json-clean item or None. Will log info message for failure.
[ "Return", "a", "json", "-", "clean", "item", "or", "None", ".", "Will", "log", "info", "message", "for", "failure", "." ]
python
train
chainer/chainerui
chainerui/tasks/collect_results.py
https://github.com/chainer/chainerui/blob/87ad25e875bc332bfdad20197fd3d0cb81a078e8/chainerui/tasks/collect_results.py#L8-L18
def _list_result_paths(target_path, log_file_name='log'): """list_result_paths.""" result_list = [] for root, _dirs, _files in os.walk(os.path.abspath(target_path)): for name in _files: if name == log_file_name: result_list.append(root) return result_list
[ "def", "_list_result_paths", "(", "target_path", ",", "log_file_name", "=", "'log'", ")", ":", "result_list", "=", "[", "]", "for", "root", ",", "_dirs", ",", "_files", "in", "os", ".", "walk", "(", "os", ".", "path", ".", "abspath", "(", "target_path", ...
list_result_paths.
[ "list_result_paths", "." ]
python
train
MAVENSDC/PyTplot
pytplot/tplot_restore.py
https://github.com/MAVENSDC/PyTplot/blob/d76cdb95363a4bd4fea6bca7960f8523efa7fa83/pytplot/tplot_restore.py#L16-L114
def tplot_restore(filename):
    """
    This function will restore tplot variables that have been saved with the
    "tplot_save" command.

    .. note::
        This function is compatible with the IDL tplot_save routine.
        If you have a ".tplot" file generated from IDL, this procedure will restore
        the data contained in the file.
        Not all plot options will transfer over at this time.

    Parameters:
        filename : str
            The file name and full path generated by the "tplot_save" command.

    Returns:
        None

    Examples:
        >>> # Restore the saved data from the tplot_save example
        >>> import pytplot
        >>> pytplot.restore('C:/temp/variable1.pytplot')
    """
    #Error check
    if not (os.path.isfile(filename)):
        print("Not a valid file name")
        return

    #Check if the restored file was an IDL file
    if filename.endswith('.tplot'):
        # IDL save file: read with scipy's readsav and unpack positionally.
        # NOTE(review): the positional indices below follow the IDL tplot_save
        # record layout (name, data struct, ..., trange, dtype, create_time);
        # verify against the IDL routine if this layout changes.
        temp_tplot = readsav(filename)
        for i in range(len(temp_tplot['dq'])):
            data_name = temp_tplot['dq'][i][0].decode("utf-8")
            temp_x_data = temp_tplot['dq'][i][1][0][0]
            #Pandas reads in data the other way I guess
            if len(temp_tplot['dq'][i][1][0][2].shape) == 2:
                temp_y_data = np.transpose(temp_tplot['dq'][i][1][0][2])
            else:
                temp_y_data = temp_tplot['dq'][i][1][0][2]
            #If there are more than 4 fields, that means it is a spectrogram
            if len(temp_tplot['dq'][i][1][0]) > 4:
                temp_v_data = temp_tplot['dq'][i][1][0][4]
                #Change from little endian to big endian, since pandas apparently hates little endian
                #We might want to move this into the store_data procedure eventually
                if (temp_x_data.dtype.byteorder == '>'):
                    temp_x_data = temp_x_data.byteswap().newbyteorder()
                if (temp_y_data.dtype.byteorder == '>'):
                    temp_y_data = temp_y_data.byteswap().newbyteorder()
                if (temp_v_data.dtype.byteorder == '>'):
                    temp_v_data = temp_v_data.byteswap().newbyteorder()
                store_data(data_name, data={'x':temp_x_data, 'y':temp_y_data, 'v':temp_v_data})
            else:
                #Change from little endian to big endian, since pandas apparently hates little endian
                #We might want to move this into the store_data procedure eventually
                if (temp_x_data.dtype.byteorder == '>'):
                    temp_x_data = temp_x_data.byteswap().newbyteorder()
                if (temp_y_data.dtype.byteorder == '>'):
                    temp_y_data = temp_y_data.byteswap().newbyteorder()
                store_data(data_name, data={'x':temp_x_data, 'y':temp_y_data})
            # Restore per-variable plot options, if any are recorded.
            if temp_tplot['dq'][i][3].dtype.names is not None:
                for option_name in temp_tplot['dq'][i][3].dtype.names:
                    options(data_name, option_name, temp_tplot['dq'][i][3][option_name][0])
            data_quants[data_name].trange = temp_tplot['dq'][i][4].tolist()
            data_quants[data_name].dtype = temp_tplot['dq'][i][5]
            data_quants[data_name].create_time = temp_tplot['dq'][i][6]
        # Restore global (whole-plot) options from the 'tv' record.
        for option_name in temp_tplot['tv'][0][0].dtype.names:
            if option_name == 'TRANGE':
                tplot_options('x_range', temp_tplot['tv'][0][0][option_name][0])
            if option_name == 'WSIZE':
                tplot_options('wsize', temp_tplot['tv'][0][0][option_name][0])
            if option_name == 'VAR_LABEL':
                tplot_options('var_label', temp_tplot['tv'][0][0][option_name][0])
        if 'P' in temp_tplot['tv'][0][1].tolist():
            for option_name in temp_tplot['tv'][0][1]['P'][0].dtype.names:
                if option_name == 'TITLE':
                    tplot_options('title', temp_tplot['tv'][0][1]['P'][0][option_name][0])
        #temp_tplot['tv'][0][1] is all of the "settings" variables
        #temp_tplot['tv'][0][1]['D'][0] is "device" options
        #temp_tplot['tv'][0][1]['P'][0] is "plot" options
        #temp_tplot['tv'][0][1]['X'][0] is x axis options
        #temp_tplot['tv'][0][1]['Y'][0] is y axis options
        ####################################################################
    else:
        # Native pytplot save file: a pickled list of
        # [count, quant_1, ..., quant_n, global_options].
        temp = pickle.load(open(filename,"rb"))
        num_data_quants = temp[0]
        for i in range(0, num_data_quants):
            data_quants[temp[i+1].name] = temp[i+1]
        # NOTE(review): this rebinds a local name only; it does not update the
        # module-level tplot_opt_glob unless declared global — confirm intent.
        tplot_opt_glob = temp[num_data_quants+1]
    return
[ "def", "tplot_restore", "(", "filename", ")", ":", "#Error check", "if", "not", "(", "os", ".", "path", ".", "isfile", "(", "filename", ")", ")", ":", "print", "(", "\"Not a valid file name\"", ")", "return", "#Check if the restored file was an IDL file", "if", ...
This function will restore tplot variables that have been saved with the "tplot_save" command. .. note:: This function is compatible with the IDL tplot_save routine. If you have a ".tplot" file generated from IDL, this procedure will restore the data contained in the file. Not all plot options will transfer over at this time. Parameters: filename : str The file name and full path generated by the "tplot_save" command. Returns: None Examples: >>> # Restore the saved data from the tplot_save example >>> import pytplot >>> pytplot.restore('C:/temp/variable1.pytplot')
[ "This", "function", "will", "restore", "tplot", "variables", "that", "have", "been", "saved", "with", "the", "tplot_save", "command", ".", "..", "note", "::", "This", "function", "is", "compatible", "with", "the", "IDL", "tplot_save", "routine", ".", "If", "...
python
train
cltrudeau/django-awl
awl/waelsteng.py
https://github.com/cltrudeau/django-awl/blob/70d469ef9a161c1170b53aa017cf02d7c15eb90c/awl/waelsteng.py#L135-L142
def initiate(self):
    """Sets up the :class:`AdminSite` and creates a user with the
    appropriate privileges.  This should be called from the inheritor's
    :class:`TestCase.setUp` method.
    """
    # Fresh admin site and privileged user; authentication happens lazily.
    self.authed = False
    self.admin_user = create_admin(self.USERNAME, self.EMAIL, self.PASSWORD)
    self.site = admin.sites.AdminSite()
[ "def", "initiate", "(", "self", ")", ":", "self", ".", "site", "=", "admin", ".", "sites", ".", "AdminSite", "(", ")", "self", ".", "admin_user", "=", "create_admin", "(", "self", ".", "USERNAME", ",", "self", ".", "EMAIL", ",", "self", ".", "PASSWOR...
Sets up the :class:`AdminSite` and creates a user with the appropriate privileges. This should be called from the inheritor's :class:`TestCase.setUp` method.
[ "Sets", "up", "the", ":", "class", ":", "AdminSite", "and", "creates", "a", "user", "with", "the", "appropriate", "privileges", ".", "This", "should", "be", "called", "from", "the", "inheritor", "s", ":", "class", ":", "TestCase", ".", "setUp", "method", ...
python
valid
tensorflow/probability
tensorflow_probability/python/mcmc/text_messages_hmc.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/mcmc/text_messages_hmc.py#L44-L61
def text_messages_joint_log_prob(count_data, lambda_1, lambda_2, tau):
    """Joint log probability function."""
    num_points = tf.size(input=count_data)
    # Exponential prior on both rates, Uniform prior on the switch point.
    alpha = 1. / tf.reduce_mean(input_tensor=count_data)
    rv_lambda = tfd.Exponential(rate=alpha)
    rv_tau = tfd.Uniform()

    # Select lambda_1 before the switch point tau, lambda_2 after it.
    switch_mask = tf.cast(
        tau * tf.cast(num_points, dtype=tf.float32)
        <= tf.cast(tf.range(num_points), dtype=tf.float32),
        dtype=tf.int32)
    lambda_ = tf.gather([lambda_1, lambda_2], indices=switch_mask)
    rv_observation = tfd.Poisson(rate=lambda_)

    log_prob = rv_lambda.log_prob(lambda_1)
    log_prob += rv_lambda.log_prob(lambda_2)
    log_prob += rv_tau.log_prob(tau)
    log_prob += tf.reduce_sum(input_tensor=rv_observation.log_prob(count_data))
    return log_prob
[ "def", "text_messages_joint_log_prob", "(", "count_data", ",", "lambda_1", ",", "lambda_2", ",", "tau", ")", ":", "alpha", "=", "(", "1.", "/", "tf", ".", "reduce_mean", "(", "input_tensor", "=", "count_data", ")", ")", "rv_lambda", "=", "tfd", ".", "Expon...
Joint log probability function.
[ "Joint", "log", "probability", "function", "." ]
python
test
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/interactive.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/interactive.py#L1074-L1097
def do_shell(self, arg):
    """
    ! - spawn a system shell
    shell - spawn a system shell
    ! <command> [arguments...] - execute a single shell command
    shell <command> [arguments...] - execute a single shell command
    """
    if self.cmdprefix:
        raise CmdError("prefix not allowed")

    # Locate cmd.exe via the environment; fall back to the bare filename,
    # which CreateProcess can resolve on its own for cmd.exe.
    shell = os.getenv('ComSpec', 'cmd.exe')

    # With a command: run it to completion via "/c"; without one, spawn an
    # interactive shell.
    cmdline = '%s /c %s' % (shell, arg) if arg else shell

    process = self.debug.system.start_process(cmdline, bConsole = True)
    process.wait()
[ "def", "do_shell", "(", "self", ",", "arg", ")", ":", "if", "self", ".", "cmdprefix", ":", "raise", "CmdError", "(", "\"prefix not allowed\"", ")", "# Try to use the environment to locate cmd.exe.", "# If not found, it's usually OK to just use the filename,", "# since cmd.exe...
! - spawn a system shell shell - spawn a system shell ! <command> [arguments...] - execute a single shell command shell <command> [arguments...] - execute a single shell command
[ "!", "-", "spawn", "a", "system", "shell", "shell", "-", "spawn", "a", "system", "shell", "!", "<command", ">", "[", "arguments", "...", "]", "-", "execute", "a", "single", "shell", "command", "shell", "<command", ">", "[", "arguments", "...", "]", "-"...
python
train
calmjs/calmjs
src/calmjs/base.py
https://github.com/calmjs/calmjs/blob/b9b407c2b6a7662da64bccba93bb8d92e7a5fafd/src/calmjs/base.py#L691-L700
def dump(self, blob, stream):
    """
    Call json.dump with the attributes of this instance as arguments.
    """
    # Keys are always sorted; indent/separators come from the instance.
    options = {
        'indent': self.indent,
        'sort_keys': True,
        'separators': self.separators,
    }
    json.dump(blob, stream, **options)
[ "def", "dump", "(", "self", ",", "blob", ",", "stream", ")", ":", "json", ".", "dump", "(", "blob", ",", "stream", ",", "indent", "=", "self", ".", "indent", ",", "sort_keys", "=", "True", ",", "separators", "=", "self", ".", "separators", ",", ")"...
Call json.dump with the attributes of this instance as arguments.
[ "Call", "json", ".", "dump", "with", "the", "attributes", "of", "this", "instance", "as", "arguments", "." ]
python
train
alephdata/memorious
memorious/logic/context.py
https://github.com/alephdata/memorious/blob/b4033c5064447ed5f696f9c2bbbc6c12062d2fa4/memorious/logic/context.py#L115-L136
def skip_incremental(self, *criteria):
    """Perform an incremental check on a set of criteria.

    This can be used to execute a part of a crawler only once per an
    interval (which is specified by the ``expire`` setting). If the
    operation has already been performed (and should thus be skipped),
    this will return ``True``. If the operation needs to be executed,
    the returned value will be ``False``.
    """
    if self.incremental:
        # this is pure convenience, and will probably backfire at some point.
        key = make_key(*criteria)
        if key is not None:
            if self.check_tag(key):
                return True
            # First time we see this key: record it so the next call skips.
            self.set_tag(key, None)
    return False
[ "def", "skip_incremental", "(", "self", ",", "*", "criteria", ")", ":", "if", "not", "self", ".", "incremental", ":", "return", "False", "# this is pure convenience, and will probably backfire at some point.", "key", "=", "make_key", "(", "*", "criteria", ")", "if",...
Perform an incremental check on a set of criteria. This can be used to execute a part of a crawler only once per an interval (which is specified by the ``expire`` setting). If the operation has already been performed (and should thus be skipped), this will return ``True``. If the operation needs to be executed, the returned value will be ``False``.
[ "Perform", "an", "incremental", "check", "on", "a", "set", "of", "criteria", "." ]
python
train
saltstack/salt
salt/cloud/clouds/joyent.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/joyent.py#L325-L370
def create_node(**kwargs):
    '''
    convenience function to make the rest api call for node creation.
    '''
    name = kwargs['name']
    location = kwargs['location']

    # Mandatory fields of the request payload.
    create_data = {
        'name': name,
        'package': kwargs['size']['name'],
        'image': kwargs['image']['name'],
    }

    # Optional fields are only included when explicitly provided.
    networks = kwargs.get('networks')
    if networks is not None:
        create_data['networks'] = networks

    locality = kwargs.get('locality')
    if locality is not None:
        create_data['locality'] = locality

    metadata = kwargs.get('metadata')
    if metadata is not None:
        for key, value in six.iteritems(metadata):
            create_data['metadata.{0}'.format(key)] = value

    tag = kwargs.get('tag')
    if tag is not None:
        for key, value in six.iteritems(tag):
            create_data['tag.{0}'.format(key)] = value

    firewall_enabled = kwargs.get('firewall_enabled')
    if firewall_enabled is not None:
        create_data['firewall_enabled'] = firewall_enabled

    data = salt.utils.json.dumps(create_data)

    ret = query(command='my/machines', data=data, method='POST',
                location=location)
    if ret[0] in VALID_RESPONSE_CODES:
        return ret[1]
    log.error('Failed to create node %s: %s', name, ret[1])
    return {}
[ "def", "create_node", "(", "*", "*", "kwargs", ")", ":", "name", "=", "kwargs", "[", "'name'", "]", "size", "=", "kwargs", "[", "'size'", "]", "image", "=", "kwargs", "[", "'image'", "]", "location", "=", "kwargs", "[", "'location'", "]", "networks", ...
convenience function to make the rest api call for node creation.
[ "convenience", "function", "to", "make", "the", "rest", "api", "call", "for", "node", "creation", "." ]
python
train
python-security/pyt
pyt/vulnerabilities/trigger_definitions_parser.py
https://github.com/python-security/pyt/blob/efc0cfb716e40e0c8df4098f1cc8cf43723cd31f/pyt/vulnerabilities/trigger_definitions_parser.py#L69-L82
def parse(trigger_word_file):
    """Parse the file for source and sink definitions.

    Returns:
       A definitions tuple with sources and sinks.
    """
    with open(trigger_word_file) as fd:
        triggers_dict = json.load(fd)

    sources = list(map(Source, triggers_dict['sources']))
    sinks = []
    for trigger, data in triggers_dict['sinks'].items():
        sinks.append(Sink.from_json(trigger, data))
    return Definitions(sources, sinks)
[ "def", "parse", "(", "trigger_word_file", ")", ":", "with", "open", "(", "trigger_word_file", ")", "as", "fd", ":", "triggers_dict", "=", "json", ".", "load", "(", "fd", ")", "sources", "=", "[", "Source", "(", "s", ")", "for", "s", "in", "triggers_dic...
Parse the file for source and sink definitions. Returns: A definitions tuple with sources and sinks.
[ "Parse", "the", "file", "for", "source", "and", "sink", "definitions", "." ]
python
train
Azure/blobxfer
blobxfer/operations/download.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/operations/download.py#L639-L657
def _finalize_chunk(self, dd, offsets):
    # type: (Downloader, blobxfer.models.download.Descriptor,
    #        blobxfer.models.download.Offsets) -> None
    """Finalize written chunk
    :param Downloader self: this
    :param blobxfer.models.download.Descriptor dd: download descriptor
    :param blobxfer.models.download.Offsets offsets: offsets
    """
    if dd.entity.is_encrypted:
        dd.mark_unchecked_chunk_decrypted(offsets.chunk_num)
    # Integrity check data and write to disk. This runs regardless of
    # md5/hmac enablement because it is also needed for resume support.
    dd.perform_chunked_integrity_check()
    # Remove the pending disk operation and account the downloaded bytes
    # atomically under the disk-operation lock.
    with self._disk_operation_lock:
        op_id = (blobxfer.operations.download.Downloader.
                 create_unique_disk_operation_id(dd, offsets))
        self._disk_set.remove(op_id)
        self._download_bytes_sofar += offsets.num_bytes
[ "def", "_finalize_chunk", "(", "self", ",", "dd", ",", "offsets", ")", ":", "# type: (Downloader, blobxfer.models.download.Descriptor,", "# blobxfer.models.download.Offsets) -> None", "if", "dd", ".", "entity", ".", "is_encrypted", ":", "dd", ".", "mark_unchecked_chu...
Finalize written chunk :param Downloader self: this :param blobxfer.models.download.Descriptor dd: download descriptor :param blobxfer.models.download.Offsets offsets: offsets
[ "Finalize", "written", "chunk", ":", "param", "Downloader", "self", ":", "this", ":", "param", "blobxfer", ".", "models", ".", "download", ".", "Descriptor", "dd", ":", "download", "descriptor", ":", "param", "blobxfer", ".", "models", ".", "download", ".", ...
python
train
openeemeter/eemeter
eemeter/io.py
https://github.com/openeemeter/eemeter/blob/e03b1cc5f4906e8f4f7fd16183bc037107d1dfa0/eemeter/io.py#L186-L215
def temperature_data_from_json(data, orient="list"):
    """ Load temperature data from json. (Must be given in degrees
    Fahrenheit).

    Default format::

        [
            ['2017-01-01T00:00:00+00:00', 3.5],
            ['2017-01-01T01:00:00+00:00', 5.4],
            ['2017-01-01T02:00:00+00:00', 7.4],
        ]

    Parameters
    ----------
    data : :any:`list`
        List elements are each a rows of data.

    Returns
    -------
    series : :any:`pandas.Series`
        DataFrame with a single column (``'tempF'``) and a
        :any:`pandas.DatetimeIndex`.
    """
    # Only the "list" orientation is supported; reject anything else early.
    if orient != "list":
        raise ValueError("orientation not recognized.")
    frame = pd.DataFrame(data, columns=["dt", "tempF"])
    series = frame.tempF
    series.index = pd.DatetimeIndex(frame.dt).tz_localize("UTC")
    return series
[ "def", "temperature_data_from_json", "(", "data", ",", "orient", "=", "\"list\"", ")", ":", "if", "orient", "==", "\"list\"", ":", "df", "=", "pd", ".", "DataFrame", "(", "data", ",", "columns", "=", "[", "\"dt\"", ",", "\"tempF\"", "]", ")", "series", ...
Load temperature data from json. (Must be given in degrees Fahrenheit). Default format:: [ ['2017-01-01T00:00:00+00:00', 3.5], ['2017-01-01T01:00:00+00:00', 5.4], ['2017-01-01T02:00:00+00:00', 7.4], ] Parameters ---------- data : :any:`list` List elements are each a rows of data. Returns ------- series : :any:`pandas.Series` DataFrame with a single column (``'tempF'``) and a :any:`pandas.DatetimeIndex`.
[ "Load", "temperature", "data", "from", "json", ".", "(", "Must", "be", "given", "in", "degrees", "Fahrenheit", ")", "." ]
python
train
pytroll/satpy
satpy/resample.py
https://github.com/pytroll/satpy/blob/1f21d20ac686b745fb0da9b4030d139893e066dd/satpy/resample.py#L233-L272
def resample(self, data, cache_dir=None, mask_area=None, **kwargs): """Resample `data` by calling `precompute` and `compute` methods. Only certain resampling classes may use `cache_dir` and the `mask` provided when `mask_area` is True. The return value of calling the `precompute` method is passed as the `cache_id` keyword argument of the `compute` method, but may not be used directly for caching. It is up to the individual resampler subclasses to determine how this is used. Args: data (xarray.DataArray): Data to be resampled cache_dir (str): directory to cache precomputed results (default False, optional) mask_area (bool): Mask geolocation data where data values are invalid. This should be used when data values may affect what neighbors are considered valid. Returns (xarray.DataArray): Data resampled to the target area """ # default is to mask areas for SwathDefinitions if mask_area is None and isinstance( self.source_geo_def, SwathDefinition): mask_area = True if mask_area: if isinstance(self.source_geo_def, SwathDefinition): geo_dims = self.source_geo_def.lons.dims else: geo_dims = ('y', 'x') flat_dims = [dim for dim in data.dims if dim not in geo_dims] # xarray <= 0.10.1 computes dask arrays during isnull if np.issubdtype(data.dtype, np.integer): kwargs['mask'] = data == data.attrs.get('_FillValue', np.iinfo(data.dtype.type).max) else: kwargs['mask'] = data.isnull() kwargs['mask'] = kwargs['mask'].all(dim=flat_dims) cache_id = self.precompute(cache_dir=cache_dir, **kwargs) return self.compute(data, cache_id=cache_id, **kwargs)
[ "def", "resample", "(", "self", ",", "data", ",", "cache_dir", "=", "None", ",", "mask_area", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# default is to mask areas for SwathDefinitions", "if", "mask_area", "is", "None", "and", "isinstance", "(", "self", ...
Resample `data` by calling `precompute` and `compute` methods. Only certain resampling classes may use `cache_dir` and the `mask` provided when `mask_area` is True. The return value of calling the `precompute` method is passed as the `cache_id` keyword argument of the `compute` method, but may not be used directly for caching. It is up to the individual resampler subclasses to determine how this is used. Args: data (xarray.DataArray): Data to be resampled cache_dir (str): directory to cache precomputed results (default False, optional) mask_area (bool): Mask geolocation data where data values are invalid. This should be used when data values may affect what neighbors are considered valid. Returns (xarray.DataArray): Data resampled to the target area
[ "Resample", "data", "by", "calling", "precompute", "and", "compute", "methods", "." ]
python
train
hydpy-dev/hydpy
hydpy/core/devicetools.py
https://github.com/hydpy-dev/hydpy/blob/1bc6a82cf30786521d86b36e27900c6717d3348d/hydpy/core/devicetools.py#L814-L818
def prepare_allseries(self, ramflag: bool = True) -> None:
    """Prepare both the simulation and the observation series.

    Delegates to methods |Node.prepare_simseries| and
    |Node.prepare_obsseries|, forwarding `ramflag` to each.
    """
    for prepare in (self.prepare_simseries, self.prepare_obsseries):
        prepare(ramflag)
[ "def", "prepare_allseries", "(", "self", ",", "ramflag", ":", "bool", "=", "True", ")", "->", "None", ":", "self", ".", "prepare_simseries", "(", "ramflag", ")", "self", ".", "prepare_obsseries", "(", "ramflag", ")" ]
Call methods |Node.prepare_simseries| and |Node.prepare_obsseries|.
[ "Call", "methods", "|Node", ".", "prepare_simseries|", "and", "|Node", ".", "prepare_obsseries|", "." ]
python
train
OnroerendErfgoed/crabpy
crabpy/gateway/crab.py
https://github.com/OnroerendErfgoed/crabpy/blob/3a6fd8bc5aca37c2a173e3ea94e4e468b8aa79c1/crabpy/gateway/crab.py#L128-L163
def get_gewest_by_id(self, id):
    '''
    Get a `gewest` by id.

    :param integer id: The id of a `gewest`.
    :rtype: A :class:`Gewest`.
    '''
    def creator():
        # Fetch the region once per supported language instead of
        # spelling out three near-identical requests.
        responses = {
            taal: crab_gateway_request(
                self.client, 'GetGewestByGewestIdAndTaalCode', id, taal
            )
            for taal in ('nl', 'fr', 'de')
        }
        nl = responses['nl']
        # Fix: use identity comparison for the missing-value check
        # (`== None` relies on the SOAP proxy's equality semantics).
        if nl is None:
            raise GatewayResourceNotFoundException()
        return Gewest(
            nl.GewestId,
            {taal: res.GewestNaam for taal, res in responses.items()},
            (nl.CenterX, nl.CenterY),
            (nl.MinimumX, nl.MinimumY, nl.MaximumX, nl.MaximumY),
        )
    # NOTE(review): configuration is checked on the 'permanent' cache but
    # the value is stored in the 'long' cache -- confirm this asymmetry
    # is intentional before changing it.
    if self.caches['permanent'].is_configured:
        key = 'GetGewestByGewestId#%s' % id
        gewest = self.caches['long'].get_or_create(key, creator)
    else:
        gewest = creator()
    gewest.set_gateway(self)
    return gewest
[ "def", "get_gewest_by_id", "(", "self", ",", "id", ")", ":", "def", "creator", "(", ")", ":", "nl", "=", "crab_gateway_request", "(", "self", ".", "client", ",", "'GetGewestByGewestIdAndTaalCode'", ",", "id", ",", "'nl'", ")", "fr", "=", "crab_gateway_reques...
Get a `gewest` by id. :param integer id: The id of a `gewest`. :rtype: A :class:`Gewest`.
[ "Get", "a", "gewest", "by", "id", "." ]
python
train
tgbugs/pyontutils
pyontutils/core.py
https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/pyontutils/core.py#L250-L264
def add_op(self, id_, label=None, subPropertyOf=None, inverse=None,
           transitive=False, addPrefix=True):
    """Add id_ as an owl:ObjectProperty.

    :param id_: identifier of the property to add.
    :param label: optional rdfs:label; also the source of the namespace
        prefix when ``addPrefix`` is True.
    :param subPropertyOf: optional parent property.
    :param inverse: optional owl:inverseOf property.
    :param transitive: when True also type the property as
        owl:TransitiveProperty.
    :param addPrefix: derive and register a CamelCase prefix from the
        label (requires a label).
    """
    self.add_trip(id_, rdf.type, owl.ObjectProperty)
    if inverse:
        self.add_trip(id_, owl.inverseOf, inverse)
    if subPropertyOf:
        self.add_trip(id_, rdfs.subPropertyOf, subPropertyOf)
    if label:
        self.add_trip(id_, rdfs.label, label)
        # Fix: deriving a prefix requires a label; previously the default
        # addPrefix=True with label=None raised AttributeError on
        # label.split().
        if addPrefix:
            prefix = ''.join(s.capitalize() for s in label.split())
            namespace = self.expand(id_)
            self.add_namespace(prefix, namespace)
    if transitive:
        self.add_trip(id_, rdf.type, owl.TransitiveProperty)
[ "def", "add_op", "(", "self", ",", "id_", ",", "label", "=", "None", ",", "subPropertyOf", "=", "None", ",", "inverse", "=", "None", ",", "transitive", "=", "False", ",", "addPrefix", "=", "True", ")", ":", "self", ".", "add_trip", "(", "id_", ",", ...
Add id_ as an owl:ObjectProperty
[ "Add", "id_", "as", "an", "owl", ":", "ObjectProperty" ]
python
train
wandb/client
wandb/summary.py
https://github.com/wandb/client/blob/7d08954ed5674fee223cd85ed0d8518fe47266b2/wandb/summary.py#L253-L269
def _decode(self, path, json_value): """Decode a `dict` encoded by `Summary._encode()`, loading h5 objects. h5 objects may be very large, so we won't have loaded them automatically. """ if isinstance(json_value, dict): if json_value.get("_type") in H5_TYPES: return self.read_h5(path, json_value) elif json_value.get("_type") == 'data-frame': wandb.termerror( 'This data frame was saved via the wandb data API. Contact support@wandb.com for help.') return None # TODO: transform wandb objects and plots else: return SummarySubDict(self, path) else: return json_value
[ "def", "_decode", "(", "self", ",", "path", ",", "json_value", ")", ":", "if", "isinstance", "(", "json_value", ",", "dict", ")", ":", "if", "json_value", ".", "get", "(", "\"_type\"", ")", "in", "H5_TYPES", ":", "return", "self", ".", "read_h5", "(", ...
Decode a `dict` encoded by `Summary._encode()`, loading h5 objects. h5 objects may be very large, so we won't have loaded them automatically.
[ "Decode", "a", "dict", "encoded", "by", "Summary", ".", "_encode", "()", "loading", "h5", "objects", "." ]
python
train
synw/dataswim
dataswim/data/transform/calculations.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/transform/calculations.py#L129-L150
def diffsp(self, col: str, serie: "iterable", name: str="Diff"):
    """
    Add a diff column in percentage from a serie. The serie is an
    iterable of the same length than the dataframe

    :param col: column to diff
    :type col: str
    :param serie: serie to diff from
    :type serie: iterable
    :param name: name of the diff col, defaults to "Diff"
    :param name: str, optional

    :example: ``ds.diffp("Col 1", [1, 1, 4], "New col")``
    """
    try:
        # Percentage of each row's value relative to the matching
        # element of the reference serie.
        self.df[name] = [
            (row[col] * 100) / serie[i]
            for i, row in self.df.iterrows()
        ]
    except Exception as e:
        self.err(e, self._append, "Can not diff column from serie")
[ "def", "diffsp", "(", "self", ",", "col", ":", "str", ",", "serie", ":", "\"iterable\"", ",", "name", ":", "str", "=", "\"Diff\"", ")", ":", "try", ":", "d", "=", "[", "]", "for", "i", ",", "row", "in", "self", ".", "df", ".", "iterrows", "(", ...
Add a diff column in percentage from a serie. The serie is an iterable of the same length than the dataframe :param col: column to diff :type col: str :param serie: serie to diff from :type serie: iterable :param name: name of the diff col, defaults to "Diff" :param name: str, optional :example: ``ds.diffp("Col 1", [1, 1, 4], "New col")``
[ "Add", "a", "diff", "column", "in", "percentage", "from", "a", "serie", ".", "The", "serie", "is", "an", "iterable", "of", "the", "same", "length", "than", "the", "dataframe" ]
python
train
marshallward/f90nml
f90nml/parser.py
https://github.com/marshallward/f90nml/blob/4932cabc5221afc844ee6a5b4a05ceb8bd4a2711/f90nml/parser.py#L852-L862
def merge_values(src, new):
    """Merge two lists or dicts into a single element."""
    if isinstance(src, dict) and isinstance(new, dict):
        return merge_dicts(src, new)
    # Scalars are promoted to one-element lists before a list merge.
    src_list = src if isinstance(src, list) else [src]
    new_list = new if isinstance(new, list) else [new]
    return merge_lists(src_list, new_list)
[ "def", "merge_values", "(", "src", ",", "new", ")", ":", "if", "isinstance", "(", "src", ",", "dict", ")", "and", "isinstance", "(", "new", ",", "dict", ")", ":", "return", "merge_dicts", "(", "src", ",", "new", ")", "else", ":", "if", "not", "isin...
Merge two lists or dicts into a single element.
[ "Merge", "two", "lists", "or", "dicts", "into", "a", "single", "element", "." ]
python
train
shoebot/shoebot
lib/colors/__init__.py
https://github.com/shoebot/shoebot/blob/d554c1765c1899fa25727c9fc6805d221585562b/lib/colors/__init__.py#L1552-L1582
def monochrome(clr):
    """Returns colors in the same hue with varying brightness/saturation."""

    def _shift(value, floor, threshold, bump):
        # Push the value up when it sits close to the floor,
        # otherwise pull it down by the floor amount.
        if value - floor < threshold:
            return value + bump
        return value - floor

    def _variant(brightness, saturation=None):
        shade = clr.copy()
        shade.brightness = brightness
        if saturation is not None:
            shade.saturation = saturation
        return shade

    colors = colorlist(clr)
    colors.append(_variant(_shift(clr.brightness, 0.5, 0.2, 0.3),
                           _shift(clr.saturation, 0.3, 0.1, 0.3)))
    colors.append(_variant(_shift(clr.brightness, 0.2, 0.2, 0.6)))
    colors.append(_variant(max(0.2, clr.brightness + (1 - clr.brightness) * 0.2),
                           _shift(clr.saturation, 0.3, 0.1, 0.3)))
    colors.append(_variant(_shift(clr.brightness, 0.5, 0.2, 0.3)))
    return colors
[ "def", "monochrome", "(", "clr", ")", ":", "def", "_wrap", "(", "x", ",", "min", ",", "threshold", ",", "plus", ")", ":", "if", "x", "-", "min", "<", "threshold", ":", "return", "x", "+", "plus", "else", ":", "return", "x", "-", "min", "colors", ...
Returns colors in the same hue with varying brightness/saturation.
[ "Returns", "colors", "in", "the", "same", "hue", "with", "varying", "brightness", "/", "saturation", "." ]
python
valid
michaelpb/omnic
omnic/conversion/graph.py
https://github.com/michaelpb/omnic/blob/1111cfd73c9dc1955afe42d9cf2a468c46f83cd6/omnic/conversion/graph.py#L74-L90
def _setup_profiles(self, conversion_profiles):
    '''
        Add given conversion profiles checking for invalid profiles
    '''
    for profile_key, steps in conversion_profiles.items():
        # A bare string is a single-step profile.
        if isinstance(steps, str):
            steps = (steps, )
        for left_step, right_step in pair_looper(steps):
            step_pair = (_format(left_step), _format(right_step))
            if step_pair not in self.converters:
                log.warning('Invalid conversion profile %s, unknown step %s'
                            % (repr(profile_key), repr(step_pair)))
                break
        else:
            # The loop finished without a break: every step is known,
            # so the profile is accepted.
            self.conversion_profiles[profile_key] = steps
[ "def", "_setup_profiles", "(", "self", ",", "conversion_profiles", ")", ":", "# Check for invalid profiles", "for", "key", ",", "path", "in", "conversion_profiles", ".", "items", "(", ")", ":", "if", "isinstance", "(", "path", ",", "str", ")", ":", "path", "...
Add given conversion profiles checking for invalid profiles
[ "Add", "given", "conversion", "profiles", "checking", "for", "invalid", "profiles" ]
python
train
skorch-dev/skorch
skorch/utils.py
https://github.com/skorch-dev/skorch/blob/5b9b8b7b7712cb6e5aaa759d9608ea6269d5bcd3/skorch/utils.py#L183-L217
def check_indexing(data):
    """Determine upfront how incoming data should be indexed and return an
    appropriate indexing function with signature f(data, index).

    This is useful for deciding once how data should be indexed instead
    of doing it repeatedly for each batch, thus saving some time.
    """
    if data is None:
        return _indexing_none

    if isinstance(data, dict):
        # dictionary of containers
        return _indexing_dict

    if isinstance(data, (list, tuple)):
        try:
            # Probe the first element: if it is indexable, treat the
            # whole sequence as a list/tuple of containers.
            # TODO: Is there a better way than just to try to index? This
            # is error prone (e.g. if one day list of strings are
            # possible).
            multi_indexing(data[0], 0)
            per_item = [check_indexing(item) for item in data]
            return partial(_indexing_list_tuple_of_data, indexings=per_item)
        except TypeError:
            # list or tuple of plain values
            return _indexing_other

    if is_pandas_ndframe(data):
        # pandas NDFrame, will be transformed to dict
        return _indexing_ndframe

    # torch tensor, numpy ndarray, list
    return _indexing_other
[ "def", "check_indexing", "(", "data", ")", ":", "if", "data", "is", "None", ":", "return", "_indexing_none", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "# dictionary of containers", "return", "_indexing_dict", "if", "isinstance", "(", "data", ",",...
Perform a check how incoming data should be indexed and return an appropriate indexing function with signature f(data, index). This is useful for determining upfront how data should be indexed instead of doing it repeatedly for each batch, thus saving some time.
[ "Perform", "a", "check", "how", "incoming", "data", "should", "be", "indexed", "and", "return", "an", "appropriate", "indexing", "function", "with", "signature", "f", "(", "data", "index", ")", "." ]
python
train
edeposit/edeposit.amqp.harvester
src/edeposit/amqp/harvester/scrappers/cpress_cz.py
https://github.com/edeposit/edeposit.amqp.harvester/blob/38cb87ccdf6bf2f550a98460d0a329c4b9dc8e2e/src/edeposit/amqp/harvester/scrappers/cpress_cz.py#L95-L129
def _parse_authors(html_chunk):
    """
    Parse authors of the book.

    Args:
        html_chunk (obj): HTMLElement containing slice of the page with details.

    Returns:
        list: List of :class:`structures.Author` objects. Blank if no author \
              found.
    """
    author_links = html_chunk.match(
        ["div", {"class": "polozka_autor"}],
        "a"
    )

    authors = []
    for link in author_links:
        name = link.getContent().strip()
        if not name:
            # Anchor without a readable name - skip it.
            continue

        # Absolutize the link when present; keep the raw value otherwise.
        href = link.params.get("href", None)
        if href:
            href = normalize_url(BASE_URL, href)

        authors.append(Author(name, href))

    return authors
[ "def", "_parse_authors", "(", "html_chunk", ")", ":", "authors_tags", "=", "html_chunk", ".", "match", "(", "[", "\"div\"", ",", "{", "\"class\"", ":", "\"polozka_autor\"", "}", "]", ",", "\"a\"", ")", "authors", "=", "[", "]", "for", "author_tag", "in", ...
Parse authors of the book. Args: html_chunk (obj): HTMLElement containing slice of the page with details. Returns: list: List of :class:`structures.Author` objects. Blank if no author \ found.
[ "Parse", "authors", "of", "the", "book", "." ]
python
train
iotile/coretools
iotileship/iotile/ship/resources/hardware_manager.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotileship/iotile/ship/resources/hardware_manager.py#L46-L64
def open(self):
    """Open and potentially connect to a device."""
    self.hwman = HardwareManager(port=self._port)
    self.opened = True

    # Prefer an explicit connection string; fall back to a device id;
    # otherwise just leave the manager open without connecting.
    if self._connection_string is not None:
        connect = self.hwman.connect_direct
        target = self._connection_string
    elif self._connect_id is not None:
        connect = self.hwman.connect
        target = self._connect_id
    else:
        return

    try:
        connect(target)
    except HardwareError:
        # Do not leave a half-open manager behind on failure.
        self.hwman.close()
        raise
[ "def", "open", "(", "self", ")", ":", "self", ".", "hwman", "=", "HardwareManager", "(", "port", "=", "self", ".", "_port", ")", "self", ".", "opened", "=", "True", "if", "self", ".", "_connection_string", "is", "not", "None", ":", "try", ":", "self"...
Open and potentially connect to a device.
[ "Open", "and", "potentially", "connect", "to", "a", "device", "." ]
python
train
Julian/Ivoire
ivoire/transform.py
https://github.com/Julian/Ivoire/blob/5b8218cffa409ed733cf850a6fde16fafb8fc2af/ivoire/transform.py#L141-L152
def takes_only_self(self):
    """
    Return an argument list node that takes only ``self``.
    """
    self_arg = ast.arg(arg="self")
    return ast.arguments(
        args=[self_arg],
        kwonlyargs=[],
        kw_defaults=[],
        defaults=[],
    )
[ "def", "takes_only_self", "(", "self", ")", ":", "return", "ast", ".", "arguments", "(", "args", "=", "[", "ast", ".", "arg", "(", "arg", "=", "\"self\"", ")", "]", ",", "defaults", "=", "[", "]", ",", "kw_defaults", "=", "[", "]", ",", "kwonlyargs...
Return an argument list node that takes only ``self``.
[ "Return", "an", "argument", "list", "node", "that", "takes", "only", "self", "." ]
python
test
ebu/PlugIt
plugit_proxy/views.py
https://github.com/ebu/PlugIt/blob/de5f1e870f67caaef7a4a58e4bb1ed54d9c5dc53/plugit_proxy/views.py#L1250-L1272
def api_send_mail(request, key=None, hproPk=None):
    """Send an email. POST parameters are used.

    POST fields: ``sender`` (optional, defaults to settings.MAIL_SENDER),
    ``dests`` (list), ``subject``, ``message``, ``html_message``
    (optional; the string ``'false'`` disables HTML) and ``response_id``
    (optional deduplication key, combined with ``hproPk``).
    """
    if not check_api_key(request, key, hproPk):
        # Fix: return an instance, not the class, so the framework
        # actually sends a 403 response.
        return HttpResponseForbidden()

    sender = request.POST.get('sender', settings.MAIL_SENDER)
    dests = request.POST.getlist('dests')
    subject = request.POST['subject']
    message = request.POST['message']
    html_message = request.POST.get('html_message')
    if html_message and html_message.lower() == 'false':
        html_message = False

    if 'response_id' in request.POST:
        # %s-formatting also tolerates a non-string hproPk, which plain
        # '+' concatenation would not.
        key = '%s:%s' % (hproPk, request.POST['response_id'])
    else:
        key = None

    generic_send_mail(sender, dests, subject, message, key,
                      'PlugIt API (%s)' % (hproPk or 'StandAlone',),
                      html_message)

    return HttpResponse(json.dumps({}), content_type="application/json")
[ "def", "api_send_mail", "(", "request", ",", "key", "=", "None", ",", "hproPk", "=", "None", ")", ":", "if", "not", "check_api_key", "(", "request", ",", "key", ",", "hproPk", ")", ":", "return", "HttpResponseForbidden", "sender", "=", "request", ".", "P...
Send a email. Posts parameters are used
[ "Send", "a", "email", ".", "Posts", "parameters", "are", "used" ]
python
train
SecurityInnovation/PGPy
pgpy/pgp.py
https://github.com/SecurityInnovation/PGPy/blob/f1c3d68e32c334f5aa14c34580925e97f17f4fde/pgpy/pgp.py#L102-L108
def compprefs(self):
    """
    A ``list`` of preferred compression algorithms specified in this
    signature, if any. Otherwise, an empty ``list``.
    """
    # NOTE(review): membership is tested with the bare name while the
    # 'h_'-prefixed (hashed) key is read back; presumably the subpacket
    # container resolves both -- confirm against its __contains__.
    if 'PreferredCompressionAlgorithms' not in self._signature.subpackets:
        return []
    subpacket = next(iter(
        self._signature.subpackets['h_PreferredCompressionAlgorithms']))
    return subpacket.flags
[ "def", "compprefs", "(", "self", ")", ":", "if", "'PreferredCompressionAlgorithms'", "in", "self", ".", "_signature", ".", "subpackets", ":", "return", "next", "(", "iter", "(", "self", ".", "_signature", ".", "subpackets", "[", "'h_PreferredCompressionAlgorithms'...
A ``list`` of preferred compression algorithms specified in this signature, if any. Otherwise, an empty ``list``.
[ "A", "list", "of", "preferred", "compression", "algorithms", "specified", "in", "this", "signature", "if", "any", ".", "Otherwise", "an", "empty", "list", "." ]
python
train
gem/oq-engine
openquake/baselib/__init__.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/baselib/__init__.py#L90-L99
def boolean(flag):
    """
    Convert string in boolean
    """
    lowered = flag.lower()
    if lowered in ('1', 'yes', 'true'):
        return True
    if lowered in ('0', 'no', 'false'):
        return False
    # Anything else is not a recognized boolean spelling.
    raise ValueError('Unknown flag %r' % lowered)
[ "def", "boolean", "(", "flag", ")", ":", "s", "=", "flag", ".", "lower", "(", ")", "if", "s", "in", "(", "'1'", ",", "'yes'", ",", "'true'", ")", ":", "return", "True", "elif", "s", "in", "(", "'0'", ",", "'no'", ",", "'false'", ")", ":", "re...
Convert string in boolean
[ "Convert", "string", "in", "boolean" ]
python
train
angr/angr
angr/state_plugins/heap/heap_libc.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/heap/heap_libc.py#L27-L36
def calloc(self, sim_nmemb, sim_size):
    """
    A somewhat faithful implementation of libc `calloc`.

    :param sim_nmemb: the number of elements to allocated
    :param sim_size:  the size of each element (in bytes)
    :returns:         the address of the allocation, or a NULL pointer if the
                      allocation failed
    """
    # Abstract: concrete heap plugins must provide this.
    method_name = self.calloc.__func__.__name__
    class_name = self.__class__.__name__
    raise NotImplementedError("%s not implemented for %s" % (method_name, class_name))
[ "def", "calloc", "(", "self", ",", "sim_nmemb", ",", "sim_size", ")", ":", "raise", "NotImplementedError", "(", "\"%s not implemented for %s\"", "%", "(", "self", ".", "calloc", ".", "__func__", ".", "__name__", ",", "self", ".", "__class__", ".", "__name__", ...
A somewhat faithful implementation of libc `calloc`. :param sim_nmemb: the number of elements to allocated :param sim_size: the size of each element (in bytes) :returns: the address of the allocation, or a NULL pointer if the allocation failed
[ "A", "somewhat", "faithful", "implementation", "of", "libc", "calloc", "." ]
python
train
rwl/pylon
pylon/util.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/util.py#L130-L140
def load(cls, filename, format=None):
    """ Return an instance of the class that is saved in the file with the
    given filename in the specified format.

    :param filename: path of the file to read.
    :param format: serialisation format; derived from the file extension
        when omitted.
    """
    if format is None:
        # try to derive protocol from file extension
        format = format_from_extension(filename)
    # Fix: the Python 2 ``file()`` builtin and the deprecated 'U' mode
    # flag do not exist on Python 3 -- use ``open`` in plain binary mode.
    with open(filename, 'rb') as fp:
        obj = cls.load_from_file_object(fp, format)
    obj.filename = filename
    return obj
[ "def", "load", "(", "cls", ",", "filename", ",", "format", "=", "None", ")", ":", "if", "format", "is", "None", ":", "# try to derive protocol from file extension", "format", "=", "format_from_extension", "(", "filename", ")", "with", "file", "(", "filename", ...
Return an instance of the class that is saved in the file with the given filename in the specified format.
[ "Return", "an", "instance", "of", "the", "class", "that", "is", "saved", "in", "the", "file", "with", "the", "given", "filename", "in", "the", "specified", "format", "." ]
python
train
xolox/python-vcs-repo-mgr
vcs_repo_mgr/backends/git.py
https://github.com/xolox/python-vcs-repo-mgr/blob/fdad2441a3e7ba5deeeddfa1c2f5ebc00c393aed/vcs_repo_mgr/backends/git.py#L254-L264
def find_tags(self):
    """Find information about the tags in the repository."""
    tag_prefix = 'refs/tags/'
    output = self.context.capture('git', 'show-ref', '--tags', check=False)
    for ref_line in output.splitlines():
        fields = ref_line.split()
        # Expect "<sha> refs/tags/<name>"; skip anything else.
        if len(fields) < 2 or not fields[1].startswith(tag_prefix):
            continue
        yield Revision(
            repository=self,
            revision_id=fields[0],
            tag=fields[1][len(tag_prefix):],
        )
[ "def", "find_tags", "(", "self", ")", ":", "listing", "=", "self", ".", "context", ".", "capture", "(", "'git'", ",", "'show-ref'", ",", "'--tags'", ",", "check", "=", "False", ")", "for", "line", "in", "listing", ".", "splitlines", "(", ")", ":", "t...
Find information about the tags in the repository.
[ "Find", "information", "about", "the", "tags", "in", "the", "repository", "." ]
python
train
PythonCharmers/python-future
src/future/backports/xmlrpc/server.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/xmlrpc/server.py#L463-L532
def do_POST(self):
    """Handles the HTTP POST request.

    Attempts to interpret all HTTP POST requests as XML-RPC calls,
    which are forwarded to the server's _dispatch method for handling.
    """
    # Check that the path is legal
    if not self.is_rpc_path_valid():
        self.report_404()
        return

    try:
        # Get arguments by reading body of request.
        # We read this in chunks to avoid straining
        # socket.read(); around the 10 or 15Mb mark, some platforms
        # begin to have problems (bug #792570).
        max_chunk_size = 10*1024*1024
        size_remaining = int(self.headers["content-length"])
        L = []
        while size_remaining:
            chunk_size = min(size_remaining, max_chunk_size)
            chunk = self.rfile.read(chunk_size)
            # A short read means the client closed early; stop reading.
            if not chunk:
                break
            L.append(chunk)
            size_remaining -= len(L[-1])
        data = b''.join(L)

        # May gzip-decompress; returns None after sending its own error
        # response, in which case there is nothing left to do here.
        data = self.decode_request_content(data)
        if data is None:
            return #response has been sent

        # In previous versions of SimpleXMLRPCServer, _dispatch
        # could be overridden in this class, instead of in
        # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
        # check to see if a subclass implements _dispatch and dispatch
        # using that method if present.
        response = self.server._marshaled_dispatch(
            data, getattr(self, '_dispatch', None), self.path
        )
    except Exception as e:
        # This should only happen if the module is buggy
        # internal error, report as HTTP server error
        self.send_response(500)

        # Send information about the exception if requested
        if hasattr(self.server, '_send_traceback_header') and \
                self.server._send_traceback_header:
            self.send_header("X-exception", str(e))
            # Tracebacks may contain non-ASCII text; escape it so the
            # header stays ASCII-safe.
            trace = traceback.format_exc()
            trace = str(trace.encode('ASCII', 'backslashreplace'), 'ASCII')
            self.send_header("X-traceback", trace)

        self.send_header("Content-length", "0")
        self.end_headers()
    else:
        # Success path: marshalled XML-RPC response, optionally gzipped
        # when the client accepts it and the payload is large enough.
        self.send_response(200)
        self.send_header("Content-type", "text/xml")
        if self.encode_threshold is not None:
            if len(response) > self.encode_threshold:
                q = self.accept_encodings().get("gzip", 0)
                if q:
                    try:
                        response = gzip_encode(response)
                        self.send_header("Content-Encoding", "gzip")
                    except NotImplementedError:
                        pass
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
[ "def", "do_POST", "(", "self", ")", ":", "# Check that the path is legal", "if", "not", "self", ".", "is_rpc_path_valid", "(", ")", ":", "self", ".", "report_404", "(", ")", "return", "try", ":", "# Get arguments by reading body of request.", "# We read this in chunks...
Handles the HTTP POST request. Attempts to interpret all HTTP POST requests as XML-RPC calls, which are forwarded to the server's _dispatch method for handling.
[ "Handles", "the", "HTTP", "POST", "request", "." ]
python
train
cokelaer/spectrum
src/spectrum/window.py
https://github.com/cokelaer/spectrum/blob/bad6c32e3f10e185098748f67bb421b378b06afe/src/spectrum/window.py#L1304-L1326
def window_cauchy(N, alpha=3):
    r"""Cauchy tapering window

    :param int N: window length
    :param float alpha: parameter of the poisson window

    .. math:: w(n) = \frac{1}{1+\left(\frac{\alpha*n}{N/2}\right)**2}

    .. plot::
        :width: 80%
        :include-source:

        from spectrum import window_visu
        window_visu(64, 'cauchy', alpha=3)
        window_visu(64, 'cauchy', alpha=4)
        window_visu(64, 'cauchy', alpha=5)

    .. seealso:: :func:`window_poisson`, :func:`window_hann`
    """
    # Symmetric sample positions centred on zero.
    samples = linspace(-N/2., (N)/2., N)
    scaled = alpha * samples / (N/2.)
    return 1. / (1. + scaled**2)
[ "def", "window_cauchy", "(", "N", ",", "alpha", "=", "3", ")", ":", "n", "=", "linspace", "(", "-", "N", "/", "2.", ",", "(", "N", ")", "/", "2.", ",", "N", ")", "w", "=", "1.", "/", "(", "1.", "+", "(", "alpha", "*", "n", "/", "(", "N"...
r"""Cauchy tapering window :param int N: window length :param float alpha: parameter of the poisson window .. math:: w(n) = \frac{1}{1+\left(\frac{\alpha*n}{N/2}\right)**2} .. plot:: :width: 80% :include-source: from spectrum import window_visu window_visu(64, 'cauchy', alpha=3) window_visu(64, 'cauchy', alpha=4) window_visu(64, 'cauchy', alpha=5) .. seealso:: :func:`window_poisson`, :func:`window_hann`
[ "r", "Cauchy", "tapering", "window" ]
python
valid
inasafe/inasafe
safe/gui/tools/multi_exposure_dialog.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/multi_exposure_dialog.py#L277-L295
def _list_selection_changed(self): """Selection has changed in the list.""" items = self.list_layers_in_map_report.selectedItems() self.remove_layer.setEnabled(len(items) >= 1) if len(items) == 1 and self.list_layers_in_map_report.count() >= 2: index = self.list_layers_in_map_report.indexFromItem(items[0]) index = index.row() if index == 0: self.move_up.setEnabled(False) self.move_down.setEnabled(True) elif index == self.list_layers_in_map_report.count() - 1: self.move_up.setEnabled(True) self.move_down.setEnabled(False) else: self.move_up.setEnabled(True) self.move_down.setEnabled(True) else: self.move_up.setEnabled(False) self.move_down.setEnabled(False)
[ "def", "_list_selection_changed", "(", "self", ")", ":", "items", "=", "self", ".", "list_layers_in_map_report", ".", "selectedItems", "(", ")", "self", ".", "remove_layer", ".", "setEnabled", "(", "len", "(", "items", ")", ">=", "1", ")", "if", "len", "("...
Selection has changed in the list.
[ "Selection", "has", "changed", "in", "the", "list", "." ]
python
train
CitrineInformatics/pypif
pypif/util/serializable.py
https://github.com/CitrineInformatics/pypif/blob/938348a8ff7b10b330770cccaaeb2109922f681b/pypif/util/serializable.py#L36-L49
def _get_object(class_, obj): """ Helper function that returns an object, or if it is a dictionary, initializes it from class_. :param class_: Class to use to instantiate object. :param obj: Object to process. :return: One or more objects. """ if isinstance(obj, list): return [Serializable._get_object(class_, i) for i in obj] elif isinstance(obj, dict): return class_(**keys_to_snake_case(obj)) else: return obj
[ "def", "_get_object", "(", "class_", ",", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "list", ")", ":", "return", "[", "Serializable", ".", "_get_object", "(", "class_", ",", "i", ")", "for", "i", "in", "obj", "]", "elif", "isinstance", "("...
Helper function that returns an object, or if it is a dictionary, initializes it from class_. :param class_: Class to use to instantiate object. :param obj: Object to process. :return: One or more objects.
[ "Helper", "function", "that", "returns", "an", "object", "or", "if", "it", "is", "a", "dictionary", "initializes", "it", "from", "class_", "." ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/server/cisco_dfa_rest.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/server/cisco_dfa_rest.py#L392-L397
def _logout_request(self, url_logout):
    """Internal logout request to DCNM. """
    # Fire-and-forget POST; the response body is ignored.
    # NOTE(review): certificate verification is deliberately disabled
    # (verify=False) -- presumably DCNM uses self-signed certs; confirm.
    requests.post(url_logout,
                  headers=self._req_headers,
                  timeout=self.timeout_resp,
                  verify=False)
[ "def", "_logout_request", "(", "self", ",", "url_logout", ")", ":", "requests", ".", "post", "(", "url_logout", ",", "headers", "=", "self", ".", "_req_headers", ",", "timeout", "=", "self", ".", "timeout_resp", ",", "verify", "=", "False", ")" ]
Internal logout request to DCNM.
[ "Internal", "logout", "request", "to", "DCNM", "." ]
python
train
dpkp/kafka-python
kafka/coordinator/consumer.py
https://github.com/dpkp/kafka-python/blob/f6a8a38937688ea2cc5dc13d3d1039493be5c9b5/kafka/coordinator/consumer.py#L379-L387
def refresh_committed_offsets_if_needed(self):
    """Fetch committed offsets for assigned partitions."""
    if not self._subscription.needs_fetch_committed_offsets:
        return
    partitions = self._subscription.assigned_partitions()
    committed = self.fetch_committed_offsets(partitions)
    for tp, offset_meta in six.iteritems(committed):
        # verify assignment is still active
        if self._subscription.is_assigned(tp):
            self._subscription.assignment[tp].committed = offset_meta.offset
    self._subscription.needs_fetch_committed_offsets = False
[ "def", "refresh_committed_offsets_if_needed", "(", "self", ")", ":", "if", "self", ".", "_subscription", ".", "needs_fetch_committed_offsets", ":", "offsets", "=", "self", ".", "fetch_committed_offsets", "(", "self", ".", "_subscription", ".", "assigned_partitions", "...
Fetch committed offsets for assigned partitions.
[ "Fetch", "committed", "offsets", "for", "assigned", "partitions", "." ]
python
train
tcalmant/ipopo
pelix/shell/parser.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/shell/parser.py#L62-L79
def _find_assignment(arg_token): """ Find the first non-escaped assignment in the given argument token. Returns -1 if no assignment was found. :param arg_token: The argument token :return: The index of the first assignment, or -1 """ idx = arg_token.find("=") while idx != -1: if idx != 0 and arg_token[idx - 1] != "\\": # No escape character return idx idx = arg_token.find("=", idx + 1) # No assignment found return -1
[ "def", "_find_assignment", "(", "arg_token", ")", ":", "idx", "=", "arg_token", ".", "find", "(", "\"=\"", ")", "while", "idx", "!=", "-", "1", ":", "if", "idx", "!=", "0", "and", "arg_token", "[", "idx", "-", "1", "]", "!=", "\"\\\\\"", ":", "# No...
Find the first non-escaped assignment in the given argument token. Returns -1 if no assignment was found. :param arg_token: The argument token :return: The index of the first assignment, or -1
[ "Find", "the", "first", "non", "-", "escaped", "assignment", "in", "the", "given", "argument", "token", ".", "Returns", "-", "1", "if", "no", "assignment", "was", "found", "." ]
python
train
openai/retro
retro/examples/brute.py
https://github.com/openai/retro/blob/29dc84fef6d7076fd11a3847d2877fe59e705d36/retro/examples/brute.py#L144-L162
def update_tree(root, executed_acts, total_rew): """ Given the tree, a list of actions that were executed before the game ended, and a reward, update the tree so that the path formed by the executed actions are all updated to the new reward. """ root.value = max(total_rew, root.value) root.visits += 1 new_nodes = 0 node = root for step, act in enumerate(executed_acts): if act not in node.children: node.children[act] = Node() new_nodes += 1 node = node.children[act] node.value = max(total_rew, node.value) node.visits += 1 return new_nodes
[ "def", "update_tree", "(", "root", ",", "executed_acts", ",", "total_rew", ")", ":", "root", ".", "value", "=", "max", "(", "total_rew", ",", "root", ".", "value", ")", "root", ".", "visits", "+=", "1", "new_nodes", "=", "0", "node", "=", "root", "fo...
Given the tree, a list of actions that were executed before the game ended, and a reward, update the tree so that the path formed by the executed actions are all updated to the new reward.
[ "Given", "the", "tree", "a", "list", "of", "actions", "that", "were", "executed", "before", "the", "game", "ended", "and", "a", "reward", "update", "the", "tree", "so", "that", "the", "path", "formed", "by", "the", "executed", "actions", "are", "all", "u...
python
train
tensorflow/tensor2tensor
tensor2tensor/models/research/glow_ops.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/glow_ops.py#L141-L147
def get_cond_latents_at_level(cond_latents, level, hparams): """Returns a single or list of conditional latents at level 'level'.""" if cond_latents: if hparams.latent_dist_encoder in ["conv_net", "conv3d_net"]: return [cond_latent[level] for cond_latent in cond_latents] elif hparams.latent_dist_encoder in ["pointwise", "conv_lstm"]: return cond_latents[level]
[ "def", "get_cond_latents_at_level", "(", "cond_latents", ",", "level", ",", "hparams", ")", ":", "if", "cond_latents", ":", "if", "hparams", ".", "latent_dist_encoder", "in", "[", "\"conv_net\"", ",", "\"conv3d_net\"", "]", ":", "return", "[", "cond_latent", "["...
Returns a single or list of conditional latents at level 'level'.
[ "Returns", "a", "single", "or", "list", "of", "conditional", "latents", "at", "level", "level", "." ]
python
train
mila-iqia/fuel
fuel/converters/binarized_mnist.py
https://github.com/mila-iqia/fuel/blob/1d6292dc25e3a115544237e392e61bff6631d23c/fuel/converters/binarized_mnist.py#L17-L71
def convert_binarized_mnist(directory, output_directory, output_filename='binarized_mnist.hdf5'): """Converts the binarized MNIST dataset to HDF5. Converts the binarized MNIST dataset used in R. Salakhutdinov's DBN paper [DBN] to an HDF5 dataset compatible with :class:`fuel.datasets.BinarizedMNIST`. The converted dataset is saved as 'binarized_mnist.hdf5'. This method assumes the existence of the files `binarized_mnist_{train,valid,test}.amat`, which are accessible through Hugo Larochelle's website [HUGO]. .. [DBN] Ruslan Salakhutdinov and Iain Murray, *On the Quantitative Analysis of Deep Belief Networks*, Proceedings of the 25th international conference on Machine learning, 2008, pp. 872-879. Parameters ---------- directory : str Directory in which input files reside. output_directory : str Directory in which to save the converted dataset. output_filename : str, optional Name of the saved dataset. Defaults to 'binarized_mnist.hdf5'. Returns ------- output_paths : tuple of str Single-element tuple containing the path to the converted dataset. """ output_path = os.path.join(output_directory, output_filename) h5file = h5py.File(output_path, mode='w') train_set = numpy.loadtxt( os.path.join(directory, TRAIN_FILE)).reshape( (-1, 1, 28, 28)).astype('uint8') valid_set = numpy.loadtxt( os.path.join(directory, VALID_FILE)).reshape( (-1, 1, 28, 28)).astype('uint8') test_set = numpy.loadtxt( os.path.join(directory, TEST_FILE)).reshape( (-1, 1, 28, 28)).astype('uint8') data = (('train', 'features', train_set), ('valid', 'features', valid_set), ('test', 'features', test_set)) fill_hdf5_file(h5file, data) for i, label in enumerate(('batch', 'channel', 'height', 'width')): h5file['features'].dims[i].label = label h5file.flush() h5file.close() return (output_path,)
[ "def", "convert_binarized_mnist", "(", "directory", ",", "output_directory", ",", "output_filename", "=", "'binarized_mnist.hdf5'", ")", ":", "output_path", "=", "os", ".", "path", ".", "join", "(", "output_directory", ",", "output_filename", ")", "h5file", "=", "...
Converts the binarized MNIST dataset to HDF5. Converts the binarized MNIST dataset used in R. Salakhutdinov's DBN paper [DBN] to an HDF5 dataset compatible with :class:`fuel.datasets.BinarizedMNIST`. The converted dataset is saved as 'binarized_mnist.hdf5'. This method assumes the existence of the files `binarized_mnist_{train,valid,test}.amat`, which are accessible through Hugo Larochelle's website [HUGO]. .. [DBN] Ruslan Salakhutdinov and Iain Murray, *On the Quantitative Analysis of Deep Belief Networks*, Proceedings of the 25th international conference on Machine learning, 2008, pp. 872-879. Parameters ---------- directory : str Directory in which input files reside. output_directory : str Directory in which to save the converted dataset. output_filename : str, optional Name of the saved dataset. Defaults to 'binarized_mnist.hdf5'. Returns ------- output_paths : tuple of str Single-element tuple containing the path to the converted dataset.
[ "Converts", "the", "binarized", "MNIST", "dataset", "to", "HDF5", "." ]
python
train
inveniosoftware/invenio-github
invenio_github/utils.py
https://github.com/inveniosoftware/invenio-github/blob/ec42fd6a06079310dcbe2c46d9fd79d5197bbe26/invenio_github/utils.py#L44-L49
def parse_timestamp(x): """Parse ISO8601 formatted timestamp.""" dt = dateutil.parser.parse(x) if dt.tzinfo is None: dt = dt.replace(tzinfo=pytz.utc) return dt
[ "def", "parse_timestamp", "(", "x", ")", ":", "dt", "=", "dateutil", ".", "parser", ".", "parse", "(", "x", ")", "if", "dt", ".", "tzinfo", "is", "None", ":", "dt", "=", "dt", ".", "replace", "(", "tzinfo", "=", "pytz", ".", "utc", ")", "return",...
Parse ISO8601 formatted timestamp.
[ "Parse", "ISO8601", "formatted", "timestamp", "." ]
python
train
google/grr
grr/server/grr_response_server/databases/mysql_flows.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/databases/mysql_flows.py#L1437-L1463
def WriteFlowOutputPluginLogEntries(self, entries, cursor=None): """Writes flow output plugin log entries for a given flow.""" query = ("INSERT INTO flow_output_plugin_log_entries " "(client_id, flow_id, hunt_id, output_plugin_id, " "log_entry_type, message) " "VALUES ") templates = [] args = [] for entry in entries: templates.append("(%s, %s, %s, %s, %s, %s)") args.append(db_utils.ClientIDToInt(entry.client_id)) args.append(db_utils.FlowIDToInt(entry.flow_id)) if entry.hunt_id: args.append(db_utils.HuntIDToInt(entry.hunt_id)) else: args.append(0) args.append(db_utils.OutputPluginIDToInt(entry.output_plugin_id)) args.append(int(entry.log_entry_type)) args.append(entry.message) query += ",".join(templates) try: cursor.execute(query, args) except MySQLdb.IntegrityError as e: raise db.AtLeastOneUnknownFlowError( [(entry.client_id, entry.flow_id) for entry in entries], cause=e)
[ "def", "WriteFlowOutputPluginLogEntries", "(", "self", ",", "entries", ",", "cursor", "=", "None", ")", ":", "query", "=", "(", "\"INSERT INTO flow_output_plugin_log_entries \"", "\"(client_id, flow_id, hunt_id, output_plugin_id, \"", "\"log_entry_type, message) \"", "\"VALUES \"...
Writes flow output plugin log entries for a given flow.
[ "Writes", "flow", "output", "plugin", "log", "entries", "for", "a", "given", "flow", "." ]
python
train
UCL-INGI/INGInious
inginious/frontend/parsable_text.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/parsable_text.py#L168-L171
def html(cls, string, show_everything=False, translation=gettext.NullTranslations()): # pylint: disable=unused-argument """Parses HTML""" out, _ = tidylib.tidy_fragment(string) return out
[ "def", "html", "(", "cls", ",", "string", ",", "show_everything", "=", "False", ",", "translation", "=", "gettext", ".", "NullTranslations", "(", ")", ")", ":", "# pylint: disable=unused-argument", "out", ",", "_", "=", "tidylib", ".", "tidy_fragment", "(", ...
Parses HTML
[ "Parses", "HTML" ]
python
train
kata198/AdvancedHTMLParser
AdvancedHTMLParser/Tags.py
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Tags.py#L1272-L1286
def getPeers(self): ''' getPeers - Get elements who share a parent with this element @return - TagCollection of elements ''' parentNode = self.parentNode # If no parent, no peers if not parentNode: return None peers = parentNode.children # Otherwise, get all children of parent excluding this node return TagCollection([peer for peer in peers if peer is not self])
[ "def", "getPeers", "(", "self", ")", ":", "parentNode", "=", "self", ".", "parentNode", "# If no parent, no peers", "if", "not", "parentNode", ":", "return", "None", "peers", "=", "parentNode", ".", "children", "# Otherwise, get all children of parent excluding this nod...
getPeers - Get elements who share a parent with this element @return - TagCollection of elements
[ "getPeers", "-", "Get", "elements", "who", "share", "a", "parent", "with", "this", "element" ]
python
train
nion-software/nionswift
nion/swift/model/NDataHandler.py
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/NDataHandler.py#L444-L467
def write_properties(self, properties, file_datetime): """ Write properties to the ndata file specified by reference. :param reference: the reference to which to write :param properties: the dict to write to the file :param file_datetime: the datetime for the file The properties param must not change during this method. Callers should take care to ensure this does not happen. """ with self.__lock: absolute_file_path = self.__file_path #logging.debug("WRITE properties %s for %s", absolute_file_path, key) make_directory_if_needed(os.path.dirname(absolute_file_path)) exists = os.path.exists(absolute_file_path) if exists: rewrite_zip(absolute_file_path, Utility.clean_dict(properties)) else: write_zip(absolute_file_path, None, Utility.clean_dict(properties)) # convert to utc time. tz_minutes = Utility.local_utcoffset_minutes(file_datetime) timestamp = calendar.timegm(file_datetime.timetuple()) - tz_minutes * 60 os.utime(absolute_file_path, (time.time(), timestamp))
[ "def", "write_properties", "(", "self", ",", "properties", ",", "file_datetime", ")", ":", "with", "self", ".", "__lock", ":", "absolute_file_path", "=", "self", ".", "__file_path", "#logging.debug(\"WRITE properties %s for %s\", absolute_file_path, key)", "make_directory_i...
Write properties to the ndata file specified by reference. :param reference: the reference to which to write :param properties: the dict to write to the file :param file_datetime: the datetime for the file The properties param must not change during this method. Callers should take care to ensure this does not happen.
[ "Write", "properties", "to", "the", "ndata", "file", "specified", "by", "reference", "." ]
python
train
jkenlooper/chill
src/chill/api.py
https://github.com/jkenlooper/chill/blob/35360c17c2a3b769ecb5406c6dabcf4cc70bd76f/src/chill/api.py#L53-L97
def _query(_node_id, value=None, **kw): "Look up value by using Query table" query_result = [] try: query_result = db.execute(text(fetch_query_string('select_query_from_node.sql')), **kw).fetchall() except DatabaseError as err: current_app.logger.error("DatabaseError: %s, %s", err, kw) return value #current_app.logger.debug("queries kw: %s", kw) #current_app.logger.debug("queries value: %s", value) current_app.logger.debug("queries: %s", query_result) if query_result: values = [] for query_name in [x['name'] for x in query_result]: if query_name: result = [] try: current_app.logger.debug("query_name: %s", query_name) #current_app.logger.debug("kw: %s", kw) # Query string can be insert or select here #statement = text(fetch_query_string(query_name)) #params = [x.key for x in statement.params().get_children()] #skw = {key: kw[key] for key in params} #result = db.execute(statement, **skw) result = db.execute(text(fetch_query_string(query_name)), **kw) current_app.logger.debug("result query: %s", result.keys()) except (DatabaseError, StatementError) as err: current_app.logger.error("DatabaseError (%s) %s: %s", query_name, kw, err) if result and result.returns_rows: result = result.fetchall() #values.append(([[dict(zip(result.keys(), x)) for x in result]], result.keys())) #values.append((result.fetchall(), result.keys())) #current_app.logger.debug("fetchall: %s", values) if len(result) == 0: values.append(([], [])) else: current_app.logger.debug("result: %s", result) # There may be more results, but only interested in the # first one. Use the older rowify method for now. # TODO: use case for rowify? values.append(rowify(result, [(x, None) for x in result[0].keys()])) #current_app.logger.debug("fetchone: %s", values) value = values #current_app.logger.debug("value: %s", value) return value
[ "def", "_query", "(", "_node_id", ",", "value", "=", "None", ",", "*", "*", "kw", ")", ":", "query_result", "=", "[", "]", "try", ":", "query_result", "=", "db", ".", "execute", "(", "text", "(", "fetch_query_string", "(", "'select_query_from_node.sql'", ...
Look up value by using Query table
[ "Look", "up", "value", "by", "using", "Query", "table" ]
python
train
TheHive-Project/Cortex-Analyzers
analyzers/MaxMind/ipaddr.py
https://github.com/TheHive-Project/Cortex-Analyzers/blob/8dae6a8c4cf9af5554ae8c844985c4b44d4bd4bf/analyzers/MaxMind/ipaddr.py#L648-L651
def overlaps(self, other): """Tell if self is partly contained in other.""" return self.network in other or self.broadcast in other or ( other.network in self or other.broadcast in self)
[ "def", "overlaps", "(", "self", ",", "other", ")", ":", "return", "self", ".", "network", "in", "other", "or", "self", ".", "broadcast", "in", "other", "or", "(", "other", ".", "network", "in", "self", "or", "other", ".", "broadcast", "in", "self", "...
Tell if self is partly contained in other.
[ "Tell", "if", "self", "is", "partly", "contained", "in", "other", "." ]
python
train
phoebe-project/phoebe2
phoebe/parameters/parameters.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/parameters/parameters.py#L3882-L3894
def expand_value(self, **kwargs): """ expand the selection to account for wildcards """ selection = [] for v in self.get_value(**kwargs): for choice in self.choices: if v==choice and choice not in selection: selection.append(choice) elif fnmatch(choice, v) and choice not in selection: selection.append(choice) return selection
[ "def", "expand_value", "(", "self", ",", "*", "*", "kwargs", ")", ":", "selection", "=", "[", "]", "for", "v", "in", "self", ".", "get_value", "(", "*", "*", "kwargs", ")", ":", "for", "choice", "in", "self", ".", "choices", ":", "if", "v", "==",...
expand the selection to account for wildcards
[ "expand", "the", "selection", "to", "account", "for", "wildcards" ]
python
train
PMEAL/OpenPNM
openpnm/topotools/topotools.py
https://github.com/PMEAL/OpenPNM/blob/0547b5724ffedc0a593aae48639d36fe10e0baed/openpnm/topotools/topotools.py#L1947-L2032
def plot_coordinates(network, pores=None, fig=None, **kwargs): r""" Produces a 3D plot showing specified pore coordinates as markers Parameters ---------- network : OpenPNM Network Object The network whose topological connections to plot pores : array_like (optional) The list of pores to plot if only a sub-sample is desired. This is useful for inspecting a small region of the network. If no pores are specified then all are shown. fig : Matplotlib figure handle If a ``fig`` is supplied, then the coordinates will be overlaid. This enables the plotting of multiple different sets of pores as well as throat connections from ``plot_connections``. kwargs : dict By also in different marker properties such as size (``s``) and color (``c``). For information on available marker style options, visit the Matplotlib documentation on the `web <http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_ Notes ----- The figure handle returned by this method can be passed into ``plot_topology`` to create a plot that combines pore coordinates and throat connections, and vice versa. See Also -------- plot_connections Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[10, 10, 3]) >>> pn.add_boundary_pores() >>> Ps = pn.pores('internal') >>> # Create figure showing internal pores >>> fig = op.topotools.plot_coordinates(network=pn, pores=Ps, c='b') >>> Ps = pn.pores('*boundary') >>> # Pass existing fig back into function to plot boundary pores >>> fig = op.topotools.plot_coordinates(network=pn, pores=Ps, fig=fig, ... 
c='r') """ import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D if pores is None: Ps = network.Ps else: Ps = network._parse_indices(indices=pores) if len(sp.unique(network['pore.coords'][:, 2])) == 1: ThreeD = False else: ThreeD = True if fig is None: fig = plt.figure() if ThreeD: ax = fig.add_subplot(111, projection='3d') else: ax = fig.add_subplot(111) else: ax = fig.gca() # Collect specified coordinates X = network['pore.coords'][Ps, 0] Y = network['pore.coords'][Ps, 1] Z = network['pore.coords'][Ps, 2] if ThreeD: _scale_3d_axes(ax=ax, X=X, Y=Y, Z=Z) if ThreeD: ax.scatter(xs=X, ys=Y, zs=Z, **kwargs) else: ax.scatter(X, Y, **kwargs) return fig
[ "def", "plot_coordinates", "(", "network", ",", "pores", "=", "None", ",", "fig", "=", "None", ",", "*", "*", "kwargs", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "from", "mpl_toolkits", ".", "mplot3d", "import", "Axes3D", "if", "pores...
r""" Produces a 3D plot showing specified pore coordinates as markers Parameters ---------- network : OpenPNM Network Object The network whose topological connections to plot pores : array_like (optional) The list of pores to plot if only a sub-sample is desired. This is useful for inspecting a small region of the network. If no pores are specified then all are shown. fig : Matplotlib figure handle If a ``fig`` is supplied, then the coordinates will be overlaid. This enables the plotting of multiple different sets of pores as well as throat connections from ``plot_connections``. kwargs : dict By also in different marker properties such as size (``s``) and color (``c``). For information on available marker style options, visit the Matplotlib documentation on the `web <http://matplotlib.org/api/lines_api.html#matplotlib.lines.Line2D>`_ Notes ----- The figure handle returned by this method can be passed into ``plot_topology`` to create a plot that combines pore coordinates and throat connections, and vice versa. See Also -------- plot_connections Examples -------- >>> import openpnm as op >>> pn = op.network.Cubic(shape=[10, 10, 3]) >>> pn.add_boundary_pores() >>> Ps = pn.pores('internal') >>> # Create figure showing internal pores >>> fig = op.topotools.plot_coordinates(network=pn, pores=Ps, c='b') >>> Ps = pn.pores('*boundary') >>> # Pass existing fig back into function to plot boundary pores >>> fig = op.topotools.plot_coordinates(network=pn, pores=Ps, fig=fig, ... c='r')
[ "r", "Produces", "a", "3D", "plot", "showing", "specified", "pore", "coordinates", "as", "markers" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py#L656-L674
def ip_rtm_config_route_static_route_oif_vrf_static_route_next_vrf_dest(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") ip = ET.SubElement(config, "ip", xmlns="urn:brocade.com:mgmt:brocade-common-def") rtm_config = ET.SubElement(ip, "rtm-config", xmlns="urn:brocade.com:mgmt:brocade-rtm") route = ET.SubElement(rtm_config, "route") static_route_oif_vrf = ET.SubElement(route, "static-route-oif-vrf") next_hop_vrf_key = ET.SubElement(static_route_oif_vrf, "next-hop-vrf") next_hop_vrf_key.text = kwargs.pop('next_hop_vrf') static_route_oif_type_key = ET.SubElement(static_route_oif_vrf, "static-route-oif-type") static_route_oif_type_key.text = kwargs.pop('static_route_oif_type') static_route_oif_name_key = ET.SubElement(static_route_oif_vrf, "static-route-oif-name") static_route_oif_name_key.text = kwargs.pop('static_route_oif_name') static_route_next_vrf_dest = ET.SubElement(static_route_oif_vrf, "static-route-next-vrf-dest") static_route_next_vrf_dest.text = kwargs.pop('static_route_next_vrf_dest') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "ip_rtm_config_route_static_route_oif_vrf_static_route_next_vrf_dest", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "ip", "=", "ET", ".", "SubElement", "(", "config", ",", "\"ip\"", ",", "x...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
citruz/beacontools
beacontools/parser.py
https://github.com/citruz/beacontools/blob/15a83e9750d0a4393f8a36868e07f6d9458253fe/beacontools/parser.py#L14-L19
def parse_packet(packet): """Parse a beacon advertisement packet.""" frame = parse_ltv_packet(packet) if frame is None: frame = parse_ibeacon_packet(packet) return frame
[ "def", "parse_packet", "(", "packet", ")", ":", "frame", "=", "parse_ltv_packet", "(", "packet", ")", "if", "frame", "is", "None", ":", "frame", "=", "parse_ibeacon_packet", "(", "packet", ")", "return", "frame" ]
Parse a beacon advertisement packet.
[ "Parse", "a", "beacon", "advertisement", "packet", "." ]
python
train
barryp/py-amqplib
amqplib/client_0_8/channel.py
https://github.com/barryp/py-amqplib/blob/2b3a47de34b4712c111d0a55d7ff109dffc2a7b2/amqplib/client_0_8/channel.py#L2063-L2120
def basic_get(self, queue='', no_ack=False, ticket=None): """ direct access to a queue This method provides a direct access to the messages in a queue using a synchronous dialogue that is designed for specific types of application where synchronous functionality is more important than performance. PARAMETERS: queue: shortstr Specifies the name of the queue to consume from. If the queue name is null, refers to the current queue for the channel, which is the last declared queue. RULE: If the client did not previously declare a queue, and the queue name in this method is empty, the server MUST raise a connection exception with reply code 530 (not allowed). no_ack: boolean no acknowledgement needed If this field is set the server does not expect acknowledgments for messages. That is, when a message is delivered to the client the server automatically and silently acknowledges it on behalf of the client. This functionality increases performance but at the cost of reliability. Messages can get lost if a client dies before it can deliver them to the application. ticket: short RULE: The client MUST provide a valid access ticket giving "read" access rights to the realm for the queue. Non-blocking, returns a message object, or None. """ args = AMQPWriter() if ticket is not None: args.write_short(ticket) else: args.write_short(self.default_ticket) args.write_shortstr(queue) args.write_bit(no_ack) self._send_method((60, 70), args) return self.wait(allowed_methods=[ (60, 71), # Channel.basic_get_ok (60, 72), # Channel.basic_get_empty ])
[ "def", "basic_get", "(", "self", ",", "queue", "=", "''", ",", "no_ack", "=", "False", ",", "ticket", "=", "None", ")", ":", "args", "=", "AMQPWriter", "(", ")", "if", "ticket", "is", "not", "None", ":", "args", ".", "write_short", "(", "ticket", "...
direct access to a queue This method provides a direct access to the messages in a queue using a synchronous dialogue that is designed for specific types of application where synchronous functionality is more important than performance. PARAMETERS: queue: shortstr Specifies the name of the queue to consume from. If the queue name is null, refers to the current queue for the channel, which is the last declared queue. RULE: If the client did not previously declare a queue, and the queue name in this method is empty, the server MUST raise a connection exception with reply code 530 (not allowed). no_ack: boolean no acknowledgement needed If this field is set the server does not expect acknowledgments for messages. That is, when a message is delivered to the client the server automatically and silently acknowledges it on behalf of the client. This functionality increases performance but at the cost of reliability. Messages can get lost if a client dies before it can deliver them to the application. ticket: short RULE: The client MUST provide a valid access ticket giving "read" access rights to the realm for the queue. Non-blocking, returns a message object, or None.
[ "direct", "access", "to", "a", "queue" ]
python
train
lsst-epo/vela
astropixie-widgets/astropixie_widgets/science.py
https://github.com/lsst-epo/vela/blob/8e17ebec509be5c3cc2063f4645dfe9e26b49c18/astropixie-widgets/astropixie_widgets/science.py#L24-L41
def teff(cluster): """ Calculate Teff for main sequence stars ranging from Teff 3500K - 8000K. Use [Fe/H] of the cluster, if available. Returns a list of Teff values. """ b_vs, _ = cluster.stars() teffs = [] for b_v in b_vs: b_v -= cluster.eb_v if b_v > -0.04: x = (14.551 - b_v) / 3.684 else: x = (3.402 - math.sqrt(0.515 + 1.376 * b_v)) / 0.688 teffs.append(math.pow(10, x)) return teffs
[ "def", "teff", "(", "cluster", ")", ":", "b_vs", ",", "_", "=", "cluster", ".", "stars", "(", ")", "teffs", "=", "[", "]", "for", "b_v", "in", "b_vs", ":", "b_v", "-=", "cluster", ".", "eb_v", "if", "b_v", ">", "-", "0.04", ":", "x", "=", "("...
Calculate Teff for main sequence stars ranging from Teff 3500K - 8000K. Use [Fe/H] of the cluster, if available. Returns a list of Teff values.
[ "Calculate", "Teff", "for", "main", "sequence", "stars", "ranging", "from", "Teff", "3500K", "-", "8000K", ".", "Use", "[", "Fe", "/", "H", "]", "of", "the", "cluster", "if", "available", "." ]
python
valid
DataONEorg/d1_python
client_onedrive/src/d1_onedrive/impl/drivers/dokan/solrclient.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_onedrive/src/d1_onedrive/impl/drivers/dokan/solrclient.py#L370-L377
def addDocs(self, docs): """docs is a list of fields that are a dictionary of name:value for a record.""" lst = ['<add>'] for fields in docs: self.__add(lst, fields) lst.append('</add>') xstr = ''.join(lst) return self.doUpdateXML(xstr)
[ "def", "addDocs", "(", "self", ",", "docs", ")", ":", "lst", "=", "[", "'<add>'", "]", "for", "fields", "in", "docs", ":", "self", ".", "__add", "(", "lst", ",", "fields", ")", "lst", ".", "append", "(", "'</add>'", ")", "xstr", "=", "''", ".", ...
docs is a list of fields that are a dictionary of name:value for a record.
[ "docs", "is", "a", "list", "of", "fields", "that", "are", "a", "dictionary", "of", "name", ":", "value", "for", "a", "record", "." ]
python
train
pysathq/pysat
pysat/solvers.py
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/pysat/solvers.py#L2595-L2607
def add_clause(self, clause, no_return=True): """ Add a new clause to solver's internal formula. """ if self.maplesat: res = pysolvers.maplesat_add_cl(self.maplesat, clause) if res == False: self.status = False if not no_return: return res
[ "def", "add_clause", "(", "self", ",", "clause", ",", "no_return", "=", "True", ")", ":", "if", "self", ".", "maplesat", ":", "res", "=", "pysolvers", ".", "maplesat_add_cl", "(", "self", ".", "maplesat", ",", "clause", ")", "if", "res", "==", "False",...
Add a new clause to solver's internal formula.
[ "Add", "a", "new", "clause", "to", "solver", "s", "internal", "formula", "." ]
python
train
archman/beamline
beamline/simulation.py
https://github.com/archman/beamline/blob/417bc5dc13e754bc89d246427984590fced64d07/beamline/simulation.py#L92-L111
def _getOutputElegant(self, **kws): """ get results from elegant output according to the given keywords, input parameter format: key = sdds field name tuple, e.g.: available keywords are: - 'file': sdds fielname, file = test.sig - 'data': data array, data = ('s','Sx') - 'dump': h5file name, if defined, dump data to hdf5 format """ datascript = "sddsprintdata.sh" datapath = self.sim_path trajparam_list = kws['data'] sddsfile = os.path.expanduser(os.path.join(self.sim_path, kws['file'])) dh = datautils.DataExtracter(sddsfile, *trajparam_list) dh.setDataScript(datascript) dh.setDataPath(datapath) if 'dump' in kws: dh.setH5file(kws['dump']) dh.extractData().dump() data = dh.extractData().getH5Data() return data
[ "def", "_getOutputElegant", "(", "self", ",", "*", "*", "kws", ")", ":", "datascript", "=", "\"sddsprintdata.sh\"", "datapath", "=", "self", ".", "sim_path", "trajparam_list", "=", "kws", "[", "'data'", "]", "sddsfile", "=", "os", ".", "path", ".", "expand...
get results from elegant output according to the given keywords, input parameter format: key = sdds field name tuple, e.g.: available keywords are: - 'file': sdds fielname, file = test.sig - 'data': data array, data = ('s','Sx') - 'dump': h5file name, if defined, dump data to hdf5 format
[ "get", "results", "from", "elegant", "output", "according", "to", "the", "given", "keywords", "input", "parameter", "format", ":", "key", "=", "sdds", "field", "name", "tuple", "e", ".", "g", ".", ":", "available", "keywords", "are", ":", "-", "file", ":...
python
train
konstantint/matplotlib-venn
matplotlib_venn/_arc.py
https://github.com/konstantint/matplotlib-venn/blob/c26796c9925bdac512edf48387452fbd1848c791/matplotlib_venn/_arc.py#L474-L483
def lies_on_circle(self, center, radius): '''Tests whether the arc circle's center and radius match the given ones within <tol> tolerance. >>> a = Arc((0, 0), 1, 0, 0, False) >>> a.lies_on_circle((tol/2, tol/2), 1+tol/2) True >>> a.lies_on_circle((tol/2, tol/2), 1-tol) False ''' return np.all(abs(np.asarray(center) - self.center) < tol) and abs(radius - self.radius) < tol
[ "def", "lies_on_circle", "(", "self", ",", "center", ",", "radius", ")", ":", "return", "np", ".", "all", "(", "abs", "(", "np", ".", "asarray", "(", "center", ")", "-", "self", ".", "center", ")", "<", "tol", ")", "and", "abs", "(", "radius", "-...
Tests whether the arc circle's center and radius match the given ones within <tol> tolerance. >>> a = Arc((0, 0), 1, 0, 0, False) >>> a.lies_on_circle((tol/2, tol/2), 1+tol/2) True >>> a.lies_on_circle((tol/2, tol/2), 1-tol) False
[ "Tests", "whether", "the", "arc", "circle", "s", "center", "and", "radius", "match", "the", "given", "ones", "within", "<tol", ">", "tolerance", ".", ">>>", "a", "=", "Arc", "((", "0", "0", ")", "1", "0", "0", "False", ")", ">>>", "a", ".", "lies_o...
python
train
ejeschke/ginga
ginga/util/heaptimer.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/util/heaptimer.py#L201-L207
def _remove(self, timer): """Remove timer from heap lock and presence are assumed""" assert timer.timer_heap == self del self.timers[timer] assert timer in self.heap self.heap.remove(timer) heapq.heapify(self.heap)
[ "def", "_remove", "(", "self", ",", "timer", ")", ":", "assert", "timer", ".", "timer_heap", "==", "self", "del", "self", ".", "timers", "[", "timer", "]", "assert", "timer", "in", "self", ".", "heap", "self", ".", "heap", ".", "remove", "(", "timer"...
Remove timer from heap lock and presence are assumed
[ "Remove", "timer", "from", "heap", "lock", "and", "presence", "are", "assumed" ]
python
train
datosgobar/pydatajson
pydatajson/documentation.py
https://github.com/datosgobar/pydatajson/blob/3141082ffbaa295e2deaf6ffbbc5a59f5859960e/pydatajson/documentation.py#L85-L107
def field_to_markdown(field): """Genera texto en markdown a partir de los metadatos de un `field`. Args: field (dict): Diccionario con metadatos de un `field`. Returns: str: Texto que describe un `field`. """ if "title" in field: field_title = "**{}**".format(field["title"]) else: raise Exception("Es necesario un `title` para describir un campo.") field_type = " ({})".format(field["type"]) if "type" in field else "" field_desc = ": {}".format( field["description"]) if "description" in field else "" text_template = "{title}{type}{description}" text = text_template.format(title=field_title, type=field_type, description=field_desc) return text
[ "def", "field_to_markdown", "(", "field", ")", ":", "if", "\"title\"", "in", "field", ":", "field_title", "=", "\"**{}**\"", ".", "format", "(", "field", "[", "\"title\"", "]", ")", "else", ":", "raise", "Exception", "(", "\"Es necesario un `title` para describi...
Genera texto en markdown a partir de los metadatos de un `field`. Args: field (dict): Diccionario con metadatos de un `field`. Returns: str: Texto que describe un `field`.
[ "Genera", "texto", "en", "markdown", "a", "partir", "de", "los", "metadatos", "de", "un", "field", "." ]
python
train
SheffieldML/GPy
GPy/core/parameterization/priorizable.py
https://github.com/SheffieldML/GPy/blob/54c32d79d289d622fb18b898aee65a2a431d90cf/GPy/core/parameterization/priorizable.py#L49-L65
def log_prior(self): """evaluate the prior""" if self.priors.size == 0: return 0. x = self.param_array #evaluate the prior log densities log_p = reduce(lambda a, b: a + b, (p.lnpdf(x[ind]).sum() for p, ind in self.priors.items()), 0) #account for the transformation by evaluating the log Jacobian (where things are transformed) log_j = 0. priored_indexes = np.hstack([i for p, i in self.priors.items()]) for c,j in self.constraints.items(): if not isinstance(c, Transformation):continue for jj in j: if jj in priored_indexes: log_j += c.log_jacobian(x[jj]) return log_p + log_j
[ "def", "log_prior", "(", "self", ")", ":", "if", "self", ".", "priors", ".", "size", "==", "0", ":", "return", "0.", "x", "=", "self", ".", "param_array", "#evaluate the prior log densities", "log_p", "=", "reduce", "(", "lambda", "a", ",", "b", ":", "...
evaluate the prior
[ "evaluate", "the", "prior" ]
python
train
wuher/devil
devil/resource.py
https://github.com/wuher/devil/blob/a8834d4f88d915a21754c6b96f99d0ad9123ad4d/devil/resource.py#L316-L334
def _create_object(self, data, request): """ Create a python object from the given data. This will use ``self.factory`` object's ``create()`` function to create the data. If no factory is defined, this will simply return the same data that was given. """ if request.method.upper() == 'POST' and self.post_factory: fac_func = self.post_factory.create else: fac_func = self.factory.create if isinstance(data, (list, tuple)): return map(fac_func, data) else: return fac_func(data)
[ "def", "_create_object", "(", "self", ",", "data", ",", "request", ")", ":", "if", "request", ".", "method", ".", "upper", "(", ")", "==", "'POST'", "and", "self", ".", "post_factory", ":", "fac_func", "=", "self", ".", "post_factory", ".", "create", "...
Create a python object from the given data. This will use ``self.factory`` object's ``create()`` function to create the data. If no factory is defined, this will simply return the same data that was given.
[ "Create", "a", "python", "object", "from", "the", "given", "data", "." ]
python
train
msiemens/PyGitUp
PyGitUp/gitup.py
https://github.com/msiemens/PyGitUp/blob/b1f78831cb6b8d29d3a7d59f7a2b54fdd0720e9c/PyGitUp/gitup.py#L628-L656
def run(version, quiet, no_fetch, push, **kwargs): # pragma: no cover """ A nicer `git pull`. """ if version: if NO_DISTRIBUTE: print(colored('Please install \'git-up\' via pip in order to ' 'get version information.', 'yellow')) else: GitUp(sparse=True).version_info() return if quiet: sys.stdout = StringIO() try: gitup = GitUp() if push is not None: gitup.settings['push.auto'] = push # if arguments['--no-fetch'] or arguments['--no-f']: if no_fetch: gitup.should_fetch = False except GitError: sys.exit(1) # Error in constructor else: gitup.run()
[ "def", "run", "(", "version", ",", "quiet", ",", "no_fetch", ",", "push", ",", "*", "*", "kwargs", ")", ":", "# pragma: no cover\r", "if", "version", ":", "if", "NO_DISTRIBUTE", ":", "print", "(", "colored", "(", "'Please install \\'git-up\\' via pip in order to...
A nicer `git pull`.
[ "A", "nicer", "git", "pull", "." ]
python
train
sdispater/orator
orator/orm/relations/has_one_or_many.py
https://github.com/sdispater/orator/blob/bd90bf198ee897751848f9a92e49d18e60a74136/orator/orm/relations/has_one_or_many.py#L78-L113
def _match_one_or_many(self, models, results, relation, type_): """ Match the eargerly loaded resuls to their single parents. :param models: The parents :type models: list :param results: The results collection :type results: Collection :param relation: The relation :type relation: str :param type_: The match type :type type_: str :rtype: list """ dictionary = self._build_dictionary(results) for model in models: key = model.get_attribute(self._local_key) if key in dictionary: value = Result( self._get_relation_value(dictionary, key, type_), self, model ) else: if type_ == "one": value = Result(None, self, model) else: value = Result(self._related.new_collection(), self, model) model.set_relation(relation, value) return models
[ "def", "_match_one_or_many", "(", "self", ",", "models", ",", "results", ",", "relation", ",", "type_", ")", ":", "dictionary", "=", "self", ".", "_build_dictionary", "(", "results", ")", "for", "model", "in", "models", ":", "key", "=", "model", ".", "ge...
Match the eargerly loaded resuls to their single parents. :param models: The parents :type models: list :param results: The results collection :type results: Collection :param relation: The relation :type relation: str :param type_: The match type :type type_: str :rtype: list
[ "Match", "the", "eargerly", "loaded", "resuls", "to", "their", "single", "parents", "." ]
python
train
nimbis/cmsplugin-newsplus
cmsplugin_newsplus/admin.py
https://github.com/nimbis/cmsplugin-newsplus/blob/1787fb674faa7800845f18ce782154e290f6be27/cmsplugin_newsplus/admin.py#L49-L57
def make_unpublished(self, request, queryset): """ Marks selected news items as unpublished """ rows_updated = queryset.update(is_published=False) self.message_user(request, ungettext('%(count)d newsitem was unpublished', '%(count)d newsitems were unpublished', rows_updated) % {'count': rows_updated})
[ "def", "make_unpublished", "(", "self", ",", "request", ",", "queryset", ")", ":", "rows_updated", "=", "queryset", ".", "update", "(", "is_published", "=", "False", ")", "self", ".", "message_user", "(", "request", ",", "ungettext", "(", "'%(count)d newsitem ...
Marks selected news items as unpublished
[ "Marks", "selected", "news", "items", "as", "unpublished" ]
python
train
victorlei/smop
smop/parse.py
https://github.com/victorlei/smop/blob/bdad96b715d1dd75ce8ab4724f76b9b1bb1f61cd/smop/parse.py#L464-L466
def p_expr_string(p): "string : STRING" p[0] = node.string(p[1], lineno=p.lineno(1), lexpos=p.lexpos(1))
[ "def", "p_expr_string", "(", "p", ")", ":", "p", "[", "0", "]", "=", "node", ".", "string", "(", "p", "[", "1", "]", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ",", "lexpos", "=", "p", ".", "lexpos", "(", "1", ")", ")" ]
string : STRING
[ "string", ":", "STRING" ]
python
train
FNNDSC/pfmisc
pfmisc/C_snode.py
https://github.com/FNNDSC/pfmisc/blob/960b4d6135fcc50bed0a8e55db2ab1ddad9b99d8/pfmisc/C_snode.py#L381-L386
def node_branch(self, astr_node, abranch): """ Adds a branch to a node, i.e. depth addition. The given node's md_nodes is set to the abranch's mdict_branch. """ self.dict_branch[astr_node].node_dictBranch(abranch.dict_branch)
[ "def", "node_branch", "(", "self", ",", "astr_node", ",", "abranch", ")", ":", "self", ".", "dict_branch", "[", "astr_node", "]", ".", "node_dictBranch", "(", "abranch", ".", "dict_branch", ")" ]
Adds a branch to a node, i.e. depth addition. The given node's md_nodes is set to the abranch's mdict_branch.
[ "Adds", "a", "branch", "to", "a", "node", "i", ".", "e", ".", "depth", "addition", ".", "The", "given", "node", "s", "md_nodes", "is", "set", "to", "the", "abranch", "s", "mdict_branch", "." ]
python
train
the01/python-paps
paps/si/app/sensorClient.py
https://github.com/the01/python-paps/blob/2dde5a71913e4c7b22901cf05c6ecedd890919c4/paps/si/app/sensorClient.py#L244-L256
def stop(self): """ Stop the interface :rtype: None """ self.debug("()") try: self.unjoin() time.sleep(2) except: self.exception("Failed to leave audience") super(SensorClient, self).stop()
[ "def", "stop", "(", "self", ")", ":", "self", ".", "debug", "(", "\"()\"", ")", "try", ":", "self", ".", "unjoin", "(", ")", "time", ".", "sleep", "(", "2", ")", "except", ":", "self", ".", "exception", "(", "\"Failed to leave audience\"", ")", "supe...
Stop the interface :rtype: None
[ "Stop", "the", "interface" ]
python
train
lk-geimfari/mimesis
mimesis/providers/address.py
https://github.com/lk-geimfari/mimesis/blob/4b16ee7a8dba6281a904654a88dbb4b052869fc5/mimesis/providers/address.py#L205-L219
def _get_fs(self, key: str, dms: bool = False) -> Union[str, float]: """Get float number. :param key: Key (`lt` or `lg`). :param dms: DMS format. :return: Float number """ # Default range is a range of longitude. rng = (-90, 90) if key == 'lt' else (-180, 180) result = self.random.uniform(*rng, precision=6) if dms: return self._dd_to_dms(result, key) return result
[ "def", "_get_fs", "(", "self", ",", "key", ":", "str", ",", "dms", ":", "bool", "=", "False", ")", "->", "Union", "[", "str", ",", "float", "]", ":", "# Default range is a range of longitude.", "rng", "=", "(", "-", "90", ",", "90", ")", "if", "key",...
Get float number. :param key: Key (`lt` or `lg`). :param dms: DMS format. :return: Float number
[ "Get", "float", "number", "." ]
python
train
jaredLunde/redis_structures
redis_structures/__init__.py
https://github.com/jaredLunde/redis_structures/blob/b9cce5f5c85db5e12c292633ff8d04e3ae053294/redis_structures/__init__.py#L1706-L1715
def union(self, *others): """ Calculates union between sets @others: one or several :class:RedisSet objects or #str redis set keynames -> #set of new set members """ others = self._typesafe_others(others) return set(map( self._loads, self._client.sunion(self.key_prefix, *others)))
[ "def", "union", "(", "self", ",", "*", "others", ")", ":", "others", "=", "self", ".", "_typesafe_others", "(", "others", ")", "return", "set", "(", "map", "(", "self", ".", "_loads", ",", "self", ".", "_client", ".", "sunion", "(", "self", ".", "k...
Calculates union between sets @others: one or several :class:RedisSet objects or #str redis set keynames -> #set of new set members
[ "Calculates", "union", "between", "sets", "@others", ":", "one", "or", "several", ":", "class", ":", "RedisSet", "objects", "or", "#str", "redis", "set", "keynames" ]
python
train
kappius/pyheaderfile
pyheaderfile/excel.py
https://github.com/kappius/pyheaderfile/blob/8d587dadae538adcec527fd8e74ad89ed5e2006a/pyheaderfile/excel.py#L56-L65
def write_cell(self, x, y, value, style=None): """ writing style and value in the cell of x and y position """ if isinstance(style, str): style = self.xlwt.easyxf(style) if style: self._sheet.write(x, y, label=value, style=style) else: self._sheet.write(x, y, label=value)
[ "def", "write_cell", "(", "self", ",", "x", ",", "y", ",", "value", ",", "style", "=", "None", ")", ":", "if", "isinstance", "(", "style", ",", "str", ")", ":", "style", "=", "self", ".", "xlwt", ".", "easyxf", "(", "style", ")", "if", "style", ...
writing style and value in the cell of x and y position
[ "writing", "style", "and", "value", "in", "the", "cell", "of", "x", "and", "y", "position" ]
python
train
3DLIRIOUS/MeshLabXML
meshlabxml/transform.py
https://github.com/3DLIRIOUS/MeshLabXML/blob/177cce21e92baca500f56a932d66bd9a33257af8/meshlabxml/transform.py#L363-L413
def function(script, x_func='x', y_func='y', z_func='z'): """Geometric function using muparser lib to generate new Coordinates You can change x, y, z for every vertex according to the function specified. See help(mlx.muparser_ref) for muparser reference documentation. It's possible to use the following per-vertex variables in the expression: Variables (per vertex): x, y, z (coordinates) nx, ny, nz (normal) r, g, b, a (color) q (quality) rad (radius) vi (vertex index) vtu, vtv (texture coordinates) ti (texture index) vsel (is the vertex selected? 1 yes, 0 no) and all custom vertex attributes already defined by user. Args: x_func (str): function to generate new coordinates for x y_func (str): function to generate new coordinates for y z_func (str): function to generate new coordinates for z Layer stack: No impacts MeshLab versions: 1.3.4BETA """ filter_xml = ''.join([ ' <filter name="Geometric Function">\n', ' <Param name="x" ', 'value="{}" '.format(str(x_func).replace('&', '&amp;').replace('<', '&lt;')), 'description="func x = " ', 'type="RichString" ', '/>\n', ' <Param name="y" ', 'value="{}" '.format(str(y_func).replace('&', '&amp;').replace('<', '&lt;')), 'description="func y = " ', 'type="RichString" ', '/>\n', ' <Param name="z" ', 'value="{}" '.format(str(z_func).replace('&', '&amp;').replace('<', '&lt;')), 'description="func z = " ', 'type="RichString" ', '/>\n', ' </filter>\n']) util.write_filter(script, filter_xml) return None
[ "def", "function", "(", "script", ",", "x_func", "=", "'x'", ",", "y_func", "=", "'y'", ",", "z_func", "=", "'z'", ")", ":", "filter_xml", "=", "''", ".", "join", "(", "[", "' <filter name=\"Geometric Function\">\\n'", ",", "' <Param name=\"x\" '", ",", ...
Geometric function using muparser lib to generate new Coordinates You can change x, y, z for every vertex according to the function specified. See help(mlx.muparser_ref) for muparser reference documentation. It's possible to use the following per-vertex variables in the expression: Variables (per vertex): x, y, z (coordinates) nx, ny, nz (normal) r, g, b, a (color) q (quality) rad (radius) vi (vertex index) vtu, vtv (texture coordinates) ti (texture index) vsel (is the vertex selected? 1 yes, 0 no) and all custom vertex attributes already defined by user. Args: x_func (str): function to generate new coordinates for x y_func (str): function to generate new coordinates for y z_func (str): function to generate new coordinates for z Layer stack: No impacts MeshLab versions: 1.3.4BETA
[ "Geometric", "function", "using", "muparser", "lib", "to", "generate", "new", "Coordinates" ]
python
test
poppy-project/pypot
pypot/vrep/remoteApiBindings/vrep.py
https://github.com/poppy-project/pypot/blob/d9c6551bbc87d45d9d1f0bc15e35b616d0002afd/pypot/vrep/remoteApiBindings/vrep.py#L875-L888
def simxGetObjectSelection(clientID, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' objectCount = ct.c_int() objectHandles = ct.POINTER(ct.c_int)() ret = c_GetObjectSelection(clientID, ct.byref(objectHandles), ct.byref(objectCount), operationMode) newobj = [] if ret == 0: for i in range(objectCount.value): newobj.append(objectHandles[i]) return ret, newobj
[ "def", "simxGetObjectSelection", "(", "clientID", ",", "operationMode", ")", ":", "objectCount", "=", "ct", ".", "c_int", "(", ")", "objectHandles", "=", "ct", ".", "POINTER", "(", "ct", ".", "c_int", ")", "(", ")", "ret", "=", "c_GetObjectSelection", "(",...
Please have a look at the function description/documentation in the V-REP user manual
[ "Please", "have", "a", "look", "at", "the", "function", "description", "/", "documentation", "in", "the", "V", "-", "REP", "user", "manual" ]
python
train
JoelBender/bacpypes
py25/bacpypes/appservice.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py25/bacpypes/appservice.py#L760-L856
def confirmation(self, apdu): """This function is called when the application has provided a response and needs it to be sent to the client.""" if _debug: ServerSSM._debug("confirmation %r", apdu) # check to see we are in the correct state if self.state != AWAIT_RESPONSE: if _debug: ServerSSM._debug(" - warning: not expecting a response") # abort response if (apdu.apduType == AbortPDU.pduType): if _debug: ServerSSM._debug(" - abort") self.set_state(ABORTED) # send the response to the device self.response(apdu) return # simple response if (apdu.apduType == SimpleAckPDU.pduType) or (apdu.apduType == ErrorPDU.pduType) or (apdu.apduType == RejectPDU.pduType): if _debug: ServerSSM._debug(" - simple ack, error, or reject") # transaction completed self.set_state(COMPLETED) # send the response to the device self.response(apdu) return # complex ack if (apdu.apduType == ComplexAckPDU.pduType): if _debug: ServerSSM._debug(" - complex ack") # save the response and set the segmentation context self.set_segmentation_context(apdu) # the segment size is the minimum of the size of the largest packet # that can be delivered to the client and the largest it can accept if (not self.device_info) or (self.device_info.maxNpduLength is None): self.segmentSize = self.maxApduLengthAccepted else: self.segmentSize = min(self.device_info.maxNpduLength, self.maxApduLengthAccepted) if _debug: ServerSSM._debug(" - segment size: %r", self.segmentSize) # compute the segment count if not apdu.pduData: # always at least one segment self.segmentCount = 1 else: # split into chunks, maybe need one more self.segmentCount, more = divmod(len(apdu.pduData), self.segmentSize) if more: self.segmentCount += 1 if _debug: ServerSSM._debug(" - segment count: %r", self.segmentCount) # make sure we support segmented transmit if we need to if self.segmentCount > 1: if _debug: ServerSSM._debug(" - segmentation required, %d segments", self.segmentCount) # make sure we support segmented transmit if 
self.segmentationSupported not in ('segmentedTransmit', 'segmentedBoth'): if _debug: ServerSSM._debug(" - server can't send segmented responses") abort = self.abort(AbortReason.segmentationNotSupported) self.response(abort) return # make sure client supports segmented receive if not self.segmented_response_accepted: if _debug: ServerSSM._debug(" - client can't receive segmented responses") abort = self.abort(AbortReason.segmentationNotSupported) self.response(abort) return # make sure we dont exceed the number of segments in our response # that the client said it was willing to accept in the request if (self.maxSegmentsAccepted is not None) and (self.segmentCount > self.maxSegmentsAccepted): if _debug: ServerSSM._debug(" - client can't receive enough segments") abort = self.abort(AbortReason.apduTooLong) self.response(abort) return # initialize the state self.segmentRetryCount = 0 self.initialSequenceNumber = 0 self.actualWindowSize = None # send out the first segment (or the whole thing) if self.segmentCount == 1: self.response(apdu) self.set_state(COMPLETED) else: self.response(self.get_segment(0)) self.set_state(SEGMENTED_RESPONSE, self.segmentTimeout) else: raise RuntimeError("invalid APDU (4)")
[ "def", "confirmation", "(", "self", ",", "apdu", ")", ":", "if", "_debug", ":", "ServerSSM", ".", "_debug", "(", "\"confirmation %r\"", ",", "apdu", ")", "# check to see we are in the correct state", "if", "self", ".", "state", "!=", "AWAIT_RESPONSE", ":", "if",...
This function is called when the application has provided a response and needs it to be sent to the client.
[ "This", "function", "is", "called", "when", "the", "application", "has", "provided", "a", "response", "and", "needs", "it", "to", "be", "sent", "to", "the", "client", "." ]
python
train
amyth/django-instapush
instapush/models/base.py
https://github.com/amyth/django-instapush/blob/f8643a2e342fc73a16c95dff79c3daac8ce4b034/instapush/models/base.py#L63-L74
def send_message(self, message, **kwargs): """ Sends a push notification to this device via GCM """ from ..libs.gcm import gcm_send_message data = kwargs.pop("extra", {}) if message is not None: data["message"] = message return gcm_send_message(registration_id=self.registration_id, data=data, **kwargs)
[ "def", "send_message", "(", "self", ",", "message", ",", "*", "*", "kwargs", ")", ":", "from", ".", ".", "libs", ".", "gcm", "import", "gcm_send_message", "data", "=", "kwargs", ".", "pop", "(", "\"extra\"", ",", "{", "}", ")", "if", "message", "is",...
Sends a push notification to this device via GCM
[ "Sends", "a", "push", "notification", "to", "this", "device", "via", "GCM" ]
python
test
open511/open511
open511/converter/o5xml.py
https://github.com/open511/open511/blob/3d573f59d7efa06ff1b5419ea5ff4d90a90b3cf8/open511/converter/o5xml.py#L43-L91
def json_struct_to_xml(json_obj, root, custom_namespace=None): """Converts a Open511 JSON fragment to XML. Takes a dict deserialized from JSON, returns an lxml Element. This won't provide a conforming document if you pass in a full JSON document; it's for translating little fragments, and is mostly used internally.""" if isinstance(root, (str, unicode)): if root.startswith('!'): root = etree.Element('{%s}%s' % (NS_PROTECTED, root[1:])) elif root.startswith('+'): if not custom_namespace: raise Exception("JSON fields starts with +, but no custom namespace provided") root = etree.Element('{%s}%s' % (custom_namespace, root[1:])) else: root = etree.Element(root) if root.tag in ('attachments', 'grouped_events', 'media_files'): for link in json_obj: root.append(json_link_to_xml(link)) elif isinstance(json_obj, (str, unicode)): root.text = json_obj elif isinstance(json_obj, (int, float)): root.text = unicode(json_obj) elif isinstance(json_obj, dict): if frozenset(json_obj.keys()) == frozenset(('type', 'coordinates')): root.append(geojson_to_gml(json_obj)) else: for key, val in json_obj.items(): if key == 'url' or key.endswith('_url'): el = json_link_to_xml(val, json_link_key_to_xml_rel(key)) else: el = json_struct_to_xml(val, key, custom_namespace=custom_namespace) if el is not None: root.append(el) elif isinstance(json_obj, list): tag_name = root.tag if tag_name.endswith('ies'): tag_name = tag_name[:-3] + 'y' elif tag_name.endswith('s'): tag_name = tag_name[:-1] for val in json_obj: el = json_struct_to_xml(val, tag_name, custom_namespace=custom_namespace) if el is not None: root.append(el) elif json_obj is None: return None else: raise NotImplementedError return root
[ "def", "json_struct_to_xml", "(", "json_obj", ",", "root", ",", "custom_namespace", "=", "None", ")", ":", "if", "isinstance", "(", "root", ",", "(", "str", ",", "unicode", ")", ")", ":", "if", "root", ".", "startswith", "(", "'!'", ")", ":", "root", ...
Converts a Open511 JSON fragment to XML. Takes a dict deserialized from JSON, returns an lxml Element. This won't provide a conforming document if you pass in a full JSON document; it's for translating little fragments, and is mostly used internally.
[ "Converts", "a", "Open511", "JSON", "fragment", "to", "XML", "." ]
python
valid
csparpa/pyowm
pyowm/utils/timeformatutils.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/utils/timeformatutils.py#L50-L76
def to_date(timeobject): """ Returns the ``datetime.datetime`` object corresponding to the time value conveyed by the specified object, which can be either a UNIXtime, a ``datetime.datetime`` object or an ISO8601-formatted string in the format `YYYY-MM-DD HH:MM:SS+00``. :param timeobject: the object conveying the time value :type timeobject: int, ``datetime.datetime`` or ISO8601-formatted string :returns: a ``datetime.datetime`` object :raises: *TypeError* when bad argument types are provided, *ValueError* when negative UNIXtimes are provided """ if isinstance(timeobject, int): if timeobject < 0: raise ValueError("The time value is a negative number") return datetime.utcfromtimestamp(timeobject).replace(tzinfo=UTC()) elif isinstance(timeobject, datetime): return timeobject.replace(tzinfo=UTC()) elif isinstance(timeobject, str): return datetime.strptime(timeobject, '%Y-%m-%d %H:%M:%S+00').replace(tzinfo=UTC()) else: raise TypeError('The time value must be expressed either by an int ' \ 'UNIX time, a datetime.datetime object or an ' \ 'ISO8601-formatted string')
[ "def", "to_date", "(", "timeobject", ")", ":", "if", "isinstance", "(", "timeobject", ",", "int", ")", ":", "if", "timeobject", "<", "0", ":", "raise", "ValueError", "(", "\"The time value is a negative number\"", ")", "return", "datetime", ".", "utcfromtimestam...
Returns the ``datetime.datetime`` object corresponding to the time value conveyed by the specified object, which can be either a UNIXtime, a ``datetime.datetime`` object or an ISO8601-formatted string in the format `YYYY-MM-DD HH:MM:SS+00``. :param timeobject: the object conveying the time value :type timeobject: int, ``datetime.datetime`` or ISO8601-formatted string :returns: a ``datetime.datetime`` object :raises: *TypeError* when bad argument types are provided, *ValueError* when negative UNIXtimes are provided
[ "Returns", "the", "datetime", ".", "datetime", "object", "corresponding", "to", "the", "time", "value", "conveyed", "by", "the", "specified", "object", "which", "can", "be", "either", "a", "UNIXtime", "a", "datetime", ".", "datetime", "object", "or", "an", "...
python
train
glormph/msstitch
src/app/drivers/prottable/fdr.py
https://github.com/glormph/msstitch/blob/ded7e5cbd813d7797dc9d42805778266e59ff042/src/app/drivers/prottable/fdr.py#L32-L36
def prepare(self): """No percolator XML for protein tables""" self.target = self.fn self.targetheader = reader.get_tsv_header(self.target) self.decoyheader = reader.get_tsv_header(self.decoyfn)
[ "def", "prepare", "(", "self", ")", ":", "self", ".", "target", "=", "self", ".", "fn", "self", ".", "targetheader", "=", "reader", ".", "get_tsv_header", "(", "self", ".", "target", ")", "self", ".", "decoyheader", "=", "reader", ".", "get_tsv_header", ...
No percolator XML for protein tables
[ "No", "percolator", "XML", "for", "protein", "tables" ]
python
train
kurtbrose/pyjks
jks/jks.py
https://github.com/kurtbrose/pyjks/blob/1cbe7f060e2ad076b6462f3273f11d635771ea3d/jks/jks.py#L730-L739
def _java_is_subclass(cls, obj, class_name): """Given a deserialized JavaObject as returned by the javaobj library, determine whether it's a subclass of the given class name. """ clazz = obj.get_class() while clazz: if clazz.name == class_name: return True clazz = clazz.superclass return False
[ "def", "_java_is_subclass", "(", "cls", ",", "obj", ",", "class_name", ")", ":", "clazz", "=", "obj", ".", "get_class", "(", ")", "while", "clazz", ":", "if", "clazz", ".", "name", "==", "class_name", ":", "return", "True", "clazz", "=", "clazz", ".", ...
Given a deserialized JavaObject as returned by the javaobj library, determine whether it's a subclass of the given class name.
[ "Given", "a", "deserialized", "JavaObject", "as", "returned", "by", "the", "javaobj", "library", "determine", "whether", "it", "s", "a", "subclass", "of", "the", "given", "class", "name", "." ]
python
train
CTPUG/wafer
wafer/compare/admin.py
https://github.com/CTPUG/wafer/blob/a20af3c399267f76373dc342f4d542a9bc457c35/wafer/compare/admin.py#L129-L154
def compare_view(self, request, object_id, version_id, extra_context=None): """Actually compare two versions.""" opts = self.model._meta object_id = unquote(object_id) # get_for_object's ordering means this is always the latest revision. # The reversion we want to compare to current = Version.objects.get_for_object_reference(self.model, object_id)[0] revision = Version.objects.get_for_object_reference(self.model, object_id).filter(id=version_id)[0] the_diff = make_diff(current, revision) context = { "title": _("Comparing current %(model)s with revision created %(date)s") % { 'model': current, 'date' : get_date(revision), }, "opts": opts, "compare_list_url": reverse("%s:%s_%s_comparelist" % (self.admin_site.name, opts.app_label, opts.model_name), args=(quote(object_id),)), "diff_list": the_diff, } extra_context = extra_context or {} context.update(extra_context) return render(request, self.compare_template or self._get_template_list("compare.html"), context)
[ "def", "compare_view", "(", "self", ",", "request", ",", "object_id", ",", "version_id", ",", "extra_context", "=", "None", ")", ":", "opts", "=", "self", ".", "model", ".", "_meta", "object_id", "=", "unquote", "(", "object_id", ")", "# get_for_object's ord...
Actually compare two versions.
[ "Actually", "compare", "two", "versions", "." ]
python
train
kristianfoerster/melodist
melodist/station.py
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/station.py#L298-L338
def disaggregate_precipitation(self, method='equal', zerodiv='uniform', shift=0, master_precip=None): """ Disaggregate precipitation. Parameters ---------- method : str, optional Disaggregation method. ``equal`` Daily precipitation is distributed equally over the 24 hours of the day. (Default) ``cascade`` Hourly precipitation values are obtained using a cascade model set up using hourly observations. zerodiv : str, optional Method to deal with zero division, relevant for ``method='masterstation'``. ``uniform`` Use uniform distribution. (Default) master_precip : Series, optional Hourly precipitation records from a representative station (required for ``method='masterstation'``). """ if method == 'equal': precip_disagg = melodist.disagg_prec(self.data_daily, method=method, shift=shift) elif method == 'cascade': precip_disagg = pd.Series(index=self.data_disagg.index) for months, stats in zip(self.statistics.precip.months, self.statistics.precip.stats): precip_daily = melodist.seasonal_subset(self.data_daily.precip, months=months) if len(precip_daily) > 1: data = melodist.disagg_prec(precip_daily, method=method, cascade_options=stats, shift=shift, zerodiv=zerodiv) precip_disagg.loc[data.index] = data elif method == 'masterstation': precip_disagg = melodist.precip_master_station(self.data_daily.precip, master_precip, zerodiv) self.data_disagg.precip = precip_disagg
[ "def", "disaggregate_precipitation", "(", "self", ",", "method", "=", "'equal'", ",", "zerodiv", "=", "'uniform'", ",", "shift", "=", "0", ",", "master_precip", "=", "None", ")", ":", "if", "method", "==", "'equal'", ":", "precip_disagg", "=", "melodist", ...
Disaggregate precipitation. Parameters ---------- method : str, optional Disaggregation method. ``equal`` Daily precipitation is distributed equally over the 24 hours of the day. (Default) ``cascade`` Hourly precipitation values are obtained using a cascade model set up using hourly observations. zerodiv : str, optional Method to deal with zero division, relevant for ``method='masterstation'``. ``uniform`` Use uniform distribution. (Default) master_precip : Series, optional Hourly precipitation records from a representative station (required for ``method='masterstation'``).
[ "Disaggregate", "precipitation", "." ]
python
train
etscrivner/nose-perfdump
perfdump/html.py
https://github.com/etscrivner/nose-perfdump/blob/a203a68495d30346fab43fb903cb60cd29b17d49/perfdump/html.py#L36-L81
def write(cls, html_file): """Writes the HTML report to the given file.""" f = open(html_file, 'w') f.write('<html>') f.write('<head>') f.write('</head>') f.write('<body>') f.write('<h1>Test times</h1>') fmt_test = '<tr><td>{:.05f}</td><td>{}</td></tr><tr><td>&nbsp;</td><td>{}</td></tr>' f.write('<table>') f.write('<tr><th>Time</th><th>Test info</th></tr>') for row in TestTime.get_slowest_tests(10): f.write(fmt_test.format(row['elapsed'], row['file'], '{}.{}.{}'.format(row['module'], row['class'], row['func']))) f.write('</table>') fmt_file = '<tr><td>{:.05f}</td><td>{}</td></tr>' f.write('<table>') f.write('<tr><th>Time</th><th>Test info</th></tr>') for row in TestTime.get_slowest_files(10): f.write(fmt_file.format(row['sum_elapsed'], row['file'])) f.write('</table>') f.write('<h1>Setup times</h1>') f.write('<table>') f.write('<tr><th>Time</th><th>Test info</th></tr>') for row in SetupTime.get_slowest_tests(10): f.write(fmt_test.format(row['elapsed'], row['file'], '{}.{}.{}'.format(row['module'], row['class'], row['func']))) f.write('</table>') f.write('<table>') f.write('<tr><th>Time</th><th>Test info</th></tr>') for row in SetupTime.get_slowest_files(10): f.write(fmt_file.format(row['sum_elapsed'], row['file'])) f.write('</table>') f.write('</body>') f.write('</html>') f.close()
[ "def", "write", "(", "cls", ",", "html_file", ")", ":", "f", "=", "open", "(", "html_file", ",", "'w'", ")", "f", ".", "write", "(", "'<html>'", ")", "f", ".", "write", "(", "'<head>'", ")", "f", ".", "write", "(", "'</head>'", ")", "f", ".", "...
Writes the HTML report to the given file.
[ "Writes", "the", "HTML", "report", "to", "the", "given", "file", "." ]
python
train
galaxyproject/pulsar
pulsar/util/__init__.py
https://github.com/galaxyproject/pulsar/blob/9ab6683802884324652da0a9f0808c7eb59d3ab4/pulsar/util/__init__.py#L27-L34
def copy_to_temp(object): """ Copy file-like object to temp file and return path. """ temp_file = NamedTemporaryFile(delete=False) _copy_and_close(object, temp_file) return temp_file.name
[ "def", "copy_to_temp", "(", "object", ")", ":", "temp_file", "=", "NamedTemporaryFile", "(", "delete", "=", "False", ")", "_copy_and_close", "(", "object", ",", "temp_file", ")", "return", "temp_file", ".", "name" ]
Copy file-like object to temp file and return path.
[ "Copy", "file", "-", "like", "object", "to", "temp", "file", "and", "return", "path", "." ]
python
train
apple/turicreate
src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/src/external/coremltools_wrap/coremltools/deps/protobuf/python/google/protobuf/internal/well_known_types.py#L352-L355
def FromTimedelta(self, td): """Convertd timedelta to Duration.""" self._NormalizeDuration(td.seconds + td.days * _SECONDS_PER_DAY, td.microseconds * _NANOS_PER_MICROSECOND)
[ "def", "FromTimedelta", "(", "self", ",", "td", ")", ":", "self", ".", "_NormalizeDuration", "(", "td", ".", "seconds", "+", "td", ".", "days", "*", "_SECONDS_PER_DAY", ",", "td", ".", "microseconds", "*", "_NANOS_PER_MICROSECOND", ")" ]
Convertd timedelta to Duration.
[ "Convertd", "timedelta", "to", "Duration", "." ]
python
train
spacetelescope/drizzlepac
drizzlepac/imageObject.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/imageObject.py#L288-L317
def _findExtnames(self, extname=None, exclude=None): """ This method builds a list of all extensions which have 'EXTNAME'==extname and do not include any extensions with 'EXTNAME'==exclude, if any are specified for exclusion at all. """ #make a list of the available extension names for the object extensions=[] if extname is not None: if not isinstance(extname,list): extname=[extname] for extn in extname: extensions.append(extn.upper()) else: #restore all the extensions data from the original file, be careful here #if you've altered data in memory you want to keep! for i in range(1,self._nextend+1,1): if hasattr(self._image[i],'_extension') and \ "IMAGE" in self._image[i]._extension: if self._image[i].extname.upper() not in extensions: extensions.append(self._image[i].extname) #remove this extension from the list if exclude is not None: exclude.upper() if exclude in extensions: newExt=[] for item in extensions: if item != exclude: newExt.append(item) extensions=newExt del newExt return extensions
[ "def", "_findExtnames", "(", "self", ",", "extname", "=", "None", ",", "exclude", "=", "None", ")", ":", "#make a list of the available extension names for the object", "extensions", "=", "[", "]", "if", "extname", "is", "not", "None", ":", "if", "not", "isinsta...
This method builds a list of all extensions which have 'EXTNAME'==extname and do not include any extensions with 'EXTNAME'==exclude, if any are specified for exclusion at all.
[ "This", "method", "builds", "a", "list", "of", "all", "extensions", "which", "have", "EXTNAME", "==", "extname", "and", "do", "not", "include", "any", "extensions", "with", "EXTNAME", "==", "exclude", "if", "any", "are", "specified", "for", "exclusion", "at"...
python
train
Dentosal/python-sc2
sc2/client.py
https://github.com/Dentosal/python-sc2/blob/608bd25f04e89d39cef68b40101d8e9a8a7f1634/sc2/client.py#L281-L307
async def debug_create_unit(self, unit_spawn_commands: List[List[Union[UnitTypeId, int, Point2, Point3]]]): """ Usage example (will spawn 1 marine in the center of the map for player ID 1): await self._client.debug_create_unit([[UnitTypeId.MARINE, 1, self._game_info.map_center, 1]]) """ assert isinstance(unit_spawn_commands, list) assert unit_spawn_commands assert isinstance(unit_spawn_commands[0], list) assert len(unit_spawn_commands[0]) == 4 assert isinstance(unit_spawn_commands[0][0], UnitTypeId) assert unit_spawn_commands[0][1] > 0 # careful, in realtime=True this function may create more units assert isinstance(unit_spawn_commands[0][2], (Point2, Point3)) assert 1 <= unit_spawn_commands[0][3] <= 2 await self._execute( debug=sc_pb.RequestDebug( debug=[ debug_pb.DebugCommand( create_unit=debug_pb.DebugCreateUnit( unit_type=unit_type.value, owner=owner_id, pos=common_pb.Point2D(x=position.x, y=position.y), quantity=amount_of_units, ) ) for unit_type, amount_of_units, position, owner_id in unit_spawn_commands ] ) )
[ "async", "def", "debug_create_unit", "(", "self", ",", "unit_spawn_commands", ":", "List", "[", "List", "[", "Union", "[", "UnitTypeId", ",", "int", ",", "Point2", ",", "Point3", "]", "]", "]", ")", ":", "assert", "isinstance", "(", "unit_spawn_commands", ...
Usage example (will spawn 1 marine in the center of the map for player ID 1): await self._client.debug_create_unit([[UnitTypeId.MARINE, 1, self._game_info.map_center, 1]])
[ "Usage", "example", "(", "will", "spawn", "1", "marine", "in", "the", "center", "of", "the", "map", "for", "player", "ID", "1", ")", ":", "await", "self", ".", "_client", ".", "debug_create_unit", "(", "[[", "UnitTypeId", ".", "MARINE", "1", "self", "....
python
train
bethgelab/foolbox
foolbox/utils.py
https://github.com/bethgelab/foolbox/blob/8ab54248c70e45d8580a7d9ee44c9c0fb5755c4a/foolbox/utils.py#L89-L117
def binarize(x, values, threshold=None, included_in='upper'): """Binarizes the values of x. Parameters ---------- values : tuple of two floats The lower and upper value to which the inputs are mapped. threshold : float The threshold; defaults to (values[0] + values[1]) / 2 if None. included_in : str Whether the threshold value itself belongs to the lower or upper interval. """ lower, upper = values if threshold is None: threshold = (lower + upper) / 2. x = x.copy() if included_in == 'lower': x[x <= threshold] = lower x[x > threshold] = upper elif included_in == 'upper': x[x < threshold] = lower x[x >= threshold] = upper else: raise ValueError('included_in must be "lower" or "upper"') return x
[ "def", "binarize", "(", "x", ",", "values", ",", "threshold", "=", "None", ",", "included_in", "=", "'upper'", ")", ":", "lower", ",", "upper", "=", "values", "if", "threshold", "is", "None", ":", "threshold", "=", "(", "lower", "+", "upper", ")", "/...
Binarizes the values of x. Parameters ---------- values : tuple of two floats The lower and upper value to which the inputs are mapped. threshold : float The threshold; defaults to (values[0] + values[1]) / 2 if None. included_in : str Whether the threshold value itself belongs to the lower or upper interval.
[ "Binarizes", "the", "values", "of", "x", "." ]
python
valid
smarie/python-parsyfiles
parsyfiles/parsing_registries.py
https://github.com/smarie/python-parsyfiles/blob/344b37e1151e8d4e7c2ee49ae09d6568715ae64e/parsyfiles/parsing_registries.py#L29-L39
def build_parser_for_fileobject_and_desiredtype(self, obj_on_filesystem: PersistedObject, object_type: Type[T], logger: Logger = None) -> Parser: """ Returns the most appropriate parser to use to parse object obj_on_filesystem as an object of type object_type :param obj_on_filesystem: the filesystem object to parse :param object_type: the type of object that the parser is expected to produce :param logger: :return: """ pass
[ "def", "build_parser_for_fileobject_and_desiredtype", "(", "self", ",", "obj_on_filesystem", ":", "PersistedObject", ",", "object_type", ":", "Type", "[", "T", "]", ",", "logger", ":", "Logger", "=", "None", ")", "->", "Parser", ":", "pass" ]
Returns the most appropriate parser to use to parse object obj_on_filesystem as an object of type object_type :param obj_on_filesystem: the filesystem object to parse :param object_type: the type of object that the parser is expected to produce :param logger: :return:
[ "Returns", "the", "most", "appropriate", "parser", "to", "use", "to", "parse", "object", "obj_on_filesystem", "as", "an", "object", "of", "type", "object_type" ]
python
train
RobotStudio/bors
bors/api/adapter/api.py
https://github.com/RobotStudio/bors/blob/38bf338fc6905d90819faa56bd832140116720f0/bors/api/adapter/api.py#L20-L32
def run(self): """Executed on startup of application""" self.api = self.context.get("cls")(self.context) self.context["inst"].append(self) # Adapters used by strategies for call, calldata in self.context.get("calls", {}).items(): def loop(): """Loop on event scheduler, calling calls""" while not self.stopped.wait(calldata.get("delay", None)): self.call(call, calldata.get("arguments", None)) self.thread[call] = Process(target=loop) self.thread[call].start()
[ "def", "run", "(", "self", ")", ":", "self", ".", "api", "=", "self", ".", "context", ".", "get", "(", "\"cls\"", ")", "(", "self", ".", "context", ")", "self", ".", "context", "[", "\"inst\"", "]", ".", "append", "(", "self", ")", "# Adapters used...
Executed on startup of application
[ "Executed", "on", "startup", "of", "application" ]
python
train
dpgaspar/Flask-AppBuilder
flask_appbuilder/baseviews.py
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/baseviews.py#L885-L930
def _get_list_widget( self, filters, actions=None, order_column="", order_direction="", page=None, page_size=None, widgets=None, **args ): """ get joined base filter and current active filter for query """ widgets = widgets or {} actions = actions or self.actions page_size = page_size or self.page_size if not order_column and self.base_order: order_column, order_direction = self.base_order joined_filters = filters.get_joined_filters(self._base_filters) count, lst = self.datamodel.query( joined_filters, order_column, order_direction, page=page, page_size=page_size, ) pks = self.datamodel.get_keys(lst) # serialize composite pks pks = [self._serialize_pk_if_composite(pk) for pk in pks] widgets["list"] = self.list_widget( label_columns=self.label_columns, include_columns=self.list_columns, value_columns=self.datamodel.get_values(lst, self.list_columns), order_columns=self.order_columns, formatters_columns=self.formatters_columns, page=page, page_size=page_size, count=count, pks=pks, actions=actions, filters=filters, modelview_name=self.__class__.__name__, ) return widgets
[ "def", "_get_list_widget", "(", "self", ",", "filters", ",", "actions", "=", "None", ",", "order_column", "=", "\"\"", ",", "order_direction", "=", "\"\"", ",", "page", "=", "None", ",", "page_size", "=", "None", ",", "widgets", "=", "None", ",", "*", ...
get joined base filter and current active filter for query
[ "get", "joined", "base", "filter", "and", "current", "active", "filter", "for", "query" ]
python
train
timothycrosley/isort
isort/utils.py
https://github.com/timothycrosley/isort/blob/493c02a1a000fe782cec56f1f43262bacb316381/isort/utils.py#L34-L44
def union(a: Iterable[Any], b: Iterable[Any]) -> List[Any]: """ Return a list of items that are in `a` or `b` """ u = [] # type: List[Any] for item in a: if item not in u: u.append(item) for item in b: if item not in u: u.append(item) return u
[ "def", "union", "(", "a", ":", "Iterable", "[", "Any", "]", ",", "b", ":", "Iterable", "[", "Any", "]", ")", "->", "List", "[", "Any", "]", ":", "u", "=", "[", "]", "# type: List[Any]", "for", "item", "in", "a", ":", "if", "item", "not", "in", ...
Return a list of items that are in `a` or `b`
[ "Return", "a", "list", "of", "items", "that", "are", "in", "a", "or", "b" ]
python
train
peterbrittain/asciimatics
asciimatics/widgets.py
https://github.com/peterbrittain/asciimatics/blob/f471427d7786ce2d5f1eeb2dae0e67d19e46e085/asciimatics/widgets.py#L2506-L2513
def _get_pos(self): """ Get current position for scroll bar. """ if self._h >= len(self._options): return 0 else: return self._start_line / (len(self._options) - self._h)
[ "def", "_get_pos", "(", "self", ")", ":", "if", "self", ".", "_h", ">=", "len", "(", "self", ".", "_options", ")", ":", "return", "0", "else", ":", "return", "self", ".", "_start_line", "/", "(", "len", "(", "self", ".", "_options", ")", "-", "se...
Get current position for scroll bar.
[ "Get", "current", "position", "for", "scroll", "bar", "." ]
python
train
Jaymon/endpoints
endpoints/http.py
https://github.com/Jaymon/endpoints/blob/2f1c4ae2c69a168e69447d3d8395ada7becaa5fb/endpoints/http.py#L1004-L1010
def body(self): """return the raw version of the body""" body = None if self.body_input: body = self.body_input.read(int(self.get_header('content-length', -1))) return body
[ "def", "body", "(", "self", ")", ":", "body", "=", "None", "if", "self", ".", "body_input", ":", "body", "=", "self", ".", "body_input", ".", "read", "(", "int", "(", "self", ".", "get_header", "(", "'content-length'", ",", "-", "1", ")", ")", ")",...
return the raw version of the body
[ "return", "the", "raw", "version", "of", "the", "body" ]
python
train