text_prompt
stringlengths 157
13.1k
| code_prompt
stringlengths 7
19.8k
⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nl_cb_err(cb, kind, func, arg):
    """Set up an error callback, updating `cb` in place.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/handlers.c#L343

    Positional arguments:
    cb -- nl_cb class instance.
    kind -- kind of callback (integer).
    func -- callback function (used only when `kind` is NL_CB_CUSTOM).
    arg -- argument to be passed to the callback function.

    Returns:
    0 on success or a negative error code.
    """
    if not 0 <= kind <= NL_CB_KIND_MAX:
        return -NLE_RANGE
    # Custom callbacks use the caller's function; otherwise pick the stock
    # handler for the requested kind.
    cb.cb_err = func if kind == NL_CB_CUSTOM else cb_err_def[kind]
    cb.cb_err_arg = arg
    return 0
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ifi_index(self, value):
    """Index setter: pack `value` (treating None/0 alike) into this struct's backing bytearray."""
    packed = bytearray(c_int(value or 0))
    self.bytearray[self._get_slicers(3)] = packed
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ifi_change(self, value):
    """Change setter: pack `value` (treating None/0 alike) into this struct's backing bytearray."""
    packed = bytearray(c_uint(value or 0))
    self.bytearray[self._get_slicers(5)] = packed
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _class_factory(base):
"""Create subclasses of ctypes. Positional arguments: base -- base class to subclass. Returns: New class definition. """
|
class ClsPyPy(base):
def __repr__(self):
return repr(base(super(ClsPyPy, self).value))
@classmethod
def from_buffer(cls, ba):
try:
integer = struct.unpack_from(getattr(cls, '_type_'), ba)[0]
except struct.error:
len_ = len(ba)
size = struct.calcsize(getattr(cls, '_type_'))
if len_ < size:
raise ValueError('Buffer size too small ({0} instead of at least {1} bytes)'.format(len_, size))
raise
return cls(integer)
class ClsPy26(base):
def __repr__(self):
return repr(base(super(ClsPy26, self).value))
def __iter__(self):
return iter(struct.pack(getattr(super(ClsPy26, self), '_type_'), super(ClsPy26, self).value))
try:
base.from_buffer(bytearray(base()))
except TypeError:
# Python2.6, ctypes cannot be converted to bytearrays.
return ClsPy26
except AttributeError:
# PyPy on my Raspberry Pi, ctypes don't have from_buffer attribute.
return ClsPyPy
except ValueError:
# PyPy on Travis CI, from_buffer cannot handle non-buffer() bytearrays.
return ClsPyPy
return base
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pid(self, value):
    """Process ID setter: pack `value` (treating None/0 alike) into the backing bytearray."""
    packed = bytearray(c_int32(value or 0))
    self.bytearray[self._get_slicers(0)] = packed
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def uid(self, value):
    """User ID setter: pack `value` (treating None/0 alike) into the backing bytearray."""
    packed = bytearray(c_int32(value or 0))
    self.bytearray[self._get_slicers(1)] = packed
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gid(self, value):
    """Group ID setter: pack `value` (treating None/0 alike) into the backing bytearray."""
    packed = bytearray(c_int32(value or 0))
    self.bytearray[self._get_slicers(2)] = packed
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def put_skeleton_files_on_disk(metadata_type, where, github_template=None, params=None):
    """Generate metadata source + companion -meta.xml files from jinja2 templates.

    Positional arguments:
    metadata_type -- metadata type name (string); resolved via get_meta_type_by_name().
    where -- directory under which the type's directoryName folder is created.

    Keyword arguments:
    github_template -- dict with at least a 'file_name' key naming the template.
    params -- jinja2 render context; must contain 'api_name'.
    """
    # NOTE: was `params={}` — a shared mutable default argument; None sentinel
    # with the same effective behavior.
    params = params or {}
    api_name = params["api_name"]
    file_name = github_template["file_name"]
    template_source = config.connection.get_plugin_client_setting('mm_template_source', 'joeferraro/MavensMate-Templates/master')
    template_location = config.connection.get_plugin_client_setting('mm_template_location', 'remote')
    try:
        if template_location == 'remote':
            if 'linux' in sys.platform:
                # NOTE(review): shell interpolation of settings-derived values;
                # assumes template_source/file_name are trusted config — verify.
                template_body = os.popen("wget https://raw.githubusercontent.com/{0}/{1}/{2} -q -O -".format(template_source, metadata_type, file_name)).read()
            else:
                template_body = urllib2.urlopen("https://raw.githubusercontent.com/{0}/{1}/{2}".format(template_source, metadata_type, file_name)).read()
        else:
            template_body = get_file_as_string(os.path.join(template_source, metadata_type, file_name))
    except Exception:  # was a bare except; any retrieval failure falls back to bundled templates
        template_body = get_file_as_string(os.path.join(config.base_path, config.support_dir, "templates", "github-local", metadata_type, file_name))
    file_body = env.from_string(template_body).render(params)
    metadata_type = get_meta_type_by_name(metadata_type)
    target_dir = "{0}/{1}".format(where, metadata_type['directoryName'])
    os.makedirs(target_dir)
    source_path = "{0}/{1}".format(target_dir, api_name + "." + metadata_type['suffix'])
    # `with` guarantees the handles are closed even if write() raises.
    with open(source_path, 'w') as f:
        f.write(file_body)
    meta_body = env.get_template('meta.html').render(
        api_name=api_name, sfdc_api_version=SFDC_API_VERSION, meta_type=metadata_type['xmlName'])
    with open(source_path + "-meta.xml", 'w') as f:
        f.write(meta_body)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def genl_ctrl_probe_by_name(sk, name):
    """Look up a Generic Netlink family by name, querying the kernel directly.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L237

    Unlike genl_ctrl_search_by_name() this queries the kernel directly,
    allowing module autoload to resolve the family request; an nl_cache
    would prevent that.

    Positional arguments:
    sk -- Generic Netlink socket (nl_sock class instance).
    name -- family name (bytes).

    Returns:
    `genl_family` class instance, or None if no match was found.
    """
    family = genl_family_alloc()
    if not family:
        return None
    genl_family_set_name(family, name)
    msg = nlmsg_alloc()
    cb = nl_cb_clone(nl_socket_get_cb(sk))
    genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, GENL_ID_CTRL, 0, 0, CTRL_CMD_GETFAMILY, 1)
    nla_put_string(msg, CTRL_ATTR_FAMILY_NAME, name)
    nl_cb_set(cb, NL_CB_VALID, NL_CB_CUSTOM, probe_response, family)
    if nl_send_auto(sk, msg) < 0:
        return None
    if nl_recvmsgs(sk, cb) < 0:
        return None
    # If the search was successful, the request may be ACKed after the data.
    if wait_for_ack(sk) < 0:
        return None
    return family if genl_family_get_id(family) != 0 else None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def genl_ctrl_resolve(sk, name):
    """Resolve a Generic Netlink family name to its numeric identifier.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L429

    Queries the kernel directly; use genl_ctrl_search_by_name() to resolve
    multiple names.

    Positional arguments:
    sk -- Generic Netlink socket (nl_sock class instance).
    name -- name of Generic Netlink family (bytes).

    Returns:
    The numeric family identifier or a negative error code.
    """
    family = genl_ctrl_probe_by_name(sk, name)
    return -NLE_OBJ_NOTFOUND if family is None else int(genl_family_get_id(family))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def genl_ctrl_resolve_grp(sk, family_name, grp_name):
    """Resolve a Generic Netlink family group name to its numeric identifier.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/ctrl.c#L471

    Positional arguments:
    sk -- Generic Netlink socket (nl_sock class instance).
    family_name -- name of Generic Netlink family (bytes).
    grp_name -- name of group to resolve (bytes).

    Returns:
    The numeric group identifier or a negative error code.
    """
    family = genl_ctrl_probe_by_name(sk, family_name)
    if family is None:
        return -NLE_OBJ_NOTFOUND
    return genl_ctrl_grp_by_name(family, grp_name)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _safe_read(path, length):
    """Read up to `length` characters of a UTF-8 file relative to HERE.

    Positional arguments:
    path -- file path relative to the HERE directory.
    length -- maximum number of characters to read.

    Returns:
    File contents (string), or '' when the file does not exist.
    """
    full_path = os.path.join(HERE, path)  # join once instead of twice
    if not os.path.exists(full_path):
        return ''
    # `with` ensures the handle is closed even if read() raises; the original
    # leaked the handle on error.
    with codecs.open(full_path, encoding='utf-8') as file_handle:
        return file_handle.read(length)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def error_handler(_, err, arg):
    """Copy the kernel-reported error code into the mutable integer `arg`, then stop processing."""
    code = err.error
    arg.value = code
    return libnl.handlers.NL_STOP
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def callback_trigger(msg, arg):
    """Handle the kernel's scan-done signal: success or failure only, no data.

    Positional arguments:
    msg -- nl_msg class instance containing the data sent by the kernel.
    arg -- mutable integer (ctypes.c_int()) to update with the outcome.

    Returns:
    NL_SKIP (integer): stop other callbacks for this message and move on to
    the next kernel message.
    """
    gnlh = genlmsghdr(nlmsg_data(nlmsg_hdr(msg)))
    cmd = gnlh.cmd
    if cmd == nl80211.NL80211_CMD_SCAN_ABORTED:
        # The scan was aborted for some reason.
        arg.value = 1
    elif cmd == nl80211.NL80211_CMD_NEW_SCAN_RESULTS:
        # The scan completed successfully; `callback_dump` collects results later.
        arg.value = 0
    return libnl.handlers.NL_SKIP
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def callback_dump(msg, results):
    """Decode one SSID's binary data sent by the kernel; called once per SSID.

    Everything in `msg` pertains to a single SSID.

    Positional arguments:
    msg -- nl_msg class instance containing the data sent by the kernel.
    results -- dictionary to populate with parsed data (keyed by BSSID).
    """
    # Parse the incoming payload into manageable chunks, bailing out with a
    # warning on anything malformed or incomplete.
    gnlh = genlmsghdr(nlmsg_data(nlmsg_hdr(msg)))
    attrs = {i: None for i in range(nl80211.NL80211_ATTR_MAX + 1)}
    nla_parse(attrs, nl80211.NL80211_ATTR_MAX, genlmsg_attrdata(gnlh, 0), genlmsg_attrlen(gnlh, 0), None)
    if not attrs[nl80211.NL80211_ATTR_BSS]:
        print('WARNING: BSS info missing for an access point.')
        return libnl.handlers.NL_SKIP
    bss = dict()  # Filled in by nla_parse_nested().
    if nla_parse_nested(bss, nl80211.NL80211_BSS_MAX, attrs[nl80211.NL80211_ATTR_BSS], bss_policy):
        print('WARNING: Failed to parse nested attributes for an access point!')
        return libnl.handlers.NL_SKIP
    if not bss[nl80211.NL80211_BSS_BSSID]:
        print('WARNING: No BSSID detected for an access point!')
        return libnl.handlers.NL_SKIP
    if not bss[nl80211.NL80211_BSS_INFORMATION_ELEMENTS]:
        print('WARNING: No additional information available for an access point!')
        return libnl.handlers.NL_SKIP
    # Store parsed data; re-running the scan overwrites the BSSID's entry.
    bss_parsed = parse_bss(bss)
    results[bss_parsed['bssid']] = bss_parsed
    return libnl.handlers.NL_SKIP
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_scan_trigger(sk, if_index, driver_id, mcid):
    """Issue NL80211_CMD_TRIGGER_SCAN (requires root) and wait for the done signal.

    NL80211 first acknowledges the trigger request; later it signals whether
    the scan finished or was aborted. At that point no scan data is available
    yet -- retrieving results is handled elsewhere. May exit the program
    (via error()) on a fatal error.

    Positional arguments:
    sk -- nl_sock class instance (from nl_socket_alloc()).
    if_index -- interface index (integer).
    driver_id -- nl80211 driver ID from genl_ctrl_resolve() (integer).
    mcid -- nl80211 scanning group ID from genl_ctrl_resolve_grp() (integer).

    Returns:
    0 on success or a negative error code.
    """
    # Join the "scan" multicast group so we see aborted/new-results events.
    _LOGGER.debug('Joining group %d.', mcid)
    rc = nl_socket_add_membership(sk, mcid)
    if rc < 0:
        return rc
    # Build the trigger-scan request.
    msg = nlmsg_alloc()
    genlmsg_put(msg, 0, 0, driver_id, 0, 0, nl80211.NL80211_CMD_TRIGGER_SCAN, 0)
    nla_put_u32(msg, nl80211.NL80211_ATTR_IFINDEX, if_index)
    ssids_to_scan = nlmsg_alloc()
    nla_put(ssids_to_scan, 1, 0, b'')  # Empty SSID list == scan all SSIDs.
    nla_put_nested(msg, nl80211.NL80211_ATTR_SCAN_SSIDS, ssids_to_scan)
    # Callbacks used only while triggering the scan.
    err = ctypes.c_int(1)  # Mutable flag updated by callbacks; signals end of messages.
    results = ctypes.c_int(-1)  # -1 not started, 0 success (new results), 1 aborted.
    cb = libnl.handlers.nl_cb_alloc(libnl.handlers.NL_CB_DEFAULT)
    libnl.handlers.nl_cb_set(cb, libnl.handlers.NL_CB_VALID, libnl.handlers.NL_CB_CUSTOM, callback_trigger, results)
    libnl.handlers.nl_cb_err(cb, libnl.handlers.NL_CB_CUSTOM, error_handler, err)
    libnl.handlers.nl_cb_set(cb, libnl.handlers.NL_CB_ACK, libnl.handlers.NL_CB_CUSTOM, ack_handler, err)
    libnl.handlers.nl_cb_set(cb, libnl.handlers.NL_CB_SEQ_CHECK, libnl.handlers.NL_CB_CUSTOM,
                             lambda *_: libnl.handlers.NL_OK, None)  # Ignore sequence checking.
    # Send the request; the kernel takes a few seconds to finish scanning.
    _LOGGER.debug('Sending NL80211_CMD_TRIGGER_SCAN...')
    rc = nl_send_auto(sk, msg)
    if rc < 0:
        return rc
    while err.value > 0:
        _LOGGER.debug('Retrieving NL80211_CMD_TRIGGER_SCAN acknowledgement...')
        rc = nl_recvmsgs(sk, cb)
        if rc < 0:
            return rc
    if err.value < 0:
        error('Unknown error {0} ({1})'.format(err.value, errmsg[abs(err.value)]))
    # Block until the kernel reports done or aborted.
    while results.value < 0:
        _LOGGER.debug('Retrieving NL80211_CMD_TRIGGER_SCAN final response...')
        rc = nl_recvmsgs(sk, cb)
        if rc < 0:
            return rc
    if results.value > 0:
        error('The kernel aborted the scan.')
    # Done; leave the multicast group.
    _LOGGER.debug('Leaving group %d.', mcid)
    return nl_socket_drop_membership(sk, mcid)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def eta_letters(seconds):
    """Convert seconds remaining into a human readable string.

    From https://github.com/Robpol86/etaprogress/blob/ad934d4/etaprogress/components/eta_conversions.py

    Positional arguments:
    seconds -- integer/float indicating seconds remaining.

    Returns:
    Formatted string such as '1d 2h 03m 04s', omitting leading zero units.
    """
    final_days, final_hours, final_minutes, final_seconds = 0, 0, 0, seconds
    if final_seconds >= 86400:
        final_days = int(final_seconds / 86400.0)
        final_seconds -= final_days * 86400
    if final_seconds >= 3600:
        final_hours = int(final_seconds / 3600.0)
        final_seconds -= final_hours * 3600
    if final_seconds >= 60:
        final_minutes = int(final_seconds / 60.0)
        final_seconds -= final_minutes * 60
    final_seconds = int(math.ceil(final_seconds))
    # Bug fix: format() below receives four positional arguments (indexes
    # 0-3); the templates previously referenced {1}-{4}, which raised
    # IndexError on every call.
    if final_days:
        template = '{0:d}d {1:d}h {2:02d}m {3:02d}s'
    elif final_hours:
        template = '{1:d}h {2:02d}m {3:02d}s'
    elif final_minutes:
        template = '{2:02d}m {3:02d}s'
    else:
        template = '{3:02d}s'
    return template.format(final_days, final_hours, final_minutes, final_seconds)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_table(data):
    """Print the table of detected SSIDs and their data to screen.

    Positional arguments:
    data -- list of dictionaries (one per access point).
    """
    table = AsciiTable([COLUMNS])
    # Numeric-ish columns (channel, frequency, signal) are right-justified.
    for col in (2, 3, 4):
        table.justify_columns[col] = 'right'
    rows = list()
    for entry in data:
        row = [str(entry.get(key, '')) for key in ('ssid', 'security', 'channel', 'frequency', 'signal', 'bssid')]
        row[0] = row[0].replace('\0', '')  # Strip NUL padding from SSIDs.
        if row[3]:
            row[3] += ' MHz'
        if row[4]:
            row[4] += ' dBm'
        rows.append(row)
    # Sort by the user-selected column (case-insensitive match against COLUMNS).
    sort_index = [c.lower() for c in COLUMNS].index(OPTIONS['--key'].lower())
    rows.sort(key=lambda r: r[sort_index], reverse=OPTIONS['--reverse'])
    table.table_data.extend(rows)
    print(table.table)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def generateObject(self, sObjectType):
    '''
    Generate a Salesforce object, such as a Lead or Contact, via the suds
    factory and tag it with the requested sObject type.
    '''
    sobject = self._sforce.factory.create('ens:sObject')
    sobject.type = sObjectType
    return sobject
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _marshallSObjects(self, sObjects, tag = 'sObjects'):
    '''
    Marshall generic sObjects into a list of SAX elements.

    This code is going away ASAP.

    Positional arguments:
    sObjects -- a single suds object or a tuple/list of them; a lone object
                is wrapped into a one-element tuple.

    Keyword arguments:
    tag -- element tag name; used for nested objects (e.g. MergeRequest)
           where key: object must be in <key/>, not <sObjects/>.

    Returns:
    List of SAX Element instances, one per input object.
    '''
    if not isinstance(sObjects, (tuple, list)):
        sObjects = (sObjects, )
    # These three request types live in the tns namespace; everything else
    # uses ens. Only the first object's type is inspected — presumably all
    # objects in one call share a type (NOTE(review): confirm with callers).
    if sObjects[0].type in ['LeadConvert', 'SingleEmailMessage', 'MassEmailMessage']:
        nsPrefix = 'tns:'
    else:
        nsPrefix = 'ens:'
    li = []
    for obj in sObjects:
        el = Element(tag)
        el.set('xsi:type', nsPrefix + obj.type)
        # Iterating a suds object yields (field name, field value) pairs.
        for k, v in obj:
            if k == 'type':
                continue  # Already emitted as the xsi:type attribute.
            # This is here to avoid 'duplicate values' error when setting a field in fieldsToNull.
            # Even a tag like <FieldName/> will trigger it.
            # NOTE(review): `== None` kept as-is; suds values may rely on
            # __eq__ rather than identity — confirm before changing to `is`.
            if v == None:
                # not going to win any awards for variable-naming scheme here
                tmp = Element(k)
                tmp.set('xsi:nil', 'true')
                el.append(tmp)
            elif isinstance(v, (list, tuple)):
                # Multi-valued field: one element per value.
                for value in v:
                    el.append(Element(k).setText(value))
            elif isinstance(v, suds.sudsobject.Object):
                # Nested object: recurse, nesting under the field's own tag.
                el.append(self._marshallSObjects(v, k))
            else:
                el.append(Element(k).setText(v))
        li.append(el)
    return li
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _setHeaders(self, call = None, **kwargs):
    '''
    Attach the SOAP headers appropriate to a given method call to the request.

    Keyword arguments (via **kwargs):
    debug_categories -- when present, sent as the DebuggingHeader categories.

    Positional arguments:
    call -- name of the API method about to be invoked (string); selects
            which optional, pre-configured headers are attached.

    Each optional header is only attached when (a) the call is in that
    header's applicable-call list per the SOAP API and (b) the corresponding
    instance attribute has been configured (is not None).
    '''
    # All calls, including utility calls, set the session header.
    headers = {'SessionHeader': self._sessionHeader}
    if 'debug_categories' in kwargs:
        # Levels: ERROR, WARN, INFO, DEBUG, FINE, FINER, FINEST
        # Categories: Db, Workflow, Validation, Callout, Apex Code, Apex Profiling, All
        debug_categories = kwargs['debug_categories']
        headers['DebuggingHeader'] = {
            'categories' : debug_categories
        }
    # Data-modifying calls honor the field-truncation setting.
    if call in ('convertLead',
                'create',
                'merge',
                'process',
                'undelete',
                'update',
                'upsert'):
        if self._allowFieldTruncationHeader is not None:
            headers['AllowFieldTruncationHeader'] = self._allowFieldTruncationHeader
    if call in ('create',
                'merge',
                'update',
                'upsert'):
        if self._assignmentRuleHeader is not None:
            headers['AssignmentRuleHeader'] = self._assignmentRuleHeader
    # CallOptions will only ever be set by the SforcePartnerClient.
    if self._callOptions is not None:
        if call in ('create',
                    'merge',
                    'queryAll',
                    'query',
                    'queryMore',
                    'retrieve',
                    'search',
                    'update',
                    'upsert',
                    'convertLead',
                    'login',
                    'delete',
                    'describeGlobal',
                    'describeLayout',
                    'describeTabs',
                    'describeSObject',
                    'describeSObjects',
                    'getDeleted',
                    'getUpdated',
                    'process',
                    'undelete',
                    'getServerTimestamp',
                    'getUserInfo',
                    'setPassword',
                    'resetPassword'):
            headers['CallOptions'] = self._callOptions
    if call in ('create',
                'delete',
                'resetPassword',
                'update',
                'upsert'):
        if self._emailHeader is not None:
            headers['EmailHeader'] = self._emailHeader
    if call in ('describeSObject',
                'describeSObjects'):
        if self._localeOptions is not None:
            headers['LocaleOptions'] = self._localeOptions
    if call == 'login':
        if self._loginScopeHeader is not None:
            headers['LoginScopeHeader'] = self._loginScopeHeader
    if call in ('create',
                'merge',
                'query',
                'retrieve',
                'update',
                'upsert'):
        if self._mruHeader is not None:
            headers['MruHeader'] = self._mruHeader
    if call in ('convertLead',
                'create',
                'delete',
                'describeGlobal',
                'describeLayout',
                'describeSObject',
                'describeSObjects',
                'describeTabs',
                'merge',
                'process',
                'query',
                'retrieve',
                'search',
                'undelete',
                'update',
                'upsert'):
        if self._packageVersionHeader is not None:
            headers['PackageVersionHeader'] = self._packageVersionHeader
    if call in ('query',
                'queryAll',
                'queryMore',
                'retrieve'):
        if self._queryOptions is not None:
            headers['QueryOptions'] = self._queryOptions
    if call == 'delete':
        if self._userTerritoryDeleteHeader is not None:
            headers['UserTerritoryDeleteHeader'] = self._userTerritoryDeleteHeader
    # Hand the assembled header set to suds for the next service call.
    self._sforce.set_options(soapheaders = headers)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def invalidateSessions(self, sessionIds):
    '''
    Invalidate Salesforce sessions (server-side logout).

    Use with extreme caution, for the following (undocumented) reason:
    all API connections for a given user share a single session ID, so this
    calls logout() WHICH LOGS OUT THAT USER FROM EVERY CONCURRENT SESSION.

    Returns invalidateSessionsResult.
    '''
    self._setHeaders('invalidateSessions')
    raw_result = self._sforce.service.invalidateSessions(sessionIds)
    return self._handleResultTyping(raw_result)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def query(self, queryString):
    '''
    Executes a query against the specified object and returns data that
    matches the specified criteria.
    '''
    self._setHeaders('query')
    service = self._sforce.service
    return service.query(queryString)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def queryAll(self, queryString):
    '''
    Retrieves data from specified objects, whether or not they have been
    deleted.
    '''
    self._setHeaders('queryAll')
    service = self._sforce.service
    return service.queryAll(queryString)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def queryMore(self, queryLocator):
    '''
    Retrieves the next batch of objects from a query.
    '''
    self._setHeaders('queryMore')
    service = self._sforce.service
    return service.queryMore(queryLocator)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def resetPassword(self, userId):
    '''
    Changes a user's password to a system-generated value.
    '''
    self._setHeaders('resetPassword')
    service = self._sforce.service
    return service.resetPassword(userId)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def setPassword(self, userId, password):
    '''
    Sets the specified user's password to the specified value.
    '''
    self._setHeaders('setPassword')
    service = self._sforce.service
    return service.setPassword(userId, password)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nl_msgtype_lookup(ops, msgtype):
    """Look up the message type association for `msgtype` in the given cache operations.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/cache_mngt.c#L189

    Positional arguments:
    ops -- cache operations (nl_cache_ops class instance).
    msgtype -- Netlink message type (integer).

    Returns:
    The matching message type association or None.
    """
    return next((assoc for assoc in ops.co_msgtypes if assoc.mt_id == msgtype), None)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nl_cache_mngt_register(ops):
    """Register a set of cache operations.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/cache_mngt.c#L252

    Called by users of caches to announce the availability of a certain
    cache type; the operations are pushed onto the global `cache_ops` list.

    Positional arguments:
    ops -- cache operations (nl_cache_ops class instance).

    Returns:
    0 on success or a negative error code.
    """
    global cache_ops
    if not (ops.co_name and ops.co_obj_ops):
        return -NLE_INVAL
    with cache_ops_lock:
        if _nl_cache_ops_lookup(ops.co_name):
            return -NLE_EXIST  # Already registered under this name.
        # Prepend to the global singly linked list of registered operations.
        ops.co_refcnt = 0
        ops.co_next = cache_ops
        cache_ops = ops
    _LOGGER.debug('Registered cache operations {0}'.format(ops.co_name))
    return 0
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nl_connect(sk, protocol):
    """Create a file descriptor and bind the Netlink socket.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L96

    Creates a new Netlink socket using `socket.socket()` and binds it to the
    protocol and local port specified in the `sk` socket object (if any).
    Fails if the socket is already connected.

    Positional arguments:
    sk -- Netlink socket (nl_sock class instance).
    protocol -- Netlink protocol to use (integer).

    Returns:
    0 on success or a negative error code.
    """
    if sk.s_fd != -1:
        return -NLE_BAD_SOCK  # Already connected.
    cloexec = getattr(socket, 'SOCK_CLOEXEC', 0)  # Not available on every platform.
    try:
        sk.socket_instance = socket.socket(getattr(socket, 'AF_NETLINK', -1), socket.SOCK_RAW | cloexec, protocol)
    except OSError as err:
        return -nl_syserr2nlerr(err.errno)
    if not sk.s_flags & NL_SOCK_BUFSIZE_SET:
        rc = nl_socket_set_buffer_size(sk, 0, 0)
        if rc < 0:
            sk.socket_instance.close()
            return rc
    try:
        sk.socket_instance.bind((sk.s_local.nl_pid, sk.s_local.nl_groups))
    except OSError as err:
        sk.socket_instance.close()
        return -nl_syserr2nlerr(err.errno)
    # The kernel may have assigned the port; record what we actually got.
    sk.s_local.nl_pid = sk.socket_instance.getsockname()[0]
    if sk.s_local.nl_family != socket.AF_NETLINK:
        sk.socket_instance.close()
        return -NLE_AF_NOSUPPORT
    sk.s_proto = protocol
    return 0
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nl_complete_msg(sk, msg):
    """Finalize a Netlink message before transmission.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L450

    Fills in any still-unset fields from the socket configuration:
    - `nlmsg_pid` defaults to the socket's local port number.
    - `nlmsg_seq` defaults to the socket's next sequence number.
    - The message protocol defaults to the socket's protocol.
    - NLM_F_REQUEST is always set; NLM_F_ACK is set unless Auto-ACK mode
      is disabled on the socket.

    Positional arguments:
    sk -- Netlink socket (nl_sock class instance).
    msg -- Netlink message (nl_msg class instance).
    """
    hdr = msg.nm_nlh
    if hdr.nlmsg_pid == NL_AUTO_PORT:
        hdr.nlmsg_pid = nl_socket_get_local_port(sk)
    if hdr.nlmsg_seq == NL_AUTO_SEQ:
        hdr.nlmsg_seq = sk.s_seq_next
        sk.s_seq_next += 1
    if msg.nm_protocol == -1:
        msg.nm_protocol = sk.s_proto
    hdr.nlmsg_flags |= NLM_F_REQUEST
    if not sk.s_flags & NL_NO_AUTO_ACK:
        hdr.nlmsg_flags |= NLM_F_ACK
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nl_send_simple(sk, type_, flags, buf=None, size=0):
    """Construct and transmit a Netlink message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L549

    Allocates a new Netlink message from `type_` and `flags`; when `buf`
    carries a payload of length `size` it is appended before sending via
    `nl_send_auto()`.

    Positional arguments:
    sk -- Netlink socket (nl_sock class instance).
    type_ -- Netlink message type (integer).
    flags -- Netlink message flags (integer).

    Keyword arguments:
    buf -- payload data.
    size -- size of `buf` (integer).

    Returns:
    Number of characters sent on success or a negative error code.
    """
    msg = nlmsg_alloc_simple(type_, flags)
    if buf is not None and size:
        rc = nlmsg_append(msg, buf, size, NLMSG_ALIGNTO)
        if rc < 0:
            return rc
    return nl_send_auto(sk, msg)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nl_recv(sk, nla, buf, creds=None):
    """Receive data from Netlink socket.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L625

    Receives data from a connected netlink socket using recvmsg() and returns
    the number of bytes read. The read data is appended to `buf`; the peer's
    netlink address is stored in `nla`. Blocks until data is available unless
    the socket is non-blocking. If message peeking is enabled (NL_MSG_PEEK)
    the message size is determined with MSG_PEEK before the actual read.
    An EINTR interruption of recvmsg() is retried automatically.

    Positional arguments:
    sk -- Netlink socket (nl_sock class instance) (input).
    nla -- Netlink socket structure for peer address (sockaddr_nl) (output).
    buf -- destination bytearray() for message content (output).

    Keyword arguments:
    creds -- destination for received credentials (ucred) (output).

    Returns:
    Number of bytes read, 0 on EOF or no-data (non-blocking mode), or a
    negative error code.
    """
    flags = 0
    page_size = resource.getpagesize() * 4
    if sk.s_flags & NL_MSG_PEEK:
        flags |= socket.MSG_PEEK | socket.MSG_TRUNC
    iov_len = sk.s_bufsize or page_size
    if creds and sk.s_flags & NL_SOCK_PASSCRED:
        raise NotImplementedError  # TODO https://github.com/Robpol86/libnl/issues/2
    while True:  # This is the `goto retry` implementation.
        try:
            if hasattr(sk.socket_instance, 'recvmsg'):
                iov, _, msg_flags, address = sk.socket_instance.recvmsg(iov_len, 0, flags)
            else:
                iov, address = sk.socket_instance.recvfrom(iov_len, flags)
                msg_flags = 0
        except OSError as exc:
            if exc.errno == errno.EINTR:
                continue  # recvmsg() returned EINTR, retrying.
            return -nl_syserr2nlerr(exc.errno)
        nla.nl_family = sk.socket_instance.family  # recvmsg() in C does this, but not Python's.
        if not iov:
            return 0
        if msg_flags & socket.MSG_CTRUNC:
            raise NotImplementedError  # TODO https://github.com/Robpol86/libnl/issues/2
        if iov_len < len(iov) or msg_flags & socket.MSG_TRUNC:
            # Provided buffer is not long enough.
            # Enlarge it to size of n (which should be total length of the message) and try again.
            iov_len = len(iov)
            continue
        if flags:
            # Buffer is big enough, do the actual reading.
            flags = 0
            continue
        nla.nl_pid = address[0]
        nla.nl_groups = address[1]
        # BUGFIX: was `sk.s_flags * NL_SOCK_PASSCRED` (multiplication); the
        # flag test must use bitwise AND as in the peek test above.
        if creds and sk.s_flags & NL_SOCK_PASSCRED:
            raise NotImplementedError  # TODO https://github.com/Robpol86/libnl/issues/2
        if iov:
            buf += iov
        return len(buf)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nl_recvmsgs_report(sk, cb):
    """Receive messages from a Netlink socket and report parsed messages.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L998

    Identical to nl_recvmsgs() except that it returns the number of parsed
    messages instead of 0 on success.

    Positional arguments:
    sk -- Netlink socket (nl_sock class instance).
    cb -- set of callbacks to control behaviour (nl_cb class instance).

    Returns:
    Number of received messages or a negative error code from nl_recv().
    """
    # An application-supplied receive override (nl_cb_overwrite_recv) takes
    # precedence over the default recvmsgs implementation.
    receiver = cb.cb_recvmsgs_ow or recvmsgs
    return int(receiver(sk, cb))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nl_recvmsgs(sk, cb):
    """Receive a set of messages from a Netlink socket.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L1023

    Repeatedly calls nl_recv() (or its overridden replacement, see
    nl_cb_overwrite_recv()) and parses the received data as Netlink messages.
    Stops when a callback returns NL_STOP or nl_recv returns 0 or an error.

    Positional arguments:
    sk -- Netlink socket (nl_sock class instance).
    cb -- set of callbacks to control behaviour (nl_cb class instance).

    Returns:
    0 on success or a negative error code from nl_recv().
    """
    result = nl_recvmsgs_report(sk, cb)
    # A positive result is a message count; collapse it to plain success.
    return 0 if result > 0 else int(result)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nl_wait_for_ack(sk):
    """Wait for ACK.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/nl.c#L1058

    Waits until an ACK is received for the latest not yet acknowledged
    Netlink message.

    Positional arguments:
    sk -- Netlink socket (nl_sock class instance).

    Returns:
    Number of received messages or a negative error code from nl_recvmsgs().
    """
    def _stop_on_ack(*_):
        # Custom ACK callback: terminate the receive loop on the first ACK.
        return NL_STOP

    callbacks = nl_cb_clone(sk.s_cb)
    nl_cb_set(callbacks, NL_CB_ACK, NL_CB_CUSTOM, _stop_on_ack, None)
    return int(nl_recvmsgs(sk, callbacks))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def get_plugin_client_settings(self):
    """Assemble the MavensMate settings dictionary for the current client.

    Merges up to three layers keyed 'default', 'user' and 'project'.
    Raises MMException when no settings at all could be loaded or when the
    default settings file exists but cannot be parsed.
    """
    settings = {}
    user_path = self.get_plugin_settings_path("User")
    def_path = self.get_plugin_settings_path("MavensMate")
    # If the default path for settings is None, we're either dealing with a
    # bad client setup or a new client like Atom.io. Load the settings from
    # the default cache and optionally allow them to pipe settings in via
    # STDIN (config.plugin_client_settings).
    if def_path is None:
        if 'ATOM' in self.plugin_client:
            file_name = 'atom'
        elif 'SUBLIME_TEXT' in self.plugin_client:
            file_name = 'st3'
        elif 'BRACKETS' in self.plugin_client:
            file_name = 'brackets'
        # NOTE(review): an unrecognized client leaves file_name unset and the
        # next line raises NameError -- confirm all supported clients appear
        # in the chain above.
        settings['default'] = util.parse_json_from_file(
            config.base_path + "/" + config.support_dir + "/config/" + file_name + ".json")
        if config.plugin_client_settings is not None:
            settings['user'] = config.plugin_client_settings
    else:
        workspace = self.params.get('workspace', None)
        if self.project_name is not None and workspace is not None:
            try:
                settings['project'] = util.parse_json_from_file(
                    os.path.join(workspace, self.project_name, self.project_name + '.sublime-settings'))
            except Exception:  # best-effort: project settings are optional
                debug('Project settings could not be loaded')
        if user_path is not None:
            try:
                settings['user'] = util.parse_json_from_file(user_path)
            except Exception:  # best-effort: user settings are optional
                debug('User settings could not be loaded')
        if def_path is not None:
            try:
                settings['default'] = util.parse_json_from_file(def_path)
            except Exception:
                # Defaults are mandatory when a default path exists.
                raise MMException('Could not load default MavensMate settings.')
    if not settings:
        raise MMException('Could not load MavensMate settings. Please ensure they contain valid JSON')
    return settings
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nlmsg_for_each_attr(nlh, hdrlen, rem):
    """Iterate over a stream of attributes in a message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/include/netlink/msg.h#L123

    Positional arguments:
    nlh -- Netlink message header (nlmsghdr class instance).
    hdrlen -- length of family header (integer).
    rem -- initialized to len, holds bytes currently remaining in stream (c_int).

    Returns:
    Generator yielding nl_attr instances.
    """
    head = nlmsg_attrdata(nlh, hdrlen)
    length = nlmsg_attrlen(nlh, hdrlen)
    return nla_for_each_attr(head, length, rem)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nlmsg_attrdata(nlh, hdrlen):
    """Head of attributes data.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L143

    Positional arguments:
    nlh -- Netlink message header (nlmsghdr class instance).
    hdrlen -- length of family specific header (integer).

    Returns:
    First attribute (nlattr class instance with others in its payload).
    """
    payload = nlmsg_data(nlh)
    offset = libnl.linux_private.netlink.NLMSG_ALIGN(hdrlen)
    return libnl.linux_private.netlink.nlattr(bytearray_ptr(payload, offset))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nlmsg_attrlen(nlh, hdrlen):
    """Length of attributes data.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L154

    Positional arguments:
    nlh -- Netlink message header (nlmsghdr class instance).
    hdrlen -- length of family specific header (integer).

    Returns:
    Integer (never negative).
    """
    remaining = nlmsg_len(nlh) - libnl.linux_private.netlink.NLMSG_ALIGN(hdrlen)
    return remaining if remaining > 0 else 0
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nlmsg_ok(nlh, remaining):
    """Check if the Netlink message fits into the remaining bytes.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L179

    Positional arguments:
    nlh -- Netlink message header (nlmsghdr class instance).
    remaining -- number of bytes remaining in message stream (c_int).

    Returns:
    Boolean.
    """
    header_size = libnl.linux_private.netlink.nlmsghdr.SIZEOF
    if remaining.value < header_size:
        return False
    # The advertised message length must cover at least the header and must
    # not exceed what is left in the stream.
    return header_size <= nlh.nlmsg_len <= remaining.value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nlmsg_next(nlh, remaining):
    """Next Netlink message in message stream.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L194

    Positional arguments:
    nlh -- Netlink message header (nlmsghdr class instance).
    remaining -- number of bytes remaining in message stream (c_int).

    Returns:
    The next Netlink message in the message stream; decrements `remaining`
    by the (aligned) size of the current message.
    """
    aligned_len = libnl.linux_private.netlink.NLMSG_ALIGN(nlh.nlmsg_len)
    remaining.value -= aligned_len
    return libnl.linux_private.netlink.nlmsghdr(bytearray_ptr(nlh.bytearray, aligned_len))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nlmsg_parse(nlh, hdrlen, tb, maxtype, policy):
    """Parse attributes of a Netlink message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L213

    Positional arguments:
    nlh -- Netlink message header (nlmsghdr class instance).
    hdrlen -- length of family specific header (integer).
    tb -- dictionary of nlattr instances (length of maxtype+1).
    maxtype -- maximum attribute type to be expected (integer).
    policy -- validation policy (nla_policy class instance).

    Returns:
    0 on success or a negative error code.
    """
    if not nlmsg_valid_hdr(nlh, hdrlen):
        return -NLE_MSG_TOOSHORT
    head = nlmsg_attrdata(nlh, hdrlen)
    length = nlmsg_attrlen(nlh, hdrlen)
    return nla_parse(tb, maxtype, head, length, policy)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nlmsg_find_attr(nlh, hdrlen, attrtype):
    """Find a specific attribute in a Netlink message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L231

    Positional arguments:
    nlh -- Netlink message header (nlmsghdr class instance).
    hdrlen -- length of family specific header (integer).
    attrtype -- type of attribute to look for (integer).

    Returns:
    The first attribute which matches the specified type (nlattr class
    instance).
    """
    head = nlmsg_attrdata(nlh, hdrlen)
    length = nlmsg_attrlen(nlh, hdrlen)
    return nla_find(head, length, attrtype)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nlmsg_alloc(len_=default_msg_size):
    """Allocate a new Netlink message with maximum payload size specified.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L299

    Allocates a new Netlink message without any further payload. The maximum
    payload size defaults to resource.getpagesize() or as otherwise specified
    with nlmsg_set_default_size().

    Returns:
    Newly allocated Netlink message (nl_msg class instance).
    """
    # Never allocate less than the fixed header size.
    size = max(libnl.linux_private.netlink.nlmsghdr.SIZEOF, len_)
    msg = nl_msg()
    msg.nm_refcnt = 1
    msg.nm_nlh = libnl.linux_private.netlink.nlmsghdr(bytearray(size))  # zero-filled buffer
    msg.nm_protocol = -1
    msg.nm_size = size
    msg.nm_nlh.nlmsg_len = nlmsg_total_size(0)
    _LOGGER.debug('msg 0x%x: Allocated new message, maxlen=%d', id(msg), size)
    return msg
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nlmsg_inherit(hdr=None):
    """Allocate a new Netlink message and inherit Netlink message header.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L322

    Allocates a new Netlink message and inherits the original message header.
    If `hdr` is not None it is used as a template for the new header,
    otherwise the header is left blank.

    Keyword arguments:
    hdr -- Netlink message header template (nlmsghdr class instance).

    Returns:
    Newly allocated Netlink message (nl_msg class instance).
    """
    msg = nlmsg_alloc()
    if hdr:
        destination = msg.nm_nlh
        for field in ('nlmsg_type', 'nlmsg_flags', 'nlmsg_seq', 'nlmsg_pid'):
            setattr(destination, field, getattr(hdr, field))
    return msg
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nlmsg_alloc_simple(nlmsgtype, flags):
    """Allocate a new Netlink message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L346

    Positional arguments:
    nlmsgtype -- Netlink message type (integer).
    flags -- message flags (integer).

    Returns:
    Newly allocated Netlink message (nl_msg class instance) or None.
    """
    template = libnl.linux_private.netlink.nlmsghdr(nlmsg_type=nlmsgtype, nlmsg_flags=flags)
    msg = nlmsg_inherit(template)
    _LOGGER.debug('msg 0x%x: Allocated new simple message', id(msg))
    return msg
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nlmsg_convert(hdr):
    """Convert a Netlink message received from a Netlink socket to an nl_msg.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L382

    Allocates a new Netlink message and copies all of the data in `hdr` into
    the new message object.

    Positional arguments:
    hdr -- Netlink message received from netlink socket (nlmsghdr class
           instance).

    Returns:
    Newly allocated Netlink message (nl_msg class instance) or None.
    """
    msg = nlmsg_alloc(hdr.nlmsg_len)
    if msg:
        # Deep-copy the wire data, truncated to the advertised message length.
        msg.nm_nlh.bytearray = hdr.bytearray.copy()[:hdr.nlmsg_len]
        return msg
    return None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nlmsg_reserve(n, len_, pad):
    """Reserve room for additional data in a Netlink message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L407

    Reserves room for additional data at the tail of an existing netlink
    message. Eventual padding required will be zeroed out.

    Positional arguments:
    n -- Netlink message (nl_msg class instance).
    len_ -- number of bytes to reserve (integer).
    pad -- alignment in bytes, 0 for none (integer).

    Returns:
    bytearray_ptr() at the start of additional data or None when the message
    buffer is too small.
    """
    used = n.nm_nlh.nlmsg_len
    # Round the requested length up to the next multiple of `pad` (pad is
    # assumed to be a power of two, as in the C original).
    if pad:
        total = (len_ + pad - 1) & ~(pad - 1)
    else:
        total = len_
    if used + total > n.nm_size:
        return None
    start = bytearray_ptr(n.nm_nlh.bytearray, used)
    n.nm_nlh.nlmsg_len += total
    if total > len_:
        # Zero out the padding bytes beyond the caller's data.
        bytearray_ptr(start, len_, total)[:] = bytearray(total - len_)
    _LOGGER.debug('msg 0x%x: Reserved %d (%d) bytes, pad=%d, nlmsg_len=%d',
                  id(n), total, len_, pad, n.nm_nlh.nlmsg_len)
    return start
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nlmsg_append(n, data, len_, pad):
    """Append data to tail of a Netlink message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L442

    Extends the Netlink message as needed and appends the data of given
    length to the message.

    Positional arguments:
    n -- Netlink message (nl_msg class instance).
    data -- data to add.
    len_ -- length of data (integer).
    pad -- number of bytes to align data to (integer).

    Returns:
    0 on success or a negative error code.
    """
    destination = nlmsg_reserve(n, len_, pad)
    if destination is None:
        return -NLE_NOMEM
    source = data.bytearray
    destination[:len_] = source[:len_]
    _LOGGER.debug('msg 0x%x: Appended %d bytes with padding %d', id(n), len_, pad)
    return 0
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nlmsg_put(n, pid, seq, type_, payload, flags):
    """Add a Netlink message header to a Netlink message.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L503

    Adds or overwrites the Netlink message header in an existing message
    object.

    Positional arguments:
    n -- Netlink message (nl_msg class instance).
    pid -- Netlink process id or NL_AUTO_PID (c_uint32).
    seq -- sequence number of message or NL_AUTO_SEQ (c_uint32).
    type_ -- message type (integer).
    payload -- length of message payload (integer).
    flags -- message flags (integer).

    Returns:
    nlmsghdr class instance or None (when payload cannot be reserved).
    """
    header = n.nm_nlh
    if header.nlmsg_len < libnl.linux_private.netlink.NLMSG_HDRLEN:
        raise BUG
    header.nlmsg_type = type_
    header.nlmsg_flags = flags
    header.nlmsg_pid = pid
    header.nlmsg_seq = seq
    _LOGGER.debug('msg 0x%x: Added netlink header type=%d, flags=%d, pid=%d, seq=%d',
                  id(n), type_, flags, pid, seq)
    if payload > 0:
        if nlmsg_reserve(n, payload, libnl.linux_private.netlink.NLMSG_ALIGNTO) is None:
            return None
    return header
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nl_nlmsg_flags2str(flags, buf, _=None):
    """Netlink Message Flags Translations.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L664

    Positional arguments:
    flags -- integer.
    buf -- bytearray().

    Keyword arguments:
    _ -- unused.

    Returns:
    Reference to `buf`.
    """
    del buf[:]
    netlink_mod = libnl.linux_private.netlink
    # Order matters: some GET flags share bit values with NEW flags
    # (e.g. NLM_F_ROOT / NLM_F_REPLACE); once a bit is matched it is cleared
    # so later entries cannot claim it again.
    known_flags = (
        ('REQUEST', netlink_mod.NLM_F_REQUEST),
        ('MULTI', netlink_mod.NLM_F_MULTI),
        ('ACK', netlink_mod.NLM_F_ACK),
        ('ECHO', netlink_mod.NLM_F_ECHO),
        ('ROOT', netlink_mod.NLM_F_ROOT),
        ('MATCH', netlink_mod.NLM_F_MATCH),
        ('ATOMIC', netlink_mod.NLM_F_ATOMIC),
        ('REPLACE', netlink_mod.NLM_F_REPLACE),
        ('EXCL', netlink_mod.NLM_F_EXCL),
        ('CREATE', netlink_mod.NLM_F_CREATE),
        ('APPEND', netlink_mod.NLM_F_APPEND),
    )
    tokens = []
    for name, bit in known_flags:
        if flags & bit:
            flags &= ~bit
            tokens.append(name)
    if flags:
        # Leftover unrecognized bits are shown in hex.
        tokens.append('0x{0:x}'.format(flags))
    buf.extend(','.join(tokens).encode('ascii'))
    return buf
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dump_hex(ofd, start, len_, prefix=0):
    """Convert `start` to hex and log it, 16 bytes per log statement.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L760

    Positional arguments:
    ofd -- function to call with arguments similar to `logging.debug`.
    start -- bytearray() or bytearray_ptr() instance.
    len_ -- size of `start` (integer).

    Keyword arguments:
    prefix -- additional number of whitespace pairs to prefix each log
              statement with.
    """
    pad = ' ' * prefix
    per_line = 16 - (prefix * 2)
    data = start[:len_]
    printable = string.printable[:95]
    for offset in range(0, len(data), per_line):
        hex_cols, ascii_cols = [], []
        for item in data[offset:offset + per_line]:
            if hasattr(item, 'real'):
                # Python 3: iterating bytes/bytearray yields ints.
                code, char = item, chr(item)
            else:
                # Python 2: iterating yields one-character strings.
                code, char = ord(item), item
            hex_cols.append('{0:02x}'.format(code))
            ascii_cols.append(char if char in printable else '.')
        ofd(' %s%s%s', pad, ' '.join(hex_cols).ljust(per_line * 3), ''.join(ascii_cols))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nl_msg_dump(msg, ofd=_LOGGER.debug):
    """Dump message in human readable format to callable.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/msg.c#L970

    Positional arguments:
    msg -- message to print (nl_msg class instance).

    Keyword arguments:
    ofd -- function to call with arguments similar to `logging.debug`.
    """
    header = nlmsg_hdr(msg)
    ofd('-------------------------- BEGIN NETLINK MESSAGE ---------------------------')
    ofd(' [NETLINK HEADER] %d octets', header.SIZEOF)
    print_hdr(ofd, msg)
    if header.nlmsg_type == libnl.linux_private.netlink.NLMSG_ERROR:
        dump_error_msg(msg, ofd)
    elif nlmsg_len(header) > 0:
        print_msg(msg, ofd, header)
    ofd('--------------------------- END NETLINK MESSAGE ---------------------------')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def nl_object_alloc(ops):
    """Allocate a new object of kind specified by the operations handle.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/object.c#L54

    Positional arguments:
    ops -- cache operations handle (nl_object_ops class instance).

    Returns:
    New nl_object class instance or None.
    """
    obj = nl_object()
    nl_init_list_head(obj.ce_list)
    obj.ce_ops = ops
    constructor = ops.oo_constructor
    if constructor:
        constructor(obj)
    _LOGGER.debug('Allocated new object 0x%x', id(obj))
    return obj
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def genl_register_family(ops):
    """Register Generic Netlink family and associated commands.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L164

    Registers the specified Generic Netlink family definition together with
    all associated commands. Received Generic Netlink messages can then be
    passed to genl_handle_msg().

    Positional arguments:
    ops -- Generic Netlink family definition (genl_ops class instance).

    Returns:
    0 on success or a negative error code.
    """
    if not ops.o_name:
        return -NLE_INVAL
    if ops.o_cmds and ops.o_ncmds <= 0:
        return -NLE_INVAL
    # Reject duplicate numeric ids and duplicate family names.
    if ops.o_id and lookup_family(ops.o_id):
        return -NLE_EXIST
    if lookup_family_by_name(ops.o_name):
        return -NLE_EXIST
    nl_list_add_tail(ops.o_list, genl_ops_list)
    return 0
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def genl_register(ops):
    """Register Generic Netlink family backed cache.

    https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L241

    Same as genl_register_family() but additionally registers the specified
    cache operations using nl_cache_mngt_register() and associates them with
    the Generic Netlink family.

    Positional arguments:
    ops -- cache operations definition (nl_cache_ops class instance).

    Returns:
    0 on success or a negative error code.
    """
    if ops.co_protocol != NETLINK_GENERIC:
        return -NLE_PROTO_MISMATCH
    if ops.co_hdrsize < GENL_HDRSIZE(0) or ops.co_genl is None:
        return -NLE_INVAL
    genl = ops.co_genl
    genl.o_cache_ops = ops
    genl.o_hdrsize = ops.co_hdrsize - GENL_HDRLEN
    first_msgtype = ops.co_msgtypes[0]
    genl.o_name = first_msgtype.mt_name
    genl.o_id = first_msgtype.mt_id
    ops.co_msg_parser = genl_msg_parser
    err = genl_register_family(genl)
    return err if err < 0 else nl_cache_mngt_register(ops)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __setup_connection(self):
    """Set up the per-request session.

    Each requested operation represents a session; the session records which
    plugin client is running it and establishes the global connection and
    project objects on `config`.
    """
    payload = self.payload
    # Settings may be piped in with the request payload.
    if payload is not None and type(payload) is dict and 'settings' in payload:
        config.plugin_client_settings = payload['settings']
    config.offline = self.args.offline
    config.connection = PluginConnection(
        client=self.args.client or 'SUBLIME_TEXT_3',
        ui=self.args.ui_switch,
        args=self.args,
        params=payload,
        operation=self.operation,
        verbose=self.args.verbose)
    config.project = MavensMateProject(params=payload, ui=self.args.ui_switch)
    config.sfdc_client = config.project.sfdc_client
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def execute(self):
    """Execute the requested command and print its response.

    Dispatches either to the out-of-box UI generator (--ui switch) or to the
    matching command class registered in mm.commands. Every exception is
    funneled through __printr so the client always receives a response.
    """
    try:
        self.__setup_connection()
        # If the ui switch argument is included, the request is to launch the
        # out-of-box MavensMate UI, so we generate the HTML for the UI and
        # launch the process. example: mm -o new_project --ui
        if self.args.ui_switch == True:
            config.logger.debug('UI operation requested, attempting to launch MavensMate UI')
            tmp_html_file = util.generate_ui(self.operation, self.payload, self.args)
            if config.connection.plugin_client == 'ATOM':
                # Return the location of the html file here so the page can be
                # opened inside an Atom panel.
                self.__printr(util.generate_success_response(tmp_html_file))
            else:
                util.launch_ui(tmp_html_file)
                self.__printr(util.generate_success_response('UI Generated Successfully'))
        # Non-UI command: look up the command class and run it.
        else:
            commands = get_available_commands()
            try:
                command_clazz = commands[self.operation](params=self.payload, args=self.args)
            except KeyError:
                raise MMUnsupportedOperationException('Could not find the operation you requested. Be sure the command is located in mm.commands, inherits from Command (found in basecommand.py) and includes an execute method.')
            except NotImplementedError:
                raise MMException("This command is not properly implemented. Be sure it contains an 'execute' method.")
            self.__printr(command_clazz.execute())
    # BUGFIX: was Python-2-only `except Exception, e:`; the `as` form works
    # on Python 2.6+ and Python 3.
    except Exception as e:
        self.__printr(e, is_exception=True)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_alert(self, alert):
    """Return the prediction for the 1-indexed alert number `alert`.

    Receives an alert number as an argument and returns the prediction for
    that alert if it is available; otherwise returns None.

    Positional arguments:
    alert -- 1-based index of the alert to fetch (integer).

    Returns:
    The alert entry, or None when no alerts exist or `alert` is out of range.
    """
    count = self.alerts_count()
    # BUGFIX: test for "no alerts" before comparing -- the original evaluated
    # `alert > self.alerts_count()` first, which raises TypeError on
    # Python 3 when alerts_count() returns None (and called the method twice).
    if count is None or alert > count:
        return None
    return self.get()[alert - 1]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_forecast(self, latitude, longitude):
    """Fetch weather data from the Dark Sky API for the given coordinates.

    Stores the parsed JSON response in `self.forecast` and also mirrors each
    top-level response key as an attribute on the instance. This is the
    function that should be used to fetch weather information.
    """
    request_url = self.url_builder(latitude, longitude)
    parsed = json.loads(self.http_get(request_url))
    self.forecast = parsed
    for key, value in parsed.items():
        setattr(self, key, value)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_forecast_fromstr(self, reply):
    """Load weather data from a Dark Sky API response string.

    Stores the parsed JSON in `self.forecast` and mirrors each top-level
    response key as an attribute on the instance.
    """
    parsed = json.loads(reply)
    self.forecast = parsed
    for key in parsed:
        setattr(self, key, parsed[key])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def url_builder(self, latitude, longitude):
    """Build the forecast.io (Dark Sky) request URL.

    Receives the latitude and the longitude and returns the full request URL
    string, including units, language and any exclude/extend parameters.

    Raises:
    TypeError -- when latitude/longitude cannot be converted to float.
    """
    try:
        float(latitude)
        float(longitude)
    except TypeError:
        raise TypeError('Latitude (%s) and Longitude (%s) must be a float number' % (latitude, longitude))

    parts = [self._darksky_url, self.forecast_io_api_key, '/',
             str(latitude).strip(), ',', str(longitude).strip()]
    if self.time_url and not self.time_url.isspace():
        parts.append(',' + self.time_url.strip())
    parts.append('?units=' + self.units_url.strip())
    parts.append('&lang=' + self.lang_url.strip())

    def _filtered(value):
        # `value` is either a single keyword or an iterable of keywords; keep
        # only entries the API recognizes, joined by commas.
        if value in self._allowed_excludes_extends:
            kept = [value]
        else:
            kept = [item for item in value if item in self._allowed_excludes_extends]
        return ','.join(kept)

    if self.exclude_url is not None:
        selection = _filtered(self.exclude_url)
        if selection:
            parts.append('&exclude=' + selection)
    if self.extend_url is not None:
        selection = _filtered(self.extend_url)
        if selection:
            parts.append('&extend=' + selection)
    return ''.join(parts)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def http_get(self, request_url):
    """Perform the HTTP GET for `request_url` and return the response body.

    Used internally to fetch data; stores caching/rate-limit response headers
    on the instance when present (None when missing, with a warning logged).

    Raises:
    requests.exceptions.Timeout / TooManyRedirects / RequestException on
    transport errors (RequestException additionally exits the process, as
    before); requests.exceptions.HTTPError when the status code is not 200.
    """
    headers = {'Accept-Encoding': 'gzip, deflate'}
    try:
        response = requests.get(request_url, headers=headers)
    except requests.exceptions.Timeout as ext:
        # BUGFIX: original logged with a stray positional arg (no placeholder)
        # and then fell through to use the undefined `response` (NameError).
        # Re-raise, matching the documented contract.
        log.error('Error: Timeout %s', ext)
        raise
    except requests.exceptions.TooManyRedirects as extmr:
        log.error('Error: TooManyRedirects %s', extmr)
        raise
    except requests.exceptions.RequestException as ex:
        log.error('Error: RequestException %s', ex)
        sys.exit(1)

    def _header(name):
        # Best-effort header extraction: warn and fall back to None.
        try:
            return response.headers[name]
        except KeyError as kerr:
            log.warning('Warning: Could not get headers. %s' % kerr)
            return None

    self.cache_control = _header('Cache-Control')
    # BUGFIX: a missing 'Expires' header previously clobbered
    # self.extend_url instead of setting self.expires.
    self.expires = _header('Expires')
    self.x_forecast_api_calls = _header('X-Forecast-API-Calls')
    self.x_responde_time = _header('X-Response-Time')
    # BUGFIX: was `is not 200` (identity test on an int) and `%x` (hex)
    # formatting of the status code in the error message.
    if response.status_code != 200:
        raise requests.exceptions.HTTPError('Bad response, status code: %d' % response.status_code)
    self.raw_response = response.text
    return self.raw_response
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _map_or_starmap(function, iterable, args, kwargs, map_or_starmap):
""" Shared function between parmap.map and parmap.starmap. Refer to those functions for details. """
|
# Translate legacy kwarg names to their pm_* replacements before use.
arg_newarg = (("parallel", "pm_parallel"), ("chunksize", "pm_chunksize"),
              ("pool", "pm_pool"), ("processes", "pm_processes"),
              ("parmap_progress", "pm_pbar"))
kwargs = _deprecated_kwargs(kwargs, arg_newarg)
chunksize = kwargs.pop("pm_chunksize", None)
progress = kwargs.pop("pm_pbar", False)
# Progress display silently degrades to off when tqdm is unavailable.
progress = progress and HAVE_TQDM
parallel, pool, close_pool = _create_pool(kwargs)
# Map:
if parallel:
    func_star = _get_helper_func(map_or_starmap)
    try:
        # The progress-bar path is only taken when we own the pool
        # (close_pool), because it closes the pool early.
        if progress and close_pool:
            try:
                num_tasks = len(iterable)
                # get a chunksize (as multiprocessing does):
                chunksize = _get_default_chunksize(chunksize,
                                                   pool, num_tasks)
                # use map_async to get progress information
                result = pool.map_async(func_star,
                                        izip(repeat(function),
                                             iterable,
                                             repeat(list(args)),
                                             repeat(kwargs)),
                                        chunksize)
            finally:
                pool.close()
            # Progress bar:
            # result.get() runs in finally so output is collected even if
            # the progress-bar helper raises.
            try:
                _do_pbar(result, num_tasks, chunksize)
            finally:
                output = result.get()
        else:
            result = pool.map_async(func_star,
                                    izip(repeat(function),
                                         iterable,
                                         repeat(list(args)),
                                         repeat(kwargs)),
                                    chunksize)
            output = result.get()
    finally:
        # Only tear down pools this call created; in the progress branch
        # close() already happened, so just join().
        if close_pool:
            if not progress:
                pool.close()
            pool.join()
else:
    # Serial fallback when parallelism is disabled or unavailable.
    output = _serial_map_or_starmap(function, iterable, args, kwargs,
                                    progress, map_or_starmap)
return output
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _map_or_starmap_async(function, iterable, args, kwargs, map_or_starmap):
""" Shared function between parmap.map_async and parmap.starmap_async. Refer to those functions for details. """
|
# Translate legacy kwarg names to their pm_* replacements before use.
arg_newarg = (("parallel", "pm_parallel"), ("chunksize", "pm_chunksize"),
              ("pool", "pm_pool"), ("processes", "pm_processes"),
              ("callback", "pm_callback"),
              ("error_callback", "pm_error_callback"))
kwargs = _deprecated_kwargs(kwargs, arg_newarg)
chunksize = kwargs.pop("pm_chunksize", None)
callback = kwargs.pop("pm_callback", None)
error_callback = kwargs.pop("pm_error_callback", None)
parallel, pool, close_pool = _create_pool(kwargs)
# Map:
if parallel:
    func_star = _get_helper_func(map_or_starmap)
    try:
        if sys.version_info[0] == 2:  # does not support error_callback
            result = pool.map_async(func_star,
                                    izip(repeat(function),
                                         iterable,
                                         repeat(list(args)),
                                         repeat(kwargs)),
                                    chunksize, callback)
        else:
            result = pool.map_async(func_star,
                                    izip(repeat(function),
                                         iterable,
                                         repeat(list(args)),
                                         repeat(kwargs)),
                                    chunksize, callback, error_callback)
    finally:
        # When we own the pool, close it and hand it to the wrapper so
        # it can be joined once the async result is consumed.
        if close_pool:
            pool.close()
            result = _ParallelAsyncResult(result, pool)
        else:
            result = _ParallelAsyncResult(result)
else:
    # Serial fallback: compute eagerly and wrap in a dummy async result.
    values = _serial_map_or_starmap(function, iterable, args, kwargs,
                                    False, map_or_starmap)
    result = _DummyAsyncResult(values)
return result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def map_async(function, iterable, *args, **kwargs):
"""This function is the multiprocessing.Pool.map_async version that supports multiple arguments. :param pm_parallel: Force parallelization on/off. If False, the function won't be asynchronous. :type pm_parallel: bool :param pm_chunksize: see :py:class:`multiprocessing.pool.Pool` :type pm_chunksize: int :param pm_callback: see :py:class:`multiprocessing.pool.Pool` :type pm_callback: function :param pm_error_callback: (not on python 2) see :py:class:`multiprocessing.pool.Pool` :type pm_error_callback: function :param pm_pool: Pass an existing pool. :type pm_pool: multiprocessing.pool.Pool :param pm_processes: Number of processes to use in the pool. See :py:class:`multiprocessing.pool.Pool` :type pm_processes: int """
|
# Thin wrapper: delegate to the shared async implementation in "map" mode.
return _map_or_starmap_async(function, iterable, args, kwargs, "map")
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def starmap_async(function, iterables, *args, **kwargs):
"""This function is the multiprocessing.Pool.starmap_async version that supports multiple arguments. :param pm_parallel: Force parallelization on/off. If False, the function won't be asynchronous. :type pm_parallel: bool :param pm_chunksize: see :py:class:`multiprocessing.pool.Pool` :type pm_chunksize: int :param pm_callback: see :py:class:`multiprocessing.pool.Pool` :type pm_callback: function :param pm_error_callback: see :py:class:`multiprocessing.pool.Pool` :type pm_error_callback: function :param pm_pool: Pass an existing pool. :type pm_pool: multiprocessing.pool.Pool :param pm_processes: Number of processes to use in the pool. See :py:class:`multiprocessing.pool.Pool` :type pm_processes: int """
|
# Thin wrapper: delegate to the shared async implementation in "starmap" mode.
return _map_or_starmap_async(function, iterables, args, kwargs, "starmap")
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lookup_domain(domain, nameservers=[], rtype="A", exclude_nameservers=[], timeout=2):
"""Wrapper for DNSQuery method"""
|
dns_exp = DNSQuery(domains=[domain], nameservers=nameservers, rtype=rtype,
exclude_nameservers=exclude_nameservers, timeout=timeout)
return dns_exp.lookup_domain(domain)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_out_ips(message):
"""Given a message, parse out the ips in the answer"""
|
ips = []
for entry in message.answer:
for rdata in entry.items:
ips.append(rdata.to_text())
return ips
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send_chaos_queries(self):
"""Send chaos queries to identify the DNS server and its manufacturer Note: we send 2 queries for BIND stuff per RFC 4892 and 1 query per RFC 6304 Note: we are not waiting on a second response because we shouldn't be getting injected packets here """
|
names = ["HOSTNAME.BIND", "VERSION.BIND", "ID.SERVER"]
self.results = {'exp-name': "chaos-queries"}
for name in names:
self.results[name] = {}
for nameserver in self.nameservers:
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.settimeout(self.timeout)
query = dns.message.make_query(name,
dns.rdatatype.from_text("TXT"),
dns.rdataclass.from_text("CH"))
sock.sendto(query.to_wire(), (nameserver, 53))
reads, _, _ = select.select([sock], [], [], self.timeout)
if len(reads) == 0:
self.results[name][nameserver] = None
else:
response = reads[0].recvfrom(4096)[0]
self.results[name][nameserver] = b64encode(response)
return self.results
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lookup_domains(self):
"""More complex DNS primitive that looks up domains concurrently Note: if you want to lookup multiple domains, you should use this function """
|
# One worker thread per (domain, nameserver) pair, throttled so that at
# most self.max_threads are alive at once.
thread_error = False
thread_wait_timeout = 200  # max seconds to wait for a free thread slot
ind = 1
total_item_count = len(self.domains)
for domain in self.domains:
    for nameserver in self.nameservers:
        wait_time = 0
        # Back off until the number of live threads drops below the cap.
        while threading.active_count() > self.max_threads:
            time.sleep(1)
            wait_time += 1
            if wait_time > thread_wait_timeout:
                thread_error = True
                break
        if thread_error:
            self.results["error"] = "Threads took too long to finish."
            break
        log_prefix = "%d/%d: " % (ind, total_item_count)
        thread = threading.Thread(target=self.lookup_domain,
                                  args=(domain, nameserver,
                                        log_prefix))
        thread.setDaemon(1)
        # Thread creation can fail under resource pressure; retry a few
        # times with a short delay before giving up on this pair.
        thread_open_success = False
        retries = 0
        while not thread_open_success and retries < MAX_THREAD_START_RETRY:
            try:
                thread.start()
                self.threads.append(thread)
                thread_open_success = True
            except:
                retries += 1
                time.sleep(THREAD_START_DELAY)
                logging.error("%sThread start failed for %s, retrying... (%d/%d)" % (log_prefix, domain, retries, MAX_THREAD_START_RETRY))
        if retries == MAX_THREAD_START_RETRY:
            logging.error("%sCan't start a new thread for %s after %d retries." % (log_prefix, domain, retries))
    if thread_error:
        break
    # NOTE(review): the counter advances per domain, not per
    # (domain, nameserver) pair, so the "i/total" log prefix counts domains.
    ind += 1
# Give each worker up to three lookup-timeouts to finish.
for thread in self.threads:
    thread.join(self.timeout * 3)
return self.results
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start(self, timeout=None):
"""Start running the command"""
|
self.thread.start()
start_time = time.time()
if not timeout:
timeout = self.timeout
# every second, check the condition of the thread and return
# control to the user if appropriate
while start_time + timeout > time.time():
self.thread.join(1)
if self.started:
return True
if self.error:
return False
return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stop(self, timeout=None):
"""Stop the given command"""
|
if not timeout:
timeout = self.timeout
self.kill_switch()
# Send the signal to all the process groups
self.process.kill()
self.thread.join(timeout)
try:
os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
except:
pass
if self.stopped:
return True
else:
return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def traceroute_batch(input_list, results={}, method="udp", cmd_arguments=None, delay_time=0.1, max_threads=100):
""" This is a parallel version of the traceroute primitive. :param input_list: the input is a list of domain names :param method: the packet type used for traceroute, UDP by default :param cmd_arguments: the list of arguments that need to be passed to traceroute. :param delay_time: delay before starting each thread :param max_threads: maximum number of concurrent threads :return: """
|
# One worker thread per domain, throttled to at most max_threads alive.
threads = []
thread_error = False
thread_wait_timeout = 200  # max seconds to wait for a free thread slot
ind = 1
total_item_count = len(input_list)
for domain in input_list:
    wait_time = 0
    # Back off until the number of live threads drops below the cap.
    while threading.active_count() > max_threads:
        time.sleep(1)
        wait_time += 1
        if wait_time > thread_wait_timeout:
            thread_error = True
            break
    if thread_error:
        results["error"] = "Threads took too long to finish."
        break
    # add just a little bit of delay before starting the thread
    # to avoid overwhelming the connection.
    time.sleep(delay_time)
    log_prefix = "%d/%d: " % (ind, total_item_count)
    # Workers share the `results` dict; traceroute() is expected to write
    # its own entry keyed by domain.
    thread = threading.Thread(target=traceroute,
                              args=(domain, method, cmd_arguments,
                                    results, log_prefix))
    ind += 1
    thread.setDaemon(1)
    # Thread creation can fail under resource pressure; retry with a
    # short delay before giving up on this domain.
    thread_open_success = False
    retries = 0
    while not thread_open_success and retries < MAX_THREAD_START_RETRY:
        try:
            thread.start()
            threads.append(thread)
            thread_open_success = True
        except:
            retries += 1
            time.sleep(THREAD_START_DELAY)
            logging.error("%sThread start failed for %s, retrying... (%d/%d)" % (log_prefix, domain, retries, MAX_THREAD_START_RETRY))
    if retries == MAX_THREAD_START_RETRY:
        logging.error("%sCan't start a new thread for %s after %d retries." % (log_prefix, domain, retries))
for thread in threads:
    thread.join(thread_wait_timeout)
return results
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _traceroute_callback(self, line, kill_switch):
""" Callback function to handle traceroute. :param self: :param line: :param kill_switch: :return: """
|
lowered = line.lower()
# traceroute prints "traceroute to <host> ..." once it is under way.
if "traceroute to" in lowered:
    self.started = True
# Fatal conditions that abort the run: missing root privileges (seen
# with TCP and ICMP traceroute) and failed name resolution.
for fatal_marker in ("enough privileges", "service not known"):
    if fatal_marker in lowered:
        self.error = True
        self.kill_switch()
        self.stopped = True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def output_callback(self, line, kill_switch):
"""Set status of openvpn according to what we process"""
|
# Keep a transcript of everything openvpn printed, one line at a time.
self.notifications += "%s\n" % line
if "Initialization Sequence Completed" in line:
    self.started = True
if any(marker in line for marker in ("ERROR:",
                                     "Cannot resolve host address:")):
    self.error = True
if "process exiting" in line:
    self.stopped = True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_experiments(self):
"""This function will return the list of experiments. """
|
logging.debug("Loading experiments.")
# look for experiments in experiments directory
exp_dir = self.config['dirs']['experiments_dir']
# The [!_]*.py glob skips private modules (names starting with "_").
for path in glob.glob(os.path.join(exp_dir, '[!_]*.py')):
    # get name of file and path
    name, ext = os.path.splitext(os.path.basename(path))
    # load the experiment
    try:
        # do not load modules that have already been loaded
        # (loaded_modules is a module-level set shared across calls)
        if name in loaded_modules:
            continue
        # Importing the module registers its experiments as a side effect
        # (presumably via an ExperimentList metaclass/registry — confirm).
        imp.load_source(name, path)
        loaded_modules.add(name)
        logging.debug("Loaded experiment \"%s(%s)\"." % (name, path))
    except Exception as exception:
        # A broken experiment file is logged and skipped, not fatal.
        logging.exception("Failed to load experiment %s: %s" %
                          (name, exception))
logging.debug("Finished loading experiments.")
# return dict of experiment names and classes
return ExperimentList.experiments
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _tcpdump_callback(self, line, kill_switch):
"""Callback function to handle tcpdump"""
|
lowered = line.lower()
# tcpdump announces either "listening on ..." or "reading from ..."
# once capture is under way.
if any(token in lowered for token in ("listening", "reading")):
    self.started = True
if "no suitable device" in lowered:
    self.error = True
    self.kill_switch()
# "... packets dropped by kernel" marks the end-of-run summary.
if "by kernel" in lowered:
    self.stopped = True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _run():
"""Entry point for package and cli uses"""
|
# NOTE(review): Python 2 code (print statements). Body fragment of the
# command-line entry point.
args = parse_args()
# parse custom parameters
# --custom-meta takes "key1:val1,key2:val2" pairs.
custom_meta = None
if args.custom_meta:
    print "Adding custom parameters:"
    custom_meta = {}
    try:
        for item in args.custom_meta.split(','):
            key, value = item.split(':')
            custom_meta[key] = value
            print 'key: %s, value: %s' % (key, value)
    except Exception as e:
        sys.stderr.write("ERROR: Can not parse custom meta tags! %s\n" % (str(e)))
# we need to store some persistent info, so check if a config file
# exists (default location is ~/.centinel/config.ini). If the file
# does not exist, then create a new one at run time
configuration = centinel.config.Configuration()
if args.config:
    configuration.parse_config(args.config)
else:
    # if the file does not exist, then the default config file
    # will be used
    new_configuration = None
    if os.path.exists(DEFAULT_CONFIG_FILE):
        configuration.parse_config(DEFAULT_CONFIG_FILE)
    else:
        print 'Configuration file does not exist. Creating a new one.'
        new_configuration = centinel.config.Configuration()
    # Version mismatch: warn unless --update-config was given, in which
    # case migrate the old config and keep a .old backup.
    if not ('version' in configuration.params and
            configuration.params['version']['version'] == centinel.__version__):
        if not args.update_config:
            print ('WARNING: configuration file is from '
                   'a different version (%s) of '
                   'Centinel. Run with --update-config to update '
                   'it.' % (configuration.params['version']['version']))
        else:
            new_configuration = centinel.config.Configuration()
            backup_path = DEFAULT_CONFIG_FILE + ".old"
            new_configuration.update(configuration, backup_path)
    if new_configuration is not None:
        configuration = new_configuration
        configuration.write_out_config(DEFAULT_CONFIG_FILE)
        print 'New configuration written to %s' % (DEFAULT_CONFIG_FILE)
# --update-config only updates the config, then exits.
if args.update_config:
    sys.exit(0)
if args.verbose:
    if 'log' not in configuration.params:
        configuration.params['log'] = dict()
    configuration.params['log']['log_level'] = logging.DEBUG
# add custom meta values from CLI
if custom_meta is not None:
    if 'custom_meta' in configuration.params:
        configuration.params['custom_meta'].update(custom_meta)
    else:
        configuration.params['custom_meta'] = custom_meta
centinel.conf = configuration.params
client = centinel.client.Client(configuration.params)
client.setup_logging()
# disable cert verification if the flag is set
if args.no_verify:
    configuration.params['server']['verify'] = False
user = centinel.backend.User(configuration.params)
# Note: because we have mutually exclusive arguments, we don't
# have to worry about multiple arguments being called
if args.sync:
    centinel.backend.sync(configuration.params)
elif args.consent:
    user.informed_consent()
elif args.daemonize:
    # if we don't have a valid binary location, then exit
    if not os.path.exists(args.binary):
        print "Error: no binary found to daemonize"
        exit(1)
    centinel.daemonize.daemonize(args.auto_update, args.binary,
                                 args.user)
else:
    client.run()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_fingerprint_batch(input_list, results={}, default_port=443, delay_time=0.5, max_threads=100):
""" This is a parallel version of the TLS fingerprint primitive. :param input_list: the input is a list of host:ports. :param default_port: default port to use when no port specified :param delay_time: delay before starting each thread :param max_threads: maximum number of concurrent threads :return: """
|
# One worker thread per host:port row, throttled to max_threads alive.
threads = []
thread_error = False
thread_wait_timeout = 200  # max seconds to wait for a free thread slot
ind = 1
total_item_count = len(input_list)
for row in input_list:
    # Accept "host:port" or bare "host" (falls back to default_port);
    # anything else (e.g. two colons) is skipped.
    if len(row.split(":")) == 2:
        host, port = row.split(":")
    elif len(row.split(":")) == 1:
        host = row
        port = default_port
    else:
        continue
    port = int(port)
    wait_time = 0
    # Back off until the number of live threads drops below the cap.
    while threading.active_count() > max_threads:
        time.sleep(1)
        wait_time += 1
        if wait_time > thread_wait_timeout:
            thread_error = True
            break
    if thread_error:
        results["error"] = "Threads took too long to finish."
        break
    # add just a little bit of delay before starting the thread
    # to avoid overwhelming the connection.
    time.sleep(delay_time)
    log_prefix = "%d/%d: " % (ind, total_item_count)
    thread = threading.Thread(target=get_fingerprint,
                              args=(host, port,
                                    results, log_prefix))
    ind += 1
    thread.setDaemon(1)
    # Thread creation can fail under resource pressure; retry with a
    # short delay before giving up on this host.
    thread_open_success = False
    retries = 0
    while not thread_open_success and retries < MAX_THREAD_START_RETRY:
        try:
            thread.start()
            threads.append(thread)
            thread_open_success = True
        except:
            retries += 1
            time.sleep(THREAD_START_DELAY)
            logging.error("%sThread start failed for %s, retrying... (%d/%d)" % (log_prefix, host, retries, MAX_THREAD_START_RETRY))
    if retries == MAX_THREAD_START_RETRY:
        logging.error("%sCan't start a new thread for %s after %d retries." % (log_prefix, host, retries))
for thread in threads:
    thread.join(thread_wait_timeout)
return results
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def meta_redirect(content):
""" Returns redirecting URL if there is a HTML refresh meta tag, returns None otherwise :param content: HTML content """
|
# Decode defensively: invalid bytes become U+FFFD instead of raising.
decoded = content.decode("utf-8", errors="replace")
try:
    # NOTE(review): BeautifulSoup 3-style constructor (module call).
    soup = BeautifulSoup.BeautifulSoup(decoded)
except Exception as e:
    # Unparseable HTML: no redirect to report.
    return None
# Case-insensitive match on <meta http-equiv="refresh" ...>.
result = soup.find("meta", attrs={"http-equiv": re.compile("^refresh$", re.I)})
if result:
    try:
        # content looks like "5; url=http://example.com".
        wait, text = result["content"].split(";")
        text = text.strip()
        if text.lower().startswith("url="):
            url = text[4:]
            return url
    except:
        # there are normal meta tag with refresh that are not
        # redirect and don't have a URL in it
        pass
return None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_http_request(netloc, path="/", headers=None, ssl=False):
""" Actually gets the http. Moved this to it's own private method since it is called several times for following redirects :param host: :param path: :param headers: :param ssl: :return: """
|
# Default port follows the scheme unless netloc carries an explicit one.
if ssl:
    port = 443
else:
    port = 80
host = netloc
if len(netloc.split(":")) == 2:
    host, port = netloc.split(":")
# Echo the request parameters back so the caller can log what was sent.
request = {"host": host,
           "port": port,
           "path": path,
           "ssl": ssl,
           "method": "GET"}
if headers:
    request["headers"] = headers
response = {}
try:
    conn = ICHTTPConnection(host=host, port=port, timeout=10)
    conn.request(path, headers, ssl, timeout=10)
    response["status"] = conn.status
    response["reason"] = conn.reason
    response["headers"] = conn.headers
    body = conn.body
    try:
        response["body"] = body.encode('utf-8')
    except UnicodeDecodeError:
        # if utf-8 fails to encode, just use base64
        # NOTE(review): Python 2 semantics — encoding a byte string
        # implicitly decodes it first, which is what raises here.
        response["body.b64"] = body.encode('base64')
except Exception as err:
    # Any transport failure is recorded rather than raised.
    response["failure"] = str(err)
result = {"response": response,
          "request": request}
return result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_requests_batch(input_list, results={}, delay_time=0.5, max_threads=100):
""" This is a parallel version of the HTTP GET primitive. :param input_list: the input is a list of either dictionaries containing query information, or just domain names (and NOT URLs). :param delay_time: delay before starting each thread :param max_threads: maximum number of concurrent threads :return: results in dict format Note: the input list can look like this: [ { "host": "www.google.com", "path": "/", "headers": {}, "ssl": False, "url": "http://www.google.com/" }, "www.twitter.com", "www.youtube.com", { "host": "www.facebook.com", "path": "/", "headers": {}, "ssl": True, "url": "http://www.facebook.com" }, ] """
|
# One worker thread per input row, throttled to max_threads alive.
threads = []
thread_error = False
thread_wait_timeout = 200  # max seconds to wait for a free thread slot
ind = 1
total_item_count = len(input_list)
# randomly select one user agent for one input list
user_agent = random.choice(user_agent_pool)
for row in input_list:
    headers = {}
    path = "/"
    ssl = False
    theme = "http"
    # Rows are either dicts with request details or bare host strings.
    if type(row) is dict:
        if "host" not in row:
            continue
        host = row["host"]
        if "path" in row:
            path = row["path"]
        if "headers" in row:
            if type(row["headers"]) is dict:
                headers = row["headers"]
        if "ssl" in row:
            ssl = row["ssl"]
            # NOTE(review): the scheme flips to https whenever the "ssl"
            # key is present, regardless of its truthiness — confirm.
            theme = "https"
        if "url" in row:
            url = row["url"]
        else:
            url = "%s://%s%s" % (theme, host, path)
    else:
        host = row
        url = "%s://%s%s" % (theme, host, path)
    wait_time = 0
    # Back off until the number of live threads drops below the cap.
    while threading.active_count() > max_threads:
        time.sleep(1)
        wait_time += 1
        if wait_time > thread_wait_timeout:
            thread_error = True
            break
    if thread_error:
        results["error"] = "Threads took too long to finish."
        break
    # Explicit per-row User-Agent wins over the batch-wide random one.
    if "User-Agent" not in headers:
        headers["User-Agent"] = user_agent
    # add just a little bit of delay before starting the thread
    # to avoid overwhelming the connection.
    time.sleep(delay_time)
    log_prefix = "%d/%d: " % (ind, total_item_count)
    thread = threading.Thread(target=get_request,
                              args=(host, path, headers, ssl,
                                    results, url, log_prefix))
    ind += 1
    thread.setDaemon(1)
    # Thread creation can fail under resource pressure; retry with a
    # short delay before giving up on this URL.
    thread_open_success = False
    retries = 0
    while not thread_open_success and retries < MAX_THREAD_START_RETRY:
        try:
            thread.start()
            threads.append(thread)
            thread_open_success = True
        except:
            retries += 1
            time.sleep(THREAD_START_DELAY)
            logging.error("%sThread start failed for %s, retrying... (%d/%d)" % (log_prefix, url, retries, MAX_THREAD_START_RETRY))
    if retries == MAX_THREAD_START_RETRY:
        logging.error("%sCan't start a new thread for %s after %d retries." % (log_prefix, url, retries))
for thread in threads:
    thread.join(thread_wait_timeout)
return results
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_script_for_location(content, destination):
"""Create a script with the given content, mv it to the destination, and make it executable Parameters: content- the content to put in the script destination- the directory to copy to Note: due to constraints on os.rename, destination must be an absolute path to a file, not just a directory """
|
# Write the content to a temporary file first, then move it into place
# (destination must be an absolute file path, per the docstring).
tmp = tempfile.NamedTemporaryFile(mode='w', delete=False)
try:
    tmp.write(content)
finally:
    tmp.close()
shutil.move(tmp.name, destination)
# Preserve the current mode bits and add execute for user/group/other.
exec_bits = stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
os.chmod(destination, os.stat(destination).st_mode | exec_bits)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def daemonize(package, bin_loc, user):
"""Create crontab entries to run centinel every hour and autoupdate every day Parameters: package- name of the currently installed package (will be used for autoupdate). If this parameter is None, the autoupdater will not be used bin_loc- location of the centinel binary/script. Note: this works by creating temporary files, adding the content of the cron scripts to these temporary files, moving these files into the appropriate cron folders, and making these scripts executable Note: if the script already exists, this will delete it """
|
# NOTE(review): Python 2 code (print statement). Requires write access
# to /etc/cron.hourly and /etc/cron.daily.
path = "/etc/cron.hourly/centinel-" + user
if user != "root":
    # create a script to run centinel every hour as the current user
    # (cron runs as root, so drop to the target user via su -c).
    hourly = "".join(["#!/bin/bash\n",
                      "# cron job for centinel\n",
                      "su ", user, " -c '", bin_loc, " --sync'\n",
                      "su ", user, " -c '", bin_loc, "'\n",
                      "su ", user, " -c '", bin_loc, " --sync'\n"])
else:
    # create a script to run centinel every hour as root
    hourly = "".join(["#!/bin/bash\n",
                      "# cron job for centinel\n",
                      bin_loc, " --sync\n",
                      bin_loc, "\n",
                      bin_loc, " --sync\n"])
create_script_for_location(hourly, path)
# create a script to get the client to autoupdate every day
# (skipped entirely when no package name was given).
if package is None:
    return
updater = "".join(["#!/bin/bash\n",
                   "# autoupdater for centinel\n"
                   "sudo pip install --upgrade ", package, "\n"])
create_script_for_location(updater, "/etc/cron.daily/centinel-autoupdate")
print "Successfully created cron jobs for user " + user
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_config_files(directory):
"""Create all available VPN configuration files in the given directory Note: I am basically just following along with what their script client does """
|
# get the config file template
template_url = ("https://securenetconnection.com/vpnconfig/"
                "openvpn-template.ovpn")
resp = requests.get(template_url)
resp.raise_for_status()
template = resp.content
# get the available servers and create a config file for each server
server_url = ("https://securenetconnection.com/vpnconfig/"
              "servers-cli.php")
resp = requests.get(server_url)
resp.raise_for_status()
# Server list is one pipe-delimited record per line.
servers = resp.content.split("\n")
if not os.path.exists(directory):
    os.makedirs(directory)
# Keep the raw server list alongside the generated configs.
with open(os.path.join(directory, "servers.txt"), 'w') as f:
    f.write(resp.content)
for server_line in servers:
    if server_line.strip() == "":
        continue
    server_line = server_line.split("|")
    # Records have 5 or 6 fields; the optional 6th is ignored.
    try:
        ip, desc, country, udp_sup, tcp_sup = server_line
    except ValueError:
        ip, desc, country, udp_sup, tcp_sup, no_rand = server_line
    with open(os.path.join(directory, ip + ".ovpn"), 'w') as file_o:
        file_o.write(template)
        # create tcp if available, else udp
        tcp_sup = tcp_sup.strip()
        if tcp_sup:
            port, proto = 443, "tcp"
        else:
            port, proto = 53, "udp"
        file_o.write("remote {0} {1}\n".format(ip, port))
        file_o.write("proto {0}\n".format(proto))
        # add automatic dns server update
        file_o.write("up /etc/openvpn/update-resolv-conf\n")
        file_o.write("down /etc/openvpn/update-resolv-conf\n")
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sync_scheduler(self):
"""Download the scheduler.info file and perform a smart comparison with what we currently have so that we don't overwrite the last_run timestamp To do a smart comparison, we go over each entry in the server's scheduler file. If a scheduler entry is not present in the server copy, we delete it in the client copy and if the scheduler entry is present in the server copy, then we overwrite the frequency count in the client copy """
|
# get the server scheduler.info file
url = "%s/%s/%s" % (self.config['server']['server_url'],
"experiments", "scheduler.info")
try:
req = requests.get(url, proxies=self.config['proxy']['proxy'],
auth=self.auth,
verify=self.verify)
req.raise_for_status()
except Exception as exp:
logging.exception("Error trying to download scheduler.info: %s" % exp)
raise exp
try:
server_sched = json.loads(req.content)
except Exception as exp:
logging.exception("Error parsing server scheduler: %s" % exp)
raise exp
sched_filename = os.path.join(self.config['dirs']['experiments_dir'],
'scheduler.info')
if not os.path.exists(sched_filename):
with open(sched_filename, 'w') as file_p:
json.dump(server_sched, file_p, indent=2,
separators=(',', ': '))
return
client_sched = {}
try:
with open(sched_filename, 'r') as file_p:
client_sched = json.load(file_p)
except Exception as exp:
client_sched = {}
logging.exception("Error loading scheduler file: %s" % exp)
logging.info("Making an empty scheduler")
# delete any scheduled tasks as necessary
#
# Note: this looks ugly, but we can't modify dictionaries
# while we iterate over them
client_exp_keys = client_sched.keys()
for exp in client_exp_keys:
if exp not in server_sched:
del client_sched[exp]
# and update all the other frequencies
for exp in server_sched:
if exp in client_sched:
client_sched[exp]['frequency'] = server_sched[exp]['frequency']
else:
client_sched[exp] = server_sched[exp]
# write out the results
with open(sched_filename, 'w') as file_p:
json.dump(client_sched, file_p, indent=2,
separators=(',', ': '))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def informed_consent(self):
"""Create a URL for the user to give their consent through"""
|
# NOTE(review): Python 2 code (print statement).
# Without a short typeable handle, fall back to a URL that embeds the
# base64-encoded credentials as query parameters.
if self.typeable_handle is None:
    consent_url = [self.config['server']['server_url'],
                   "/get_initial_consent?username="]
    consent_url.append(urlsafe_b64encode(self.username))
    consent_url.append("&password=")
    consent_url.append(urlsafe_b64encode(self.password))
else:
    # Preferred: a short handle the user can type by hand.
    consent_url = [self.config['server']['server_url'],
                   "/consent/"]
    consent_url.append(self.typeable_handle)
consent_url = "".join(consent_url)
print "Please go to %s to give your consent." % (consent_url)
return consent_url
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def return_abs_path(directory, path):
""" Unfortunately, Python is not smart enough to return an absolute path with tilde expansion, so I writing functionality to do this :param directory: :param path: :return: """
|
if directory is None or path is None:
return
directory = os.path.expanduser(directory)
return os.path.abspath(os.path.join(directory, path))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _run():
    """Entry point for all uses of centinel.

    Parses CLI arguments, installs SIGTERM/SIGINT handlers, configures
    console (and optional file) logging, validates the VM sharding
    arguments, and then either generates per-provider VPN config
    directories or starts a VPN scan.
    """
    args = parse_args()

    # register signal handlers so a kill/interrupt runs cleanup
    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)

    # set up logging: always to console, optionally to a file
    log_formatter = logging.Formatter("%(asctime)s %(filename)s(line %(lineno)d) "
                                      "%(levelname)s: %(message)s")
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(log_formatter)
    root_logger.addHandler(console_handler)

    # add file handler if specified
    if args.log_file:
        file_handler = logging.FileHandler(args.log_file)
        file_handler.setFormatter(log_formatter)
        root_logger.addHandler(file_handler)

    # vm_index is 1-based and must fall within [1, vm_num]
    if args.vm_num < 1:
        # FIX: parenthesized print is valid on both Python 2 and 3
        print("vm_num value cannot be negative!")
        return
    if args.vm_index < 1 or args.vm_index > args.vm_num:
        print("vm_index value cannot be negative or greater than vm_num!")
        return

    if args.create_conf_dir:
        # each provider writes its OpenVPN configs into <conf_dir>/vpns
        if args.create_HMA:
            hma_dir = return_abs_path(args.create_conf_dir, 'vpns')
            hma.create_config_files(hma_dir)
        elif args.create_IPVANISH:
            ipvanish_dir = return_abs_path(args.create_conf_dir, 'vpns')
            ipvanish.create_config_files(ipvanish_dir)
        elif args.create_PUREVPN:
            purevpn_dir = return_abs_path(args.create_conf_dir, 'vpns')
            purevpn.create_config_files(purevpn_dir)
        elif args.create_VPNGATE:
            vpngate_dir = return_abs_path(args.create_conf_dir, 'vpns')
            vpngate.create_config_files(vpngate_dir)
        # create the config files for the openvpn config files
        create_config_files(args.create_conf_dir)
    else:
        # sanity check: tls_auth and key_direction only make sense together
        if (args.tls_auth is not None and args.key_direction is None) or \
                (args.tls_auth is None and args.key_direction is not None):
            logging.error("tls_auth and key_direction must be specified "
                          "together!")
            return
        scan_vpns(directory=args.directory, auth_file=args.auth_file,
                  crt_file=args.crt_file, tls_auth=args.tls_auth,
                  key_direction=args.key_direction, exclude_list=args.exclude_list,
                  shuffle_lists=args.shuffle_lists, vm_num=args.vm_num,
                  vm_index=args.vm_index, reduce_vp=args.reduce_vp)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_config(self, config_file):
    """Load a JSON configuration file into ``self.params``.

    When a proxy type is configured, the 'proxy' section is collapsed
    into the {proxy_type: proxy_url} mapping expected downstream.

    :param config_file: path to the JSON configuration file.
    :return: None (results are stored on self.params).
    """
    with open(config_file, 'r') as config_fp:
        self.params = json.load(config_fp)
    proxy_section = self.params['proxy']
    if proxy_section['proxy_type']:
        self.params['proxy'] = {proxy_section['proxy_type']:
                                proxy_section['proxy_url']}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, old, backup_path=None):
    """Merge an old configuration into this (new) configuration.

    For every option present in both versions whose value differs, the
    user is asked interactively whether the new value should overwrite
    the old one; declined values are copied back from *old*. Options that
    no longer exist in the new version are reported as deprecated.

    :param old: old configuration (same class) to update from.
    :param backup_path: optional path to write a backup of the old config.
    """
    for category in old.params.keys():
        for parameter in old.params[category].keys():
            if (category in self.params and parameter in self.params[category] and
                    (old.params[category][parameter] != self.params[category][parameter]) and
                    (category != "version")):
                print ("Config value '%s.%s' "
                       "in old configuration is different "
                       "from the new version\n"
                       "[old value] = %s\n"
                       "[new value] = %s"
                       "" % (category, parameter,
                             old.params[category][parameter],
                             self.params[category][parameter]))
                # NOTE(review): raw_input is Python 2 only; use input() when
                # porting to Python 3.
                answer = raw_input("Do you want to overwrite? ([y]/n) ")
                while answer.lower() not in ['y', 'yes', 'n', 'no']:
                    # FIX: corrected typo in re-prompt ("recongnized").
                    answer = raw_input("Answer not recognized. Enter 'y' or 'n'. ")
                # FIX: compare lowercased -- the loop above accepts 'N'/'No',
                # but the original exact-match test then silently treated
                # them as "yes" and overwrote the value.
                if answer.lower() in ['n', 'no']:
                    old_value = old.params[category][parameter]
                    self.params[category][parameter] = old_value
            elif not (category in self.params and
                      parameter in self.params[category]):
                print ("Deprecated config option '%s.%s' has "
                       "been removed." % (category, parameter))
    if backup_path is not None:
        old.write_out_config(backup_path)
        # FIX: parenthesized print is valid on both Python 2 and 3.
        print("Backup saved in %s." % backup_path)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_out_config(self, config_file):
    """Serialize ``self.params`` to *config_file* as pretty-printed JSON.

    Note: any comments present in the original config file are lost,
    since only the parsed parameters are written back.

    :param config_file: destination path for the JSON output.
    :return: None
    """
    with open(config_file, 'w') as config_fp:
        json.dump(self.params, config_fp, indent=2, separators=(',', ': '))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def divide_url(self, url):
    """Split *url* into (host, path).

    The scheme prefix ('http://' or 'https://') is stripped; host is
    everything up to the first '/' after the scheme, path is the rest
    (empty string when there is none).

    :param url: URL string, with or without an http(s) scheme.
    :return: (host, path) tuple of strings.
    """
    # FIX: match the scheme as a prefix only; the original substring test
    # ('https://' in url) could mis-split a URL that merely contains a
    # scheme string somewhere in the middle (e.g. a redirect parameter).
    if url.startswith('https://'):
        rest = url[len('https://'):]
    elif url.startswith('http://'):
        rest = url[len('http://'):]
    else:
        rest = url
    host = rest.split('/')[0]
    path = rest[len(host):]
    return host, path
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hash_folder(folder, regex='[!_]*'):
    """Return {filename: urlsafe-base64 md5 digest} for files in *folder*.

    :param folder: the folder to compute the sums over.
    :param regex: glob pattern limiting which entries are matched; the
        default matches everything that does not start with an underscore.
    :return: dict mapping basename -> urlsafe base64 of the md5 digest.

    Note: subdirectories matching the pattern are skipped.
    """
    file_hashes = {}
    for path in glob.glob(os.path.join(folder, regex)):
        # exclude folders
        if not os.path.isfile(path):
            continue
        # FIX: hash the raw bytes ('rb') -- text mode would corrupt the
        # digest via newline translation on Windows and fails outright on
        # Python 3, where md5 requires bytes.
        with open(path, 'rb') as file_p:
            md5_hash = hashlib.md5(file_p.read()).digest()
        file_name = os.path.basename(path)
        file_hashes[file_name] = urlsafe_b64encode(md5_hash)
    return file_hashes
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compute_files_to_download(client_hashes, server_hashes):
    """Decide which files to fetch from the server and which to delete.

    A file is downloaded when it is missing on the client or its md5
    differs from the server's copy; a client file absent from the server
    is marked for deletion.

    :param client_hashes: {filename: md5-hash-string} on the client.
    :param server_hashes: {filename: md5-hash-string} on the server.
    :return: [files_to_download, files_to_delete]
    """
    # sentinel distinguishes "missing on client" from any stored value
    _missing = object()
    to_download = [name for name, digest in server_hashes.items()
                   if client_hashes.get(name, _missing) != digest]
    to_delete = [name for name in client_hashes
                 if name not in server_hashes]
    return [to_download, to_delete]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def spinner(beep=False, disable=False, force=False):
    """Create a context manager that shows a spinner on stdout.

    The spinner runs for as long as the context has not exited. It is
    created only when stdout is not redirected, unless *force* is set.

    :param beep: beep when the spinner finishes.
    :param disable: hide the spinner entirely.
    :param force: force the spinner even when stdout is redirected.

    Example::

        with spinner():
            do_something()
            do_something_else()
    """
    return Spinner(beep, disable, force)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def verifier(self, url):
    """Open *url* in a browser and prompt the user for the verifier code.

    Used during OAuth-style authorization: the user follows the link,
    approves access, and pastes the resulting code back here.

    :param url: authorization URL the user must visit.
    :return: the verifier code with surrounding spaces removed.
    """
    webbrowser.open(url)
    print('A browser should have opened up with a link to allow us to access')
    print('your account, follow the instructions on the link and paste the verifier')
    print('Code into here to give us access, if the browser didn\'t open, the link is:')
    print(url)
    print()
    code = input('Verifier: ')
    return code.lstrip(" ").rstrip(" ")
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_config(self):
    """Write ``self.config`` out to ``self.config_file`` as JSON.

    Creates the parent directory first if it does not exist yet.
    """
    config_dir = os.path.dirname(self.config_file)
    # FIX: guard against an empty dirname (a config_file with no directory
    # component) -- os.makedirs('') raises an error.
    if config_dir and not os.path.exists(config_dir):
        os.makedirs(config_dir)
    # FIX: dropped the redundant f.close(); the with block already closes.
    with open(self.config_file, 'w') as config_fp:
        config_fp.write(json.dumps(self.config))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.