text_prompt
stringlengths 157
13.1k
| code_prompt
stringlengths 7
19.8k
⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Row(self):
""" The class for a row in this list. """
|
if not hasattr(self, '_row_class'):
attrs = {'fields': self.fields, 'list': self, 'opener': self.opener}
for field in self.fields.values():
attrs[field.name] = field.descriptor
self._row_class = type('SharePointListRow', (SharePointListRow,), attrs)
return self._row_class
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def append(self, row):
""" Appends a row to the list. Takes a dictionary, returns a row. """
|
if isinstance(row, dict):
row = self.Row(row)
elif isinstance(row, self.Row):
pass
elif isinstance(row, SharePointListRow):
raise TypeError("row must be a dict or an instance of SharePointList.Row, not SharePointListRow")
else:
raise TypeError("row must be a dict or an instance of SharePointList.Row")
self.rows # Make sure self._rows exists.
self._rows.append(row)
return row
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove(self, row):
""" Removes the row from the list. """
|
self._rows.remove(row)
self._deleted_rows.add(row)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self):
""" Updates the list with changes. """
|
# Based on the documentation at
# http://msdn.microsoft.com/en-us/library/lists.lists.updatelistitems%28v=office.12%29.aspx
# Note, this ends up un-namespaced. SharePoint doesn't care about
# namespaces on this XML node, and will bork if any of these elements
# have a namespace prefix. Likewise Method and Field in
# SharePointRow.get_batch_method().
batches = E.Batch(ListVersion='1', OnError='Return')
# Here's the root element of our SOAP request.
xml = SP.UpdateListItems(SP.listName(self.id), SP.updates(batches))
# rows_by_batch_id contains a mapping from new rows to their batch
# IDs, so we can set their IDs when they are returned by SharePoint.
rows_by_batch_id, batch_id = {}, 1
for row in self._rows:
batch = row.get_batch_method()
if batch is None:
continue
# Add the batch ID
batch.attrib['ID'] = text_type(batch_id)
rows_by_batch_id[batch_id] = row
batches.append(batch)
batch_id += 1
for row in self._deleted_rows:
batch = E.Method(E.Field(text_type(row.id),
Name='ID'),
ID=text_type(batch_id), Cmd='Delete')
rows_by_batch_id[batch_id] = row
batches.append(batch)
batch_id += 1
if len(batches) == 0:
return
response = self.opener.post_soap(LIST_WEBSERVICE, xml,
soapaction='http://schemas.microsoft.com/sharepoint/soap/UpdateListItems')
for result in response.xpath('.//sp:Result', namespaces=namespaces):
batch_id, batch_result = result.attrib['ID'].split(',')
row = rows_by_batch_id[int(batch_id)]
error_code = result.find('sp:ErrorCode', namespaces=namespaces)
error_text = result.find('sp:ErrorText', namespaces=namespaces)
if error_code is not None and error_code.text != '0x00000000':
raise UpdateFailedError(row, batch_result,
error_code.text,
error_text.text)
if batch_result in ('Update', 'New'):
row._update(result.xpath('z:row', namespaces=namespaces)[0],
clear=True)
else:
self._deleted_rows.remove(row)
assert not self._deleted_rows
assert not any(row._changed for row in self.rows)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_batch_method(self):
""" Returns a change batch for SharePoint's UpdateListItems operation. """
|
if not self._changed:
return None
batch_method = E.Method(Cmd='Update' if self.id else 'New')
batch_method.append(E.Field(text_type(self.id) if self.id else 'New',
Name='ID'))
for field in self.fields.values():
if field.name in self._changed:
value = field.unparse(self._data[field.name] or '')
batch_method.append(E.Field(value, Name=field.name))
return batch_method
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_to_python(self, xmlrpc=None):
"""
Extracts a value for the field from an XML-RPC response.
"""
|
if xmlrpc:
return xmlrpc.get(self.name, self.default)
elif self.default:
return self.default
else:
return None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_outputs(self, input_value):
"""
Generate a set of output values for a given input.
"""
|
output_value = self.convert_to_xmlrpc(input_value)
output = {}
for name in self.output_names:
output[name] = output_value
return output
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def struct(self):
"""
XML-RPC-friendly representation of the current object state
"""
|
data = {}
for var, fmap in self._def.items():
if hasattr(self, var):
data.update(fmap.get_outputs(getattr(self, var)))
return data
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_args(self, client):
"""
Builds final set of XML-RPC method arguments based on
the method's arguments, any default arguments, and their
defined respective ordering.
"""
|
default_args = self.default_args(client)
if self.method_args or self.optional_args:
optional_args = getattr(self, 'optional_args', tuple())
args = []
for arg in (self.method_args + optional_args):
if hasattr(self, arg):
obj = getattr(self, arg)
if hasattr(obj, 'struct'):
args.append(obj.struct)
else:
args.append(obj)
args = list(default_args) + args
else:
args = default_args
return args
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_result(self, raw_result):
"""
Performs actions on the raw result from the XML-RPC response.
If a `results_class` is defined, the response will be converted
into one or more object instances of that class.
"""
|
if self.results_class and raw_result:
if isinstance(raw_result, dict_type):
return self.results_class(raw_result)
elif isinstance(raw_result, collections.Iterable):
return [self.results_class(result) for result in raw_result]
return raw_result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def parse(self, text):
'''Returns a list of addresses found in text
together with parsed address parts
'''
results = []
if isinstance(text, str):
if six.PY2:
text = unicode(text, 'utf-8')
self.clean_text = self._normalize_string(text)
# get addresses
addresses = set(self._get_addresses(self.clean_text))
if addresses:
# append parsed address info
results = list(map(self._parse_address, addresses))
return results
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _parse_address(self, address_string):
'''Parses address into parts'''
match = utils.match(self.rules, address_string, flags=re.VERBOSE | re.U)
if match:
match_as_dict = match.groupdict()
match_as_dict.update({'country_id': self.country})
# combine results
cleaned_dict = self._combine_results(match_as_dict)
# create object containing results
return address.Address(**cleaned_dict)
return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _get_addresses(self, text):
'''Returns a list of addresses found in text'''
# find addresses
addresses = []
matches = utils.findall(
self.rules,
text,
flags=re.VERBOSE | re.U)
if(matches):
for match in matches:
addresses.append(match[0].strip())
return addresses
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse(some_text, **kwargs):
"""Creates request to AddressParser and returns list of Address objects """
|
ap = parser.AddressParser(**kwargs)
return ap.parse(some_text)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setAttribute(values, value):
""" Takes the values of an attribute value list and attempts to append attributes of the proper type, inferred from their Python type. """
|
if isinstance(value, int):
values.add().int32_value = value
elif isinstance(value, float):
values.add().double_value = value
elif isinstance(value, long):
values.add().int64_value = value
elif isinstance(value, str):
values.add().string_value = value
elif isinstance(value, bool):
values.add().bool_value = value
elif isinstance(value, (list, tuple, array.array)):
for v in value:
setAttribute(values, v)
elif isinstance(value, dict):
for key in value:
setAttribute(
values.add().attributes.attr[key].values, value[key])
else:
values.add().string_value = str(value)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deepSetAttr(obj, path, val):
""" Sets a deep attribute on an object by resolving a dot-delimited path. If path does not exist an `AttributeError` will be raised`. """
|
first, _, rest = path.rpartition('.')
return setattr(deepGetAttr(obj, first) if first else obj, rest, val)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convertDatetime(t):
""" Converts the specified datetime object into its appropriate protocol value. This is the number of milliseconds from the epoch. """
|
epoch = datetime.datetime.utcfromtimestamp(0)
delta = t - epoch
millis = delta.total_seconds() * 1000
return int(millis)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getValueFromValue(value):
""" Extract the currently set field from a Value structure """
|
if type(value) != common.AttributeValue:
raise TypeError(
"Expected an AttributeValue, but got {}".format(type(value)))
if value.WhichOneof("value") is None:
raise AttributeError("Nothing set for {}".format(value))
return getattr(value, value.WhichOneof("value"))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def toJson(protoObject, indent=None):
""" Serialises a protobuf object as json """
|
# Using the internal method because this way we can reformat the JSON
js = json_format.MessageToDict(protoObject, False)
return json.dumps(js, indent=indent)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getProtocolClasses(superclass=message.Message):
""" Returns all the protocol classes that are subclasses of the specified superclass. Only 'leaf' classes are returned, corresponding directly to the classes defined in the protocol. """
|
# We keep a manual list of the superclasses that we define here
# so we can filter them out when we're getting the protocol
# classes.
superclasses = set([message.Message])
thisModule = sys.modules[__name__]
subclasses = []
for name, class_ in inspect.getmembers(thisModule):
if ((inspect.isclass(class_) and
issubclass(class_, superclass) and
class_ not in superclasses)):
subclasses.append(class_)
return subclasses
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def runCommandSplits(splits, silent=False, shell=False):
""" Run a shell command given the command's parsed command line """
|
try:
if silent:
with open(os.devnull, 'w') as devnull:
subprocess.check_call(
splits, stdout=devnull, stderr=devnull, shell=shell)
else:
subprocess.check_call(splits, shell=shell)
except OSError as exception:
if exception.errno == 2: # cmd not found
raise Exception(
"Can't find command while trying to run {}".format(splits))
else:
raise
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _createSchemaFiles(self, destPath, schemasPath):
""" Create a hierarchy of proto files in a destination directory, copied from the schemasPath hierarchy """
|
# Create the target directory hierarchy, if neccessary
ga4ghPath = os.path.join(destPath, 'ga4gh')
if not os.path.exists(ga4ghPath):
os.mkdir(ga4ghPath)
ga4ghSchemasPath = os.path.join(ga4ghPath, 'schemas')
if not os.path.exists(ga4ghSchemasPath):
os.mkdir(ga4ghSchemasPath)
ga4ghSchemasGa4ghPath = os.path.join(ga4ghSchemasPath, 'ga4gh')
if not os.path.exists(ga4ghSchemasGa4ghPath):
os.mkdir(ga4ghSchemasGa4ghPath)
ga4ghSchemasGooglePath = os.path.join(ga4ghSchemasPath, 'google')
if not os.path.exists(ga4ghSchemasGooglePath):
os.mkdir(ga4ghSchemasGooglePath)
ga4ghSchemasGoogleApiPath = os.path.join(
ga4ghSchemasGooglePath, 'api')
if not os.path.exists(ga4ghSchemasGoogleApiPath):
os.mkdir(ga4ghSchemasGoogleApiPath)
# rewrite the proto files to the destination
for root, dirs, files in os.walk(schemasPath):
for protoFilePath in fnmatch.filter(files, '*.proto'):
src = os.path.join(root, protoFilePath)
dst = os.path.join(
ga4ghSchemasPath,
os.path.relpath(root, schemasPath), protoFilePath)
self._copySchemaFile(src, dst)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _doLineReplacements(self, line):
""" Given a line of a proto file, replace the line with one that is appropriate for the hierarchy that we want to compile """
|
# ga4gh packages
packageString = 'package ga4gh;'
if packageString in line:
return line.replace(
packageString,
'package ga4gh.schemas.ga4gh;')
importString = 'import "ga4gh/'
if importString in line:
return line.replace(
importString,
'import "ga4gh/schemas/ga4gh/')
# google packages
googlePackageString = 'package google.api;'
if googlePackageString in line:
return line.replace(
googlePackageString,
'package ga4gh.schemas.google.api;')
googleImportString = 'import "google/api/'
if googleImportString in line:
return line.replace(
googleImportString,
'import "ga4gh/schemas/google/api/')
optionString = 'option (google.api.http)'
if optionString in line:
return line.replace(
optionString,
'option (.ga4gh.schemas.google.api.http)')
return line
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _copySchemaFile(self, src, dst):
""" Copy a proto file to the temporary directory, with appropriate line replacements """
|
with open(src) as srcFile, open(dst, 'w') as dstFile:
srcLines = srcFile.readlines()
for srcLine in srcLines:
toWrite = self._doLineReplacements(srcLine)
dstFile.write(toWrite)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_protodef_to_editable(proto):
""" Protobuf objects can't have arbitrary fields addedd and we need to later on add comments to them, so we instead make "Editable" objects that can do so """
|
class Editable(object):
def __init__(self, prot):
self.kind = type(prot)
self.name = prot.name
self.comment = ""
self.options = dict([(key.name, value) for (key, value) in prot.options.ListFields()])
if isinstance(prot, EnumDescriptorProto):
self.value = [convert_protodef_to_editable(x) for x in prot.value]
elif isinstance(prot, DescriptorProto):
self.field = [convert_protodef_to_editable(x) for x in prot.field]
self.enum_type = [convert_protodef_to_editable(x) for x in prot.enum_type]
self.nested_type = prot.nested_type
self.oneof_decl = prot.oneof_decl
elif isinstance(prot, EnumValueDescriptorProto):
self.number = prot.number
elif isinstance(prot, FieldDescriptorProto):
if prot.type in [11, 14]:
self.ref_type = prot.type_name[1:]
self.type = prot.type
self.label = prot.label
elif isinstance(prot, ServiceDescriptorProto):
self.method = [convert_protodef_to_editable(x) for x in prot.method]
elif isinstance(prot, MethodDescriptorProto):
self.input_type = prot.input_type
self.output_type = prot.output_type
else:
raise Exception, type(prot)
return Editable(proto)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def haversine(point1, point2, unit='km'):
""" Calculate the great-circle distance between two points on the Earth surface. :input: two 2-tuples, containing the latitude and longitude of each point in decimal degrees. Keyword arguments: unit -- a string containing the initials of a unit of measurement (i.e. miles = mi) default 'km' (kilometers). Example: haversine((45.7597, 4.8422), (48.8567, 2.3508)) :output: Returns the distance between the two points. The default returned unit is kilometers. The default unit can be changed by setting the unit parameter to a string containing the initials of the desired unit. Other available units are miles (mi), nautic miles (nmi), meters (m), feets (ft) and inches (in). """
|
# mean earth radius - https://en.wikipedia.org/wiki/Earth_radius#Mean_radius
AVG_EARTH_RADIUS_KM = 6371.0088
# Units values taken from http://www.unitconversion.org/unit_converter/length.html
conversions = {'km': 1,
'm': 1000,
'mi': 0.621371192,
'nmi': 0.539956803,
'ft': 3280.839895013,
'in': 39370.078740158}
# get earth radius in required units
avg_earth_radius = AVG_EARTH_RADIUS_KM * conversions[unit]
# unpack latitude/longitude
lat1, lng1 = point1
lat2, lng2 = point2
# convert all latitudes/longitudes from decimal degrees to radians
lat1, lng1, lat2, lng2 = map(radians, (lat1, lng1, lat2, lng2))
# calculate haversine
lat = lat2 - lat1
lng = lng2 - lng1
d = sin(lat * 0.5) ** 2 + cos(lat1) * cos(lat2) * sin(lng * 0.5) ** 2
return 2 * avg_earth_radius * asin(sqrt(d))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
""" Retrieve run folder paths from the command line Ensure only metrics required for summary are loaded Load the run metrics Calculate the summary metrics Display error by lane, read """
|
logging.basicConfig(level=logging.INFO)
run_metrics = py_interop_run_metrics.run_metrics()
summary = py_interop_summary.run_summary()
valid_to_load = py_interop_run.uchar_vector(py_interop_run.MetricCount, 0)
py_interop_run_metrics.list_summary_metrics_to_load(valid_to_load)
for run_folder_path in sys.argv[1:]:
run_folder = os.path.basename(run_folder_path)
try:
run_metrics.read(run_folder_path, valid_to_load)
except Exception, ex:
logging.warn("Skipping - cannot read RunInfo.xml: %s - %s"%(run_folder, str(ex)))
continue
py_interop_summary.summarize_run_metrics(run_metrics, summary)
error_rate_read_lane_surface = numpy.zeros((summary.size(), summary.lane_count(), summary.surface_count()))
for read_index in xrange(summary.size()):
for lane_index in xrange(summary.lane_count()):
for surface_index in xrange(summary.surface_count()):
error_rate_read_lane_surface[read_index, lane_index, surface_index] = \
summary.at(read_index).at(lane_index).at(surface_index).error_rate().mean()
logging.info("Run Folder: "+run_folder)
for read_index in xrange(summary.size()):
read_summary = summary.at(read_index)
logging.info("Read "+str(read_summary.read().number())+" - Top Surface Mean Error: "+str(error_rate_read_lane_surface[read_index, :, 0].mean()))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def gen_csv(sc, filename, field_list, source, filters):
'''csv SecurityCenterObj, AssetListName, CSVFields, EmailAddress
'''
# First thing we need to do is initialize the csvfile and build the header
# for the file.
datafile = open(filename, 'wb')
csvfile = csv.writer(datafile)
header = []
for field in field_list:
header.append(fields.fields[field]['name'])
csvfile.writerow(header)
debug.write('Generating %s: ' % filename)
# Next we will run the Security Center query. because this could be a
# potentially very large dataset that is returned, we don't want to run out
# of memory. To get around this, we will pass the query function the writer
# function with the appropriate fields so that it is parsed inline.
fparams = {'fobj': csvfile, 'flist': field_list}
sc.query('vulndetails', source=source,
func=writer, func_params=fparams, **filters)
debug.write('\n')
# Lastly we need to close the datafile.
datafile.close()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def login(self, user, passwd):
'''Logs the user into SecurityCenter and stores the needed token and cookies.'''
resp = self.post('token', json={'username': user, 'password': passwd})
self._token = resp.json()['response']['token']
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def download_scans(sc, age=0, unzip=False, path='scans'):
'''Scan Downloader
Here we will attempt to download all of the scans that have completed between
now and AGE days ago.
sc = SecurityCenter5 object
age = how many days back do we want to pull? (default: 0)
unzip = Do we want to uncompress the nessus files? (default: False)
path = Path where the resulting data will be placed. (default: scans)
'''
# if the download path doesn't exist, we need to create it.
if not os.path.exists(path):
logger.debug('scan path didn\'t exist. creating it.')
os.makedirs(path)
# Now we will need to comuter the timestamp for the date that the age has
# apecified. The API expects this in a unix timestamp format.
findate = (date.today() - timedelta(days=age))
# Lets get the list of scans that had completed within the timefram that we
# had specified.
logger.debug('getting scan results for parsing')
resp = sc.get('scanResult', params={
'startTime': int(time.mktime(findate.timetuple())),
'fields': 'name,finishTime,downloadAvailable,repository',
})
for scan in resp.json()['response']['usable']:
# If this particular scan does not have any results (either it was a
# partial, failed, or incomplete scan) then we have nothing further to
# do and should simply ignore this scan.
if scan['downloadAvailable'] == 'false':
logger.debug('%s/"%s" not available for download' % (scan['id'],
scan['name']))
else:
# Well look, this scan actually has results, lets go ahead and pull
# them down.
logger.debug('%s/"%s" downloading' % (scan['id'], scan['name']))
scandata = sc.post('scanResult/%s/download' % scan['id'],
json={'downloadType': 'v2'})
sfin = datetime.fromtimestamp(int(scan['finishTime']))
# The filename is being computed generically here. As this will be
# used whether we extract the .nessus file out of the zipfile or
# not.
filename = '%s-%s.%s.%s' % (scan['id'],
scan['name'].replace(' ', '_'),
scan['repository']['id'],
sfin.strftime('%Y.%m.%d-%H.%M'))
if unzip:
# Unzip that .nessus file!
logger.debug('extracting %s/%s' % (scan['id'], scan['name']))
zfile = ZipFile(StringIO(buf=scandata.content))
scanfile = zfile.filelist[0]
scanfile.filename = '%s.nessus' % filename
zfile.extract(scanfile, path=path)
else:
# We want to keep it compressed, just dump to disk.
logger.debug('writing zip for %s/%s' % (scan['id'], scan['name']))
with open('%s.zip' % filename, 'wb') as zfile:
zfile.write(scandata.content)
# Were done with this scan file!!!
logger.info('%s/"%s" downloaded' % (scan['id'], scan['name']))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def update(sc, filename, asset_id):
'''
Updates a DNS Asset List with the contents of the filename. The assumed
format of the file is 1 entry per line. This function will convert the
file contents into an array of entries and then upload that array into
SecurityCenter.
'''
addresses = []
with open(filename) as hostfile:
for line in hostfile.readlines():
addresses.append(line.strip('\n'))
sc.asset_update(asset_id, dns=addresses)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def generate_html_report(base_path, asset_id):
'''
Generates the HTML report and dumps it into the specified filename
'''
jenv = Environment(loader=PackageLoader('swchange', 'templates'))
s = Session()
#hosts = s.query(Host).filter_by(asset_id=asset_id).all()
asset = s.query(AssetList).filter_by(id=asset_id).first()
if not asset:
print 'Invalid Asset ID (%s)!' % asset_id
return
filename = os.path.join(base_path, '%s-INV-CHANGE-%s.html' % (
asset.name,
datetime.now().strftime('%Y-%m-%d.%H.%M.%S'))
)
print 'Generating Report : %s' % filename
with open(filename, 'wb') as report:
report.write(jenv.get_template('layout.html').render(
asset=asset,
current_date=datetime.now()
))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def gen_csv(sc, filename):
'''csv SecurityCenterObj, EmailAddress
'''
# First thing we need to do is initialize the csvfile and build the header
# for the file.
datafile = open(filename, 'wb')
csvfile = csv.writer(datafile)
csvfile.writerow(['Software Package Name', 'Count'])
debug.write('Generating %s: ' % filename)
# Next we will run the Security Center query. because this could be a
# potentially very large dataset that is returned, we don't want to run out
# of memory. To get around this, we will pass the query function the writer
# function with the appropriate fields so that it is parsed inline.
fparams = {'fobj': csvfile}
sc.query('listsoftware', func=writer, func_params=fparams)
debug.write('\n')
# Lastly we need to close the datafile.
datafile.close()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def download(sc, age=0, path='reports', **args):
'''Report Downloader
The report downloader will pull reports down from SecurityCenter
based on the conditions provided to the path provided.
sc = SecurityCenter5 object
age = number of days old the report may be to be included in the
search.
path = The path to the download folder. One will be created if
it doesn't exist.
OPTIONAL ARGUMENTS:
type = Specifies the type of the report. e.g. pdf, csv, etc.
name = A subset of the name of the report used for pattern matching.
'''
# if the download path doesn't exist, we need to create it.
if not os.path.exists(path):
logger.debug('report path didn\'t exist. creating it.')
os.makedirs(path)
# Now we will need to comuter the timestamp for the date that the age has
# apecified. The API expects this in a unix timestamp format.
findate = (date.today() - timedelta(days=age))
# Lets get the listing of reports that we will be working with...
reports = sc.get('report', params={
'startTime': findate.strftime('%s'),
'fields': 'name,type,status,finishTime'
})
# now we will work our way through the resulting dataset and attempt
# to download the reports if they meet our criteria.
for report in reports.json()['response']['usable']:
# We can only download completed reports, so we have no reason
# to even try to download anything that isnt in the completed
# status.
if report['status'] == 'Completed':
# If the name or type arguments are passed, then we will
# want to make sure that we only download the relevent
# reports that match these criteria. For name, we will
# be performing a simple pattern match, and for type we will
# check the report type. e.g. pdf, csv, etc.
if 'name' in args and args['name'] not in report['name']:
continue
if 'type' in args and args['type'].lower() != report['type'].lower():
continue
# now to actually get the report...
report_data = sc.post('report/%s/download' % report['id'],
json={'id': int(report['id'])})
# compute a report name...
report_name = '%s-%s.%s' % (
report['name'].replace(' ', '_'),
report['finishTime'],
report['type']
)
# and finally write the report to disk...
logger.info('writing %s to disk' % report_name)
with open(os.path.join(path, report_name), 'wb') as report_file:
report_file.write(report_data.content)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def post(self, path, **kwargs):
'''Calls the specified path with the POST method'''
resp = self._session.post(self._url(path), **self._builder(**kwargs))
if 'stream' in kwargs:
return resp
else:
return self._resp_error_check(resp)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def import_repo(self, repo_id, fileobj):
'''
Imports a repository package using the repository ID specified.
'''
# Step 1, lets upload the file
filename = self.upload(fileobj).json()['response']['filename']
# Step 2, lets tell SecurityCenter what to do with the file
return self.post('repository/{}/import'.format(repo_id), json={'file': filename})
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _revint(self, version):
'''
Internal function to convert a version string to an integer.
'''
intrev = 0
vsplit = version.split('.')
for c in range(len(vsplit)):
item = int(vsplit[c]) * (10 ** (((len(vsplit) - c - 1) * 2)))
intrev += item
return intrev
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _revcheck(self, func, version):
'''
Internal function to see if a version is func than what we have
determined to be talking to. This is very useful for newer API calls
to make sure we don't accidentally make a call to something that
doesnt exist.
'''
current = self._revint(self.version)
check = self._revint(version)
if func in ('lt', '<=',):
return check <= current
elif func in ('gt', '>='):
return check >= current
elif func in ('eq', '=', 'equals'):
return check == current
else:
return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _build_xrefs(self):
'''
Internal function to populate the xrefs list with the external
references to be used in searching plugins and potentially
other functions as well.
'''
xrefs = set()
plugins = self.plugins()
for plugin in plugins:
for xref in plugin['xrefs'].split(', '):
xrf = xref.replace('-', '_').split(':')[0]
if xrf is not '':
xrefs.add(xrf)
self._xrefs = list(xrefs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def login(self, user, passwd):
    """Perform the login operation for Security Center, storing the token
    that Security Center generates for this session for future queries.
    """
    response = self.raw_query('auth', 'login',
                              data={'username': user, 'password': passwd})
    self._token = response["token"]
    self._user = response
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def credential_add(self, name, cred_type, **options):
    '''
    Adds a new credential into SecurityCenter.  As credentials can be of
    multiple types, the remaining keyword options depend on the type of
    credential being created.

    **Global Options (Required)**
    :param name: Unique name to be associated to this credential
    :param cred_type: The type of credential. Valid values are:
                      'ssh', 'windows', 'snmp', or 'kerberos'
    :type name: string
    :type cred_type: string

    **Windows Credential Options**
    :param username: Account Name
    :param password: Account Password
    :param domain: [Optional] Account Member Domain

    **Unix/SSH Credential Options**
    :param username: Account Name
    :param password: Account Password
    :param privilegeEscalation: [Optional] 'su', 'su+sudo', 'dzdo',
                                'pbrun', 'Cisco \'enable\'', or 'none'
    :param escalationUsername: [Optional] Username to escalate to
                               (su+sudo only)
    :param escalationPassword: [Optional] Password used for escalation
    :param publicKey: [Optional] SSH public RSA/DSA key -- a filename or
                      an open file object
    :param privateKey: [Optional] SSH private RSA/DSA key -- a filename or
                       an open file object
    :param passphrase: [Optional] Passphrase for the RSA/DSA keypair

    **Kerberos Credential Options**
    :param ip: Kerberos Host IP
    :param port: Kerberos Host Port
    :param realm: Kerberos Realm
    :param protocol: Kerberos Protocol

    **SNMP Community String**
    :param communityString: The community string to connect with.
    '''
    # SSH keys must be uploaded first; the API payload then references the
    # uploaded filename rather than the key contents.
    # BUG FIX: the original tested for the misspelled key 'pirvateKey', so
    # private keys were never uploaded.
    if 'privateKey' in options:
        options['privateKey'] = self._upload(options['privateKey'])['filename']
    if 'publicKey' in options:
        options['publicKey'] = self._upload(options['publicKey'])['filename']
    # BUG FIX: 'name' and the credential type are documented as required
    # but were never added to the payload; include them so the credential
    # is created with the requested name/type.
    options['name'] = name
    options['type'] = cred_type
    return self.raw_query("credential", "add", data=options)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def credential_delete_simulate(self, *ids):
    """Show the relationships and dependencies for one or more credentials
    without actually deleting them.

    :param ids: one or more credential ids
    """
    creds = [{"id": str(cred_id)} for cred_id in ids]
    return self.raw_query("credential", "deleteSimulate",
                          data={"credentials": creds})
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def credential_delete(self, *ids):
    """Delete one or more credentials.

    :param ids: one or more credential ids
    """
    creds = [{"id": str(cred_id)} for cred_id in ids]
    return self.raw_query("credential", "delete",
                          data={"credentials": creds})
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plugins(self, plugin_type='all', sort='id', direction='asc', size=1000,
            offset=0, all=True, loops=0, since=None, **filterset):
    """Return a list of plugins with their associated families.

    The server pages results; this keeps requesting pages until the data
    set is exhausted (``all=True``) or the requested number of ``loops``
    has been performed.  ``since`` may be a date/datetime to limit results
    to plugins changed after that moment.
    """
    results = []
    # Base payload; 'offset' is advanced as pages come back.
    payload = {
        'size': size,
        'offset': offset,
        'type': plugin_type,
        'sortField': sort,
        'sortDirection': direction.upper(),
    }
    # At most one filter is honoured; xref-style filters need the field
    # name rewritten to the server's "xrefs:NAME" form.
    if len(filterset) > 0:
        first_key = list(filterset.keys())[0]
        field_name = first_key
        if field_name in self._xrefs:
            field_name = 'xrefs:%s' % field_name.replace('_', '-')
        payload['filterField'] = field_name
        payload['filterString'] = filterset[first_key]
    # A date/datetime 'since' is converted to a UTC epoch timestamp.
    if since is not None and isinstance(since, date):
        payload['since'] = calendar.timegm(since.utctimetuple())
    # Pull pages until done.  This may take a while: at the time this
    # module was developed there were over 55k active plugins and over 7k
    # passive ones.
    while all or loops > 0:
        data = self.raw_query('plugin', 'init', data=payload)
        if not data:
            return []
        results.extend(data['plugins'])
        if len(data['plugins']) < size:
            # A short page means the data set is exhausted.
            all = False
            loops = 0
        else:
            loops -= 1
            payload['offset'] += len(data['plugins'])
    return results
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plugin_counts(self):
    """Return the plugin counts as a dictionary, including last-updated
    information when the server provides it.
    """
    counts = {
        'total': 0,
    }
    data = self.raw_query('plugin', 'init')
    # 'pluginCount' is present on both 4.2 and 4.4.  The per-type counts
    # and 'lastUpdates' are not documented in the API docs but are
    # returned by some releases, so fall back to defaults when absent.
    counts['total'] = data['pluginCount']
    if 'lastUpdates' in data:
        for key in ('active', 'passive', 'compliance', 'custom', 'event'):
            info = data['lastUpdates'].get(key, {})
            if key in data:
                info['count'] = data[key]
            else:
                info['count'] = 0
            counts[key] = info
    return counts
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def ip_info(self, ip, repository_ids=None):
    """Return information about ``ip`` from the specified repositories.

    :param ip: IP address to query
    :param repository_ids: optional list of repository ids to search
    """
    repos = [{'id': rid} for rid in (repository_ids or [])]
    return self.raw_query('vuln', 'getIP',
                          data={'ip': ip, 'repositories': repos})
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def scan_list(self, start_time=None, end_time=None, **kwargs):
    """List scans stored in Security Center in a given time range.

    Times are UNIX timestamps assumed to be UTC; ``datetime`` values pass
    through unchanged.  ``end_time`` defaults to now and ``start_time`` to
    30 days before ``end_time``.

    :param start_time: start of range to filter (date, datetime or int)
    :param end_time: end of range to filter (date, datetime or int)
    :return: list of dictionaries representing scans
    """
    # int(...) raises TypeError for both None and datetime inputs: None
    # falls back to a default, while datetimes are kept as given.
    try:
        end_time = datetime.utcfromtimestamp(int(end_time))
    except TypeError:
        if end_time is None:
            end_time = datetime.utcnow()
    try:
        start_time = datetime.utcfromtimestamp(int(start_time))
    except TypeError:
        if start_time is None:
            start_time = end_time - timedelta(days=30)
    payload = {"startTime": calendar.timegm(start_time.utctimetuple()),
               "endTime": calendar.timegm(end_time.utctimetuple())}
    payload.update(kwargs)
    response = self.raw_query("scanResult", "getRange", data=payload)
    return response["scanResults"]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dashboard_import(self, name, fileobj):
    """Upload a dashboard template to the current user's dashboard tabs.

    UN-DOCUMENTED CALL: This function is not considered stable.
    """
    # Upload first; the import call references the uploaded filename.
    uploaded = self._upload(fileobj)
    payload = {'filename': uploaded['filename'], 'name': name}
    return self.raw_query('dashboard', 'importTab', data=payload)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def report_import(self, name, filename):
    """Upload a report template to the current user's reports.

    UN-DOCUMENTED CALL: This function is not considered stable.
    """
    # Upload first; the import call references the uploaded filename.
    uploaded = self._upload(filename)
    payload = {'filename': uploaded['filename'], 'name': name}
    return self.raw_query('report', 'import', data=payload)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def asset_create(self, name, items, tag='', description='', atype='static'):
    '''Create a new asset list with the defined information.

    UN-DOCUMENTED CALL: This function is not considered stable.

    :param name: asset list name (must be unique)
    :type name: string
    :param items: list of IP addresses, CIDR blocks and network ranges
                  (or DNS names when ``atype='dns'``)
    :type items: list
    :param tag: tag to associate to the asset list
    :type tag: string
    :param description: the asset list description
    :type description: string
    :param atype: asset list type, 'static' or 'dns'
    '''
    payload = {
        'name': name,
        'description': description,
        'type': atype,
        'tags': tag,
    }
    if atype == 'static':
        payload['definedIPs'] = ','.join(items)
    if atype == 'dns':
        # DNS lists use a different type name and a space-separated list.
        payload['type'] = 'dnsname'
        payload['definedDNSNames'] = ' '.join(items)
    return self.raw_query('asset', 'add', data=payload)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def asset_create_combo(self, name, combo, tag='', description=''):
    '''Create a new combination asset list.

    Operands can be either asset list IDs or nested combination dicts.
    AND = intersection, OR = union.

    UN-DOCUMENTED CALL: This function is not considered stable.

    Example::

        combo = {
            'operand1': {
                'operand1': '2',
                'operand2': '2',
                'operation': 'union',
            },
            'operand2': '3',
            'operation': 'intersection'
        }

    :param name: name of the asset list
    :type name: string
    :param combo: combination definition
    :type combo: dict
    :param tag: tag of the asset list
    :type tag: string
    :param description: description of the asset list
    :type description: string
    '''
    payload = {
        'name': name,
        'description': description,
        'type': 'combination',
        'combinations': combo,
    }
    return self.raw_query('asset', 'add', data=payload)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def risk_rule(self, rule_type, rule_value, port, proto, plugin_id,
              repo_ids, comment='', expires='-1', severity=None):
    '''Create an accept-risk or recast-risk rule from the given data.

    UN-DOCUMENTED CALL: This function is not considered stable.

    :param rule_type: Valid options: 'ip', 'asset', 'all'.
    :type rule_type: string
    :param rule_value: IP addresses, or asset ID when the type is not 'all'.
    :type rule_value: string
    :param port: port number
    :type port: string
    :param proto: protocol ID or 'any'. TCP = 6, UDP = 17, ICMP = 1
    :type proto: string
    :param plugin_id: the plugin ID
    :type plugin_id: string
    :param repo_ids: list of repository ids the rule pertains to
    :type repo_ids: list
    :param comment: general purpose comment field
    :type comment: string
    :param expires: epoch time for expiration (accept rules only)
    :type expires: string
    :param severity: new severity rating; when given, a recast-risk rule
                     is created instead of an accept-risk rule
    '''
    payload = {
        'hostType': rule_type,
        'port': port,
        'comments': comment,
        'protocol': proto,
        'pluginID': plugin_id,
        'repIDs': [{'id': rid} for rid in repo_ids],
    }
    if rule_type != 'all':
        payload['hostValue'] = rule_value
    if severity is None:
        # No severity -> accept the risk outright, with an expiration.
        payload['expires'] = expires
        return self.raw_query('acceptRiskRule', 'add', data=payload)
    # Otherwise recast the finding to the requested severity level.
    sevlevels = {'info': 0, 'low': 1, 'medium': 2, 'high': 3, 'critical': 4}
    payload['severity'] = sevlevels[severity]
    return self.raw_query('recastRiskRule', 'add', data=payload)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def group_add(self, name, restrict, repos, lces=None, assets=None,
              queries=None, policies=None, dashboards=None,
              credentials=None, description=''):
    '''Create a new group.

    :param name: group name
    :param restrict: list of asset ids the group is restricted to
    :param repos: list of repository ids available to the group
    :param lces: [Optional] list of LCE ids
    :param assets: [Optional] list of asset list ids
    :param queries: [Optional] list of query ids
    :param policies: [Optional] list of policy ids
    :param dashboards: [Optional] list of dashboard tab ids
    :param credentials: [Optional] list of credential ids
    :param description: [Optional] group description
    '''
    # FIX: the optional id lists previously used mutable default
    # arguments ([]); use None sentinels and normalize here instead.
    def _ids(values):
        # Wrap each id in the {'id': ...} shape the API expects.
        return [{'id': i} for i in (values or [])]
    return self.raw_query('group', 'add', data={
        'lces': _ids(lces),
        'assets': _ids(assets),
        'queries': _ids(queries),
        'policies': _ids(policies),
        'dashboardTabs': _ids(dashboards),
        'credentials': _ids(credentials),
        'repositories': _ids(repos),
        'definingAssets': _ids(restrict),
        'name': name,
        'description': description,
        'users': [],
        'context': ''
    })
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def get_geo_info(filename, band=1):
    '''Return metadata for a raster data set.

    Returns a tuple of (no-data value, x size, y size, GeoTransform,
    spatial reference, GDAL data type name) for the requested band.
    '''
    dataset = gdal.Open(filename, GA_ReadOnly)
    raster_band = dataset.GetRasterBand(band)
    ndv = raster_band.GetNoDataValue()
    width = dataset.RasterXSize
    height = dataset.RasterYSize
    transform = dataset.GetGeoTransform()
    srs = osr.SpatialReference()
    srs.ImportFromWkt(dataset.GetProjectionRef())
    # Report the data type by its GDAL name (e.g. 'Float32').
    dtype_name = gdal.GetDataTypeName(raster_band.DataType)
    return ndv, width, height, transform, srs, dtype_name
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def create_geotiff(name, Array, driver, ndv, xsize, ysize, geot, projection, datatype, band=1):
    '''
    Create a new GeoTIFF named ``name + '.tif'`` from ``Array`` and return
    the new filename.

    :param name: output filename without the '.tif' extension
    :param Array: data to write; NaNs are replaced with ``ndv`` in place
    :param driver: GDAL driver used to create the file
    :param ndv: no-data value recorded in the output band
    :param xsize/ysize: raster dimensions
    :param geot: GDAL GeoTransform tuple
    :param projection: osr.SpatialReference for the output
    :param datatype: GDAL type code (int) or a type name such as
                     'Float32' or 'gdal.GDT_Float32'
    :param band: band number to write (default 1)
    '''
    if not isinstance(datatype, (int, np.integer)):
        # FIX: `np.int` was removed in NumPy 1.24 -- test against builtin
        # int (and numpy integer scalars) instead.
        # FIX: a string already carrying the 'gdal.GDT_' prefix was
        # previously left as a string and passed straight to Create();
        # normalize it and resolve the GDAL constant via getattr (safer
        # than eval on a string).
        if datatype.startswith('gdal.GDT_'):
            datatype = datatype[len('gdal.GDT_'):]
        datatype = getattr(gdal, 'GDT_' + datatype)
    newfilename = name + '.tif'
    # Set nans to the original No Data Value
    Array[np.isnan(Array)] = ndv
    # Set up the dataset with a single band.
    DataSet = driver.Create(newfilename, xsize, ysize, 1, datatype)
    DataSet.SetGeoTransform(geot)
    DataSet.SetProjection(projection.ExportToWkt())
    # Write the array
    DataSet.GetRasterBand(band).WriteArray(Array)
    DataSet.GetRasterBand(band).SetNoDataValue(ndv)
    return newfilename
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_tiff(file):
    """Load a geotiff raster, keeping no-data values via a masked array.

    Usage:
        data = load_tiff(file)
    """
    # Only the no-data value is needed from the metadata here.
    ndv = get_geo_info(file)[0]
    raw = gdalnumeric.LoadFile(file)
    return np.ma.masked_array(raw, mask=raw == ndv, fill_value=ndv)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def from_file(filename, **kwargs):
    """Create a GeoRaster object from a raster file on disk."""
    ndv, xsize, ysize, geot, projection, datatype = get_geo_info(filename, **kwargs)
    raw = gdalnumeric.LoadFile(filename, **kwargs)
    # Mask out the no-data cells so statistics ignore them.
    masked = np.ma.masked_array(raw, mask=raw == ndv, fill_value=ndv)
    return GeoRaster(masked, geot, nodata_value=ndv,
                     projection=projection, datatype=datatype)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copy(self):
    """Return a copy of this GeoRaster (the underlying array is copied)."""
    duplicate = self.raster.copy()
    return GeoRaster(duplicate, self.geot,
                     nodata_value=self.nodata_value,
                     projection=self.projection,
                     datatype=self.datatype)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def clip(self, shp, keep=False, *args, **kwargs):
    '''
    Clip raster using shape, where shape is either a GeoPandas DataFrame, shapefile,
    or some other geometry format used by python-raster-stats
    Returns list of GeoRasters or Pandas DataFrame with GeoRasters and additional information
    Usage:
    clipped = geo.clip(shape, keep=False)
    where:
    keep: Boolean (Default False), returns Georasters and Geometry information
    '''
    # zonal_stats with raster_out=True returns, per input geometry, the
    # clipped mini raster along with its affine transform and nodata value.
    # geojson_out=keep controls whether results arrive nested under a
    # 'properties' key (GeoJSON-like records).
    df = pd.DataFrame(zonal_stats(shp, self.raster, nodata=self.nodata_value, all_touched=True,
                                  raster_out=True, affine=Affine.from_gdal(*self.geot),
                                  geojson_out=keep,))
    if keep:
        # Wrap each mini raster in a GeoRaster that inherits this raster's
        # projection and datatype.
        df['GeoRaster'] = df.properties.apply(lambda x: GeoRaster(x['mini_raster_array'],
                                              Affine.to_gdal(x['mini_raster_affine']),
                                              nodata_value=x['mini_raster_nodata'],
                                              projection=self.projection,
                                              datatype=self.datatype))
        # Keep only the property columns that also exist on the input
        # shape (assumes shp is a GeoPandas DataFrame with .columns --
        # TODO confirm for other geometry inputs).
        cols = list(set([i for i in df.properties[0].keys()]).intersection(set(shp.columns)))
        df2 = pd.DataFrame([df.properties.apply(lambda x: x[i]) for i in cols
                            ]).T.merge(df[['GeoRaster']], left_index=True, right_index=True,)
        df2.columns = cols+['GeoRaster']
        # Re-attach the geometry id emitted by geojson_out and use it as
        # the index of the returned frame.
        df2 = df2.merge(df[['id']], left_index=True, right_index=True)
        df2.set_index('id', inplace=True)
        return df2
    else:
        # Without keep, return just the array of clipped GeoRasters.
        df['GeoRaster'] = df.apply(lambda x: GeoRaster(x.mini_raster_array,
                                   Affine.to_gdal(x.mini_raster_affine),
                                   nodata_value=x.mini_raster_nodata,
                                   projection=self.projection,
                                   datatype=self.datatype), axis=1)
        return df['GeoRaster'].values
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pysal_Gamma(self, **kwargs):
    """Compute the Gamma Index of Spatial Autocorrelation for this
    GeoRaster.

    Usage:
        geo.pysal_Gamma(permutations=1000, rook=True, operation='c')

    Keyword arguments are passed to raster_weights() and pysal.Gamma.
    See help(gr.raster_weights), help(pysal.Gamma) for options.
    """
    # Build the spatial weights lazily on first use.
    if self.weights is None:
        self.raster_weights(**kwargs)
    values = self.raster.flatten()
    values = values[values.mask == False]
    self.Gamma = pysal.Gamma(values, self.weights, **kwargs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pysal_Join_Counts(self, **kwargs):
    """Compute join count statistics for this GeoRaster.

    Usage:
        geo.pysal_Join_Counts(permutations=1000, rook=True)

    Keyword arguments are passed to raster_weights() and pysal.Join_Counts.
    See help(gr.raster_weights), help(pysal.Join_Counts) for options.
    """
    # Build the spatial weights lazily on first use.
    if self.weights is None:
        self.raster_weights(**kwargs)
    values = self.raster.flatten()
    values = values[values.mask == False]
    self.Join_Counts = pysal.Join_Counts(values, self.weights, **kwargs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pysal_Moran(self, **kwargs):
    """Compute Moran's I measure of global spatial autocorrelation for
    this GeoRaster.

    Usage:
        geo.pysal_Moran(permutations=1000, rook=True)

    Keyword arguments are passed to raster_weights() and pysal.Moran.
    See help(gr.raster_weights), help(pysal.Moran) for options.
    """
    # Build the spatial weights lazily on first use.
    if self.weights is None:
        self.raster_weights(**kwargs)
    values = self.raster.flatten()
    values = values[values.mask == False]
    self.Moran = pysal.Moran(values, self.weights, **kwargs)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pysal_Moran_Local(self, **kwargs):
    """Compute Local Moran's I measure of local spatial autocorrelation
    for this GeoRaster.

    Usage:
        geo.pysal_Moran_Local(permutations=1000, rook=True)

    Keyword arguments are passed to raster_weights() and pysal.Moran_Local.
    See help(gr.raster_weights), help(pysal.Moran_Local) for options.
    """
    # Build the spatial weights lazily on first use.
    if self.weights is None:
        self.raster_weights(**kwargs)
    values = self.raster.flatten()
    values = values[values.mask == False]
    self.Moran_Local = pysal.Moran_Local(values, self.weights, **kwargs)
    # Map the vector-shaped results back onto the raster grid so the
    # statistics can be visualized in place.
    for attr_name in self.Moran_Local.__dict__.keys():
        value = getattr(self.Moran_Local, attr_name)
        # NOTE(review): this mirrors the original operator precedence
        # `A or (B and C)` -- masked arrays are remapped regardless of
        # dimensionality, plain ndarrays only when 1-D.  Confirm whether
        # `(A or B) and C` was actually intended.
        is_masked = isinstance(value, np.ma.masked_array)
        is_flat_array = isinstance(value, np.ndarray) and len(value.shape) == 1
        if is_masked or is_flat_array:
            setattr(self.Moran_Local, attr_name, self.map_vector(value))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mcp(self, *args, **kwargs):
    """Set up an MCP_Geometric object (from skimage) for optimal travel
    time computations over this raster."""
    # Treat the raster values as a cost surface for the graph search.
    cost_graph = graph.MCP_Geometric(self.raster, *args, **kwargs)
    self.mcp_cost = cost_graph
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def notify(self, method, params=None):
    """Send a JSON RPC notification to the client.

    Args:
        method (str): The method name of the notification to send
        params (any): The payload of the notification
    """
    log.debug('Sending notification: %s %s', method, params)
    body = {
        'jsonrpc': JSONRPC_VERSION,
        'method': method,
    }
    # Notifications carry no id; params is optional per the spec.
    if params is not None:
        body['params'] = params
    self._consumer(body)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def request(self, method, params=None):
    """Send a JSON RPC request to the client.

    Args:
        method (str): The method name of the message to send
        params (any): The payload of the message

    Returns:
        Future that will resolve once a response has been received
    """
    msg_id = self._id_generator()
    log.debug('Sending request with id %s: %s %s', msg_id, method, params)
    body = {
        'jsonrpc': JSONRPC_VERSION,
        'id': msg_id,
        'method': method,
    }
    if params is not None:
        body['params'] = params
    future = futures.Future()
    # Cancelling the returned future notifies the client so it can abort
    # the corresponding work.
    future.add_done_callback(self._cancel_callback(msg_id))
    self._server_request_futures[msg_id] = future
    self._consumer(body)
    return future
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cancel_callback(self, request_id):
    """Construct a cancellation callback for the given request ID."""
    def callback(future):
        if not future.cancelled():
            return
        # Tell the client to stop working on the cancelled request.
        self.notify(CANCEL_METHOD, {'id': request_id})
        # NOTE(review): set_exception on an already-cancelled future may
        # raise depending on the Future implementation in use -- confirm
        # against the `futures` module this file imports.
        future.set_exception(JsonRpcRequestCancelled())
    return callback
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def consume(self, message):
    """Consume a JSON RPC message from the client.

    Messages without an 'id' are notifications; messages without a
    'method' are responses to our own requests; everything else is a
    request that must be answered (or answered with an error).

    Args:
        message (dict): The JSON RPC message sent by the client
    """
    if 'jsonrpc' not in message or message['jsonrpc'] != JSONRPC_VERSION:
        # FIX: Logger.warn is a deprecated alias (removed in Python 3.13);
        # use Logger.warning.
        log.warning("Unknown message type %s", message)
        return
    if 'id' not in message:
        log.debug("Handling notification from client %s", message)
        self._handle_notification(message['method'], message.get('params'))
    elif 'method' not in message:
        log.debug("Handling response from client %s", message)
        self._handle_response(message['id'], message.get('result'), message.get('error'))
    else:
        try:
            log.debug("Handling request from client %s", message)
            self._handle_request(message['id'], message['method'], message.get('params'))
        except JsonRpcException as e:
            log.exception("Failed to handle request %s", message['id'])
            self._consumer({
                'jsonrpc': JSONRPC_VERSION,
                'id': message['id'],
                'error': e.to_dict()
            })
        except Exception:  # pylint: disable=broad-except
            log.exception("Failed to handle request %s", message['id'])
            self._consumer({
                'jsonrpc': JSONRPC_VERSION,
                'id': message['id'],
                'error': JsonRpcInternalError.of(sys.exc_info()).to_dict()
            })
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_notification(self, method, params):
    """Handle a notification from the client.

    Cancel notifications are routed specially; other methods are looked
    up in the dispatcher.  A handler may return a callable, which is then
    executed asynchronously on the executor service.
    """
    if method == CANCEL_METHOD:
        self._handle_cancel_notification(params['id'])
        return
    try:
        handler = self._dispatcher[method]
    except KeyError:
        # FIX: Logger.warn is a deprecated alias (removed in Python 3.13);
        # use Logger.warning.
        log.warning("Ignoring notification for unknown method %s", method)
        return
    try:
        handler_result = handler(params)
    except Exception:  # pylint: disable=broad-except
        log.exception("Failed to handle notification %s: %s", method, params)
        return
    if callable(handler_result):
        # Async handler: run it on the executor and log the outcome.
        log.debug("Executing async notification handler %s", handler_result)
        notification_future = self._executor_service.submit(handler_result)
        notification_future.add_done_callback(self._notification_callback(method, params))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _notification_callback(method, params):
    """Construct a done-callback that logs the outcome of an async
    notification handler."""
    def callback(future):
        try:
            future.result()
        except Exception:  # pylint: disable=broad-except
            log.exception("Failed to handle async notification %s %s", method, params)
        else:
            log.debug("Successfully handled async notification %s %s", method, params)
    return callback
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_cancel_notification(self, msg_id):
    """Handle a cancel notification from the client.

    Args:
        msg_id: id of the in-flight request the client wants cancelled
    """
    request_future = self._client_request_futures.pop(msg_id, None)
    if not request_future:
        # FIX: Logger.warn is a deprecated alias (removed in Python 3.13);
        # use Logger.warning.
        log.warning("Received cancel notification for unknown message id %s", msg_id)
        return
    # Will only work if the request hasn't started executing
    if request_future.cancel():
        log.debug("Cancelled request with id %s", msg_id)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_request(self, msg_id, method, params):
    """Handle a request from the client.

    Raises:
        JsonRpcMethodNotFound: when no handler is registered for method.
    """
    try:
        handler = self._dispatcher[method]
    except KeyError:
        raise JsonRpcMethodNotFound.of(method)
    outcome = handler(params)
    if callable(outcome):
        # The handler returned a callable: run it on the executor and
        # reply from the done-callback once it completes.
        log.debug("Executing async request handler %s", outcome)
        request_future = self._executor_service.submit(outcome)
        self._client_request_futures[msg_id] = request_future
        request_future.add_done_callback(self._request_callback(msg_id))
    else:
        # Synchronous handler: reply immediately with the result.
        log.debug("Got result from synchronous request handler: %s", outcome)
        self._consumer({
            'jsonrpc': JSONRPC_VERSION,
            'id': msg_id,
            'result': outcome
        })
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _request_callback(self, request_id):
    """Construct a done-callback that sends the response for the given
    request ID once its future completes."""
    def callback(future):
        # The request is finished (or cancelled); drop it from the map.
        self._client_request_futures.pop(request_id, None)
        if future.cancelled():
            # NOTE(review): sets the cancellation exception so result()
            # below surfaces it and an error response is emitted --
            # behavior depends on the Future implementation; confirm.
            future.set_exception(JsonRpcRequestCancelled())
        response = {
            'jsonrpc': JSONRPC_VERSION,
            'id': request_id,
        }
        try:
            response['result'] = future.result()
        except JsonRpcException as e:
            log.exception("Failed to handle request %s", request_id)
            response['error'] = e.to_dict()
        except Exception:  # pylint: disable=broad-except
            log.exception("Failed to handle request %s", request_id)
            response['error'] = JsonRpcInternalError.of(sys.exc_info()).to_dict()
        self._consumer(response)
    return callback
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_response(self, msg_id, result=None, error=None):
    """Handle a response from the client.

    Args:
        msg_id: id of the request this response answers
        result: response payload, when the request succeeded
        error: error object, when the request failed
    """
    request_future = self._server_request_futures.pop(msg_id, None)
    if not request_future:
        # FIX: Logger.warn is a deprecated alias (removed in Python 3.13);
        # use Logger.warning.
        log.warning("Received response to unknown message id %s", msg_id)
        return
    if error is not None:
        log.debug("Received error response to message %s: %s", msg_id, error)
        request_future.set_exception(JsonRpcException.from_dict(error))
        # BUG FIX: without this return, execution fell through and called
        # set_result() on a future that already had an exception set,
        # raising InvalidStateError.
        return
    log.debug("Received result for message %s: %s", msg_id, result)
    request_future.set_result(result)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def listen(self, message_consumer):
    """Blocking call to listen for messages on the rfile.

    Args:
        message_consumer (fn): function that is passed each message as it
            is read off the socket.
    """
    while not self._rfile.closed:
        raw = self._read_message()
        # A None body means the stream ended mid-message (or EOF).
        if raw is None:
            break
        try:
            message_consumer(json.loads(raw.decode('utf-8')))
        except ValueError:
            # Skip unparsable payloads and keep listening.
            log.exception("Failed to parse JSON message %s", raw)
            continue
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _read_message(self):
"""Reads the contents of a message. Returns: body of message if parsable else None """
|
line = self._rfile.readline()
if not line:
return None
content_length = self._content_length(line)
# Blindly consume all header lines
while line and line.strip():
line = self._rfile.readline()
if not line:
return None
# Grab the body
return self._rfile.read(content_length)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _content_length(line):
"""Extract the content length from an input line."""
|
if line.startswith(b'Content-Length: '):
_, value = line.split(b'Content-Length: ')
value = value.strip()
try:
return int(value)
except ValueError:
raise ValueError("Invalid Content-Length header: {}".format(value))
return None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def hostapi_info(index=None):
"""Return a generator with information about each host API. If index is given, only one dictionary for the given host API is returned. """
|
if index is None:
return (hostapi_info(i) for i in range(_pa.Pa_GetHostApiCount()))
else:
info = _pa.Pa_GetHostApiInfo(index)
if not info:
raise RuntimeError("Invalid host API")
assert info.structVersion == 1
return {'name': ffi.string(info.name).decode(errors='ignore'),
'default_input_device': info.defaultInputDevice,
'default_output_device': info.defaultOutputDevice}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def device_info(index=None):
"""Return a generator with information about each device. If index is given, only one dictionary for the given device is returned. """
|
if index is None:
return (device_info(i) for i in range(_pa.Pa_GetDeviceCount()))
else:
info = _pa.Pa_GetDeviceInfo(index)
if not info:
raise RuntimeError("Invalid device")
assert info.structVersion == 2
if 'DirectSound' in hostapi_info(info.hostApi)['name']:
enc = 'mbcs'
else:
enc = 'utf-8'
return {'name': ffi.string(info.name).decode(encoding=enc,
errors='ignore'),
'hostapi': info.hostApi,
'max_input_channels': info.maxInputChannels,
'max_output_channels': info.maxOutputChannels,
'default_low_input_latency': info.defaultLowInputLatency,
'default_low_output_latency': info.defaultLowOutputLatency,
'default_high_input_latency': info.defaultHighInputLatency,
'default_high_output_latency': info.defaultHighOutputLatency,
'default_samplerate': info.defaultSampleRate}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_stream_parameters(kind, device, channels, dtype, latency, samplerate):
"""Generate PaStreamParameters struct."""
|
if device is None:
if kind == 'input':
device = _pa.Pa_GetDefaultInputDevice()
elif kind == 'output':
device = _pa.Pa_GetDefaultOutputDevice()
info = device_info(device)
if channels is None:
channels = info['max_' + kind + '_channels']
dtype = np.dtype(dtype)
try:
sample_format = _np2pa[dtype]
except KeyError:
raise ValueError("Invalid " + kind + " sample format")
if samplerate is None:
samplerate = info['default_samplerate']
parameters = ffi.new(
"PaStreamParameters*",
(device, channels, sample_format, latency, ffi.NULL))
return parameters, dtype, samplerate
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _frombuffer(ptr, frames, channels, dtype):
"""Create NumPy array from a pointer to some memory."""
|
framesize = channels * dtype.itemsize
data = np.frombuffer(ffi.buffer(ptr, frames * framesize), dtype=dtype)
data.shape = -1, channels
return data
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start(self):
"""Commence audio processing. If successful, the stream is considered active. """
|
err = _pa.Pa_StartStream(self._stream)
if err == _pa.paStreamIsNotStopped:
return
self._handle_error(err)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stop(self):
"""Terminate audio processing. This waits until all pending audio buffers have been played before it returns. If successful, the stream is considered inactive. """
|
err = _pa.Pa_StopStream(self._stream)
if err == _pa.paStreamIsStopped:
return
self._handle_error(err)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def abort(self):
"""Terminate audio processing immediately. This does not wait for pending audio buffers. If successful, the stream is considered inactive. """
|
err = _pa.Pa_AbortStream(self._stream)
if err == _pa.paStreamIsStopped:
return
self._handle_error(err)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read(self, frames, raw=False):
"""Read samples from an input stream. The function does not return until the required number of frames has been read. This may involve waiting for the operating system to supply the data. If raw data is requested, the raw cffi data buffer is returned. Otherwise, a numpy array of the appropriate dtype with one column per channel is returned. """
|
channels, _ = _split(self.channels)
dtype, _ = _split(self.dtype)
data = ffi.new("signed char[]", channels * dtype.itemsize * frames)
self._handle_error(_pa.Pa_ReadStream(self._stream, data, frames))
if not raw:
data = np.frombuffer(ffi.buffer(data), dtype=dtype)
data.shape = frames, channels
return data
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(self, data):
"""Write samples to an output stream. As much as one blocksize of audio data will be played without blocking. If more than one blocksize was provided, the function will only return when all but one blocksize has been played. Data will be converted to a numpy matrix. Multichannel data should be provided as a (frames, channels) matrix. If the data is provided as a 1-dim array, it will be treated as mono data and will be played on all channels simultaneously. If the data is provided as a 2-dim matrix and fewer tracks are provided than channels, silence will be played on the missing channels. Similarly, if more tracks are provided than there are channels, the extraneous channels will not be played. """
|
frames = len(data)
_, channels = _split(self.channels)
_, dtype = _split(self.dtype)
# Coerce the input into an ndarray of the stream's sample dtype.
if (not isinstance(data, np.ndarray) or data.dtype != dtype):
    data = np.array(data, dtype=dtype)
if len(data.shape) == 1:
    # play mono signals on all channels
    data = np.tile(data, (channels, 1)).T
if data.shape[1] > channels:
    # Drop extraneous tracks beyond the stream's channel count.
    data = data[:, :channels]
# Note: data.shape[0] == frames at this point (tiling/clipping never
# changes the row count), so this per-axis test matches the intent of
# the previous lexicographic tuple comparison exactly.
if data.shape[0] < frames or data.shape[1] < channels:
    # if less data is available than requested, pad with zeros.
    tmp = data
    data = np.zeros((frames, channels), dtype=dtype)
    data[:tmp.shape[0], :tmp.shape[1]] = tmp
# tostring() was deprecated and then removed from NumPy;
# tobytes() is its exact drop-in replacement.
data = data.ravel().tobytes()
err = _pa.Pa_WriteStream(self._stream, data, frames)
self._handle_error(err)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_shell(self,cfg_file,*args,**options):
"""Command 'supervisord shell' runs the interactive command shell."""
|
args = ("--interactive",) + args
return supervisorctl.main(("-c",cfg_file) + args)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_getconfig(self,cfg_file,*args,**options):
"""Command 'supervisor getconfig' prints merged config to stdout."""
|
# This subcommand accepts no positional arguments.
if args:
    raise CommandError("supervisor getconfig takes no arguments")
# cfg_file is a file-like object holding the merged configuration text.
print cfg_file.read()
return 0
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _handle_autoreload(self,cfg_file,*args,**options):
"""Command 'supervisor autoreload' watches for code changes. This command provides a simulation of the Django dev server's auto-reloading mechanism that will restart all supervised processes. It's not quite as accurate as Django's autoreloader because it runs in a separate process, so it doesn't know the precise set of modules that have been loaded. Instead, it tries to watch all python files that are "nearby" the files loaded at startup by Django. """
|
if args:
raise CommandError("supervisor autoreload takes no arguments")
live_dirs = self._find_live_code_dirs()
reload_progs = self._get_autoreload_programs(cfg_file)
def autoreloader():
"""
Forks a subprocess to make the restart call.
Otherwise supervisord might kill us and cancel the restart!
"""
if os.fork() == 0:
sys.exit(self.handle("restart", *reload_progs, **options))
# Call the autoreloader callback whenever a .py file changes.
# To prevent thrashing, limit callbacks to one per second.
handler = CallbackModifiedHandler(callback=autoreloader,
repeat_delay=1,
patterns=AUTORELOAD_PATTERNS,
ignore_patterns=AUTORELOAD_IGNORE,
ignore_directories=True)
# Try to add watches using the platform-specific observer.
# If this fails, print a warning and fall back to the PollingObserver.
# This will avoid errors with e.g. too many inotify watches.
from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver
observer = None
for ObserverCls in (Observer, PollingObserver):
observer = ObserverCls()
try:
for live_dir in set(live_dirs):
observer.schedule(handler, live_dir, True)
break
except Exception:
print>>sys.stderr, "COULD NOT WATCH FILESYSTEM USING"
print>>sys.stderr, "OBSERVER CLASS: ", ObserverCls
traceback.print_exc()
observer.start()
observer.stop()
# Fail out if none of the observers worked.
if observer is None:
print>>sys.stderr, "COULD NOT WATCH FILESYSTEM"
return 1
# Poll if we have an observer.
# TODO: Is this sleep necessary? Or will it suffice
# to block indefinitely on something and wait to be killed?
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
return 0
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_autoreload_programs(self,cfg_file):
"""Get the set of programs to auto-reload when code changes. Such programs will have autoreload=true in their config section. This can be affected by config file sections or command-line arguments, so we need to read it out of the merged config. """
|
cfg = RawConfigParser()
cfg.readfp(cfg_file)
reload_progs = []
for section in cfg.sections():
if section.startswith("program:"):
try:
if cfg.getboolean(section,"autoreload"):
reload_progs.append(section.split(":",1)[1])
except NoOptionError:
pass
return reload_progs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_live_code_dirs(self):
"""Find all directories in which we might have live python code. This walks all of the currently-imported modules and adds their containing directory to the list of live dirs. After normalization and de-duplication, we get a pretty good approximation of the directories on sys.path that are actively in use. """
|
live_dirs = []
for mod in sys.modules.values():
# Get the directory containing that module.
# This is deliberately casting a wide net.
try:
dirnm = os.path.dirname(mod.__file__)
except AttributeError:
continue
# Normalize it for comparison purposes.
dirnm = os.path.realpath(os.path.abspath(dirnm))
if not dirnm.endswith(os.sep):
dirnm += os.sep
# Check that it's not an egg or some other wierdness
if not os.path.isdir(dirnm):
continue
# If it's a subdir of one we've already found, ignore it.
for dirnm2 in live_dirs:
if dirnm.startswith(dirnm2):
break
else:
# Remove any ones we've found that are subdirs of it.
live_dirs = [dirnm2 for dirnm2 in live_dirs\
if not dirnm2.startswith(dirnm)]
live_dirs.append(dirnm)
return live_dirs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_config(data,ctx):
"""Render the given config data using Django's template system. This function takes a config data string and a dict of context variables, renders the data through Django's template system, and returns the result. """
|
djsupervisor_tags.current_context = ctx
data = "{% load djsupervisor_tags %}" + data
t = template.Template(data)
c = template.Context(ctx)
return t.render(c).encode("ascii")
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_config_from_options(**options):
"""Get config file fragment reflecting command-line options."""
|
data = []
# Set whether or not to daemonize.
# Unlike supervisord, our default is to stay in the foreground.
data.append("[supervisord]\n")
if options.get("daemonize",False):
data.append("nodaemon=false\n")
else:
data.append("nodaemon=true\n")
if options.get("pidfile",None):
data.append("pidfile=%s\n" % (options["pidfile"],))
if options.get("logfile",None):
data.append("logfile=%s\n" % (options["logfile"],))
# Set which programs to launch automatically on startup.
for progname in options.get("launch",None) or []:
data.append("[program:%s]\nautostart=true\n" % (progname,))
for progname in options.get("nolaunch",None) or []:
data.append("[program:%s]\nautostart=false\n" % (progname,))
# Set which programs to include/exclude from the config
for progname in options.get("include",None) or []:
data.append("[program:%s]\nexclude=false\n" % (progname,))
for progname in options.get("exclude",None) or []:
data.append("[program:%s]\nexclude=true\n" % (progname,))
# Set which programs to autoreload when code changes.
# When this option is specified, the default for all other
# programs becomes autoreload=false.
if options.get("autoreload",None):
data.append("[program:autoreload]\nexclude=false\nautostart=true\n")
data.append("[program:__defaults__]\nautoreload=false\n")
for progname in options["autoreload"]:
data.append("[program:%s]\nautoreload=true\n" % (progname,))
# Set whether to use the autoreloader at all.
if options.get("noreload",False):
data.append("[program:autoreload]\nexclude=true\n")
return "".join(data)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def guess_project_dir():
"""Find the top-level Django project directory. This function guesses the top-level Django project directory based on the current environment. It looks for the module containing the currently-active settings module, in both pre-1.4 and post-1.4 layouts. """
|
projname = settings.SETTINGS_MODULE.split(".",1)[0]
projmod = import_module(projname)
projdir = os.path.dirname(projmod.__file__)
# For Django 1.3 and earlier, the manage.py file was located
# in the same directory as the settings file.
if os.path.isfile(os.path.join(projdir,"manage.py")):
return projdir
# For Django 1.4 and later, the manage.py file is located in
# the directory *containing* the settings file.
projdir = os.path.abspath(os.path.join(projdir, os.path.pardir))
if os.path.isfile(os.path.join(projdir,"manage.py")):
return projdir
msg = "Unable to determine the Django project directory;"\
" use --project-dir to specify it"
raise RuntimeError(msg)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_if_missing(cfg,section,option,value):
"""If the given option is missing, set to the given value."""
|
try:
cfg.get(section,option)
except NoSectionError:
cfg.add_section(section)
cfg.set(section,option,value)
except NoOptionError:
cfg.set(section,option,value)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rerender_options(options):
"""Helper function to re-render command-line options. This assumes that command-line options use the same name as their key in the options dictionary. """
|
args = []
for name,value in options.iteritems():
name = name.replace("_","-")
if value is None:
pass
elif isinstance(value,bool):
if value:
args.append("--%s" % (name,))
elif isinstance(value,list):
for item in value:
args.append("--%s=%s" % (name,item))
else:
args.append("--%s=%s" % (name,value))
return " ".join(args)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def login(self, email=None, password=None, user=None):
""" Logs the user in and setups the header with the private token :param email: Gitlab user Email :param user: Gitlab username :param password: Gitlab user password :return: True if login successful :raise: HttpError :raise: ValueError """
|
if user is not None:
data = {'login': user, 'password': password}
elif email is not None:
data = {'email': email, 'password': password}
else:
raise ValueError('Neither username nor email provided to login')
self.headers = {'connection': 'close'}
response = self.post('/session', **data)
self.token = response['private_token']
self.headers = {'PRIVATE-TOKEN': self.token,
'connection': 'close'}
return response
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getuser(self, user_id):
""" Get info for a user identified by id :param user_id: id of the user :return: False if not found, a dictionary if found """
|
request = requests.get(
'{0}/{1}'.format(self.users_url, user_id),
headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
if request.status_code == 200:
return request.json()
else:
return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deleteuser(self, user_id):
""" Deletes a user. Available only for administrators. This is an idempotent function, calling this function for a non-existent user id still returns a status code 200 OK. The JSON response differs if the user was actually deleted or not. In the former the user is returned and in the latter not. .. warning:: Warning this is being deprecated please use :func:`gitlab.Gitlab.delete_user` :param user_id: The ID of the user :return: True if it deleted, False if it couldn't """
|
deleted = self.delete_user(user_id)
if deleted is False:
return False
else:
return True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def currentuser(self):
""" Returns the current user parameters. The current user is linked to the secret token :return: a list with the current user properties """
|
request = requests.get(
'{0}/api/v3/user'.format(self.host),
headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)
return request.json()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.