text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_aws_session(account):
    """Return a boto3 Session for the account passed in the first argument.

    Args:
        account (:obj:`Account`): Account to create the session object for

    Returns:
        :obj:`boto3:boto3.session.Session`

    Raises:
        InquisitorError: If `account` is not an AWSAccount instance
    """
    from cloud_inquisitor.config import dbconfig
    from cloud_inquisitor.plugins.types.accounts import AWSAccount

    if not isinstance(account, AWSAccount):
        raise InquisitorError('Non AWSAccount passed to get_aws_session, got {}'.format(account.__class__.__name__))

    # If no keys are on supplied for the account, use sts.assume_role instead
    session = get_local_aws_session()
    if session.get_credentials().method in ['iam-role', 'env', 'explicit']:
        # Credentials already come from an instance profile, the environment or
        # explicit keys, so STS can be called directly
        sts = session.client('sts')
    else:
        # If we are not running on an EC2 instance, assume the instance role
        # first, then assume the remote role
        temp_sts = session.client('sts')
        audit_sts_role = temp_sts.assume_role(
            RoleArn=app_config.aws_api.instance_role_arn,
            RoleSessionName='inquisitor'
        )
        sts = boto3.session.Session(
            audit_sts_role['Credentials']['AccessKeyId'],
            audit_sts_role['Credentials']['SecretAccessKey'],
            audit_sts_role['Credentials']['SessionToken']
        ).client('sts')

    # Assume the per-account audit role in the target account; role name is
    # configurable, defaulting to cinq_role
    role = sts.assume_role(
        RoleArn='arn:aws:iam::{}:role/{}'.format(
            account.account_number,
            dbconfig.get('role_name', default='cinq_role')
        ),
        RoleSessionName='inquisitor'
    )

    sess = boto3.session.Session(
        role['Credentials']['AccessKeyId'],
        role['Credentials']['SecretAccessKey'],
        role['Credentials']['SessionToken']
    )

    return sess
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_aws_regions(*, force=False):
    """Load a list of AWS regions from the AWS static data.

    Args:
        force (`bool`): Force fetch list of regions even if we already have a
            cached version

    Returns:
        :obj:`list` of `str`
    """
    from cloud_inquisitor.config import dbconfig
    global __regions

    if force or not __regions:
        logger.debug('Loading list of AWS regions from static data')
        data = requests.get('https://ip-ranges.amazonaws.com/ip-ranges.json').json()
        # Drop unwanted regions, case-insensitively (defaults exclude China,
        # GovCloud and the GLOBAL pseudo-region)
        rgx = re.compile(dbconfig.get('ignored_aws_regions_regexp', default='(^cn-|GLOBAL|-gov)'), re.I)
        # Set comprehension de-duplicates regions across prefixes
        __regions = sorted(list({x['region'] for x in data['prefixes'] if not rgx.search(x['region'])}))

    return __regions
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self):
    """List all accounts

    Non-admin users only see the accounts granted to their current session.

    Returns:
        Flask response with the account list, or 404 when none are visible
    """
    _, accounts = BaseAccount.search()

    # Compute the admin check once; the original recomputed it per-response
    # and appended a redundant `or False` to an already-boolean expression
    is_admin = ROLE_ADMIN in session['user'].roles

    if not is_admin:
        # Restrict to the accounts listed in the user's session
        accounts = [acct for acct in accounts if acct.account_id in session['accounts']]

    if accounts:
        return self.make_response({
            'message': None,
            'accounts': [x.to_json(is_admin=is_admin) for x in accounts]
        })
    else:
        return self.make_response({
            'message': 'Unable to find any accounts',
            'accounts': None
        }, HTTP.NOT_FOUND)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, accountId):
    """Fetch a single account"""
    account = BaseAccount.get(accountId)

    # Guard clause: unknown account id short-circuits to a 404
    if not account:
        return self.make_response({
            'message': 'Unable to find account',
            'account': None
        }, HTTP.NOT_FOUND)

    return self.make_response({
        'message': None,
        'account': account.to_json(is_admin=True)
    })
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def put(self, accountId):
    """Update an account

    Validates the incoming payload, checks that all properties required by the
    account type are present, and persists the changes. The account type
    itself is immutable.
    """
    self.reqparse.add_argument('accountName', type=str, required=True)
    self.reqparse.add_argument('accountType', type=str, required=True)
    self.reqparse.add_argument('contacts', type=dict, required=True, action='append')
    self.reqparse.add_argument('enabled', type=int, required=True, choices=(0, 1))
    self.reqparse.add_argument('requiredRoles', type=str, action='append', default=())
    self.reqparse.add_argument('properties', type=dict, required=True)
    args = self.reqparse.parse_args()

    # Resolve the plugin class implementing the requested account type
    account_class = get_plugin_by_name(PLUGIN_NAMESPACES['accounts'], args['accountType'])
    if not account_class:
        raise InquisitorError('Invalid account type: {}'.format(args['accountType']))

    validate_contacts(args['contacts'])

    if not args['accountName'].strip():
        raise Exception('You must provide an account name')

    if not args['contacts']:
        raise Exception('You must provide at least one contact')

    # Incoming property keys are camelCase; class properties use snake_case,
    # so convert before checking that all required properties were supplied
    class_properties = {from_camelcase(key): value for key, value in args['properties'].items()}
    for prop in account_class.class_properties:
        if prop['key'] not in class_properties:
            raise InquisitorError('Missing required property {}'.format(prop))

    account = account_class.get(accountId)
    # The account type is immutable once the account has been created
    if account.account_type != args['accountType']:
        raise InquisitorError('You cannot change the type of an account')

    account.account_name = args['accountName']
    account.contacts = args['contacts']
    account.enabled = args['enabled']
    account.required_roles = args['requiredRoles']
    # NOTE(review): update() receives the raw camelCase keys while the
    # required-property check above used the snake_case conversion — confirm
    # update() performs its own key conversion
    account.update(**args['properties'])
    account.save()

    auditlog(event='account.update', actor=session['user'].username, data=args)

    return self.make_response({'message': 'Object updated', 'account': account.to_json(is_admin=True)})
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, accountId):
    """Delete an account"""
    # Look the account up first so we can report a missing id clearly
    account = BaseAccount.get(accountId)
    if not account:
        raise Exception('No such account found')

    account.delete()
    auditlog(event='account.delete', actor=session['user'].username, data={'accountId': accountId})

    return self.make_response('Account deleted')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __get_distribution_tags(self, client, arn):
    """Returns a dict containing the tags for a CloudFront distribution

    Args:
        client (botocore.client.CloudFront): Boto3 CloudFront client object
        arn (str): ARN of the distribution to get tags for

    Returns:
        `dict`
    """
    # Fetch the tag set once, then flatten the Key/Value items into a dict
    tag_items = client.list_tags_for_resource(Resource=arn)['Tags']['Items']
    return {tag['Key']: tag['Value'] for tag in tag_items}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __fetch_route53_zones(self):
    """Return all DNS zones hosted in Route53 for this collector's account.

    Returns:
        :obj:`dict` of resource id -> zone data
        (NOTE(review): the original docstring said ``list`` but the code
        builds and returns a dict keyed by ``get_resource_id``)
    """
    done = False
    marker = None
    zones = {}
    route53 = self.session.client('route53')

    try:
        # Page through list_hosted_zones until the API reports no more pages
        while not done:
            if marker:
                response = route53.list_hosted_zones(Marker=marker)
            else:
                response = route53.list_hosted_zones()

            if response['IsTruncated']:
                marker = response['NextMarker']
            else:
                done = True

            for zone_data in response['HostedZones']:
                zones[get_resource_id('r53z', zone_data['Id'])] = {
                    'name': zone_data['Name'].rstrip('.'),
                    'source': 'AWS/{}'.format(self.account),
                    # Comment is optional in the zone Config
                    'comment': zone_data['Config']['Comment'] if 'Comment' in zone_data['Config'] else None,
                    'zone_id': zone_data['Id'],
                    'private_zone': zone_data['Config']['PrivateZone'],
                    'tags': self.__fetch_route53_zone_tags(zone_data['Id'])
                }

        return zones
    finally:
        # Explicitly drop the client reference when done
        del route53
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __fetch_route53_zone_records(self, zone_id):
    """Return all resource records for a specific Route53 zone

    Args:
        zone_id (`str`): Name / ID of the hosted zone

    Returns:
        `list` of `dict` (NOTE(review): the original docstring said ``dict``
        but the code returns ``list(records.values())``)
    """
    route53 = self.session.client('route53')

    done = False
    nextName = nextType = None
    records = {}

    try:
        # Route53 paginates record sets on (record name, record type)
        while not done:
            if nextName and nextType:
                response = route53.list_resource_record_sets(
                    HostedZoneId=zone_id,
                    StartRecordName=nextName,
                    StartRecordType=nextType
                )
            else:
                response = route53.list_resource_record_sets(HostedZoneId=zone_id)

            if response['IsTruncated']:
                nextName = response['NextRecordName']
                nextType = response['NextRecordType']
            else:
                done = True

            if 'ResourceRecordSets' in response:
                for record in response['ResourceRecordSets']:
                    # Cannot make this a list, due to a race-condition in the AWS api that might return the same
                    # record more than once, so we use a dict instead to ensure that if we get duplicate records
                    # we simply just overwrite the one already there with the same info.
                    record_id = self._get_resource_hash(zone_id, record)

                    if 'AliasTarget' in record:
                        # Alias records carry no TTL or plain values; normalize
                        # them to a synthetic 'ALIAS' type with TTL 0
                        value = record['AliasTarget']['DNSName']
                        records[record_id] = {
                            'id': record_id,
                            'name': record['Name'].rstrip('.'),
                            'type': 'ALIAS',
                            'ttl': 0,
                            'value': [value]
                        }
                    else:
                        value = [y['Value'] for y in record['ResourceRecords']]
                        records[record_id] = {
                            'id': record_id,
                            'name': record['Name'].rstrip('.'),
                            'type': record['Type'],
                            'ttl': record['TTL'],
                            'value': value
                        }

        return list(records.values())
    finally:
        del route53
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __fetch_route53_zone_tags(self, zone_id):
    """Return a dict with the tags for the zone

    Args:
        zone_id (`str`): ID of the hosted zone

    Returns:
        :obj:`dict` of `str`: `str`
    """
    route53 = self.session.client('route53')
    try:
        # The API wants the bare zone id, not the '/hostedzone/<id>' path
        response = route53.list_tags_for_resource(
            ResourceType='hostedzone',
            ResourceId=zone_id.split('/')[-1]
        )
        return {tag['Key']: tag['Value'] for tag in response['ResourceTagSet']['Tags']}
    finally:
        del route53
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_resource_hash(zone_name, record):
    """Returns the last ten digits of the sha256 hash of the combined arguments.

    Useful for generating unique resource IDs

    Args:
        zone_name (`str`): The name of the DNS Zone the record belongs to
        record (`dict`): A record dict to generate the hash from

    Returns:
        `str`
    """
    # defaultdict(int) yields 0 for any attribute missing from the record, so
    # optional fields hash consistently across records
    record_data = defaultdict(int, record)
    # isinstance instead of `type(...) == dict` (idiomatic, and accepts dict
    # subclasses); flatten GeoLocation into a stable string representation
    if isinstance(record_data['GeoLocation'], dict):
        record_data['GeoLocation'] = ":".join(["{}={}".format(k, v) for k, v in record_data['GeoLocation'].items()])

    args = [
        zone_name,
        record_data['Name'],
        record_data['Type'],
        record_data['Weight'],
        record_data['Region'],
        record_data['GeoLocation'],
        record_data['Failover'],
        record_data['HealthCheckId'],
        record_data['TrafficPolicyInstanceId']
    ]
    return get_resource_id('r53r', args)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_bucket_statistics(self, bucket_name, bucket_region, storage_type, statistic, days):
    """Returns datapoints from cloudwatch for bucket statistics.

    Args:
        bucket_name (`str`): The name of the bucket
        bucket_region (`str`): Region of the bucket, used for the CloudWatch client
        storage_type (`str`): Value for the S3 StorageType dimension
        statistic (`str`): The statistic you want to fetch from
        days (`int`): Sample period for the statistic

    Returns:
        Average metric value, the string ``'NO_DATA'`` when CloudWatch has no
        datapoints, or implicitly `None` when the request fails (the error is
        logged, not re-raised)
    """
    cw = self.session.client('cloudwatch', region_name=bucket_region)

    # gather cw stats
    try:
        obj_stats = cw.get_metric_statistics(
            Namespace='AWS/S3',
            MetricName=statistic,
            Dimensions=[
                {
                    'Name': 'StorageType',
                    'Value': storage_type
                },
                {
                    'Name': 'BucketName',
                    'Value': bucket_name
                }
            ],
            Period=86400,  # one-day granularity to match the day-based window
            StartTime=datetime.utcnow() - timedelta(days=days),
            EndTime=datetime.utcnow(),
            Statistics=[
                'Average'
            ]
        )

        stat_value = obj_stats['Datapoints'][0]['Average'] if obj_stats['Datapoints'] else 'NO_DATA'
        return stat_value
    except Exception as e:
        # NOTE(review): errors are swallowed here and the function falls
        # through to an implicit None return — callers must handle None
        self.log.error(
            'Could not get bucket statistic for account {} / bucket {} / {}'.format(self.account.account_name,
                                                                                    bucket_name, e))
    finally:
        del cw
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(cls, resource_type):
    """Returns the ResourceType object for `resource_type`.

    If no existing object was found, a new type will be created in the
    database and returned

    Args:
        resource_type (str): Resource type name (an `int` id or an existing
            instance are also accepted)

    Returns:
        :obj:`ResourceType`
    """
    if isinstance(resource_type, str):
        # Lookup by type name
        obj = getattr(db, cls.__name__).find_one(cls.resource_type == resource_type)
    elif isinstance(resource_type, int):
        # Lookup by primary key
        obj = getattr(db, cls.__name__).find_one(cls.resource_type_id == resource_type)
    elif isinstance(resource_type, cls):
        # Already a ResourceType instance; pass through unchanged
        return resource_type
    else:
        obj = None

    if not obj:
        # NOTE(review): when called with an unknown int id or an unsupported
        # type, that value becomes the new row's resource_type name — confirm
        # this fallthrough is intentional
        obj = cls()
        obj.resource_type = resource_type

        db.session.add(obj)
        db.session.commit()
        db.session.refresh(obj)

    return obj
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(account_id, account_type_id=None):
    """Return account by ID and type

    Args:
        account_id (`int`, `str`): Unique Account identifier; strings are
            treated as account names, anything else as the numeric id
        account_type_id (str): Type of account to get

    Returns:
        :obj:`Account`: Returns an Account object if found, else None
    """
    # isinstance instead of `type(...) == str` — idiomatic and also matches
    # str subclasses
    if isinstance(account_id, str):
        args = {'account_name': account_id}
    else:
        args = {'account_id': account_id}

    if account_type_id:
        args['account_type_id'] = account_type_id

    return db.Account.find_one(**args)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def user_has_access(self, user):
    """Check if a user has access to view information for the account

    Args:
        user (:obj:`User`): User object to check

    Returns:
        True if user has access to the account, else false
    """
    # Admins always have access
    if ROLE_ADMIN in user.roles:
        return True

    # Non-admin users should only see active accounts
    if not self.enabled:
        return False

    # Accounts without role restrictions are visible to any non-admin
    if not self.required_roles:
        return True

    return any(role in user.roles for role in self.required_roles)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_plugins(self):
    """Refresh the list of available collectors and auditors

    Returns:
        `None`

    Raises:
        Exception: If no collectors and no auditors could be loaded
    """
    # Load collector plugins, grouped into self.collectors by collector type
    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.collectors']['plugins']:
        cls = entry_point.load()
        if cls.enabled():
            self.log.debug('Collector loaded: {} in module {}'.format(cls.__name__, cls.__module__))
            self.collectors.setdefault(cls.type, []).append(Worker(
                cls.name,
                cls.interval,
                {
                    'name': entry_point.name,
                    'module_name': entry_point.module_name,
                    'attrs': entry_point.attrs
                }
            ))
        else:
            self.log.debug('Collector disabled: {} in module {}'.format(cls.__name__, cls.__module__))

    # Load auditor plugins into the flat self.auditors list
    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.auditors']['plugins']:
        cls = entry_point.load()
        if cls.enabled():
            self.log.debug('Auditor loaded: {} in module {}'.format(cls.__name__, cls.__module__))
            self.auditors.append(Worker(
                cls.name,
                cls.interval,
                {
                    'name': entry_point.name,
                    'module_name': entry_point.module_name,
                    'attrs': entry_point.attrs
                }
            ))
        else:
            self.log.debug('Auditor disabled: {} in module {}'.format(cls.__name__, cls.__module__))

    collector_count = sum(len(x) for x in self.collectors.values())
    auditor_count = len(self.auditors)
    # A scheduler with nothing to schedule is a configuration error
    if collector_count + auditor_count == 0:
        raise Exception('No auditors or collectors loaded, aborting scheduler')

    self.log.info('Scheduler loaded {} collectors and {} auditors'.format(collector_count, auditor_count))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_scheduler_plugins(self):
    """Refresh the list of available schedulers

    Returns:
        `list` of :obj:`BaseScheduler`
    """
    # Plugins are only discovered once; later calls are no-ops
    if self.scheduler_plugins:
        return

    for entry_point in CINQ_PLUGINS['cloud_inquisitor.plugins.schedulers']['plugins']:
        cls = entry_point.load()
        self.scheduler_plugins[cls.__name__] = cls

        if cls.__name__ == self.active_scheduler:
            self.log.debug('Scheduler loaded: {} in module {}'.format(cls.__name__, cls.__module__))
        else:
            self.log.debug('Scheduler disabled: {} in module {}'.format(cls.__name__, cls.__module__))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self, **kwargs):
    """Execute the scheduler.

    With ``kwargs['list']`` set, only prints the available scheduler modules;
    otherwise instantiates and runs the active scheduler.

    Returns:
        `None`
    """
    if not super().run(**kwargs):
        return

    if kwargs['list']:
        self.log.info('--- List of Scheduler Modules ---')
        # Iterate the dict directly: only the names are used, so the original
        # list(...items()) materialization and unused value binding were waste
        for name in self.scheduler_plugins:
            if self.active_scheduler == name:
                self.log.info('{} (active)'.format(name))
            else:
                self.log.info(name)
        self.log.info('--- End list of Scheduler Modules ---')
        return

    scheduler = self.scheduler_plugins[self.active_scheduler]()
    scheduler.execute_scheduler()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self, **kwargs):
    """Execute the worker thread.

    Returns:
        `None`
    """
    super().run(**kwargs)
    scheduler = self.scheduler_plugins[self.active_scheduler]()

    if not kwargs['no_daemon']:
        # Daemon mode: spawn the requested number of polling worker threads
        self.log.info('Starting {} worker with {} threads checking for new messages every {} seconds'.format(
            scheduler.name,
            kwargs['threads'],
            kwargs['delay']
        ))
        for i in range(kwargs['threads']):
            thd = threading.Thread(
                target=self.execute_worker_thread,
                args=(scheduler.execute_worker, kwargs['delay'])
            )
            thd.start()
    else:
        # Non-daemon mode: one synchronous execution in the current thread
        self.log.info('Starting {} worker for a single non-daemon execution'.format(
            scheduler.name
        ))
        scheduler.execute_worker()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def post(self):
    """Create a new template

    Returns HTTP 409 (conflict) when a template with the same name already
    exists.
    """
    self.reqparse.add_argument('templateName', type=str, required=True)
    self.reqparse.add_argument('template', type=str, required=True)
    args = self.reqparse.parse_args()

    # Template names are unique — creating over an existing one is a conflict
    template = db.Template.find_one(template_name=args['templateName'])
    if template:
        return self.make_response('Template already exists, update the existing template instead', HTTP.CONFLICT)

    template = Template()
    template.template_name = args['templateName']
    template.template = args['template']

    db.session.add(template)
    db.session.commit()
    auditlog(event='template.create', actor=session['user'].username, data=args)

    return self.make_response('Template {} has been created'.format(template.template_name), HTTP.CREATED)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def put(self):
    """Re-import all templates, overwriting any local changes made"""
    try:
        _import_templates(force=True)
        return self.make_response('Imported templates')
    except Exception:
        # except Exception instead of a bare except: a bare clause would also
        # swallow SystemExit and KeyboardInterrupt
        self.log.exception('Failed importing templates')
        return self.make_response('Failed importing templates', HTTP.SERVER_ERROR)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, template_name):
    """Get a specific template"""
    template = db.Template.find_one(template_name=template_name)

    # Found templates are returned directly; otherwise a 404
    if template:
        return self.make_response({'template': template})
    return self.make_response('No such template found', HTTP.NOT_FOUND)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def put(self, template_name):
    """Update a template"""
    self.reqparse.add_argument('template', type=str, required=True)
    args = self.reqparse.parse_args()

    template = db.Template.find_one(template_name=template_name)
    if not template:
        return self.make_response('No such template found', HTTP.NOT_FOUND)

    # Capture a diff of the change for the audit log before overwriting
    changes = diff(template.template, args['template'])
    template.template = args['template']
    # Mark as locally modified so forced re-imports know it diverged
    template.is_modified = True

    db.session.add(template)
    db.session.commit()

    auditlog(
        event='template.update',
        actor=session['user'].username,
        data={
            'template_name': template_name,
            'template_changes': changes
        }
    )

    return self.make_response('Template {} has been updated'.format(template_name))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, template_name):
    """Delete a template"""
    template = db.Template.find_one(template_name=template_name)
    if not template:
        return self.make_response('No such template found', HTTP.NOT_FOUND)

    db.session.delete(template)
    db.session.commit()
    auditlog(event='template.delete', actor=session['user'].username, data={'template_name': template_name})

    # Echo the deleted name back so the client can update its state
    payload = {
        'message': 'Template has been deleted',
        'templateName': template_name
    }
    return self.make_response(payload)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_action(resource, action, action_issuer='unknown'):
    """Process an audit action for a resource, if possible

    Args:
        resource (:obj:`Resource`): A resource object to perform the action on
        action (`str`): Type of action to perform (`kill` or `stop`)
        action_issuer (`str`): The issuer of the action

    Returns:
        `ActionStatus`
    """
    from cinq_collector_aws import AWSRegionCollector

    func_action = action_mapper[resource.resource_type][action]
    extra_info = {}
    action_status = ActionStatus.UNKNOWN

    if func_action:
        if action_mapper[resource.resource_type]['service_name'] == 'lambda':
            # Lambda actions run out of a dedicated collector account/region.
            # NOTE(review): these read the rds_collector_* config keys even
            # though the service is lambda — confirm this is intentional
            client = get_aws_session(
                AWSAccount.get(dbconfig.get('rds_collector_account', AWSRegionCollector.ns, ''))
            ).client(
                'lambda',
                dbconfig.get('rds_collector_region', AWSRegionCollector.ns, '')
            )
        else:
            # Act directly in the resource's own account and region
            client = get_aws_session(AWSAccount(resource.account)).client(
                action_mapper[resource.resource_type]['service_name'],
                region_name=resource.location
            )
        try:
            logger.info(f'Trying to {action} resource {resource.id} for account {resource.account.account_name} / region {resource.location}')
            action_status, extra_info = func_action(client, resource)
            Enforcement.create(resource.account.account_id, resource.id, action, datetime.now(), extra_info)
        except Exception as ex:
            action_status = ActionStatus.FAILED
            logger.exception('Failed to apply action {} to {}: {}'.format(action, resource.id, ex))
        finally:
            # Always record the attempt in the audit log, success or failure
            auditlog(
                event='{}.{}.{}.{}'.format(action_issuer, resource.resource_type, action, action_status),
                actor=action_issuer,
                data={
                    'resource_id': resource.id,
                    'account_name': resource.account.account_name,
                    'location': resource.location,
                    'info': extra_info
                }
            )
        return action_status
    else:
        logger.error('Failed to apply action {} to {}: Not supported'.format(action, resource.id))
        return ActionStatus.FAILED
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stop_ec2_instance(client, resource):
    """Stop an EC2 Instance

    This function will attempt to stop a running instance.

    Args:
        client (:obj:`boto3.session.Session.client`): A boto3 client object
        resource (:obj:`Resource`): The resource object to stop

    Returns:
        `ActionStatus`
    """
    # Instances already stopped or terminated need no action
    current_state = EC2Instance.get(resource.id).state
    if current_state in ('stopped', 'terminated'):
        return ActionStatus.IGNORED, {}

    client.stop_instances(InstanceIds=[resource.id])
    return ActionStatus.SUCCEED, {'instance_type': resource.instance_type, 'public_ip': resource.public_ip}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def terminate_ec2_instance(client, resource):
    """Terminate an EC2 Instance

    This function will terminate an EC2 Instance.

    Args:
        client (:obj:`boto3.session.Session.client`): A boto3 client object
        resource (:obj:`Resource`): The resource object to terminate

    Returns:
        `ActionStatus`
    """
    # TODO: Implement disabling of TerminationProtection
    # Already terminated instances need no action
    if EC2Instance.get(resource.id).state == 'terminated':
        return ActionStatus.IGNORED, {}

    client.terminate_instances(InstanceIds=[resource.id])
    return ActionStatus.SUCCEED, {'instance_type': resource.instance_type, 'public_ip': resource.public_ip}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def stop_s3_bucket(client, resource):
    """ Stop an S3 bucket from being used

    This function will try to
    1. Add lifecycle policy to make sure objects inside it will expire
    2. Block certain access to the bucket
    """
    # Deny new uploads and object reads on the entire bucket
    bucket_policy = {
        'Version': '2012-10-17',
        'Id': 'PutObjPolicy',
        'Statement': [
            {
                'Sid': 'cinqDenyObjectUploads',
                'Effect': 'Deny',
                'Principal': '*',
                'Action': ['s3:PutObject', 's3:GetObject'],
                'Resource': 'arn:aws:s3:::{}/*'.format(resource.id)
            }
        ]
    }

    # Expire all current and non-current object versions after the configured
    # number of days (default 3), starting from midnight UTC today
    s3_removal_lifecycle_policy = {
        'Rules': [
            {'Status': 'Enabled',
             'NoncurrentVersionExpiration': {u'NoncurrentDays': 1},
             'Filter': {u'Prefix': ''},
             'Expiration': {
                 u'Date': datetime.utcnow().replace(
                     hour=0, minute=0, second=0, microsecond=0
                 ) + timedelta(days=dbconfig.get('lifecycle_expiration_days', NS_AUDITOR_REQUIRED_TAGS, 3))
             },
             'AbortIncompleteMultipartUpload': {u'DaysAfterInitiation': 3},
             'ID': 'cloudInquisitor'}
        ]
    }

    policy_exists = s3_removal_policy_exists(client, resource)
    lifecycle_policy_exists = s3_removal_lifecycle_policy_exists(client, resource)

    # Both policies already in place — nothing to do
    if policy_exists and lifecycle_policy_exists:
        return ActionStatus.IGNORED, {}

    if not policy_exists:
        client.put_bucket_policy(Bucket=resource.id, Policy=json.dumps(bucket_policy))
        logger.info('Added policy to prevent putObject in s3 bucket {} in {}'.format(
            resource.id,
            resource.account.account_name
        ))

    if not lifecycle_policy_exists:
        # Grab S3 Metrics before lifecycle policies start removing objects
        client.put_bucket_lifecycle_configuration(
            Bucket=resource.id,
            LifecycleConfiguration=s3_removal_lifecycle_policy
        )
        logger.info('Added policy to delete bucket contents in s3 bucket {} in {}'.format(
            resource.id,
            resource.account.account_name
        ))

    return ActionStatus.SUCCEED, resource.metrics()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_s3_bucket(client, resource):
    """Delete an S3 bucket

    This function will try to delete an S3 bucket, but only when the
    ``enable_delete_s3_buckets`` feature flag is enabled.

    Args:
        client (:obj:`boto3.session.Session.client`): A boto3 client object
        resource (:obj:`Resource`): The resource object to terminate

    Returns:
        `ActionStatus`
    """
    if not dbconfig.get('enable_delete_s3_buckets', NS_AUDITOR_REQUIRED_TAGS, False):
        # Bug fix: previously this fell through and reported SUCCEED even
        # though deletion was disabled and the bucket was left untouched.
        # IGNORED matches the convention used by the other actions.
        return ActionStatus.IGNORED, {}

    client.delete_bucket(Bucket=resource.id)
    return ActionStatus.SUCCEED, resource.metrics()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(cls, resource_id):
    """Returns the class object identified by `resource_id`

    Args:
        resource_id (str): Unique EC2 Instance ID to load from database

    Returns:
        EC2 Instance object if found, else None
    """
    # Wrap the raw Resource row in this class, or propagate the miss
    resource = Resource.get(resource_id)
    if not resource:
        return None
    return cls(resource)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(cls, resource_id, *, account_id, properties=None, tags=None, location=None, auto_add=True, auto_commit=False):
    """Creates a new Resource object with the properties and tags provided

    Args:
        resource_id (str): Unique identifier for the resource object
        account_id (int): Account ID which owns the resource
        properties (dict): Dictionary of properties for the resource object.
        tags (dict): Key / value dictionary of tags. Values must be `str` types
        location (str): Location of the resource, if applicable
        auto_add (bool): Automatically add the new resource to the DB session. Default: True
        auto_commit (bool): Automatically commit the change to the database. Default: False

    Raises:
        ResourceException: If a resource with `resource_id` already exists
        ValueError: If a tag value is not a string
    """
    if cls.get(resource_id):
        raise ResourceException('Resource {} already exists'.format(resource_id))

    res = Resource()
    res.resource_id = resource_id
    res.account_id = account_id
    res.location = location
    res.resource_type_id = ResourceType.get(cls.resource_type).resource_type_id

    if properties:
        for name, value in properties.items():
            prop = ResourceProperty()
            prop.resource_id = res.resource_id
            prop.name = name
            # Datetime values are persisted in ISO-8601 string form
            prop.value = value.isoformat() if type(value) == datetime else value
            res.properties.append(prop)
            db.session.add(prop)

    if tags:
        for key, value in tags.items():
            # Tag values must be plain strings
            if type(value) != str:
                raise ValueError('Invalid object type for tag value: {}'.format(key))

            tag = Tag()
            tag.resource_id = resource_id
            tag.key = key
            tag.value = value
            res.tags.append(tag)
            db.session.add(tag)

    if auto_add:
        db.session.add(res)

        if auto_commit:
            db.session.commit()

        # Re-read so the returned object reflects the persisted state
        return cls.get(res.resource_id)
    else:
        return cls(res)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_all(cls, account=None, location=None, include_disabled=False):
    """Returns a list of all resources for a given account, location and resource type.

    Attributes:
        account (:obj:`Account`): Account owning the resources
        location (`str`): Location of the resources to return (region)
        include_disabled (`bool`): Include resources from disabled accounts (default: False)

    Returns:
        :obj:`dict` of resource_id -> resource object (NOTE(review): the
        original docstring said list, but the code returns a dict)
    """
    qry = db.Resource.filter(
        Resource.resource_type_id == ResourceType.get(cls.resource_type).resource_type_id
    )

    if account:
        qry = qry.filter(Resource.account_id == account.account_id)

    if not include_disabled:
        # Join against Account so resources in disabled accounts drop out
        qry = qry.join(Account, Resource.account_id == Account.account_id).filter(Account.enabled == 1)

    if location:
        qry = qry.filter(Resource.location == location)

    return {res.resource_id: cls(res) for res in qry.all()}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search(cls, *, limit=100, page=1, accounts=None, locations=None, resources=None, properties=None, include_disabled=False, return_query=False):
    """Search for resources based on the provided filters.

    If `return_query` a sub-class of `sqlalchemy.orm.Query` is returned
    instead of the resource list.

    Args:
        limit (`int`): Number of results to return. Default: 100
        page (`int`): Pagination offset for results. Default: 1
        accounts (`list` of `int`): A list of account id's to limit the returned resources to
        locations (`list` of `str`): A list of locations as strings to limit the search
        resources (`list` of `str`): A list of resource_ids
        properties (`dict`): A `dict` containing property name and value pairs. Values can be
            either a str or a list of strings, in which case a boolean OR search is performed
            on the values
        include_disabled (`bool`): Include resources from disabled accounts. Default: False
        return_query (`bool`): Returns the query object prior to adding the limit and offset
            functions. Allows for sub-classes to amend the search feature with extra
            conditions. The calling function must handle pagination on its own

    Returns:
        `tuple` of (total count, `list` of `Resource`), or `sqlalchemy.orm.Query`
        when `return_query` is set
    """
    qry = db.Resource.order_by(Resource.resource_id).filter(
        Resource.resource_type_id == ResourceType.get(cls.resource_type).resource_type_id
    )

    if not include_disabled:
        qry = qry.join(Account, Resource.account_id == Account.account_id).filter(Account.enabled == 1)

    # Inside a web request, restrict to the accounts visible to the session
    if session:
        qry = qry.filter(Resource.account_id.in_(session['accounts']))

    if accounts:
        qry = qry.filter(Resource.account_id.in_([Account.get(acct).account_id for acct in accounts]))

    if locations:
        qry = qry.filter(Resource.location.in_(locations))

    if resources:
        qry = qry.filter(Resource.resource_id.in_(resources))

    if properties:
        # Each property gets its own join alias so multiple name/value
        # conditions apply simultaneously (AND), while list values are OR'ed
        # within a single property
        for prop_name, value in properties.items():
            alias = aliased(ResourceProperty)
            qry = qry.join(alias, Resource.resource_id == alias.resource_id)
            if type(value) == list:
                where_clause = []
                for item in value:
                    where_clause.append(alias.value == item)

                qry = qry.filter(
                    and_(
                        alias.name == prop_name,
                        or_(*where_clause)
                    ).self_group()
                )
            else:
                qry = qry.filter(
                    and_(
                        alias.name == prop_name,
                        alias.value == value
                    ).self_group()
                )

    if return_query:
        return qry

    total = qry.count()
    qry = qry.limit(limit)
    qry = qry.offset((page - 1) * limit if page > 1 else 0)

    return total, [cls(x) for x in qry.all()]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_owner_emails(self, partial_owner_match=True):
"""Return a list of email addresses associated with the instance, based on tags Returns: List of email addresses if any, else None """ |
# Look for a tag keyed "owner" (case-insensitive) and extract email
# addresses from its value.
for tag in self.tags:
    if tag.key.lower() == 'owner':
        rgx = re.compile(RGX_EMAIL_VALIDATION_PATTERN, re.I)
        if partial_owner_match:
            # Collect every email-looking substring in the tag value.
            match = rgx.findall(tag.value)
            if match:
                return [NotificationContact('email', email) for email in match]
        else:
            # Strict mode: the value must match from the beginning.
            match = rgx.match(tag.value)
            if match:
                # NOTE(review): assumes each capture group of the pattern is
                # one full address — confirm against RGX_EMAIL_VALIDATION_PATTERN.
                return [NotificationContact('email', email) for email in match.groups()]
# No owner tag (or no parsable address) found.
return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_property(self, name):
"""Return a named property for a resource, if available. Will raise an `AttributeError` if the property does not exist Args: name (str):
Name of the property to return Returns: `ResourceProperty` """ |
for prop in self.resource.properties:
if prop.name == name:
return prop
raise AttributeError(name) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_property(self, name, value, update_session=True):
"""Create or set the value of a property. Returns `True` if the property was created or updated, or `False` if there were no changes to the value of the property. Args: name (str):
Name of the property to create or update value (any):
Value of the property. This can be any type of JSON serializable data update_session (bool):
Automatically add the change to the SQLAlchemy session. Default: True Returns: `bool` """ |
if type(value) == datetime:
value = value.isoformat()
else:
value = value
try:
prop = self.get_property(name)
if prop.value == value:
return False
prop.value = value
except AttributeError:
prop = ResourceProperty()
prop.resource_id = self.id
prop.name = name
prop.value = value
if update_session:
db.session.add(prop)
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_tag(self, key, *, case_sensitive=True):
"""Return a tag by key, if found Args: key (str):
Name/key of the tag to locate case_sensitive (bool):
Should tag keys be treated case-sensitive (default: true) Returns: `Tag`,`None` """ |
key = key if case_sensitive else key.lower()
for tag in self.resource.tags:
if not case_sensitive:
if tag.key.lower() == key:
return tag
elif key == tag.key:
return tag
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_tag(self, key, value, update_session=True):
"""Create or set the value of the tag with `key` to `value`. Returns `True` if the tag was created or updated or `False` if there were no changes to be made. Args: key (str):
Key of the tag value (str):
Value of the tag update_session (bool):
Automatically add the change to the SQLAlchemy session. Default: True Returns: `bool` """ |
existing_tags = {x.key: x for x in self.tags}
if key in existing_tags:
tag = existing_tags[key]
if tag.value == value:
return False
tag.value = value
else:
tag = Tag()
tag.resource_id = self.id
tag.key = key
tag.value = value
self.tags.append(tag)
if update_session:
db.session.add(tag)
return True |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_tag(self, key, update_session=True):
"""Removes a tag from a resource based on the tag key. Returns `True` if the tag was removed or `False` if the tag didn't exist Args: key (str):
Key of the tag to delete update_session (bool):
Automatically add the change to the SQLAlchemy session. Default: True Returns: """ |
existing_tags = {x.key: x for x in self.tags}
if key in existing_tags:
if update_session:
db.session.delete(existing_tags[key])
self.tags.remove(existing_tags[key])
return True
return False |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, *, auto_commit=False):
"""Save the resource to the database Args: auto_commit (bool):
Automatically commit the transaction. Default: `False` Returns: `None` """ |
# Stage the underlying row; optionally commit the transaction right away.
try:
    db.session.add(self.resource)
    if auto_commit:
        db.session.commit()
except SQLAlchemyError as ex:
    # Roll back so the session remains usable after a failed flush/commit.
    db.session.rollback()
    self.log.exception('Failed updating resource: {}'.format(ex))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, *, auto_commit=False):
"""Removes a resource from the database Args: auto_commit (bool):
Automatically commit the transaction. Default: `False` Returns: `None` """ |
# Mark the row for deletion; optionally commit immediately.
try:
    db.session.delete(self.resource)
    if auto_commit:
        db.session.commit()
except SQLAlchemyError:
    # Keep the session usable after a failure.
    db.session.rollback()
    self.log.exception('Failed deleting resource: {}'.format(self.id))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_json(self):
"""Return a `dict` representation of the resource, including all properties and tags Returns: `dict` """ |
return {
'resourceType': self.resource.resource_type_id,
'resourceId': self.id,
'accountId': self.resource.account_id,
'account': self.account,
'location': self.resource.location,
'properties': {to_camelcase(prop.name): prop.value for prop in self.resource.properties},
'tags': [{'key': t.key, 'value': t.value} for t in self.resource.tags]
} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def volumes(self):
"""Returns a list of the volumes attached to the instance Returns: `list` of `EBSVolume` """ |
# Find aws_ebs_volume resources whose JSON "attachments" property contains
# this instance's id. JSON_CONTAINS / JSON_QUOTE are MySQL JSON functions
# evaluated server-side, so the filtering happens in the database.
return [
    EBSVolume(res) for res in db.Resource.join(
        ResourceProperty, Resource.resource_id == ResourceProperty.resource_id
    ).filter(
        Resource.resource_type_id == ResourceType.get('aws_ebs_volume').resource_type_id,
        ResourceProperty.name == 'attachments',
        func.JSON_CONTAINS(ResourceProperty.value, func.JSON_QUOTE(self.id))
    ).all()
]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_name_or_instance_id(self, with_id=False):
"""Returns the name of an instance if existant, else return the instance id Args: with_id (bool):
Include the instance ID even if the name is found (default: False) Returns: Name and/or instance ID of the instance object """ |
name = self.get_tag('Name', case_sensitive=False)
if name and len(name.value.strip()) > 0:
return '{0} ({1})'.format(name.value, self.id) if with_id else name.value
return self.id |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search_by_age(cls, *, limit=100, page=1, accounts=None, locations=None, age=720, properties=None, include_disabled=False):
"""Search for resources based on the provided filters Args: limit (`int`):
Number of results to return. Default: 100 page (`int`):
Pagination offset for results. Default: 1 accounts (`list` of `int`):
A list of account id's to limit the returned resources to locations (`list` of `str`):
A list of locations as strings to limit the search for age (`int`):
Age of instances older than `age` days to return properties (`dict`):
A `dict` containing property name and value pairs. Values can be either a str or a list of strings, in which case a boolean OR search is performed on the values include_disabled (`bool`):
Include resources from disabled accounts. Default: False Returns: `list` of `Resource` """ |
# Reuse the generic search() to build the filtered query, then bolt the
# age condition on top before paginating ourselves.
qry = cls.search(
    limit=limit,
    page=page,
    accounts=accounts,
    locations=locations,
    properties=properties,
    include_disabled=include_disabled,
    return_query=True
)
# Join the launch_date property and compare it as a DATETIME. The value is
# stored JSON-quoted, hence JSON_UNQUOTE (MySQL-specific function).
age_alias = aliased(ResourceProperty)
qry = (
    qry.join(age_alias, Resource.resource_id == age_alias.resource_id)
    .filter(
        age_alias.name == 'launch_date',
        cast(func.JSON_UNQUOTE(age_alias.value), DATETIME) < datetime.now() - timedelta(days=age)
    )
)

# search() returned the raw query, so limit/offset are applied here.
total = qry.count()
qry = qry.limit(limit)
qry = qry.offset((page - 1) * limit if page > 1 else 0)

return total, [cls(x) for x in qry.all()]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_json(self, with_volumes=True):
"""Augment the base `to_json` function, adding information about volumes Returns: `dict` """ |
data = super().to_json()
if with_volumes:
data['volumes'] = [
{
'volumeId': vol.id,
'volumeType': vol.volume_type,
'size': vol.size
} for vol in self.volumes
]
return data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_record(self, record):
"""Remove a DNSRecord Args: record (:obj:`DNSRecord`):
:obj:`DNSRecord` to remove Returns: `None` """ |
# Detach the record from this zone's children first, then remove the
# record row itself.
self.children.remove(record.resource)
record.delete()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self):
"""List existing config namespaces and their items""" |
namespaces = db.ConfigNamespace.order_by(
ConfigNamespace.sort_order,
ConfigNamespace.name
).all()
return self.make_response({
'message': None,
'namespaces': namespaces
}, HTTP.OK) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def post(self):
"""Create a new config item""" |
self.reqparse.add_argument('namespacePrefix', type=str, required=True)
self.reqparse.add_argument('description', type=str, required=True)
self.reqparse.add_argument('key', type=str, required=True)
self.reqparse.add_argument('value', required=True)
self.reqparse.add_argument('type', type=str, required=True)
args = self.reqparse.parse_args()
if not self.dbconfig.namespace_exists(args['namespacePrefix']):
return self.make_response('The namespace doesnt exist', HTTP.NOT_FOUND)
if self.dbconfig.key_exists(args['namespacePrefix'], args['key']):
return self.make_response('This config item already exists', HTTP.CONFLICT)
self.dbconfig.set(args['namespacePrefix'], args['key'], _to_dbc_class(args), description=args['description'])
auditlog(event='configItem.create', actor=session['user'].username, data=args)
return self.make_response('Config item added', HTTP.CREATED) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, namespace, key):
"""Get a specific configuration item""" |
cfg = self.dbconfig.get(key, namespace, as_object=True)
return self.make_response({
'message': None,
'config': cfg
}) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def put(self, namespace, key):
"""Update a single configuration item""" |
args = request.json
if not self.dbconfig.key_exists(namespace, key):
    return self.make_response('No such config entry: {}/{}'.format(namespace, key), HTTP.BAD_REQUEST)

# For "choice" items, the count of enabled options must fall within the
# configured [min_items, max_items] range.
if (args['type'] == 'choice' and
        not args['value']['min_items'] <= len(args['value']['enabled']) <= args['value']['max_items']):
    # Build a human-readable "select N [to M] item(s)" message.
    return self.make_response(
        'You should select {} {}item{}'.format(
            args['value']['min_items'],
            '' if args['value']['min_items'] == args['value']['max_items'] else 'to {} '.format(
                args['value']['max_items']
            ),
            's' if args['value']['max_items'] > 1 else ''
        ),
        HTTP.BAD_REQUEST
    )

# Every enabled choice must be one of the declared available options.
if args['type'] == 'choice' and not set(args['value']['enabled']).issubset(args['value']['available']):
    return self.make_response('Invalid item', HTTP.BAD_REQUEST)

item = db.ConfigItem.find_one(
    ConfigItem.namespace_prefix == namespace, ConfigItem.key == key
)

# Apply only the fields that actually changed.
if item.value != args['value']:
    item.value = args['value']

if item.type != args['type']:
    item.type = args['type']

if item.description != args['description']:
    item.description = args['description']

# Persist through dbconfig so the in-memory cache stays in sync.
self.dbconfig.set(namespace, key, _to_dbc_class(args))
auditlog(event='configItem.update', actor=session['user'].username, data=args)

return self.make_response('Config entry updated')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, namespace, key):
"""Delete a specific configuration item""" |
if not self.dbconfig.key_exists(namespace, key):
return self.make_response('No such config entry exists: {}/{}'.format(namespace, key), HTTP.BAD_REQUEST)
self.dbconfig.delete(namespace, key)
auditlog(event='configItem.delete', actor=session['user'].username, data={'namespace': namespace, 'key': key})
return self.make_response('Config entry deleted') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, namespacePrefix):
"""Get a specific configuration namespace""" |
ns = db.ConfigNamespace.find_one(ConfigNamespace.namespace_prefix == namespacePrefix)
if not ns:
return self.make_response('No such namespace: {}'.format(namespacePrefix), HTTP.NOT_FOUND)
return self.make_response({
'message': None,
'namespace': ns
}) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def put(self, namespacePrefix):
"""Update a specific configuration namespace""" |
self.reqparse.add_argument('name', type=str, required=True)
self.reqparse.add_argument('sortOrder', type=int, required=True)
args = self.reqparse.parse_args()
ns = db.ConfigNamespace.find_one(ConfigNamespace.namespace_prefix == namespacePrefix)
if not ns:
return self.make_response('No such namespace: {}'.format(namespacePrefix), HTTP.NOT_FOUND)
ns.name = args['name']
ns.sort_order = args['sortOrder']
db.session.add(ns)
db.session.commit()
self.dbconfig.reload_data()
auditlog(event='configNamespace.update', actor=session['user'].username, data=args)
return self.make_response('Namespace updated') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, namespacePrefix):
"""Delete a specific configuration namespace""" |
ns = db.ConfigNamespace.find_one(ConfigNamespace.namespace_prefix == namespacePrefix)
if not ns:
return self.make_response('No such namespace: {}'.format(namespacePrefix), HTTP.NOT_FOUND)
db.session.delete(ns)
db.session.commit()
self.dbconfig.reload_data()
auditlog(
event='configNamespace.delete',
actor=session['user'].username,
data={'namespacePrefix': namespacePrefix}
)
return self.make_response('Namespace deleted') |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def post(self):
"""Create a new configuration namespace""" |
self.reqparse.add_argument('namespacePrefix', type=str, required=True)
self.reqparse.add_argument('name', type=str, required=True)
self.reqparse.add_argument('sortOrder', type=int, required=True)
args = self.reqparse.parse_args()
if self.dbconfig.namespace_exists(args['namespacePrefix']):
return self.make_response('Namespace {} already exists'.format(args['namespacePrefix']), HTTP.CONFLICT)
ns = ConfigNamespace()
ns.namespace_prefix = args['namespacePrefix']
ns.name = args['name']
ns.sort_order = args['sortOrder']
db.session.add(ns)
db.session.commit()
self.dbconfig.reload_data()
auditlog(event='configNamespace.create', actor=session['user'].username, data=args)
return self.make_response('Namespace created', HTTP.CREATED) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_syslog_format(event_type):
"""Take an event type argument and return a python logging format In order to properly format the syslog messages to current standard, load the template and perform necessary replacements and return the string. Args: event_type (str):
Event type name Returns: `str` """ |
syslog_format_template = get_template('syslog_format.json')
fmt = syslog_format_template.render(
event_type=event_type,
host=dbconfig.get('instance_name', default='local')
)
# Load and redump string, to get rid of any extraneous whitespaces
return json.dumps(json.loads(fmt)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_logging():
"""Utility function to setup the logging systems based on the `logging.json` configuration file""" |
# Load the base logging configuration shipped alongside the app config.
config = json.load(open(os.path.join(config_path, 'logging.json')))

# Only wire up syslog forwarding when it is enabled in the database config.
if dbconfig.get('enable_syslog_forwarding', NS_LOG, False):
    try:
        config['formatters']['syslog'] = {
            'format': _get_syslog_format('cloud-inquisitor-logs')
        }
        config['handlers']['syslog'] = {
            'class': 'cloud_inquisitor.log.SyslogPipelineHandler',
            'formatter': 'syslog',
            'filters': ['standard']
        }
        config['loggers']['cloud_inquisitor']['handlers'].append('syslog')

        # Dedicated audit-log handler; propagation is disabled so audit
        # events don't also flow through the normal logger tree.
        audit_handler = SyslogPipelineHandler()
        audit_handler.setFormatter(logging.Formatter(_get_syslog_format('cloud-inquisitor-audit')))
        audit_handler.setLevel(logging.DEBUG)
        _AUDIT_LOGGER.addHandler(audit_handler)
        _AUDIT_LOGGER.propagate = False
    except Exception as ex:
        # Best-effort: fall back to the file-based configuration when the
        # syslog setup fails for any reason.
        print('An error occured while configuring the syslogger: {}'.format(ex))

logging.config.dictConfig(config)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def emit(self, record):
"""Persist a record into the database Args: record (`logging.Record`):
The logging.Record object to store Returns: `None` """ |
# Skip records below the configured minimum level. getLevelName() maps a
# level *name* (e.g. 'INFO') to its numeric value when passed a string.
if record.levelno < logging.getLevelName(self.min_level):
    return

# Copy the interesting fields of the LogRecord onto the ORM row.
evt = LogEvent()
evt.level = record.levelname
evt.levelno = record.levelno
evt.timestamp = datetime.fromtimestamp(record.created)
# NOTE(review): record.message is only populated once a Formatter has run
# record.getMessage() — confirm this handler always sees formatted records.
evt.message = record.message
evt.filename = record.filename
evt.lineno = record.lineno
evt.module = record.module
evt.funcname = record.funcName
evt.pathname = record.pathname
evt.process_id = record.process

# Only attach a stacktrace for ERROR (40) and above.
if record.levelno >= 40:
    evt.stacktrace = traceback.format_exc()

# Never let a logging failure propagate; roll back and move on.
try:
    db.session.add(evt)
    db.session.commit()
except Exception:
    db.session.rollback()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(cls, ns, key):
"""Fetch an item by namespace and key Args: ns (str):
Namespace prefix key (str):
Item key Returns: :obj:`Configitem`: Returns config item object if found, else `None` """ |
return getattr(db, cls.__name__).find_one(
ConfigItem.namespace_prefix == ns,
ConfigItem.key == key
) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_role(user, roles):
"""Map roles for user in database Args: user (User):
User to add roles to roles ([Role]):
List of roles to add Returns: None """ |
# Create one UserRole mapping per role. A plain for-loop replaces the
# original list comprehension, which was used purely for its side effects
# (an anti-pattern that also built a throwaway list). Each mapping is
# committed individually, preserving the original per-role commit behavior.
for role in roles:
    user_role = UserRole()
    user_role.user_id = user.user_id
    user_role.role_id = role.role_id
    db.session.add(user_role)
    db.session.commit()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def reload_data(self):
"""Reloads the configuration from the database Returns: `None` """ |
# Force a rollback to make sure we read fresh rows rather than whatever
# the session has cached.
db.session.rollback()
self.__data = {}
try:
    for ns in db.ConfigNamespace.all():
        self.__data[ns.namespace_prefix] = {x.key: x.value for x in ns.config_items}
except SQLAlchemyError as ex:
    # MySQL error 1146 ("table doesn't exist") is expected on a fresh
    # install before migrations have run and is ignored. The original
    # silently swallowed *every* SQLAlchemyError; anything else is now
    # re-raised so real database problems surface.
    if str(ex).find('1146') == -1:
        raise
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def key_exists(self, namespace, key):
"""Checks a namespace for the existence of a specific key Args: namespace (str):
Namespace to check in key (str):
Name of the key to check for Returns: `True` if key exists in the namespace, else `False` """ |
return namespace in self.__data and key in self.__data[namespace] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, namespace, key):
"""Remove a configuration item from the database Args: namespace (`str`):
Namespace of the config item key (`str`):
Key to delete Returns: `None` """ |
# Guard clause: unknown namespace/key combinations raise KeyError.
if not self.key_exists(namespace, key):
    raise KeyError('{}/{}'.format(namespace, key))

item = db.ConfigItem.find_one(
    ConfigItem.namespace_prefix == namespace,
    ConfigItem.key == key
)
# Drop from the in-memory cache, then from the database.
del self.__data[namespace][key]
db.session.delete(item)
db.session.commit()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def post(self):
"""Create a new role""" |
self.reqparse.add_argument('name', type=str, required=True)
self.reqparse.add_argument('color', type=str, required=True)
args = self.reqparse.parse_args()
role = Role()
role.name = args['name']
role.color = args['color']
db.session.add(role)
db.session.commit()
auditlog(event='role.create', actor=session['user'].username, data=args)
return self.make_response('Role {} has been created'.format(role.role_id), HTTP.CREATED) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, roleId):
"""Get a specific role information""" |
role = db.Role.find_one(Role.role_id == roleId)
if not role:
return self.make_response('No such role found', HTTP.NOT_FOUND)
return self.make_response({'role': role}) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, roleId):
"""Delete a user role""" |
role = db.Role.find_one(Role.role_id == roleId)
if not role:
return self.make_response('No such role found', HTTP.NOT_FOUND)
if role.name in ('User', 'Admin'):
return self.make_response('Cannot delete the built-in roles', HTTP.BAD_REQUEST)
db.session.delete(role)
db.session.commit()
auditlog(event='role.delete', actor=session['user'].username, data={'roleId': roleId})
return self.make_response({
'message': 'Role has been deleted',
'roleId': roleId
}) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __send_ses_email(self, recipients, subject, body_html, body_text):
"""Send an email using SES Args: recipients (`1ist` of `str`):
List of recipient email addresses subject (str):
Subject of the email body_html (str):
HTML body of the email body_text (str):
Text body of the email Returns: `None` """ |
# Sending-authorization ARNs are optional; fetched up front.
source_arn = dbconfig.get('source_arn', NS_EMAIL)
return_arn = dbconfig.get('return_path_arn', NS_EMAIL)

aws = get_local_aws_session()
ses = aws.client('ses', region_name=dbconfig.get('ses_region', NS_EMAIL, 'us-west-2'))

# Include only the body parts that were actually provided.
body = {}
if body_html:
    body['Html'] = {'Data': body_html}
if body_text:
    body['Text'] = {'Data': body_text}

ses_options = {
    'Source': self.sender,
    'Destination': {'ToAddresses': recipients},
    'Message': {
        'Subject': {'Data': subject},
        'Body': body
    }
}

# Attach the SES sending-authorization ARNs only when both are configured.
if source_arn and return_arn:
    ses_options['SourceArn'] = source_arn
    ses_options['ReturnPathArn'] = return_arn

ses.send_email(**ses_options)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __send_smtp_email(self, recipients, subject, html_body, text_body):
"""Send an email using SMTP Args: recipients (`list` of `str`):
List of recipient email addresses subject (str):
Subject of the email html_body (str):
HTML body of the email text_body (str):
Text body of the email Returns: `None` """ |
# Connect to the configured SMTP relay (defaults: localhost:25).
smtp = smtplib.SMTP(
    dbconfig.get('smtp_server', NS_EMAIL, 'localhost'),
    dbconfig.get('smtp_port', NS_EMAIL, 25)
)
source_arn = dbconfig.get('source_arn', NS_EMAIL)
return_arn = dbconfig.get('return_path_arn', NS_EMAIL)
from_arn = dbconfig.get('from_arn', NS_EMAIL)
msg = MIMEMultipart('alternative')

# When relaying through SES over SMTP, pass the sending-authorization ARNs
# as X-SES-* headers; only set them when all three are configured.
if source_arn and from_arn and return_arn:
    msg['X-SES-SOURCE-ARN'] = source_arn
    msg['X-SES-FROM-ARN'] = from_arn
    msg['X-SES-RETURN-PATH-ARN'] = return_arn

msg['Subject'] = subject
msg['To'] = ','.join(recipients)
msg['From'] = self.sender

# Attach only the body parts that were provided, to avoid empty MIME parts.
if html_body:
    html_part = MIMEText(html_body, 'html')
    msg.attach(html_part)

if text_body:
    text_part = MIMEText(text_body, 'plain')
    msg.attach(text_part)

# Upgrade the connection to TLS when configured.
if dbconfig.get('smtp_tls', NS_EMAIL, False):
    smtp.starttls()

# Authenticate only when both credentials are present.
username = dbconfig.get('smtp_username', NS_EMAIL)
password = dbconfig.get('smtp_password', NS_EMAIL)
if username and password:
    smtp.login(username, password)

smtp.sendmail(self.sender, recipients, msg.as_string())
smtp.quit()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def default(self, obj):
"""Default object encoder function Args: obj (:obj:`Any`):
Object to be serialized Returns: JSON string """ |
if isinstance(obj, datetime):
return obj.isoformat()
if issubclass(obj.__class__, Enum.__class__):
return obj.value
to_json = getattr(obj, 'to_json', None)
if to_json:
out = obj.to_json()
if issubclass(obj.__class__, Model):
out.update({'__type': obj.__class__.__name__})
return out
return JSONEncoder.default(self, obj) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_truthy(value, default=False):
"""Evaluate a value for truthiness True False True Args: value (Any):
Value to evaluate default (bool):
Optional default value, if the input does not match the true or false values Returns: True if a truthy value is passed, else False """ |
# None is always falsy.
if value is None:
    return False

# Booleans pass straight through.
if isinstance(value, bool):
    return value

# Any positive integer counts as truthy.
if isinstance(value, int):
    return value > 0

# String matching is case-insensitive and ignores surrounding whitespace.
trues = ('1', 'true', 'y', 'yes', 'ok')
falses = ('', '0', 'false', 'n', 'none', 'no')

if value.lower().strip() in falses:
    return False
elif value.lower().strip() in trues:
    return True
else:
    # NOTE(review): a falsy `default` (including the documented default of
    # False) can never be returned here — unrecognized input raises instead.
    # Confirm whether callers rely on the ValueError before changing this.
    if default:
        return default
    else:
        raise ValueError('Invalid argument given to truthy: {0}'.format(value))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate_email(email, partial_match=False):
"""Perform email address validation True False True Args: email (str):
Email address to match partial_match (bool):
If False (default), the entire string must be a valid email address. If true, any valid email address in the string will trigger a valid response Returns: True if the value contains an email address, else False """ |
rgx = re.compile(RGX_EMAIL_VALIDATION_PATTERN, re.I)
if partial_match:
return rgx.search(email) is not None
else:
return rgx.match(email) is not None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_template(template):
"""Return a Jinja2 template by filename Args: template (str):
Name of the template to return Returns: A Jinja2 Template object """ |
from cloud_inquisitor.database import db
tmpl = db.Template.find_one(template_name=template)
if not tmpl:
raise InquisitorError('No such template found: {}'.format(template))
tmplenv = Environment(loader=BaseLoader, autoescape=True)
tmplenv.filters['json_loads'] = json.loads
tmplenv.filters['slack_quote_join'] = lambda data: ', '.join('`{}`'.format(x) for x in data)
return tmplenv.from_string(tmpl.template) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_utc_date(date):
"""Convert a datetime object from local to UTC format datetime.datetime(2017, 8, 16, 1, 24, 31) Args: date (`datetime`):
Input datetime object Returns: `datetime` """ |
return datetime.utcfromtimestamp(float(date.strftime('%s'))).replace(tzinfo=None) if date else None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_password(length=32):
"""Generate a cryptographically secure random string to use for passwords Args: length (int):
Length of password, defaults to 32 characters Returns: Randomly generated string """ |
return ''.join(random.SystemRandom().choice(string.ascii_letters + '!@#$+.,') for _ in range(length)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_jwt_key_data():
"""Returns the data for the JWT private key used for encrypting the user login token as a string object Returns: `str` """ |
global __jwt_data
if __jwt_data:
return __jwt_data
from cloud_inquisitor import config_path
from cloud_inquisitor.config import dbconfig
jwt_key_file = dbconfig.get('jwt_key_file_path', default='ssl/private.key')
if not os.path.isabs(jwt_key_file):
jwt_key_file = os.path.join(config_path, jwt_key_file)
with open(os.path.join(jwt_key_file), 'r') as f:
__jwt_data = f.read()
return __jwt_data |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def has_access(user, required_roles, match_all=True):
"""Check if the user meets the role requirements. If mode is set to AND, all the provided roles must apply Args: user (:obj:`User`):
User object required_roles (`list` of `str`):
List of roles that the user must have applied match_all (`bool`):
If true, all the required_roles must be applied to the user, else any one match will return `True` Returns: `bool` """ |
# Admins have access to everything
if ROLE_ADMIN in user.roles:
return True
if isinstance(required_roles, str):
if required_roles in user.roles:
return True
return False
# If we received a list of roles to match against
if match_all:
for role in required_roles:
if role not in user.roles:
return False
return True
else:
for role in required_roles:
if role in user.roles:
return True
return False |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_lists(*args):
    """Merge any number of lists into a single deduplicated list.

    Items are deduplicated on their ``value`` attribute; when the same value
    appears in several lists, the item from the last list provided wins.

    Args:
        *args: Two or more lists; ``None`` or empty entries are skipped

    Returns:
        `list`: Deduplicated, merged list
    """
    merged = {}
    for contact_list in args:
        if not contact_list:
            continue
        for contact in contact_list:
            merged[contact.value] = contact

    return list(merged.values())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_resource_id(prefix, *data):
    """Return a unique ID based on the SHA256 hash of the provided data.

    The input is flattened and sorted so that identical hashes are produced
    regardless of input ordering. Values must be `str`, `int` or `float`;
    any other type raises a `ValueError`.

    Args:
        prefix (`str`): Key prefix, e.g. ``'ec2'``
        *data (`str`, `int`, `float`, `list`, `tuple`): Data used to
            generate the unique ID

    Returns:
        `str`: ID of the form ``'<prefix>-<16 hex chars>'``
    """
    parts = flatten(data)
    for part in parts:
        if type(part) not in (str, int, float):
            raise ValueError('Supported data types: int, float, list, tuple, str. Got: {}'.format(type(part)))

    digest = get_hash('-'.join(sorted(map(str, parts))))
    return '{}-{}'.format(prefix, digest[-16:])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_date(date_string, ignoretz=True):
    """Parse a string as a date, returning `None` on failure.

    Example: ``parse_date('2017-08-15 18:24:31')`` returns
    ``datetime.datetime(2017, 8, 15, 18, 24, 31)``.

    Args:
        date_string (`str`): Date in string format to parse
        ignoretz (`bool`): If `True`, ignore time zones and return a naive
            :class:`datetime` object

    Returns:
        `datetime`, `None`
    """
    try:
        return parser.parse(date_string, ignoretz=ignoretz)
    except (TypeError, ValueError, OverflowError):
        # dateutil raises ValueError for unparseable strings and
        # OverflowError for out-of-range values; the documented contract is
        # to return None on *any* parse failure, not just bad input types,
        # so catching only TypeError (as before) let those escape
        return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_user_data_configuration():
    """Fetch the encrypted instance user-data and apply it to the app config.

    Downloads the user-data blob, decrypts it with KMS and updates
    ``app_config.database_uri`` from the decrypted payload.

    Raises:
        RuntimeError: If the user-data could not be fetched

    Returns:
        `None`
    """
    from cloud_inquisitor import get_local_aws_session, app_config

    kms_region = app_config.kms_region
    session = get_local_aws_session()

    # With an instance profile we can talk to KMS directly; otherwise we
    # must first assume the configured instance role
    if session.get_credentials().method == 'iam-role':
        kms = session.client('kms', region_name=kms_region)
    else:
        sts = session.client('sts')
        audit_role = sts.assume_role(RoleArn=app_config.aws_api.instance_role_arn, RoleSessionName='cloud_inquisitor')
        kms = boto3.session.Session(
            audit_role['Credentials']['AccessKeyId'],
            audit_role['Credentials']['SecretAccessKey'],
            audit_role['Credentials']['SessionToken'],
        ).client('kms', region_name=kms_region)

    res = requests.get(app_config.user_data_url)
    if res.status_code != 200:
        raise RuntimeError('Failed loading user-data, cannot continue: {}: {}'.format(res.status_code, res.content))

    decrypted = kms.decrypt(CiphertextBlob=b64decode(res.content))
    kms_config = json.loads(zlib.decompress(decrypted['Plaintext']).decode('utf-8'))
    app_config.database_uri = kms_config['db_uri']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flatten(data):
    """Return a flattened version of a nested list/tuple.

    Args:
        data (`tuple` or `list`): Input data

    Returns:
        `list`: Flattened data (falsy input is returned unchanged)
    """
    if not data:
        return data

    out = []
    for item in data:
        # only exact list/tuple types are descended into, matching the
        # original slice-based recursion's type(x) in (list, tuple) test
        if type(item) in (list, tuple):
            out.extend(flatten(item))
        else:
            out.append(item)
    return out
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def diff(a, b):
    """Return a human-readable difference between two strings.

    See https://docs.python.org/3/library/difflib.html#difflib.Differ for
    details about the output format.

    Args:
        a (str): Original string
        b (str): New string

    Returns:
        `str`
    """
    left = a.splitlines(keepends=True)
    right = b.splitlines(keepends=True)
    return ''.join(Differ().compare(left, right))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build(bucket_name, version, force, verbose):
    """Build the frontend assets and upload a new tarball to S3.

    Args:
        bucket_name (str): Name of the bucket to upload to
        version (str): Override build version. Defaults to SCM based
            versioning (git tags)
        force (bool): Overwrite existing files in S3, if present
        verbose (bool): Verbose output
    """
    if verbose:
        log.setLevel('DEBUG')

    if not version:
        version = setuptools_scm.get_version()

    release = "dev" if "dev" in version else "release"
    tarball = TARBALL_FORMAT.format(version)
    tarball_path = os.path.join(tempfile.gettempdir(), tarball)
    s3_key = os.path.join(release, tarball)

    try:
        run('npm i')
        run('./node_modules/.bin/gulp build.prod')
    except ExecutionError:
        log.exception('Failed executing command')
        return

    log.debug('Creating archive')
    # context manager guarantees the archive is finalized and closed even
    # if adding a file raises (the original leaked the handle on error)
    with tarfile.open(tarball_path, "w:gz") as tar:
        for root, dirnames, filenames in os.walk('dist'):
            for f in filenames:
                tar.add(os.path.join(root, f), recursive=False, filter=strip_path)

    log.debug('Uploading {} to s3://{}/{}'.format(tarball, bucket_name, s3_key))
    try:
        bucket = get_bucket_resource(bucket_name)
        if s3_file_exists(bucket, s3_key) and not force:
            log.error('File already exists in S3, use --force to overwrite')
            return

        # reuse s3_key instead of recomputing the identical join
        bucket.upload_file(tarball_path, s3_key)
    except ClientError:
        log.exception('AWS API failure')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(cls, issue_id):
    """Return the issue object identified by ``issue_id``.

    Args:
        issue_id (str): Unique issue ID to load from the database

    Returns:
        Issue object of this class if found, else `None`
    """
    issue = Issue.get(issue_id, IssueType.get(cls.issue_type).issue_type_id)
    if not issue:
        return None
    return cls(issue)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create(cls, issue_id, *, properties=None, auto_commit=False):
    """Create a new Issue object with the provided properties.

    Args:
        issue_id (str): Unique identifier for the issue object
        properties (dict): Dictionary of properties for the issue object
        auto_commit (bool): Commit the DB session after adding the issue

    Returns:
        The newly created issue object

    Raises:
        IssueException: If an issue with the same ID already exists
    """
    if cls.get(issue_id):
        raise IssueException('Issue {} already exists'.format(issue_id))

    issue = Issue()
    issue.issue_id = issue_id
    issue.issue_type_id = IssueType.get(cls.issue_type).issue_type_id

    for name, value in (properties or {}).items():
        prop = IssueProperty()
        prop.issue_id = issue.issue_id
        prop.name = name
        # datetimes are persisted in their ISO-8601 string form
        prop.value = value.isoformat() if type(value) == datetime else value
        issue.properties.append(prop)
        db.session.add(prop)

    db.session.add(issue)
    if auto_commit:
        db.session.commit()

    return cls.get(issue.issue_id)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_all(cls):
    """Return all issues of this type, keyed by issue ID.

    Returns:
        `dict`: Mapping of issue ID to issue object
    """
    type_id = IssueType.get(cls.issue_type).issue_type_id
    found = db.Issue.find(Issue.issue_type_id == type_id)
    return {issue.issue_id: cls(issue) for issue in found}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def search(cls, *, limit=100, page=1, properties=None, return_query=False):
    """Search for issues based on the provided filters.

    Args:
        limit (`int`): Number of results to return. Default: 100
        page (`int`): Pagination offset for results. Default: 1
        properties (`dict`): Property name/value pairs to filter on. A value
            may also be a list of strings, in which case a boolean OR search
            is performed across the values
        return_query (`bool`): Return the query object before limit/offset
            are applied, allowing sub-classes to amend the search with extra
            conditions. The caller must then handle pagination itself

    Returns:
        `tuple` of (`int`, `list` of issue objects), or
        `sqlalchemy.orm.Query` when ``return_query`` is set
    """
    qry = db.Issue.order_by(Issue.issue_id).filter(
        Issue.issue_type_id == IssueType.get(cls.issue_type).issue_type_id
    )

    for prop_name, value in (properties or {}).items():
        # each property filter joins its own alias of the property table so
        # multiple property conditions do not clash
        alias = aliased(IssueProperty)
        qry = qry.join(alias, Issue.issue_id == alias.issue_id)

        if type(value) == list:
            # multiple candidate values: boolean OR across them
            value_clauses = [alias.value == item for item in value]
            qry = qry.filter(
                and_(
                    alias.name == prop_name,
                    or_(*value_clauses)
                ).self_group()
            )
        else:
            qry = qry.filter(
                and_(
                    alias.name == prop_name,
                    alias.value == value
                ).self_group()
            )

    if return_query:
        return qry

    total = qry.count()
    qry = qry.limit(limit).offset((page - 1) * limit if page > 1 else 0)

    return total, [cls(row) for row in qry.all()]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def state_name(self):
    """Return a human-readable name for the current state.

    Returns:
        str: Name of the current state

    Raises:
        ValueError: If the state value is not a known state
    """
    names = {
        1: 'New Issue',
        2: 'Shutdown in 1 week',
        3: 'Shutdown in 1 day',
        4: 'Pending Shutdown',
        5: 'Stopped, delete in 12 weeks',
        6: 'Instance deleted',
    }
    if self.state not in names:
        raise ValueError('Invalid state: {}'.format(self.state))
    return names[self.state]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copy(src, dst, merge, write_v1=True, excluded_tags=None, verbose=False):
    """Copy the ID3 tag from ``src`` onto ``dst``.

    Returns 0 on success, 1 on any error.
    """
    if excluded_tags is None:
        excluded_tags = []

    # load the source tag; any failure here is fatal
    try:
        id3 = mutagen.id3.ID3(src, translate=False)
    except mutagen.id3.ID3NoHeaderError:
        print_(u"No ID3 header found in ", src, file=sys.stderr)
        return 1
    except Exception as err:
        print_(str(err), file=sys.stderr)
        return 1

    if verbose:
        print_(u"File", src, u"contains:", file=sys.stderr)
        print_(id3.pprint(), file=sys.stderr)

    for tag in excluded_tags:
        id3.delall(tag)

    if merge:
        try:
            target = mutagen.id3.ID3(dst, translate=False)
        except mutagen.id3.ID3NoHeaderError:
            # destination has no existing tag, nothing to merge into
            pass
        except Exception as err:
            print_(str(err), file=sys.stderr)
            return 1
        else:
            # frames from the source overwrite matching destination frames
            for frame in id3.values():
                target.add(frame)
            id3 = target

    # keep the source's tag version: 2.3 stays 2.3, everything else is
    # written as 2.4
    if id3.version < (2, 4, 0):
        id3.update_to_v23()
        v2_version = 3
    else:
        id3.update_to_v24()
        v2_version = 4

    try:
        id3.save(dst, v1=(2 if write_v1 else 0), v2_version=v2_version)
    except Exception as err:
        print_(u"Error saving", dst, u":\n%s" % text_type(err),
               file=sys.stderr)
        return 1

    if verbose:
        print_(u"Successfully saved", dst, file=sys.stderr)
    return 0
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_tags(self, ID3=None):
    """Add an empty ID3 tag to the file.

    Args:
        ID3 (ID3): An ID3 subclass to use, or `None` to use the one used
            when loading. A custom tag reader such as
            `mutagen.easyid3.EasyID3` may be supplied instead of the default
            `ID3` class.

    Raises:
        error: If the file already has an ID3 tag
    """
    if self.tags is not None:
        raise error("an ID3 tag already exists")

    if ID3 is None:
        ID3 = self.ID3
    else:
        self.ID3 = ID3
    self.tags = ID3()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def RegisterKey(cls, key, getter=None, setter=None, deleter=None, lister=None):
    """Register a new key mapping.

    A key mapping consists of four functions: a getter, setter, deleter and
    lister. ``key`` may be a plain string or a glob pattern. The getter,
    deleter and lister receive an MP4Tags instance and the requested key
    name; the setter additionally receives the desired value (a list of
    strings). Getter, setter and deleter implement ``__getitem__`` /
    ``__setitem__`` / ``__delitem__``; the lister backs ``keys()`` and
    should return the keys its getter can actually provide.
    """
    key = key.lower()
    registry = (
        (cls.Get, getter),
        (cls.Set, setter),
        (cls.Delete, deleter),
        (cls.List, lister),
    )
    for table, func in registry:
        if func is not None:
            table[key] = func
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def RegisterIntKey(cls, key, atomid, min_value=0, max_value=(2 ** 16) - 1):
    """Register a scalar integer key stored under ``atomid``.

    Values are clamped into ``[min_value, max_value]`` when set and exposed
    back to callers as strings when read.
    """
    def getter(tags, key):
        # stored integers are presented to callers as strings
        return [text_type(v) for v in tags[atomid]]

    def setter(tags, key, value):
        def clamp(x):
            return int(min(max(min_value, x), max_value))

        tags[atomid] = [clamp(int(v)) for v in value]

    def deleter(tags, key):
        del tags[atomid]

    cls.RegisterKey(key, getter, setter, deleter)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pprint(self):
    """Return tag key=value pairs, one per line."""
    lines = [
        "%s=%s" % (key, value)
        for key in sorted(self.keys())
        for value in self[key]
    ]
    return "\n".join(lines)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def has_valid_padding(value, bits=7):
    """Return whether all padding bits (those above ``bits``) are zero.

    ``value`` may be an integer or a bytes object; any other type raises
    `TypeError`.
    """
    assert bits <= 8

    # mask selects the (8 - bits) high bits of each byte
    mask = ((1 << (8 - bits)) - 1) << bits

    if isinstance(value, integer_types):
        while value:
            if value & mask:
                return False
            value >>= 8
        return True

    if isinstance(value, bytes):
        for byte in bytearray(value):
            if byte & mask:
                return False
        return True

    raise TypeError
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_stream(cls, fileobj, max_bytes):
    """Return a possibly valid _ADTSStream, or `None` if no sync was found.

    Args:
        max_bytes (int): Maximum number of bytes to read while searching
    """
    reader = BitReader(fileobj)
    stream = cls(reader)
    if not stream.sync(max_bytes):
        return None
    # sync() leaves the reader 12 bits past the start of the sync word
    stream.offset = (reader.get_position() - 12) // 8
    return stream
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sync(self, max_bytes):
    """Scan forward for the next ADTS sync word.

    Returns `True` if a sync (12 set bits) was found within ``max_bytes``.
    """
    # at least 2 bytes are needed to hold the 12-bit sync word
    remaining = max(max_bytes, 2)
    reader = self._r
    reader.align()

    while remaining > 0:
        try:
            if reader.bytes(1) == b"\xff":
                if reader.bits(4) == 0xF:
                    return True
                # false positive: realign to a byte boundary and continue
                reader.align()
                remaining -= 2
            else:
                remaining -= 1
        except BitReaderError:
            return False

    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self, filething=None, v2_version=4, v23_sep='/', padding=None):
    """Save ID3v2 data to the DSF file."""
    f = filething.fileobj
    f.seek(0)

    header = DSDChunk(f)
    if header.offset_metdata_chunk == 0:
        # no metadata chunk yet: create a new ID3 chunk at the end of the
        # file and record its location in the DSD header
        f.seek(0, 2)
        header.offset_metdata_chunk = f.tell()
        header.write()

    try:
        tag_data = self._prepare_data(
            f, header.offset_metdata_chunk, self.size,
            v2_version, v23_sep, padding)
    except ID3Error as e:
        reraise(error, e, sys.exc_info()[2])

    f.seek(header.offset_metdata_chunk)
    f.write(tag_data)
    f.truncate()

    # the file may have grown or shrunk; refresh the recorded total size
    header.total_size = f.tell()
    header.write()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read(self, fileobj):
    """Read the atom payload.

    Returns:
        tuple: (`bool` - whether all data could be read, `bytes` - payload)
    """
    fileobj.seek(self._dataoffset, 0)
    payload = fileobj.read(self.datalength)
    complete = len(payload) == self.datalength
    return complete, payload
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render(name, data):
    """Render a raw MP4 atom: size field, fourcc name, then the payload.

    Atoms whose total size exceeds 32 bits use the extended-size form (size
    field set to 1 followed by a 64-bit length).
    """
    # may raise OverflowError if Py_ssize_t cannot hold the atom data
    size = len(data) + 8
    if size > 0xFFFFFFFF:
        # extended size: header grows by the 8-byte 64-bit length field
        return struct.pack(">I4sQ", 1, name, size + 8) + data
    return struct.pack(">I4s", size, name) + data
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.