text_prompt
stringlengths 157
13.1k
| code_prompt
stringlengths 7
19.8k
⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_version(addon_dir, manifest, odoo_version_override=None, git_post_version=True):
    """Extract version information from an addon directory.

    Derives the Odoo series from the manifest version (or takes the
    override), validates it against the known series, and optionally
    replaces the version with a git-based post-version.

    Returns:
        tuple: (version, odoo_version, odoo_version_info)

    Raises:
        DistutilsSetupError: on a malformed version or unsupported series.
    """
    version = manifest.get('version')
    if not version:
        warn("No version in manifest in %s" % addon_dir)
        version = '0.0.0'
    if odoo_version_override:
        odoo_version = odoo_version_override
    else:
        components = version.split('.')
        if len(components) < 5:
            raise DistutilsSetupError(
                "Version in manifest must have at least "
                "5 components and start with "
                "the Odoo series number in %s" % addon_dir)
        # The first two components are the Odoo series, e.g. '12.0'.
        odoo_version = '.'.join(components[:2])
    if odoo_version not in ODOO_VERSION_INFO:
        raise DistutilsSetupError("Unsupported odoo version '%s' in %s" %
                                  (odoo_version, addon_dir))
    odoo_version_info = ODOO_VERSION_INFO[odoo_version]
    if git_post_version:
        # Replace the manifest version with one derived from git history.
        version = get_git_postversion(addon_dir)
    return version, odoo_version, odoo_version_info
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_install_requires_odoo_addon(addon_dir,
                                    no_depends=None,
                                    depends_override=None,
                                    external_dependencies_override=None,
                                    odoo_version_override=None):
    """Get the list of requirements for an addon.

    Args:
        addon_dir: path to the addon directory.
        no_depends: addon names to exclude from the requirements.
        depends_override: mapping of addon name to requirement override.
        external_dependencies_override: mapping overriding external deps.
        odoo_version_override: force the Odoo series instead of reading it
            from the manifest version.

    Returns:
        The install_requires list computed by ``_get_install_requires``.
    """
    # BUG FIX: the defaults were mutable ([] / {}), shared across calls;
    # use None sentinels and create fresh containers per call.
    no_depends = [] if no_depends is None else no_depends
    depends_override = {} if depends_override is None else depends_override
    external_dependencies_override = (
        {} if external_dependencies_override is None
        else external_dependencies_override)
    manifest = read_manifest(addon_dir)
    _, _, odoo_version_info = _get_version(addon_dir,
                                           manifest,
                                           odoo_version_override,
                                           git_post_version=False)
    return _get_install_requires(odoo_version_info,
                                 manifest,
                                 no_depends,
                                 depends_override,
                                 external_dependencies_override)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_install_requires_odoo_addons(addons_dir,
                                     depends_override=None,
                                     external_dependencies_override=None,
                                     odoo_version_override=None):
    """Get the list of requirements for a directory containing addons.

    Scans *addons_dir* for installable addons, merges their requirements,
    and excludes dependencies on the sibling addons themselves.

    Returns:
        Sorted list of unique requirement strings.
    """
    # BUG FIX: the defaults were mutable ({}), shared across calls;
    # use None sentinels and create fresh containers per call.
    depends_override = {} if depends_override is None else depends_override
    external_dependencies_override = (
        {} if external_dependencies_override is None
        else external_dependencies_override)
    addons = os.listdir(addons_dir)
    addon_dirs = [os.path.join(addons_dir, addon)
                  for addon in addons
                  if is_installable_addon(os.path.join(addons_dir, addon))]
    install_requires = set()
    for addon_dir in addon_dirs:
        # Sibling addons in the same directory are not external requirements,
        # hence no_depends=addons.
        r = get_install_requires_odoo_addon(
            addon_dir,
            no_depends=addons,
            depends_override=depends_override,
            external_dependencies_override=external_dependencies_override,
            odoo_version_override=odoo_version_override,
        )
        install_requires.update(r)
    return sorted(install_requires)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_declarative_base(self, metadata=None):
    """Override the parent implementation with alchy's declarative-base factory."""
    base = make_declarative_base(self.session,
                                 Model=self.Model,
                                 metadata=metadata)
    return base
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prep_doc(self, doc_obj):
    """Prepare a document dict for persistence.

    For each base property: validates it, coerces it to its Python value,
    checks unique indexes, converts it to its DB value, and finally stamps
    the document type. Useful for save and backup functions.

    @param doc_obj: the document object to prepare
    @return: the prepared doc dict
    """
    prepared = doc_obj._data.copy()
    for name, prop in list(doc_obj._base_properties.items()):
        prop.validate(prepared.get(name), name)
        python_value = prop.get_python_value(prepared.get(name))
        if prop.unique:
            # Unique-indexed properties are checked before conversion.
            self.check_unique(doc_obj, name, python_value)
        prepared[name] = prop.get_db_value(python_value)
    prepared['_doc_type'] = get_doc_type(doc_obj.__class__)
    return prepared
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def apply_zappa_settings(zappa_obj, zappa_settings, environment):
    """Load Zappa settings for *environment*, fill in defaults, and copy
    recognized custom settings onto *zappa_obj*.

    Returns the merged settings dict.

    Raises:
        SettingsError: if the configured settings_file does not exist.
    """
    settings = json.load(zappa_settings)[environment]
    # Fill in defaults for any options the environment did not specify.
    for option, default in DEFAULT_SETTINGS.items():
        settings.setdefault(option, default)
    if '~' in settings['settings_file']:
        settings['settings_file'] = settings['settings_file'].replace(
            '~', os.path.expanduser('~'))
    if not os.path.isfile(settings['settings_file']):
        raise SettingsError("Please make sure your settings_file "
                            "is properly defined in {0}.".format(zappa_settings))
    # Only settings Zappa knows about are copied onto the object.
    for custom in CUSTOM_SETTINGS:
        if custom in settings:
            setattr(zappa_obj, custom, settings[custom])
    return settings
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deploy(environment, zappa_settings):
    """Package, create and deploy to Lambda.

    Builds the deployment zip, provisions IAM roles, the Lambda function,
    and the API Gateway, then prints the live endpoint URL. The local and
    S3 copies of the zip are cleaned up afterwards.
    """
    print(("Deploying " + environment))
    zappa, settings, lambda_name, zip_path = \
        _package(environment, zappa_settings)
    s3_bucket_name = settings['s3_bucket']
    try:
        # Load your AWS credentials from ~/.aws/credentials
        zappa.load_credentials()
        # Make sure the necessary IAM execution roles are available
        zappa.create_iam_roles()
        # Upload the package to S3 so Lambda can pull it from there.
        # (The returned ARN was previously bound to an unused variable.)
        zappa.upload_to_s3(zip_path, s3_bucket_name)
        # Register the Lambda function with that zip as the source.
        # You'll also need to define the path to your lambda_handler code.
        lambda_arn = zappa.create_lambda_function(bucket=s3_bucket_name,
                                                  s3_key=zip_path,
                                                  function_name=lambda_name,
                                                  handler='handler.lambda_handler',
                                                  vpc_config=settings['vpc_config'],
                                                  memory_size=settings['memory_size'])
        # Create and configure the API Gateway
        api_id = zappa.create_api_gateway_routes(lambda_arn, lambda_name)
        # Deploy the API!
        endpoint_url = zappa.deploy_api_gateway(api_id, environment)
        # Remove the uploaded zip from S3, because it is now registered.
        zappa.remove_from_s3(zip_path, s3_bucket_name)
        if settings['touch']:
            # Warm up / verify the new endpoint with a single request.
            requests.get(endpoint_url)
    finally:
        try:
            # Finally, delete the local copy of our zip package
            if settings['delete_zip']:
                os.remove(zip_path)
        # BUG FIX: was a bare "except:", which also swallowed SystemExit
        # and KeyboardInterrupt; best-effort cleanup is kept.
        except Exception:
            print("WARNING: Manual cleanup of the zip might be needed.")
    print(("Your Zappa deployment is live!: " + endpoint_url))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(environment, zappa_settings):
    """Update an existing deployment.

    Re-packages the project, uploads the new zip to S3, points the
    existing Lambda function at it, and cleans up both copies of the zip.
    """
    print(("Updating " + environment))
    # Package dependencies, and the source code into a zip
    zappa, settings, lambda_name, zip_path = \
        _package(environment, zappa_settings)
    s3_bucket_name = settings['s3_bucket']
    try:
        # Load your AWS credentials from ~/.aws/credentials
        zappa.load_credentials()
        # Update IAM roles if needed
        zappa.create_iam_roles()
        # Upload the new package to S3.
        # (The returned ARNs were previously bound to unused variables.)
        zappa.upload_to_s3(zip_path, s3_bucket_name)
        # Point the existing Lambda function at the fresh zip.
        zappa.update_lambda_function(s3_bucket_name, zip_path, lambda_name)
        # Remove the uploaded zip from S3, because it is now registered.
        zappa.remove_from_s3(zip_path, s3_bucket_name)
    finally:
        try:
            # Finally, delete the local copy of our zip package
            if settings['delete_zip']:
                os.remove(zip_path)
        # BUG FIX: was a bare "except:", which also swallowed SystemExit
        # and KeyboardInterrupt; best-effort cleanup is kept.
        except Exception:
            print("WARNING: Manual cleanup of the zip might be needed.")
    print("Your updated Zappa deployment is live!")
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lambda_handler(event, context, settings_name="zappa_settings"):
""" An AWS Lambda function which parses specific API Gateway input into a WSGI request, feeds it to Flask, procceses the Flask response, and returns that back to the API Gateway. """
|
# Loading settings from a python module
settings = importlib.import_module(settings_name)
# The flask-app module
app_module = importlib.import_module(settings.APP_MODULE)
# The flask-app
app = getattr(app_module, settings.APP_OBJECT)
app.config.from_object('zappa_settings')
# Wrap the WSGI app so the middleware can adapt it for API Gateway.
app.wsgi_app = ZappaWSGIMiddleware(app.wsgi_app)
# This is a normal HTTP request
# NOTE(review): assumes the API Gateway mapping template supplies
# 'method' and 'params' keys on the event -- confirm against the
# template used at deploy time.
if event.get('method', None):
# If we just want to inspect this,
# return this event instead of processing the request
# https://your_api.aws-api.com/?event_echo=true
event_echo = getattr(settings, "EVENT_ECHO", True)
if event_echo:
if 'event_echo' in list(event['params'].values()):
return {'Content': str(event) + '\n' + str(context), 'Status': 200}
# TODO: Enable Let's Encrypt
# # If Let's Encrypt is defined in the settings,
# # and the path is your.domain.com/.well-known/acme-challenge/{{lets_encrypt_challenge_content}},
# # return a 200 of lets_encrypt_challenge_content.
# lets_encrypt_challenge_path = getattr(settings, "LETS_ENCRYPT_CHALLENGE_PATH", None)
# lets_encrypt_challenge_content = getattr(settings, "LETS_ENCRYPT_CHALLENGE_CONTENT", None)
# if lets_encrypt_challenge_path:
# if len(event['params']) == 3:
# if event['params']['parameter_1'] == '.well-known' and \
# event['params']['parameter_2'] == 'acme-challenge' and \
# event['params']['parameter_3'] == lets_encrypt_challenge_path:
# return {'Content': lets_encrypt_challenge_content, 'Status': 200}
# Create the environment for WSGI and handle the request
environ = create_wsgi_request(event, script_name=settings.SCRIPT_NAME,
trailing_slash=False)
# We are always on https on Lambda, so tell our wsgi app that.
environ['wsgi.url_scheme'] = 'https'
# Run the request through the Flask app via Werkzeug's Response adapter.
response = Response.from_app(app, environ)
# This doesn't work. It should probably be set right after creation, not
# at such a late stage.
# response.autocorrect_location_header = False
zappa_returndict = dict()
if response.data:
zappa_returndict['Content'] = response.data
# Pack the WSGI response into our special dictionary.
for (header_name, header_value) in response.headers:
zappa_returndict[header_name] = header_value
zappa_returndict['Status'] = response.status_code
# TODO: No clue how to handle the flask-equivalent of this. Or is this
# something entirely specified by the middleware?
# # Parse the WSGI Cookie and pack it.
# cookie = response.cookies.output()
# if ': ' in cookie:
# zappa_returndict['Set-Cookie'] = response.cookies.output().split(': ')[1]
# To ensure correct status codes, we need to
# pack the response as a deterministic B64 string and raise it
# as an error to match our APIGW regex.
# The DOCTYPE ensures that the page still renders in the browser.
if response.status_code in [400, 401, 403, 404, 500]:
content = "<!DOCTYPE html>" + str(response.status_code) + response.data
b64_content = base64.b64encode(content)
raise Exception(b64_content)
# Internal are changed to become relative redirects
# so they still work for apps on raw APIGW and on a domain.
elif response.status_code in [301, 302]:
# Location is by default relative on Flask. Location is by default
# absolute on Werkzeug. We can set autocorrect_location_header on
# the response to False, but it doesn't work. We have to manually
# remove the host part.
location = response.location
hostname = 'https://' + environ['HTTP_HOST']
if location.startswith(hostname):
location = location[len(hostname):]
# Redirects are also signalled by raising; API Gateway maps the
# exception message (the relative location) to a Location header.
raise Exception(location)
else:
return zappa_returndict
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_context_data(self, **kwargs):
    """Build the template context, adding the ``page_template`` variable.

    If ``page_template`` was not passed to *as_view*, it is derived from
    the queryset's app label and model name plus
    *self.template_name_suffix* and *self.page_template_suffix* — e.g.
    ``blog/entry_list_page.html`` for a queryset of *blog.Entry*. A plain
    list (no ``model`` attribute) with no explicit template raises
    ImproperlyConfigured.
    """
    object_list = kwargs.pop('object_list')
    page_template = kwargs.pop('page_template', None)
    alias = self.get_context_object_name(object_list)
    context = {'object_list': object_list, 'view': self}
    context.update(kwargs)
    if alias is not None:
        context[alias] = object_list
    if page_template is None:
        if not hasattr(object_list, 'model'):
            raise ImproperlyConfigured(
                'AjaxListView requires a page_template')
        page_template = self.get_page_template(**kwargs)
    self.page_template = page_template
    context['page_template'] = page_template
    return context
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean_var(text):
    """Turn text into a valid python classname or variable.

    Strips characters that are invalid anywhere in an identifier, then
    strips invalid leading characters.
    """
    without_invalid = re_invalid_var.sub('', text)
    return re_invalid_start.sub('', without_invalid)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def full_tasktrace(self):
    """List of all failed tasks caused by this and all previous errors.

    Returns:
        List[Task]
    """
    if not self.prev_error:
        return self.tasktrace
    # Prepend the previous error's trace so ordering runs oldest-first.
    return self.prev_error.tasktrace + self.tasktrace
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dist_sq(self, other=None):
    """Squared distance to *other*, or squared length if *other* is falsy.

    Useful for fast length comparisons without taking a square root.
    """
    vec = self - other if other else self
    return sum(component * component for component in vec)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def yaw_pitch(self):
""" Calculate the yaw and pitch of this vector """
|
# A zero vector has no direction; report level orientation.
if not self:
return YawPitch(0, 0)
# Length of the vector projected onto the horizontal (x/z) plane.
ground_distance = math.sqrt(self.x ** 2 + self.z ** 2)
if ground_distance:
# Yaw candidates in degrees from the x and z components.
alpha1 = -math.asin(self.x / ground_distance) / math.pi * 180
alpha2 = math.acos(self.z / ground_distance) / math.pi * 180
# acos only covers 0..180; use it to pick the correct half-plane for yaw.
if alpha2 > 90:
yaw = 180 - alpha1
else:
yaw = alpha1
# Negative pitch looks up, matching the straight-up case below.
pitch = math.atan2(-self.y, ground_distance) / math.pi * 180
else:
# Purely vertical vector: yaw is arbitrary, use 0.
yaw = 0
y = round(self.y)
if y > 0:
pitch = -90
elif y < 0:
pitch = 90
else:
pitch = 0
return YawPitch(yaw, pitch)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_slot_check(wanted):
    """Create and return a predicate that tests whether a slot matches
    the wanted item.

    Args:
        wanted: function(Slot) or Slot or itemID or (itemID, metadata)
    """
    if isinstance(wanted, types.FunctionType):
        # Already a slot-check function; use it directly.
        return wanted
    if isinstance(wanted, int):
        item_id, metadata = wanted, None
    elif isinstance(wanted, Slot):
        item_id, metadata = wanted.item_id, wanted.damage  # TODO compare NBT
    elif isinstance(wanted, (Item, Block)):
        item_id, metadata = wanted.id, wanted.metadata
    elif isinstance(wanted, str):
        found = get_item_or_block(wanted, init=True)
        item_id, metadata = found.id, found.metadata
    else:
        # Expect an (id, meta) pair.
        try:
            item_id, metadata = wanted
        except TypeError:
            raise ValueError('Illegal args for make_slot_check(): %s' % wanted)
    # metadata=None acts as a wildcard for the slot's damage value.
    return lambda slot: (item_id == slot.item_id
                         and metadata in (None, slot.damage))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _make_window(window_dict):
""" Creates a new class for that window and registers it at this module. """
|
# e.g. a window_dict name of 'chest' yields the class name 'ChestWindow'.
cls_name = '%sWindow' % camel_case(str(window_dict['name']))
bases = (Window,)
attrs = {
'__module__': sys.modules[__name__],
'name': str(window_dict['name']),
'inv_type': str(window_dict['id']),
'inv_data': window_dict,
}
# creates function-local index and size variables
# (one closure per slot entry, so each property captures its own values)
def make_slot_method(index, size=1):
if size == 1:
return lambda self: self.slots[index]
else:
return lambda self: self.slots[index:(index + size)]
# Add one property per named slot range: '<name>_slot' for a single
# slot, '<name>_slots' for a range.
for slots in window_dict.get('slots', []):
index = slots['index']
size = slots.get('size', 1)
attr_name = snake_case(str(slots['name']))
attr_name += '_slot' if size == 1 else '_slots'
slots_method = make_slot_method(index, size)
slots_method.__name__ = attr_name
attrs[attr_name] = property(slots_method)
# Add one read-only property per declared window property.
for i, prop_name in enumerate(window_dict.get('properties', [])):
# Bind i via a factory function to avoid the late-binding closure pitfall.
def make_prop_method(i):
return lambda self: self.properties[i]
prop_method = make_prop_method(i)
prop_name = snake_case(str(prop_name))
prop_method.__name__ = prop_name
attrs[prop_name] = property(prop_method)
cls = type(cls_name, bases, attrs)
# Refuse to silently overwrite an already-registered window class.
assert not hasattr(sys.modules[__name__], cls_name), \
'Window "%s" already registered at %s' % (cls_name, __name__)
setattr(sys.modules[__name__], cls_name, cls)
return cls
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_dict(self):
    """Format the slot for network packing.

    An empty slot packs only its id; otherwise damage, amount, and any
    NBT data (as 'enchants') are included.
    """
    packed = {'id': self.item_id}
    if self.item_id != constants.INV_ITEMID_EMPTY:
        packed['damage'] = self.damage
        packed['amount'] = self.amount
        if self.nbt is not None:
            packed['enchants'] = self.nbt
    return packed
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def on_success(self, inv_plugin, emit_set_slot):
    """Apply a successful click to the inventory and announce changed slots.

    Args:
        inv_plugin (InventoryPlugin): inventory plugin instance
        emit_set_slot (func): callback to signal a slot change, normally
            ``InventoryPlugin().emit_set_slot``
    """
    # apply() records every slot it modifies in self.dirty.
    self.dirty = set()
    self.apply(inv_plugin)
    for modified_slot in self.dirty:
        emit_set_slot(modified_slot)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def authenticate(self):
    """Generate an access token from the stored username and password.

    Any existing client token is invalidated if not provided.

    Returns:
        bool: True on success, False on error
    """
    payload = {
        'agent': {
            'name': 'Minecraft',
            'version': self.ygg_version,
        },
        'username': self.username,
        'password': self.password,
        'clientToken': self.client_token,
    }
    response = self._ygg_req('/authenticate', payload)
    if not response or 'error' in response:
        return False
    # Persist the session details returned by the auth server.
    self.access_token = response['accessToken']
    self.client_token = response['clientToken']
    self.available_profiles = response['availableProfiles']
    self.selected_profile = response['selectedProfile']
    return True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(self):
    """Check whether the stored access token is still valid.

    Returns:
        bool: True if valid, False otherwise
    """
    payload = {'accessToken': self.access_token}
    response = self._ygg_req('/validate', payload)
    # The auth server returns an empty body for a valid token.
    return not bool(response)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def total_stored(self, wanted, slots=None):
    """Count items of the wanted type in the current window or slot range.

    Args:
        wanted: function(Slot) or Slot or itemID or (itemID, metadata)
    """
    if slots is None:
        slots = self.window.slots
    matches = make_slot_check(wanted)
    total = 0
    for slot in slots:
        if matches(slot):
            total += slot.amount
    return total
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_slot(self, wanted, slots=None):
    """Return the first slot containing the wanted item, or None.

    Searches the given slots or, if not given, active hotbar slot,
    hotbar, inventory, open window, in this order.

    Args:
        wanted: function(Slot) or Slot or itemID or (itemID, metadata)

    Returns:
        Optional[Slot]: the first matching slot, or None if not found
    """
    return next(iter(self.find_slots(wanted, slots)), None)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_slots(self, wanted, slots=None):
    """Yield every slot containing the wanted item.

    Searches the given slots or, if not given, active hotbar slot, hotbar,
    inventory, open window, in this order.

    Args:
        wanted: function(Slot) or Slot or itemID or (itemID, metadata)
    """
    if slots is None:
        slots = self.inv_slots_preferred + self.window.window_slots
    matches = make_slot_check(wanted)
    yield from (slot for slot in slots if matches(slot))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def click_slot(self, slot, right=False):
    """Left-click or right-click the slot.

    Args:
        slot (Slot): the clicked slot; may be a ``Slot`` instance or an
            integer index. Set to ``inventory.cursor_slot`` for clicking
            outside the window.
    """
    if isinstance(slot, int):
        slot = self.window.slots[slot]
    if right:
        button = constants.INV_BUTTON_RIGHT
    else:
        button = constants.INV_BUTTON_LEFT
    return self.send_click(windows.SingleClick(slot, button))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def drop_slot(self, slot=None, drop_stack=False):
    """Drop one or all items of the slot.

    Does not wait for confirmation from the server. If you want that, use
    a ``Task`` and ``yield inventory.async.drop_slot()`` instead.

    If ``slot`` is None, drops the ``cursor_slot`` or, if that's empty,
    the currently held item (``active_slot``).

    Args:
        slot (Optional[Slot]): the dropped slot; None, an integer index,
            or a ``Slot`` instance.

    Returns:
        int: The action ID of the click
    """
    if slot is None:
        slot = self.active_slot if self.cursor_slot.is_empty \
            else self.cursor_slot
    elif isinstance(slot, int):  # also allow a slot index
        slot = self.window.slots[slot]
    if slot == self.cursor_slot:
        # Dropping items from the cursor is done via a normal click.
        return self.click_slot(self.cursor_slot, not drop_stack)
    return self.send_click(windows.DropClick(slot, drop_stack))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def inv_slots_preferred(self):
    """All available inventory slots in the preferred search order.

    Does not include the additional slots from the open window.

    1. active slot
    2. remainder of the hotbar
    3. remainder of the persistent inventory
    """
    hotbar_rest = [slot for slot in self.window.hotbar_slots
                   if slot != self.active_slot]
    return [self.active_slot] + hotbar_rest + list(self.window.inventory_slots)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_block_entity_data(self, pos_or_x, y=None, z=None):
    """Access block entity data.

    Accepts either a coordinate iterable or separate x, y, z values;
    coordinates are floored to integers before lookup.

    Returns:
        BlockEntityData subclass instance, or None if no block entity
        data is stored for that location.
    """
    if None not in (y, z):  # separate x, y, z were supplied
        pos_or_x = pos_or_x, y, z
    key = tuple(int(floor(coord)) for coord in pos_or_x)
    return self.block_entities.get(key)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_block_entity_data(self, pos_or_x, y=None, z=None, data=None):
    """Update block entity data.

    Accepts either a coordinate iterable or separate x, y, z values;
    coordinates are floored to integers before storing.

    Returns:
        The old data if block entity data was already stored for that
        location, None otherwise.
    """
    if None not in (y, z):  # separate x, y, z were supplied
        pos_or_x = pos_or_x, y, z
    key = tuple(int(floor(coord)) for coord in pos_or_x)
    previous = self.block_entities.get(key)
    self.block_entities[key] = data
    return previous
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_vlq(self, segment):
""" Parse a string of VLQ-encoded data. Returns: a list of integers. """
|
values = []
# cur accumulates one value across continuation chars; shift is its bit offset.
cur, shift = 0, 0
for c in segment:
# B64 maps a base64 character code to its 6-bit value.
val = B64[ord(c)]
# Each character is 6 bits:
# 5 of value and the high bit is the continuation.
val, cont = val & 0b11111, val >> 5
cur += val << shift
shift += 5
if not cont:
# The low bit of the unpacked value is the sign.
cur, sign = cur >> 1, cur & 1
if sign:
cur = -cur
values.append(cur)
# Reset the accumulator for the next value in the segment.
cur, shift = 0, 0
# A dangling continuation bit means the segment was truncated.
if cur or shift:
raise SourceMapDecodeError('leftover cur/shift in vlq decode')
return values
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decode(self, source):
    """Decode a source map object into a SourceMapIndex.

    The index is keyed on (dst_line, dst_column) for lookups, and a
    per-row list of columns is kept to find the nearest token when a
    location has no exact entry: fetch the row's column list,
    ``bisect_right`` for the requested column, and step back one entry
    to get the closest column <= the requested one, which keys the token.

    Raises:
        SourceMapDecodeError: if a segment references an out-of-range
            source or name, cannot be parsed, or yields a negative
            position.
    """
    # According to spec (https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit#heading=h.h7yy76c5il9v)
    # A SouceMap may be prepended with ")]}'" to cause a Javascript error.
    # If the file starts with that string, ignore the entire first line.
    if source[:4] == ")]}'" or source[:3] == ")]}":
        source = source.split('\n', 1)[1]
    smap = json.loads(source)
    sources = smap['sources']
    sourceRoot = smap.get('sourceRoot')
    names = list(map(text_type, smap['names']))
    mappings = smap['mappings']
    lines = mappings.split(';')
    if sourceRoot is not None:
        sources = list(map(partial(os.path.join, sourceRoot), sources))
    # List of all tokens
    tokens = []
    # line_index is used to identify the closest column when looking up a token
    line_index = []
    # Main index of all tokens, keyed on (line, column)
    index = {}
    # All fields except dst_col are deltas carried across segments and lines.
    dst_col, src_id, src_line, src_col, name_id = 0, 0, 0, 0, 0
    for dst_line, line in enumerate(lines):
        # Create list for columns in index
        line_index.append([])
        segments = line.split(',')
        # dst_col resets at the start of every generated line.
        dst_col = 0
        for segment in segments:
            if not segment:
                continue
            parse = self.parse_vlq(segment)
            dst_col += parse[0]
            src = None
            name = None
            if len(parse) > 1:
                try:
                    src_id += parse[1]
                    if not 0 <= src_id < len(sources):
                        raise SourceMapDecodeError(
                            "Segment %s references source %d; there are "
                            "%d sources" % (segment, src_id, len(sources))
                        )
                    src = sources[src_id]
                    src_line += parse[2]
                    src_col += parse[3]
                    if len(parse) > 4:
                        name_id += parse[4]
                        if not 0 <= name_id < len(names):
                            raise SourceMapDecodeError(
                                "Segment %s references name %d; there are "
                                "%d names" % (segment, name_id, len(names))
                            )
                        name = names[name_id]
                except IndexError:
                    raise SourceMapDecodeError(
                        "Invalid segment %s, parsed as %r"
                        % (segment, parse)
                    )
            try:
                assert dst_line >= 0, ('dst_line', dst_line)
                assert dst_col >= 0, ('dst_col', dst_col)
                assert src_line >= 0, ('src_line', src_line)
                assert src_col >= 0, ('src_col', src_col)
            except AssertionError as e:
                # BUG FIX: AssertionError has no '.message' attribute on
                # Python 3; the ('field', value) tuple is e.args[0].
                field, value = e.args[0]
                raise SourceMapDecodeError(
                    "Segment %s has negative %s (%d), in file %s"
                    % (segment, field, value, src)
                )
            token = Token(dst_line, dst_col, src, src_line, src_col, name)
            tokens.append(token)
            # Insert into main index
            index[(dst_line, dst_col)] = token
            # Insert into specific line index
            line_index[dst_line].append(dst_col)
    return SourceMapIndex(smap, tokens, line_index, index, sources)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def discover(source):
    "Given a JavaScript file, find the sourceMappingURL line"
    lines = source.splitlines()
    # Source maps are only going to exist at either the top or bottom of
    # the document. Technically, there isn't anything indicating *where*
    # it should exist, so we are generous and assume it's somewhere either
    # in the first or last 5 lines. If it's somewhere else in the
    # document, you're probably doing it wrong.
    candidates = lines if len(lines) <= 10 else lines[:5] + lines[-5:]
    for candidate in set(candidates):
        if candidate[:21] in ('//# sourceMappingURL=', '//@ sourceMappingURL='):
            # We want everything AFTER the pragma, which is 21 chars long.
            return candidate[21:].rstrip()
    # XXX: Return None or raise an exception?
    return None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean():
    """Remove build, dist, egg-info garbage."""
    garbage = ['build', 'dist', 'scikits.audiolab.egg-info',
               HTML_DESTDIR, PDF_DESTDIR]
    for entry in garbage:
        paver.path.path(entry).rmtree()
    # Also drop the Sphinx build directory under docs/.
    (paver.path.path('docs') / options.sphinx.builddir).rmtree()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_attendees(self, attendees, required=True):
    """Add new attendees to the event.

    *attendees* can be a list of email addresses or
    :class:`ExchangeEventAttendee` objects.
    """
    resolved = self._build_resource_dictionary(attendees, required=required)
    self._attendees.update(resolved)
    # Mark the attendee list as modified so it gets synced.
    self._dirty_attributes.add(u'attendees')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_attendees(self, attendees):
    """Remove attendees from the event.

    *attendees* can be a list of email addresses or
    :class:`ExchangeEventAttendee` objects.
    """
    for email in self._build_resource_dictionary(attendees):
        # Silently skip addresses that are not currently attending.
        self._attendees.pop(email, None)
    self._dirty_attributes.add(u'attendees')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_resources(self, resources):
    """Add new resources to the event.

    *resources* can be a list of email addresses or
    :class:`ExchangeEventAttendee` objects.
    """
    self._resources.update(self._build_resource_dictionary(resources))
    # Mark the resource list as modified so it gets synced.
    self._dirty_attributes.add(u'resources')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_resources(self, resources):
    """Remove resources from the event.

    *resources* can be a list of email addresses or
    :class:`ExchangeEventAttendee` objects.
    """
    for email in self._build_resource_dictionary(resources):
        # Silently skip resources that are not currently attached.
        self._resources.pop(email, None)
    self._dirty_attributes.add(u'resources')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(self):
    """Validate that all required fields are present.

    Raises:
        ValueError: if start/end are missing or end precedes start.
        TypeError: if reminder_minutes_before_start or is_all_day have
            the wrong type.
    """
    if not self.start:
        raise ValueError("Event has no start date")
    if not self.end:
        raise ValueError("Event has no end date")
    if self.end < self.start:
        raise ValueError("Start date is after end date")
    reminder = self.reminder_minutes_before_start
    if reminder and not isinstance(reminder, int):
        raise TypeError("reminder_minutes_before_start must be of type int")
    if self.is_all_day and not isinstance(self.is_all_day, bool):
        raise TypeError("is_all_day must be of type bool")
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def info_factory(name, libnames, headers, frameworks=None, section=None, classname=None):
    """Create a system_info class.

    Parameters
    ----------
    name : str
        name of the library
    libnames : seq
        list of libraries to look for
    headers : seq
        list of headers to look for
    frameworks : seq, optional
        OS X frameworks to fall back on when no libraries are given
    classname : str
        name of the returned class
    section : str
        section name in the site.cfg

    Returns
    -------
    a system_info-derived class with the given meta-parameters
    """
    if not classname:
        classname = '%s_info' % name
    if not section:
        section = name
    if not frameworks:
        # BUG FIX: this previously assigned to a misspelled local
        # ("framesworks"), leaving ``frameworks`` as None instead of [].
        frameworks = []

    class _ret(system_info):
        def __init__(self):
            system_info.__init__(self)

        def library_extensions(self):
            return system_info.library_extensions(self)

        def calc_info(self):
            """Compute the information of the library."""
            if libnames:
                libs = self.get_libs('libraries', '')
                if not libs:
                    libs = libnames
                # Look for the shared library
                lib_dirs = self.get_lib_dirs()
                tmp = None
                for d in lib_dirs:
                    tmp = self.check_libs(d, libs)
                    if tmp is not None:
                        info = tmp
                        break
                if tmp is None:
                    return
                # Look for the header file
                include_dirs = self.get_include_dirs()
                inc_dir = None
                for d in include_dirs:
                    p = self.combine_paths(d, headers)
                    if p:
                        inc_dir = os.path.dirname(p[0])
                        dict_append(info, include_dirs=[d])
                        break
                if inc_dir is None:
                    log.info(' %s not found' % name)
                    return
                self.set_info(**info)
            else:
                # No libraries requested: fall back to OS X frameworks.
                if frameworks:
                    fargs = []
                    for f in frameworks:
                        p = "/System/Library/Frameworks/%s.framework" % f
                        if os.path.exists(p):
                            fargs.append("-framework")
                            fargs.append(f)
                    if fargs:
                        self.set_info(extra_link_args=fargs)

    _ret.__name__ = classname
    _ret.section = section
    return _ret
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_all_details(self):
    """Execute full event lookups for every known event.

    Intended for when you want completely populated event entries,
    including Organizer & Attendee details.
    """
    log.debug(u"Loading all details")
    if self.count > 0:
        # Empty out the event list first to prevent duplicates once the
        # fully-detailed results are parsed back in.
        del self.events[:]
        # Ask Exchange for every property of each known exchange ID.
        log.debug(u"Requesting all event details for events: {event_list}".format(event_list=str(self.event_ids)))
        request_body = soap_request.get_item(exchange_id=self.event_ids,
                                             format=u'AllProperties')
        reply = self.service.send(request_body)
        # Re-parse the response to pick up the full detail set.
        self._parse_response_for_all_events(reply)
    return self
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def seek(self, offset, whence=0, mode='rw'):
    """Seek within the audio data, like the builtin ``seek``.

    :Parameters:
        offset : int
            number of frames (e.g. two samples for stereo files) to move
            relatively to the position set by *whence*.
        whence : int
            only 0 (beginning), 1 (current) and 2 (end of file) are valid.
        mode : string
            'rw' updates both read and write pointers, 'r' only the read
            pointer, 'w' only the write pointer (this may only make sense
            depending on the mode the file was opened in).

    :raises PyaudioIOError: on an invalid seek (beyond or before the file).

    Notes
    -----
    Only audio data is taken into account.
    """
    try:
        st = self._sndfile.seek(offset, whence, mode)
    except IOError as e:
        # BUG FIX: ``except IOError, e:`` is Python-2-only syntax and a
        # SyntaxError on Python 3; ``as`` works on 2.6+ and 3.x alike.
        raise PyaudioIOError(str(e))
    return st
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_frames(self, nframes, dtype=np.float64):
    """Read *nframes* frames from the file.

    :Parameters:
        nframes : int
            number of frames to read.
        dtype : numpy dtype
            dtype of the returned array containing the read data.

    Notes
    -----
    - updates the read pointer.
    - One column is one channel (one row per channel after 0.9).
    - Requesting floats from an integer-encoded file yields normalized
      data (max integer -> 1.0, min -> -1.0); requesting integers from
      normalized floating-point data is not supported and may give wrong
      results.
    """
    # Pure delegation: the underlying sndfile object advances the read
    # pointer and handles dtype conversion/normalization.
    frames = self._sndfile.read_frames(nframes, dtype)
    return frames
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_frames(self, input, nframes=-1):
    """Write data to the file.

    :Parameters:
        input : ndarray
            array containing data to write.
        nframes : int
            number of frames to write (-1 means all of *input*).

    Notes
    -----
    - One column is one channel (one row per channel after 0.9).
    - updates the write pointer.
    - if floats are given when the file contains integer data, you should
      put normalized data (the range [-1..1] is written as the maximum
      range allowed by the integer bitwidth).
    """
    if nframes == -1:
        # Default: write the whole array; how many frames that is
        # depends on the rank (mono vs. multi-channel).
        if input.ndim not in (1, 2):
            raise ValueError("Input has to be rank 1 (mono) or rank 2 "
                             "(multi-channels)")
        nframes = input.size if input.ndim == 1 else input.shape[0]
    return self._sndfile.write_frames(input[:nframes, ...])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete_field(field_uri):
    """Build a ``<t:DeleteItemField>`` node requesting deletion of a field.

    Necessary when you want to overwrite values instead of appending::

        <t:DeleteItemField>
          <t:FieldURI FieldURI="calendar:Resources"/>
        </t:DeleteItemField>
    """
    return T.DeleteItemField(T.FieldURI(FieldURI=field_uri))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_occurrence(exchange_id, instance_index, format=u"Default"):
    """Build a GetItem request for occurrences of a recurring event.

    *exchange_id* is the id of the master event in the Exchange store and
    *instance_index* is an iterable of occurrence indexes to fetch.
    *format* controls how much data Exchange returns; acceptable values
    are IdOnly, Default and AllProperties.

    GetItem doc:
        http://msdn.microsoft.com/en-us/library/aa564509(v=exchg.140).aspx
    OccurrenceItemId doc:
        http://msdn.microsoft.com/en-us/library/office/aa580744(v=exchg.150).aspx
    """
    # Build one OccurrenceItemId child per requested occurrence index and
    # assemble the full request tree in a single expression.
    occurrence_ids = [
        T.OccurrenceItemId(RecurringMasterId=exchange_id,
                           InstanceIndex=str(index))
        for index in instance_index
    ]
    return M.GetItem(
        M.ItemShape(T.BaseShape(format)),
        M.ItemIds(*occurrence_ids)
    )
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_property_node(node_to_insert, field_uri):
    """Build a ``<t:SetItemField>`` telling Exchange to overwrite the
    contents of *field_uri* with *node_to_insert*."""
    set_field = T.SetItemField(
        T.FieldURI(FieldURI=field_uri),
        T.CalendarItem(node_to_insert),
    )
    return set_field
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _validate_example(rh, method, example_type):
    """Validate the example attached to *method* against its schema.

    :returns: formatted example if one exists and validates, otherwise None
    :raises ValidationError: if the example does not validate against the
        schema (the re-raised error names the offending handler/method)
    """
    example = getattr(method, "{}_example".format(example_type))
    schema = getattr(method, "{}_schema".format(example_type))
    if example is None:
        return None
    try:
        validate(example, schema)
    except ValidationError as e:
        raise ValidationError(
            "{}_example for {}.{} could not be validated.\n{}".format(
                example_type, rh.__name__, method.__name__, str(e)
            )
        )
    return json.dumps(example, indent=4, sort_keys=True)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_rh_methods(rh):
    """Yield ``(name, method)`` for every HTTP method on *rh* that was
    decorated with ``schema.validate`` (detected via its ``input_schema``
    attribute)."""
    for attr_name, attr in vars(rh).items():
        is_validated_http_method = (
            attr_name in HTTP_METHODS
            and is_method(attr)
            and hasattr(attr, "input_schema")
        )
        if is_validated_http_method:
            yield (attr_name, attr)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _escape_markdown_literals(string):
"""Escape any markdown literals in ``string`` by prepending with \\ :type string: str :rtype: str """
|
literals = list("\\`*_{}[]()<>#+-.!:|")
escape = lambda c: '\\' + c if c in literals else c
return "".join(map(escape, string))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _cleandoc(doc):
"""Remove uniform indents from ``doc`` lines that are not empty :returns: Cleaned ``doc`` """
|
indent_length = lambda s: len(s) - len(s.lstrip(" "))
not_empty = lambda s: s != ""
lines = doc.split("\n")
indent = min(map(indent_length, filter(not_empty, lines)))
return "\n".join(s[indent:] for s in lines)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_api_docs(routes):
    """Generate GitHub-Markdown API documentation from the schemas and
    docstrings attached to RequestHandler methods.

    :param routes: list of routes (ideally all possible routes of the app)
    :rtype: str
    :returns: generated GFM-formatted documentation
    """
    route_tuples = sorted(map(_get_tuple_from_route, routes),
                          key=lambda route: route[0])
    # One documentation section per APIHandler route, in URL order.
    sections = [
        _get_route_doc(url, rh, methods)
        for url, rh, methods in route_tuples
        if issubclass(rh, APIHandler)
    ]
    return (
        "**This documentation is automatically generated.**\n\n" +
        "**Output schemas only represent `data` and not the full output; " +
        "see output examples and the JSend specification.**\n" +
        "\n<br>\n<br>\n".join(sections)
    )
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def error(self, message, data=None, code=None):
    """Write a JSend "error" response and finish the request.

    Used when an exception was thrown while processing the request.

    :type message: A JSON-serializable object
    :param message: meaningful, end-user-readable (or at least log-worthy)
        message explaining what went wrong
    :type data: A JSON-serializable object
    :param data: generic container for any other information about the
        error (conditions that caused it, stack traces, etc.)
    :type code: int
    :param code: numeric code corresponding to the error, if applicable
    """
    payload = {'status': 'error', 'message': message}
    # Optional members are only included when truthy, per JSend.
    if data:
        payload['data'] = data
    if code:
        payload['code'] = code
    self.write(payload)
    self.finish()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def input_schema_clean(input_, input_schema):
    """Merge schema default values into the input data.

    :param input_: Input data
    :type input_: dict
    :param input_schema: Input schema
    :type input_schema: dict
    :returns: nested dict of defaults deep-updated with the input data,
        or the input unchanged when no defaults apply
    :rtype: dict
    """
    # Defaults only make sense for object schemas; everything else passes
    # through untouched.
    if input_schema.get('type') != 'object':
        return input_
    try:
        defaults = get_object_defaults(input_schema)
    except NoObjectDefaults:
        return input_
    return deep_update(defaults, input_)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(input_schema=None, output_schema=None, input_example=None, output_example=None, validator_cls=None, format_checker=None, on_empty_404=False, use_defaults=False):
    """Parameterized decorator for schema validation

    :type validator_cls: IValidator class
    :type format_checker: jsonschema.FormatChecker or None
    :type on_empty_404: bool
    :param on_empty_404: If this is set, and the result from the
        decorated method is a falsy value, a 404 will be raised.
    :type use_defaults: bool
    :param use_defaults: If this is set, will put 'default' keys
        from schema to self.body (If schema type is object). Example:
            {
                'published': {'type': 'bool', 'default': False}
            }
        self.body will contains 'published' key with value False if no one
        comes from request, also works with nested schemas.
    """
    @container
    def _validate(rh_method):
        """Decorator for RequestHandler schema validation

        Validates the request body against ``input_schema``, calls
        ``rh_method``, validates its output against ``output_schema`` and
        writes the validated output via ``JSendMixin.success``.

        :type rh_method: function
        :param rh_method: The RequestHandler method to be decorated
        :returns: The decorated method
        :raises ValidationError: If input is invalid as per the schema
            or malformed
        :raises TypeError: If the output is invalid as per the schema
            or malformed
        :raises APIError: If the output is a falsy value and
            on_empty_404 is True, an HTTP 404 error is returned
        """
        @wraps(rh_method)
        @tornado.gen.coroutine
        def _wrapper(self, *args, **kwargs):
            # With no input schema the body is simply not decoded.
            if input_schema is None:
                payload = None
            else:
                try:
                    # TODO: Assuming UTF-8 encoding for all requests,
                    # find a nice way of determining this from charset
                    # in headers if provided
                    payload = json.loads(self.request.body.decode("UTF-8"))
                except ValueError:
                    raise jsonschema.ValidationError(
                        "Input is malformed; could not decode JSON object."
                    )
                if use_defaults:
                    payload = input_schema_clean(payload, input_schema)
                # Validate the received input
                jsonschema.validate(
                    payload,
                    input_schema,
                    cls=validator_cls,
                    format_checker=format_checker
                )

            # Expose the decoded request body as ``self.body``.
            setattr(self, "body", payload)

            # Call the requesthandler method; unwrap a returned Future
            # (a la ``raise Return(value)``).
            output = rh_method(self, *args, **kwargs)
            if is_future(output):
                output = yield output

            # An empty result optionally maps to a 404.
            if not output and on_empty_404:
                raise APIError(404, "Resource not found.")

            if output_schema is not None:
                # Output is wrapped in an object before validating in case
                # it is a bare string (and ergo not a validatable JSON
                # object).
                try:
                    jsonschema.validate(
                        {"result": output},
                        {
                            "type": "object",
                            "properties": {
                                "result": output_schema
                            },
                            "required": ["result"]
                        }
                    )
                except jsonschema.ValidationError as e:
                    # Re-raised as TypeError: an invalid output is a fault
                    # on our end and the client should only ever see a
                    # 500 - Internal Server Error, never the schema details.
                    raise TypeError(str(e))

            # No ValidationError raised up to here: write the output back.
            self.success(output)

        # Record the schemas/examples on the wrapper so documentation
        # generation can find them.
        for attr, value in (("input_schema", input_schema),
                            ("output_schema", output_schema),
                            ("input_example", input_example),
                            ("output_example", output_example)):
            setattr(_wrapper, attr, value)
        return _wrapper
    return _validate
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read(filename):
    """Read `filename` in the root dir of the project and return a string.

    :param filename: path relative to the project root (``__DIR__``)
    :returns: file contents as a string
    """
    # BUG FIX: the handle was previously never closed (left to GC);
    # codecs file objects support the context-manager protocol.
    with codecs.open(os.path.join(__DIR__, filename), 'r') as f:
        return f.read()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deep_update(source, overrides):
    """Update a nested dictionary or similar mapping in place.

    Values that are themselves non-empty mappings are merged recursively;
    everything else (including empty mappings) simply overwrites.

    :type source: Mapping
    :type overrides: Mapping
    :rtype: Mapping
    :returns: ``source``, modified in place
    """
    # BUG FIX: ``collections.Mapping`` was removed in Python 3.10; use
    # ``collections.abc`` with a fallback for Python 2.
    try:
        from collections.abc import Mapping
    except ImportError:  # pragma: no cover - Python 2 fallback
        from collections import Mapping
    for key, value in overrides.items():
        if isinstance(value, Mapping) and value:
            source[key] = deep_update(source.get(key, {}), value)
        else:
            source[key] = value
    return source
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_handler_subclass(cls, classnames=("ViewHandler", "APIHandler")):
    """Determine whether ``cls`` is a subclass of one of the handler
    classes named in ``classnames``.

    Lists are handled recursively: True if any element qualifies.

    :raises TypeError: when ``cls`` is neither a list nor a class
    """
    if isinstance(cls, list):
        return any(is_handler_subclass(item) for item in cls)
    if isinstance(cls, type):
        # Compare by class *name* anywhere in the MRO, not by identity.
        mro_names = (base.__name__ for base in inspect.getmro(cls))
        return any(base_name in classnames for base_name in mro_names)
    raise TypeError(
        "Unexpected type `{}` for class `{}`".format(type(cls), cls)
    )
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_error(self, status_code, **kwargs):
    """Override of ``RequestHandler.write_error``.

    Calls ``fail()`` or ``error()`` from JSendMixin depending on which
    exception was raised, with the provided reason and status code.

    :type status_code: int
    :param status_code: HTTP status code
    """
    def get_exc_message(exception):
        # HTTPError-style exceptions carry a log_message; fall back to
        # the exception's __str__ representation otherwise.
        if hasattr(exception, "log_message"):
            return exception.log_message
        return str(exception)

    self.clear()
    self.set_status(status_code)

    # APIError/ValidationError result in a JSend "fail" written back with
    # log_message as data — hence log_message should NEVER expose
    # internals. Any other exception becomes a JSend "error", with the
    # message only attached when debug mode is enabled.
    exception = kwargs["exc_info"][1]
    if isinstance(exception, (APIError, ValidationError)):
        # ValidationError is always due to a malformed request
        if isinstance(exception, ValidationError):
            self.set_status(400)
        self.fail(get_exc_message(exception))
    else:
        self.error(
            message=self._reason,
            data=get_exc_message(exception) if self.settings.get("debug")
            else None,
            code=status_code
        )
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gen_submodule_names(package):
    """Walk *package* and yield the dotted names of all its submodules.

    :type package: package
    :param package: The package to get submodule names of
    :returns: iterator yielding ``str`` module names
    """
    prefix = package.__name__ + '.'
    # Import errors during the walk are deliberately swallowed.
    for _importer, modname, _ispkg in pkgutil.walk_packages(
            path=package.__path__, prefix=prefix, onerror=lambda x: None):
        yield modname
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_module_routes(module_name, custom_routes=None, exclusions=None, arg_pattern=r'(?P<{}>[a-zA-Z0-9_\-]+)'):
    """Create and return routes for module_name.

    Routes are ``(url, RequestHandler)`` tuples.

    :returns: list of routes for ``module_name`` with respect to
        ``exclusions`` and ``custom_routes``. Returned routes have URLs
        formatted such that they are forward-slash-separated by
        module/class level and end with the lowercase name of the
        RequestHandler (a trailing ``handler`` is removed from the
        handler's name). For example, a RequestHandler named
        ``helloworld.api.HelloWorldHandler`` is assigned the url
        ``/api/helloworld``. Additionally, if a method has extra arguments
        aside from ``self`` in its signature, routes with URL patterns are
        generated matching ``arg_pattern`` for each argument (the default
        pattern matches only alphanumeric, hyphen and underscore
        characters).
    :type module_name: str
    :param module_name: Name of the module to get routes for
    :param custom_routes: List of ``(url, RequestHandler)`` tuples that
        already have custom URLs; automatic generation is skipped for
        these handlers and the tuples are appended to the result as-is.
    :param exclusions: List of RequestHandler names that routes should not
        be generated for
    :type arg_pattern: str
    :param arg_pattern: Default pattern for extra arguments of any method
    """
    def has_method(module, cls_name, method_name):
        """True when ``module.cls_name`` itself defines ``method_name``
        and it is a real method (not inherited, not a plain attribute)."""
        return all([
            method_name in vars(getattr(module, cls_name)),
            is_method(reduce(getattr, [module, cls_name, method_name]))
        ])
    def yield_args(module, cls_name, method_name):
        """Get signature of ``module.cls_name.method_name``
        Confession: This function doesn't actually ``yield`` the arguments,
        just returns a list. Trust me, it's better that way.
        :returns: List of arg names from method_name except ``self``
        :rtype: list
        """
        wrapped_method = reduce(getattr, [module, cls_name, method_name])
        method = extract_method(wrapped_method)
        # If using tornado_json.gen.coroutine, original args are annotated...
        argspec_args = getattr(method, "__argspec_args",
                               # otherwise just grab them from the method
                               inspect.getargspec(method).args)
        return [a for a in argspec_args if a not in ["self"]]
    def generate_auto_route(module, module_name, cls_name, method_name, url_name):
        """Generate URL for auto_route
        :rtype: str
        :returns: Constructed URL based on given arguments
        """
        def get_handler_name():
            """Get handler identifier for URL
            For the special case where ``url_name`` is
            ``__self__``, the handler is named a lowercase
            value of its own name with 'handler' removed
            from the ending if given; otherwise, we
            simply use the provided ``url_name``
            """
            if url_name == "__self__":
                if cls_name.lower().endswith('handler'):
                    return cls_name.lower().replace('handler', '', 1)
                return cls_name.lower()
            else:
                return url_name
        def get_arg_route():
            """Get remainder of URL determined by method argspec
            :returns: Remainder of URL with one named capture group
            per argument in the method's argument spec.
            If there are no arguments given, returns an optional
            trailing slash only.
            :rtype: str
            """
            if yield_args(module, cls_name, method_name):
                return "/{}/?$".format("/".join(
                    [arg_pattern.format(argname) for argname
                     in yield_args(module, cls_name, method_name)]
                ))
            return r"/?"
        return "/{}/{}{}".format(
            "/".join(module_name.split(".")[1:]),
            get_handler_name(),
            get_arg_route()
        )
    if not custom_routes:
        custom_routes = []
    if not exclusions:
        exclusions = []
    # Import module so we can get its request handlers
    module = importlib.import_module(module_name)
    # Generate list of RequestHandler names in custom_routes
    custom_routes_s = [c.__name__ for r, c in custom_routes]
    # All classes defined (or imported) in the module, keyed by name
    rhs = {cls_name: cls for (cls_name, cls) in
           inspect.getmembers(module, inspect.isclass)}
    # You better believe this is a list comprehension
    auto_routes = list(chain(*[
        list(set(chain(*[
            # Generate a route for each "name" specified in the
            # __url_names__ attribute of the handler
            [
                # URL, requesthandler tuple
                (
                    generate_auto_route(
                        module, module_name, cls_name, method_name, url_name
                    ),
                    getattr(module, cls_name)
                ) for url_name in getattr(module, cls_name).__url_names__
                # Add routes for each custom URL specified in the
                # __urls__ attribute of the handler
            ] + [
                (
                    url,
                    getattr(module, cls_name)
                ) for url in getattr(module, cls_name).__urls__
            ]
            # We create a route for each HTTP method in the handler
            # so that we catch all possible routes if different
            # HTTP methods have different argspecs and are expecting
            # to catch different routes. Any duplicate routes
            # are removed from the set() comparison.
            for method_name in HTTP_METHODS if has_method(
                module, cls_name, method_name)
        ])))
        # foreach classname, pyclbr.Class in rhs
        for cls_name, cls in rhs.items()
        # Only add the pair to auto_routes if:
        #   * the superclass is in the list of supers we want
        #   * the requesthandler isn't already paired in custom_routes
        #   * the requesthandler isn't manually excluded
        if is_handler_subclass(cls)
        and cls_name not in (custom_routes_s + exclusions)
    ]))
    routes = auto_routes + custom_routes
    return routes
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def coroutine(func, replace_callback=True):
    """Tornado-JSON compatible wrapper for ``tornado.gen.coroutine``.

    Annotates the original argspec.args of ``func`` as attribute
    ``__argspec_args`` so route generation can still inspect the
    undecorated signature.
    """
    # gen.coroutine in tornado 3.x.x and 5.x.x have a different signature
    # than 4.x.x
    if TORNADO_MAJOR != 4:
        wrapper = gen.coroutine(func)
    else:
        wrapper = gen.coroutine(func, replace_callback)
    # BUG FIX: inspect.getargspec was removed in Python 3.11; prefer
    # getfullargspec (whose ``.args`` field is equivalent here) with a
    # fallback for very old interpreters.
    argspec = getattr(inspect, "getfullargspec", None) or inspect.getargspec
    wrapper.__argspec_args = argspec(func).args
    return wrapper
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
    """Entry point for gns3-converter."""
    args = setup_argparse().parse_args()
    if not args.quiet:
        print('GNS3 Topology Converter')
    logging_level = logging.DEBUG if args.debug else logging.WARNING
    logging.basicConfig(level=logging_level,
                        format=LOG_MSG_FMT, datefmt=LOG_DATE_FMT)
    logging.getLogger(__name__)
    # Resolve the default topology relative to the working directory.
    if args.topology == 'topology.net':
        args.topology = os.path.join(os.getcwd(), 'topology.net')
    # The main topology, followed by any snapshot topologies.
    topology_files = [{'file': topology_abspath(args.topology),
                       'snapshot': False}]
    topology_files.extend(get_snapshots(args.topology))
    topology_name = name(args.topology, args.name)
    # Do the conversion for each topology in turn.
    for topology in topology_files:
        do_conversion(topology, topology_name, args.output, args.debug)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def setup_argparse():
    """Build the argparse argument parser for gns3-converter.

    :return: instance of argparse
    :rtype: ArgumentParser
    """
    parser = argparse.ArgumentParser(
        description='Convert old ini-style GNS3 topologies (<=0.8.7) to '
                    'the newer version 1+ JSON format')
    parser.add_argument('--version', action='version',
                        version='%(prog)s ' + __version__)
    parser.add_argument('-n', '--name',
                        help='Topology name (default uses the name of the '
                             'old project directory)')
    parser.add_argument('-o', '--output', help='Output directory')
    parser.add_argument('topology', nargs='?', default='topology.net',
                        help='GNS3 .net topology file (default: topology.net)')
    parser.add_argument('--debug', action='store_true',
                        help='Enable debugging output')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='Quiet-mode (no output to console)')
    return parser
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_conversion(topology_def, topology_name, output_dir=None, debug=False, quiet=False):
    """Convert the topology.

    :param dict topology_def: dict containing topology file and snapshot
        bool, e.g. ``{'file': filename, 'snapshot': False}``
    :param str topology_name: the name of the topology
    :param str output_dir: directory in which to output the topology
        (default: None)
    :param bool debug: enable debugging (default: False)
    :param bool quiet: no console printing (default: False)
    """
    # Read the old ini-style topology through a fresh Converter instance.
    converter = Converter(topology_def['file'], debug)
    old_top = converter.read_topology()
    new_top = JSONTopology()
    # Process the sections, then populate the new JSON topology.
    topology = converter.process_topology(old_top)
    new_top.nodes = converter.generate_nodes(topology)
    new_top.links = converter.generate_links(new_top.nodes)
    artwork = topology['artwork']
    new_top.notes = converter.generate_notes(artwork['NOTE'])
    new_top.shapes = converter.generate_shapes(artwork['SHAPE'])
    new_top.images = converter.generate_images(artwork['PIXMAP'])
    new_top.name = topology_name
    # Save the new topology
    save(output_dir, converter, new_top, topology_def['snapshot'], quiet)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_snapshots(topology):
    """Return the paths of any snapshot topologies.

    :param str topology: topology file
    :return: list of dicts describing snapshot topologies
    :rtype: list
    """
    snapshots = []
    snap_dir = os.path.join(topology_dirname(topology), 'snapshots')
    if not os.path.exists(snap_dir):
        return snapshots
    # Each snapshot lives in its own sub-directory with a topology.net.
    for directory in os.listdir(snap_dir):
        snap_top = os.path.join(snap_dir, directory, 'topology.net')
        if os.path.exists(snap_top):
            snapshots.append({'file': snap_top, 'snapshot': True})
    return snapshots
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def name(topology_file, topology_name=None):
    """Calculate the name to save the converted topology as, using either
    a specified name or the directory name of the current project.

    :param str topology_file: topology filename
    :param topology_name: optional topology name (default: None)
    :type topology_name: str or None
    :return: new topology name
    :rtype: str
    """
    if topology_name is None:
        logging.debug('topology name not supplied')
        # Fall back to the name of the directory holding the topology.
        return os.path.basename(topology_dirname(topology_file))
    logging.debug('topology name supplied')
    return topology_name
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def snapshot_name(topo_name):
    """Get the snapshot name.

    The name is taken from the directory containing the topology file,
    which is expected to look like ``topology_NAME_snapshot_DATE_TIME``.

    :param str topo_name: topology file location
    :return: snapshot name (``NAME_DATE_TIME``)
    :raises ConvertError: when unable to determine the snapshot name
    """
    topo_dir = os.path.basename(topology_dirname(topo_name))
    # BUG FIX: the pattern was a plain literal, so '\d' was an invalid
    # escape sequence (SyntaxWarning on modern Python); use a raw string.
    snap_re = re.compile(r'^topology_(.+)(_snapshot_)(\d{6}_\d{6})$')
    match = snap_re.search(topo_dir)
    if match is None:
        raise ConvertError('Unable to get snapshot name')
    return match.group(1) + '_' + match.group(3)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(output_dir, converter, json_topology, snapshot, quiet):
""" Save the converted topology :param str output_dir: Output Directory :param Converter converter: Converter instance :param JSONTopology json_topology: JSON topology layout :param bool snapshot: Is this a snapshot? :param bool quiet: No console printing """
|
try:
    # All filesystem work happens inside one try; an OSError aborts the
    # save with a logged error instead of a traceback.
    old_topology_dir = topology_dirname(converter.topology)
    if output_dir:
        output_dir = os.path.abspath(output_dir)
    else:
        output_dir = os.getcwd()
    topology_name = json_topology.name
    topology_files_dir = os.path.join(output_dir, topology_name + '-files')
    if snapshot:
        # Snapshots nest under <name>-files/snapshots/<snap name>, with
        # their own <name>-files directory inside.
        snap_name = snapshot_name(converter.topology)
        output_dir = os.path.join(topology_files_dir, 'snapshots',
                                  snap_name)
        topology_files_dir = os.path.join(output_dir, topology_name +
                                          '-files')
    # Prepare the directory structure
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    # Move the dynamips config files to the new topology folder
    config_err = copy_configs(converter.configs, old_topology_dir,
                              topology_files_dir)
    # Copy any VPCS configurations to the new topology
    copy_vpcs_configs(old_topology_dir, topology_files_dir)
    # Copy the topology images to the new topology
    copy_topology_image(old_topology_dir, output_dir)
    # Copy the instructions to the new topology folder (skipped for
    # snapshots, which share the parent topology's instructions)
    if not snapshot:
        copy_instructions(old_topology_dir, output_dir)
    # Move the image files to the new topology folder
    image_err = copy_images(converter.images, old_topology_dir,
                            topology_files_dir)
    # Create the vbox working directories
    make_vbox_dirs(json_topology.get_vboxes(), output_dir, topology_name)
    # Create the qemu working directories
    make_qemu_dirs(json_topology.get_qemus(), output_dir, topology_name)
    if config_err:
        logging.warning('Some router startup configurations could not be '
                        'found to be copied to the new topology')
    if image_err:
        logging.warning('Some images could not be found to be copied to '
                        'the new topology')
    # Serialise the converted topology as sorted, pretty-printed JSON
    filename = '%s.gns3' % topology_name
    file_path = os.path.join(output_dir, filename)
    with open(file_path, 'w') as file:
        json.dump(json_topology.get_topology(), file, indent=4,
                  sort_keys=True)
    if not snapshot and not quiet:
        print('Your topology has been converted and can found in:\n'
              '     %s' % output_dir)
except OSError as error:
    logging.error(error)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copy_configs(configs, source, target):
""" Copy dynamips configs to converted topology :param configs: Configs to copy :param str source: Source topology directory :param str target: Target topology files directory :return: True when a config cannot be found, otherwise false :rtype: bool """
|
config_err = False
if len(configs) > 0:
config_dir = os.path.join(target, 'dynamips', 'configs')
os.makedirs(config_dir)
for config in configs:
old_config_file = os.path.join(source, config['old'])
new_config_file = os.path.join(config_dir,
os.path.basename(config['new']))
if os.path.isfile(old_config_file):
# Copy and rename the config
shutil.copy(old_config_file, new_config_file)
else:
config_err = True
logging.error('Unable to find %s' % config['old'])
return config_err
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copy_vpcs_configs(source, target):
""" Copy any VPCS configs to the converted topology :param str source: Source topology directory :param str target: Target topology files directory """
|
# Candidate files: every per-host *.vpc config plus, when present, the
# shared vpcs.hist history file.
vpcs_files = glob.glob(os.path.join(source, 'configs', '*.vpc'))
hist_file = os.path.join(source, 'configs', 'vpcs.hist')
if os.path.isfile(hist_file):
    vpcs_files.append(hist_file)
dest_dir = os.path.join(target, 'vpcs', 'multi-host')
# Only create the destination tree when there is something to copy
if vpcs_files:
    os.makedirs(dest_dir)
    for src_file in vpcs_files:
        shutil.copy(src_file,
                    os.path.join(dest_dir, os.path.basename(src_file)))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copy_topology_image(source, target):
""" Copy any images of the topology to the converted topology :param str source: Source topology directory :param str target: Target Directory """
|
# Carry over every PNG that sits alongside the old topology file.
for image_file in glob.glob(os.path.join(source, '*.png')):
    shutil.copy(image_file, target)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copy_images(images, source, target):
""" Copy images to converted topology :param images: Images to copy :param source: Old Topology Directory :param target: Target topology files directory :return: True when an image cannot be found, otherwise false :rtype: bool """
|
image_err = False
if len(images) > 0:
images_dir = os.path.join(target, 'images')
os.makedirs(images_dir)
for image in images:
if os.path.isabs(image):
old_image_file = image
else:
old_image_file = os.path.join(source, image)
new_image_file = os.path.join(images_dir,
os.path.basename(image))
if os.path.isfile(os.path.abspath(old_image_file)):
shutil.copy(old_image_file, new_image_file)
else:
image_err = True
logging.error('Unable to find %s' % old_image_file)
return image_err
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_vbox_dirs(max_vbox_id, output_dir, topology_name):
""" Create VirtualBox working directories if required :param int max_vbox_id: Number of directories to create :param str output_dir: Output directory :param str topology_name: Topology name """
|
if max_vbox_id is not None:
for i in range(1, max_vbox_id + 1):
vbox_dir = os.path.join(output_dir, topology_name + '-files',
'vbox', 'vm-%s' % i)
os.makedirs(vbox_dir)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_qemu_dirs(max_qemu_id, output_dir, topology_name):
""" Create Qemu VM working directories if required :param int max_qemu_id: Number of directories to create :param str output_dir: Output directory :param str topology_name: Topology name """
|
if max_qemu_id is not None:
for i in range(1, max_qemu_id + 1):
qemu_dir = os.path.join(output_dir, topology_name + '-files',
'qemu', 'vm-%s' % i)
os.makedirs(qemu_dir)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_wic(self, old_wic, wic):
""" Convert the old style WIC slot to a new style WIC slot and add the WIC to the node properties :param str old_wic: Old WIC slot :param str wic: WIC name """
|
# Old-style slots end in the slot index (e.g. "wic0/1"); the new style
# is simply "wic" plus that index.
slot_index = old_wic[-1]
self.node['properties']['wic' + slot_index] = wic
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_slot_ports(self, slot):
""" Add the ports to be added for a adapter card :param str slot: Slot name """
|
# Slot names look like "slotN"; character 4 is the slot index.
slot_nb = int(slot[4])
slot_adapter = self.node['properties'][slot]
# The adapter matrix supplies the port count and port type
# (e.g. FastEthernet) for this adapter card.
adapter_info = ADAPTER_MATRIX[slot_adapter]
name_prefix = PORT_TYPES[adapter_info['type']]
for port_nb in range(adapter_info['ports']):
    self.node['ports'].append({'name': name_prefix +
                               '%s/%s' % (slot_nb, port_nb),
                               'id': self.port_id,
                               'port_number': port_nb,
                               'slot_number': slot_nb})
    self.port_id += 1
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_info_from_hv(self):
""" Add the information we need from the old hypervisor section """
|
# Pull the relevant settings out of the old hypervisor section; each one
# is optional, so every key is probed before use.
props = self.node['properties']
hv = self.hypervisor
# Router image (only the basename is kept)
if 'image' in hv:
    props['image'] = os.path.basename(hv['image'])
# IDLE-PC value
if 'idlepc' in hv:
    props['idlepc'] = hv['idlepc']
# Router RAM
if 'ram' in hv:
    props['ram'] = hv['ram']
# 7200 NPE type
if 'npe' in hv:
    self.device_info['npe'] = hv['npe']
# Device chassis; only the c3600 records it on the node itself
if 'chassis' in hv:
    self.device_info['chassis'] = hv['chassis']
    if self.device_info['model'] == 'c3600':
        props['chassis'] = self.device_info['chassis']
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_device_items(self, item, device):
""" Add the various items from the device to the node :param str item: item key :param dict device: dictionary containing items """
|
# Dispatch on the item key; order matters because the regex-based tests
# below would also match some of the literal keys handled first.
if item in ('aux', 'console'):
    # Console/aux TCP ports transfer straight across
    self.node['properties'][item] = device[item]
elif item.startswith('slot'):
    # Adapter card in a numbered slot
    # if self.device_info['model'] == 'c7200':
    #     if item != 'slot0':
    #         self.node['properties'][item] = device[item]
    # else:
    self.node['properties'][item] = device[item]
elif item == 'connections':
    # Cloud connection string; processed later by calc_cloud_connection
    self.connections = device[item]
elif INTERFACE_RE.search(item) or VBQ_INT_RE.search(item):
    # Physical interface (e.g. f0/0) or VBox/Qemu NIC; queued for
    # calc_device_links
    self.interfaces.append({'from': item,
                            'to': device[item]})
elif NUMBER_RE.search(item):
    # Bare numeric key: a switch port definition
    if self.device_info['type'] == 'EthernetSwitch':
        self.calc_ethsw_port(item, device[item])
    elif self.device_info['type'] == 'FrameRelaySwitch':
        self.calc_frsw_port(item, device[item])
elif MAPINT_RE.search(item):
    # Frame Relay "port:dlci" mapping
    self.add_mapping((item, device[item]))
elif item == 'cnfg':
    # Startup config: record both the old path (for copying) and the
    # new per-node name under configs/
    new_config = os.path.join('configs', 'i%s_startup-config.cfg' %
                              self.node['id'])
    self.node['properties']['startup_config'] = new_config
    self.config.append({'old': fix_path(device[item]),
                        'new': new_config})
elif item.startswith('wic'):
    # WIC module in an old-style slot
    self.add_wic(item, device[item])
elif item == 'symbol':
    self.set_symbol(device[item])
elif item == 'nics':
    # VBox/Qemu NIC count maps to the 'adapters' property
    self.node['properties']['adapters'] = device[item]
elif item == 'image':
    # For VBox/Qemu devices 'image' is the VM name
    self.node['properties']['vmname'] = device[item]
elif item == 'vbox_id' or item == 'qemu_id':
    self.node[item] = device[item]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_to_virtualbox(self):
""" Add additional parameters that were in the VBoxDevice section or not present """
|
# Fill in any VBox properties the device section did not set, pulling
# defaults from the global VBoxDevice section.  The VBoxDevice lookups
# stay inside each branch so they only run when actually needed.
props = self.node['properties']
# VirtualBox image (VM name)
if 'vmname' not in props:
    props['vmname'] = self.hypervisor['VBoxDevice']['image']
# Number of adapters
if 'adapters' not in props:
    props['adapters'] = self.hypervisor['VBoxDevice']['nics']
# Console port, derived from the base port plus the 1-based vbox id
if 'console' not in props:
    props['console'] = (self.base_ports['vbox_console'] +
                        self.node['vbox_id'] - 1)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_to_qemu(self):
""" Add additional parameters to a QemuVM Device that were present in its global conf section """
|
# Fill in any Qemu properties the device section did not set, pulling
# defaults from the device's global conf section.
device = self.device_info['ext_conf']
node_prop = self.node['properties']
hv_device = self.hypervisor[device]
# QEMU HDD images: legacy configs use either 'image' or 'image1'/'image2'
if 'hda_disk_image' not in node_prop:
    if 'image' in hv_device:
        node_prop['hda_disk_image'] = hv_device['image']
    elif 'image1' in hv_device:
        node_prop['hda_disk_image'] = hv_device['image1']
if 'hdb_disk_image' not in node_prop and 'image2' in hv_device:
    node_prop['hdb_disk_image'] = hv_device['image2']
# RAM: keep an already-set value; otherwise take the hypervisor's, or
# fall back to 256 MB.  (The previous if/else clobbered a pre-existing
# node value with the 256 default.)
if 'ram' not in node_prop:
    node_prop['ram'] = hv_device.get('ram', 256)
# QEMU Options
if 'options' not in node_prop and 'options' in hv_device:
    node_prop['options'] = hv_device['options']
# Kernel Image
if 'kernel_image' not in node_prop and 'kernel' in hv_device:
    node_prop['kernel_image'] = hv_device['kernel']
# Kernel Command Line
if 'kernel_command_line' not in node_prop and \
        'kernel_cmdline' in hv_device:
    node_prop['kernel_command_line'] = hv_device['kernel_cmdline']
# initrd
if 'initrd' not in node_prop and 'initrd' in hv_device:
    node_prop['initrd'] = hv_device['initrd']
# Number of adapters: hypervisor value when present, otherwise the
# legacy default of 6
if 'adapters' not in node_prop:
    node_prop['adapters'] = hv_device.get('nics', 6)
# Adapter type
if 'adapter_type' not in node_prop and 'netcard' in hv_device:
    node_prop['adapter_type'] = hv_device['netcard']
# Console port, derived from the base port plus the 1-based qemu id
if 'console' not in node_prop:
    node_prop['console'] = self.base_ports['qemu_console'] + \
        self.node['qemu_id'] - 1
# Qemu binary path; a 'flavor' swaps in a specific qemu-system binary
if 'qemu_path' not in node_prop:
    qemu_path = self.hypervisor['qemu_path']
    if 'flavor' in hv_device:
        qemu_path = re.sub(r'qemu-system-.*',
                           'qemu-system' + hv_device['flavor'],
                           qemu_path)
    node_prop['qemu_path'] = qemu_path
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_vm_ethernet_ports(self):
""" Add ethernet ports to Virtualbox and Qemu nodes """
|
# One Ethernet port per configured adapter, numbered from 0.
adapter_count = self.node['properties']['adapters']
for adapter_nb in range(adapter_count):
    self.node['ports'].append({'id': self.port_id,
                               'name': 'Ethernet%s' % adapter_nb,
                               'port_number': adapter_nb})
    self.port_id += 1
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_qemu_symbol(self):
""" Set the appropriate symbol for QEMU Devices """
|
# Map the legacy device family to its stock symbol; only applied when
# no symbol has been chosen for this node yet.
symbol_map = {'ASA': 'asa', 'PIX': 'PIX_firewall',
              'JUNOS': 'router', 'IDS': 'ids'}
origin = self.device_info['from']
already_set = ('default_symbol' in self.node
               or 'hover_symbol' in self.node)
if origin in symbol_map and not already_set:
    self.set_symbol(symbol_map[origin])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_symbol(self, symbol):
""" Set a symbol for a device :param str symbol: Symbol to use """
|
# A couple of legacy symbol names map to renamed modern equivalents.
renames = {'EtherSwitch router': 'multilayer_switch',
           'Host': 'computer'}
symbol = renames.get(symbol, symbol)
self.node['default_symbol'] = ':/symbols/%s.normal.svg' % symbol
self.node['hover_symbol'] = ':/symbols/%s.selected.svg' % symbol
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_ethsw_port(self, port_num, port_def):
""" Split and create the port entry for an Ethernet Switch :param port_num: port number :type port_num: str or int :param str port_def: port definition """
|
# Port definition format: "<type> <vlan> [<dest device>] <dest port>",
# e.g. "access 1 SW2 1".  A three-field form means the destination is
# a NIO rather than another device.
fields = port_def.split(' ')
if len(fields) == 4:
    destination = {'device': fields[2],
                   'port': fields[3]}
else:
    destination = {'device': 'NIO',
                   'port': fields[2]}
# Record the port itself, then the link it carries
port = {'id': self.port_id,
        'name': str(port_num),
        'port_number': int(port_num),
        'type': fields[0],
        'vlan': int(fields[1])}
self.node['ports'].append(port)
self.calc_link(self.node['id'], self.port_id, port['name'],
               destination)
self.port_id += 1
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_mb_ports(self):
""" Add the default ports to add to a router """
|
# Look up the motherboard port layout for this model/chassis pair and
# register one port per on-board interface.
model = self.device_info['model']
chassis = self.device_info['chassis']
layout = MODEL_MATRIX[model][chassis]
if layout['ports'] > 0:
    name_prefix = PORT_TYPES[layout['type']]
    for port_nb in range(layout['ports']):
        self.node['ports'].append({'name': name_prefix + '0/' +
                                   str(port_nb),
                                   'id': self.port_id,
                                   'port_number': port_nb,
                                   'slot_number': 0})
        self.port_id += 1
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_link(self, src_id, src_port, src_port_name, destination):
""" Add a link item for processing later :param int src_id: Source node ID :param int src_port: Source port ID :param str src_port_name: Source port name :param dict destination: Destination """
|
# NIO endpoint names are normalised to lowercase so they match the
# cloud port names generated elsewhere.
if destination['device'] == 'NIO':
    destination['port'] = destination['port'].lower()
self.links.append({'source_node_id': src_id,
                   'source_port_id': src_port,
                   'source_port_name': src_port_name,
                   'source_dev': self.node['properties']['name'],
                   'dest_dev': destination['device'],
                   'dest_port': destination['port']})
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_description(self):
""" Set the node description """
|
# Routers get a generated "Router <model>" description; every other
# device carries its own description string.
info = self.device_info
if info['type'] != 'Router':
    self.node['description'] = info['desc']
else:
    self.node['description'] = '%s %s' % (info['type'], info['model'])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_type(self):
""" Set the node type """
|
# Routers are typed by their upper-cased model name; everything else
# keeps its device type verbatim.
info = self.device_info
if info['type'] != 'Router':
    self.node['type'] = info['type']
else:
    self.node['type'] = info['model'].upper()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_device_links(self):
""" Calculate a router or VirtualBox link """
|
for connection in self.interfaces:
    # Expand the short interface name (e.g. 'f0/0') to its long form
    # via the first letter.  NOTE(review): str.replace substitutes every
    # occurrence of that letter — assumed safe for these names; confirm.
    int_type = connection['from'][0]
    int_name = connection['from'].replace(int_type,
                                          PORT_TYPES[int_type.upper()])
    # Get the source port id
    src_port = None
    for port in self.node['ports']:
        if int_name == port['name']:
            src_port = port['id']
            break
    # "<device> <port>" is a device link; a single token is a NIO
    dest_temp = connection['to'].split(' ')
    if len(dest_temp) == 2:
        conn_to = {'device': dest_temp[0],
                   'port': dest_temp[1]}
    else:
        conn_to = {'device': 'NIO',
                   'port': dest_temp[0]}
    self.calc_link(self.node['id'], src_port, int_name, conn_to)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_cloud_connection(self):
""" Add the ports and nios for a cloud connection :return: None on success or RuntimeError on error """
|
# Connection String - SW1:1:nio_gen_eth:eth0
# 0: Destination device 1: Destination port
# 2: NIO 3: NIO Destination
self.node['properties']['nios'] = []
if self.connections is None:
    return None
else:
    self.connections = self.connections.split(' ')
for connection in sorted(self.connections):
    connection = connection.split(':')
    connection_len = len(connection)
    # 4 fields: simple NIO; 6 fields: UDP-style NIO with host/ports
    if connection_len == 4:
        nio = '%s:%s' % (connection[2], connection[3])
    elif connection_len == 6:
        nio = '%s:%s:%s:%s' % (connection[2].lower(), connection[3],
                               connection[4], connection[5])
    else:
        # NOTE(review): the error is returned, not raised, matching the
        # documented ":return: ... RuntimeError on error" contract.
        return RuntimeError('Error: Unknown connection string length '
                            '(Length: %s)' % connection_len)
    self.node['properties']['nios'].append(nio)
    # port entry: clouds expose each NIO as a stub port
    self.node['ports'].append({'id': self.port_id,
                               'name': nio,
                               'stub': True})
    self.port_id += 1
return None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_mappings(self):
""" Process the mappings for a Frame Relay switch. Removes duplicates and adds the mappings to the node properties """
|
# Frame Relay mappings arrive as directed pairs; when both directions of
# the same mapping are present only one copy should survive.
#
# Fix: the old code removed entries from self.mappings while iterating
# over that same list, which shifts later elements and can skip checking
# some mappings.  Iterating over a snapshot avoids the skipping.
for mapping_a in list(self.mappings):
    if mapping_a not in self.mappings:
        # Already removed as the reverse of an earlier mapping
        continue
    for mapping_b in self.mappings:
        if mapping_a['source'] == mapping_b['dest']:
            self.mappings.remove(mapping_b)
            break
# Record the surviving mappings as a source -> dest dict on the node
self.node['properties']['mappings'] = {}
mappings = self.node['properties']['mappings']
for mapping in self.mappings:
    mappings[mapping['source']] = mapping['dest']
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fix_path(path):
""" Fix windows path's. Linux path's will remain unaltered :param str path: The path to be fixed :return: The fixed path :rtype: str """
|
if '\\' in path:
path = path.replace('\\', '/')
path = os.path.normpath(path)
return path
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read_topology(self):
""" Read the ini-style topology file using ConfigObj :return config: Topology parsed by :py:mod:`ConfigObj` :rtype: ConfigObj """
|
# The configspec ships inside the package and drives validation below
configspec = resource_stream(__name__, 'configspec')
try:
    # Probe that the topology file is openable first, so a permission /
    # missing-file error is reported separately from a parse error
    handle = open(self._topology)
    handle.close()
    try:
        config = ConfigObj(self._topology,
                           configspec=configspec,
                           raise_errors=True,
                           list_values=False,
                           encoding='utf-8')
    except SyntaxError:
        logging.error('Error loading .net file')
        sys.exit(1)
except IOError:
    logging.error('Cannot open topology file')
    sys.exit(1)
vtor = Validator()
res = config.validate(vtor, preserve_errors=True)
if res:
    logging.debug('Validation passed')
elif not res:
    # Report every failing section/key before exiting
    for entry in flatten_errors(config, res):
        # each entry is a tuple
        (section_list, key, error) = entry
        if key is not None:
            section_list.append(key)
        else:
            section_list.append('[missing section]')
        section_string = ', '.join(section_list)
        # flatten_errors uses False to mean a missing value/section
        if error is False:
            error = 'Missing value or section'
        print(section_string, ' = ', error)
    input('Press ENTER to continue')
    sys.exit(1)
configspec.close()
return config
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_topology(self, old_top):
""" Processes the sections returned by get_instances :param ConfigObj old_top: old topology as processed by :py:meth:`read_topology` :returns: tuple of dicts containing hypervisors, devices and artwork :rtype: tuple """
|
sections = self.get_sections(old_top)
topo = LegacyTopology(sections, old_top)
for instance in sorted(sections):
    # VBox/Qemu hypervisor instances carry extra per-device conf
    # sections that must be split out before the generic pass below
    if instance.startswith('vbox') or instance.startswith('qemu'):
        if instance.startswith('qemu') and \
                'qemupath' in old_top[instance]:
            topo.add_qemu_path(instance)
        for device in EXTRA_CONF:
            # Best-effort: the conf section may be absent entirely
            try:
                if isinstance(old_top[instance][device], dict):
                    topo.add_conf_item(instance, device)
                    old_top[instance].pop(device)
            except KeyError:
                pass
    for item in sorted(old_top[instance]):
        # Only dict-valued items are sub-sections worth classifying
        if isinstance(old_top[instance][item], dict):
            if item in MODEL_TRANSFORM:
                # A configuration item (topo.conf)
                topo.add_conf_item(instance, item)
            elif instance == 'GNS3-DATA' and \
                    (item.startswith('SHAPE')
                     or item.startswith('NOTE')
                     or item.startswith('PIXMAP')):
                # Item is an artwork item e.g. shapes and notes from
                # GNS3-DATA
                topo.add_artwork_item(instance, item)
            else:
                # It must be a physical item (topo.devices)
                topo.add_physical_item(instance, item)
return topo.topology
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_links(self, nodes):
""" Generate a list of links :param list nodes: A list of nodes from :py:meth:`generate_nodes` :return: list of links :rtype: list """
|
new_links = []
for link in self.links:
    # Expand port name if required (short form like 'f0/0')
    if INTERFACE_RE.search(link['dest_port'])\
            or VBQ_INT_RE.search(link['dest_port']):
        int_type = link['dest_port'][0]
        dest_port = link['dest_port'].replace(
            int_type, PORT_TYPES[int_type.upper()])
    else:
        dest_port = link['dest_port']
    # Convert dest_dev and port to id's
    dest_details = self.convert_destination_to_id(
        link['dest_dev'], dest_port, nodes)
    desc = 'Link from %s port %s to %s port %s' % \
           (link['source_dev'], link['source_port_name'],
            dest_details['name'], dest_port)
    new_links.append({'description': desc,
                      'destination_node_id': dest_details['id'],
                      'destination_port_id': dest_details['pid'],
                      'source_port_id': link['source_port_id'],
                      'source_node_id': link['source_node_id']})
# Remove duplicate links and add link_id.  Each link appears twice
# (once per direction); the reverse copy is dropped when its
# destination matches this link's source.
# NOTE(review): removing from new_links while iterating it can skip
# entries when the removed element precedes the cursor — confirm the
# intended dedupe behaviour before restructuring.
link_id = 1
for link in new_links:
    t_link = str(link['source_node_id']) + ':' + \
        str(link['source_port_id'])
    for link2 in new_links:
        d_link = str(link2['destination_node_id']) + ':' + \
            str(link2['destination_port_id'])
        if t_link == d_link:
            new_links.remove(link2)
            break
    link['id'] = link_id
    link_id += 1
    self.add_node_connection(link, nodes)
return new_links
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def device_id_from_name(device_name, nodes):
""" Get the device ID when given a device name :param str device_name: device name :param list nodes: list of nodes from :py:meth:`generate_nodes` :return: device ID :rtype: int """
|
device_id = None
for node in nodes:
if device_name == node['properties']['name']:
device_id = node['id']
break
return device_id
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def port_id_from_name(port_name, device_id, nodes):
""" Get the port ID when given a port name :param str port_name: port name :param str device_id: device ID :param list nodes: list of nodes from :py:meth:`generate_nodes` :return: port ID :rtype: int """
|
port_id = None
for node in nodes:
if device_id == node['id']:
for port in node['ports']:
if port_name == port['name']:
port_id = port['id']
break
break
return port_id
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_destination_to_id(destination_node, destination_port, nodes):
""" Convert a destination to device and port ID :param str destination_node: Destination node name :param str destination_port: Destination port name :param list nodes: list of nodes from :py:meth:`generate_nodes` :return: dict containing device ID, device name and port ID :rtype: dict """
|
device_id = None
device_name = None
port_id = None
if destination_node != 'NIO':
for node in nodes:
if destination_node == node['properties']['name']:
device_id = node['id']
device_name = destination_node
for port in node['ports']:
if destination_port == port['name']:
port_id = port['id']
break
break
else:
for node in nodes:
if node['type'] == 'Cloud':
for port in node['ports']:
if destination_port.lower() == port['name'].lower():
device_id = node['id']
device_name = node['properties']['name']
port_id = port['id']
break
info = {'id': device_id,
'name': device_name,
'pid': port_id}
return info
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_node_name_from_id(node_id, nodes):
""" Get the name of a node when given the node_id :param int node_id: The ID of a node :param list nodes: list of nodes from :py:meth:`generate_nodes` :return: node name :rtype: str """
|
node_name = ''
for node in nodes:
if node['id'] == node_id:
node_name = node['properties']['name']
break
return node_name
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_port_name_from_id(node_id, port_id, nodes):
""" Get the name of a port for a given node and port ID :param int node_id: node ID :param int port_id: port ID :param list nodes: list of nodes from :py:meth:`generate_nodes` :return: port name :rtype: str """
|
port_name = ''
for node in nodes:
if node['id'] == node_id:
for port in node['ports']:
if port['id'] == port_id:
port_name = port['name']
break
return port_name
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_node_connection(self, link, nodes):
""" Add a connection to a node :param dict link: link definition :param list nodes: list of nodes from :py:meth:`generate_nodes` """
|
# Build the human-readable description for each end of the link:
# the source port describes the destination end and vice versa
src_desc = 'connected to %s on port %s' % \
           (self.get_node_name_from_id(link['destination_node_id'],
                                       nodes),
            self.get_port_name_from_id(link['destination_node_id'],
                                       link['destination_port_id'],
                                       nodes))
dest_desc = 'connected to %s on port %s' % \
            (self.get_node_name_from_id(link['source_node_id'],
                                        nodes),
             self.get_port_name_from_id(link['source_node_id'],
                                        link['source_port_id'],
                                        nodes))
# Stamp the link id and description onto the matching port of each
# endpoint node (source first, then destination)
for node in nodes:
    if node['id'] == link['source_node_id']:
        for port in node['ports']:
            if port['id'] == link['source_port_id']:
                port['link_id'] = link['id']
                port['description'] = src_desc
                break
    elif node['id'] == link['destination_node_id']:
        for port in node['ports']:
            if port['id'] == link['destination_port_id']:
                port['link_id'] = link['id']
                port['description'] = dest_desc
                break
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.