text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def get_directory(request):
    """Get API directory as a nested list of lists.

    Each entry is a 4-tuple: (name, url, children, active), where
    `children` is a list of the same shape and `active` reflects
    whether the current request path falls under that URL.
    Reads the module-level `directory` registry populated by
    `DynamicRouter.register`.
    """

    def get_url(url):
        # Resolve a route name to an absolute URL; pass falsy values through.
        return reverse(url, request=request) if url else url

    def is_active_url(path, url):
        # Prefix match marks an entry active; guard against None/empty.
        return path.startswith(url) if url and path else False

    path = request.path
    directory_list = []

    def sort_key(r):
        # Sort groups/endpoints alphabetically by name.
        return r[0]

    # TODO(ant): support arbitrarily nested
    # structure, for now it is capped at a single level
    # for UX reasons
    for group_name, endpoints in sorted(
        six.iteritems(directory),
        key=sort_key
    ):
        endpoints_list = []
        for endpoint_name, endpoint in sorted(
            six.iteritems(endpoints),
            key=sort_key
        ):
            # Keys starting with '_' are metadata ('_url', '_viewset'),
            # not endpoints.
            if endpoint_name[:1] == '_':
                continue
            endpoint_url = get_url(endpoint.get('_url', None))
            active = is_active_url(path, endpoint_url)
            endpoints_list.append(
                (endpoint_name, endpoint_url, [], active)
            )

        url = get_url(endpoints.get('_url', None))
        active = is_active_url(path, url)
        directory_list.append(
            (group_name, url, endpoints_list, active)
        )
    return directory_list
def get_api_root_view(self, **kwargs):
    """Return API root view, using the global directory.

    Builds a one-off APIView class whose GET renders the directory:
    top-level entries with a URL map directly to it; entries without
    a URL (groups) map to a nested dict of their children.
    """
    class API(views.APIView):
        _ignore_model_permissions = True

        def get(self, request, *args, **kwargs):
            directory_list = get_directory(request)
            result = OrderedDict()
            for group_name, url, endpoints, _ in directory_list:
                if url:
                    # Terminal entry: link straight to its URL.
                    result[group_name] = url
                else:
                    # Group entry: expose children as name -> URL.
                    group = OrderedDict()
                    for endpoint_name, url, _, _ in endpoints:
                        group[endpoint_name] = url
                    result[group_name] = group
            return Response(result)

    return API.as_view()
def register(self, prefix, viewset, base_name=None):
    """Add any registered route into a global API directory.

    If the prefix includes a path separator,
    store the URL in the directory under the first path segment.
    Otherwise, store it as-is.

    For example, if there are two registered prefixes,
    'v1/users' and 'groups', `directory` will look like:

        {
            'v1': {
                'users': {
                    '_url': 'users-list',
                    '_viewset': <class 'UserViewSet'>
                },
            },
            'groups': {
                '_url': 'groups-list',
                '_viewset': <class 'GroupViewSet'>
            }
        }
    """
    if base_name is None:
        base_name = prefix

    # Let DRF's router do the actual URL registration first.
    super(DynamicRouter, self).register(prefix, viewset, base_name)

    prefix_parts = prefix.split('/')
    if len(prefix_parts) > 1:
        # 'v1/users' -> group 'v1', endpoint 'users'
        prefix = prefix_parts[0]
        endpoint = '/'.join(prefix_parts[1:])
    else:
        # No separator: endpoint lives at the directory root.
        endpoint = prefix
        prefix = None

    if prefix and prefix not in directory:
        current = directory[prefix] = {}
    else:
        # Existing group, or root of the directory when prefix is None.
        current = directory.get(prefix, directory)

    # Route name for the list view, e.g. '{basename}-list'.
    list_name = self.routes[0].name
    url_name = list_name.format(basename=base_name)
    if endpoint not in current:
        current[endpoint] = {}
    current[endpoint]['_url'] = url_name
    current[endpoint]['_viewset'] = viewset
def register_resource(self, viewset, namespace=None):
    """Register a viewset that should be considered the canonical
    endpoint for a particular resource.

    In addition to generating and registering the route, it adds
    the route in a reverse map to allow DREST to build the canonical
    URL for a given resource.

    Arguments:
        viewset - viewset class, should have `serializer_class` attr.
        namespace - (optional) URL namespace, e.g. 'v3'.

    Raises:
        Exception: if the viewset/serializer is not DREST-compatible,
            or if the resource key or resource name is already mapped.
    """
    # Try to extract resource name from viewset.
    try:
        serializer = viewset.serializer_class()
        resource_key = serializer.get_resource_key()
        resource_name = serializer.get_name()
        path_name = serializer.get_plural_name()
    # Fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt. Catch only real errors; keep the traceback dump
    # so the underlying cause is still visible.
    except Exception:
        import traceback
        traceback.print_exc()
        raise Exception(
            "Failed to extract resource name from viewset: '%s'."
            " It, or its serializer, may not be DREST-compatible." % (
                viewset
            )
        )

    # Construct canonical path and register it.
    if namespace:
        namespace = namespace.rstrip('/') + '/'
    base_path = namespace or ''
    base_path = r'%s' % base_path + path_name
    self.register(base_path, viewset)

    # Make sure resource isn't already registered.
    if resource_key in resource_map:
        raise Exception(
            "The resource '%s' has already been mapped to '%s'."
            " Each resource can only be mapped to one canonical"
            " path. " % (
                resource_key,
                resource_map[resource_key]['path']
            )
        )

    # Register resource in reverse map.
    resource_map[resource_key] = {
        'path': base_path,
        'viewset': viewset
    }

    # Make sure the resource name isn't registered, either
    # TODO: Think of a better way to clean this up, there's a lot of
    # duplicated effort here, between `resource_name` and `resource_key`
    # This resource name -> key mapping is currently only used by
    # the DynamicGenericRelationField
    if resource_name in resource_name_map:
        # Re-bind resource_key to the previously-registered key, purely
        # so the error message can report the conflicting path.
        resource_key = resource_name_map[resource_name]
        raise Exception(
            "The resource name '%s' has already been mapped to '%s'."
            " A resource name can only be used once." % (
                resource_name,
                resource_map[resource_key]['path']
            )
        )

    # map the resource name to the resource key for easier lookup
    resource_name_map[resource_name] = resource_key
def get_canonical_path(resource_key, pk=None):
    """Return canonical resource path.

    Arguments:
        resource_key - Canonical resource key
            i.e. Serializer.get_resource_key().
        pk - (Optional) Object's primary key for a single-resource URL.

    Returns:
        Absolute URL as string, or None for an unregistered key.
    """
    entry = resource_map.get(resource_key)
    if entry is None:
        # Unknown resource. Note: Maybe raise instead?
        return None

    base_path = get_script_prefix() + entry['path']
    if not pk:
        return base_path
    return '%s/%s/' % (base_path, pk)
def get_canonical_serializer(
        resource_key,
        model=None,
        instance=None,
        resource_name=None
):
    """Return canonical serializer for a given resource name.

    Arguments:
        resource_key - Resource key, usually DB table for model-based
            resources, otherwise the plural name.
        model - (Optional) Model class to look up by.
        instance - (Optional) Model object instance.
        resource_name - (Optional) Resource name to look up by.

    Returns:
        serializer class, or None if the resource is not registered.
    """
    # The lookup key may be derived from one of the optional arguments,
    # taking precedence in this order: model, instance, resource_name.
    if model:
        key = get_model_table(model)
    elif instance:
        key = instance._meta.db_table
    elif resource_name:
        key = resource_name_map[resource_name]
    else:
        key = resource_key

    entry = resource_map.get(key)
    if entry is None:
        return None
    return entry['viewset'].serializer_class
def get_relation_routes(self, viewset):
    """Generate routes to serve relational objects.

    This method will add a sub-URL for each relational field.

    e.g.
    A viewset for the following serializer:

      class UserSerializer(..):
          events = DynamicRelationField(EventSerializer, many=True)
          groups = DynamicRelationField(GroupSerializer, many=True)
          location = DynamicRelationField(LocationSerializer)

    will have the following URLs added:

      /users/<pk>/events/
      /users/<pk>/groups/
      /users/<pk>/location/
    """
    routes = []

    # Both a serializer and a `list_related` handler are required;
    # otherwise there is nothing to route to.
    if not hasattr(viewset, 'serializer_class'):
        return routes
    if not hasattr(viewset, 'list_related'):
        return routes

    serializer = viewset.serializer_class()
    # Serializers without link fields yield no routes.
    fields = getattr(serializer, 'get_link_fields', lambda: [])()

    route_name = '{basename}-{methodnamehyphen}'
    for field_name, field in six.iteritems(fields):
        methodname = 'list_related'
        # Capture the field name in the URL so one handler serves all
        # relations.
        url = (
            r'^{prefix}/{lookup}/(?P<field_name>%s)'
            '{trailing_slash}$' % field_name
        )
        routes.append(Route(
            url=url,
            mapping={'get': methodname},
            name=replace_methodname(route_name, field_name),
            initkwargs={}
        ))
    return routes
def get_paths(self):
    """Get all paths from the root to the leaves.

    For example, given a chain like `{'a':{'b':{'c':None}}}`,
    this method would return `[['a', 'b', 'c']]`.

    Returns:
        A list of lists of paths.
    """
    result = []
    for key, child in six.iteritems(self):
        is_branch = isinstance(child, TreeMap) and child
        if not is_branch:
            # Leaf node (or empty subtree): the path terminates here.
            result.append([key])
            continue
        # Intermediate node: prepend this key to every descendant path.
        for sub_path in child.get_paths():
            sub_path.insert(0, key)
            result.append(sub_path)
    return result
def insert(self, parts, leaf_value, update=False):
    """Add a list of nodes into the tree.

    The list will be converted into a TreeMap (chain) and then
    merged with the current TreeMap.

    For example, this method would insert `['a','b','c']` as
    `{'a':{'b':{'c':{}}}}`.

    Arguments:
        parts: List of nodes representing a chain.
        leaf_value: Value to insert into the leaf of the chain.
        update: Whether or not to update the leaf with the given value or
            to replace the value.

    Returns:
        self
    """
    tree = self
    if not parts:
        # Nothing to insert.
        return tree

    cur = tree
    last = len(parts) - 1
    for i, part in enumerate(parts):
        if part not in cur:
            # New node: intermediate nodes get an empty TreeMap,
            # the final node gets the leaf value itself.
            cur[part] = TreeMap() if i != last else leaf_value
        elif i == last:  # found leaf
            if update:
                # Merge into the existing leaf (assumes it supports
                # .update(), i.e. is dict-like).
                cur[part].update(leaf_value)
            else:
                cur[part] = leaf_value
        # Descend. NOTE(review): if an existing intermediate node holds a
        # non-mapping leaf value, the next iteration's `in` test would
        # fail on it — callers appear to only insert TreeMap leaves when
        # paths can extend; confirm before relying on mixed values.
        cur = cur[part]
    return self
def tag_dict(obj, *args, **kwargs):
    """Create a TaggedDict instance.

    Will either be a TaggedOrderedDict or TaggedPlainDict depending
    on the type of `obj`, preserving key ordering where it matters.
    """
    dict_class = (
        _TaggedOrderedDict if isinstance(obj, OrderedDict)
        else _TaggedPlainDict
    )
    return dict_class(obj, *args, **kwargs)
def has_joins(queryset):
    """Return True iff. a queryset includes joins.

    If this is the case, it is possible for the queryset
    to return duplicate results.
    """
    # Any alias with a truthy join_type means at least one JOIN clause.
    return any(
        join.join_type
        for join in six.itervalues(queryset.query.alias_map)
    )
def generate_query_key(self, serializer):
    """Get the key that can be passed to Django's filter method.

    To account for serializer field name rewrites, this method
    translates serializer field names to model field names
    by inspecting `serializer`.

    For example, a query like `filter{users.events}` would be
    returned as `users__events`.

    Arguments:
        serializer: A DRF serializer

    Returns:
        A 2-tuple: (filter key string, last serializer field visited).
    """
    rewritten = []
    last = len(self.field) - 1
    s = serializer

    field = None
    for i, field_name in enumerate(self.field):
        # Note: .fields can be empty for related serializers that aren't
        # sideloaded. Fields that are deferred also won't be present.
        # If field name isn't in serializer.fields, get full list from
        # get_all_fields() method. This is somewhat expensive, so only do
        # this if we have to.
        fields = s.fields
        if field_name not in fields:
            fields = getattr(s, 'get_all_fields', lambda: {})()

        if field_name == 'pk':
            # 'pk' is always valid and passes through untranslated.
            rewritten.append('pk')
            continue

        if field_name not in fields:
            raise ValidationError(
                "Invalid filter field: %s" % field_name
            )

        field = fields[field_name]

        # For remote fields, strip off '_set' for filtering. This is a
        # weird Django inconsistency.
        model_field_name = field.source or field_name
        model_field = get_model_field(s.get_model(), model_field_name)
        if isinstance(model_field, RelatedObject):
            model_field_name = model_field.field.related_query_name()

        # If get_all_fields() was used above, field could be unbound,
        # and field.source would be None
        rewritten.append(model_field_name)

        if i == last:
            break

        # Recurse into nested field
        s = getattr(field, 'serializer', None)
        if isinstance(s, serializers.ListSerializer):
            s = s.child
        if not s:
            raise ValidationError(
                "Invalid nested filter field: %s" % field_name
            )

    if self.operator:
        # e.g. 'gt', 'icontains' -- appended as the final lookup segment.
        rewritten.append(self.operator)

    return ('__'.join(rewritten), field)
def filter_queryset(self, request, queryset, view):
    """Filter the queryset.

    This is the main entry-point to this class, and
    is called by DRF's list handler.
    """
    # Stash request/view so helper methods can reach them.
    self.request = request
    self.view = view

    # enable addition of extra filters (i.e., a Q())
    # so custom filters can be added to the queryset without
    # running into https://code.djangoproject.com/ticket/18437
    # which, without this, would mean that filters added to the queryset
    # after this is called may not behave as expected
    extra_filters = self.view.get_extra_filters(request)

    # Prefetching during updates can interfere with writes.
    disable_prefetches = self.view.is_update()

    self.DEBUG = settings.DEBUG

    return self._build_queryset(
        queryset=queryset,
        extra_filters=extra_filters,
        disable_prefetches=disable_prefetches,
    )
def _build_implicit_prefetches(
        self,
        model,
        prefetches,
        requirements
):
    """Build a prefetch dictionary based on internal requirements.

    Mutates and returns `prefetches`, adding one Prefetch object per
    requirement subtree that points at a related model.
    """
    for source, remainder in six.iteritems(requirements):
        if not remainder or isinstance(
            remainder, six.string_types
        ):
            # no further requirements to prefetch
            continue

        related_field = get_model_field(model, source)
        related_model = get_related_model(related_field)

        # Recurse to build the related queryset; non-relational fields
        # (no related model) get a bare Prefetch with no queryset.
        queryset = self._build_implicit_queryset(
            related_model,
            remainder
        ) if related_model else None

        prefetches[source] = self._create_prefetch(
            source,
            queryset
        )

    return prefetches
def _build_implicit_queryset(self, model, requirements):
    """Build a queryset based on implicit requirements.

    Creates a base queryset for `model`, attaches any nested
    prefetches derived from `requirements`, and de-duplicates rows.
    """
    base = self._make_model_queryset(model)
    prefetch_map = {}
    self._build_implicit_prefetches(
        model,
        prefetch_map,
        requirements
    )
    result = base.prefetch_related(*prefetch_map.values()).distinct()
    if self.DEBUG:
        # Expose the prefetch tree for debugging/introspection.
        result._using_prefetches = prefetch_map
    return result
def _build_requested_prefetches(
        self,
        prefetches,
        requirements,
        model,
        fields,
        filters
):
    """Build a prefetch dictionary based on request requirements.

    Arguments:
        prefetches: Dict (mutated in place) mapping source -> Prefetch.
        requirements: TreeMap of requirements; sources satisfied here
            are popped so implicit prefetching won't duplicate them.
        model: Model class backing the current serializer.
        fields: Serializer fields at the current nesting level.
        filters: TreeMap of nested request filters.

    Returns:
        The (mutated) `prefetches` dict.
    """
    for name, field in six.iteritems(fields):
        original_field = field
        if isinstance(field, DynamicRelationField):
            field = field.serializer
        if isinstance(field, serializers.ListSerializer):
            field = field.child
        if not isinstance(field, serializers.ModelSerializer):
            # Only model-backed relations can be prefetched.
            continue

        source = field.source or name
        if '.' in source:
            raise ValidationError(
                'nested relationship values '
                'are not supported'
            )

        if source in prefetches:
            # ignore duplicated sources
            continue

        is_remote = is_field_remote(model, source)
        is_id_only = getattr(field, 'id_only', lambda: False)()
        if is_id_only and not is_remote:
            # Local id-only relations don't require a prefetch.
            continue

        related_queryset = getattr(original_field, 'queryset', None)

        if callable(related_queryset):
            related_queryset = related_queryset(field)

        # Fix: removed a redundant second `source = field.source or name`
        # assignment that followed here -- `field` is unchanged since the
        # first assignment, so the value was identical.

        # Popping the source here (during explicit prefetch construction)
        # guarantees that implicitly required prefetches that follow will
        # not conflict.
        required = requirements.pop(source, None)

        prefetch_queryset = self._build_queryset(
            serializer=field,
            filters=filters.get(name, {}),
            queryset=related_queryset,
            requirements=required
        )

        # Note: There can only be one prefetch per source, even
        #       though there can be multiple fields pointing to
        #       the same source. This could break in some cases,
        #       but is mostly an issue on writes when we use all
        #       fields by default.
        prefetches[source] = self._create_prefetch(
            source,
            prefetch_queryset
        )

    return prefetches
def _get_implicit_requirements(
        self,
        fields,
        requirements
):
    """Extract internal prefetch requirements from serializer fields.

    Mutates `requirements` (a TreeMap) in place, inserting one path
    per requirement declared by each field.
    """
    for name, field in six.iteritems(fields):
        # A field may declare its own requirements; if not, assume it
        # requires only its own source.
        requires = getattr(field, 'requires', None) or [field.source]
        for require in requires:
            if not require:
                # ignore fields with empty source
                continue

            parts = require.split('.')
            if parts[-1] == '':
                # Normalize legacy 'a.b.' notation to 'a.b.*'
                # for backwards compatibility.
                parts[-1] = '*'
            requirements.insert(parts, TreeMap(), update=True)
def _build_queryset(
        self,
        serializer=None,
        filters=None,
        queryset=None,
        requirements=None,
        extra_filters=None,
        disable_prefetches=False,
):
    """Build a queryset that pulls in all data required by this request.

    Handles nested prefetching of related data and deferring fields
    at the queryset level.

    Arguments:
        serializer: An optional serializer to use a base for the queryset.
            If no serializer is passed, the `get_serializer` method will
            be used to initialize the base serializer for the viewset.
        filters: An optional TreeMap of nested filters.
        queryset: An optional base queryset.
        requirements: An optional TreeMap of nested requirements.
        extra_filters: An optional Q-like object AND-ed with the
            request filters (see filter_queryset).
        requirements: An optional TreeMap of nested requirements.
        disable_prefetches: If True, skip attaching prefetches
            (used during updates).
    """
    is_root_level = False
    if not serializer:
        serializer = self.view.get_serializer()
        # No serializer passed means this is the top-level call.
        is_root_level = True

    queryset = self._get_queryset(queryset=queryset, serializer=serializer)

    model = getattr(serializer.Meta, 'model', None)

    if not model:
        # Non-model serializer: nothing further to optimize.
        return queryset

    prefetches = {}

    # build a nested Prefetch queryset
    # based on request parameters and serializer fields
    fields = serializer.fields

    if requirements is None:
        requirements = TreeMap()

    self._get_implicit_requirements(
        fields,
        requirements
    )

    if filters is None:
        filters = self._get_requested_filters()

    # build nested Prefetch queryset
    self._build_requested_prefetches(
        prefetches,
        requirements,
        model,
        fields,
        filters
    )

    # build remaining prefetches out of internal requirements
    # that are not already covered by request requirements
    self._build_implicit_prefetches(
        model,
        prefetches,
        requirements
    )

    # use requirements at this level to limit fields selected
    # only do this for GET requests where we are not requesting the
    # entire fieldset
    if (
        '*' not in requirements and
        not self.view.is_update() and
        not self.view.is_delete()
    ):
        id_fields = getattr(serializer, 'get_id_fields', lambda: [])()
        # only include local model fields
        only = [
            field for field in set(
                id_fields + list(requirements.keys())
            ) if is_model_field(model, field) and
            not is_field_remote(model, field)
        ]
        queryset = queryset.only(*only)

    # add request filters
    query = self._filters_to_query(
        includes=filters.get('_include'),
        excludes=filters.get('_exclude'),
        serializer=serializer
    )

    # add additional filters specified by calling view
    if extra_filters:
        query = extra_filters if not query else extra_filters & query

    if query:
        # Convert internal django ValidationError to
        # APIException-based one in order to resolve validation error
        # from 500 status code to 400.
        try:
            queryset = queryset.filter(query)
        except InternalValidationError as e:
            raise ValidationError(
                dict(e) if hasattr(e, 'error_dict') else list(e)
            )
        except Exception as e:
            # Some other Django error in parsing the filter.
            # Very likely a bad query, so throw a ValidationError.
            err_msg = getattr(e, 'message', '')
            raise ValidationError(err_msg)

    # A serializer can have this optional function
    # to dynamically apply additional filters on
    # any queries that will use that serializer
    # You could use this to have (for example) different
    # serializers for different subsets of a model or to
    # implement permissions which work even in sideloads
    if hasattr(serializer, 'filter_queryset'):
        queryset = self._serializer_filter(
            serializer=serializer,
            queryset=queryset
        )

    # add prefetches and remove duplicates if necessary
    prefetch = prefetches.values()
    if prefetch and not disable_prefetches:
        queryset = queryset.prefetch_related(*prefetch)
    elif isinstance(queryset, Manager):
        # Ensure we always hand back a QuerySet, not a Manager.
        queryset = queryset.all()
    if has_joins(queryset) or not is_root_level:
        # Joins (or nested prefetch querysets) can produce duplicates.
        queryset = queryset.distinct()

    if self.DEBUG:
        queryset._using_prefetches = prefetches
    return queryset
def filter_queryset(self, request, queryset, view):
    """Filter the queryset, applying the ordering.

    The `ordering_param` can be overwritten here.
    In DRF, the ordering_param is 'ordering', but we support changing it
    to allow the viewset to control the parameter.
    """
    # Let the viewset decide which query parameter carries the sort.
    self.ordering_param = view.SORT

    ordering = self.get_ordering(request, queryset, view)
    if not ordering:
        return queryset
    return queryset.order_by(*ordering)
def get_ordering(self, request, queryset, view):
    """Return an ordering for a given request.

    DRF expects a comma separated list, while DREST expects an array.
    This method overwrites the DRF default so it can parse the array.
    """
    params = view.get_request_feature(view.SORT)
    if not params:
        # No sorting was included in the request.
        return self.get_default_ordering(view)

    requested = [param.strip() for param in params]
    valid, invalid = self.remove_invalid_fields(
        queryset, requested, view
    )
    # If any of the sort fields are invalid, reject the whole request;
    # otherwise return the validated ordering.
    if invalid:
        raise ValidationError(
            "Invalid filter field: %s" % invalid
        )
    return valid
def remove_invalid_fields(self, queryset, fields, view):
    """Remove invalid fields from an ordering.

    Overwrites the DRF default remove_invalid_fields method to return
    both the valid orderings and any invalid orderings.

    Returns:
        (valid_orderings, invalid_orderings) 2-tuple of lists.
    """
    valid_orderings = []
    invalid_orderings = []

    # for each field sent down from the query param,
    # determine if its valid or invalid
    for term in fields:
        stripped_term = term.lstrip('-')
        # add back the '-' at the front if necessary.
        # Fix: compare lengths with `==`, not `is` -- identity comparison
        # of ints only happens to work for small values in CPython.
        reverse_sort_term = '' if len(stripped_term) == len(term) else '-'
        ordering = self.ordering_for(stripped_term, view)

        if ordering:
            valid_orderings.append(reverse_sort_term + ordering)
        else:
            invalid_orderings.append(term)

    return valid_orderings, invalid_orderings
def combine(line, left, intersect, right):
    """Zip borders between items in `line`.

    e.g. ('l', '1', 'c', '2', 'c', '3', 'r')

    :param iter line: List to iterate.
    :param left: Left border.
    :param intersect: Column separator.
    :param right: Right border.

    :return: Yields combined objects.
    """
    # Leading border first.
    if left:
        yield left

    if not intersect:
        # No separator: pass items straight through.
        for item in line:
            yield item
    else:
        try:
            # Sized iterables: len() works, interleave by position.
            length = len(line)
        except TypeError:
            # Generators/iterators have no len(); interleave lazily
            # with one item of lookahead.
            iterator = iter(line)
            try:
                current = next(iterator)
            except StopIteration:
                # Was empty all along.
                pass
            else:
                while True:
                    yield current
                    try:
                        current = next(iterator)
                    except StopIteration:
                        break
                    yield intersect
        else:
            for position in range(length):
                if position:
                    yield intersect
                yield line[position]

    # Trailing border last.
    if right:
        yield right
def build_row(row, left, center, right):
    """Combine single or multi-lined cells into a single row of list of lists
    including borders.

    Row must already be padded and extended so each cell has the same number
    of lines.

    Example return value:
    [
        ['>', 'Left ', '|', 'Center', '|', 'Right', '<'],
        ['>', 'Cell1', '|', 'Cell2 ', '|', 'Cell3', '<'],
    ]

    :param iter row: List of cells for one row.
    :param str left: Left border.
    :param str center: Column separator.
    :param str right: Right border.

    :return: Yields other generators that yield strings.
    :rtype: iter
    """
    if not row or not row[0]:
        # Empty row: emit a single border-only line.
        yield combine((), left, center, right)
        return

    # All cells share the same line count; walk the lines in lockstep.
    line_count = len(row[0])
    for line_index in range(line_count):
        cell_slices = (cell[line_index] for cell in row)
        yield combine(cell_slices, left, center, right)
def run(cls):
    """Check variables.

    Validates that the packaged project's metadata (__author__,
    __license__, __version__) matches setup-time constants, that the
    changelog mentions the current version, and that tox.ini pins the
    same dependencies as INSTALL_REQUIRES. Exits via SystemExit on
    any mismatch.
    """
    project = __import__(IMPORT, fromlist=[''])
    for expected, var in [('@Robpol86', '__author__'), (LICENSE, '__license__'), (VERSION, '__version__')]:
        if getattr(project, var) != expected:
            raise SystemExit('Mismatch: {0}'.format(var))
    # Check changelog: expect a '<version> - YYYY-MM-DD' heading.
    if not re.compile(r'^%s - \d{4}-\d{2}-\d{2}[\r\n]' % VERSION, re.MULTILINE).search(readme()):
        raise SystemExit('Version not found in readme/changelog file.')
    # Check tox: pinned deps must match INSTALL_REQUIRES exactly
    # (same packages, same order).
    if INSTALL_REQUIRES:
        contents = readme('tox.ini')
        section = re.compile(r'[\r\n]+install_requires =[\r\n]+(.+?)[\r\n]+\w', re.DOTALL).findall(contents)
        if not section:
            raise SystemExit('Missing install_requires section in tox.ini.')
        in_tox = re.findall(r' ([^=]+)==[\w\d.-]+', section[0])
        if INSTALL_REQUIRES != in_tox:
            raise SystemExit('Missing/unordered pinned dependencies in tox.ini.')
def terminal_size(kernel32=None):
    """Get the width and height of the terminal.

    http://code.activestate.com/recipes/440694-determine-size-of-console-window-on-windows/
    http://stackoverflow.com/questions/17993814/why-the-irrelevant-code-made-a-difference

    :param kernel32: Optional mock kernel32 object. For testing.

    :return: Width (number of characters) and height (number of lines) of the terminal.
    :rtype: tuple
    """
    if IS_WINDOWS:
        kernel32 = kernel32 or ctypes.windll.kernel32
        try:
            # Try stderr first: less likely to be redirected than stdout.
            return get_console_info(kernel32, kernel32.GetStdHandle(STD_ERROR_HANDLE))
        except OSError:
            try:
                return get_console_info(kernel32, kernel32.GetStdHandle(STD_OUTPUT_HANDLE))
            except OSError:
                # Neither handle works (e.g. fully redirected): fall back.
                return DEFAULT_WIDTH, DEFAULT_HEIGHT

    try:
        # POSIX: TIOCGWINSZ ioctl on stdin returns a packed winsize struct.
        # Lazy __import__ keeps fcntl/termios out of Windows imports.
        device = __import__('fcntl').ioctl(0, __import__('termios').TIOCGWINSZ, '\0\0\0\0\0\0\0\0')
    except IOError:
        return DEFAULT_WIDTH, DEFAULT_HEIGHT
    # winsize is (rows, cols, xpixel, ypixel); we want (cols, rows).
    height, width = struct.unpack('hhhh', device)[:2]
    return width, height
def set_terminal_title(title, kernel32=None):
    """Set the terminal title.

    :param title: The title to set (string, unicode, bytes accepted).
    :param kernel32: Optional mock kernel32 object. For testing.

    :return: If title changed successfully (Windows only, always True on Linux/OSX).
    :rtype: bool
    """
    try:
        title_bytes = title.encode('utf-8')
    except AttributeError:
        # Already bytes.
        title_bytes = title

    if IS_WINDOWS:
        kernel32 = kernel32 or ctypes.windll.kernel32
        try:
            is_ascii = all(ord(c) < 128 for c in title)  # str/unicode.
        except TypeError:
            is_ascii = all(c < 128 for c in title)  # bytes.
        if is_ascii:
            return kernel32.SetConsoleTitleA(title_bytes) != 0
        else:
            return kernel32.SetConsoleTitleW(title) != 0

    # Linux/OSX: emit the xterm OSC title escape sequence.
    # Fix: on Python 3, sys.stdout is a text stream and rejects bytes;
    # write through the underlying binary buffer when it exists.
    payload = b'\033]0;' + title_bytes + b'\007'
    stdout_buffer = getattr(sys.stdout, 'buffer', None)
    if stdout_buffer is not None:
        stdout_buffer.write(payload)
    else:
        sys.stdout.write(payload)
    return True
def table_abcd():
    """Return table string to be printed. Two tables on one line."""
    table_instance = SingleTable([['A', 'B'], ['C', 'D']])

    # Get first table lines: inner borders only.
    table_instance.outer_border = False
    table_inner_borders = table_instance.table.splitlines()

    # Get second table lines: outer border only, no inner separators.
    table_instance.outer_border = True
    table_instance.inner_heading_row_border = False
    table_instance.inner_column_border = False
    table_outer_borders = table_instance.table.splitlines()

    # Combine side by side; pad the shorter table with empty lines so
    # both have the same number of rows.
    smallest, largest = sorted([table_inner_borders, table_outer_borders], key=len)
    smallest += [''] * (len(largest) - len(smallest))  # Make both same size.
    combined = list()
    for i, row in enumerate(largest):
        combined.append(row.ljust(10) + ' ' + smallest[i])
    return '\n'.join(combined)
def column_max_width(self, column_number):
    """Return the maximum width of a column based on the current terminal width.

    :param int column_number: The column number to query.

    :return: The max width of the column.
    :rtype: int
    """
    widths = max_dimensions(self.table_data)[0]
    border_outer = 2 if self.outer_border else 0
    border_inner = 1 if self.inner_column_border else 0
    total_padding = self.padding_left + self.padding_right
    # Delegate to the module-level column_max_width() helper.
    return column_max_width(
        widths, column_number, border_outer, border_inner, total_padding
    )
def table_width(self):
    """Return the width of the table including padding and borders."""
    padded_widths = max_dimensions(
        self.table_data, self.padding_left, self.padding_right
    )[2]
    border_outer = 2 if self.outer_border else 0
    border_inner = 1 if self.inner_column_border else 0
    # Delegate to the module-level table_width() helper.
    return table_width(padded_widths, border_outer, border_inner)
def horizontal_border(self, _, outer_widths):
    """Handle the GitHub heading border.

    E.g.:
    |:---|:---:|---:|----|

    :param _: Unused.
    :param iter outer_widths: List of widths (with padding) for each column.

    :return: Prepared border strings in a generator.
    :rtype: iter
    """
    dash = str(self.CHAR_INNER_HORIZONTAL)
    segments = []
    for index, width in enumerate(outer_widths):
        # Width should be at least 3 so justification can be applied.
        width = max(3, width)
        justify = self.justify_columns.get(index)
        if justify == 'left':
            segment = ':' + dash * (width - 1)
        elif justify == 'right':
            segment = dash * (width - 1) + ':'
        elif justify == 'center':
            segment = ':' + dash * (width - 2) + ':'
        else:
            segment = dash * width
        segments.append(segment)
    return combine(
        segments,
        self.CHAR_OUTER_LEFT_VERTICAL,
        self.CHAR_INNER_VERTICAL,
        self.CHAR_OUTER_RIGHT_VERTICAL,
    )
def visible_width(string):
    """Get the visible width of a unicode string.

    Some CJK unicode characters are more than one byte unlike ASCII and latin
    unicode characters.

    From: https://github.com/Robpol86/terminaltables/pull/9

    :param str string: String to measure.

    :return: String's width.
    :rtype: int
    """
    # Strip ANSI color/escape sequences; they occupy no columns.
    if '\033' in string:
        string = RE_COLOR_ANSI.sub('', string)

    # Convert to unicode (Python 2 bytes); already-unicode strings
    # have no .decode (AttributeError) and pass through.
    try:
        string = string.decode('u8')
    except (AttributeError, UnicodeEncodeError):
        pass

    # Fullwidth ('F') and Wide ('W') East Asian characters take two
    # terminal columns; everything else takes one.
    return sum(
        2 if unicodedata.east_asian_width(char) in ('F', 'W') else 1
        for char in string
    )
def align_and_pad_cell(string, align, inner_dimensions, padding, space=' '):
    """Align a string horizontally and vertically. Also add additional padding in both dimensions.

    :param str string: Input string to operate on.
    :param tuple align: Tuple that contains one of left/center/right and/or top/middle/bottom.
    :param tuple inner_dimensions: Width and height ints to expand string to without padding.
    :param iter padding: Number of space chars for left, right, top, and bottom (4 ints).
    :param str space: Character to use as white space for resizing/padding (use single visible chars only).

    :return: Padded cell split into lines.
    :rtype: list
    """
    if not hasattr(string, 'splitlines'):
        string = str(string)

    # Handle trailing newlines or empty strings, str.splitlines() does not satisfy.
    lines = string.splitlines() or ['']
    if string.endswith('\n'):
        lines.append('')

    # Vertically align and pad: distribute blank lines above/below
    # depending on top/middle/bottom, then add top/bottom padding.
    if 'bottom' in align:
        lines = ([''] * (inner_dimensions[1] - len(lines) + padding[2])) + lines + ([''] * padding[3])
    elif 'middle' in align:
        delta = inner_dimensions[1] - len(lines)
        # Odd remainder goes above the content.
        lines = ([''] * (delta // 2 + delta % 2 + padding[2])) + lines + ([''] * (delta // 2 + padding[3]))
    else:
        lines = ([''] * padding[2]) + lines + ([''] * (inner_dimensions[1] - len(lines) + padding[3]))

    # Horizontally align and pad. Widths are adjusted by the difference
    # between len() and visible_width() so wide (CJK) characters and
    # ANSI sequences justify correctly.
    for i, line in enumerate(lines):
        new_width = inner_dimensions[0] + len(line) - visible_width(line)
        if 'right' in align:
            lines[i] = line.rjust(padding[0] + new_width, space) + (space * padding[1])
        elif 'center' in align:
            lines[i] = (space * padding[0]) + line.center(new_width, space) + (space * padding[1])
        else:
            lines[i] = (space * padding[0]) + line.ljust(new_width + padding[1], space)

    return lines
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def max_dimensions(table_data, padding_left=0, padding_right=0, padding_top=0, padding_bottom=0): """Get maximum widths of each column and maximum height of each row. :param iter table_data: List of list of strings (unmodified table data). :param int padding_left: Number of space chars on left side of cell. :param int padding_right: Number of space chars on right side of cell. :param int padding_top: Number of empty lines on top side of cell. :param int padding_bottom: Number of empty lines on bottom side of cell. :return: 4-item tuple of n-item lists. Inner column widths and row heights, outer column widths and row heights. :rtype: tuple """
# One width slot per column (widest row decides), one height slot per row.
inner_widths = [0] * (max(len(r) for r in table_data) if table_data else 0)
inner_heights = [0] * len(table_data)

# Find max width and heights.
for j, row in enumerate(table_data):
    for i, cell in enumerate(row):
        # Coerce non-string cells before measuring.
        if not hasattr(cell, 'count') or not hasattr(cell, 'splitlines'):
            cell = str(cell)
        if not cell:
            continue
        inner_heights[j] = max(inner_heights[j], cell.count('\n') + 1)
        # visible_width accounts for wide CJK chars and ANSI escapes.
        inner_widths[i] = max(inner_widths[i], *[visible_width(l) for l in cell.splitlines()])

# Calculate with padding.
outer_widths = [padding_left + i + padding_right for i in inner_widths]
outer_heights = [padding_top + i + padding_bottom for i in inner_heights]

return inner_widths, inner_heights, outer_widths, outer_heights
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def column_max_width(inner_widths, column_number, outer_border, inner_border, padding): """Determine the maximum width of a column based on the current terminal width. :param iter inner_widths: List of widths (no padding) for each column. :param int column_number: The column number to query. :param int outer_border: Sum of left and right outer border visible widths. :param int inner_border: Visible width of the inner border character. :param int padding: Total padding per cell (left + right padding). :return: The maximum width the column can be without causing line wrapping. """
column_count = len(inner_widths) terminal_width = terminal_size()[0] # Count how much space padding, outer, and inner borders take up. non_data_space = outer_border non_data_space += inner_border * (column_count - 1) non_data_space += column_count * padding # Exclude selected column's width. data_space = sum(inner_widths) - inner_widths[column_number] return terminal_width - data_space - non_data_space
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def table_width(outer_widths, outer_border, inner_border): """Determine the width of the entire table including borders and padding. :param iter outer_widths: List of widths (with padding) for each column. :param int outer_border: Sum of left and right outer border visible widths. :param int inner_border: Visible width of the inner border character. :return: The width of the table. :rtype: int """
column_count = len(outer_widths) # Count how much space outer and inner borders take up. non_data_space = outer_border if column_count: non_data_space += inner_border * (column_count - 1) # Space of all columns and their padding. data_space = sum(outer_widths) return data_space + non_data_space
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def horizontal_border(self, style, outer_widths): """Build any kind of horizontal border for the table. :param str style: Type of border to return. :param iter outer_widths: List of widths (with padding) for each column. :return: Prepared border as a tuple of strings. :rtype: tuple """
# Select the character set for the requested border style; only the 'top'
# border carries the table title.
if style == 'top':
    horizontal = self.CHAR_OUTER_TOP_HORIZONTAL
    left = self.CHAR_OUTER_TOP_LEFT
    intersect = self.CHAR_OUTER_TOP_INTERSECT if self.inner_column_border else ''
    right = self.CHAR_OUTER_TOP_RIGHT
    title = self.title
elif style == 'bottom':
    horizontal = self.CHAR_OUTER_BOTTOM_HORIZONTAL
    left = self.CHAR_OUTER_BOTTOM_LEFT
    intersect = self.CHAR_OUTER_BOTTOM_INTERSECT if self.inner_column_border else ''
    right = self.CHAR_OUTER_BOTTOM_RIGHT
    title = None
elif style == 'heading':
    # Border below the heading row.
    horizontal = self.CHAR_H_INNER_HORIZONTAL
    left = self.CHAR_H_OUTER_LEFT_INTERSECT if self.outer_border else ''
    intersect = self.CHAR_H_INNER_INTERSECT if self.inner_column_border else ''
    right = self.CHAR_H_OUTER_RIGHT_INTERSECT if self.outer_border else ''
    title = None
elif style == 'footing':
    # Border above the footing row.
    horizontal = self.CHAR_F_INNER_HORIZONTAL
    left = self.CHAR_F_OUTER_LEFT_INTERSECT if self.outer_border else ''
    intersect = self.CHAR_F_INNER_INTERSECT if self.inner_column_border else ''
    right = self.CHAR_F_OUTER_RIGHT_INTERSECT if self.outer_border else ''
    title = None
else:
    # Generic inner row border.
    horizontal = self.CHAR_INNER_HORIZONTAL
    left = self.CHAR_OUTER_LEFT_INTERSECT if self.outer_border else ''
    intersect = self.CHAR_INNER_INTERSECT if self.inner_column_border else ''
    right = self.CHAR_OUTER_RIGHT_INTERSECT if self.outer_border else ''
    title = None
return build_border(outer_widths, horizontal, left, intersect, right, title)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gen_row_lines(self, row, style, inner_widths, height): r"""Combine cells in row and group them into lines with vertical borders. Caller is expected to pass yielded lines to ''.join() to combine them into a printable line. Caller must append newline character to the end of joined line. In: ['Row One Column One', 'Two', 'Three'] Out: [ ('|', ' Row One Column One ', '|', ' Two ', '|', ' Three ', '|'), ] In: ['Row One\nColumn One', 'Two', 'Three'], Out: [ ('|', ' Row One ', '|', ' Two ', '|', ' Three ', '|'), ('|', ' Column One ', '|', ' ', '|', ' ', '|'), ] :param iter row: One row in the table. List of cells. :param str style: Type of border characters to use. :param iter inner_widths: List of widths (no padding) for each column. :param int height: Inner height (no padding) (number of lines) to expand row to. :return: Yields lines split into components in a list. Caller must ''.join() line. """
cells_in_row = list() # Resize row if it doesn't have enough cells. if len(row) != len(inner_widths): row = row + [''] * (len(inner_widths) - len(row)) # Pad and align each cell. Split each cell into lines to support multi-line cells. for i, cell in enumerate(row): align = (self.justify_columns.get(i),) inner_dimensions = (inner_widths[i], height) padding = (self.padding_left, self.padding_right, 0, 0) cells_in_row.append(align_and_pad_cell(cell, align, inner_dimensions, padding)) # Determine border characters. if style == 'heading': left = self.CHAR_H_OUTER_LEFT_VERTICAL if self.outer_border else '' center = self.CHAR_H_INNER_VERTICAL if self.inner_column_border else '' right = self.CHAR_H_OUTER_RIGHT_VERTICAL if self.outer_border else '' elif style == 'footing': left = self.CHAR_F_OUTER_LEFT_VERTICAL if self.outer_border else '' center = self.CHAR_F_INNER_VERTICAL if self.inner_column_border else '' right = self.CHAR_F_OUTER_RIGHT_VERTICAL if self.outer_border else '' else: left = self.CHAR_OUTER_LEFT_VERTICAL if self.outer_border else '' center = self.CHAR_INNER_VERTICAL if self.inner_column_border else '' right = self.CHAR_OUTER_RIGHT_VERTICAL if self.outer_border else '' # Yield each line. for line in build_row(cells_in_row, left, center, right): yield line
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def simple_atmo_opstring(haze, contrast, bias): """Make a simple atmospheric correction formula."""
gamma_b = 1 - haze gamma_g = 1 - (haze / 3.0) ops = ( "gamma g {gamma_g}, " "gamma b {gamma_b}, " "sigmoidal rgb {contrast} {bias}" ).format(gamma_g=gamma_g, gamma_b=gamma_b, contrast=contrast, bias=bias) return ops
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _op_factory(func, kwargs, opname, bands, rgb_op=False): """create an operation function closure don't call directly, use parse_operations returns a function which itself takes and returns ndarrays """
def f(arr): # Avoid mutation by copying newarr = arr.copy() if rgb_op: # apply func to array's first 3 bands, assumed r,g,b # additional band(s) are untouched newarr[0:3] = func(newarr[0:3], **kwargs) else: # apply func to array band at a time for b in bands: newarr[b - 1] = func(arr[b - 1], **kwargs) return newarr f.__name__ = str(opname) return f
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_operations(ops_string): """Takes a string of operations written with a handy DSL "OPERATION-NAME BANDS ARG1 ARG2 OPERATION-NAME BANDS ARG" And returns a list of functions, each of which take and return ndarrays """
# Map r/g/b letters to 1-indexed band numbers.
band_lookup = {"r": 1, "g": 2, "b": 3}
count = len(band_lookup)

opfuncs = {"saturation": saturation, "sigmoidal": sigmoidal, "gamma": gamma}

# Keyword-argument names expected by each operation, in positional order.
opkwargs = {
    "saturation": ("proportion",),
    "sigmoidal": ("contrast", "bias"),
    "gamma": ("g",),
}

# Operations that assume RGB colorspace
rgb_ops = ("saturation",)

# split into tokens, commas are optional whitespace
tokens = [x.strip() for x in ops_string.replace(",", "").split(" ")]

# Group tokens into [opname, bands, arg...] lists; a new group starts at
# every recognized operation name.
operations = []
current = []
for token in tokens:
    if token.lower() in opfuncs.keys():
        if len(current) > 0:
            operations.append(current)
            current = []
    current.append(token.lower())
if len(current) > 0:
    operations.append(current)

result = []
for parts in operations:
    opname = parts[0]
    bandstr = parts[1]
    args = parts[2:]

    try:
        func = opfuncs[opname]
    except KeyError:
        raise ValueError("{} is not a valid operation".format(opname))

    if opname in rgb_ops:
        # ignore bands, assumed to be in rgb
        # push 2nd arg into args
        args = [bandstr] + args
        bands = (1, 2, 3)
    else:
        # 2nd arg is bands
        # parse r,g,b ~= 1,2,3
        bands = set()
        for bs in bandstr:
            try:
                band = int(bs)
            except ValueError:
                band = band_lookup[bs.lower()]
            if band < 1 or band > count:
                raise ValueError(
                    "{} BAND must be between 1 and {}".format(opname, count)
                )
            bands.add(band)

    # assume all args are float
    args = [float(arg) for arg in args]
    kwargs = dict(zip(opkwargs[opname], args))

    # Create operation function
    f = _op_factory(
        func=func,
        kwargs=kwargs,
        opname=opname,
        bands=bands,
        rgb_op=(opname in rgb_ops),
    )
    result.append(f)

return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_jobs(jobs): """Validate number of jobs."""
if jobs == 0: raise click.UsageError("Jobs must be >= 1 or == -1") elif jobs < 0: import multiprocessing jobs = multiprocessing.cpu_count() return jobs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_math_type(arr): """Convert an array from native integer dtype range to 0..1 scaling down linearly """
max_int = np.iinfo(arr.dtype).max return arr.astype(math_type) / max_int
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def scale_dtype(arr, dtype): """Convert an array from 0..1 to dtype, scaling up linearly """
max_int = np.iinfo(dtype).max return (arr * max_int).astype(dtype)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def magick_to_rio(convert_opts): """Translate a limited subset of imagemagick convert commands to rio color operations Parameters convert_opts: String, imagemagick convert options Returns ------- operations string, ordered rio color operations """
ops = [] bands = None def set_band(x): global bands if x.upper() == "RGB": x = "RGB" bands = x.upper() set_band("RGB") def append_sig(arg): global bands args = list(filter(None, re.split("[,x]+", arg))) if len(args) == 1: args.append(0.5) elif len(args) == 2: args[1] = float(args[1].replace("%", "")) / 100.0 ops.append("sigmoidal {} {} {}".format(bands, *args)) def append_gamma(arg): global bands ops.append("gamma {} {}".format(bands, arg)) def append_sat(arg): args = list(filter(None, re.split("[,x]+", arg))) # ignore args[0] # convert to proportion prop = float(args[1]) / 100 ops.append("saturation {}".format(prop)) nextf = None for part in convert_opts.strip().split(" "): if part == "-channel": nextf = set_band elif part == "+channel": set_band("RGB") nextf = None elif part == "-sigmoidal-contrast": nextf = append_sig elif part == "-gamma": nextf = append_gamma elif part == "-modulate": nextf = append_sat else: if nextf: nextf(part) nextf = None return " ".join(ops)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calc_downsample(w, h, target=400): """Calculate downsampling value."""
if w > h: return h / target elif h >= w: return w / target
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def move(self): """Create a state change."""
# Pick one formula parameter at random and nudge it by +/-5%.
k = random.choice(self.keys)
multiplier = random.choice((0.95, 1.05))
invalid_key = True
while invalid_key:
    # make sure bias doesn't exceed 1.0
    if k == "bias":
        if self.state[k] > 0.909:
            # Bias too close to 1.0 to scale up safely; re-draw another key.
            # NOTE(review): 0.909 looks like a ~1/1.1 guard — confirm intent.
            k = random.choice(self.keys)
            continue
    invalid_key = False
newval = self.state[k] * multiplier
self.state[k] = newval
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def apply_color(self, arr, state): """Apply color formula to an array."""
ops = self.cmd(state) for func in parse_operations(ops): arr = func(arr) return arr
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def energy(self): """Calculate state's energy."""
arr = self.src.copy() arr = self.apply_color(arr, self.state) scores = [histogram_distance(self.ref[i], arr[i]) for i in range(3)] # Important: scale by 100 for readability return sum(scores) * 100
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update(self, step, T, E, acceptance, improvement): """Print progress."""
# Progress callback for the annealer; also refreshes the optional
# matplotlib preview (fig/imgs/txt are presumably module-level handles —
# TODO confirm; they are not defined in this block).
if acceptance is None:
    acceptance = 0
if improvement is None:
    improvement = 0
if step > 0:
    # Estimate remaining time from the average time per completed step.
    elapsed = time.time() - self.start
    remain = (self.steps - step) * (elapsed / step)
    # print('Time {} ({} Remaining)'.format(time_string(elapsed), time_string(remain)))
else:
    elapsed = 0
    remain = 0

curr = self.cmd(self.state)
curr_score = float(E)
best = self.cmd(self.best_state)
best_score = self.best_energy

report = progress_report(
    curr,
    best,
    curr_score,
    best_score,
    step,
    self.steps,
    acceptance * 100,
    improvement * 100,
    time_string(elapsed),
    time_string(remain),
)
print(report)

if fig:
    # Preview current and best candidate corrections side by side.
    imgs[1].set_data(
        reshape_as_image(self.apply_color(self.src.copy(), self.state))
    )
    imgs[2].set_data(
        reshape_as_image(self.apply_color(self.src.copy(), self.best_state))
    )
    if txt:
        txt.set_text(report)
    fig.canvas.draw()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def atmos_worker(srcs, window, ij, args): """A simple atmospheric correction user function."""
src = srcs[0] rgb = src.read(window=window) rgb = to_math_type(rgb) atmos = simple_atmo(rgb, args["atmo"], args["contrast"], args["bias"]) # should be scaled 0 to 1, scale to outtype return scale_dtype(atmos, args["out_dtype"])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def color_worker(srcs, window, ij, args): """A user function."""
src = srcs[0] arr = src.read(window=window) arr = to_math_type(arr) for func in parse_operations(args["ops_string"]): arr = func(arr) # scaled 0 to 1, now scale to outtype return scale_dtype(arr, args["out_dtype"])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_raw_code(self, string_or_list): """Add raw Gmsh code. """
if _is_string(string_or_list): self._GMSH_CODE.append(string_or_list) else: assert isinstance(string_or_list, list) for string in string_or_list: self._GMSH_CODE.append(string) return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_torus_extrude_lines( self, irad, orad, lcar=None, R=numpy.eye(3), x0=numpy.array([0.0, 0.0, 0.0]) ): """Create Gmsh code for the torus in the x-y plane under the coordinate transformation .. math:: \\hat{x} = R x + x_0. :param irad: inner radius of the torus :param orad: outer radius of the torus """
self.add_comment("Torus") # Add circle x0t = numpy.dot(R, numpy.array([0.0, orad, 0.0])) # Get circles in y-z plane Rc = numpy.array([[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0]]) c = self.add_circle(x0 + x0t, irad, lcar=lcar, R=numpy.dot(R, Rc)) rot_axis = [0.0, 0.0, 1.0] rot_axis = numpy.dot(R, rot_axis) point_on_rot_axis = [0.0, 0.0, 0.0] point_on_rot_axis = numpy.dot(R, point_on_rot_axis) + x0 # Form the torus by extruding the circle three times by 2/3*pi. This # works around the inability of Gmsh to extrude by pi or more. The # Extrude() macro returns an array; the first [0] entry in the array is # the entity that has been extruded at the far end. This can be used # for the following Extrude() step. The second [1] entry of the array # is the surface that was created by the extrusion. previous = c.line_loop.lines angle = "2*Pi/3" all_surfaces = [] for i in range(3): self.add_comment("Round no. {}".format(i + 1)) for k, p in enumerate(previous): # ts1[] = Extrude {{0,0,1}, {0,0,0}, 2*Pi/3}{Line{tc1};}; # ... top, surf, _ = self.extrude( p, rotation_axis=rot_axis, point_on_axis=point_on_rot_axis, angle=angle, ) all_surfaces.append(surf) previous[k] = top # compound_surface = CompoundSurface(all_surfaces) surface_loop = self.add_surface_loop(all_surfaces) vol = self.add_volume(surface_loop) # The newline at the end is essential: # If a GEO file doesn't end in a newline, Gmsh will report a syntax # error. self.add_comment("\n") return vol
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_torus_extrude_circle( self, irad, orad, lcar=None, R=numpy.eye(3), x0=numpy.array([0.0, 0.0, 0.0]) ): """Create Gmsh code for the torus under the coordinate transformation .. math:: \\hat{x} = R x + x_0. :param irad: inner radius of the torus :param orad: outer radius of the torus """
self.add_comment(76 * "-")
self.add_comment("Torus")

# Add circle
# Center of the generating circle sits at distance `orad` from the axis.
x0t = numpy.dot(R, numpy.array([0.0, orad, 0.0]))
Rc = numpy.array([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
c = self.add_circle(x0 + x0t, irad, lcar=lcar, R=numpy.dot(R, Rc))

rot_axis = [0.0, 0.0, 1.0]
rot_axis = numpy.dot(R, rot_axis)
point_on_rot_axis = [0.0, 0.0, 0.0]
point_on_rot_axis = numpy.dot(R, point_on_rot_axis) + x0

# Form the torus by extruding the circle three times by 2/3*pi. This
# works around the inability of Gmsh to extrude by pi or more. The
# Extrude() macro returns an array; the first [0] entry in the array is
# the entity that has been extruded at the far end. This can be used
# for the following Extrude() step.  The second [1] entry of the array
# is the surface that was created by the extrusion. The third [2-end]
# is a list of all the planes of the lateral surface.
previous = c.plane_surface
all_volumes = []
num_steps = 3
for _ in range(num_steps):
    top, vol, _ = self.extrude(
        previous,
        rotation_axis=rot_axis,
        point_on_axis=point_on_rot_axis,
        angle="2*Pi/{}".format(num_steps),
    )
    previous = top
    all_volumes.append(vol)

# Merge the partial volumes; the syntax differs between Gmsh 3 and 4.
if self._gmsh_major() == 3:
    # This actually returns the volume, but the gmsh 4 version doesn't have that
    # feature. Hence, for compatibility, also ditch it here.
    self.add_compound_volume(all_volumes)
else:
    assert self._gmsh_major() == 4
    self.add_raw_code(
        "Compound Volume{{{}}};".format(",".join(v.id for v in all_volumes))
    )
self.add_comment(76 * "-" + "\n")
return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_pipe_by_rectangle_rotation( self, outer_radius, inner_radius, length, R=numpy.eye(3), x0=numpy.array([0.0, 0.0, 0.0]), lcar=None, ): """Hollow cylinder. Define a rectangle, extrude it by rotation. """
self.add_comment("Define rectangle.")
# Cross-section of the pipe wall in the y-z plane (inner to outer radius,
# spanning the full length along z).
X = numpy.array(
    [
        [0.0, outer_radius, -0.5 * length],
        [0.0, outer_radius, +0.5 * length],
        [0.0, inner_radius, +0.5 * length],
        [0.0, inner_radius, -0.5 * length],
    ]
)
# Apply transformation.
X = [numpy.dot(R, x) + x0 for x in X]
# Create points set.
p = [self.add_point(x, lcar=lcar) for x in X]

# Define edges.
e = [
    self.add_line(p[0], p[1]),
    self.add_line(p[1], p[2]),
    self.add_line(p[2], p[3]),
    self.add_line(p[3], p[0]),
]

rot_axis = [0.0, 0.0, 1.0]
rot_axis = numpy.dot(R, rot_axis)
point_on_rot_axis = [0.0, 0.0, 0.0]
point_on_rot_axis = numpy.dot(R, point_on_rot_axis) + x0

# Extrude all edges three times by 2*Pi/3.
# (Gmsh cannot extrude by pi or more in one step.)
previous = e
angle = "2*Pi/3"
all_surfaces = []
# com = []
self.add_comment("Extrude in 3 steps.")
for i in range(3):
    self.add_comment("Step {}".format(i + 1))
    for k, p in enumerate(previous):
        # ts1[] = Extrude {{0,0,1}, {0,0,0}, 2*Pi/3}{Line{tc1};};
        top, surf, _ = self.extrude(
            p,
            rotation_axis=rot_axis,
            point_on_axis=point_on_rot_axis,
            angle=angle,
        )
        # if k==0:
        #     com.append(surf)
        # else:
        #     all_names.appends(surf)
        all_surfaces.append(surf)
        # The far-end edge seeds the next extrusion round.
        previous[k] = top

#
# cs = CompoundSurface(com)
# Now just add surface loop and volume.
# all_surfaces = all_names + [cs]
surface_loop = self.add_surface_loop(all_surfaces)
vol = self.add_volume(surface_loop)
return vol
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_pipe_by_circle_extrusion( self, outer_radius, inner_radius, length, R=numpy.eye(3), x0=numpy.array([0.0, 0.0, 0.0]), lcar=None, ): """Hollow cylinder. Define a ring, extrude it by translation. """
# Define ring which to Extrude by translation. Rc = numpy.array([[0.0, 0.0, 1.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]) c_inner = self.add_circle( x0, inner_radius, lcar=lcar, R=numpy.dot(R, Rc), make_surface=False ) circ = self.add_circle( x0, outer_radius, lcar=lcar, R=numpy.dot(R, Rc), holes=[c_inner.line_loop] ) # Now Extrude the ring surface. _, vol, _ = self.extrude( circ.plane_surface, translation_axis=numpy.dot(R, [length, 0, 0]) ) return vol
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def translate(self, input_entity, vector): """Translates input_entity itself by vector. Changes the input object. """
d = {1: "Line", 2: "Surface", 3: "Volume"} self._GMSH_CODE.append( "Translate {{{}}} {{ {}{{{}}}; }}".format( ", ".join([str(co) for co in vector]), d[input_entity.dimension], input_entity.id, ) ) return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def symmetry(self, input_entity, coefficients, duplicate=True): """Transforms all elementary entities symmetrically to a plane. The vector should contain four expressions giving the coefficients of the plane's equation. """
d = {1: "Line", 2: "Surface", 3: "Volume"} entity = "{}{{{}}};".format(d[input_entity.dimension], input_entity.id) if duplicate: entity = "Duplicata{{{}}}".format(entity) self._GMSH_CODE.append( "Symmetry {{{}}} {{{}}}".format( ", ".join([str(co) for co in coefficients]), entity ) ) return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def patch_qcombobox(QComboBox): """ In PySide, using Python objects as userData in QComboBox causes Segmentation faults under certain conditions. Even in cases where it doesn't, findData does not work correctly. Likewise, findData also does not work correctly with Python objects when using PyQt4. On the other hand, PyQt5 deals with this case correctly. We therefore patch QComboBox when using PyQt4 and PySide to avoid issues. """
from ..QtGui import QIcon
from ..QtCore import Qt, QObject

class userDataWrapper():
    """
    This class is used to wrap any userData object. If we don't do this, then
    certain types of objects can cause segmentation faults or issues depending
    on whether/how __getitem__ is defined.
    """
    def __init__(self, data):
        self.data = data

_addItem = QComboBox.addItem

def addItem(self, *args, **kwargs):
    # Qt overloads: addItem(text[, userData]) and addItem(icon, text[, userData]).
    # Detect a trailing positional userData and move it into kwargs.
    if len(args) == 3 or (not isinstance(args[0], QIcon)
                          and len(args) == 2):
        args, kwargs['userData'] = args[:-1], args[-1]
    if 'userData' in kwargs:
        kwargs['userData'] = userDataWrapper(kwargs['userData'])
    _addItem(self, *args, **kwargs)

_insertItem = QComboBox.insertItem

def insertItem(self, *args, **kwargs):
    # Same trailing-userData detection, with the extra leading index argument.
    if len(args) == 4 or (not isinstance(args[1], QIcon)
                          and len(args) == 3):
        args, kwargs['userData'] = args[:-1], args[-1]
    if 'userData' in kwargs:
        kwargs['userData'] = userDataWrapper(kwargs['userData'])
    _insertItem(self, *args, **kwargs)

_setItemData = QComboBox.setItemData

def setItemData(self, index, value, role=Qt.UserRole):
    # Always wrap, so the stored value round-trips safely through Qt.
    value = userDataWrapper(value)
    _setItemData(self, index, value, role=role)

_itemData = QComboBox.itemData

def itemData(self, index, role=Qt.UserRole):
    # Unwrap on the way out so callers get their original object back.
    userData = _itemData(self, index, role=role)
    if isinstance(userData, userDataWrapper):
        userData = userData.data
    return userData

def findData(self, value):
    # Linear scan using the patched itemData, since native findData cannot
    # compare wrapped Python objects.
    for i in range(self.count()):
        if self.itemData(i) == value:
            return i
    return -1

QComboBox.addItem = addItem
QComboBox.insertItem = insertItem
QComboBox.setItemData = setItemData
QComboBox.itemData = itemData
QComboBox.findData = findData
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_czml( traffic: Union[Traffic, SO6], filename: Union[str, Path], minimum_time: Optional[timelike] = None, ) -> None: """Generates a CesiumJS scenario file."""
if isinstance(traffic, Traffic):
    # Drop state vectors with a NaN altitude (NaN != NaN in pandas query).
    if "baro_altitude" in traffic.data.columns:
        traffic = traffic.query("baro_altitude == baro_altitude")
    elif "altitude" in traffic.data.columns:
        traffic = traffic.query("altitude == altitude")

if minimum_time is not None:
    minimum_time = to_datetime(minimum_time)
    traffic = cast(Traffic, traffic.query(f"timestamp >= '{minimum_time}'"))

if isinstance(filename, str):
    filename = Path(filename)

if not filename.parent.exists():
    filename.parent.mkdir(parents=True)

start = format_ts(traffic.start_time)
availability = f"{start}/{format_ts(traffic.end_time)}"
export = [
    {
        "id": "document",
        "name": f"Traffic_{start}",
        "version": "1.0",
        "author": getpass.getuser(),
        "clock": {
            "interval": availability,
            "currentTime": start,
            "multiplier": _CZML_Params.default_time_multiplier,
        },
    }
]
for flight in traffic:
    for elt in export_flight(flight):
        export.append(elt)

with filename.open("w") as fh:
    json.dump(export, fh, indent=2)

# Fix: the f-string carried the literal "(unknown)" instead of the actual
# output path placeholder.
logging.info(f"Scenario file {filename} written")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def plot(self, ax: GeoAxesSubplot, **kwargs) -> Artist: """Plotting function. All arguments are passed to the geometry"""
if "facecolor" not in kwargs: kwargs["facecolor"] = "None" if "edgecolor" not in kwargs: kwargs["edgecolor"] = ax._get_lines.get_next_color() if "projection" in ax.__dict__: return ax.add_geometries([self.shape], crs=PlateCarree(), **kwargs) else: return ax.add_patch( MplPolygon(list(self.shape.exterior.coords), **kwargs) )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def api_states( self, own: bool = False, bounds: Union[ BaseGeometry, Tuple[float, float, float, float], None ] = None, ) -> StateVectors: """Returns the current state vectors from OpenSky REST API. If own parameter is set to True, returns only the state vectors associated to own sensors (requires authentication) bounds parameter can be a shape or a tuple of float. Official documentation Limitations for anonymous (unauthenticated) users Anonymous are those users who access the API without using credentials. The limitations for anonymous users are: Anonymous users can only get the most recent state vectors, i.e. the time parameter will be ignored. Anonymous users can only retrieve data with a time resolution of 10 seconds. That means, the API will return state vectors for time now − (now mod 10) Limitations for OpenSky users An OpenSky user is anybody who uses a valid OpenSky account (see below) to access the API. The rate limitations for OpenSky users are: - OpenSky users can retrieve data of up to 1 hour in the past. If the time parameter has a value t < now−3600 the API will return 400 Bad Request. - OpenSky users can retrieve data with a time resolution of 5 seconds. That means, if the time parameter was set to t , the API will return state vectors for time t−(t mod 5). """
# "own" restricts results to the user's sensors and requires credentials.
what = "own" if (own and self.auth is not None) else "all"

if bounds is not None:
    try:
        # thinking of shapely bounds attribute (in this order)
        # I just don't want to add the shapely dependency here
        west, south, east, north = bounds.bounds  # type: ignore
    except AttributeError:
        west, south, east, north = bounds

    what += f"?lamin={south}&lamax={north}&lomin={west}&lomax={east}"

c = requests.get(
    f"https://opensky-network.org/api/states/{what}", auth=self.auth
)

if c.status_code != 200:
    raise ValueError(c.content.decode())

r = pd.DataFrame.from_records(
    c.json()["states"], columns=self._json_columns
)

# Drop columns unused downstream, then any record with missing values.
r = r.drop(["origin_country", "spi", "sensors"], axis=1)
r = r.dropna()

return StateVectors(
    self._format_dataframe(r, nautical_units=True), self
)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def api_tracks(self, icao24: str) -> Flight: """Returns a Flight corresponding to a given aircraft. Official documentation Retrieve the trajectory for a certain aircraft at a given time. The trajectory is a list of waypoints containing position, barometric altitude, true track and an on-ground flag. In contrast to state vectors, trajectories do not contain all information we have about the flight, but rather show the aircraft’s general movement pattern. For this reason, waypoints are selected among available state vectors given the following set of rules: - The first point is set immediately after the aircraft’s expected departure, or after the network received the first position when the aircraft entered its reception range. - The last point is set right before the aircraft’s expected arrival, or the aircraft left the network's reception range. - There is a waypoint at least every 15 minutes when the aircraft is in-flight. - A waypoint is added if the aircraft changes its track more than 2.5°. - A waypoint is added if the aircraft changes altitude by more than 100m (~330ft). - A waypoint is added if the on-ground state changes. Tracks are strongly related to flights. Internally, we compute flights and tracks within the same processing step. As such, it may be beneficial to retrieve a list of flights with the API methods from above, and use these results with the given time stamps to retrieve detailed track information. """
# Fetch the waypoint list for this transponder from the REST API.
c = requests.get(
    f"https://opensky-network.org/api/tracks/?icao24={icao24}"
)

if c.status_code != 200:
    raise ValueError(c.content.decode())

json = c.json()

# Waypoints come back as positional records; attach the aircraft identity
# to every row before wrapping in a Flight.
df = pd.DataFrame.from_records(
    json["path"],
    columns=[
        "timestamp",
        "latitude",
        "longitude",
        "altitude",
        "track",
        "onground",
    ],
).assign(icao24=json["icao24"], callsign=json["callsign"])

return Flight(self._format_dataframe(df, nautical_units=True))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: """Returns the route associated to a callsign."""
from .. import airports c = requests.get( f"https://opensky-network.org/api/routes?callsign={callsign}" ) if c.status_code == 404: raise ValueError("Unknown callsign") if c.status_code != 200: raise ValueError(c.content.decode()) json = c.json() return tuple(airports[a] for a in json["route"])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def api_aircraft( self, icao24: str, begin: Optional[timelike] = None, end: Optional[timelike] = None, ) -> pd.DataFrame: """Returns a flight table associated to an aircraft. Official documentation This API call retrieves flights for a particular aircraft within a certain time interval. Resulting flights departed and arrived within [begin, end]. If no flights are found for the given period, HTTP status 404 - Not found is returned with an empty response body. """
if begin is None: begin = round_time(datetime.now(timezone.utc), by=timedelta(days=1)) begin = to_datetime(begin) if end is None: end = begin + timedelta(days=1) else: end = to_datetime(end) begin = int(begin.timestamp()) end = int(end.timestamp()) c = requests.get( f"https://opensky-network.org/api/flights/aircraft" f"?icao24={icao24}&begin={begin}&end={end}" ) if c.status_code != 200: raise ValueError(c.content.decode()) return ( pd.DataFrame.from_records(c.json())[ [ "firstSeen", "lastSeen", "icao24", "callsign", "estDepartureAirport", "estArrivalAirport", ] ] .assign( firstSeen=lambda df: pd.to_datetime( df.firstSeen * 1e9 ).dt.tz_localize("utc"), lastSeen=lambda df: pd.to_datetime( df.lastSeen * 1e9 ).dt.tz_localize("utc"), ) .sort_values("lastSeen") )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def api_range( self, serial: str, date: Optional[timelike] = None ) -> SensorRange: """Wraps a polygon representing a sensor's range. By default, returns the current range. Otherwise, you may enter a specific day (as a string, as an epoch or as a datetime) """
if date is None: date = round_time(datetime.now(timezone.utc), by=timedelta(days=1)) else: date = to_datetime(date) date = int(date.timestamp()) c = requests.get( f"https://opensky-network.org/api/range/days" f"?days={date}&serials={serial}" ) if c.status_code != 200: raise ValueError(c.content.decode()) return SensorRange(c.json())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def api_arrival( self, airport: Union[str, Airport], begin: Optional[timelike] = None, end: Optional[timelike] = None, ) -> pd.DataFrame: """Returns a flight table associated to an airport. By default, returns the current table. Otherwise, you may enter a specific date (as a string, as an epoch or as a datetime) Official documentation Retrieve flights for a certain airport which arrived within a given time interval [begin, end]. If no flights are found for the given period, HTTP status 404 - Not found is returned with an empty response body. """
if isinstance(airport, str): from .. import airports airport_code = airports[airport].icao else: airport_code = airport.icao if begin is None: begin = round_time(datetime.now(timezone.utc), by=timedelta(days=1)) begin = to_datetime(begin) if end is None: end = begin + timedelta(days=1) else: end = to_datetime(end) begin = int(begin.timestamp()) end = int(end.timestamp()) c = requests.get( f"https://opensky-network.org/api/flights/arrival" f"?begin={begin}&airport={airport_code}&end={end}" ) if c.status_code != 200: raise ValueError(c.content.decode()) return ( pd.DataFrame.from_records(c.json())[ [ "firstSeen", "lastSeen", "icao24", "callsign", "estDepartureAirport", "estArrivalAirport", ] ] .assign( firstSeen=lambda df: pd.to_datetime( df.firstSeen * 1e9 ).dt.tz_localize("utc"), lastSeen=lambda df: pd.to_datetime( df.lastSeen * 1e9 ).dt.tz_localize("utc"), ) .sort_values("lastSeen") )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def start(self) -> pd.Timestamp: """Returns the minimum timestamp value of the DataFrame."""
start = self.data.timestamp.min() self.data = self.data.assign(start=start) return start
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def squawk(self) -> Set[str]: """Returns all the unique squawk values in the trajectory."""
return set(self.data.squawk.ffill().bfill())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def query_opensky(self) -> Optional["Flight"]: """Return the equivalent Flight from OpenSky History."""
from ..data import opensky query_params = { "start": self.start, "stop": self.stop, "callsign": self.callsign, "icao24": self.icao24, } return opensky.history(**query_params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def coords(self) -> Iterator[Tuple[float, float, float]]: """Iterates on longitudes, latitudes and altitudes. """
data = self.data[self.data.longitude.notnull()] yield from zip(data["longitude"], data["latitude"], data["altitude"])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def xy_time(self) -> Iterator[Tuple[float, float, float]]: """Iterates on longitudes, latitudes and timestamps."""
iterator = iter(zip(self.coords, self.timestamp)) while True: next_ = next(iterator, None) if next_ is None: return coords, time = next_ yield (coords[0], coords[1], time.to_pydatetime().timestamp())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def split(self, value=10, unit=None): # noqa: F811 """Splits Flights in several legs. By default (if no value is given), Flights are split after gaps of 10 minutes without data. """
if type(value) == int and unit is None: # default value is 10 m unit = "m" for data in _split(self.data, value, unit): yield self.__class__(data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resample(self, rule: Union[str, int] = "1s") -> "Flight": """Resamples a Flight at a one point per second rate."""
if isinstance(rule, str): data = ( self._handle_last_position() .data.assign(start=self.start, stop=self.stop) .set_index("timestamp") .resample(rule) .first() # better performance than min() for duplicate index .interpolate() .reset_index() .fillna(method="pad") ) elif isinstance(rule, int): data = ( self._handle_last_position() .data.set_index("timestamp") .asfreq( (self.stop - self.start) / (rule - 1), # type: ignore method="nearest", ) .reset_index() ) else: raise TypeError("rule must be a str or an int") return self.__class__(data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def simplify( self, tolerance: float, altitude: Optional[str] = None, z_factor: float = 3.048, return_type: Type[Mask] = Type["Flight"], ) -> Mask: """Simplifies a trajectory with Douglas-Peucker algorithm. The method uses latitude and longitude, projects the trajectory to a conformal projection and applies the algorithm. By default, a 2D version is called, unless you pass a column name for altitude (z parameter). You may scale the z-axis for more relevance (z_factor); the default value works well in most situations. The method returns a Flight unless you specify a np.ndarray[bool] as return_type for getting a mask. """
# returns a mask mask = douglas_peucker( df=self.data, tolerance=tolerance, lat="latitude", lon="longitude", z=altitude, z_factor=z_factor, ) if return_type == Type["Flight"]: return self.__class__(self.data.loc[mask]) else: return mask
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def project_shape( self, projection: Union[pyproj.Proj, crs.Projection, None] = None ) -> base.BaseGeometry: """Projection for a decent representation of the structure. By default, an equivalent projection is applied. Equivalent projections locally respect areas, which is convenient for the area attribute. """
if self.shape is None: return None if isinstance(projection, crs.Projection): projection = pyproj.Proj(projection.proj4_init) if projection is None: bounds = self.bounds projection = pyproj.Proj( proj="aea", # equivalent projection lat_1=bounds[1], lat_2=bounds[3], lat_0=(bounds[1] + bounds[3]) / 2, lon_0=(bounds[0] + bounds[2]) / 2, ) projected_shape = transform( partial( pyproj.transform, pyproj.Proj(init="EPSG:4326"), projection ), self.shape, ) if not projected_shape.is_valid: warnings.warn("The chosen projection is invalid for current shape") return projected_shape
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compute_xy( self, projection: Union[pyproj.Proj, crs.Projection, None] = None ): """Computes x and y columns from latitudes and longitudes. The source projection is WGS84 (EPSG 4326). The default destination projection is a Lambert Conformal Conical projection centered on the data inside the dataframe. For consistency reasons with pandas DataFrame, a new Traffic structure is returned. """
if isinstance(projection, crs.Projection): projection = pyproj.Proj(projection.proj4_init) if projection is None: projection = pyproj.Proj( proj="lcc", lat_1=self.data.latitude.min(), lat_2=self.data.latitude.max(), lat_0=self.data.latitude.mean(), lon_0=self.data.longitude.mean(), ) x, y = pyproj.transform( pyproj.Proj(init="EPSG:4326"), projection, self.data.longitude.values, self.data.latitude.values, ) return self.__class__(self.data.assign(x=x, y=y))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def callsigns(self) -> Set[str]: """Return only the most relevant callsigns"""
sub = self.data.query("callsign == callsign") return set(cs for cs in sub.callsign if len(cs) > 3 and " " not in cs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resample( self, rule: Union[str, int] = "1s", max_workers: int = 4, ) -> "Traffic": """Resamples all trajectories, flight by flight. `rule` defines the desired sample rate (default: 1s) """
with ProcessPoolExecutor(max_workers=max_workers) as executor: cumul = [] tasks = { executor.submit(flight.resample, rule): flight for flight in self } for future in tqdm(as_completed(tasks), total=len(tasks)): cumul.append(future.result()) return self.__class__.from_flights(cumul)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def vcas2mach(cas, h): """ CAS to Mach conversion """
tas = vcas2tas(cas, h) M = vtas2mach(tas, h) return M
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cas2mach(cas, h): """ CAS to Mach conversion """
tas = cas2tas(cas, h) M = tas2mach(tas, h) return M
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_bluesky( traffic: Traffic, filename: Union[str, Path], minimum_time: Optional[timelike] = None, ) -> None: """Generates a Bluesky scenario file."""
if minimum_time is not None: minimum_time = to_datetime(minimum_time) traffic = traffic.query(f"timestamp >= '{minimum_time}'") if isinstance(filename, str): filename = Path(filename) if not filename.parent.exists(): filename.parent.mkdir(parents=True) altitude = ( "baro_altitude" if "baro_altitude" in traffic.data.columns else "altitude" ) if "mdl" not in traffic.data.columns: traffic = aircraft.merge(traffic) if "cas" not in traffic.data.columns: traffic = Traffic( traffic.data.assign( cas=vtas2cas(traffic.data.ground_speed, traffic.data[altitude]) ) ) with filename.open("w") as fh: t_delta = traffic.data.timestamp - traffic.start_time data = ( traffic.assign_id() .data.groupby("flight_id") .filter(lambda x: x.shape[0] > 3) .assign(timedelta=t_delta.apply(fmt_timedelta)) .sort_values(by="timestamp") ) for column in data.columns: data[column] = data[column].astype(np.str) is_created: List[str] = [] is_deleted: List[str] = [] start_time = cast(pd.Timestamp, traffic.start_time).time() fh.write(f"00:00:00> TIME {start_time}\n") # Add some bluesky command for the visualisation # fh.write("00:00:00>trail on\n") # fh.write("00:00:00>ssd conflicts\n") # We remove an object when it's its last data point buff = data.groupby("flight_id").timestamp.max() dd = pd.DataFrame( columns=["timestamp"], data=buff.values, index=buff.index.values ) map_icao24_last_point = {} for i, v in dd.iterrows(): map_icao24_last_point[i] = v[0] # Main loop to write lines in the scenario file for _, v in data.iterrows(): if v.flight_id not in is_created: # If the object is not created then create it is_created.append(v.flight_id) fh.write( f"{v.timedelta}> CRE {v.callsign} {v.mdl} " f"{v.latitude} {v.longitude} {v.track} " f"{v[altitude]} {v.cas}\n" ) elif v.timestamp == map_icao24_last_point[v.flight_id]: # Remove an aircraft when no data are available if v.flight_id not in is_deleted: is_deleted.append(v.flight_id) fh.write(f"{v.timedelta}> DEL {v.callsign}\n") elif v.flight_id not in 
is_deleted: # Otherwise update the object position fh.write( f"{v.timedelta}> MOVE {v.callsign} " f"{v.latitude} {v.longitude} {v[altitude]} " f"{v.track} {v.cas} {v.vertical_rate}\n" ) logging.info(f"Scenario file {filename} written")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def import_submodules(package, recursive=True): """ Import all submodules of a module, recursively, including subpackages :param package: package (name or actual module) :type package: str | module :rtype: dict[str, types.ModuleType] """
if isinstance(package, str): package = importlib.import_module(package) results = {} for loader, name, is_pkg in pkgutil.walk_packages(package.__path__): full_name = package.__name__ + "." + name results[name] = importlib.import_module(full_name) if recursive and is_pkg: results.update(import_submodules(full_name)) return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def interpolate(self, times, proj=PlateCarree()) -> np.ndarray: """Interpolates a trajectory in time. """
if proj not in self.interpolator: self.interpolator[proj] = interp1d( np.stack(t.to_pydatetime().timestamp() for t in self.timestamp), proj.transform_points( PlateCarree(), *np.stack(self.coords).T ).T, ) return PlateCarree().transform_points( proj, *self.interpolator[proj](times) )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _set_default_extent(self): """Helper for a default extent limited to the projection boundaries."""
west, south, east, north = self.projection.boundary.bounds self.set_extent((west, east, south, north), crs=self.projection)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _format_dataframe( df: pd.DataFrame, nautical_units=True ) -> pd.DataFrame: """ This function converts types, strips spaces after callsigns and sorts the DataFrame by timestamp. For some reason, all data arriving from OpenSky are converted to units in metric system. Optionally, you may convert the units back to nautical miles, feet and feet/min. """
if "callsign" in df.columns and df.callsign.dtype == object: df.callsign = df.callsign.str.strip() if nautical_units: df.altitude = df.altitude / 0.3048 if "geoaltitude" in df.columns: df.geoaltitude = df.geoaltitude / 0.3048 if "groundspeed" in df.columns: df.groundspeed = df.groundspeed / 1852 * 3600 if "vertical_rate" in df.columns: df.vertical_rate = df.vertical_rate / 0.3048 * 60 df.timestamp = pd.to_datetime(df.timestamp * 1e9).dt.tz_localize("utc") if "last_position" in df.columns: df = df.query("last_position == last_position").assign( last_position=pd.to_datetime( df.last_position * 1e9 ).dt.tz_localize("utc") ) return df.sort_values("timestamp")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter(self, info, releases): """ Remove files from `releases` that match any pattern. """
# Make a copy of releases keys # as we may delete packages during iteration removed = 0 versions = list(releases.keys()) for version in versions: new_files = [] for file_desc in releases[version]: if self._check_match(file_desc): removed += 1 else: new_files.append(file_desc) if len(new_files) == 0: del releases[version] else: releases[version] = new_files logger.debug(f"{self.name}: filenames removed: {removed}")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_filter_plugins(entrypoint_group: str) -> Iterable[Filter]: """ Load all blacklist plugins that are registered with pkg_resources Parameters ========== entrypoint_group: str The entrypoint group name to load plugins from Returns ======= List of Blacklist: A list of objects derived from the Blacklist class """
global loaded_filter_plugins enabled_plugins: List[str] = [] config = BandersnatchConfig().config try: config_blacklist_plugins = config["blacklist"]["plugins"] split_plugins = config_blacklist_plugins.split("\n") if "all" in split_plugins: enabled_plugins = ["all"] else: for plugin in split_plugins: if not plugin: continue enabled_plugins.append(plugin) except KeyError: pass # If the plugins for the entrypoint_group have been loaded return them cached_plugins = loaded_filter_plugins.get(entrypoint_group) if cached_plugins: return cached_plugins plugins = set() for entry_point in pkg_resources.iter_entry_points(group=entrypoint_group): plugin_class = entry_point.load() plugin_instance = plugin_class() if "all" in enabled_plugins or plugin_instance.name in enabled_plugins: plugins.add(plugin_instance) loaded_filter_plugins[entrypoint_group] = list(plugins) return plugins
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter(self, info, releases): """ Remove all release versions that match any of the specificed patterns. """
for version in list(releases.keys()): if any(pattern.match(version) for pattern in self.patterns): del releases[version]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _check_match(self, name, version_string) -> bool: """ Check if the package name and version matches against a blacklisted package version specifier. Parameters ========== name: str Package name version: str Package version Returns ======= bool: True if it matches, False otherwise. """
if not name or not version_string: return False try: version = Version(version_string) except InvalidVersion: logger.debug(f"Package {name}=={version_string} has an invalid version") return False for requirement in self.blacklist_release_requirements: if name != requirement.name: continue if version in requirement.specifier: logger.debug( f"MATCH: Release {name}=={version} matches specifier " f"{requirement.specifier}" ) return True return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find(root: Union[Path, str], dirs: bool = True) -> str: """A test helper simulating 'find'. Iterates over directories and filenames, given as relative paths to the root. """
if isinstance(root, str): root = Path(root) results: List[Path] = [] for dirpath, dirnames, filenames in os.walk(root): names = filenames if dirs: names += dirnames for name in names: results.append(Path(dirpath) / name) results.sort() return "\n".join(str(result.relative_to(root)) for result in results)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rewrite( filepath: Union[str, Path], mode: str = "w", **kw: Any ) -> Generator[IO, None, None]: """Rewrite an existing file atomically to avoid programs running in parallel to have race conditions while reading."""
if isinstance(filepath, str): base_dir = os.path.dirname(filepath) filename = os.path.basename(filepath) else: base_dir = str(filepath.parent) filename = filepath.name # Change naming format to be more friendly with distributed POSIX # filesystems like GlusterFS that hash based on filename # GlusterFS ignore '.' at the start of filenames and this avoid rehashing with tempfile.NamedTemporaryFile( mode=mode, prefix=f".{filename}.", delete=False, dir=base_dir, **kw ) as f: filepath_tmp = f.name yield f if not os.path.exists(filepath_tmp): # Allow our clients to remove the file in case it doesn't want it to be # put in place actually but also doesn't want to error out. return os.chmod(filepath_tmp, 0o100644) os.rename(filepath_tmp, filepath)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def unlink_parent_dir(path: Path) -> None: """ Remove a file and if the dir is empty remove it """
logger.info(f"unlink {str(path)}") path.unlink() parent_path = path.parent try: parent_path.rmdir() logger.info(f"rmdir {str(parent_path)}") except OSError as oe: logger.debug(f"Did not remove {str(parent_path)}: {str(oe)}")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_safe(filename: str, **kw: Any) -> Generator[IO, None, None]: """Rewrite a file atomically. Clients are allowed to delete the tmpfile to signal that they don't want to have it updated. """
with tempfile.NamedTemporaryFile( dir=os.path.dirname(filename), delete=False, prefix=f"{os.path.basename(filename)}.", **kw, ) as tf: if os.path.exists(filename): os.chmod(tf.name, os.stat(filename).st_mode & 0o7777) tf.has_changed = False # type: ignore yield tf if not os.path.exists(tf.name): return filename_tmp = tf.name if os.path.exists(filename) and filecmp.cmp(filename, filename_tmp, shallow=False): os.unlink(filename_tmp) else: os.rename(filename_tmp, filename) tf.has_changed = True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_json_metadata(self, package_info: Dict) -> bool: """ Take the JSON metadata we just fetched and save to disk """
try: with utils.rewrite(self.json_file) as jf: dump(package_info, jf, indent=4, sort_keys=True) except Exception as e: logger.error( "Unable to write json to {}: {}".format(self.json_file, str(e)) ) return False symlink_dir = self.json_pypi_symlink.parent if not symlink_dir.exists(): symlink_dir.mkdir() try: # If symlink already exists throw a FileExistsError self.json_pypi_symlink.symlink_to(self.json_file) except FileExistsError: pass return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _filter_releases(self): """ Run the release filtering plugins """
global display_filter_log filter_plugins = filter_release_plugins() if not filter_plugins: if display_filter_log: logger.info("No release filters are enabled. Skipping filtering") display_filter_log = False else: for plugin in filter_plugins: plugin.filter(self.info, self.releases)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sync_release_files(self): """ Purge + download files returning files removed + added """
release_files = [] for release in self.releases.values(): release_files.extend(release) downloaded_files = set() deferred_exception = None for release_file in release_files: try: downloaded_file = self.download_file( release_file["url"], release_file["digests"]["sha256"] ) if downloaded_file: downloaded_files.add( str(downloaded_file.relative_to(self.mirror.homedir)) ) except Exception as e: logger.exception( f"Continuing to next file after error downloading: " f"{release_file['url']}" ) if not deferred_exception: # keep first exception deferred_exception = e if deferred_exception: raise deferred_exception # raise the exception after trying all files self.mirror.altered_packages[self.name] = downloaded_files
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _cleanup(self): """Does a couple of cleanup tasks to ensure consistent data for later processing."""
if self.todolist.exists(): try: saved_todo = iter(open(self.todolist, encoding="utf-8")) int(next(saved_todo).strip()) for line in saved_todo: _, serial = line.strip().split() int(serial) except (StopIteration, ValueError): # The todo list was inconsistent. This may happen if we get # killed e.g. by the timeout wrapper. Just remove it - we'll # just have to do whatever happened since the last successful # sync. logger.info("Removing inconsistent todo list.") self.todolist.unlink()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _filter_packages(self): """ Run the package filtering plugins and remove any packages from the packages_to_sync that match any filters. - Logging of action will be done within the check_match methods """
global LOG_PLUGINS filter_plugins = filter_project_plugins() if not filter_plugins: if LOG_PLUGINS: logger.info("No project filters are enabled. Skipping filtering") LOG_PLUGINS = False return # Make a copy of self.packages_to_sync keys # as we may delete packages during iteration packages = list(self.packages_to_sync.keys()) for package_name in packages: for plugin in filter_plugins: if plugin.check_match(name=package_name): if package_name not in self.packages_to_sync: logger.debug( f"{package_name} not found in packages to sync - " + f"{plugin.name} has no effect here ..." ) else: del self.packages_to_sync[package_name]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def determine_packages_to_sync(self): """ Update the self.packages_to_sync to contain packages that need to be synced. """
# In case we don't find any changes we will stay on the currently # synced serial. self.target_serial = self.synced_serial self.packages_to_sync = {} logger.info(f"Current mirror serial: {self.synced_serial}") if self.todolist.exists(): # We started a sync previously and left a todo list as well as the # targetted serial. We'll try to keep going through the todo list # and then mark the targetted serial as done. logger.info("Resuming interrupted sync from local todo list.") saved_todo = iter(open(self.todolist, encoding="utf-8")) self.target_serial = int(next(saved_todo).strip()) for line in saved_todo: package, serial = line.strip().split() self.packages_to_sync[package] = int(serial) elif not self.synced_serial: logger.info("Syncing all packages.") # First get the current serial, then start to sync. This makes us # more defensive in case something changes on the server between # those two calls. self.packages_to_sync.update(self.master.all_packages()) self.target_serial = max( [self.synced_serial] + list(self.packages_to_sync.values()) ) else: logger.info("Syncing based on changelog.") self.packages_to_sync.update( self.master.changed_packages(self.synced_serial) ) self.target_serial = max( [self.synced_serial] + list(self.packages_to_sync.values()) ) # We can avoid downloading the main index page if we don't have # anything todo at all during a changelog-based sync. self.need_index_sync = bool(self.packages_to_sync) self._filter_packages() logger.info(f"Trying to reach serial: {self.target_serial}") pkg_count = len(self.packages_to_sync) logger.info(f"{pkg_count} packages to sync.")