docstring
stringlengths
52
499
function
stringlengths
67
35.2k
__index_level_0__
int64
52.6k
1.16M
Merges two stops. For the stops to be merged, they must have: - the same stop_id - the same stop_name (case insensitive) - the same zone_id - locations less than largest_stop_distance apart The other attributes can have arbitary changes. The merged attributes are taken from the new ...
def _MergeEntities(self, a, b): distance = transitfeed.ApproximateDistanceBetweenStops(a, b) if distance > self.largest_stop_distance: raise MergeError("Stops are too far apart: %.1fm " "(largest_stop_distance is %.1fm)." % (distance, self.largest_stop_di...
197,608
Forces the old and new calendars to be disjoint about a cutoff date. This truncates the service periods of the old schedule so that service stops one day before the given cutoff date and truncates the new schedule so that service only begins on the cutoff date. Args: cutoff: The cutoff date as a...
def DisjoinCalendars(self, cutoff): def TruncatePeriod(service_period, start, end): service_period.start_date = max(service_period.start_date, start) service_period.end_date = min(service_period.end_date, end) dates_to_delete = [] for k in service_period.date_exceptions: ...
197,620
Merges the shapes by taking the new shape. Args: a: The first transitfeed.Shape instance. b: The second transitfeed.Shape instance. Returns: The merged shape. Raises: MergeError: If the ids are different or if the endpoints are further than largest_shape_distance...
def _MergeEntities(self, a, b): if a.shape_id != b.shape_id: raise MergeError('shape_id must be the same') distance = max(ApproximateDistanceBetweenPoints(a.points[0][:2], b.points[0][:2]), ApproximateDistanceBetweenPoints(a.poin...
197,629
Initialise the merger. Once this initialiser has been called, a_schedule and b_schedule should not be modified. Args: a_schedule: The old schedule, an instance of transitfeed.Schedule. b_schedule: The new schedule, an instance of transitfeed.Schedule. problem_reporter: The problem report...
def __init__(self, a_schedule, b_schedule, merged_schedule, problem_reporter): self.a_schedule = a_schedule self.b_schedule = b_schedule self.merged_schedule = merged_schedule self.a_merge_map = {} self.b_merge_map = {} self.a_zone_map = {} self.b_zone_map = {} self._...
197,636
Finds the largest integer used as the ending of an id in the schedule. Args: schedule: The schedule to check. Returns: The maximum integer used as an ending for an id.
def _FindLargestIdPostfixNumber(self, schedule): postfix_number_re = re.compile('(\d+)$') def ExtractPostfixNumber(entity_id): if entity_id is None: return 0 match = postfix_number_re.search(entity_id) if match is not None: return int(match.group(1)) else: ...
197,637
Generate a unique id based on the given id. This is done by appending a counter which is then incremented. The counter is initialised at the maximum number used as an ending for any id in the old and new schedules. Args: entity_id: The base id string. This is allowed to be None. Returns: ...
def GenerateId(self, entity_id=None): self._idnum += 1 if entity_id: return '%s_merged_%d' % (entity_id, self._idnum) else: return 'merged_%d' % self._idnum
197,638
Looks for an added DataSetMerger derived from the given class. Args: cls: A class derived from DataSetMerger. Returns: The matching DataSetMerger instance. Raises: LookupError: No matching DataSetMerger has been added.
def GetMerger(self, cls): for merger in self._mergers: if isinstance(merger, cls): return merger raise LookupError('No matching DataSetMerger found')
197,641
Initialize a new ShapePoint object. Args: field_dict: A dictionary mapping attribute name to unicode string
def __init__(self, shape_id=None, lat=None, lon=None,seq=None, dist=None, field_dict=None): self._schedule = None if field_dict: if isinstance(field_dict, self.__class__): for k, v in field_dict.iteritems(): self.__dict__[k] = v else: self.__dict__.updat...
197,688
Save the current context to be output with any errors. Args: file_name: string row_num: int row: list of strings headers: list of column headers, its order corresponding to row's
def SetFileContext(self, file_name, row_num, row, headers): self._context = (file_name, row_num, row, headers)
197,691
Return a text string describing the problem. Args: d: map returned by GetDictToFormat with with formatting added
def FormatProblem(self, d=None): if not d: d = self.GetDictToFormat() output_error_text = self.__class__.ERROR_TEXT % d if ('reason' in d) and d['reason']: return '%s\n%s' % (output_error_text, d['reason']) else: return output_error_text
197,732
Initialise. Args: raise_warnings: If this is True then warnings are also raised as exceptions. If it is false, warnings are printed to the console using SimpleProblemAccumulator.
def __init__(self, raise_warnings=False): self.raise_warnings = raise_warnings self.accumulator = SimpleProblemAccumulator()
197,738
Returns a point on the shape polyline with the input shape_dist_traveled. Args: shape_dist_traveled: The input shape_dist_traveled. Returns: The shape point as a tuple (lat, lng, shape_dist_traveled), where lat and lng is the location of the shape point, and shape_dist_traveled is an i...
def GetPointWithDistanceTraveled(self, shape_dist_traveled): if not self.distance: return None if shape_dist_traveled <= self.distance[0]: return self.points[0] if shape_dist_traveled >= self.distance[-1]: return self.points[-1] index = bisect.bisect(self.distance, shape_dist_tra...
197,783
Add a stop to this trip. Stops must be added in the order visited. Args: stop: A Stop object kwargs: remaining keyword args passed to StopTime.__init__ Returns: None
def AddStopTime(self, stop, problems=None, schedule=None, **kwargs): if problems is None: # TODO: delete this branch when StopTime.__init__ doesn't need a # ProblemReporter problems = problems_module.default_problem_reporter stoptime = self.GetGtfsFactory().StopTime( problems=prob...
197,788
Add a StopTime object to the end of this trip. Args: stoptime: A StopTime object. Should not be reused in multiple trips. schedule: Schedule object containing this trip which must be passed to Trip.__init__ or here problems: ProblemReporter object for validating the StopTime in its new ...
def AddStopTimeObject(self, stoptime, schedule=None, problems=None): if schedule is None: schedule = self._schedule if schedule is None: warnings.warn("No longer supported. _schedule attribute is used to get " "stop_times table", DeprecationWarning) if problems is None: ...
197,791
Validate attributes of this object. Check that this object has all required values set to a valid value without reference to the rest of the schedule. If the _schedule attribute is set then check that references such as route_id and service_id are correct. Args: problems: A ProblemReporter objec...
def Validate(self, problems, validate_children=True): self.ValidateRouteId(problems) self.ValidateServicePeriod(problems) self.ValidateDirectionId(problems) self.ValidateTripId(problems) self.ValidateShapeIdsExistInShapeList(problems) self.ValidateRouteIdExistsInRouteList(problems) self...
197,816
Return a tuple that outputs a row of _FIELD_NAMES to be written to a GTFS file. Arguments: trip_id: The trip_id of the trip to which this StopTime corresponds. It must be provided, as it is not stored in StopTime.
def GetFieldValuesTuple(self, trip_id): result = [] for fn in self._FIELD_NAMES: if fn == 'trip_id': result.append(trip_id) else: # Since we'll be writting to an output file, we want empty values to be # outputted as an empty string result.append(getattr(self, fn...
197,826
Return a tuple that outputs a row of _FIELD_NAMES to be written to a SQLite database. Arguments: trip_id: The trip_id of the trip to which this StopTime corresponds. It must be provided, as it is not stored in StopTime.
def GetSqlValuesTuple(self, trip_id): result = [] for fn in self._SQL_FIELD_NAMES: if fn == 'trip_id': result.append(trip_id) else: # Since we'll be writting to SQLite, we want empty values to be # outputted as NULL string (contrary to what happens in # GetField...
197,827
Random multivariate hypergeometric variates. Parameters: - `n` : Number of draws. - `m` : Number of items in each categoy.
def rmultivariate_hypergeometric(n, m, size=None): N = len(m) urn = np.repeat(np.arange(N), m) if size: draw = np.array([[urn[i] for i in np.random.permutation(len(urn))[:n]] for j in range(size)]) r = [[np.sum(draw[j] == i) for i in range(len(m))] ...
198,556
Expected value of multivariate hypergeometric distribution. Parameters: - `n` : Number of draws. - `m` : Number of items in each categoy.
def multivariate_hypergeometric_expval(n, m): m = np.asarray(m, float) return n * (m / m.sum())
198,557
Make a grid of images, via numpy. Args: tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W) or a list of images all of the same size. nrow (int, optional): Number of images displayed in each row of the grid. The Final grid size is (B / nrow, nrow). Default...
def make_grid(tensor, nrow=8, padding=2, pad_value=0): if not (isinstance(tensor, np.ndarray) or (isinstance(tensor, list) and all(isinstance(t, np.ndarray) for t in tensor))): raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor))) # if list of tensors, conv...
198,779
Save a given Tensor into an image file. Args: tensor (Tensor or list): Image to be saved. If given a mini-batch tensor, saves the tensor as a grid of images by calling ``make_grid``. **kwargs: Other arguments are documented in ``make_grid``.
def save_image(tensor, filename, nrow=8, padding=2, pad_value=0): from PIL import Image grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value) im = Image.fromarray(pre_pillow_float_img_process(grid)) im.save(filename)
198,781
The render primitive (mode) must be the same as the input primitive of the GeometryShader. Args: mode (int): By default :py:data:`TRIANGLES` will be used. vertices (int): The number of vertices to transform. Keyword Args: first (int):...
def render(self, mode=None, vertices=-1, *, first=0, instances=1) -> None: if mode is None: mode = TRIANGLES self.mglo.render(mode, vertices, first, instances)
199,366
Copy buffer content. Args: dst (Buffer): The destination buffer. src (Buffer): The source buffer. size (int): The number of bytes to copy. Keyword Args: read_offset (int): The read offset. write_offset (int): The w...
def copy_buffer(self, dst, src, size=-1, *, read_offset=0, write_offset=0) -> None: self.mglo.copy_buffer(dst.mglo, src.mglo, size, read_offset, write_offset)
199,390
Copy framebuffer content. Use this method to: - blit framebuffers. - copy framebuffer content into a texture. - downsample framebuffers. (it will allow to read the framebuffer's content) - downsample a framebuffer directly to a texture. ...
def copy_framebuffer(self, dst, src) -> None: self.mglo.copy_framebuffer(dst.mglo, src.mglo)
199,391
Detect framebuffer. Args: glo (int): Framebuffer object. Returns: :py:class:`Framebuffer` object
def detect_framebuffer(self, glo=None) -> 'Framebuffer': res = Framebuffer.__new__(Framebuffer) res.mglo, res._size, res._samples, res._glo = self.mglo.detect_framebuffer(glo) res._color_attachments = None res._depth_attachment = None res.ctx = self res.extra = ...
199,392
Create a :py:class:`Buffer` object. Args: data (bytes): Content of the new buffer. Keyword Args: reserve (int): The number of bytes to reserve. dynamic (bool): Treat buffer as dynamic. Returns: :py:class:`Buffer` obje...
def buffer(self, data=None, *, reserve=0, dynamic=False) -> Buffer: if type(reserve) is str: reserve = mgl.strsize(reserve) res = Buffer.__new__(Buffer) res.mglo, res._size, res._glo = self.mglo.buffer(data, reserve, dynamic) res._dynamic = dynamic res.ctx ...
199,393
Create a :py:class:`Texture3D` object. Args: size (tuple): The width, height and depth of the texture. components (int): The number of components 1, 2, 3 or 4. data (bytes): Content of the texture. Keyword Args: alignment (int): T...
def texture3d(self, size, components, data=None, *, alignment=1, dtype='f1') -> 'Texture3D': res = Texture3D.__new__(Texture3D) res.mglo, res._glo = self.mglo.texture3d(size, components, data, alignment, dtype) res.ctx = self res.extra = None return res
199,396
Create a :py:class:`TextureCube` object. Args: size (tuple): The width, height of the texture. Each side of the cube will have this size. components (int): The number of components 1, 2, 3 or 4. data (bytes): Content of the texture. Keyword Args:...
def texture_cube(self, size, components, data=None, *, alignment=1, dtype='f1') -> 'TextureCube': res = TextureCube.__new__(TextureCube) res.mglo, res._glo = self.mglo.texture_cube(size, components, data, alignment, dtype) res._size = size res._components = components r...
199,397
Create a :py:class:`Texture` object. Args: size (tuple): The width and height of the texture. data (bytes): Content of the texture. Keyword Args: samples (int): The number of samples. Value 0 means no multisample format. alignment...
def depth_texture(self, size, data=None, *, samples=0, alignment=4) -> 'Texture': res = Texture.__new__(Texture) res.mglo, res._glo = self.mglo.depth_texture(size, data, samples, alignment) res._size = size res._components = 1 res._samples = samples res._dtype =...
199,398
Create a :py:class:`VertexArray` object. Args: program (Program): The program used when rendering. buffer (Buffer): The buffer. attributes (list): A list of attribute names. Keyword Args: index_element_size (int): byte size of eac...
def simple_vertex_array(self, program, buffer, *attributes, index_buffer=None, index_element_size=4) -> 'VertexArray': if type(buffer) is list: raise SyntaxError('Change simple_vertex_array to vertex_array') content = [(buffer, detect_format(program, at...
199,400
Create a :py:class:`Program` object. Only linked programs will be returned. A single shader in the `shaders` parameter is also accepted. The varyings are only used when a transform program is created. Args: shaders (list): A list of :py:class:`Shader` o...
def program(self, *, vertex_shader, fragment_shader=None, geometry_shader=None, tess_control_shader=None, tess_evaluation_shader=None, varyings=()) -> 'Program': if type(varyings) is str: varyings = (varyings,) varyings = tuple(varyings) res = Program.__ne...
199,401
Create a :py:class:`Scope` object. Args: framebuffer (Framebuffer): The framebuffer to use when entering. enable_only (int): The enable_only flags to set when entering. Keyword Args: textures (list): List of (texture, binding) tuples. ...
def scope(self, framebuffer, enable_only=None, *, textures=(), uniform_buffers=(), storage_buffers=()) -> 'Scope': textures = tuple((tex.mglo, idx) for tex, idx in textures) uniform_buffers = tuple((buf.mglo, idx) for buf, idx in uniform_buffers) storage_buffers = tuple((buf.mglo, idx)...
199,403
A :py:class:`Framebuffer` is a collection of buffers that can be used as the destination for rendering. The buffers for Framebuffer objects reference images from either Textures or Renderbuffers. Args: color_attachments (list): A list of :py:class:`Texture` or :py:class:`Renderb...
def framebuffer(self, color_attachments=(), depth_attachment=None) -> 'Framebuffer': if type(color_attachments) is Texture or type(color_attachments) is Renderbuffer: color_attachments = (color_attachments,) ca_mglo = tuple(x.mglo for x in color_attachments) da_mglo = None...
199,405
:py:class:`Renderbuffer` objects are OpenGL objects that contain images. They are created and used specifically with :py:class:`Framebuffer` objects. Args: size (tuple): The width and height of the renderbuffer. Keyword Args: samples (int): The numbe...
def depth_renderbuffer(self, size, *, samples=0) -> 'Renderbuffer': res = Renderbuffer.__new__(Renderbuffer) res.mglo, res._glo = self.mglo.depth_renderbuffer(size, samples) res._size = size res._components = 1 res._samples = samples res._dtype = 'f4' re...
199,407
A :py:class:`ComputeShader` is a Shader Stage that is used entirely for computing arbitrary information. While it can do rendering, it is generally used for tasks not directly related to drawing. Args: source (str): The source of the compute shader. Returns: ...
def compute_shader(self, source) -> 'ComputeShader': res = ComputeShader.__new__(ComputeShader) res.mglo, ls1, ls2, ls3, ls4, res._glo = self.mglo.compute_shader(source) members = {} for item in ls1: obj = Uniform.__new__(Uniform) obj.mglo, obj._locati...
199,408
Key event callback for glfw. Translates and forwards keyboard event to :py:func:`keyboard_event` Args: window: Window event origin key: The key that was pressed or released. scancode: The system-specific scancode of the key. action: GLFW_PRESS, GLF...
def key_event_callback(self, window, key, scancode, action, mods): if key == self.keys.ESCAPE: self.close() self.example.key_event(key, action)
199,421
Mouse event callback from glfw. Translates the events forwarding them to :py:func:`cursor_event`. Args: window: The window xpos: viewport x pos ypos: viewport y pos
def mouse_event_callback(self, window, xpos, ypos): # screen coordinates relative to the top-left corner self.example.mouse_position_event(xpos, ypos)
199,422
Split data to count equal parts. Write the chunks using offsets calculated from start, step and stop. Args: data (bytes): The data. start (int): First offset. step (int): Offset increment. count (int): The number of offsets.
def write_chunks(self, data, start, step, count) -> None: self.mglo.write_chunks(data, start, step, count)
199,427
Read the content. Args: size (int): The size. Value ``-1`` means all. Keyword Args: offset (int): The offset. Returns: bytes
def read(self, size=-1, *, offset=0) -> bytes: return self.mglo.read(size, offset)
199,428
Read the content into a buffer. Args: buffer (bytarray): The buffer that will receive the content. size (int): The size. Value ``-1`` means all. Keyword Args: offset (int): The read offset. write_offset (int): The write offset.
def read_into(self, buffer, size=-1, *, offset=0, write_offset=0) -> None: return self.mglo.read_into(buffer, size, offset, write_offset)
199,429
Read the content. Read and concatenate the chunks of size chunk_size using offsets calculated from start, step and stop. Args: chunk_size (int): The chunk size. start (int): First offset. step (int): Offset increment. ...
def read_chunks(self, chunk_size, start, step, count) -> bytes: return self.mglo.read_chunks(chunk_size, start, step, count)
199,430
Clear the content. Args: size (int): The size. Value ``-1`` means all. Keyword Args: offset (int): The offset. chunk (bytes): The chunk to use repeatedly.
def clear(self, size=-1, *, offset=0, chunk=None) -> None: self.mglo.clear(size, offset, chunk)
199,432
Bind the buffer to a uniform block. Args: binding (int): The uniform block binding. Keyword Args: offset (int): The offset. size (int): The size. Value ``-1`` means all.
def bind_to_uniform_block(self, binding=0, *, offset=0, size=-1) -> None: self.mglo.bind_to_uniform_block(binding, offset, size)
199,433
Bind the buffer to a shader storage buffer. Args: binding (int): The shader storage binding. Keyword Args: offset (int): The offset. size (int): The size. Value ``-1`` means all.
def bind_to_storage_buffer(self, binding=0, *, offset=0, size=-1) -> None: self.mglo.bind_to_storage_buffer(binding, offset, size)
199,434
Read the content of the framebuffer. Args: viewport (tuple): The viewport. components (int): The number of components to read. Keyword Args: attachment (int): The color attachment. alignment (int): The byte alignment of the pixels...
def read(self, viewport=None, components=3, *, attachment=0, alignment=1, dtype='f1') -> bytes: return self.mglo.read(viewport, components, attachment, alignment, dtype)
199,452
Renders the assigned example Args: time (float): Current time in seconds frame_time (float): Delta time from last frame in seconds
def render(self, time: float, frame_time: float): self.example.render(time, frame_time)
199,461
Run an example entering a blocking main loop Args: example_cls: The exmaple class to render args: Override sys.args
def run_example(example_cls: Example, args=None): values = parse_args(args) window_cls = get_window_cls(values.window) window = window_cls( title=example_cls.title, size=example_cls.window_size, fullscreen=values.fullscreen, resizable=example_cls.resizable, ...
199,527
Import a dotted module path and return the attribute/class designated by the last name in the path. Raise ImportError if the import failed. Args: dotted_path: The path to attempt importing Returns: Imported class/attribute
def import_string(dotted_path): try: module_path, class_name = dotted_path.rsplit('.', 1) except ValueError as err: raise ImportError("%s doesn't look like a module path" % dotted_path) from err module = import_module(module_path) try: return getattr(module, clas...
199,530
Run the compute shader. Args: group_x (int): The number of work groups to be launched in the X dimension. group_y (int): The number of work groups to be launched in the Y dimension. group_z (int): The number of work groups to be launched in the Z dimension.
def run(self, group_x=1, group_y=1, group_z=1) -> None: return self.mglo.run(group_x, group_y, group_z)
199,553
Returns a Uniform, UniformBlock, Subroutine, Attribute or Varying. Args: default: This is the value to be returned in case key does not exist. Returns: :py:class:`Uniform`, :py:class:`UniformBlock`, :py:class:`Subroutine`, :py:class:`Attribute` o...
def get(self, key, default) -> Union[Uniform, UniformBlock, Subroutine, Attribute, Varying]: return self._members.get(key, default)
199,554
Read a face from the cubemap texture. Args: face (int): The face to read. Keyword Args: alignment (int): The byte alignment of the pixels.
def read(self, face, *, alignment=1) -> bytes: return self.mglo.read(face, alignment)
199,625
Read a face from the cubemap texture. Args: buffer (bytearray): The buffer that will receive the pixels. face (int): The face to read. Keyword Args: alignment (int): The byte alignment of the pixels. write_offset (int): The write ...
def read_into(self, buffer, face, *, alignment=1, write_offset=0) -> None: if type(buffer) is Buffer: buffer = buffer.mglo return self.mglo.read_into(buffer, face, alignment, write_offset)
199,626
Update the content of the texture. Args: face (int): The face to update. data (bytes): The pixel data. viewport (tuple): The viewport. Keyword Args: alignment (int): The byte alignment of the pixels.
def write(self, face, data, viewport=None, *, alignment=1) -> None: if type(data) is Buffer: data = data.mglo self.mglo.write(face, data, viewport, alignment)
199,627
Detect format for vertex attributes. The format returned does not contain padding. Args: program (Program): The program. attributes (list): A list of attribute names. Returns: str
def detect_format(program, attributes) -> str: def fmt(attr): return attr.array_length * attr.dimension, attr.shape return ' '.join('%d%s' % fmt(program[a]) for a in attributes)
199,635
convenience tool to detect if something is iterable. in python3, strings count as iterables to we have the option to exclude them Parameters: ----------- obj : object to analyse reject_string : bool, whether to ignore strings Returns: -------- bool, if the object is itereable.
def isiterable(obj, reject_string=True): iterable = hasattr(obj, '__len__') if reject_string: iterable = iterable and not isinstance(obj, str) return iterable
199,699
Call visitor on root and all dependencies reachable from it in breadth first order. Args: root (component): component function or class visitor (function): signature is `func(component, parent)`. The call on root is `visitor(root, None)`.
def walk_dependencies(root, visitor): def visit(parent, visitor): for d in get_dependencies(parent): visitor(d, parent) visit(d, visitor) visitor(root, None) visit(root, visitor)
201,552
Checks if the specified user or user and group own the file. Args: owner (str): the user (or group) name for which we ask about ownership also_check_group (bool): if set to True, both user owner and group owner checked if set to False, only user owner che...
def owned_by(self, owner, also_check_group=False): if also_check_group: return self.owner == owner and self.group == owner else: return self.owner == owner
201,681
Add a filter or list of filters to a datasource. A filter is a simple string, and it matches if it is contained anywhere within a line. Args: ds (@datasource component): The datasource to filter patterns (str, [str]): A string, list of strings, or set of strings to add to the datasour...
def add_filter(ds, patterns): if not plugins.is_datasource(ds): raise Exception("Filters are applicable only to datasources.") delegate = dr.get_delegate(ds) if delegate.raw: raise Exception("Filters aren't applicable to raw datasources.") if not delegate.filterable: rais...
201,693
Returns a function that hydrates components as they are evaluated. The function should be registered as an observer on a Broker just before execution. Args: to_persist (set): Set of components to persist. Skip everything else.
def make_persister(self, to_persist): if not self.meta_data: raise Exception("Root not set. Can't create persister.") def persister(c, broker): if c in to_persist: self.dehydrate(c, broker) return persister
201,719
Helper method for parsing package string. Args: package_string (str): dash separated package string such as 'bash-4.2.39-3.el7' Returns: dict: dictionary containing 'name', 'version', 'release' and 'arch' keys
def _parse_package(cls, package_string): pkg, arch = rsplit(package_string, cls._arch_sep(package_string)) if arch not in KNOWN_ARCHITECTURES: pkg, arch = (package_string, None) pkg, release = rsplit(pkg, '-') name, version = rsplit(pkg, '-') epoch, version =...
201,766
Helper method for parsing package line with or without SOS report information. Args: line (str): package line with or without SOS report information Returns: dict: dictionary containing 'name', 'version', 'release' and 'arch' keys plus additionally 'installtim...
def _parse_line(cls, line): try: pkg, rest = line.split(None, 1) except ValueError: rpm = cls._parse_package(line.strip()) return rpm rpm = cls._parse_package(pkg) rest = rest.split('\t') for i, value in enumerate(rest): rp...
201,767
Adds an array of systems to specified group Args: group_name: Display name of group systems: Array of {'machine_id': machine_id}
def group_systems(self, group_name, systems): api_group_id = None headers = {'Content-Type': 'application/json'} group_path = self.api_url + '/v1/groups' group_get_path = group_path + ('?display_name=%s' % quote(group_name)) logger.debug("GET group: %s", group_get_path)...
201,815
Utility function to merge the source dictionary `src` to the target dictionary recursively Note: The type of the values in the dictionary can only be `dict` or `list` Parameters: tgt (dict): The target dictionary src (dict): The source dictionary
def dict_deep_merge(tgt, src): for k, v in src.items(): if k in tgt: if isinstance(tgt[k], dict) and isinstance(v, dict): dict_deep_merge(tgt[k], v) else: tgt[k].extend(deepcopy(v)) else: tgt[k] = deepcopy(v)
201,836
Rule reports a response if there is more than 1 host entry defined in the /etc/hosts file. Arguments: hp (HostParser): Parser object for the custom parser in this module. rhr (RedhatRelease): Parser object for the /etc/redhat-release file.
def report(hp, rhr): if len(hp.hosts) > 1: return make_fail("TOO_MANY_HOSTS", num=len(hp.hosts)) return make_pass("TOO_MANY_HOSTS", num=len(hp.hosts))
201,878
Parse part of an ls output line that is selinux. Args: parts (list): A four element list of strings representing the initial parts of an ls line after the permission bits. The parts are owner group, selinux info, and the path. Returns: A dict containing owner, group, se...
def parse_selinux(parts): owner, group = parts[:2] selinux = parts[2].split(":") lsel = len(selinux) path, link = parse_path(parts[-1]) result = { "owner": owner, "group": group, "se_user": selinux[0], "se_role": selinux[1] if lsel > 1 else None, "se_typ...
201,940
Parses a list of lines from ls into dictionaries representing their components. Args: lines (list): A list of lines generated by ls. root (str): The directory name to be used for ls output stanzas that don't have a name. Returns: A dictionary representing the ls output....
def parse(lines, root=None): doc = {} entries = [] name = None total = None for line in lines: line = line.strip() if not line: continue if line and line[0] == "/" and line[-1] == ":": if name is None: name = line[:-1] ...
201,941
Main parsing class method which stores all interesting data from the content. Args: content (context.content): Parser context content
def parse_content(self, content): # note, the Parser class sets: # * self.file_path = context.path and # * self.file_name = os.path.basename(context.path) self.active_lines_unparsed = get_active_lines(content) if content is not None else [] # (man page shows all options...
202,177
Collects fact for each host Collects the cpu and node configuration facts to be used by the rule. Arguments: cpu (CpuInfo): Parser object for the cpu info. cfg (NodeConfig): Parser object for the node configuration. Returns: dict: Dictionary of fact information including the keys ...
def cluster_info(cpu, cfg): cpus = cpu.cpu_count pods_per_core = cfg.doc.find("pods-per-core") pods_per_core_int = int(pods_per_core.value) if pods_per_core else PODS_PER_CORE cfg_max_pods = cfg.doc.find("max-pods") cfg_max_pods_int = int(cfg_max_pods.value) if cfg_max_pods else MAX_PODS ca...
202,178
Get the list of rules for a particular chain. Chain order is kept intact. Args: name (str): chain name, e.g. `` table (str): table name, defaults to ``filter`` Returns: list: rules
def get_chain(self, name, table="filter"): return [r for r in self.rules if r["table"] == table and r["chain"] == name]
202,228
Get a dict where the keys are all the chains for the given table and each value is the set of rules defined for the given chain. Args: table (str): table name, defaults to ``filter`` Returns: dict: chains with set of defined rules
def table_chains(self, table="filter"): return dict((c["name"], self.get_chain(c["name"], table)) for c in self.get_table(table))
202,229
Method for quick testing of a parser against a test string. Good for simple inline microtests of sub expressions while building up larger parser, as in: expr = Word(nums) assert expr.matches("100") Parameters: - testString - string
def matches(self, s, parseAll=True): try: self.parseString(_ustr(s), parseAll=parseAll) return True except ParseBaseException: return False
202,258
Turn the prefix length netmask into a int for comparison. Args: prefixlen: An integer, the prefix length. Returns: An integer.
def _ip_int_from_prefix(self, prefixlen=None): if prefixlen is None: prefixlen = self._prefixlen return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen)
202,333
Turn a prefix length into a dotted decimal string. Args: prefixlen: An integer, the netmask prefix length. Returns: A string, the dotted decimal netmask string.
def _ip_string_from_prefix(self, prefixlen=None): if not prefixlen: prefixlen = self._prefixlen return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen))
202,334
Turn the given IP string into an integer for comparison. Args: ip_str: A string, the IP ip_str. Returns: The IP ip_str as an integer. Raises: AddressValueError: if ip_str isn't a valid IPv4 Address.
def _ip_int_from_string(self, ip_str): if not ip_str: raise AddressValueError('Address cannot be empty') octets = ip_str.split('.') if len(octets) != 4: raise AddressValueError("Expected 4 octets in %r" % ip_str) try: bvs = map(self._parse_o...
202,336
Verify that the netmask/prefixlen is valid. Args: prefixlen: A string, the netmask in prefix length format. Returns: A boolean, True if the prefix represents a valid IPv6 netmask.
def _is_valid_netmask(self, prefixlen): try: prefixlen = int(prefixlen) except ValueError: return False return 0 <= prefixlen <= self._max_prefixlen
202,340
Returns the updated caching headers. Args: response (HttpResponse): The response from the remote service Returns: response:(HttpResponse.Headers): Http caching headers
def update_headers(self, response): if 'expires' in response.headers and 'cache-control' in response.headers: self.msg = self.server_cache_headers return response.headers else: self.msg = self.default_cache_vars date = parsedate(response.headers['...
202,374
If `results` contains a single line and that line is included in the `bad_lines` list, this function returns `False`. If no bad line is found the function returns `True` Parameters: results (list): The output lines, as a list, from the command defined by the comman...
def validate_lines(results, bad_lines):
    """Check a command's output against a list of known-bad single lines.

    Returns False when ``results`` is exactly one line and that line
    contains any of the ``bad_lines`` substrings (case-insensitive);
    otherwise returns True.

    Args:
        results: list of output lines from a command.
        bad_lines: list of lowercase substrings that mark a bad result.

    Returns:
        bool: True when no bad line is found.
    """
    if not results or len(results) != 1:
        return True
    lowered = results[0].lower()
    return not any(bad in lowered for bad in bad_lines)
202,403
Returns all lines that contain `s` anywhere and wrap them in a list of dictionaries. `s` can be either a single string or a string list. For list, all keywords in the list must be found in each line. Parameters: s(str or list): one or more strings to search for. Returns: ...
def get(self, s):
    """Return parsed dicts for every line containing ``s``.

    ``s`` may be a single string or a list of strings; for a list, every
    keyword must be found in a line for it to match (matching is done by
    the predicate built by ``_valid_search``).

    Args:
        s (str or list): one or more strings to search for.

    Returns:
        list: parsed representations of each matching line.
    """
    matcher = self._valid_search(s)
    return [self._parse_line(line) for line in self.lines if matcher(line)]
202,417
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel):
    """Constructor.

    Args:
        channel: A grpc.Channel.
    """
    # Register the unary-unary SayHello RPC with its protobuf codecs.
    serialize_request = hello__world__pb2.HelloRequest.SerializeToString
    deserialize_reply = hello__world__pb2.HelloReply.FromString
    self.SayHello = channel.unary_unary(
        '/helloworld.Greeter/SayHello',
        request_serializer=serialize_request,
        response_deserializer=deserialize_reply,
    )
202,882
Returns a short, term-friendly string representation of the object. Args: obj: An object for which to return a string representation. max_len: Maximum length of the returned string. Longer reprs will be turned into a brief descriptive string giving the type and length of obj.
def short_repr(obj, max_len=40):
    """Return a short, term-friendly string representation of ``obj``.

    Args:
        obj: An object for which to return a string representation.
        max_len: Maximum length of the returned string. Longer reprs are
            replaced by a brief description giving the type and length.

    Returns:
        str: the repr, or a placeholder when the repr is too long.
    """
    text = repr(obj)
    if len(text) > max_len:
        return '<{} of length {}>'.format(type(obj).__name__, len(text))
    return text
205,077
Install apk to device. Doesn't support verifier file, instead allows destination directory to be overridden. Arguments: apk_path: Local path to apk to install. destination_dir: Optional destination directory. Use /system/app/ for persistent applications. timeout_ms: Expected time...
def install(self, apk_path, destination_dir=None, timeout_ms=None): if not destination_dir: destination_dir = '/data/local/tmp/' basename = os.path.basename(apk_path) destination_path = destination_dir + basename self.push(apk_path, destination_path, timeout_ms=timeout_ms) return self.She...
205,112
Push source_file to file on device. Arguments: source_file: Either a filename or file-like object to push to the device. If a filename, will set the remote mtime to match the local mtime, otherwise will use the current time. device_filename: The filename on the device to write to. ...
def push(self, source_file, device_filename, timeout_ms=None): mtime = 0 if isinstance(source_file, six.string_types): mtime = os.path.getmtime(source_file) source_file = open(source_file) self.filesync_service.send( source_file, device_filename, mtime=mtime, timeout=timeou...
205,113
Pull file from device. Arguments: device_filename: The filename on the device to pull. dest_file: If set, a filename or writable file-like object. timeout_ms: Expected timeout for the pull. Returns: The file data if dest_file is not set, None otherwise.
def pull(self, device_filename, dest_file=None, timeout_ms=None): should_return_data = dest_file is None if isinstance(dest_file, six.string_types): dest_file = open(dest_file, 'w') elif dest_file is None: dest_file = six.StringIO() self.filesync_service.recv(device_filename, dest_file,...
205,114
Connect to the device. Args: usb_handle: UsbHandle instance to use. **kwargs: See AdbConnection.connect for kwargs. Includes rsa_keys, and auth_timeout_ms. Returns: An instance of this class if the device connected successfully.
def connect(cls, usb_handle, **kwargs):
    """Connect to the device.

    Args:
        usb_handle: UsbHandle instance to use.
        **kwargs: Forwarded to AdbConnection.connect; includes rsa_keys
            and auth_timeout_ms.

    Returns:
        An instance of this class if the device connected successfully.
    """
    connection = adb_protocol.AdbConnection.connect(usb_handle, **kwargs)
    return cls(connection)
205,118
Sends a query to the given multicast socket and returns responses. Args: query: The string query to send. address: Multicast IP address component of the socket to send to. port: Multicast UDP port component of the socket to send to. ttl: TTL for multicast messages. 1 to keep traffic in-network. t...
def send(query, address=DEFAULT_ADDRESS, port=DEFAULT_PORT, ttl=DEFAULT_TTL, local_only=False, timeout_s=2): # Set up the socket as a UDP Multicast socket with the given timeout. sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.setsockopt(socket.IPPROTO_...
205,119
Sends a command to the device. Args: command: The command to send. arg: Optional argument to the command.
def send_command(self, command, arg=None):
    """Sends a command to the device.

    Args:
        command: The command to send.
        arg: Optional argument; when given, the wire form is 'command:arg'.
    """
    payload = command if arg is None else '%s:%s' % (command, arg)
    self._write(six.StringIO(payload), len(payload))
205,124
Accepts normal responses from the device. Args: timeout_ms: Timeout in milliseconds to wait for each response. info_cb: Optional callback for text sent from the bootloader. Returns: OKAY packet's message.
def handle_simple_responses(
        self, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
    """Accepts normal responses from the device.

    Args:
        timeout_ms: Timeout in milliseconds to wait for each response.
        info_cb: Optional callback for text sent from the bootloader.

    Returns:
        OKAY packet's message.
    """
    # Delegate to the generic response loop, accepting only OKAY packets.
    return self._accept_responses('OKAY', info_cb, timeout_ms=timeout_ms)
205,125
Constructs a FastbootCommands instance. Arguments: usb: UsbHandle instance.
def __init__(self, usb):
    """Constructs a FastbootCommands instance.

    Args:
        usb: UsbHandle instance.
    """
    self._usb = usb
    # protocol_handler is expected to be a class attribute naming the
    # protocol implementation class — TODO confirm against the class body.
    self._protocol = self.protocol_handler(usb)
205,130
Flashes a partition from the file on disk. Args: partition: Partition name to flash to. source_file: Filename to download to the device. source_len: Optional length of source_file, uses os.stat if not provided. info_cb: See Download. progress_callback: See Download. timeout_ms: ...
def flash_from_file(self, partition, source_file, source_len=0, info_cb=DEFAULT_MESSAGE_CALLBACK, progress_callback=None, timeout_ms=None): if source_len == 0: # Fall back to stat. source_len = os.stat(source_file).st_size download_response = self.dow...
205,132
Flashes the last downloaded file to the given partition. Args: partition: Partition to flash. timeout_ms: Optional timeout in milliseconds to wait for it to finish. info_cb: See Download. Usually no messages. Returns: Response to a download request, normally nothing.
def flash(self, partition, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
    """Flashes the last downloaded file to the given partition.

    Args:
        partition: Partition to flash.
        timeout_ms: Optional timeout in milliseconds to wait for it to finish.
        info_cb: See Download. Usually no messages.

    Returns:
        Response to a download request, normally nothing.
    """
    return self._simple_command(
        'flash', arg=partition, info_cb=info_cb, timeout_ms=timeout_ms)
205,134
Returns the given variable's definition. Args: var: A variable the bootloader tracks, such as version. info_cb: See Download. Usually no messages. Returns: Value of var according to the current bootloader.
def get_var(self, var, info_cb=DEFAULT_MESSAGE_CALLBACK):
    """Returns the given variable's definition.

    Args:
        var: A variable the bootloader tracks, such as version.
        info_cb: See Download. Usually no messages.

    Returns:
        Value of var according to the current bootloader.
    """
    # getvar has no timeout override; the protocol default applies.
    return self._simple_command('getvar', arg=var, info_cb=info_cb)
205,136
Executes an OEM command on the device. Args: command: The command to execute, such as 'poweroff' or 'bootconfig read'. timeout_ms: Optional timeout in milliseconds to wait for a response. info_cb: See Download. Messages vary based on command. Returns: The final response from the device.
def oem(self, command, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK):
    """Executes an OEM command on the device.

    Args:
        command: The command to execute, such as 'poweroff' or
            'bootconfig read'.
        timeout_ms: Optional timeout in milliseconds to wait for a response.
        info_cb: See Download. Messages vary based on command.

    Returns:
        The final response from the device.
    """
    full_command = 'oem %s' % command
    return self._simple_command(
        full_command, timeout_ms=timeout_ms, info_cb=info_cb)
205,137
Reboots the device. Args: target_mode: Normal reboot when unspecified (or None). Can specify other target modes, such as 'recovery' or 'bootloader'. timeout_ms: Optional timeout in milliseconds to wait for a response. Returns: Usually the empty string. Depends on the bootloa...
def reboot(self, target_mode=None, timeout_ms=None):
    """Reboots the device.

    Args:
        target_mode: Normal reboot when unspecified (or None). Can specify
            other target modes, such as 'recovery' or 'bootloader'.
        timeout_ms: Optional timeout in milliseconds to wait for a response.

    Returns:
        Usually the empty string. Depends on the bootloader.
    """
    return self._simple_command(
        'reboot', arg=target_mode, timeout_ms=timeout_ms)
205,138
A generator that parses a worksheet containing UNECE code definitions. Args: sheet: An xlrd.sheet object representing a UNECE code worksheet. column_names: A list/tuple with the expected column names corresponding to the unit name, code and suffix in that order. Yields: Lines of Python so...
def unit_defs_from_sheet(sheet, column_names): seen = set() try: col_indices = {} rows = sheet.get_rows() # Find the indices for the columns we care about. for idx, cell in enumerate(six.next(rows)): if cell.value in column_names: col_indices[cell.value] = idx # loop over ...
205,151
Send the given message over this transport. Args: message: The AdbMessage to send. timeout: Use this timeout for the entire write operation, it should be an instance of timeouts.PolledTimeout.
def write_message(self, message, timeout): with self._writer_lock: self._transport.write(message.header, timeout.remaining_ms) # Use any remaining time to send the data. Note that if we get this far, # we always at least try to send the data (with a minimum of 10ms timeout) # because ...
205,157
List directory contents on the device. Args: path: List the contents of this directory. timeout: Timeout to use for this operation. Returns: Generator yielding DeviceFileStat tuples representing the contents of the requested path.
def list(self, path, timeout=None): transport = DentFilesyncTransport(self.stream) transport.write_data('LIST', path, timeout) return (DeviceFileStat(dent_msg.name, dent_msg.mode, dent_msg.size, dent_msg.time) for dent_msg in transport.read_until_done('DENT', time...
205,169
Push a file-like object to the device. Args: src_file: File-like object for reading from filename: Filename to push to on the device st_mode: stat mode for filename on the device mtime: modification time to set for the file on the device timeout: Timeout to use for the send operation....
def send(self, src_file, filename, st_mode=DEFAULT_PUSH_MODE, mtime=None, timeout=None): transport = DataFilesyncTransport(self.stream) transport.write_data('SEND', '%s,%s' % (filename, st_mode), timeout) try: while True: data = src_file.read(MAX_PUSH_DATA_BYTES) if no...
205,172
Write an arbitrary message (of one of the types above). For the host side implementation, this will only ever be a DataMessage, but it's implemented generically enough here that you could use FilesyncTransport to implement the device side if you wanted. Args: msg: The message to send, must be o...
def write_message(self, msg, timeout=None): replace_dict = {'command': self.CMD_TO_WIRE[msg.command]} if msg.has_data: # Swap out data for the data length for the wire. data = msg[-1] replace_dict[msg._fields[-1]] = len(data) self.stream.write(struct.pack(msg.struct_format, ...
205,176
Makes the names of phase measurement and attachments unique. This function will make the names of measurements and attachments unique. It modifies the input all_phases. Args: all_phases: the phases to make unique Returns: the phases now modified.
def phase_uniquizer(all_phases): measurement_name_maker = UniqueNameMaker( itertools.chain.from_iterable( phase.measurements.keys() for phase in all_phases if phase.measurements)) attachment_names = list(itertools.chain.from_iterable( phase.attachments.keys() for phase in all_phas...
205,186
Convert an OpenHTF test record attachment to a multi-dim measurement. This is a best effort attempt to reverse, as some data is lost in converting from a multidim to an attachment. Args: attachment: an `openhtf.test_record.Attachment` from a multi-dim. name: an optional name for the measurement. If not...
def attachment_to_multidim_measurement(attachment, name=None): data = json.loads(attachment.data) name = name or data.get('name') # attachment_dimn are a list of dicts with keys 'uom_suffix' and 'uom_code' attachment_dims = data.get('dimensions', []) # attachment_value is a list of lists [[t1, x1, y1, f1]...
205,190