_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q273600
generate_sigproc_header
test
def generate_sigproc_header(f):
    """
    Generate a serialized sigproc header which can be written to disk.

    Args:
        f (Filterbank object): Filterbank object for which to generate header

    Returns:
        header_str (bytes): Serialized byte string corresponding to header
    """
    header_string = b''
    header_string += to_sigproc_keyword(b'HEADER_START')
    for keyword in f.header.keys():
        if keyword == b'src_raj':
            # RA is stored in the special sigproc angle encoding
            header_string += to_sigproc_keyword(b'src_raj') + to_sigproc_angle(f.header[b'src_raj'])
        elif keyword == b'src_dej':
            # Dec also uses the sigproc angle encoding
            header_string += to_sigproc_keyword(b'src_dej') + to_sigproc_angle(f.header[b'src_dej'])
        elif keyword == b'az_start' or keyword == b'za_start':
            # az/za are written as raw little/native-endian float64 bytes
            header_string += to_sigproc_keyword(keyword) + np.float64(f.header[keyword]).tostring()
        elif keyword not in header_keyword_types.keys():
            # silently skip keywords sigproc does not know about
            pass
        else:
            header_string += to_sigproc_keyword(keyword, f.header[keyword])
    header_string += to_sigproc_keyword(b'HEADER_END')
    return header_string
python
{ "resource": "" }
q273601
to_sigproc_angle
test
def to_sigproc_angle(angle_val):
    """
    Convert an astropy.Angle to the sigproc angle format.

    Sigproc packs an angle DDMMSS.sss as a raw float64. E.g. 12h30m45.5s
    becomes the bytes of the float 123045.5.

    Args:
        angle_val: astropy.Angle (or any object whose str() looks like
            "12h30m45.5s" or "-5d10m15s")

    Returns:
        bytes: native-endian float64 representation of the packed angle.

    Raises:
        ValueError: if the string contains neither an 'h' nor a 'd' marker
            (the original code died with a NameError in that case).
    """
    x = str(angle_val)
    # Hour angles use 'h', degree angles use 'd' as the first separator.
    if 'h' in x:
        unit = 'h'
    elif 'd' in x:
        unit = 'd'
    else:
        raise ValueError("Unknown angle format: %s" % x)
    d = int(x[0:x.index(unit)])
    m = int(x[x.index(unit) + 1:x.index('m')])
    if '.' in x:
        s = int(x[x.index('m') + 1:x.index('.')])
        ss = float(x[x.index('.'):x.index('s')])
    else:
        s = int(x[x.index('m') + 1:x.index('s')])
        ss = 0
    # Pack as DDMMSS.fraction; str(ss).split(".")[-1] keeps only the
    # fractional digits ("0" when there is no fractional part).
    num = str(d).zfill(2) + str(m).zfill(2) + str(s).zfill(2) + '.' + str(ss).split(".")[-1]
    # tobytes() replaces the deprecated tostring() (removed in NumPy 2.0);
    # the produced bytes are identical.
    return np.float64(num).tobytes()
python
{ "resource": "" }
q273602
calc_n_ints_in_file
test
def calc_n_ints_in_file(filename):
    """
    Calculate the number of time integrations in a sigproc filterbank file.

    Args:
        filename (str): path to the filterbank file

    Returns:
        int: number of integrations (time samples) in the data section.
    """
    h = read_header(filename)
    n_bytes = int(h[b'nbits'] / 8)
    n_chans = h[b'nchans']
    n_ifs = h[b'nifs']
    idx_data = len_header(filename)
    # Data section size = total file size minus the header length.
    # NOTE: the original implementation also opened the file and seeked to
    # idx_data without ever reading from it or closing it — a pointless
    # file-handle leak, removed here.
    n_bytes_data = os.path.getsize(filename) - idx_data
    if h[b'nbits'] == 2:
        # 2-bit data packs 4 samples per byte
        n_ints = int(4 * n_bytes_data / (n_chans * n_ifs))
    else:
        n_ints = int(n_bytes_data / (n_bytes * n_chans * n_ifs))
    return n_ints
python
{ "resource": "" }
q273603
Traceback.to_dict
test
def to_dict(self):
    """Convert a Traceback into a dictionary representation.

    Recurses through the tb_next chain; captures the frame's globals and
    a minimal view of its code object (filename and name).
    """
    next_dict = self.tb_next.to_dict() if self.tb_next is not None else None
    code_obj = self.tb_frame.f_code
    return {
        'tb_frame': {
            'f_globals': self.tb_frame.f_globals,
            'f_code': {
                'co_filename': code_obj.co_filename,
                'co_name': code_obj.co_name,
            },
        },
        'tb_lineno': self.tb_lineno,
        'tb_next': next_dict,
    }
python
{ "resource": "" }
q273604
make_rr_subparser
test
def make_rr_subparser(subparsers, rec_type, args_and_types):
    """Make a subparser for a given type of DNS record.

    Every RR line is expected as: name, optional ttl, the record type
    token itself, then the record-type-specific fields given in
    args_and_types (each spec is (argname, argtype[, nargs])).
    """
    parser = subparsers.add_parser(rec_type)
    parser.add_argument("name", type=str)
    parser.add_argument("ttl", type=int, nargs='?')
    parser.add_argument(rec_type, type=str)
    for spec in args_and_types:
        arg_name, arg_type = spec[0], spec[1]
        if len(spec) > 2:
            # optional third element is an argparse nargs value
            parser.add_argument(arg_name, type=arg_type, nargs=spec[2])
        else:
            parser.add_argument(arg_name, type=arg_type)
    return parser
python
{ "resource": "" }
q273605
make_parser
test
def make_parser():
    """
    Make an ArgumentParser that accepts DNS RRs.

    Builds one subparser per supported record type plus the $ORIGIN and
    $TTL directives.  Returns the configured ZonefileLineParser.
    """
    line_parser = ZonefileLineParser()
    subparsers = line_parser.add_subparsers()

    # parse $ORIGIN
    sp = subparsers.add_parser("$ORIGIN")
    sp.add_argument("$ORIGIN", type=str)

    # parse $TTL
    sp = subparsers.add_parser("$TTL")
    sp.add_argument("$TTL", type=int)

    # parse each RR
    args_and_types = [
        ("mname", str), ("rname", str), ("serial", int), ("refresh", int),
        ("retry", int), ("expire", int), ("minimum", int)
    ]
    make_rr_subparser(subparsers, "SOA", args_and_types)
    make_rr_subparser(subparsers, "NS", [("host", str)])
    make_rr_subparser(subparsers, "A", [("ip", str)])
    make_rr_subparser(subparsers, "AAAA", [("ip", str)])
    make_rr_subparser(subparsers, "CNAME", [("alias", str)])
    make_rr_subparser(subparsers, "ALIAS", [("host", str)])
    make_rr_subparser(subparsers, "MX", [("preference", str), ("host", str)])
    # TXT records need their own parser (multi-token data)
    make_txt_subparser(subparsers)
    make_rr_subparser(subparsers, "PTR", [("host", str)])
    make_rr_subparser(subparsers, "SRV", [("priority", int), ("weight", int), ("port", int), ("target", str)])
    make_rr_subparser(subparsers, "SPF", [("data", str)])
    make_rr_subparser(subparsers, "URI", [("priority", int), ("weight", int), ("target", str)])
    return line_parser
python
{ "resource": "" }
q273606
remove_comments
test
def remove_comments(text):
    """Remove comments from a zonefile.

    Tokenizing each line strips trailing comments; serializing the tokens
    reassembles the remaining content.  Empty lines are dropped.
    """
    cleaned = []
    for raw_line in text.split("\n"):
        if len(raw_line) == 0:
            continue
        cleaned.append(serialize(tokenize_line(raw_line)))
    return "\n".join(cleaned)
python
{ "resource": "" }
q273607
add_default_name
test
def add_default_name(text):
    """
    Go through each line of the text and ensure that a name is defined.
    Use '@' if there is none.
    """
    global SUPPORTED_RECORDS
    lines = text.split("\n")
    ret = []
    for line in lines:
        tokens = tokenize_line(line)
        if len(tokens) == 0:
            # skip blank lines
            continue
        if tokens[0] in SUPPORTED_RECORDS and not tokens[0].startswith("$"):
            # line starts directly with a record type, i.e. the owner name
            # was omitted -- add back the name
            tokens = ['@'] + tokens
        ret.append(serialize(tokens))
    return "\n".join(ret)
python
{ "resource": "" }
q273608
parse_line
test
def parse_line(parser, record_token, parsed_records):
    """
    Given the parser, capitalized list of a line's tokens, and the current
    set of records parsed so far, parse it into a dictionary.

    Return the new set of parsed records.
    Raise an exception on error.

    NOTE(review): this code relies on Python 2 semantics in the "clean
    fields" loop below (deleting keys while iterating .keys()); under
    Python 3 that raises RuntimeError.
    """
    global SUPPORTED_RECORDS

    line = " ".join(record_token)

    # match parser to record type: move the record-type token to the front
    # so argparse dispatches to the right subparser
    if len(record_token) >= 2 and record_token[1] in SUPPORTED_RECORDS:
        # with no ttl
        record_token = [record_token[1]] + record_token
    elif len(record_token) >= 3 and record_token[2] in SUPPORTED_RECORDS:
        # with ttl
        record_token = [record_token[2]] + record_token
        if record_token[0] == "TXT":
            # TXT subparser takes the ttl as an option, not positionally
            record_token = record_token[:2] + ["--ttl"] + record_token[2:]

    try:
        rr, unmatched = parser.parse_known_args(record_token)
        assert len(unmatched) == 0, "Unmatched fields: %s" % unmatched
    except (SystemExit, AssertionError, InvalidLineException):
        # invalid argument
        raise InvalidLineException(line)

    record_dict = rr.__dict__
    if record_token[0] == "TXT" and len(record_dict['txt']) == 1:
        # single-chunk TXT data is stored as a plain string
        record_dict['txt'] = record_dict['txt'][0]

    # what kind of record? including origin and ttl
    record_type = None
    for key in record_dict.keys():
        if key in SUPPORTED_RECORDS and (key.startswith("$") or record_dict[key] == key):
            record_type = key
            if record_dict[key] == key:
                # drop the marker entry (e.g. {'NS': 'NS'})
                del record_dict[key]
            break
    assert record_type is not None, "Unknown record type in %s" % rr

    # clean fields: drop argparse defaults that were never supplied
    for field in record_dict.keys():
        if record_dict[field] is None:
            del record_dict[field]

    current_origin = record_dict.get('$ORIGIN', parsed_records.get('$ORIGIN', None))

    # special record-specific fix-ups
    if record_type == 'PTR':
        record_dict['fullname'] = record_dict['name'] + '.' + current_origin

    if len(record_dict) > 0:
        if record_type.startswith("$"):
            # directives ($ORIGIN/$TTL): put the value directly
            record_dict_key = record_type.lower()
            parsed_records[record_dict_key] = record_dict[record_type]
        else:
            record_dict_key = record_type.lower()
            parsed_records[record_dict_key].append(record_dict)

    return parsed_records
python
{ "resource": "" }
q273609
parse_lines
test
def parse_lines(text, ignore_invalid=False):
    """
    Parse a zonefile into a dict.

    @text must be flattened -- each record must be on one line.
    Also, all comments must be removed.

    :param ignore_invalid: when True, silently skip lines that fail to
        parse instead of raising InvalidLineException.
    """
    json_zone_file = defaultdict(list)
    record_lines = text.split("\n")
    parser = make_parser()
    for record_line in record_lines:
        record_token = tokenize_line(record_line)
        try:
            json_zone_file = parse_line(parser, record_token, json_zone_file)
        except InvalidLineException:
            if ignore_invalid:
                continue
            else:
                raise
    return json_zone_file
python
{ "resource": "" }
q273610
parse_zone_file
test
def parse_zone_file(text, ignore_invalid=False):
    """Parse a zonefile into a dict.

    Normalizes the text first (comments stripped, multi-line records
    flattened, class fields removed, owner names defaulted to '@'),
    then parses the resulting one-record-per-line text.
    """
    normalized = remove_comments(text)
    normalized = flatten(normalized)
    normalized = remove_class(normalized)
    normalized = add_default_name(normalized)
    return parse_lines(normalized, ignore_invalid=ignore_invalid)
python
{ "resource": "" }
q273611
quote_field
test
def quote_field(data, field):
    """
    Quote a field in a list of DNS records.
    Return the new data records (the input list is not modified).

    Args:
        data: list of record dicts, or None.
        field: key whose value gets double-quoted and semicolon-escaped.

    Returns:
        A deep copy of data with data[i][field] wrapped in double quotes
        and ';' escaped as '\\;' (zonefile comment character), or None
        when data is None.
    """
    if data is None:
        return None
    data_dup = copy.deepcopy(data)
    # The original used Python-2-only xrange and the ambiguous escape "\;"
    # (a SyntaxWarning on modern Python); iterate directly and use an
    # explicit backslash instead.
    for record in data_dup:
        record[field] = '"%s"' % record[field]
        record[field] = record[field].replace(";", "\\;")
    return data_dup
python
{ "resource": "" }
q273612
parse_schema_string
test
def parse_schema_string(schema_string):
    """
    Load and return a PySchema class from an avsc (Avro schema JSON) string.
    """
    # Python 2: promote byte strings to unicode before json parsing
    if isinstance(schema_string, str):
        schema_string = schema_string.decode("utf8")
    schema_struct = json.loads(schema_string)
    return AvroSchemaParser().parse_schema_struct(schema_struct)
python
{ "resource": "" }
q273613
to_python_package
test
def to_python_package(classes, target_folder, parent_package=None, indent=DEFAULT_INDENT):
    '''
    Build a python package representation of pyschema classes.

    One module is created per namespace, in a package matching the
    namespace hierarchy.

    Args:
        classes: A collection of classes to build the package from
        target_folder: Root folder of the package
        parent_package: Prepended on all import statements in order to support
            absolute imports. parent_package is not used when building the
            package file structure
        indent: Indent level. Defaults to 4 spaces
    '''
    builder = PackageBuilder(target_folder, parent_package, indent)
    builder.from_classes_with_refs(classes)
python
{ "resource": "" }
q273614
_class_source
test
def _class_source(schema, indent):
    """Generate Python source code for one specific class.

    Doesn't include or take into account any dependencies between
    record types.
    """
    # Template for the generated class; the warning header flags the file
    # as machine-generated.
    def_pattern = (
        "class {class_name}(pyschema.Record):\n"
        "{indent}# WARNING: This class was generated by pyschema.to_python_source\n"
        "{indent}# there is a risk that any modification made to this class will be overwritten\n"
        "{optional_namespace_def}"
        "{field_defs}\n"
    )
    if hasattr(schema, '_namespace'):
        optional_namespace_def = "{indent}_namespace = {namespace!r}\n".format(
            namespace=schema._namespace, indent=indent)
    else:
        optional_namespace_def = ""
    # One "name = <field repr>" line per declared field
    field_defs = [
        "{indent}{field_name} = {field!r}".format(field_name=field_name, field=field, indent=indent)
        for field_name, field in schema._fields.iteritems()
    ]
    if not field_defs:
        # keep the class body syntactically valid when there are no fields
        field_defs = ["{indent}pass".format(indent=indent)]
    return def_pattern.format(
        class_name=schema._schema_name,
        optional_namespace_def=optional_namespace_def,
        field_defs="\n".join(field_defs),
        indent=indent
    )
python
{ "resource": "" }
q273615
no_auto_store
test
def no_auto_store():
    """
    Temporarily disable automatic registration of records in the auto_store

    Decorator factory. This is _NOT_ thread safe

    >>> @no_auto_store()
    ... class BarRecord(Record):
    ...     pass
    >>> BarRecord in auto_store
    False
    """
    # remember current flag, switch registration off, and restore the flag
    # when the decorated class has been created
    original_auto_register_value = PySchema.auto_register
    disable_auto_register()

    def decorator(cls):
        PySchema.auto_register = original_auto_register_value
        return cls
    return decorator
python
{ "resource": "" }
q273616
to_json_compatible
test
def to_json_compatible(record):
    "Dump record in json-encodable object format"
    # Python 2: _fields is an ordered mapping; iteritems avoids a copy.
    # None values are omitted from the output entirely.
    d = {}
    for fname, f in record._fields.iteritems():
        val = getattr(record, fname)
        if val is not None:
            d[fname] = f.dump(val)
    return d
python
{ "resource": "" }
q273617
load_json_dct
test
def load_json_dct(
    dct,
    record_store=None,
    schema=None,
    loader=from_json_compatible
):
    """ Create a Record instance from a json-compatible dictionary

    The dictionary values should have types that are json compatible,
    as if just loaded from a json serialized record string.

    :param dct:
        Python dictionary with key/value pairs for the record

    :param record_store:
        Record store to use for schema lookups (when $schema field is present)

    :param schema:
        PySchema Record class for the record to load.
        This will override any $schema fields specified in `dct`

    :raises ParseError: when no schema is supplied and `dct` carries no
        (or an unknown) $schema identifier.
    """
    if schema is None:
        if record_store is None:
            record_store = auto_store
        try:
            # pop: the schema marker must not leak into the record fields
            schema_name = dct.pop(SCHEMA_FIELD_NAME)
        except KeyError:
            raise ParseError((
                "Serialized record missing '{0}' "
                "record identifier and no schema supplied")
                .format(SCHEMA_FIELD_NAME)
            )
        try:
            schema = record_store.get(schema_name)
        except KeyError:
            raise ParseError(
                "Can't recognize record type %r"
                % (schema_name,), schema_name)
    # if schema is explicit, use that instead of SCHEMA_FIELD_NAME
    elif SCHEMA_FIELD_NAME in dct:
        dct.pop(SCHEMA_FIELD_NAME)

    record = loader(schema, dct)
    return record
python
{ "resource": "" }
q273618
loads
test
def loads(
    s,
    record_store=None,
    schema=None,
    loader=from_json_compatible,
    record_class=None  # deprecated in favor of schema
):
    """ Create a Record instance from a json serialized dictionary

    :param s:
        String with a json-serialized dictionary

    :param record_store:
        Record store to use for schema lookups (when $schema field is present)

    :param loader:
        Function called to fetch attributes from json. Typically shouldn't
        be used by end users

    :param schema:
        PySchema Record class for the record to load.
        This will override any $schema fields specified in `s`

    :param record_class:
        DEPRECATED option, old name for the `schema` parameter
    """
    if record_class is not None:
        warnings.warn(
            "The record_class parameter is deprecated in favour of schema",
            DeprecationWarning,
            stacklevel=2
        )
        schema = record_class
    # Python 2: decode byte strings to unicode before parsing
    if not isinstance(s, unicode):
        s = s.decode('utf8')
    if s.startswith(u"{"):
        json_dct = json.loads(s)
        return load_json_dct(json_dct, record_store, schema, loader)
    else:
        raise ParseError("Not a json record")
python
{ "resource": "" }
q273619
SchemaStore.add_record
test
def add_record(self, schema, _bump_stack_level=False):
    """
    Add record class to record store for retrieval at record load time.

    Can be used as a class decorator (returns the schema unchanged).
    """
    full_name = get_full_name(schema)
    has_namespace = '.' in full_name
    # namespaced records must not silently overwrite an existing entry
    self._force_add(full_name, schema, _bump_stack_level, _raise_on_existing=has_namespace)
    # also index namespaced records by their bare class name, unless that
    # short name is already taken
    if has_namespace and schema.__name__ not in self._schema_map:
        self._force_add(schema.__name__, schema, _bump_stack_level)
    return schema
python
{ "resource": "" }
q273620
SchemaStore.get
test
def get(self, record_name):
    """Return the record registered under ``record_name``.

    A full (namespaced) name is tried first; if absent, the lookup falls
    back to the last dot-separated component (the bare class name).
    Raises KeyError when neither lookup succeeds.
    """
    try:
        return self._schema_map[record_name]
    except KeyError:
        short_name = record_name.rsplit('.', 1)[-1]
        return self._schema_map[short_name]
python
{ "resource": "" }
q273621
Field.repr_vars
test
def repr_vars(self):
    """Return an ordered dictionary describing the field definition.

    Contains every attribute required to reconstruct this field's
    declaration in a pyschema class; ``description`` is included only
    when set.
    """
    definition = OrderedDict()
    definition["nullable"] = repr(self.nullable)
    definition["default"] = repr(self.default)
    description = self.description
    if description is not None:
        definition["description"] = repr(description)
    return definition
python
{ "resource": "" }
q273622
Field.mixin
test
def mixin(cls, mixin_cls):
    """Decorator for mixing in additional functionality into field type

    Example:

    >>> @Integer.mixin
    ... class IntegerPostgresExtensions:
    ...     postgres_type = 'INT'
    ...
    ...     def postgres_dump(self, obj):
    ...         self.dump(obj) + "::integer"

    Is roughly equivalent to:

    >>> Integer.postgres_type = 'INT'
    ...
    ... def postgres_dump(self, obj):
    ...     self.dump(obj) + "::integer"
    ...
    ... Integer.postgres_dump = postgres_dump
    """
    for item_name in dir(mixin_cls):
        if item_name.startswith("__"):
            # don't copy magic properties
            continue
        item = getattr(mixin_cls, item_name)
        if isinstance(item, types.MethodType):
            # unbound method will cause problems
            # so get the underlying function instead (Python 2: im_func)
            item = item.im_func
        setattr(cls, item_name, item)
    return mixin_cls
python
{ "resource": "" }
q273623
PySchema.from_class
test
def from_class(metacls, cls, auto_store=True):
    """Create proper PySchema class from cls

    Any methods and attributes will be transferred to the new object.

    :param auto_store: when False, the created class is not registered
        in the automatic record store.
    """
    if auto_store:
        # no-op wrapper: keep default registration behavior
        def wrap(cls):
            return cls
    else:
        # suppress registration while the new class is being created
        wrap = no_auto_store()
    return wrap(metacls.__new__(
        metacls,
        cls.__name__,
        (Record,),
        dict(cls.__dict__)
    ))
python
{ "resource": "" }
q273624
get_schema_dict
test
def get_schema_dict(record, state=None):
    """Return a python dict representing the jsonschema of a record

    Any references to sub-schemas will be URI fragments that won't be
    resolvable without a root schema, available from get_root_schema_dict.
    """
    state = state or SchemaGeneratorState()
    schema = OrderedDict([
        ('type', 'object'),
        ('id', record._schema_name),
    ])
    fields = dict()
    # Python 2: iterate the record's declared fields
    for field_name, field_type in record._fields.iteritems():
        fields[field_name] = field_type.jsonschema_type_schema(state)
    # every declared field is marked as required
    required = set(fields.keys())
    schema['properties'] = fields
    schema['required'] = sorted(list(required))
    schema['additionalProperties'] = False
    # register this schema so the root builder can collect sub-schemas
    state.record_schemas[record._schema_name] = schema
    return schema
python
{ "resource": "" }
q273625
get_root_schema_dict
test
def get_root_schema_dict(record):
    """Return a root jsonschema for a given record

    A root schema includes the $schema attribute and all sub-record
    schemas and definitions.
    """
    state = SchemaGeneratorState()
    schema = get_schema_dict(record, state)
    # the root record itself must not appear among the definitions
    del state.record_schemas[record._schema_name]
    if state.record_schemas:
        schema['definitions'] = dict()
        for name, sub_schema in state.record_schemas.iteritems():
            schema['definitions'][name] = sub_schema
    return schema
python
{ "resource": "" }
q273626
mr_reader
test
def mr_reader(job, input_stream, loads=core.loads):
    """
    Convert a file object with json-serialised pyschema records into a
    stream of pyschema objects.

    Each yielded value is a 1-tuple, as expected by luigi's MR plumbing.
    Can be used as job.reader in luigi.hadoop.JobTask
    """
    for serialized in input_stream:
        yield (loads(serialized),)
python
{ "resource": "" }
q273627
mr_writer
test
def mr_writer(job, outputs, output_stream, stderr=sys.stderr,
              dumps=core.dumps):
    """
    Writes a stream of json serialised pyschema Records to a file object

    Can be used as job.writer in luigi.hadoop.JobTask

    NOTE: Python 2 only (print >> syntax).
    """
    for output in outputs:
        try:
            print >> output_stream, dumps(output)
        except core.ParseError, e:
            # log the offending record, then re-raise so the job fails loudly
            print >> stderr, e
            raise
python
{ "resource": "" }
q273628
ordereddict_push_front
test
def ordereddict_push_front(dct, key, value):
    """Set a value at the front of an OrderedDict

    The original dict isn't modified; a new OrderedDict is returned with
    ``key`` first, followed by the entries of ``dct`` in their order.
    """
    result = OrderedDict([(key, value)])
    result.update(dct)
    return result
python
{ "resource": "" }
q273629
Collection.query_string
test
def query_string(self, **params):
    """Specify query string to use with the collection.

    Any keyword arguments are forwarded as query parameters on the
    collection's href.

    Returns: :py:class:`SearchResult`
    """
    return SearchResult(self, self._api.get(self._href, **params))
python
{ "resource": "" }
q273630
Collection.raw_filter
test
def raw_filter(self, filters):
    """Sends all filters to the API. No fancy, just a wrapper.

    Any advanced functionality shall be implemented as another method.

    Args:
        filters: List of filters (strings)

    Returns: :py:class:`SearchResult`
    """
    # "filter[]" is the API's repeated-parameter convention
    return SearchResult(self, self._api.get(self._href, **{"filter[]": filters}))
python
{ "resource": "" }
q273631
Collection.all_include_attributes
test
def all_include_attributes(self, attributes):
    """Returns all entities present in the collection with ``attributes`` included."""
    # reload expanded so each resource carries the requested attributes
    self.reload(expand=True, attributes=attributes)
    entities = [Entity(self, r, attributes=attributes) for r in self._resources]
    # restore the collection to its default (non-expanded) state
    self.reload()
    return entities
python
{ "resource": "" }
q273632
Action._get_entity_from_href
test
def _get_entity_from_href(self, result):
    """Returns entity in correct collection.

    If the "href" value in result doesn't match the current collection,
    try to find the collection that the "href" refers to.
    """
    href_result = result['href']
    # href points into the current collection -> wrap it directly
    if self.collection._href.startswith(href_result):
        return Entity(self.collection, result, incomplete=True)
    # otherwise extract "<entry_point>/<collection_name>" from the href
    href_match = re.match(r"(https?://.+/api[^?]*)/([a-z_-]+)", href_result)
    if not href_match:
        raise ValueError("Malformed href: {}".format(href_result))
    collection_name = href_match.group(2)
    entry_point = href_match.group(1)
    new_collection = Collection(
        self.collection.api,
        "{}/{}".format(entry_point, collection_name),
        collection_name
    )
    # incomplete=True: the result dict only carries partial entity data
    return Entity(new_collection, result, incomplete=True)
python
{ "resource": "" }
q273633
give_another_quote
test
def give_another_quote(q):
    """When you pass a quote character, returns you an another one if possible"""
    for candidate in QUOTES:
        if candidate != q:
            return candidate
    raise ValueError(u'Could not find a different quote for {}'.format(q))
python
{ "resource": "" }
q273634
escape_filter
test
def escape_filter(o):
    """Tries to escape the values that are passed to filter as correctly as possible.

    No standard way is followed, but at least it is simple.
    """
    if o is None:
        return u'NULL'
    if isinstance(o, int):
        return str(o)
    if not isinstance(o, six.string_types):
        raise ValueError('Filters take only None, int or a string type')
    if not o:
        # Empty string
        return u"''"
    # Now enforce unicode
    o = unicode_process(o)
    if u'"' not in o:
        # Simple case, just put the quote that does not exist in the string
        return u'"' + o + u'"'
    elif u"'" not in o:
        # Simple case, just put the quote that does not exist in the string
        return u"'" + o + u"'"
    else:
        # Both are there, so start guessing
        # Empty strings are sorted out, so the string must contain something.
        # String with length == 1 are sorted out because if they have a quote, they would be quoted
        # with the another quote in preceding branch. Therefore the string is at least 2 chars long
        # here which allows us to NOT check the length here.
        first_char = o[0]
        last_char = o[-1]
        if first_char in QUOTES and last_char in QUOTES:
            # The first and last chars definitely are quotes
            if first_char == last_char:
                # Simple, just put another ones around them
                quote = give_another_quote(first_char)
                return quote + o + quote
            else:
                # I don't like this but the nature of the escape is like that ...
                # Since now it uses both of the quotes, just pick the simple ones and surround it
                return u"'" + o + u"'"
        elif first_char not in QUOTES and last_char not in QUOTES:
            # First and last chars are not quotes, so a simple solution
            return u"'" + o + u"'"
        else:
            # One of the first or last chars is not a quote
            if first_char in QUOTES:
                quote = give_another_quote(first_char)
            else:
                # last_char
                quote = give_another_quote(last_char)
            return quote + o + quote
python
{ "resource": "" }
q273635
elementaryRotationMatrix
test
def elementaryRotationMatrix(axis, rotationAngle):
    """
    Construct an elementary rotation matrix describing a rotation around the x, y, or z-axis.

    Parameters
    ----------

    axis - Axis around which to rotate ("x", "y", or "z")
    rotationAngle - the rotation angle in radians

    Returns
    -------

    The rotation matrix

    Example usage
    -------------

    rotmat = elementaryRotationMatrix("y", pi/6.0)
    """
    c = cos(rotationAngle)
    s = sin(rotationAngle)
    if axis == "x" or axis == "X":
        return array([[1.0, 0.0, 0.0],
                      [0.0, c, s],
                      [0.0, -s, c]])
    if axis == "y" or axis == "Y":
        return array([[c, 0.0, -s],
                      [0.0, 1.0, 0.0],
                      [s, 0.0, c]])
    if axis == "z" or axis == "Z":
        return array([[c, s, 0.0],
                      [-s, c, 0.0],
                      [0.0, 0.0, 1.0]])
    raise Exception("Unknown rotation axis "+axis+"!")
python
{ "resource": "" }
q273636
construct_covariance_matrix
test
def construct_covariance_matrix(cvec, parallax, radial_velocity, radial_velocity_error):
    """
    Take the astrometric parameter standard uncertainties and the uncertainty correlations
    as quoted in the Gaia catalogue and construct the covariance matrix.

    Parameters
    ----------

    cvec : array_like
        Array of shape (15,) (1 source) or (n,15) (n sources) for the astrometric parameter
        standard uncertainties and their correlations, as listed in the Gaia catalogue
        [ra_error, dec_error, parallax_error, pmra_error, pmdec_error, ra_dec_corr,
        ra_parallax_corr, ra_pmra_corr, ra_pmdec_corr, dec_parallax_corr, dec_pmra_corr,
        dec_pmdec_corr, parallax_pmra_corr, parallax_pmdec_corr, pmra_pmdec_corr].
        Units are (mas^2, mas^2/yr, mas^2/yr^2).

    parallax : array_like (n elements)
        Source parallax (mas).

    radial_velocity : array_like (n elements)
        Source radial velocity (km/s, does not have to be from Gaia RVS!). If the radial velocity
        is not known it can be set to zero.

    radial_velocity_error : array_like (n elements)
        Source radial velocity uncertainty (km/s). If the radial velocity is not know this can be
        set to the radial velocity dispersion for the population the source was drawn from.

    Returns
    -------

    Covariance matrix as a 6x6 array.
    """
    if np.ndim(cvec) == 1:
        cmat = np.zeros((1, 6, 6))
        nsources = 1
        cv = np.atleast_2d(cvec)
    else:
        nsources = cvec.shape[0]
        cmat = np.zeros((nsources, 6, 6))
        cv = cvec
    # Broadcast the squared standard errors into the 5x5 astrometric block.
    # Only the diagonal survives: the off-diagonal entries are overwritten
    # by the correlation terms below.
    for k in range(nsources):
        cmat[k, 0:5, 0:5] = cv[k, 0:5]**2
    # Fill the 10 upper-triangle covariances from sigma_i*sigma_j*corr_ij
    # (corr terms occupy cv columns 5..14), then mirror to the lower triangle.
    # NOTE: this k intentionally shadows the source index above.
    iu = np.triu_indices(5, k=1)
    for k in range(10):
        i = iu[0][k]
        j = iu[1][k]
        cmat[:, i, j] = cv[:, i]*cv[:, j]*cv[:, k+5]
        cmat[:, j, i] = cmat[:, i, j]
    # Sixth row/column: covariance terms involving the radial proper motion,
    # scaled by vrad / (AU in km*yr/s); auKmYearPerSec is a module constant.
    for k in range(nsources):
        cmat[k, 0:5, 5] = cmat[k, 0:5, 2]*np.atleast_1d(radial_velocity)[k]/auKmYearPerSec
    cmat[:, 5, 0:5] = cmat[:, 0:5, 5]
    cmat[:, 5, 5] = cmat[:, 2, 2]*(radial_velocity**2 + radial_velocity_error**2)/auKmYearPerSec**2 + \
        (parallax*radial_velocity_error/auKmYearPerSec)**2
    # squeeze: (1,6,6) -> (6,6) for the single-source case
    return np.squeeze(cmat)
python
{ "resource": "" }
q273637
vradErrorSkyAvg
test
def vradErrorSkyAvg(vmag, spt):
    """
    Calculate radial velocity error from V and the spectral type. The value of the error is an average over
    the sky.

    Parameters
    ----------

    vmag - Value of V-band magnitude.
    spt  - String representing the spectral type of the star (must be a key
           of the module-level coefficient tables).

    Returns
    -------

    The radial velocity error in km/s.
    """
    # calibration floor plus an exponential magnitude term with
    # spectral-type-dependent coefficients
    return _vradCalibrationFloor + _vradErrorBCoeff[spt]*exp(_vradErrorACoeff[spt]*(vmag-_vradMagnitudeZeroPoint))
python
{ "resource": "" }
q273638
calcParallaxError
test
def calcParallaxError(args):
    """
    Calculate the parallax error for the given input source magnitude and colour,
    and print a summary to stdout.

    :argument args: command line arguments (dict with 'gmag' and 'vmini')
    """
    gmag = float(args['gmag'])
    vmini = float(args['vmini'])
    sigmaPar = parallaxErrorSkyAvg(gmag, vmini)
    # G-V colour transformation from (V-I)
    gminv = gminvFromVmini(vmini)
    print("G = {0}".format(gmag))
    print("V = {0}".format(gmag-gminv))
    print("(V-I) = {0}".format(vmini))
    print("(G-V) = {0}".format(gminv))
    print("standard error = {0} muas".format(sigmaPar))
python
{ "resource": "" }
q273639
gMagnitudeError
test
def gMagnitudeError(G):
    """
    Calculate the single-field-of-view-transit photometric standard error in the G band as a function
    of G. A 20% margin is included.

    Parameters
    ----------

    G - Value(s) of G-band magnitude.

    Returns
    -------

    The G band photometric standard error in units of magnitude.
    """
    z = calcZ(G)
    # polynomial-in-z error model (millimag), scaled by the science margin
    return 1.0e-3*sqrt(0.04895*z*z + 1.8633*z + 0.0001985) * _scienceMargin
python
{ "resource": "" }
q273640
gMagnitudeErrorEoM
test
def gMagnitudeErrorEoM(G, nobs=70):
    """
    Calculate the end of mission photometric standard error in the G band as a function
    of G. A 20% margin is included.

    Parameters
    ----------

    G - Value(s) of G-band magnitude.

    Keywords
    --------

    nobs - Number of observations collected (default 70).

    Returns
    -------

    The G band photometric standard error in units of magnitude.
    """
    # combine the margin-free single-transit error with the calibration
    # floor in quadrature, average over nobs transits, then re-apply margin
    return sqrt((power(gMagnitudeError(G)/_scienceMargin, 2) +
                 _eomCalibrationFloorG*_eomCalibrationFloorG)/nobs) * _scienceMargin
python
{ "resource": "" }
q273641
makePlot
test
def makePlot(args):
    """
    Make the plot with photometry performance predictions.

    Plots sky-averaged G, G_BP and G_RP standard errors versus magnitude,
    either per transit or end-of-mission depending on args['eom'].

    :argument args: command line arguments (dict with keys 'vmini', 'eom',
        'vmagAbscissa', 'pdfOutput', 'pngOutput')
    """
    gmag = np.linspace(3.0, 20.0, 171)
    vmini = args['vmini']
    vmag = gmag - gminvFromVmini(vmini)
    if args['eom']:
        sigmaG = gMagnitudeErrorEoM(gmag)
        sigmaGBp = bpMagnitudeErrorEoM(gmag, vmini)
        sigmaGRp = rpMagnitudeErrorEoM(gmag, vmini)
        # BUG FIX: was (1.0-4, 0.1) which evaluates to (-3.0, 0.1);
        # 1.0e-4 was clearly intended for a log-scale lower limit.
        yminmax = (1.0e-4, 0.1)
    else:
        sigmaG = gMagnitudeError(gmag)
        sigmaGBp = bpMagnitudeError(gmag, vmini)
        sigmaGRp = rpMagnitudeError(gmag, vmini)
        # BUG FIX: same 1.0-4 -> 1.0e-4 typo as above.
        yminmax = (1.0e-4, 1)
    fig = plt.figure(figsize=(10, 6.5))
    if (args['vmagAbscissa']):
        plt.semilogy(vmag, sigmaG, 'k', label='$\\sigma_G$')
        plt.semilogy(vmag, sigmaGBp, 'b', label='$\\sigma_{G_\\mathrm{BP}}$'+' for $(V-I)={0}$'.format(vmini))
        plt.semilogy(vmag, sigmaGRp, 'r', label='$\\sigma_{G_\\mathrm{RP}}$'+' for $(V-I)={0}$'.format(vmini))
        plt.xlim((6, 20))
        #plt.ylim(yminmax)
        plt.legend(loc=0)
        plt.xlabel('$V$ [mag]')
    else:
        ax = fig.add_subplot(111)
        plt.semilogy(gmag, sigmaG, 'k', label='$\\sigma_G$')
        plt.semilogy(gmag, sigmaGBp, 'b', label='$\\sigma_{G_\\mathrm{BP}}$'+' for $(V-I)={0}$'.format(vmini))
        plt.semilogy(gmag, sigmaGRp, 'r', label='$\\sigma_{G_\\mathrm{RP}}$'+' for $(V-I)={0}$'.format(vmini))
        plt.xlim((6, 20))
        #plt.ylim(yminmax)
        plt.legend(loc=0)
        plt.xlabel('$G$ [mag]')
        plt.xticks(np.arange(6, 20, 2))
    ax = plt.gca().yaxis
    #ax.set_major_formatter(matplotlib.ticker.ScalarFormatter())
    #plt.ticklabel_format(axis='y',style='plain')
    plt.grid(which='both')
    plt.ylabel('Photometric error [mag]')
    if args['eom']:
        plt.title('End-of-mission mean photometry: sky averaged errors for $(V-I)={0}$'.format(vmini), fontsize=14)
    else:
        plt.title('Single-FoV-transit photometry: sky averaged errors for $(V-I)={0}$'.format(vmini), fontsize=14)
    basename = 'PhotometricErrors'
    if (args['pdfOutput']):
        plt.savefig(basename+'.pdf')
    elif (args['pngOutput']):
        plt.savefig(basename+'.png')
    else:
        plt.show()
python
{ "resource": "" }
q273642
averageNumberOfTransits
test
def averageNumberOfTransits(beta):
    """
    Returns the number of transits across the Gaia focal plane averaged over ecliptic longitude.

    Parameters
    ----------

    beta - Value(s) of the Ecliptic latitude (radians).
           NOTE(review): the boolean-mask assignment below assumes beta is
           array-like; a bare scalar yields a 0-d array — confirm callers
           always pass arrays.

    Returns
    -------

    Average number of transits for the input values of beta.
    """
    # bin |sin(beta)| into the precomputed lookup table
    indices = array(floor(abs(sin(beta))*_numStepsSinBeta), dtype=int)
    # clamp the edge case |sin(beta)| == 1 into the last bin
    indices[(indices==_numStepsSinBeta)] = _numStepsSinBeta-1
    return _averageTransitNumber[indices]
python
{ "resource": "" }
q273643
angularDistance
test
def angularDistance(phi1, theta1, phi2, theta2):
    """
    Calculate the angular distance between pairs of sky coordinates.

    Parameters
    ----------

    phi1 : float
        Longitude of first coordinate (radians).
    theta1 : float
        Latitude of first coordinate (radians).
    phi2 : float
        Longitude of second coordinate (radians).
    theta2 : float
        Latitude of second coordinate (radians).

    Returns
    -------

    Angular distance in radians, in [0, pi].
    """
    # The Vincenty form below is more numerically stable than
    # arccos( sin(theta1)*sin(theta2) + cos(phi2-phi1)*cos(theta1)*cos(theta2) ).
    # See: https://en.wikipedia.org/wiki/Great-circle_distance
    #
    # BUG FIX: the original used arctan(numerator/denominator). For
    # separations beyond 90 degrees the denominator is negative, so arctan
    # returned a negative angle instead of a value in (pi/2, pi]. arctan2
    # resolves the quadrant correctly (and also avoids a division by zero
    # at exactly 90 degrees).
    numerator = sqrt((cos(theta2)*sin(phi2-phi1))**2 +
                     (cos(theta1)*sin(theta2) - sin(theta1)*cos(theta2)*cos(phi2-phi1))**2)
    denominator = sin(theta1)*sin(theta2) + cos(phi2-phi1)*cos(theta1)*cos(theta2)
    return arctan2(numerator, denominator)
python
{ "resource": "" }
q273644
CoordinateTransformation.transformCartesianCoordinates
test
def transformCartesianCoordinates(self, x, y, z):
    """
    Rotate Cartesian coordinates from one reference system to another using the
    rotation matrix with which the class was initialized. The inputs can be
    scalars or 1-dimensional numpy arrays.

    Parameters
    ----------

    x - Value of X-coordinate in original reference system
    y - Value of Y-coordinate in original reference system
    z - Value of Z-coordinate in original reference system

    Returns
    -------

    xrot - Value of X-coordinate after rotation
    yrot - Value of Y-coordinate after rotation
    zrot - Value of Z-coordinate after rotation
    """
    rotated = dot(self.rotationMatrix, [x, y, z])
    xrot, yrot, zrot = rotated
    return xrot, yrot, zrot
python
{ "resource": "" }
q273645
CoordinateTransformation.transformSkyCoordinates
test
def transformSkyCoordinates(self, phi, theta):
    """
    Convert sky coordinates from one reference system to another, making use of
    the rotation matrix with which the class was initialized. Inputs can be
    scalars or 1-dimensional numpy arrays.

    Parameters
    ----------
    phi   - Value of the azimuthal angle (right ascension, longitude) in radians.
    theta - Value of the elevation angle (declination, latitude) in radians.

    Returns
    -------
    phirot   - Value of the transformed azimuthal angle in radians.
    thetarot - Value of the transformed elevation angle in radians.
    """
    # Work on the unit sphere: rotate the unit vector, then convert back.
    unit_radius = ones_like(phi)
    x, y, z = sphericalToCartesian(unit_radius, phi, theta)
    xrot, yrot, zrot = self.transformCartesianCoordinates(x, y, z)
    _, phirot, thetarot = cartesianToSpherical(xrot, yrot, zrot)
    return phirot, thetarot
python
{ "resource": "" }
q273646
CoordinateTransformation.transformCovarianceMatrix
test
def transformCovarianceMatrix(self, phi, theta, covmat):
    """
    Transform the astrometric covariance matrix to its representation in the
    new coordinate system.

    Parameters
    ----------
    phi    - The longitude-like angle of the position of the source (radians).
    theta  - The latitude-like angle of the position of the source (radians).
    covmat - Covariance matrix (5x5) of the astrometric parameters.

    Returns
    -------
    covmat_rot - Covariance matrix in its representation in the new coordinate
                 system.
    """
    cosRot, sinRot = self._getJacobian(phi, theta)
    # Build the 5x5 Jacobian: a 2x2 rotation acting on (position, proper motion)
    # pairs; the parallax (index 2) is invariant.
    jac = identity(5)
    for i in (0, 1, 3, 4):
        jac[i][i] = cosRot
    jac[0][1] = sinRot
    jac[1][0] = -sinRot
    jac[3][4] = sinRot
    jac[4][3] = -sinRot
    return dot(dot(jac, covmat), jac.T)
python
{ "resource": "" }
q273647
errorScalingFactor
test
def errorScalingFactor(observable, beta):
    """
    Look up the numerical factors to apply to the sky averaged parallax error in
    order to obtain error values for a given astrometric parameter, taking the
    Ecliptic latitude and the number of transits into account.

    Parameters
    ----------
    observable - Name of astrometric observable (one of: alphaStar, delta,
                 parallax, muAlphaStar, muDelta)
    beta       - Values(s) of the Ecliptic latitude.

    Returns
    -------
    Numerical factors to apply to the errors of the given observable.
    """
    factors = _astrometricErrorFactors[observable]
    if isscalar(beta):
        bin_idx = int(floor(abs(sin(beta)) * _numStepsSinBeta))
        # sin(beta) == 1 falls exactly on the upper table edge; clamp it.
        if bin_idx == _numStepsSinBeta:
            bin_idx = _numStepsSinBeta - 1
        return factors[bin_idx]
    bin_idx = array(floor(abs(sin(beta)) * _numStepsSinBeta), dtype=int)
    bin_idx[bin_idx == _numStepsSinBeta] = _numStepsSinBeta - 1
    return factors[bin_idx]
python
{ "resource": "" }
q273648
makePlot
test
def makePlot(pdf=False, png=False):
    """
    Plot relative parallax errors as a function of distance for stars of a
    given spectral type.

    Parameters
    ----------
    pdf - If True save the figure to 'RelativeParallaxErrorsVsDist.pdf'.
    png - If True (and pdf is False) save the figure to
          'RelativeParallaxErrorsVsDist.png'. If neither is set, show on screen.
    """
    logdistancekpc = np.linspace(-1, np.log10(20.0), 100)
    sptVabsAndVmini = OrderedDict([('K0V', (5.58, 0.87)), ('G5V', (4.78, 0.74)),
                                   ('G0V', (4.24, 0.67)), ('F5V', (3.50, 0.50)),
                                   ('F0V', (2.98, 0.38)), ('RC', (0.8, 1.0))])
    lines = {}
    fig = plt.figure(figsize=(10, 6.5))
    currentAxis = plt.gca()
    for spt in sptVabsAndVmini.keys():
        # Apparent V magnitude from absolute magnitude and distance modulus
        # (distance in kpc, hence the +10 instead of -5).
        vmag = sptVabsAndVmini[spt][0] + 5.0 * logdistancekpc + 10.0
        indices = (vmag > 14) & (vmag < 16)
        gmag = vmag + gminvFromVmini(sptVabsAndVmini[spt][1])
        parerrors = parallaxErrorSkyAvg(gmag, sptVabsAndVmini[spt][1])
        # Convert the parallax error to a relative error sigma_varpi / varpi.
        relparerrors = parerrors * 10**logdistancekpc / 1000.0
        plt.loglog(10**logdistancekpc, relparerrors, '--k', lw=1)
        plt.loglog(10**logdistancekpc[indices], relparerrors[indices], '-', label=spt)
    plt.xlim(0.1, 20.0)
    plt.ylim(0.001, 0.5)
    plt.text(0.9, 0.05, 'Colours indicate $14<V<16$',
             horizontalalignment='right', verticalalignment='bottom',
             transform=currentAxis.transAxes)
    plt.legend(loc=2)
    plt.xlabel('distance [kpc]')
    plt.ylabel('$\\sigma_\\varpi/\\varpi$')
    plt.grid(which='both')
    # Fix: honour the pdf/png parameters. The previous implementation ignored
    # them and read an undefined module-level 'args' dictionary, raising
    # NameError when called as documented.
    if pdf:
        plt.savefig('RelativeParallaxErrorsVsDist.pdf')
    elif png:
        plt.savefig('RelativeParallaxErrorsVsDist.png')
    else:
        plt.show()
python
{ "resource": "" }
q273649
makePlot
test
def makePlot(args):
    """
    Make the plot with radial velocity performance predictions.

    :argument args: command line arguments
    """
    gRvs = np.linspace(5.7, 16.1, 101)
    spts = ['B0V', 'B5V', 'A0V', 'A5V', 'F0V', 'G0V', 'G5V', 'K0V', 'K1IIIMP',
            'K4V', 'K1III']

    fig = plt.figure(figsize=(10, 6.5))
    # Spread the curve colours over a 240-degree hue range (red to blue).
    deltaHue = 240.0 / (len(spts) - 1)
    hsv = np.zeros((1, 1, 3))
    hsv[0, 0, 1] = 1.0
    hsv[0, 0, 2] = 0.9
    for idx, spt in enumerate(spts):
        hsv[0, 0, 0] = (240 - idx * deltaHue) / 360.0
        vmag = vminGrvsFromVmini(vminiFromSpt(spt)) + gRvs
        vradErrors = vradErrorSkyAvg(vmag, spt)
        plt.plot(vmag, vradErrors, '-', label=spt, color=hsv_to_rgb(hsv)[0, 0, :])

    plt.grid(which='both')
    plt.xlim(9, 17.5)
    plt.ylim(0, 20)
    plt.xticks(np.arange(9, 18, 1))
    plt.yticks(np.arange(0, 20.5, 5))
    plt.xlabel('$V$ [mag]')
    plt.ylabel('End-of-mission radial velocity error [km s$^{-1}$]')
    leg = plt.legend(loc=0, handlelength=2.0, labelspacing=0.10)
    for t in leg.get_texts():
        t.set_fontsize(12)

    if (args['pdfOutput']):
        plt.savefig('RadialVelocityErrors.pdf')
    elif (args['pngOutput']):
        plt.savefig('RadialVelocityErrors.png')
    else:
        plt.show()
python
{ "resource": "" }
q273650
either
test
def either(*funcs):
    """
    A utility function for selecting the first non-null query.

    Parameters:

      funcs: One or more functions

    Returns:

      A function that, when called with a :class:`Node`, will pass the
      input to each `func`, and return the first non-Falsey result.

    Examples:

        >>> s = Soupy("<p>hi</p>")
        >>> s.apply(either(Q.find('a'), Q.find('p').text))
        Scalar('hi')
    """
    def _first_match(val):
        # Try each candidate in order; the first truthy result wins.
        for candidate in funcs:
            outcome = val.apply(candidate)
            if outcome:
                return outcome
        return Null()
    return _first_match
python
{ "resource": "" }
q273651
_helpful_failure
test
def _helpful_failure(method):
    """
    Decorator for eval_ that prints a helpful error message if an exception
    is generated in a Q expression
    """
    @wraps(method)
    def wrapper(self, val):
        try:
            return method(self, val)
        except:
            exc_cls, inst, tb = sys.exc_info()

            if hasattr(inst, '_RERAISE'):
                # Exception was already decorated by an inner wrapper: refresh
                # the debug info with the outermost expression/value pair and
                # propagate the exception unchanged.
                _, expr, _, inner_val = Q.__debug_info__
                Q.__debug_info__ = QDebug(self, expr, val, inner_val)
                raise

            if issubclass(exc_cls, KeyError):
                # Overrides formatting
                exc_cls = QKeyError

            # Show val, unless it's too long
            prettyval = repr(val)
            if len(prettyval) > 150:
                prettyval = "<%s instance>" % (type(val).__name__)

            msg = "{0}\n\n\tEncountered when evaluating {1}{2}".format(
                inst, prettyval, self)
            new_exc = exc_cls(msg)
            # Mark so outer wrappers re-raise without re-decorating.
            new_exc._RERAISE = True
            Q.__debug_info__ = QDebug(self, self, val, val)
            # Re-raise with the original traceback preserved (py2/py3 safe).
            six.reraise(exc_cls, new_exc, tb)
    return wrapper
python
{ "resource": "" }
q273652
_uniquote
test
def _uniquote(value):
    """
    Convert *value* to unicode, wrapping it in single quotes if it was
    originally a string (bytes or text).
    """
    if isinstance(value, six.binary_type):
        try:
            text = value.decode('utf-8')
        except UnicodeDecodeError:
            # Not valid utf-8: fall back to the de-quoted repr.
            text = six.text_type(_dequote(repr(value)))
        return "'%s'" % six.text_type(text)
    if isinstance(value, six.text_type):
        return "'%s'" % six.text_type(value)
    # Non-string values are stringified without quoting.
    return six.text_type(value)
python
{ "resource": "" }
q273653
Collection.each
test
def each(self, *funcs):
    """
    Call `func` on each element in the collection.

    If multiple functions are provided, each item in the output will be a
    tuple of each func(item) in self.

    Returns a new Collection.

    Example:

        >>> col = Collection([Scalar(1), Scalar(2)])
        >>> col.each(Q * 10)
        Collection([Scalar(10), Scalar(20)])

        >>> col.each(Q * 10, Q - 1)
        Collection([Scalar((10, 0)), Scalar((20, 1))])
    """
    callables = [_make_callable(f) for f in funcs]

    if len(callables) == 1:
        sole = callables[0]
        return Collection(sole(item) for item in self._items)

    # Multiple functions: bundle their (unwrapped) results per item.
    def tupled(item):
        return Scalar(tuple(_unwrap(fn(item)) for fn in callables))

    return Collection(tupled(item) for item in self._items)
python
{ "resource": "" }
q273654
Collection.exclude
test
def exclude(self, func=None):
    """
    Return a new Collection excluding some items

    Parameters:

        func : function(Node) -> Scalar

        A function that, when called on each item in the collection,
        returns a boolean-like value. If no function is provided,
        then truthy items will be removed.

    Returns:

        A new Collection consisting of the items
        where bool(func(item)) == False
    """
    predicate = _make_callable(func)

    def _negated(item):
        return not predicate(item)

    # Delegate to filter with the inverted predicate.
    return self.filter(_negated)
python
{ "resource": "" }
q273655
Collection.filter
test
def filter(self, func=None):
    """
    Return a new Collection with some items removed.

    Parameters:

        func : function(Node) -> Scalar

        A function that, when called on each item in the collection,
        returns a boolean-like value. If no function is provided,
        then false-y items will be removed.

    Returns:

        A new Collection consisting of the items
        where bool(func(item)) == True

    Examples:

        node.find_all('a').filter(Q['href'].startswith('http'))
    """
    predicate = _make_callable(func)
    return Collection(item for item in self._items if predicate(item))
python
{ "resource": "" }
q273656
Collection.takewhile
test
def takewhile(self, func=None):
    """
    Return a new Collection with the last few items removed.

    Parameters:

        func : function(Node) -> Node

    Returns:

        A new Collection, discarding all items
        at and after the first item where bool(func(item)) == False

    Examples:

        node.find_all('tr').takewhile(Q.find_all('td').count() > 3)
    """
    predicate = _make_callable(func)
    kept = []
    for item in self._items:
        # Stop at the first failing item; everything after it is dropped.
        if not predicate(item):
            break
        kept.append(item)
    return Collection(kept)
python
{ "resource": "" }
q273657
Collection.dropwhile
test
def dropwhile(self, func=None):
    """
    Return a new Collection with the first few items removed.

    Parameters:

        func : function(Node) -> Node

    Returns:

        A new Collection, discarding all items
        before the first item where bool(func(item)) == True
    """
    predicate = _make_callable(func)
    remaining = []
    dropping = True
    for item in self._items:
        # Skip the leading run of items that satisfy the predicate; once one
        # fails, keep everything from there onwards.
        if dropping and predicate(item):
            continue
        dropping = False
        remaining.append(item)
    return Collection(remaining)
python
{ "resource": "" }
q273658
Collection.zip
test
def zip(self, *others):
    """
    Zip the items of this collection with one or more
    other sequences, and wrap the result.

    Unlike Python's zip, all sequences must be the same length.

    Parameters:

        others: One or more iterables or Collections

    Returns:

        A new collection.

    Examples:

        >>> c1 = Collection([Scalar(1), Scalar(2)])
        >>> c2 = Collection([Scalar(3), Scalar(4)])
        >>> c1.zip(c2).val()
        [(1, 3), (2, 4)]
    """
    sequences = [_unwrap(seq) for seq in (self,) + others]
    expected = self.count()
    # All inputs must match this collection's length exactly.
    for seq in sequences:
        if len(seq) != expected:
            raise ValueError("Arguments are not all the same length")
    return Collection(Wrapper.wrap(grouped) for grouped in zip(*sequences))
python
{ "resource": "" }
q273659
Node.find
test
def find(self, *args, **kwargs):
    """
    Find a single Node among this Node's descendants.

    Returns :class:`NullNode` if nothing matches.

    This inputs to this function follow the same semantics
    as BeautifulSoup. See http://bit.ly/bs4doc for more info.

    Examples:

     - node.find('a')  # look for `a` tags
     - node.find('a', 'foo')  # look for `a` tags with class=`foo`
     - node.find(func)  # find tag where func(tag) is True
     - node.find(val=3)  # look for tag like <a, val=3>
    """
    # Defer the actual BeautifulSoup call and wrap its result.
    finder = operator.methodcaller('find', *args, **kwargs)
    return self._wrap_node(finder)
python
{ "resource": "" }
q273660
serach_path
test
def serach_path():
    """Return potential locations of an IACA installation.

    NOTE(review): the function name is misspelled ("serach"), but renaming it
    would break external callers, so it is kept as-is.
    """
    operating_system = get_os()
    # 1st choice: user-local installation under ~/.kerncraft
    home_candidate = os.path.expanduser(
        "~/.kerncraft/iaca/{}/".format(operating_system))
    # 2nd choice: bundled alongside this package
    package_candidate = os.path.abspath(
        os.path.dirname(os.path.realpath(__file__))) + '/iaca/{}/'.format(operating_system)
    return [home_candidate, package_candidate]
python
{ "resource": "" }
q273661
group_iterator
test
def group_iterator(group):
    """
    Yield all characters of a simple regex-like group expression.

    The only special character is a dash (-), which takes the preceding and the
    following chars to compute a range. If the range is non-sensical
    (e.g., b-a) it will be empty.

    Example:
    >>> list(group_iterator('a-f'))
    ['a', 'b', 'c', 'd', 'e', 'f']
    >>> list(group_iterator('148'))
    ['1', '4', '8']
    >>> list(group_iterator('7-9ab'))
    ['7', '8', '9', 'a', 'b']
    >>> list(group_iterator('0B-A1'))
    ['0', '1']
    """
    # Ordering used to resolve ranges: a-zA-Z0-9.
    alphabet = string.ascii_letters + string.digits
    pattern = ('(?P<seq>[a-zA-Z0-9]-[a-zA-Z0-9])|'
               '(?P<chr>.)')
    for match in re.finditer(pattern, group):
        seq = match.group('seq')
        if seq:
            lo = alphabet.index(seq[0])
            hi = alphabet.index(seq[2])
            # Empty when hi < lo, matching the documented behaviour.
            for pos in range(lo, hi + 1):
                yield alphabet[pos]
        else:
            yield match.group('chr')
python
{ "resource": "" }
q273662
register_options
test
def register_options(regdescr):
    """
    Very reduced regular expressions for describing a group of registers.

    Only groups in square brackets and unions with pipes (|) are supported.

    Examples:
    >>> list(register_options('PMC[0-3]'))
    ['PMC0', 'PMC1', 'PMC2', 'PMC3']
    >>> list(register_options('MBOX0C[01]'))
    ['MBOX0C0', 'MBOX0C1']
    >>> list(register_options('CBOX2C1'))
    ['CBOX2C1']
    >>> list(register_options('CBOX[0-3]C[01]'))
    ['CBOX0C0', 'CBOX0C1', 'CBOX1C0', 'CBOX1C1', 'CBOX2C0', 'CBOX2C1', 'CBOX3C0', 'CBOX3C1']
    >>> list(register_options('PMC[0-1]|PMC[23]'))
    ['PMC0', 'PMC1', 'PMC2', 'PMC3']
    """
    if not regdescr:
        # Fix: stop the generator here. Previously execution fell through to
        # re.match('') below, which returned None and raised AttributeError
        # on the second iteration.
        yield None
        return
    tokenizer = (r'\[(?P<grp>[^]]+)\]|'
                 r'(?P<chr>.)')
    for u in regdescr.split('|'):
        m = re.match(tokenizer, u)
        if m.group('grp'):
            # Bracket group: expand via group_iterator (supports ranges).
            current = group_iterator(m.group('grp'))
        else:
            current = [m.group('chr')]
        for c in current:
            if u[m.end():]:
                # Recurse on the remainder and combine with each prefix char.
                for r in register_options(u[m.end():]):
                    yield c + r
            else:
                yield c
python
{ "resource": "" }
q273663
eventstr
test
def eventstr(event_tuple=None, event=None, register=None, parameters=None):
    """
    Return a LIKWID event string from an event tuple or keyword arguments.

    *event_tuple* may have two or three arguments: (event, register) or
    (event, register, parameters). If given, its fields override the keyword
    arguments.

    >>> eventstr(('L1D_REPLACEMENT', 'PMC0', None))
    'L1D_REPLACEMENT:PMC0'
    >>> eventstr(('L1D_REPLACEMENT', 'PMC0'))
    'L1D_REPLACEMENT:PMC0'
    >>> eventstr(('MEM_UOPS_RETIRED_LOADS', 'PMC3', {'EDGEDETECT': None, 'THRESHOLD': 2342}))
    'MEM_UOPS_RETIRED_LOADS:PMC3:EDGEDETECT:THRESHOLD=0x926'
    >>> eventstr(event='DTLB_LOAD_MISSES_WALK_DURATION', register='PMC3')
    'DTLB_LOAD_MISSES_WALK_DURATION:PMC3'
    """
    # Fix: guard against event_tuple=None. Keyword-only calls (as shown in the
    # last doctest) previously crashed on len(None).
    if event_tuple is not None:
        if len(event_tuple) == 3:
            event, register, parameters = event_tuple
        elif len(event_tuple) == 2:
            event, register = event_tuple

    event_dscr = [event, register]

    if parameters:
        # Fix: iterate over the resolved *parameters*, not event_tuple[2],
        # which crashed for keyword-only calls that supplied parameters.
        for k, v in sorted(parameters.items()):  # sorted for reproducability
            if type(v) is int:
                # Integer parameter values are rendered as hex, e.g. 0x926.
                k += "={}".format(hex(v))
            event_dscr.append(k)
    return ":".join(event_dscr)
python
{ "resource": "" }
q273664
build_minimal_runs
test
def build_minimal_runs(events):
    """Compile a minimal list of measurement runs covering all given events."""
    # Order-preserving de-duplication of event tuples.
    unique_events = []
    for e in events:
        if e not in unique_events:
            unique_events.append(e)

    # Greedily pack events into runs: an event goes into the current run if
    # one of its candidate registers is still free there.
    runs_by_index = {}
    placed = []
    run_index = 0
    while len(placed) != len(unique_events):
        for event_tpl in unique_events:
            event, registers, parameters = event_tpl
            # Skip events that already found a slot in an earlier run.
            if event_tpl in placed:
                continue
            registers_in_run = runs_by_index.setdefault(run_index, {})
            for candidate_reg in register_options(registers):
                if candidate_reg not in registers_in_run:
                    registers_in_run[candidate_reg] = (event, candidate_reg, parameters)
                    placed.append(event_tpl)
                    break
        run_index += 1

    # Collapse the per-run register maps into plain event lists.
    return [list(run.values()) for run in runs_by_index.values()]
python
{ "resource": "" }
q273665
Roofline.report
test
def report(self, output_file=sys.stdout):
    """Report analysis outcome in human readable form.

    Prints the bottleneck table and the CPU/cache/memory bound verdict to
    *output_file*. Verbosity is controlled via self._args.verbose.
    """
    max_perf = self.results['max_perf']

    if self._args and self._args.verbose >= 3:
        # Full dump of all results.
        print('{}'.format(pformat(self.results)), file=output_file)

    if self._args and self._args.verbose >= 1:
        print('{}'.format(pformat(self.results['verbose infos'])), file=output_file)

    print('Bottlenecks:', file=output_file)
    print(' level | a. intensity | performance | peak bandwidth | peak bandwidth kernel',
          file=output_file)
    print('--------+--------------+-----------------+-------------------+----------------------',
          file=output_file)
    print(' CPU | | {!s:>15} | |'.format(
        max_perf[self._args.unit]), file=output_file)
    for b in self.results['mem bottlenecks']:
        print('{level:>7} | {arithmetic intensity:>5.2} FLOP/B | {0!s:>15} |'
              ' {bandwidth!s:>17} | {bw kernel:<8}'.format(
                  b['performance'][self._args.unit], **b), file=output_file)
    print('', file=output_file)

    if self.results['min performance']['FLOP/s'] > max_perf['FLOP/s']:
        # CPU bound
        print('CPU bound. {!s} due to CPU max. FLOP/s'.format(max_perf), file=output_file)
    else:
        # Cache or mem bound
        print('Cache or mem bound.', file=output_file)
        bottleneck = self.results['mem bottlenecks'][self.results['bottleneck level']]
        print('{!s} due to {} transfer bottleneck (with bw from {} benchmark)'.format(
            bottleneck['performance'][self._args.unit],
            bottleneck['level'],
            bottleneck['bw kernel']), file=output_file)
        print('Arithmetic Intensity: {:.2f} FLOP/B'.format(
            bottleneck['arithmetic intensity']), file=output_file)
python
{ "resource": "" }
q273666
RooflineIACA.report
test
def report(self, output_file=sys.stdout):
    """Print human readable report of model.

    Writes the bottleneck table, the IACA in-core summary and the final
    bound verdict to *output_file*. Verbosity is controlled via self.verbose.
    """
    cpu_perf = self.results['cpu bottleneck']['performance throughput']

    if self.verbose >= 3:
        # Full dump of all results.
        print('{}'.format(pformat(self.results)), file=output_file)

    if self.verbose >= 1:
        print('Bottlenecks:', file=output_file)
        print(' level | a. intensity | performance | peak bandwidth | peak bandwidth kernel',
              file=output_file)
        print('--------+--------------+-----------------+-------------------+----------------------',
              file=output_file)
        print(' CPU | | {!s:>15} | |'.format(
            cpu_perf[self._args.unit]), file=output_file)
        for b in self.results['mem bottlenecks']:
            # Skip CPU-L1 from Roofline model
            if b is None:
                continue
            print('{level:>7} | {arithmetic intensity:>5.2} FLOP/B | {0!s:>15} |'
                  ' {bandwidth!s:>17} | {bw kernel:<8}'.format(
                      b['performance'][self._args.unit], **b), file=output_file)
        print('', file=output_file)
        # NOTE(review): "analisys" is a typo in user-visible output; left
        # unchanged here since this update only adds documentation.
        print('IACA analisys:', file=output_file)
        print('{!s}'.format(
            {k: v
             for k, v in list(self.results['cpu bottleneck'].items())
             if k not in ['IACA output']}), file=output_file)

    if self.results['min performance']['FLOP/s'] > cpu_perf['FLOP/s']:
        # CPU bound
        print('CPU bound. {!s} due to CPU bottleneck'.format(cpu_perf[self._args.unit]),
              file=output_file)
    else:
        # Cache or mem bound
        print('Cache or mem bound.', file=output_file)
        bottleneck = self.results['mem bottlenecks'][self.results['bottleneck level']]
        print('{!s} due to {} transfer bottleneck (with bw from {} benchmark)'.format(
            bottleneck['performance'][self._args.unit],
            bottleneck['level'],
            bottleneck['bw kernel']), file=output_file)
        print('Arithmetic Intensity: {:.2f} FLOP/B'.format(
            bottleneck['arithmetic intensity']), file=output_file)
python
{ "resource": "" }
q273667
LC.report
test
def report(self, output_file=sys.stdout):
    """Report generated model in human readable form.

    For every analyzed dimension, prints per-cache layer conditions: either
    "unconditionally fulfilled", a raw inequality, or symbol upper bounds.
    """
    if self._args and self._args.verbose > 2:
        pprint(self.results)

    for dimension, lc_info in self.results['dimensions'].items():
        print("{}D layer condition:".format(dimension), file=output_file)
        for cache, lc_solution in sorted(lc_info['caches'].items()):
            print(cache+": ", end='', file=output_file)
            if lc_solution['lt'] is sympy.true:
                # Condition holds for any problem size.
                print("unconditionally fulfilled", file=output_file)
            else:
                if lc_solution['eq'] is None:
                    # No solved equality available; show the raw inequality.
                    print("{}".format(lc_solution['lt']), file=output_file)
                elif type(lc_solution['eq']) is not list:
                    print("{}".format(lc_solution['eq']), file=output_file)
                else:
                    # List of solution dicts: print each symbol's upper bound.
                    for solu in lc_solution['eq']:
                        for s, v in solu.items():
                            print("{} <= {}".format(s, v), file=output_file)
python
{ "resource": "" }
q273668
clean_code
test
def clean_code(code, comments=True, macros=False, pragmas=False):
    """
    Naive comment, macro and pragma stripping from source code.

    :param comments: If True, all comments are stripped from code
    :param macros: If True, all macros are stripped from code
    :param pragmas: If True, all pragmas are stripped from code

    :return: cleaned code. Line numbers are preserved with blank lines,
    and multiline comments and macros are supported. BUT comment-like
    strings are (wrongfully) treated as comments.
    """
    if macros or pragmas:
        lines = code.split('\n')
        in_macro = False
        in_pragma = False
        for i in range(len(lines)):
            l = lines[i].strip()

            if macros and (l.startswith('#') and not l.startswith('#pragma') or in_macro):
                lines[i] = ''
                # A trailing backslash continues the macro on the next line.
                in_macro = l.endswith('\\')
            if pragmas and (l.startswith('#pragma') or in_pragma):
                lines[i] = ''
                in_pragma = l.endswith('\\')
        code = '\n'.join(lines)

    if comments:
        idx = 0
        comment_start = None
        while idx < len(code) - 1:
            if comment_start is None and code[idx:idx + 2] == '//':
                # Line comment: strip up to (but not including) the newline.
                end_idx = code.find('\n', idx)
                if end_idx == -1:
                    # FIX: a '//' comment on the last line without a trailing
                    # newline was previously only partially removed, because
                    # find() returned -1 and code[-1:] kept the final char.
                    end_idx = len(code)
                code = code[:idx] + code[end_idx:]
                # Re-scan from just before the removal point. (The previous
                # "idx -= end_idx - idx" could drive idx negative, causing
                # bogus negative-index slicing.)
                idx -= 1
            elif comment_start is None and code[idx:idx + 2] == '/*':
                comment_start = idx
            elif comment_start is not None and code[idx:idx + 2] == '*/':
                # Block comment: replace with its newlines only, to preserve
                # the file's line numbering.
                code = (code[:comment_start]
                        + '\n' * code[comment_start:idx].count('\n')
                        + code[idx + 2:])
                idx -= idx - comment_start
                comment_start = None
            idx += 1
    return code
python
{ "resource": "" }
q273669
round_to_next
test
def round_to_next(x, base):
    """Round *x* up to the next multiple of *base* and return it as an int."""
    # Based on: http://stackoverflow.com/a/2272174
    multiple = math.ceil(float(x) / base)
    return int(multiple * base)
python
{ "resource": "" }
q273670
blocking
test
def blocking(indices, block_size, initial_boundary=0):
    """
    Split list of integers into blocks of block_size and return the sorted
    block indices (as plain ints).

    The first block element is located at initial_boundary (default 0).

    >>> blocking([0, -1, -2, -3, -4, -5, -6, -7, -8, -9], 8)
    [-2, -1, 0]
    >>> blocking([0], 8)
    [0]
    >>> blocking([0], 8, initial_boundary=32)
    [-4]
    """
    # Fix: use integer floor division. The previous float division produced
    # float block indices (e.g. -1.0), contradicting the documented examples;
    # the doctest outputs were also inconsistent with the actual block math
    # and the ascending sort, and have been corrected above.
    return sorted({(idx - initial_boundary) // block_size for idx in indices})
python
{ "resource": "" }
q273671
ECMData.calculate_cache_access
test
def calculate_cache_access(self):
    """Collect cache statistics from the cache predictor into self.results."""
    predictor = self.predictor
    self.results.update({
        'cycles': [],  # filled later by calculate_cycles()
        'misses': predictor.get_misses(),
        'hits': predictor.get_hits(),
        'evicts': predictor.get_evicts(),
        'verbose infos': predictor.get_infos()})
python
{ "resource": "" }
q273672
ECMData.calculate_cycles
test
def calculate_cycles(self):
    """
    Calculate performance model cycles from cache stats.

    calculate_cache_access() needs to have been execute before.
    """
    element_size = self.kernel.datatypes_size[self.kernel.datatype]
    elements_per_cacheline = float(self.machine['cacheline size']) // element_size
    # Kept symbolic (sympy) so non-integer ratios stay exact.
    iterations_per_cacheline = (sympy.Integer(self.machine['cacheline size']) /
                                sympy.Integer(self.kernel.bytes_per_iteration))
    self.results['iterations per cacheline'] = iterations_per_cacheline
    cacheline_size = float(self.machine['cacheline size'])

    loads, stores = (self.predictor.get_loads(), self.predictor.get_stores())

    # Skip L1 (index 0): only inter-level transfers cost cycles here.
    for cache_level, cache_info in list(enumerate(self.machine['memory hierarchy']))[1:]:
        throughput, duplexness = cache_info['non-overlap upstream throughput']

        if type(throughput) is str and throughput == 'full socket memory bandwidth':
            # Memory transfer
            # we use bandwidth to calculate cycles and then add panalty cycles (if given)

            # choose bw according to cache level and problem
            # first, compile stream counts at current cache level
            # write-allocate is allready resolved in cache predictor
            read_streams = loads[cache_level]
            write_streams = stores[cache_level]
            # second, try to find best fitting kernel (closest to stream seen stream counts):
            threads_per_core = 1
            bw, measurement_kernel = self.machine.get_bandwidth(
                cache_level, read_streams, write_streams, threads_per_core)

            # calculate cycles
            if duplexness == 'half-duplex':
                cycles = float(loads[cache_level] + stores[cache_level]) * \
                    float(elements_per_cacheline) * float(element_size) * \
                    float(self.machine['clock']) / float(bw)
            else:  # full-duplex
                raise NotImplementedError(
                    "full-duplex mode is not (yet) supported for memory transfers.")
            # add penalty cycles for each read stream
            # NOTE(review): the comment says "read stream" but the code
            # multiplies by stores[cache_level] — possibly intentional
            # (write-allocate reads) or a bug; verify against the upstream
            # kerncraft implementation.
            if 'penalty cycles per read stream' in cache_info:
                cycles += stores[cache_level] * \
                    cache_info['penalty cycles per read stream']
            self.results.update({
                'memory bandwidth kernel': measurement_kernel,
                'memory bandwidth': bw})
        else:
            # since throughput is given in B/cy, and we need CL/cy:
            throughput = float(throughput) / cacheline_size
            # only cache cycles count
            if duplexness == 'half-duplex':
                cycles = (loads[cache_level] + stores[cache_level]) / float(throughput)
            elif duplexness == 'full-duplex':
                cycles = max(loads[cache_level] / float(throughput),
                             stores[cache_level] / float(throughput))
            else:
                raise ValueError("Duplexness of cache throughput may only be 'half-duplex'"
                                 "or 'full-duplex', found {} in {}.".format(
                                     duplexness, cache_info['name']))

        # Record per-level cycle counts both as an ordered list and by name.
        self.results['cycles'].append((cache_info['level'], cycles))
        self.results[cache_info['level']] = cycles
    return self.results
python
{ "resource": "" }
q273673
ECMData.analyze
test
def analyze(self):
    """Run the complete ECM data analysis and return the results dictionary."""
    self.calculate_cache_access()
    self.calculate_cycles()
    # Total floating-point operations per kernel iteration.
    flop_total = sum(self.kernel._flops.values())
    self.results['flops per iteration'] = flop_total
    return self.results
python
{ "resource": "" }
q273674
ECMCPU.analyze
test
def analyze(self):
    """
    Run complete analysis and return results.

    Performs the IACA in-core analysis on the kernel's assembly, normalizes
    the predicted cycles to cycles-per-cacheline, and derives the overlapping
    (T_OL) and non-overlapping (T_nOL) port times used by the ECM model.
    """
    try:
        incore_analysis, asm_block = self.kernel.iaca_analysis(
            micro_architecture=self.machine['micro-architecture'],
            asm_block=self.asm_block,
            pointer_increment=self.pointer_increment,
            verbose=self.verbose > 2)
    except RuntimeError as e:
        print("IACA analysis failed: " + str(e))
        sys.exit(1)

    block_throughput = incore_analysis['throughput']
    port_cycles = incore_analysis['port cycles']
    uops = incore_analysis['uops']

    # Normalize to cycles per cacheline
    elements_per_block = abs(asm_block['pointer_increment']
                             // self.kernel.datatypes_size[self.kernel.datatype])
    block_size = elements_per_block * self.kernel.datatypes_size[self.kernel.datatype]
    try:
        block_to_cl_ratio = float(self.machine['cacheline size']) / block_size
    except ZeroDivisionError as e:
        # A zero pointer increment would make the block cover no data.
        print("Too small block_size / pointer_increment:", e, file=sys.stderr)
        sys.exit(1)

    # Scale all per-block quantities to per-cacheline quantities.
    port_cycles = dict([(i[0], i[1] * block_to_cl_ratio) for i in list(port_cycles.items())])
    uops = uops * block_to_cl_ratio
    cl_throughput = block_throughput * block_to_cl_ratio

    # Compile most relevant information
    T_OL = max([v for k, v in list(port_cycles.items())
                if k in self.machine['overlapping model']['ports']])
    T_nOL = max([v for k, v in list(port_cycles.items())
                 if k in self.machine['non-overlapping model']['ports']])

    # Use IACA throughput prediction if it is slower then T_nOL
    if T_nOL < cl_throughput:
        T_OL = cl_throughput

    # Create result dictionary
    self.results = {
        'port cycles': port_cycles,
        'cl throughput': self.conv_cy(cl_throughput),
        'uops': uops,
        'T_nOL': T_nOL,
        'T_OL': T_OL,
        'IACA output': incore_analysis['output'],
        'elements_per_block': elements_per_block,
        'pointer_increment': asm_block['pointer_increment'],
        'flops per iteration': sum(self.kernel._flops.values())}
    return self.results
python
{ "resource": "" }
q273675
strip_and_uncomment
test
def strip_and_uncomment(asm_lines):
    """Remove trailing '#' comments and surrounding whitespace from each line."""
    return [line.split('#')[0].strip() for line in asm_lines]
python
{ "resource": "" }
q273676
strip_unreferenced_labels
test
def strip_unreferenced_labels(asm_lines):
    """Blank out label lines that no other assembly line references."""
    result = []
    for line in asm_lines:
        if re.match(r'^\S+:', line):
            # This line defines a label.
            label = line[:line.find(':')]
            reference = r'^[^#]*\s' + re.escape(label) + '[\s,]?.*$'
            # A reference requires whitespace before the label, so the label
            # definition line itself never counts as a reference.
            if not any(re.match(reference, candidate) for candidate in asm_lines):
                line = ''
        result.append(line)
    return result
python
{ "resource": "" }
q273677
select_best_block
test
def select_best_block(blocks):
    """Pick the most promising assembly block index via a simple heuristic."""
    # TODO make this cleverer with more stats
    if not blocks:
        raise ValueError("No suitable blocks were found in assembly.")

    # Primary criterion: most packed (SIMD) instructions.
    best = max(blocks, key=lambda b: b[1]['packed_instr'])

    if best[1]['packed_instr'] == 0:
        # No packed instructions anywhere: fall back to a broader score of
        # total ops and vector-register usage (widest registers first).
        def fallback_score(b):
            stats = b[1]
            return (stats['ops'] + stats['packed_instr'] + stats['avx_instr'],
                    stats['ZMM'], stats['YMM'], stats['XMM'])
        best = max(blocks, key=fallback_score)
    return best[0]
python
{ "resource": "" }
q273678
userselect_increment
test
def userselect_increment(block):
    """Interactively ask the user for the store-pointer byte increment."""
    print("Selected block:")
    print('\n ' + ('\n '.join(block['lines'])))
    print()

    # Re-prompt until the user enters a valid integer.
    while True:
        answer = input("Choose store pointer increment (number of bytes): ")
        try:
            increment = int(answer)
            break
        except ValueError:
            continue

    block['pointer_increment'] = increment
    return increment
python
{ "resource": "" }
q273679
userselect_block
test
def userselect_block(blocks, default=None, debug=False):
    """Let user interactively select block.

    Prints a summary table of all candidate blocks (instruction and register
    statistics, detected pointer increment) and prompts for an index.
    *default* is used when the user just presses enter. With *debug*, the raw
    code lines and metadata of each block are printed as well.
    Returns the chosen block index.
    """
    print("Blocks found in assembly file:")
    print(" block | OPs | pck. | AVX || Registers | ZMM | YMM | XMM | GP ||ptr.inc|\n"
          "----------------+-----+------+-----++-----------+----------+----------+----------+---------++-------|")
    for idx, b in blocks:
        print('{:>2} {b[labels]!r:>12} | {b[ops]:>3} | {b[packed_instr]:>4} | {b[avx_instr]:>3} |'
              '| {b[regs][0]:>3} ({b[regs][1]:>3}) | {b[ZMM][0]:>3} ({b[ZMM][1]:>2}) | '
              '{b[YMM][0]:>3} ({b[YMM][1]:>2}) | '
              '{b[XMM][0]:>3} ({b[XMM][1]:>2}) | {b[GP][0]:>2} ({b[GP][1]:>2}) || '
              '{b[pointer_increment]!s:>5} |'.format(idx, b=b))
        if debug:
            # Dump the block's source lines with running line numbers.
            ln = b['first_line']
            print(' '*4 + 'Code:')
            for l in b['lines']:
                print(' '*8 + '{:>5} | {}'.format(ln, l))
                ln += 1
            print(' '*4 + 'Metadata:')
            print(textwrap.indent(
                pformat({k: v for k, v in b.items() if k not in ['lines']}),
                ' '*8))

    # Let user select block:
    block_idx = -1
    while not (0 <= block_idx < len(blocks)):
        # Empty input falls back to *default*; invalid input re-prompts.
        block_idx = input("Choose block to be marked [" + str(default) + "]: ") or default
        try:
            block_idx = int(block_idx)
        except ValueError:
            block_idx = -1
    # block = blocks[block_idx][1]

    return block_idx
python
{ "resource": "" }
q273680
insert_markers
test
def insert_markers(asm_lines, start_line, end_line):
    """Insert IACA start/end markers around lines [start_line, end_line]."""
    head = asm_lines[:start_line]
    marked_body = asm_lines[start_line:end_line + 1]
    tail = asm_lines[end_line + 1:]
    return head + START_MARKER + marked_body + END_MARKER + tail
python
{ "resource": "" }
q273681
iaca_instrumentation
test
def iaca_instrumentation(input_file, output_file,
                         block_selection='auto',
                         pointer_increment='auto_with_manual_fallback',
                         debug=False):
    """
    Add IACA markers to an assembly file.

    If instrumentation fails because loop increment could not determined automatically,
    a ValueError is raised.

    :param input_file: file-like object to read from
    :param output_file: file-like object to write to
    :param block_selection: index of the assembly block to instrument, or 'auto'
                            for automatically using block with the most vector instructions,
                            or 'manual' to read index to prompt user
    :param pointer_increment: number of bytes the pointer is incremented after the loop or
                              - 'auto': automatic detection, otherwise RuntimeError is raised
                              - 'auto_with_manual_fallback': like auto with fallback to manual input
                              - 'manual': prompt user
    :param debug: output additional internal analysis information. Only works with manual selection.
    :return: the instrumented assembly block
    """
    assembly_orig = input_file.readlines()

    # If input and output files are the same, overwrite with output
    if input_file is output_file:
        output_file.seek(0)
        output_file.truncate()

    # Debug information is only printed during manual selection, so force it.
    if debug:
        block_selection = 'manual'

    # Analysis runs on a cleaned copy; markers are later inserted into the
    # untouched original lines (assembly_orig).
    assembly = strip_and_uncomment(copy(assembly_orig))
    assembly = strip_unreferenced_labels(assembly)
    blocks = find_asm_blocks(assembly)

    # Resolve the requested block-selection mode to a concrete block index.
    if block_selection == 'auto':
        block_idx = select_best_block(blocks)
    elif block_selection == 'manual':
        block_idx = userselect_block(blocks, default=select_best_block(blocks), debug=debug)
    elif isinstance(block_selection, int):
        block_idx = block_selection
    else:
        raise ValueError("block_selection has to be an integer, 'auto' or 'manual' ")
    block = blocks[block_idx][1]

    # Resolve the pointer increment according to the requested mode.
    if pointer_increment == 'auto':
        if block['pointer_increment'] is None:
            raise RuntimeError("pointer_increment could not be detected automatically. Use "
                               "--pointer-increment to set manually to byte offset of store "
                               "pointer address between consecutive assembly block iterations.")
    elif pointer_increment == 'auto_with_manual_fallback':
        if block['pointer_increment'] is None:
            block['pointer_increment'] = userselect_increment(block)
    elif pointer_increment == 'manual':
        block['pointer_increment'] = userselect_increment(block)
    elif isinstance(pointer_increment, int):
        block['pointer_increment'] = pointer_increment
    else:
        raise ValueError("pointer_increment has to be an integer, 'auto', 'manual' or "
                         "'auto_with_manual_fallback' ")

    # Markers go around the block's original (uncleaned) line range.
    instrumented_asm = insert_markers(assembly_orig, block['first_line'], block['last_line'])
    output_file.writelines(instrumented_asm)

    return block
python
{ "resource": "" }
q273682
main
test
def main():
    """Execute command line interface.

    Parses the CLI arguments and instruments the given assembly file with
    IACA markers via iaca_instrumentation(), using manual block selection.
    """
    parser = argparse.ArgumentParser(
        description='Find and analyze basic loop blocks and mark for IACA.',
        epilog='For help, examples, documentation and bug reports go to:\nhttps://github.com'
               '/RRZE-HPC/kerncraft\nLicense: AGPLv3')
    parser.add_argument('--version', action='version', version='%(prog)s {}'.format(__version__))
    parser.add_argument('source', type=argparse.FileType(), nargs='?', default=sys.stdin,
                        help='assembly file to analyze (default: stdin)')
    parser.add_argument('--outfile', '-o', type=argparse.FileType('w'), nargs='?',
                        default=sys.stdout, help='output file location (default: stdout)')
    parser.add_argument('--debug', action='store_true',
                        # FIX: help text previously read "nternal"
                        help='Output internal analysis information for debugging.')
    args = parser.parse_args()

    # pointer_increment is given, since it makes no difference on the command line and requires
    # less user input
    iaca_instrumentation(input_file=args.source, output_file=args.outfile,
                         block_selection='manual', pointer_increment=1, debug=args.debug)
python
{ "resource": "" }
q273683
simulate
test
def simulate(kernel, model, define_dict, blocking_constant, blocking_length):
    """Setup and execute model with given blocking length"""
    kernel.clear_state()

    # Feed the constants given on the command line into the kernel, then
    # override the blocking constant with the length under test.
    for name, value in define_dict.items():
        kernel.set_constant(name, value)
    kernel.set_constant(blocking_constant, blocking_length)

    model.analyze()
    # Total cycle count over all (description, cycles) result entries.
    return sum(cycles for _, cycles in model.results['cycles'])
python
{ "resource": "" }
q273684
space
test
def space(start, stop, num, endpoint=True, log=False, base=10):
    """
    Yield *num* evenly spaced integers over an interval.

    Numbers can either be evenly distributed in a linear space (if *log* is False) or in a log
    space (if *log* is True). If *log* is True, *base* is used to define the log space basis.

    If *endpoint* is True, *stop* will be the last returned value, as long as *num* >= 2.

    :raises AssertionError: if start/stop/num are not ints or num < 2.
    """
    # NOTE: deliberately exact type checks (rejects bool, numpy ints, ...),
    # matching the original contract.  Typos in the messages fixed.
    assert type(start) is int and type(stop) is int and type(num) is int, \
        "start, stop and num need to be integers"
    assert num >= 2, "num has to be at least 2"

    if log:
        # Work in exponent space; values are mapped back through base**x below.
        start = math.log(start, base)
        stop = math.log(stop, base)

    if endpoint:
        step_length = float(stop - start) / float(num - 1)
    else:
        step_length = float(stop - start) / float(num)

    for i in range(num):
        if log:
            yield int(round(base ** (start + i * step_length)))
        else:
            yield int(round(start + i * step_length))
python
{ "resource": "" }
q273685
get_last_modified_datetime
test
def get_last_modified_datetime(dir_path=os.path.dirname(__file__)):
    """Return datetime object of latest change in kerncraft module directory."""
    latest = 0
    for root, dirs, files in os.walk(dir_path):
        for fname in files:
            full_path = os.path.join(root, fname)
            try:
                mtime = os.stat(full_path).st_mtime
            except FileNotFoundError:
                # File vanished between walk() and stat(); ignore it.
                continue
            if mtime > latest:
                latest = mtime
    # latest == 0 (no files found) maps to the 1970 epoch.
    return datetime.utcfromtimestamp(latest)
python
{ "resource": "" }
q273686
check_arguments
test
def check_arguments(args, parser):
    """Check arguments passed by user that are not checked by argparse itself.

    Mutates *args* in place: coerces --asm-block to int where applicable and
    fills in a default unit.  Calls parser.error() on invalid --asm-block.
    """
    # --asm-block accepts the two keywords, otherwise it must parse as an int.
    if args.asm_block not in ['auto', 'manual']:
        try:
            args.asm_block = int(args.asm_block)
        except ValueError:
            parser.error('--asm-block can only be "auto", "manual" or an integer')

    # Default unit depends on which performance model was requested.
    if not args.unit:
        wants_roofline = 'Roofline' in args.pmodel or 'RooflineIACA' in args.pmodel
        args.unit = 'FLOP/s' if wants_roofline else 'cy/CL'
python
{ "resource": "" }
q273687
main
test
def main():
    """Initialize and run command line interface."""
    parser = create_parser()        # build the argparse CLI definition
    args = parser.parse_args()      # parse sys.argv
    check_arguments(args, parser)   # validation beyond what argparse covers
    run(parser, args)               # hand over to the business logic
python
{ "resource": "" }
q273688
main
test
def main():
    """Command line interface of picklemerge.

    Loads the destination pickle, recursively merges every source pickle into
    it via update(), and rewrites the destination file in place.
    """
    parser = argparse.ArgumentParser(
        description='Recursively merges two or more pickle files. Only supports pickles consisting '
                    'of a single dictionary object.')
    parser.add_argument('destination', type=argparse.FileType('r+b'),
                        help='File to write to and include in resulting pickle. (WILL BE CHANGED)')
    parser.add_argument('source', type=argparse.FileType('rb'), nargs='+',
                        help='File to include in resulting pickle.')
    args = parser.parse_args()

    result = pickle.load(args.destination)
    # FIX: collections.Mapping was removed in Python 3.10; the ABC lives in
    # collections.abc.
    assert isinstance(result, collections.abc.Mapping), "only Mapping types can be handled."
    for s in args.source:
        data = pickle.load(s)
        assert isinstance(data, collections.abc.Mapping), "only Mapping types can be handled."
        update(result, data)

    # Rewrite the destination file in place with the merged mapping.
    args.destination.seek(0)
    args.destination.truncate()
    pickle.dump(result, args.destination)
python
{ "resource": "" }
q273689
symbol_pos_int
test
def symbol_pos_int(*args, **kwargs):
    """Create a sympy.Symbol with positive and integer assumptions."""
    # Force the two assumptions regardless of what the caller passed.
    assumptions = dict(kwargs)
    assumptions['positive'] = True
    assumptions['integer'] = True
    return sympy.Symbol(*args, **assumptions)
python
{ "resource": "" }
q273690
transform_multidim_to_1d_decl
test
def transform_multidim_to_1d_decl(decl):
    """
    Transform ast of multidimensional declaration to a single dimension declaration.

    In-place operation!

    Returns name and dimensions of array (to be used with transform_multidim_to_1d_ref())
    """
    # Walk down nested ArrayDecl nodes, collecting one dimension per level.
    dims = []
    node = decl.type
    while type(node) is c_ast.ArrayDecl:
        dims.append(node.dim)
        node = node.type

    if dims:
        # Multidimensional array: collapse into a single dimension that is the
        # product of all collected dimensions, and splice out the inner levels.
        decl.type.dim = reduce(lambda l, r: c_ast.BinaryOp('*', l, r), dims)
        decl.type.type = node

    return decl.name, dims
python
{ "resource": "" }
q273691
transform_multidim_to_1d_ref
test
def transform_multidim_to_1d_ref(aref, dimension_dict):
    """
    Transform ast of multidimensional reference to a single dimension reference.

    In-place operation!

    :param aref: c_ast.ArrayRef node, possibly nested (a[i][j][k]).
    :param dimension_dict: maps array name -> list of dimension expressions,
        presumably as returned by transform_multidim_to_1d_decl() -- TODO confirm.
    """
    # Collect the subscript of every nesting level; iteration starts at the
    # outermost ArrayRef, so dims ends up ordered outermost-subscript first.
    dims = []
    name = aref
    while type(name) is c_ast.ArrayRef:
        dims.append(name.subscript)
        name = name.name

    # Build the linearized index: each subscript (beyond the first collected
    # one) is scaled by the product of a reversed slice of the array's
    # dimensions taken from dimension_dict.
    subscript_list = []
    for i, d in enumerate(dims):
        if i == 0:
            subscript_list.append(d)
        else:
            # dimension_dict[...][-1:-i-1:-1] picks the last i dimensions in
            # reverse order; their product is the stride for this subscript.
            subscript_list.append(c_ast.BinaryOp('*', d, reduce(
                lambda l, r: c_ast.BinaryOp('*', l, r),
                dimension_dict[name.name][-1:-i-1:-1])))

    # Sum all scaled subscripts into one flat index and rewrite the node.
    aref.subscript = reduce(
        lambda l, r: c_ast.BinaryOp('+', l, r), subscript_list)
    aref.name = name
python
{ "resource": "" }
q273692
find_node_type
test
def find_node_type(ast, node_type):
    """Return a flat list of all nodes of exactly *node_type* in *ast* (depth-first)."""
    # Exact type match (not isinstance) -- subclasses are deliberately excluded.
    if type(ast) is node_type:
        return [ast]
    if ast is None:
        return []
    if type(ast) is list:
        children = ast
    else:
        # pycparser-style nodes: children() yields (name, node) pairs.
        children = [child for _, child in ast.children()]
    matches = []
    for child in children:
        matches += find_node_type(child, node_type)
    return matches
python
{ "resource": "" }
q273693
force_iterable
test
def force_iterable(f):
    """Will make any functions return an iterable objects by wrapping its result in a list."""
    def wrapper(*args, **kwargs):
        result = f(*args, **kwargs)
        # Already-iterable results (including strings) pass through unchanged.
        return result if hasattr(result, '__iter__') else [result]
    return wrapper
python
{ "resource": "" }
q273694
Kernel.check
test
def check(self):
    """Check that information about kernel makes sense and is valid.

    Asserts that all variables share a single datatype; an empty variable
    dict is accepted.
    """
    distinct_types = {info[0] for info in self.variables.values()}
    assert len(distinct_types) <= 1, 'mixing of datatypes within a kernel is not supported.'
python
{ "resource": "" }
q273695
Kernel.set_constant
test
def set_constant(self, name, value):
    """
    Set constant of name to value.

    :param name: may be a str or a sympy.Symbol
    :param value: must be an int
    """
    assert isinstance(name, (str, sympy.Symbol)), \
        "constant name needs to be of type str, unicode or a sympy.Symbol"
    assert type(value) is int, "constant value needs to be of type int"

    # String names are converted to positive-integer sympy symbols so that
    # all keys in self.constants are symbols.
    key = name if isinstance(name, sympy.Symbol) else symbol_pos_int(name)
    self.constants[key] = value
python
{ "resource": "" }
q273696
Kernel.subs_consts
test
def subs_consts(self, expr):
    """Substitute constants in expression unless it is already a number."""
    # Plain numbers have nothing to substitute and no .subs() method.
    if isinstance(expr, numbers.Number):
        return expr
    return expr.subs(self.constants)
python
{ "resource": "" }
q273697
Kernel.array_sizes
test
def array_sizes(self, in_bytes=False, subs_consts=False):
    """
    Return a dictionary with all arrays sizes.

    :param in_bytes: If True, output will be in bytes, not element counts.
    :param subs_consts: If True, output will be numbers and not symbolic.

    Scalar variables are ignored.
    """
    sizes = {}
    for name, (dtype, dims) in self.variables.items():
        # Scalars have no dimension list and are skipped.
        if dims is None:
            continue
        size = reduce(operator.mul, dims, 1)
        if in_bytes:
            # Scale element count by the datatype's size in bytes.
            size *= self.datatypes_size[dtype]
        sizes[name] = size

    if subs_consts:
        return {k: self.subs_consts(v) for k, v in sizes.items()}
    return sizes
python
{ "resource": "" }
q273698
Kernel._calculate_relative_offset
test
def _calculate_relative_offset(self, name, access_dimensions): """ Return the offset from the iteration center in number of elements. The order of indices used in access is preserved. """ # TODO to be replaced with compile_global_offsets offset = 0 base_dims = self.variables[name][1] for dim, offset_info in enumerate(access_dimensions): offset_type, idx_name, dim_offset = offset_info assert offset_type == 'rel', 'Only relative access to arrays is supported at the moment' if offset_type == 'rel': offset += self.subs_consts( dim_offset*reduce(operator.mul, base_dims[dim+1:], sympy.Integer(1))) else: # should not happen pass return offset
python
{ "resource": "" }
q273699
Kernel._remove_duplicate_accesses
test
def _remove_duplicate_accesses(self): """ Remove duplicate source and destination accesses """ self.destinations = {var_name: set(acs) for var_name, acs in self.destinations.items()} self.sources = {var_name: set(acs) for var_name, acs in self.sources.items()}
python
{ "resource": "" }