Dataset columns:
    function    string (length 11 to 56k)
    repo_name   string (length 5 to 60)
    features    list
def _get_qos_queue(self, context, id):
    try:
        query = self._model_query(context, QosQueue)
        qos_queue = query.filter(QosQueue.id == id).one()
    except exc.NoResultFound:
        raise ext_qos.QosQueueNotFound(id=id)
    return qos_queue
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def _aggregate_rate_of_qos_queue(self, qos_queue):
    if qos_queue.subqueues:
        return reduce(
            lambda x, y: x + y,
            [q.rate for q in qos_queue.subqueues])
    else:
        return 0
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
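A side note on the aggregation above: `reduce` with an addition lambda is equivalent to the built-in `sum`, which is the more idiomatic spelling. A minimal standalone sketch of the equivalence, with illustrative rates (`reduce` must be imported from `functools` on Python 3):

from functools import reduce

rates = [3, 5, 8]
assert reduce(lambda x, y: x + y, rates) == sum(rates) == 16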
def _check_queue_in_qos(self, qos_id, qos_queue):
    if qos_id != qos_queue.qos_id:
        raise ext_qos.QosQueueNotInQos(
            qos_id=qos_id,
            qos_queue_id=qos_queue.qos_id
        )
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def allocate_tc_class_for_queue(context, qos_queue):
    if qos_queue.tc_class:
        return
    tc_class = None
    with context.session.begin(subtransactions=True):
        try:
            if qos_queue.id == qos_queue.qos.default_queue_id:
                tc_class = 65534
            ...
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def _try_allocate_new_tc_class(context, qos):
    select_range = context.session.query(
        QosTcClassRange).join(Qos).with_lockmode('update').first()
    if select_range:
        new_tc_class = select_range['first']
        if select_range['first'] == select_range['last']:
            LOG.d...
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def _rebuild_tc_class_range(context, qos):
    LOG.debug("Rebuilding tc class range for qos: %s", qos.id)
    used_classes = sorted(
        [1, 65534] +
        [queue.tc_class for queue in qos.queues
         if queue.tc_class is not None])
    for index in range(len(used_classes) - 1):
        ...
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def create_qos_queue_bulk(self, context, qos_queue):
    return self._create_bulk('qos_queue', context, qos_queue)
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def update_qos_queue(self, context, id, qos_queue):
    qos_queue = qos_queue['qos_queue']
    with context.session.begin(subtransactions=True):
        qos_queue_db = self._get_qos_queue(context, id)
        if id == qos_queue_db.qos.default_queue_id:
            raise ext_qos.QosQueueCannotEditDe...
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def get_qos_queues(self, context, filters=None, fields=None,
                   sorts=None, limit=None, marker=None,
                   page_reverse=False):
    marker_obj = self._get_marker_obj(context, 'qos_queue', limit, marker)
    return self._get_collection(
        context, QosQueue, self._ma...
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def get_qos_queue(self, context, id, fields=None):
    qos_queue = self._get_qos_queue(context, id)
    return self._make_qos_queue_dict(qos_queue, fields)
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def _make_qos_filter_dict(self, qos_filter, fields=None):
    res = {'id': qos_filter.id,
           'tenant_id': qos_filter.tenant_id,
           'qos_id': qos_filter.qos_id,
           'queue_id': qos_filter.queue_id,
           'prio': qos_filter.prio,
           'protocol': qos_filter.protoc...
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def create_qos_filter_bulk(self, context, qos_filter):
    return self._create_bulk('qos_filter', context, qos_filter)
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def update_qos_filter(self, context, id, qos_filter):
    qos_filter = qos_filter['qos_filter']
    new_prio = qos_filter.get('prio', None)
    with context.session.begin(subtransactions=True):
        qos_filter_db = self._get_qos_filter(context, id)
        if qos_filter.get('queue_id', None) is ...
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def get_qos_filters(self, context, filters=None, fields=None,
                    sorts=None, limit=None, marker=None,
                    page_reverse=False):
    marker_obj = self._get_marker_obj(context, 'qos_filter', limit, marker)
    return self._get_collection(
        context, QosFilter, sel...
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def get_qos_filter(self, context, id, fields=None):
    qos_filter = self._get_qos_filter(context, id)
    return self._make_qos_filter_dict(qos_filter, fields)
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def _is_owner_floatingip(device_owner):
    return device_owner == n_constants.DEVICE_OWNER_FLOATINGIP
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def _make_qos_filter_dict_for_agent(self, qos_filter):
    qos_filter_dict = {'prio': qos_filter.prio,
                       'src_addr': qos_filter.src_addr,
                       'dst_addr': qos_filter.dst_addr}
    if qos_filter.protocol:
        qos_filter_dict['protocol'] = qos_filter.protocol...
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def _get_qos_conf_scheme(self, context, qos):
    root_class = {'rate': qos.rate, 'ceil': qos.rate,
                  'subclasses': [], 'prio': 0}
    if qos.burst:
        root_class['burst'] = qos.burst
    if qos.cburst:
        root_class['cburst'] = qos.cburst
    scheme = {}
    e...
eayunstack/neutron-qos
[ 2, 4, 2, 1, 1431059236 ]
def parallelwrite(slicenumber):
openconnectome/open-connectome
[ 39, 12, 39, 99, 1302639682 ]
def run():
    flypool = multiprocessing.Pool(totalprocs)
    flypool.map(parallelwrite, totalslices, 16)
openconnectome/open-connectome
[ 39, 12, 39, 99, 1302639682 ]
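The `run` helper above fans `parallelwrite` out across a process pool with a chunksize of 16. A self-contained sketch of the same `Pool.map(func, iterable, chunksize)` pattern (the worker count and the `square` function here are illustrative, not from the repo):

import multiprocessing

def square(n):
    return n * n

if __name__ == '__main__':
    pool = multiprocessing.Pool(4)  # four worker processes
    # chunksize=16: each worker pulls batches of 16 items at a time
    results = pool.map(square, range(100), 16)
    pool.close()
    pool.join()
    print(results[:5])  # [0, 1, 4, 9, 16]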
def compute(self, *args, **kwargs):
    self.args = args
    self.kwargs = kwargs
radio-astro-tools/spectral-cube
[ 84, 57, 84, 183, 1397489630 ]
def test_save_to_tmp_dir(data_adv):
    pytest.importorskip('zarr')
    cube = DaskSpectralCube.read(data_adv)
    cube_new = cube.sigma_clip_spectrally(3, save_to_tmp_dir=True)
    # The following test won't necessarily always work in future since the name
    # is not really guaranteed, but this is pragmatic enough f...
radio-astro-tools/spectral-cube
[ 84, 57, 84, 183, 1397489630 ]
def test_statistics(data_adv):
    cube = DaskSpectralCube.read(data_adv).rechunk(chunks=(1, 2, 3))
    stats = cube.statistics()
    assert_quantity_allclose(stats['npts'], 24)
    assert_quantity_allclose(stats['mean'], 0.4941651776136591 * u.K)
    assert_quantity_allclose(stats['sigma'], 0.3021908870982011 * u.K)
    ...
radio-astro-tools/spectral-cube
[ 84, 57, 84, 183, 1397489630 ]
def test_statistics_consistency_casa(data_adv, tmp_path):
    # Similar to test_statistics but compares to CASA directly.
    cube = DaskSpectralCube.read(data_adv)
    stats = cube.statistics()
    make_casa_testimage(data_adv, tmp_path / 'casa.image')
    ia = casatools.image()
    ia.open(str(tmp_path / 'casa.im...
radio-astro-tools/spectral-cube
[ 84, 57, 84, 183, 1397489630 ]
def sum_blocks_spectral(data_chunk):
    return data_chunk.sum(0)
radio-astro-tools/spectral-cube
[ 84, 57, 84, 183, 1397489630 ]
def test_apply_function_parallel_spectral_noncube_withblockinfo(data_adv):
    '''
    Test receiving block_info information from da.map_blocks so we can place
    the chunk's location in the whole cube when needed.

    https://docs.dask.org/en/latest/array-api.html#dask.array.map_blocks
    '''
    chunk_size = (-1...
radio-astro-tools/spectral-cube
[ 84, 57, 84, 183, 1397489630 ]
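The docstring above refers to `da.map_blocks` passing placement metadata through a `block_info` keyword. A minimal standalone sketch of that mechanism, independent of spectral-cube (`block_info[0]['array-location']` lists the chunk's (start, stop) index range per dimension):

import dask.array as da

def add_chunk_offset(x, block_info=None):
    # block_info[0] describes the first input array; 'array-location'
    # holds one (start, stop) pair per dimension for this chunk.
    start, _ = block_info[0]['array-location'][0]
    return x + start

arr = da.zeros(6, chunks=3)
print(arr.map_blocks(add_chunk_offset, dtype=float).compute())
# [0. 0. 0. 3. 3. 3.]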
def test_apply_function_parallel_shape(accepts_chunks):
    # regression test for #772
    def func(x, add=None):
        if add is not None:
            y = x + add
        else:
            raise ValueError("This test is supposed to have add=1")
        return y

    fn = data.get_pkg_data_filename('tests/data/exam...
radio-astro-tools/spectral-cube
[ 84, 57, 84, 183, 1397489630 ]
def test_cube_on_cube(filename, request):
    if 'image' in filename and not CASA_INSTALLED:
        pytest.skip('Requires CASA to be installed')
    dataname = request.getfixturevalue(filename)
    # regression test for #782
    # the regression applies only to VaryingResolutionSpectralCubes
    # since they are not ...
radio-astro-tools/spectral-cube
[ 84, 57, 84, 183, 1397489630 ]
def setUp(self):
    self.main_app = MainApp()
soar-telescope/goodman
[ 13, 11, 13, 54, 1468010085 ]
def __init__(self, env_var_base):
    self.ENV_VAR_BASE = env_var_base
femtotrader/ig-markets-stream-api-python-library
[ 255, 177, 255, 13, 1420221470 ]
def get(self, key, default_value=None):
    env_var = self._env_var(key)
    return os.environ.get(env_var, default_value)
femtotrader/ig-markets-stream-api-python-library
[ 255, 177, 255, 13, 1420221470 ]
def __unicode__(self):
    return u"%s: %s" % (self.team.name, self.name)
F483/bikesurf.org
[ 8, 5, 8, 60, 1413752765 ]
def main(dataset, blockl, local_dir=None, clobber=True):
    """
    Split dataset into time chunks

    Parameters
    ----------
    dataset : str
        Name of MS file to split
    blockl : int
        Number of time slots per chunk
    local_dir : str, optional
        Path to local directory for output of t1.cop...
revoltek/factor
[ 20, 12, 20, 24, 1409837156 ]
def test_0_new_account(self):
    '''create new account * 5000'''
    account_table_file = '/home/%s/.metaverse/mainnet/account_table' % common.get_username()
    origin_payload_size = database.get_payload_size(account_table_file)
    batch_amount = 5000
    lastwords = []
    for i in range(ba...
mvs-live/metaverse
[ 307, 120, 307, 61, 1477813012 ]
def __init__(self, _id, dp_id, conf):
    self.rules = []
    self.exact_match = None
    self.dot1x_assigned = None
    self.meter = False
    self.matches = {}
    self.set_fields = set()
    self._ports_resolved = False
    # Tunnel info maintains the tunnel output information for ea...
REANNZ/faucet
[ 480, 166, 480, 33, 1443148776 ]
def check_config(self):
    test_config_condition(
        not self.rules, 'no rules found for ACL %s' % self._id)
    for rule in self.rules:
        self._check_conf_types(rule, self.rule_types)
        for rule_field, rule_conf in rule.items():
            if rule_field == 'cookie':
                ...
REANNZ/faucet
[ 480, 166, 480, 33, 1443148776 ]
def get_meters(self):
    """Yield meters for each rule in ACL"""
    for rule in self.rules:
        if 'actions' not in rule or 'meter' not in rule['actions']:
            continue
        yield rule['actions']['meter']
REANNZ/faucet
[ 480, 166, 480, 33, 1443148776 ]
def _resolve_ordered_output_ports(self, output_list, resolve_port_cb,
                                  resolve_tunnel_objects):
    """Resolve output actions in the ordered list format"""
    result = []
    for action in output_list:
        for key, value in action.items():
            if key == 'tunnel':
                tunn...
REANNZ/faucet
[ 480, 166, 480, 33, 1443148776 ]
def resolve_ports(self, resolve_port_cb, resolve_tunnel_objects):
    """Resolve the values for the actions of an ACL"""
    if self._ports_resolved:
        return
    for rule_conf in self.rules:
        if 'actions' in rule_conf:
            actions_conf = rule_conf['actions']
            ...
REANNZ/faucet
[ 480, 166, 480, 33, 1443148776 ]
def get_num_tunnels(self):
    """Returns the number of tunnels specified in the ACL"""
    num_tunnels = 0
    for rule_conf in self.rules:
        if self.does_rule_contain_tunnel(rule_conf):
            output_conf = rule_conf['actions']['output']
            if isinstance(output_conf, list):...
REANNZ/faucet
[ 480, 166, 480, 33, 1443148776 ]
def does_rule_contain_tunnel(rule_conf):
    """Return true if the ACL rule contains a tunnel"""
    if 'actions' in rule_conf:
        if 'output' in rule_conf['actions']:
            output_conf = rule_conf['actions']['output']
            if isinstance(output_conf, (list, tuple)):
                ...
REANNZ/faucet
[ 480, 166, 480, 33, 1443148776 ]
def _tunnel_source_id(source):
    """Return ID for a tunnel source."""
    return tuple(sorted(source.items()))
REANNZ/faucet
[ 480, 166, 480, 33, 1443148776 ]
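`_tunnel_source_id` reduces a source mapping to `tuple(sorted(source.items()))`, i.e. a hashable, insertion-order-independent key. A quick illustration of why that works (the dict contents here are made up):

a = {'dp': 'sw1', 'port': 1}
b = {'port': 1, 'dp': 'sw1'}        # same mapping, different insertion order
assert tuple(sorted(a.items())) == tuple(sorted(b.items()))
lookup = {tuple(sorted(a.items())): 'tunnel-source-0'}  # usable as a dict key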
def verify_tunnel_rules(self):
    """Make sure that matches & set fields are configured correctly
    to handle tunnels"""
    if 'eth_type' not in self.matches:
        self.matches['eth_type'] = False
    if 'in_port' not in self.matches:
        self.matches['in_port'] = False
    if 'vlan_vid' ...
REANNZ/faucet
[ 480, 166, 480, 33, 1443148776 ]
def update_source_tunnel_rules(self, curr_dp, source_id, tunnel_id,
                               out_port, output_table):
    """Update the tunnel rulelist for when the output port has changed"""
    src_dp = self.tunnel_sources[source_id]['dp']
    dst_dp = self.tunnel_dests[tunnel_id]['dst_dp']
    prev_list = self.dyn_tunnel_rul...
REANNZ/faucet
[ 480, 166, 480, 33, 1443148776 ]
def HTML_WRAP(app):
    """
    Wraps the Application object's results in HTML
    """
    def gen(environ, start_response):
        """The standard WSGI interface"""
        iterator = app(environ, start_response)
        first_yield = iterator.next()
        yield "<html>\n"
        yield "<body>\n"
        yield fir...
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def normalizeEnviron(environ):
    """
    Converts environ variables to strings for wsgi compliance and deletes
    extraneous fields.
    """
    header_list = []
    header_dict = environ['headers']
    for key in header_dict:
        line = "%s: %s\n" % (key, header_dict[key])
        header_list.append(line)
    ...
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def __init__(self, app_name, app, request, log_writable, WsgiConfig):
    super(_WsgiHandler, self).__init__()
    self.app_name = app_name
    self.request = request
    self.environ = request
    self.app = app
    #self.log_writable = log_writable
    self.log_writable = LogWritable.GetLo...
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def start_response(self, status, response_headers, exc_info=None):
    """
    Method to be passed to WSGI application object
    """
    #TODO: Add more exc_info support
    if exc_info:
        raise exc_info[0], exc_info[1], exc_info[2]
    self.status = status
    self.response_hea...
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
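The `raise exc_info[0], exc_info[1], exc_info[2]` line in `start_response` above is Python 2 syntax. For reference, the conventional Python 3 equivalent under PEP 3333 re-raises the saved exception with its traceback; a hedged sketch of that pattern (not the handler's actual code):

def start_response(status, response_headers, exc_info=None):
    if exc_info:
        try:
            # Python 3 spelling of `raise type, value, traceback`
            raise exc_info[1].with_traceback(exc_info[2])
        finally:
            exc_info = None  # break the reference cycle, per PEP 3333
    # ... record status and response_headers as above ...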
def munge_headers(self):
    for header in self.environ["headers"]:
        cgi_varname = "HTTP_" + header.replace("-", "_").upper()
        self.environ[cgi_varname] = self.environ["headers"][header]
    pprint.pprint(self.environ)
    pprint.pprint(self.environ["headers"])
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def initRequiredVars(self, wsgi_config):
    """
    This method initializes all variables that are required to be present
    (including ones that could possibly be empty).
    """
    self.environ["REQUEST_METHOD"] = self.request["method"]
    # Portion of URL that relates to the application o...
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def unsupportedVars(self):
    """
    Probably won't be used. This is just a list of environment variables
    that aren't implemented as of yet.
    """
    consider = " **CONSIDER ADDING THIS -- eg: "
    self.environ["HTTP_REFERER"] = consider + "-"
    self.environ["SERVER_SIGNATURE"] ...
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def _getWsgiHandler(request):
    requested_uri = sanitizePath(request['raw-uri'], substituted_path)
    print requested_uri
    for url_item in urls.UrlList:
        print 'trying ' + url_item[0]
        if re.search(url_item[0], requested_uri):
            print url_item[0] + ' successful!'
            ...
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def HTTPProtocol():
    def foo(self, **argd):
        print self.routing
        return HTTPServer(requestHandlers(self.routing), **argd)
    return foo
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def __init__(self):
    super(WsgiError, self).__init__()
sparkslabs/kamaelia_
[ 13, 3, 13, 2, 1348148442 ]
def gc(test_result=True):
    """Site-wide garbage collections."""
    def days_ago(days):
        return datetime.today() - timedelta(days=days)

    log.info('Collecting data to delete')
    logs = (
        ActivityLog.objects.filter(created__lt=days_ago(90))
        .exclude(action__in=amo.LOG_KEEP)
        .valu...
mozilla/olympia
[ 810, 542, 810, 201, 1391193855 ]
def __init__(self, layer):
    self.layer = layer
    self.template = loader.get_template(
        'regulations/layers/definition_citation.html')
    self.sectional = False
    self.version = None
    self.rev_urls = SectionUrl()
    self.rendered = {}
    # precomputation for de...
18F/regulations-site
[ 18, 45, 18, 34, 1417818160 ]
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
             port=None, proxy=None, proxy_port=None,
             host=DefaultHost, debug=0, security_token=None,
             validate_certs=True):
    AWSAuthConnection.__init__(self, host, aws_ac...
nanocell/lsync
[ 5, 2, 5, 3, 1359852579 ]
def make_request(self, action, path, headers=None, data='', params=None):
    if params:
        pairs = []
        for key, val in params.iteritems():
            if val is None:
                continue
            pairs.append(key + '=' + urllib.quote(str(val)))
        path += '?' + '&'....
nanocell/lsync
[ 5, 2, 5, 3, 1359852579 ]
def get_all_hosted_zones(self, start_marker=None, zone_list=None):
    """
    Returns a Python data structure with information about all
    Hosted Zones defined for the AWS account.

    :param int start_marker: start marker to pass when fetching additional
        results after a truncated list
    ...
nanocell/lsync
[ 5, 2, 5, 3, 1359852579 ]
def get_hosted_zone_by_name(self, hosted_zone_name):
    """
    Get detailed information about a particular Hosted Zone.

    :type hosted_zone_name: str
    :param hosted_zone_name: The fully qualified domain name for the
        Hosted Zone

    """
    if hosted_zone_name[-1] != '.':
        ...
nanocell/lsync
[ 5, 2, 5, 3, 1359852579 ]
def delete_hosted_zone(self, hosted_zone_id):
    uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
    response = self.make_request('DELETE', uri)
    body = response.read()
    boto.log.debug(body)
    if response.status not in (200, 204):
        raise exception.DNSServerError(respon...
nanocell/lsync
[ 5, 2, 5, 3, 1359852579 ]
def get_all_rrsets(self, hosted_zone_id, type=None,
                   name=None, identifier=None, maxitems=None):
    """
    Retrieve the Resource Record Sets defined for this Hosted Zone.
    Returns the raw XML data returned by the Route53 call.

    :type hosted_zone_id: str
    :param host...
nanocell/lsync
[ 5, 2, 5, 3, 1359852579 ]
def __init__(self):
    super(Serializer, self).__init__({
        'find_isolate': evaluators.SequenceEvaluator(
            [find_isolate.Serializer(), TaskTransformer]),
        'run_test': evaluators.SequenceEvaluator(
            [run_test.Serializer(), TaskTransformer]),
        're...
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def TaskTransformer(task, _, context):
    """Takes the form:

    {
        <task id> : {
            ...
        }
    }

    And turns it into:

    {
        'state': {
            'change': {...}
            'quest': <string>
            'index': <int>
            'add_execution': {...}
        }
    }
    """
    if not context:
        return None

    input_data...
endlessm/chromium-browser
[ 21, 16, 21, 3, 1435959644 ]
def __init__(self,
             row_splits,
             row_lengths=None,
             value_rowids=None,
             nrows=None,
             uniform_row_length=None,
             nvals=None,
             internal=False):
    """Creates a `RowPartition` from the specified encoding tensor(s).

    This ...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def from_value_rowids(cls,
                      value_rowids,
                      nrows=None,
                      validate=True,
                      preferred_dtype=None,
                      dtype=None,
                      dtype_hint=None):
    """Creates a `RowPartition` with rows partitioned by...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def from_row_splits(cls,
                    row_splits,
                    validate=True,
                    preferred_dtype=None,
                    dtype=None,
                    dtype_hint=None):
    """Creates a `RowPartition` with rows partitioned by `row_splits`.

    This `RowPartition` divides a ...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def from_row_lengths(cls,
                     row_lengths,
                     validate=True,
                     preferred_dtype=None,
                     dtype=None,
                     dtype_hint=None):
    """Creates a `RowPartition` with rows partitioned by `row_lengths`.

    This `RowPartition` di...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def from_row_starts(cls,
                    row_starts,
                    nvals,
                    validate=True,
                    preferred_dtype=None,
                    dtype=None,
                    dtype_hint=None):
    """Creates a `RowPartition` with rows partitioned by `row_starts`.

    E...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def from_row_limits(cls,
                    row_limits,
                    validate=True,
                    preferred_dtype=None,
                    dtype=None,
                    dtype_hint=None):
    """Creates a `RowPartition` with rows partitioned by `row_limits`.

    Equivalent to: `from_row_split...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def from_uniform_row_length(cls,
                            uniform_row_length,
                            nvals=None,
                            nrows=None,
                            validate=True,
                            preferred_dtype=None,
                            dtype=None,
                            ...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _convert_row_partition(cls, partition, name, dtype=None, dtype_hint=None):
    """Converts `partition` to Tensors.

    Args:
      partition: A row-partitioning tensor for the `RowPartition` being
        constructed. I.e., one of: row_splits, row_lengths, row_starts,
        row_limits, value_rowids, uniform_row...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def dtype(self):
    """The `DType` used to encode the row partition (either int32 or int64)."""
    return self._row_splits.dtype
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def value_rowids(self):
    """Returns the row indices for this row partition.

    `value_rowids` specifies the row index for each value. In particular,
    `value_rowids[i]` is the row index for `values[i]`.

    Returns:
      A 1-D integer `Tensor` with shape `[self.nvals()]`.
      The returned tensor is nonnegat...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
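To make the two encodings concrete: the same partition can be written as `value_rowids` or as `row_splits`. A small sketch using the `from_value_rowids` constructor shown earlier (the import path is TensorFlow-internal and may move between releases):

from tensorflow.python.ops.ragged.row_partition import RowPartition

# value_rowids [0, 0, 2, 2, 2] with nrows=3 puts values 0-1 in row 0,
# nothing in row 1, and values 2-4 in row 2.
p = RowPartition.from_value_rowids([0, 0, 2, 2, 2], nrows=3)
print(p.row_splits())   # [0 2 2 5] -- the equivalent row_splits encoding
print(p.row_lengths())  # [2 0 3]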
def nrows(self):
    """Returns the number of rows created by this `RowPartition`.

    Returns:
      scalar integer Tensor
    """
    if self._nrows is not None:
        return self._nrows
    nsplits = tensor_shape.dimension_at_index(self._row_splits.shape, 0)
    if nsplits.value is None:
        return array_ops.shap...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def row_starts(self):
    """Returns the start indices for rows in this row partition.

    These indices specify where the values for each row begin.
    `partition.row_starts()` is equal to `partition.row_splits()[:-1]`.

    Returns:
      A 1-D integer Tensor with shape `[self.nrows()]`.
      The returned tensor i...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def row_lengths(self):
    """Returns the lengths of rows in this `RowPartition`.

    Returns:
      A 1-D integer Tensor with shape `[self.nrows]`.
      The returned tensor is nonnegative.
      `tf.reduce_sum(self.row_lengths) == self.nvals()`.
    """
    if self._row_lengths is not None:
        return self._row_le...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def static_nrows(self):
    """The number of rows in this partition, if statically known.

    ```python
    self.row_lengths().shape == [self.static_nrows]
    self.row_starts().shape == [self.static_nrows]
    self.row_limits().shape == [self.static_nrows]
    self.row_splits().shape == [self.static_nrows + 1]
    ```...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def static_nvals(self):
    """The number of values in this partition, if statically known.

    ```python
    self.value_rowids().shape == [self.static_nvals]
    ```

    Returns:
      The number of values in this partition as an `int` (if statically
      known); or `None` (otherwise).
    """
    if self._nvals is ...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def static_uniform_row_length(self):
    """The number of values in each row of this partition, if statically known.

    Returns:
      The number of values in each row of this partition as an `int` (if
      statically known); or `None` (otherwise).
    """
    if self._uniform_row_length is not None:
        return te...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def is_uniform(self):
    """Returns true if the partition is known to be uniform statically.

    This is based upon the existence of self._uniform_row_length. For example:

    RowPartition.from_row_lengths([3, 3, 3]).is_uniform() == false
    RowPartition.from_uniform_row_length(5, nvals=20).is_uniform() == true
    RowPar...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def with_row_splits_dtype(self, dtype):
    """Returns a copy of this RowPartition with the given `row_splits` dtype.

    Args:
      dtype: The dtype for `row_splits`. One of `tf.int32` or `tf.int64`.

    Returns:
      A copy of this RowPartition, with the `row_splits` cast to the given type.
    """
    dtype = d...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def __repr__(self):
    if self._uniform_row_length is not None:
        return (f"tf.RowPartition(nrows={self._nrows}, "
                f"uniform_row_length={self._uniform_row_length})")
    else:
        return f"tf.RowPartition(row_splits={self._row_splits})"
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def has_precomputed_row_splits(self):
    """Returns true if `row_splits` has already been computed.

    If true, then `self.row_splits()` will return its value without calling
    any TensorFlow ops.
    """
    return self._row_splits is not None
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def has_precomputed_value_rowids(self):
    """Returns true if `value_rowids` has already been computed.

    If true, then `self.value_rowids()` will return its value without calling
    any TensorFlow ops.
    """
    return self._value_rowids is not None
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def has_precomputed_nvals(self):
    """Returns true if `nvals` has already been computed.

    If true, then `self.nvals()` will return its value without calling any
    TensorFlow ops.
    """
    return self._nvals is not None
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def with_precomputed_row_lengths(self):
    """Returns a copy of `self` with `row_lengths` precomputed."""
    return RowPartition(
        row_splits=self._row_splits,
        row_lengths=self.row_lengths(),
        value_rowids=self._value_rowids,
        nrows=self._nrows,
        nvals=self._nvals,
        uniform_...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def with_precomputed_nrows(self):
    """Returns a copy of `self` with `nrows` precomputed."""
    return RowPartition(
        row_splits=self._row_splits,
        row_lengths=self._row_lengths,
        value_rowids=self._value_rowids,
        nrows=self.nrows(),
        nvals=self._nvals,
        uniform_row_length=s...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def merge_precomputed_encodings(self, other, validate=True):
    """Returns a RowPartition that merges encodings from `self` and `other`.

    Requires that `self` and `other` describe the same partition.

    Args:
      other: A `RowPartition` that encodes the same partition as `self`.
      validate: If true, then a...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _type_spec(self):
    return RowPartitionSpec.from_value(self)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def __init__(self,
             nrows=None,
             nvals=None,
             uniform_row_length=None,
             dtype=dtypes.int64):
    """Constructs a new RowPartitionSpec.

    Args:
      nrows: The number of rows in the RowPartition, or `None` if unspecified.
      nvals: The number of values parti...
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _serialize(self):
    return (self._nrows, self._nvals, self._uniform_row_length, self._dtype)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _deserialize(cls, serialization):
    # Remove TensorShape wrappers from serialization.
    (nrows, nvals, uniform_row_length, dtype) = serialization
    nrows = tensor_shape.dimension_value(nrows[0])
    nvals = tensor_shape.dimension_value(nvals[0])
    return cls(nrows, nvals, uniform_row_length, dtype)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def nrows(self):
    return tensor_shape.dimension_value(self._nrows[0])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def nvals(self):
    return tensor_shape.dimension_value(self._nvals[0])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def uniform_row_length(self):
    return tensor_shape.dimension_value(self._uniform_row_length[0])
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def dtype(self):
    return self._dtype
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _component_specs(self):
    row_splits_shape = tensor_shape.TensorShape(
        [tensor_shape.dimension_at_index(self._nrows, 0) + 1])
    return tensor_spec.TensorSpec(row_splits_shape, self._dtype)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def _from_components(self, tensor):
    return RowPartition.from_row_splits(tensor, validate=False)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]
def from_value(cls, value):
    if not isinstance(value, RowPartition):
        raise TypeError("Expected `value` to be a `RowPartition`")
    return cls(value.static_nrows,
               value.static_nvals,
               value.static_uniform_row_length,
               value.dtype)
tensorflow/tensorflow
[ 171949, 87931, 171949, 2300, 1446859160 ]