function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def sample_add_context_children():
    """Add children to a context via the Vertex AI MetadataService."""
    # Build the service client.
    client = aiplatform_v1.MetadataServiceClient()

    # Assemble the request payload.
    req = aiplatform_v1.AddContextChildrenRequest(
        context="context_value",
    )

    # Issue the RPC and show the response.
    print(client.add_context_children(request=req))
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_query_context_lineage_subgraph():
    """Query the lineage subgraph of a context."""
    # Build the service client.
    client = aiplatform_v1.MetadataServiceClient()

    # Assemble the request payload.
    req = aiplatform_v1.QueryContextLineageSubgraphRequest(
        context="context_value",
    )

    # Issue the RPC and show the response.
    print(client.query_context_lineage_subgraph(request=req))
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_create_execution():
    """Create a metadata execution."""
    # Build the service client.
    client = aiplatform_v1.MetadataServiceClient()

    # Assemble the request payload.
    req = aiplatform_v1.CreateExecutionRequest(
        parent="parent_value",
    )

    # Issue the RPC and show the response.
    print(client.create_execution(request=req))
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_get_execution():
    """Fetch a single metadata execution by name."""
    # Build the service client.
    client = aiplatform_v1.MetadataServiceClient()

    # Assemble the request payload.
    req = aiplatform_v1.GetExecutionRequest(
        name="name_value",
    )

    # Issue the RPC and show the response.
    print(client.get_execution(request=req))
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_list_executions():
    """List metadata executions and print every page item."""
    # Build the service client.
    client = aiplatform_v1.MetadataServiceClient()

    # Assemble the request payload.
    req = aiplatform_v1.ListExecutionsRequest(
        parent="parent_value",
    )

    # Iterate over the paged results.
    for item in client.list_executions(request=req):
        print(item)
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_update_execution():
    """Update a metadata execution (empty request shown for brevity)."""
    # Build the service client.
    client = aiplatform_v1.MetadataServiceClient()

    # Assemble the (here empty) request payload.
    req = aiplatform_v1.UpdateExecutionRequest()

    # Issue the RPC and show the response.
    print(client.update_execution(request=req))
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_delete_execution():
    """Delete an execution and block on the long-running operation."""
    # Build the service client.
    client = aiplatform_v1.MetadataServiceClient()

    # Assemble the request payload.
    req = aiplatform_v1.DeleteExecutionRequest(
        name="name_value",
    )

    # Start the long-running operation and wait for its result.
    op = client.delete_execution(request=req)
    print("Waiting for operation to complete...")
    print(op.result())
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_purge_executions():
    """Purge executions matching a filter; waits on the LRO."""
    # Build the service client.
    client = aiplatform_v1.MetadataServiceClient()

    # Assemble the request payload.
    req = aiplatform_v1.PurgeExecutionsRequest(
        parent="parent_value",
        filter="filter_value",
    )

    # Start the long-running operation and wait for its result.
    op = client.purge_executions(request=req)
    print("Waiting for operation to complete...")
    print(op.result())
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_add_execution_events():
    """Attach events to an execution."""
    # Build the service client.
    client = aiplatform_v1.MetadataServiceClient()

    # Assemble the request payload.
    req = aiplatform_v1.AddExecutionEventsRequest(
        execution="execution_value",
    )

    # Issue the RPC and show the response.
    print(client.add_execution_events(request=req))
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_query_execution_inputs_and_outputs():
    """Query the inputs and outputs of an execution."""
    # Build the service client.
    client = aiplatform_v1.MetadataServiceClient()

    # Assemble the request payload.
    req = aiplatform_v1.QueryExecutionInputsAndOutputsRequest(
        execution="execution_value",
    )

    # Issue the RPC and show the response.
    print(client.query_execution_inputs_and_outputs(request=req))
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_create_metadata_schema():
    """Create a metadata schema under a parent resource."""
    # Build the service client.
    client = aiplatform_v1.MetadataServiceClient()

    # Populate the schema object that the request carries.
    schema = aiplatform_v1.MetadataSchema()
    schema.schema = "schema_value"

    # Assemble the request payload.
    req = aiplatform_v1.CreateMetadataSchemaRequest(
        parent="parent_value",
        metadata_schema=schema,
    )

    # Issue the RPC and show the response.
    print(client.create_metadata_schema(request=req))
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_get_metadata_schema():
    """Fetch a metadata schema by name."""
    # Build the service client.
    client = aiplatform_v1.MetadataServiceClient()

    # Assemble the request payload.
    req = aiplatform_v1.GetMetadataSchemaRequest(
        name="name_value",
    )

    # Issue the RPC and show the response.
    print(client.get_metadata_schema(request=req))
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_list_metadata_schemas():
    """List metadata schemas and print every page item."""
    # Build the service client.
    client = aiplatform_v1.MetadataServiceClient()

    # Assemble the request payload.
    req = aiplatform_v1.ListMetadataSchemasRequest(
        parent="parent_value",
    )

    # Iterate over the paged results.
    for item in client.list_metadata_schemas(request=req):
        print(item)
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_query_artifact_lineage_subgraph():
    """Query the lineage subgraph of an artifact."""
    # Build the service client.
    client = aiplatform_v1.MetadataServiceClient()

    # Assemble the request payload.
    req = aiplatform_v1.QueryArtifactLineageSubgraphRequest(
        artifact="artifact_value",
    )

    # Issue the RPC and show the response.
    print(client.query_artifact_lineage_subgraph(request=req))
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def escape_name(name: str) -> str:
    """Lower-case *name* and replace spaces with underscores."""
    lowered = name.lower()
    return lowered.replace(' ', '_')
lowRISC/opentitan
[ 1741, 534, 1741, 1020, 1566837016 ]
def _get_awparam_name(iface_name: Optional[str]) -> str:
    """Return the AW parameter name for an interface ('Iface' if unnamed)."""
    base = iface_name or 'Iface'
    return base.capitalize() + 'Aw'
lowRISC/opentitan
[ 1741, 534, 1741, 1020, 1566837016 ]
def get_type_name_pfx(block: IpBlock, iface_name: Optional[str]) -> str:
    """Lower-case type-name prefix: '<block>' or '<block>_<iface>'."""
    pfx = block.name.lower()
    if iface_name is not None:
        pfx += '_{}'.format(iface_name.lower())
    return pfx
lowRISC/opentitan
[ 1741, 534, 1741, 1020, 1566837016 ]
def get_iface_tx_type(block: IpBlock, iface_name: Optional[str], hw2reg: bool) -> str:
    """Name of the hw2reg/reg2hw struct type for this register interface."""
    direction = 'hw2reg' if hw2reg else 'reg2hw'
    prefix = get_type_name_pfx(block, iface_name)
    return '{}_{}_t'.format(prefix, direction)
lowRISC/opentitan
[ 1741, 534, 1741, 1020, 1566837016 ]
def gen_rtl(block: IpBlock, outdir: str) -> int:
    """Render the reg_pkg and reg_top SystemVerilog files for *block*.

    :param block: The IP block whose register RTL is generated.
    :param outdir: Directory receiving the generated .sv files.
    :returns: 0 on success, 1 if a Mako template failed to render
        (the template error text is logged).
    """
    # Read Register templates
    reg_top_tpl = Template(
        filename=resource_filename('reggen', 'reg_top.sv.tpl'))
    reg_pkg_tpl = Template(
        filename=resource_filename('reggen', 'reg_pkg.sv.tpl'))

    # Generate <block>_reg_pkg.sv
    #
    # This defines the various types used to interface between the *_reg_top
    # module(s) and the block itself.
    reg_pkg_path = os.path.join(outdir, block.name.lower() + "_reg_pkg.sv")
    with open(reg_pkg_path, 'w', encoding='UTF-8') as fout:
        try:
            fout.write(reg_pkg_tpl.render(block=block))
        except:  # noqa F722 for template Exception handling
            log.error(exceptions.text_error_template().render())
            return 1

    # Generate the register block implementation(s). For a device interface
    # with no name we generate the register module "<block>_reg_top" (writing
    # to <block>_reg_top.sv). In any other case, we also need the interface
    # name, giving <block>_<ifname>_reg_top.
    lblock = block.name.lower()
    for if_name, rb in block.reg_blocks.items():
        if if_name is None:
            mod_base = lblock
        else:
            mod_base = lblock + '_' + if_name.lower()
        mod_name = mod_base + '_reg_top'
        reg_top_path = os.path.join(outdir, mod_name + '.sv')
        with open(reg_top_path, 'w', encoding='UTF-8') as fout:
            try:
                fout.write(reg_top_tpl.render(block=block,
                                              mod_base=mod_base,
                                              mod_name=mod_name,
                                              if_name=if_name,
                                              rb=rb))
            except:  # noqa F722 for template Exception handling
                log.error(exceptions.text_error_template().render())
                return 1
    return 0
lowRISC/opentitan
[ 1741, 534, 1741, 1020, 1566837016 ]
def __init__(self, json):
    """Initialize key-management config from a parsed-JSON dict.

    :param json: dict with optional 'enabled', 'primaryKey' and
        'secondaryKey' entries; missing entries become None.
    :raises GalaxyFDSClientException: if *json* is None.
    """
    if json is None:
        raise GalaxyFDSClientException("Json data cannot be None")
    # dict.get() returns None for absent keys, which replaces the
    # original's three repetitive "'k' in json.keys()" if/else chains
    # with identical behavior.
    self.enabled = json.get('enabled')
    self.primary_key = json.get('primaryKey')
    self.secondary_key = json.get('secondaryKey')
XiaoMi/galaxy-fds-sdk-python
[ 49, 22, 49, 2, 1416560004 ]
def enabled(self):
    """Return the stored 'enabled' flag."""
    value = self['enabled']
    return value
XiaoMi/galaxy-fds-sdk-python
[ 49, 22, 49, 2, 1416560004 ]
def enabled(self, enabled):
    """Store *enabled* under the 'enabled' key."""
    self['enabled'] = enabled
XiaoMi/galaxy-fds-sdk-python
[ 49, 22, 49, 2, 1416560004 ]
def primary_key(self):
    """Return the stored primary key."""
    value = self['primaryKey']
    return value
XiaoMi/galaxy-fds-sdk-python
[ 49, 22, 49, 2, 1416560004 ]
def primary_key(self, primary_key):
    """Store *primary_key* under the 'primaryKey' key."""
    self['primaryKey'] = primary_key
XiaoMi/galaxy-fds-sdk-python
[ 49, 22, 49, 2, 1416560004 ]
def secondary_key(self):
    """Return the stored secondary key."""
    value = self['secondaryKey']
    return value
XiaoMi/galaxy-fds-sdk-python
[ 49, 22, 49, 2, 1416560004 ]
def get_dataset( dataset: str, global_batch_size: int, rng: np.ndarray, train_preprocessing_fn: Optional[Callable[[Any], Any]] = None, eval_preprocessing_fn: Optional[Callable[[Any], Any]] = None, num_epochs: Optional[int] = None, filter_fn: Optional[Callable[[Any], Any]] = None, **kwargs,
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def _image_preprocess_fn(features):
    """Scale uint8 images to [0, 1] floats and drop the TFDS 'id' field."""
    if "image" in features:
        # Normalize pixel values from [0, 255] into [0.0, 1.0].
        features["image"] = tf.cast(features["image"], tf.float32) / 255.0
    # The 'id' field shipped by some TFDS datasets breaks JAX.
    features.pop("id", None)
    return features
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def _get_birds200_dataset(
        mode: str,
        rng: np.ndarray) -> Tuple[tf.data.Dataset, tf.data.Dataset, int]:
    """Load the caltech_birds2011 dataset.

    Args:
      mode: Either "train-val" (first 5000 examples of the train split are
        used for training, the remainder for eval) or "train-test" (TFDS
        train/test splits).
      rng: PRNG key used to derive per-host shuffle seeds.

    Returns:
      A (train_ds, eval_ds, num_classes) tuple; num_classes is 200.

    Raises:
      ValueError: If *mode* is not one of the two supported values.
    """
    assert jax.host_count() == 1, (
        "caltech_birds2011 dataset does not support multihost training. "
        "Found {} hosts.".format(jax.host_count()))
    dataset_builder = tfds.builder("caltech_birds2011")
    num_classes = 200
    # Make sure each host uses a different RNG for the training data.
    rng, data_rng = jax.random.split(rng)
    data_rng = jax.random.fold_in(data_rng, jax.host_id())
    data_rng, shuffle_rng = jax.random.split(data_rng)
    if mode == "train-val":
        # tf.data seeds are plain ints, so index into the JAX key.
        read_config = tfds.ReadConfig(shuffle_seed=shuffle_rng[0])
        ds = dataset_builder.as_dataset(
            split="train", shuffle_files=False, read_config=read_config)
        train_ds = ds.take(5000).shuffle(5000, seed=shuffle_rng[0])
        eval_ds = ds.skip(5000)
    elif mode == "train-test":
        train_split = "train"
        eval_split = "test"
        train_read_config = tfds.ReadConfig(shuffle_seed=shuffle_rng[0])
        train_ds = dataset_builder.as_dataset(
            split=train_split, shuffle_files=True,
            read_config=train_read_config)
        eval_read_config = tfds.ReadConfig(shuffle_seed=shuffle_rng[1])
        eval_ds = dataset_builder.as_dataset(
            split=eval_split, shuffle_files=False,
            read_config=eval_read_config)
    else:
        raise ValueError(f"Unknown mode: {mode}.")
    return train_ds, eval_ds, num_classes
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def _prepare_dataset(
        dataset: tf.data.Dataset,
        global_batch_size: int,
        shuffle: bool,
        rng: np.ndarray,
        preprocess_fn: Optional[Callable[[Any], Any]] = None,
        num_epochs: Optional[int] = None,
        filter_fn: Optional[Callable[[Any], Any]] = None) -> tf.data.Dataset:
    """Batches, shuffles, prefetches and preprocesses a dataset.

    Args:
      dataset: The dataset to prepare.
      global_batch_size: The global batch size to use.
      shuffle: Whether the shuffle the data on example level.
      rng: PRNG for seeding the shuffle operations.
      preprocess_fn: Preprocessing function that will be applied to every
        example.
      num_epochs: Number of epochs to repeat the dataset.
      filter_fn: Function that filters samples according to some criteria.

    Returns:
      The dataset.
    """
    if shuffle and rng is None:
        raise ValueError("Shuffling without RNG is not supported.")
    if global_batch_size % jax.host_count() != 0:
        raise ValueError(f"Batch size {global_batch_size} not divisible by number "
                         f"of hosts ({jax.host_count()}).")
    local_batch_size = global_batch_size // jax.host_count()
    # Leading per-host device axis plus per-device batch axis.
    batch_dims = [jax.local_device_count(), local_batch_size]
    # tf.data uses single integers as seed.
    if rng is not None:
        rng = rng[0]
    ds = dataset.repeat(num_epochs)
    if shuffle:
        ds = ds.shuffle(1024, seed=rng)
    if preprocess_fn is not None:
        ds = ds.map(preprocess_fn,
                    num_parallel_calls=tf.data.experimental.AUTOTUNE)
    if filter_fn is not None:
        ds = ds.filter(filter_fn)
    # Innermost batch first, then the device axis.
    for batch_size in reversed(batch_dims):
        ds = ds.batch(batch_size, drop_remainder=True)
    return ds.prefetch(tf.data.experimental.AUTOTUNE)
google-research/google-research
[ 27788, 6881, 27788, 944, 1538678568 ]
def test_forwarding_rules_scanner_all_match(self):
    """All four forwarding rules match the rule file, so no violations."""
    rules_local_path = get_datafile_path(__file__, 'forward_rule_test_1.yaml')
    scanner = forwarding_rule_scanner.ForwardingRuleScanner(
        {}, {}, mock.MagicMock(), '', '', rules_local_path)

    project_id = "abc-123"

    # Four representative rules covering UDP port-range, internal TCP,
    # ESP (no ports) and external TCP port-range cases.
    gcp_forwarding_rules_resource_data = [
        {
            "id": "46",
            "creationTimestamp": "2017-06-01 04:19:37",
            "name": "abc-123",
            "description": "",
            "region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
            "IPAddress": "198.51.100.99",
            "IPProtocol": "UDP",
            "portRange": "4500-4500",
            "ports": [],
            "target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
            "loadBalancingScheme": "EXTERNAL",
        },
        {
            "id": "23",
            "creationTimestamp": "2017-06-01 04:19:37",
            "name": "abc-123",
            "description": "",
            "region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
            "IPAddress": "198.51.100.23",
            "IPProtocol": "TCP",
            "ports": [8080],
            "target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
            "loadBalancingScheme": "INTERNAL",
        },
        {
            "id": "46",
            "creationTimestamp": "2017-06-01 04:19:37",
            "name": "abc-123",
            "description": "",
            "region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
            "IPAddress": "198.51.100.46",
            "IPProtocol": "ESP",
            "ports": [],
            "target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
            "loadBalancingScheme": "EXTERNAL",
        },
        {
            "id": "46",
            "creationTimestamp": "2017-06-01 04:19:37",
            "name": "abc-123",
            "description": "",
            "region": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1",
            "IPAddress": "198.51.100.35",
            "IPProtocol": "TCP",
            "portRange": "4500-4500",
            "target": "https://www.googleapis.com/compute/v1/projects/abc-123/regions/asia-east1/abc-123/abc-123",
            "loadBalancingScheme": "EXTERNAL",
        }
    ]

    # Convert the raw dicts into ForwardingRule objects for the scanner.
    gcp_forwarding_rules_resource_objs = []
    for gcp_forwarding_rule_resource_data in gcp_forwarding_rules_resource_data:
        gcp_forwarding_rules_resource_objs.append(
            fr.ForwardingRule.from_dict(
                project_id, '', gcp_forwarding_rule_resource_data))

    violations = scanner._find_violations(gcp_forwarding_rules_resource_objs)
    self.assertEqual(0, len(violations))
forseti-security/forseti-security
[ 1276, 291, 1276, 17, 1487268086 ]
def __init__(self, *args, **kwargs):
    """Capture constructor arguments and cache the rendered message."""
    self.args = args
    self.kwargs = kwargs
    # str(self) lets subclasses define the message via __str__.
    self.message = str(self)
jimgoo/zipline-fork
[ 1, 3, 1, 2, 1449499397 ]
def __init__(self, root):
    """Validate *root* and set up the (empty) node cache."""
    if not isdir(root):
        raise ValueError("Invalid root directory '%s'" % root)
    self.root = abspath(root)
    self.nodeCache = {}
    self.nodeCachingBehavior = DEFAULT_NODE_CACHING_BEHAVIOR
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def createTree(cls, root, **props):
    """Create and return a new Ceres tree with the given properties.

    :param root: The root directory of the new Ceres tree
    :param props: Arbitrary key-value properties to store as tree metadata
    :returns: :class:`CeresTree`
    """
    ceresDir = join(root, '.ceres-tree')
    if not isdir(ceresDir):
        os.makedirs(ceresDir, DIR_PERMS)

    # One file per property, holding the stringified value.
    for name, value in props.items():
        with open(join(ceresDir, name), 'w') as fh:
            fh.write(str(value))

    return cls(root)
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def getFilesystemPath(self, nodePath):
    """Get the on-disk path of a Ceres node given a metric name

    :param nodePath: A metric name e.g. ``carbon.agents.graphite-a.cpuUsage``
    :returns: The Ceres node path on disk"""
    relative = nodePath.replace('.', os.sep)
    return join(self.root, relative)
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def hasNode(self, nodePath):
    """Returns whether the Ceres tree contains the given metric

    :param nodePath: A metric name e.g. ``carbon.agents.graphite-a.cpuUsage``
    :returns: `True` or `False`"""
    fsPath = self.getFilesystemPath(nodePath)
    return isdir(fsPath)
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def getNode(self, nodePath):
    """Look up a Ceres node, honoring the configured caching behavior.

    Behaviors: 'none' reads from the filesystem on every access;
    'all' (default) caches every node after the first lookup.

    :param nodePath: A metric name
    :returns: :class:`CeresNode` or `None`
    :raises ValueError: for an unknown caching behavior
    """
    behavior = self.nodeCachingBehavior
    if behavior == 'all':
        cached = self.nodeCache.get(nodePath)
        if cached is not None:
            return cached
        fsPath = self.getFilesystemPath(nodePath)
        if not CeresNode.isNodeDir(fsPath):
            return None
        node = CeresNode(self, nodePath, fsPath)
        self.nodeCache[nodePath] = node
        return node

    if behavior == 'none':
        fsPath = self.getFilesystemPath(nodePath)
        if CeresNode.isNodeDir(fsPath):
            return CeresNode(self, nodePath, fsPath)
        return None

    raise ValueError("invalid caching behavior configured '%s'" % behavior)
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def createNode(self, nodePath, **properties):
    """Creates a new metric given a new metric name and optional per-node metadata

    :param nodePath: The new metric name.
    :param properties: Arbitrary key-value properties to store as metric metadata.
    :returns: :class:`CeresNode`
    """
    return CeresNode.create(self, nodePath, **properties)
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def fetch(self, nodePath, fromTime, untilTime):
    """Fetch data within a given interval from the given metric

    :param nodePath: The metric name to fetch from
    :param fromTime: Requested interval start time in unix-epoch.
    :param untilTime: Requested interval end time in unix-epoch.
    :returns: :class:`TimeSeriesData`
    :raises: :class:`NodeNotFound`, :class:`InvalidRequest`
    """
    node = self.getNode(nodePath)
    if not node:
        raise NodeNotFound("the node '%s' does not exist in this tree" % nodePath)
    return node.read(fromTime, untilTime)
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def __init__(self, tree, nodePath, fsPath):
    """Bind a node to its tree, metric name and on-disk directory."""
    self.tree = tree
    self.nodePath = nodePath
    self.fsPath = fsPath
    # Per-node metadata lives in a '.ceres-node' file inside the dir.
    self.metadataFile = join(fsPath, '.ceres-node')
    self.timeStep = None
    self.aggregationMethod = 'average'
    self.sliceCache = None
    self.sliceCachingBehavior = DEFAULT_SLICE_CACHING_BEHAVIOR
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def create(cls, tree, nodePath, **properties): """Create a new :class:`CeresNode` on disk with the specified properties. :param tree: The :class:`CeresTree` this node is associated with :param nodePath: The name of the metric this node represents :param \*\*properties: A set of key-value properties to be associated with this node
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def isNodeDir(path):
    """Tests whether the given path is a :class:`CeresNode`

    :param path: Path to test
    :returns `True` or `False`
    """
    if not isdir(path):
        return False
    return exists(join(path, '.ceres-node'))
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def fromFilesystemPath(cls, fsPath):
    """Instantiate a :class:`CeresNode` from the on-disk path of an existing node

    Walks up the directory hierarchy looking for the '.ceres-tree' marker.

    :params fsPath: The filesystem path of an existing node
    :returns: :class:`CeresNode`
    :raises ValueError: if *fsPath* is not inside a ceres tree
    """
    dirPath = dirname(fsPath)
    while True:
        if isdir(join(dirPath, '.ceres-tree')):
            tree = CeresTree(dirPath)
            nodePath = tree.getNodePath(fsPath)
            return cls(tree, nodePath, fsPath)
        dirPath = dirname(dirPath)
        if dirPath == '/':
            raise ValueError("the path '%s' is not in a ceres tree" % fsPath)
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def slice_info(self):
    """A property providing a list of current information about each slice

    :returns: ``[(startTime, endTime, timeStep), ...]``
    """
    # 'sl' rather than 'slice' so the builtin slice() is not shadowed.
    return [(sl.startTime, sl.endTime, sl.timeStep) for sl in self.slices]
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def writeMetadata(self, metadata):
    """Writes new metadata to disk and refreshes self.timeStep from it.

    :param metadata: a JSON-serializable dict of node metadata
    """
    # timeStep is mandatory; a missing key raises before the file is touched.
    self.timeStep = int(metadata['timeStep'])
    with open(self.metadataFile, 'w') as fh:
        json.dump(metadata, fh)
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def slices(self): """A property providing access to information about this node's underlying slices. Because this
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def readSlices(self):
    """Read slice information from disk

    :returns: ``[(startTime, timeStep), ...]`` sorted newest first
    :raises NodeDeleted: if the node directory no longer exists
    """
    if not exists(self.fsPath):
        raise NodeDeleted()
    info = []
    for entry in os.listdir(self.fsPath):
        if not entry.endswith('.slice'):
            continue
        # Slice files are named '<startTime>@<timeStep>.slice'.
        start, step = entry[:-6].split('@')
        info.append((int(start), int(step)))
    return sorted(info, reverse=True)
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def clearSliceCache(self):
    """Clear slice cache, forcing a refresh from disk at the next access"""
    self.sliceCache = None
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def read(self, fromTime, untilTime):
    """Read data from underlying slices and return as a single time-series

    :param fromTime: Beginning of interval in unix epoch seconds
    :param untilTime: End of interval in unix epoch seconds
    :returns: :class:`TimeSeriesData`
    """
    if self.timeStep is None:
        self.readMetadata()

    # Normalize the timestamps to fit proper intervals
    fromTime = int(fromTime - (fromTime % self.timeStep))
    untilTime = int(untilTime - (untilTime % self.timeStep))

    sliceBoundary = None  # to know when to split up queries across slices
    resultValues = []
    earliestData = None
    timeStep = self.timeStep
    method = self.aggregationMethod

    # Slices are iterated newest-first; results are prepended so the
    # final list is in chronological order.
    for slice in self.slices:
        # If there was a prior slice covering the requested interval, dont ask for that data again
        if (sliceBoundary is not None) and untilTime > sliceBoundary:
            requestUntilTime = sliceBoundary
        else:
            requestUntilTime = untilTime

        # if the requested interval starts after the start of this slice
        if fromTime >= slice.startTime:
            try:
                series = slice.read(fromTime, requestUntilTime)
            except NoData:
                break

            if series.timeStep != timeStep:
                if len(resultValues) == 0:
                    # First slice holding series data, this becomes the default timeStep.
                    timeStep = series.timeStep
                elif series.timeStep < timeStep:
                    # Series is at a different precision, aggregate to fit our current set.
                    series.values = aggregateSeries(method, series.timeStep,
                                                    timeStep, series.values)
                else:
                    # Normalize current set to fit new series data.
                    resultValues = aggregateSeries(method, timeStep,
                                                   series.timeStep, resultValues)
                    timeStep = series.timeStep

            earliestData = series.startTime

            # Pad any gap between this series' end and the request window.
            rightMissing = (requestUntilTime - series.endTime) // timeStep
            rightNulls = [None for i in range(rightMissing)]
            resultValues = series.values + rightNulls + resultValues
            break

        # or if slice contains data for part of the requested interval
        elif untilTime >= slice.startTime:
            try:
                series = slice.read(slice.startTime, requestUntilTime)
            except NoData:
                continue

            if series.timeStep != timeStep:
                if len(resultValues) == 0:
                    # First slice holding series data, this becomes the default timeStep.
                    timeStep = series.timeStep
                elif series.timeStep < timeStep:
                    # Series is at a different precision, aggregate to fit our current set.
                    series.values = aggregateSeries(method, series.timeStep,
                                                    timeStep, series.values)
                else:
                    # Normalize current set to fit new series data.
                    resultValues = aggregateSeries(method, timeStep,
                                                   series.timeStep, resultValues)
                    timeStep = series.timeStep

            earliestData = series.startTime

            rightMissing = (requestUntilTime - series.endTime) // timeStep
            rightNulls = [None for i in range(rightMissing)]
            resultValues = series.values + rightNulls + resultValues

        # this is the right-side boundary on the next iteration
        sliceBoundary = slice.startTime

    # The end of the requested interval predates all slices
    if earliestData is None:
        missing = int(untilTime - fromTime) // timeStep
        resultValues = [None for i in range(missing)]

    # Left pad nulls if the start of the requested interval predates all slices
    else:
        leftMissing = (earliestData - fromTime) // timeStep
        leftNulls = [None for i in range(leftMissing)]
        resultValues = leftNulls + resultValues

    return TimeSeriesData(fromTime, untilTime, timeStep, resultValues)
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def compact(self, datapoints): """Compacts datapoints into a list of contiguous, sorted lists of points with duplicate
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def __init__(self, node, startTime, timeStep):
    """Bind a slice to its node and derive the on-disk filename."""
    self.node = node
    self.startTime = startTime
    self.timeStep = timeStep
    # Slice files are named '<startTime>@<timeStep>.slice'.
    self.fsPath = join(node.fsPath, '%d@%d.slice' % (startTime, timeStep))
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def isEmpty(self):
    """True when the slice file holds no datapoints."""
    return getsize(self.fsPath) == 0
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def endTime(self):
    """One step past the last datapoint, derived from the file size."""
    points = getsize(self.fsPath) // DATAPOINT_SIZE
    return self.startTime + points * self.timeStep
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def mtime(self):
    """Last-modified time of the slice file."""
    return getmtime(self.fsPath)
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def create(cls, node, startTime, timeStep):
    """Create an empty slice file on disk and return the slice object."""
    newSlice = cls(node, startTime, timeStep)
    # Touch the file, then fix its permissions.
    open(newSlice.fsPath, 'wb').close()
    os.chmod(newSlice.fsPath, SLICE_PERMS)
    return newSlice
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def write(self, sequence):
    """Pack (timestamp, value) pairs and write them at the proper offset.

    *sequence* is assumed sorted by timestamp and aligned to this slice's
    timeStep. A gap between the current end of file and the first point is
    padded with NaNs, up to MAX_SLICE_GAP points.

    :param sequence: iterable of (timestamp, value) pairs
    :raises SliceDeleted: if the slice file vanished from disk
    :raises SliceGapTooLarge: if the leading gap exceeds MAX_SLICE_GAP
    """
    beginningTime = sequence[0][0]
    timeOffset = beginningTime - self.startTime
    pointOffset = timeOffset // self.timeStep
    byteOffset = pointOffset * DATAPOINT_SIZE

    # Network byte order, one double per value.
    values = [v for t, v in sequence]
    format = '!' + ('d' * len(values))
    packedValues = struct.pack(format, *values)

    try:
        filesize = getsize(self.fsPath)
    except OSError as e:
        if e.errno == errno.ENOENT:
            raise SliceDeleted()
        else:
            raise

    byteGap = byteOffset - filesize
    if byteGap > 0:  # pad the allowable gap with nan's
        pointGap = byteGap // DATAPOINT_SIZE
        if pointGap > MAX_SLICE_GAP:
            raise SliceGapTooLarge()
        else:
            packedGap = PACKED_NAN * pointGap
            packedValues = packedGap + packedValues
            byteOffset -= byteGap

    with open(self.fsPath, 'r+b') as fileHandle:
        if LOCK_WRITES:
            fcntl.flock(fileHandle.fileno(), fcntl.LOCK_EX)
        try:
            fileHandle.seek(byteOffset)
        except IOError:
            # print " IOError: fsPath=%s byteOffset=%d size=%d sequence=%s" % (
            #   self.fsPath, byteOffset, filesize, sequence)
            raise
        fileHandle.write(packedValues)
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def __lt__(self, other):
    """Order slices by their start time."""
    return self.startTime < other.startTime
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def __init__(self, startTime, endTime, timeStep, values):
    """Hold one contiguous run of datapoints and its window/step."""
    self.startTime = startTime
    self.endTime = endTime
    self.timeStep = timeStep
    self.values = values
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def timestamps(self):
    """Range of datapoint timestamps covered by this series."""
    return range(self.startTime, self.endTime, self.timeStep)
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def __len__(self):
    """Number of datapoints (including Nones)."""
    return len(self.values)
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def __init__(self, node, problem):
    """Record the offending node alongside the problem description."""
    Exception.__init__(self, problem)
    self.node = node
    self.problem = problem
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def aggregate(aggregationMethod, values):
    """Aggregate *values* (ignoring Nones) with the named method.

    :param aggregationMethod: one of 'average', 'sum', 'last', 'max', 'min'
    :param values: iterable of numbers and/or Nones
    :returns: the aggregate, or None if every value is None
    :raises InvalidAggregationMethod: for an unknown method name
    """
    # Filter out None values
    knownValues = [v for v in values if v is not None]
    # FIX: the original tested ``len(knownValues) is 0`` -- an identity
    # comparison against an int literal, which is implementation-defined
    # behavior and a SyntaxWarning on modern CPython. Test emptiness instead.
    if not knownValues:
        return None

    # Aggregate based on method
    if aggregationMethod == 'average':
        return float(sum(knownValues)) / float(len(knownValues))
    elif aggregationMethod == 'sum':
        return float(sum(knownValues))
    elif aggregationMethod == 'last':
        return knownValues[-1]
    elif aggregationMethod == 'max':
        return max(knownValues)
    elif aggregationMethod == 'min':
        return min(knownValues)
    else:
        raise InvalidAggregationMethod("Unrecognized aggregation method %s" %
                                       aggregationMethod)
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def getTree(path):
    """Walk up from *path* to find the enclosing CeresTree, or None."""
    while path not in (os.sep, ''):
        if isdir(join(path, '.ceres-tree')):
            return CeresTree(path)
        path = dirname(path)
    return None
graphite-project/ceres
[ 354, 78, 354, 12, 1336452383 ]
def tags(config, args):
    """
    List all tags.
    """
    repo = config.repo
    return repo.tags()
jlevy/ghizmo
[ 80, 12, 80, 1, 1441065153 ]
def _delete_ref(repo, ref_name, force, dry_run):
    """Delete *ref_name* from *repo*, honoring force and dry-run flags."""
    ref = repo.ref(ref_name)
    if not ref and not force:
        raise ValueError("Reference not found: %s" % ref_name)
    # Dry-run reports without actually deleting.
    if not dry_run:
        ref.delete()
    return lib.status("Deleted %s" % ref_name, dry_run=dry_run)
jlevy/ghizmo
[ 80, 12, 80, 1, 1441065153 ]
def branches_full(config, args):
    """
    List full info about all branches.
    """
    repo = config.repo
    for branch in repo.branches():
        # Re-fetch by name to get the full branch record.
        yield repo.branch(branch.name)
jlevy/ghizmo
[ 80, 12, 80, 1, 1441065153 ]
def delete_branches(config, args):
    """
    Delete branches supplied on stdin.
    """
    for name in lib.input_json_lines():
        yield _delete_ref(config.repo, "heads/" + name, args.force, args.dry_run)
jlevy/ghizmo
[ 80, 12, 80, 1, 1441065153 ]
def show_refs(config, args):
    """
    Show refs supplied on stdin.
    """
    repo = config.repo
    for item in lib.input_json_lines():
        yield repo.ref(item)
jlevy/ghizmo
[ 80, 12, 80, 1, 1441065153 ]
def pull_requests(config, args):
    """
    List all PRs.
    """
    state = args.get("state", "open")
    return config.repo.pull_requests(state=state)
jlevy/ghizmo
[ 80, 12, 80, 1, 1441065153 ]
def contributor_stats(config, args):
    """
    List contributor statistics.
    """
    repo = config.repo
    return repo.contributor_statistics()
jlevy/ghizmo
[ 80, 12, 80, 1, 1441065153 ]
def releases(config, args):
    """
    List all releases.
    """
    repo = config.repo
    return repo.releases()
jlevy/ghizmo
[ 80, 12, 80, 1, 1441065153 ]
def create_release(config, args):
    """
    Create a new release.
    """
    release = config.repo.create_release(
        args.tag_name,
        name=args.name,
        target_commitish=args.get("target_commitish"),
        body=args.get("body"),
        draft=args.get_bool("draft"),
        prerelease=args.get_bool("prerelease"))
    yield release
jlevy/ghizmo
[ 80, 12, 80, 1, 1441065153 ]
def parse_json(http_response, response):
    """If the body is not empty, convert it to a python object and set as the
    value of response.body. http_response is always closed if no error occurs.

    :param http_response: the http_response object returned by
        HTTPConnection.getresponse()
    :type http_response: httplib.HTTPResponse
    :param response: general response object which will be returned to the caller
    :type response: baidubce.BceResponse
    :return: always true
    :rtype bool
    """
    body = http_response.read()
    if body:
        parsed = json.loads(body, object_hook=dict_to_python_object)
        response.__dict__.update(parsed.__dict__)
    http_response.close()
    return True
baidubce/bce-sdk-python
[ 21, 10, 21, 4, 1429591184 ]
def GetSimLimits():
    """Map each servo to its simulated (min, max) position limits."""
    limits = {}
    for i, servo in enumerate(_SERVOS):
        drive = _CONFIG['sim']['servos_sim'][i]['servo_drive']
        limits[servo] = (drive['ref_model_min_position_limit'],
                         drive['ref_model_max_position_limit'])
    return limits
google/makani
[ 1163, 104, 1163, 5, 1589825605 ]
def _GetControlOutputLimits(output, lower_limit='lower_flap_limits',
                            upper_limit='upper_flap_limits'):
    """Map each flap to its (lower, upper) control output limits."""
    lowers = output[lower_limit]
    uppers = output[upper_limit]
    return {flap: (lowers[i], uppers[i]) for i, flap in enumerate(_FLAPS)}
google/makani
[ 1163, 104, 1163, 5, 1589825605 ]
def GetControlCrosswindFlareLimits():
    """Flap limits used during the crosswind flare maneuver."""
    crosswind_output = _CONFIG['control']['crosswind']['output']
    return _GetControlOutputLimits(crosswind_output,
                                   lower_limit='lower_flap_limits_flare')
google/makani
[ 1163, 104, 1163, 5, 1589825605 ]
def GetControlHoverLimits():
    """Flap limits used in hover."""
    hover_output = _CONFIG['control']['hover']['output']
    return _GetControlOutputLimits(hover_output)
google/makani
[ 1163, 104, 1163, 5, 1589825605 ]
def GetAvionicsServoLimits():
  """Get the avionics servo mechanical limits for the current system."""
  sys_conf = system_config.SystemConfig.GetSystemConfigBySerial(
      _CONFIG['system']['wing_serial'])
  config_file = os.path.join(makani.HOME,
                             'avionics/servo/firmware/config_params.yaml')
  net_conf = network_config.NetworkConfig()
  result = {}
  for servo in _SERVOS:
    # Look up this servo's AIO node key, then decode its firmware params.
    yaml_key = sys_conf.config[net_conf.GetAioNode('servo_%s' % servo.lower())]
    params = codec.DecodeYamlFile(config_file, yaml_key)
    result[servo] = (params.servo_min_limit, params.servo_max_limit)
  return result
google/makani
[ 1163, 104, 1163, 5, 1589825605 ]
def _RudderFlapToServo(flap_angle):
  """Convert a rudder flap angle to the corresponding servo angle."""
  ratio = _CONFIG['system']['servos'][system_types.kServoR1][
      'nonlinear_servo_to_flap_ratio']
  return math.asin(flap_angle / ratio)
google/makani
[ 1163, 104, 1163, 5, 1589825605 ]
def ServosToFlaps(servos):
  """Convert servo limits to flap limits."""
  flaps = {}
  # Ailerons map one-to-one onto their servos.
  for servo in servos:
    if servo.startswith('A'):
      flaps[servo] = servos[servo]
  # Elevator and rudder flaps are driven by paired servos; average the pair.
  flaps['Elevator'] = (0.5 * (servos['E1'][0] + servos['E2'][0]),
                       0.5 * (servos['E1'][1] + servos['E2'][1]))
  flaps['Rudder'] = (
      _RudderServoToFlap(0.5 * (servos['R1'][0] + servos['R2'][0])),
      _RudderServoToFlap(0.5 * (servos['R1'][1] + servos['R2'][1])))
  return flaps
google/makani
[ 1163, 104, 1163, 5, 1589825605 ]
def _PrintServos(name, limits, print_header):
  # Print one labeled row of servo limits, converted to degrees (Python 2
  # print-statement formatting; the '%' applies to the whole print line).
  #   name: row label shown in the first column.
  #   limits: dict mapping servo name -> (min_rad, max_rad).
  #   print_header: if True, emit the column-header line first.
  if print_header:
    print ('%15s A1 A2 A4 A5 A7 A8'
           ' E1 E2 R1 R2' % '')
  # One %5.1f column per servo, in _SERVOS order.
  print ('%15s min:' + ' %5.1f' * 10) % (
      (name,) + tuple([math.degrees(limits[_SERVOS[i]][0])
                       for i in range(len(_SERVOS))]))
  print ('%15s max:' + ' %5.1f' * 10) % (
      ('',) + tuple([math.degrees(limits[_SERVOS[i]][1])
                     for i in range(len(_SERVOS))]))
google/makani
[ 1163, 104, 1163, 5, 1589825605 ]
def GetServoLimits():
  """Return (name, servo_limits) pairs for every configured limit set.

  Limit sets expressed as flap limits are converted to servo limits first.
  """
  result = []
  for limit in _LIMITS:
    if limit.is_flap_limit:
      result.append((limit.name, FlapsToServos(limit.limits)))
    else:
      result.append((limit.name, limit.limits))
  return result
google/makani
[ 1163, 104, 1163, 5, 1589825605 ]
def Main():
  # Entry point: print the flap-limit table, then the servo-limit table
  # (Python 2 print statements). The header row is only printed once per
  # table; subsequent rows reuse the column layout.
  print '\nFlap limits:'
  print_header = True
  for name, flaps in GetFlapLimits():
    _PrintFlaps(name, flaps, print_header)
    print_header = False

  print '\nServo limits:'
  print_header = True
  for name, servos in GetServoLimits():
    _PrintServos(name, servos, print_header)
    print_header = False
google/makani
[ 1163, 104, 1163, 5, 1589825605 ]
def main(self):
    """Run setup, the test body, and teardown, unless nested_command is set."""
    if self.nested_command:
        return
    try:
        self.setup()
        # Give some time for the topology to start.
        time.sleep(10)
        self._run()
    finally:
        # Teardown runs even if setup or the test body raised.
        self.teardown()
netsec-ethz/scion
[ 22, 27, 22, 14, 1512059421 ]
def _renewal_request( self, isd_as: scion_addr.ISD_AS, mode: str = "--force",
netsec-ethz/scion
[ 22, 27, 22, 14, 1512059421 ]
def read_file(filename: str) -> str:
    """Return the contents of *filename* under the AS crypto directory."""
    path = as_dir / "crypto/as" / filename
    with open(path) as handle:
        return handle.read()
netsec-ethz/scion
[ 22, 27, 22, 14, 1512059421 ]
def _check_key_cert(self, cs_configs: List[pathlib.Path]):
    """Poll every control server until it serves the renewed key/certificate.

    Makes up to 5 passes (3 s apart) over the not-yet-confirmed servers,
    querying each server's /signer endpoint and comparing its reported
    subject key id against the on-disk certificate chain. Exits the
    process with status 1 if any server never confirms.

    Args:
        cs_configs: control-server config file paths; the file stem encodes
            the ISD-AS (stripped of its 2-char prefix/suffix below).
    """
    not_ready = [*cs_configs]
    for _ in range(5):
        logger.info(
            "Checking if all control servers have reloaded the key and certificate..."
        )
        # BUG FIX: iterate over a snapshot. The original iterated
        # `not_ready` directly while calling not_ready.remove() inside the
        # loop, which skips the element following each removal and wastes
        # retry passes.
        for cs_config in list(not_ready):
            conn = client.HTTPConnection(self._http_endpoint(cs_config))
            conn.request("GET", "/signer")
            resp = conn.getresponse()
            if resp.status != 200:
                logger.info("Unexpected response: %d %s", resp.status,
                            resp.reason)
                continue
            isd_as = scion_addr.ISD_AS(cs_config.stem[2:-2])
            as_dir = self._to_as_dir(isd_as)
            chain_name = "ISD%s-AS%s.pem" % (isd_as.isd_str(),
                                             isd_as.as_file_fmt())
            pld = json.loads(resp.read().decode("utf-8"))
            # Server is ready only once the SKID it serves matches the
            # renewed certificate on disk.
            if pld["subject_key_id"] != self._extract_skid(
                    as_dir / "crypto/as" / chain_name):
                continue
            logger.info(
                "Control server successfully loaded new key and certificate: %s"
                % self._rel(cs_config))
            not_ready.remove(cs_config)
        if not not_ready:
            break
        time.sleep(3)
    else:
        # for/else: only reached when all 5 passes completed without break.
        logger.error(
            "Control servers without reloaded key and certificate: %s"
            % [cs_config.name for cs_config in not_ready])
        sys.exit(1)
netsec-ethz/scion
[ 22, 27, 22, 14, 1512059421 ]
def _extract_skid(self, file: pathlib.Path):
    """Extract the Subject Key Identifier from a PEM certificate.

    Runs ``openssl x509 -text`` and returns the SKID hex string with colons
    replaced by spaces, upper-cased (the format compared against the control
    server's /signer payload).

    Raises:
        ValueError: if the certificate has no Subject Key Identifier
            extension. (The original left ``skid`` unbound in this case and
            raised an opaque NameError instead.)
    """
    out = subprocess.check_output(
        ['openssl', 'x509', '-in', file, '-noout', '-text'])
    lines = out.splitlines()
    for i, v in enumerate(lines):
        # The SKID value is printed on the line following the header.
        if v.decode("utf-8").find("Subject Key Identifier") > -1:
            return lines[i + 1].decode("utf-8").split()[-1].replace(
                ":", " ").upper()
    raise ValueError(
        "no Subject Key Identifier found in %s" % file)
netsec-ethz/scion
[ 22, 27, 22, 14, 1512059421 ]
def _to_as_dir(self, isd_as: scion_addr.ISD_AS) -> pathlib.Path:
    """Return the generated-artifacts directory for the given AS."""
    as_name = "AS%s" % isd_as.as_file_fmt()
    return pathlib.Path(self.test_state.artifacts) / "gen" / as_name
netsec-ethz/scion
[ 22, 27, 22, 14, 1512059421 ]
def _local_flags(self, isd_as: scion_addr.ISD_AS) -> List[str]:
    """Build the --local flag pair pointing at the tester's SCION address."""
    tester = "tester_%s" % isd_as.file_fmt()
    addr = self.execute(tester, "sh", "-c", "echo $SCION_LOCAL_ADDR").strip()
    return ["--local", addr]
netsec-ethz/scion
[ 22, 27, 22, 14, 1512059421 ]
def __init__( self, *, host: str = "dialogflow.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Sequence[str] = None, channel: grpc.Channel = None, api_mtls_endpoint: str = None, client_cert_source: Callable[[], Tuple[bytes, bytes]] = None, ssl_channel_credentials: grpc.ChannelCredentials = None, client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False,
googleapis/python-dialogflow
[ 387, 145, 387, 4, 1508880371 ]
def create_channel( cls, host: str = "dialogflow.googleapis.com", credentials: ga_credentials.Credentials = None, credentials_file: str = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, **kwargs,
googleapis/python-dialogflow
[ 387, 145, 387, 4, 1508880371 ]
def grpc_channel(self) -> grpc.Channel:
    """Return the gRPC channel this transport communicates over."""
    channel = self._grpc_channel
    return channel
googleapis/python-dialogflow
[ 387, 145, 387, 4, 1508880371 ]
def list_session_entity_types( self,
googleapis/python-dialogflow
[ 387, 145, 387, 4, 1508880371 ]
def get_session_entity_type( self,
googleapis/python-dialogflow
[ 387, 145, 387, 4, 1508880371 ]
def create_session_entity_type( self,
googleapis/python-dialogflow
[ 387, 145, 387, 4, 1508880371 ]
def update_session_entity_type( self,
googleapis/python-dialogflow
[ 387, 145, 387, 4, 1508880371 ]
def delete_session_entity_type( self,
googleapis/python-dialogflow
[ 387, 145, 387, 4, 1508880371 ]
def close(self):
    """Release the underlying gRPC channel held by this transport."""
    channel = self.grpc_channel
    channel.close()
googleapis/python-dialogflow
[ 387, 145, 387, 4, 1508880371 ]
def __init__(self, s3_image_root, http_image_root):
    """Record the S3 upload root and the matching public HTTP root."""
    # Kept as plain attributes; callers read them directly.
    self.s3_image_root = s3_image_root
    self.http_image_root = http_image_root
airbnb/knowledge-repo
[ 5286, 711, 5286, 126, 1471476770 ]
def skip_image(self, kp, image):
    """Return True for images whose src is already an http(s) URL."""
    import re
    src = image['src']
    return re.match('http[s]?://', src) is not None
airbnb/knowledge-repo
[ 5286, 711, 5286, 126, 1471476770 ]