body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
aa27f8e32b0a33ed2decb7fbfc9ac4354623c6c3d6fbb4daf69d486d79bdf66d
def describe(ids: List[str]): 'Describe one or more instances' return functools.partial(ec2.describe_instances, InstanceIds=ids)
Describe one or more instances
aws_instance/backend.py
describe
geekysuavo/aws-instance-tool
0
python
def describe(ids: List[str]): return functools.partial(ec2.describe_instances, InstanceIds=ids)
def describe(ids: List[str]): return functools.partial(ec2.describe_instances, InstanceIds=ids)<|docstring|>Describe one or more instances<|endoftext|>
b6d986cbacc498fa27cce67c263b5a83e7830dc4c49772a68646b53ab7f1dec2
@property def address(self) -> str: 'IP Address' response = run(describe(ids=[self.inst_id])) for res in response['Reservations']: inst_info = res['Instances'][0] if (inst_info['InstanceId'] == self.inst_id): return inst_info['PublicIpAddress']
IP Address
aws_instance/backend.py
address
geekysuavo/aws-instance-tool
0
python
@property def address(self) -> str: response = run(describe(ids=[self.inst_id])) for res in response['Reservations']: inst_info = res['Instances'][0] if (inst_info['InstanceId'] == self.inst_id): return inst_info['PublicIpAddress']
@property def address(self) -> str: response = run(describe(ids=[self.inst_id])) for res in response['Reservations']: inst_info = res['Instances'][0] if (inst_info['InstanceId'] == self.inst_id): return inst_info['PublicIpAddress']<|docstring|>IP Address<|endoftext|>
0853a6c1d5b8b7d9f6baea7074186da1bdbd725981e1445ba7f7f00d7bb8b39f
def start(self) -> Tuple[(str, str)]: 'Start the instance' response = run(start(ids=[self.inst_id])) prev = response['StartingInstances'][0]['PreviousState']['Name'] curr = response['StartingInstances'][0]['CurrentState']['Name'] return (prev, curr)
Start the instance
aws_instance/backend.py
start
geekysuavo/aws-instance-tool
0
python
def start(self) -> Tuple[(str, str)]: response = run(start(ids=[self.inst_id])) prev = response['StartingInstances'][0]['PreviousState']['Name'] curr = response['StartingInstances'][0]['CurrentState']['Name'] return (prev, curr)
def start(self) -> Tuple[(str, str)]: response = run(start(ids=[self.inst_id])) prev = response['StartingInstances'][0]['PreviousState']['Name'] curr = response['StartingInstances'][0]['CurrentState']['Name'] return (prev, curr)<|docstring|>Start the instance<|endoftext|>
74a0f360d5ff8015a0cbaabaeaf823cc3fd9478bfa9bf385f2f82b088e6abd3c
def stop(self) -> Tuple[(str, str)]: 'Stop the instance' response = run(stop(ids=[self.inst_id])) prev = response['StoppingInstances'][0]['PreviousState']['Name'] curr = response['StoppingInstances'][0]['CurrentState']['Name'] return (prev, curr)
Stop the instance
aws_instance/backend.py
stop
geekysuavo/aws-instance-tool
0
python
def stop(self) -> Tuple[(str, str)]: response = run(stop(ids=[self.inst_id])) prev = response['StoppingInstances'][0]['PreviousState']['Name'] curr = response['StoppingInstances'][0]['CurrentState']['Name'] return (prev, curr)
def stop(self) -> Tuple[(str, str)]: response = run(stop(ids=[self.inst_id])) prev = response['StoppingInstances'][0]['PreviousState']['Name'] curr = response['StoppingInstances'][0]['CurrentState']['Name'] return (prev, curr)<|docstring|>Stop the instance<|endoftext|>
55fdc0695ac552e95c81356380f566e652e56721b90052d679f250a9543822be
def __len__(self) -> int: 'Number of instances' return len(self.instances)
Number of instances
aws_instance/backend.py
__len__
geekysuavo/aws-instance-tool
0
python
def __len__(self) -> int: return len(self.instances)
def __len__(self) -> int: return len(self.instances)<|docstring|>Number of instances<|endoftext|>
2bfa09b8f97d5c91172fea4993d52738821f36372e0598229c94bef83b4e9287
def __iter__(self): 'Iterate over the instances' return iter(self.instances.items())
Iterate over the instances
aws_instance/backend.py
__iter__
geekysuavo/aws-instance-tool
0
python
def __iter__(self): return iter(self.instances.items())
def __iter__(self): return iter(self.instances.items())<|docstring|>Iterate over the instances<|endoftext|>
e3729d1c43c7f497fff99a84825ae118009aefad522418141238b0bf3550cd16
def __contains__(self, name: str) -> bool: 'Check if an instance name is supported' return (name in self.instances)
Check if an instance name is supported
aws_instance/backend.py
__contains__
geekysuavo/aws-instance-tool
0
python
def __contains__(self, name: str) -> bool: return (name in self.instances)
def __contains__(self, name: str) -> bool: return (name in self.instances)<|docstring|>Check if an instance name is supported<|endoftext|>
87f6787f8fbb757e92f48ced0d6bcbf87e44fe4792c7e81ea41a938f75cc4bbe
def __getitem__(self, name: str) -> Instance: 'Get an instance' return Instance(name, self.instances[name])
Get an instance
aws_instance/backend.py
__getitem__
geekysuavo/aws-instance-tool
0
python
def __getitem__(self, name: str) -> Instance: return Instance(name, self.instances[name])
def __getitem__(self, name: str) -> Instance: return Instance(name, self.instances[name])<|docstring|>Get an instance<|endoftext|>
f060eb0266d271c5b7d26294ecf8b78cbbef753d1b284a8c6e4b65fb40397317
def start(self, name: str) -> Tuple[(str, str)]: 'Start an instance' return self[name].start()
Start an instance
aws_instance/backend.py
start
geekysuavo/aws-instance-tool
0
python
def start(self, name: str) -> Tuple[(str, str)]: return self[name].start()
def start(self, name: str) -> Tuple[(str, str)]: return self[name].start()<|docstring|>Start an instance<|endoftext|>
41e49f629114b9b35f8bd1338cc0a0a305b417875ce9d85dc61ee2c906f2e1b9
def stop(self, name: str) -> Tuple[(str, str)]: 'Stop an instance' return self[name].stop()
Stop an instance
aws_instance/backend.py
stop
geekysuavo/aws-instance-tool
0
python
def stop(self, name: str) -> Tuple[(str, str)]: return self[name].stop()
def stop(self, name: str) -> Tuple[(str, str)]: return self[name].stop()<|docstring|>Stop an instance<|endoftext|>
721d4f280fda499c680de9cac4c667e44434f453904955a73a5d9c6a38114b9c
def ssh(self, name: str): 'Start a shell on an instance' ip = self[name].address subprocess.run(['ssh', '-i', self.ident, f'{self.username}@{ip}'])
Start a shell on an instance
aws_instance/backend.py
ssh
geekysuavo/aws-instance-tool
0
python
def ssh(self, name: str): ip = self[name].address subprocess.run(['ssh', '-i', self.ident, f'{self.username}@{ip}'])
def ssh(self, name: str): ip = self[name].address subprocess.run(['ssh', '-i', self.ident, f'{self.username}@{ip}'])<|docstring|>Start a shell on an instance<|endoftext|>
bb91e26b28d2ee03ebd09cf578ea5ce118e7e13d40ae5699b0aa7f28b5f6f3da
def tunnel(self, name: str, port: int): 'Connect to a port on the instance' ip = self[name].address subprocess.run(['ssh', '-i', self.ident, '-N', '-L', f'{port}:localhost:{port}', f'{self.username}@{ip}'])
Connect to a port on the instance
aws_instance/backend.py
tunnel
geekysuavo/aws-instance-tool
0
python
def tunnel(self, name: str, port: int): ip = self[name].address subprocess.run(['ssh', '-i', self.ident, '-N', '-L', f'{port}:localhost:{port}', f'{self.username}@{ip}'])
def tunnel(self, name: str, port: int): ip = self[name].address subprocess.run(['ssh', '-i', self.ident, '-N', '-L', f'{port}:localhost:{port}', f'{self.username}@{ip}'])<|docstring|>Connect to a port on the instance<|endoftext|>
26ce8a285cb1675961b71650ef1767fbd2841d40cd6a9d2c6227b6355b70e402
@property def names(self) -> Tuple[(str, ...)]: 'Supported instance names' return tuple(self.instances.keys())
Supported instance names
aws_instance/backend.py
names
geekysuavo/aws-instance-tool
0
python
@property def names(self) -> Tuple[(str, ...)]: return tuple(self.instances.keys())
@property def names(self) -> Tuple[(str, ...)]: return tuple(self.instances.keys())<|docstring|>Supported instance names<|endoftext|>
d823aacdb1220935fd79780618afebf4e6699e2a46655f40e2869cb01e2d7e2d
@property def instance_ids(self) -> Tuple[(str, ...)]: 'Suported instance ids' return tuple(self.instances.values())
Suported instance ids
aws_instance/backend.py
instance_ids
geekysuavo/aws-instance-tool
0
python
@property def instance_ids(self) -> Tuple[(str, ...)]: return tuple(self.instances.values())
@property def instance_ids(self) -> Tuple[(str, ...)]: return tuple(self.instances.values())<|docstring|>Suported instance ids<|endoftext|>
9b6e9071ba26a2dfe4a019d1b982fc106eb0535a8474f86021f0aa2067b4565b
@property def states(self) -> Tuple[(str, ...)]: 'Instance statuses' response = run(describe(ids=list(self.instance_ids))) states = [] for (name, inst_id) in self: for res in response['Reservations']: inst_info = res['Instances'][0] if (inst_info['InstanceId'] == inst_id): state = inst_info['State']['Name'] states.append(state) return tuple(states)
Instance statuses
aws_instance/backend.py
states
geekysuavo/aws-instance-tool
0
python
@property def states(self) -> Tuple[(str, ...)]: response = run(describe(ids=list(self.instance_ids))) states = [] for (name, inst_id) in self: for res in response['Reservations']: inst_info = res['Instances'][0] if (inst_info['InstanceId'] == inst_id): state = inst_info['State']['Name'] states.append(state) return tuple(states)
@property def states(self) -> Tuple[(str, ...)]: response = run(describe(ids=list(self.instance_ids))) states = [] for (name, inst_id) in self: for res in response['Reservations']: inst_info = res['Instances'][0] if (inst_info['InstanceId'] == inst_id): state = inst_info['State']['Name'] states.append(state) return tuple(states)<|docstring|>Instance statuses<|endoftext|>
690c14845e3269a69b03ed9160ea8e36d71b725a90ddd802bf433706ec65302b
@classmethod def load(cls): 'Instantiate a Config from its yaml source' filename = os.path.join(os.path.expanduser('~'), '.config', 'aws-instance.yaml') with open(filename, 'rt', encoding='utf-8') as fh: config = yaml.safe_load(fh) return cls(**config)
Instantiate a Config from its yaml source
aws_instance/backend.py
load
geekysuavo/aws-instance-tool
0
python
@classmethod def load(cls): filename = os.path.join(os.path.expanduser('~'), '.config', 'aws-instance.yaml') with open(filename, 'rt', encoding='utf-8') as fh: config = yaml.safe_load(fh) return cls(**config)
@classmethod def load(cls): filename = os.path.join(os.path.expanduser('~'), '.config', 'aws-instance.yaml') with open(filename, 'rt', encoding='utf-8') as fh: config = yaml.safe_load(fh) return cls(**config)<|docstring|>Instantiate a Config from its yaml source<|endoftext|>
139827261799fc5075e26df21f8b21536f4ecab49f7c598063cdd57f097a82f3
def is_enabled(self): 'Determine if this command is enabled or not\n ' return is_python(self.view)
Determine if this command is enabled or not
Packages/Anaconda/commands/goto.py
is_enabled
prisis/sublime-text-packages
0
python
def is_enabled(self): '\n ' return is_python(self.view)
def is_enabled(self): '\n ' return is_python(self.view)<|docstring|>Determine if this command is enabled or not<|endoftext|>
6b8aa787fcdc71ee027655ae0d3dc0cfae8cdf491dc0a96f7c78adc63b4b7acb
def make_instance(self, include_optional): 'Test IoXK8sClusterAddonsV1alpha3ClusterResourceSetSpec\n include_option is a boolean, when False only required\n params are included, when True both required and\n optional params are included ' if include_optional: return IoXK8sClusterAddonsV1alpha3ClusterResourceSetSpec(cluster_selector=kubernetes.client.models.io_x_k8s_cluster_addons_v1alpha3_cluster_resource_set_spec_cluster_selector.io_x_k8s_cluster_addons_v1alpha3_ClusterResourceSet_spec_clusterSelector(match_expressions=[kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_affinity_pod_affinity_pod_affinity_term_label_selector_match_expressions.com_coreos_monitoring_v1_Alertmanager_spec_affinity_podAffinity_podAffinityTerm_labelSelector_matchExpressions(key='0', operator='0', values=['0'])], match_labels={'key': '0'}), resources=[kubernetes.client.models.io_x_k8s_cluster_addons_v1alpha3_cluster_resource_set_spec_resources.io_x_k8s_cluster_addons_v1alpha3_ClusterResourceSet_spec_resources(kind='Secret', name='0')], strategy='ApplyOnce') else: return IoXK8sClusterAddonsV1alpha3ClusterResourceSetSpec(cluster_selector=kubernetes.client.models.io_x_k8s_cluster_addons_v1alpha3_cluster_resource_set_spec_cluster_selector.io_x_k8s_cluster_addons_v1alpha3_ClusterResourceSet_spec_clusterSelector(match_expressions=[kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_affinity_pod_affinity_pod_affinity_term_label_selector_match_expressions.com_coreos_monitoring_v1_Alertmanager_spec_affinity_podAffinity_podAffinityTerm_labelSelector_matchExpressions(key='0', operator='0', values=['0'])], match_labels={'key': '0'}))
Test IoXK8sClusterAddonsV1alpha3ClusterResourceSetSpec include_option is a boolean, when False only required params are included, when True both required and optional params are included
kubernetes/test/test_io_xk8s_cluster_addons_v1alpha3_cluster_resource_set_spec.py
make_instance
mariusgheorghies/python
0
python
def make_instance(self, include_optional): 'Test IoXK8sClusterAddonsV1alpha3ClusterResourceSetSpec\n include_option is a boolean, when False only required\n params are included, when True both required and\n optional params are included ' if include_optional: return IoXK8sClusterAddonsV1alpha3ClusterResourceSetSpec(cluster_selector=kubernetes.client.models.io_x_k8s_cluster_addons_v1alpha3_cluster_resource_set_spec_cluster_selector.io_x_k8s_cluster_addons_v1alpha3_ClusterResourceSet_spec_clusterSelector(match_expressions=[kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_affinity_pod_affinity_pod_affinity_term_label_selector_match_expressions.com_coreos_monitoring_v1_Alertmanager_spec_affinity_podAffinity_podAffinityTerm_labelSelector_matchExpressions(key='0', operator='0', values=['0'])], match_labels={'key': '0'}), resources=[kubernetes.client.models.io_x_k8s_cluster_addons_v1alpha3_cluster_resource_set_spec_resources.io_x_k8s_cluster_addons_v1alpha3_ClusterResourceSet_spec_resources(kind='Secret', name='0')], strategy='ApplyOnce') else: return IoXK8sClusterAddonsV1alpha3ClusterResourceSetSpec(cluster_selector=kubernetes.client.models.io_x_k8s_cluster_addons_v1alpha3_cluster_resource_set_spec_cluster_selector.io_x_k8s_cluster_addons_v1alpha3_ClusterResourceSet_spec_clusterSelector(match_expressions=[kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_affinity_pod_affinity_pod_affinity_term_label_selector_match_expressions.com_coreos_monitoring_v1_Alertmanager_spec_affinity_podAffinity_podAffinityTerm_labelSelector_matchExpressions(key='0', operator='0', values=['0'])], match_labels={'key': '0'}))
def make_instance(self, include_optional): 'Test IoXK8sClusterAddonsV1alpha3ClusterResourceSetSpec\n include_option is a boolean, when False only required\n params are included, when True both required and\n optional params are included ' if include_optional: return IoXK8sClusterAddonsV1alpha3ClusterResourceSetSpec(cluster_selector=kubernetes.client.models.io_x_k8s_cluster_addons_v1alpha3_cluster_resource_set_spec_cluster_selector.io_x_k8s_cluster_addons_v1alpha3_ClusterResourceSet_spec_clusterSelector(match_expressions=[kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_affinity_pod_affinity_pod_affinity_term_label_selector_match_expressions.com_coreos_monitoring_v1_Alertmanager_spec_affinity_podAffinity_podAffinityTerm_labelSelector_matchExpressions(key='0', operator='0', values=['0'])], match_labels={'key': '0'}), resources=[kubernetes.client.models.io_x_k8s_cluster_addons_v1alpha3_cluster_resource_set_spec_resources.io_x_k8s_cluster_addons_v1alpha3_ClusterResourceSet_spec_resources(kind='Secret', name='0')], strategy='ApplyOnce') else: return IoXK8sClusterAddonsV1alpha3ClusterResourceSetSpec(cluster_selector=kubernetes.client.models.io_x_k8s_cluster_addons_v1alpha3_cluster_resource_set_spec_cluster_selector.io_x_k8s_cluster_addons_v1alpha3_ClusterResourceSet_spec_clusterSelector(match_expressions=[kubernetes.client.models.com_coreos_monitoring_v1_alertmanager_spec_affinity_pod_affinity_pod_affinity_term_label_selector_match_expressions.com_coreos_monitoring_v1_Alertmanager_spec_affinity_podAffinity_podAffinityTerm_labelSelector_matchExpressions(key='0', operator='0', values=['0'])], match_labels={'key': '0'}))<|docstring|>Test IoXK8sClusterAddonsV1alpha3ClusterResourceSetSpec include_option is a boolean, when False only required params are included, when True both required and optional params are included<|endoftext|>
1b2084b99afb216bc83d9c6339eb617a1e7aa82f106267b980d7bf84fc6ee216
def testIoXK8sClusterAddonsV1alpha3ClusterResourceSetSpec(self): 'Test IoXK8sClusterAddonsV1alpha3ClusterResourceSetSpec' inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True)
Test IoXK8sClusterAddonsV1alpha3ClusterResourceSetSpec
kubernetes/test/test_io_xk8s_cluster_addons_v1alpha3_cluster_resource_set_spec.py
testIoXK8sClusterAddonsV1alpha3ClusterResourceSetSpec
mariusgheorghies/python
0
python
def testIoXK8sClusterAddonsV1alpha3ClusterResourceSetSpec(self): inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True)
def testIoXK8sClusterAddonsV1alpha3ClusterResourceSetSpec(self): inst_req_only = self.make_instance(include_optional=False) inst_req_and_optional = self.make_instance(include_optional=True)<|docstring|>Test IoXK8sClusterAddonsV1alpha3ClusterResourceSetSpec<|endoftext|>
fef8b025751d1d8bea09336d37e84b5fe1c29055bb5a8ef57f33440a8f0ba514
def test_scipy_edge_set_to_cugraph_edge_set(): '\n +-+\n ------> |1|\n | +-+\n |\n | |\n | v\n\n +-+ <-- +-+ +-+\n |0| |2| <-- |3|\n +-+ --> +-+ +-+' dpr = mg.resolver scipy_sparse_matrix = ss.csr_matrix(np.array([[0, 1, 1, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 1, 0]])) x = dpr.wrappers.EdgeSet.ScipyEdgeSet(scipy_sparse_matrix) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] cdf = cudf.DataFrame({'Source': sources, 'Destination': destinations}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='Source', destination='Destination') intermediate = dpr.wrappers.EdgeSet.CuGraphEdgeSet(g) y = dpr.translate(x, CuGraphEdgeSet) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraphEdgeSet)) == 1)
+-+ ------> |1| | +-+ | | | | v +-+ <-- +-+ +-+ |0| |2| <-- |3| +-+ --> +-+ +-+
metagraph_cuda/tests/translators/test_to_cugraph_translators.py
test_scipy_edge_set_to_cugraph_edge_set
jim22k/metagraph-cuda
0
python
def test_scipy_edge_set_to_cugraph_edge_set(): '\n +-+\n ------> |1|\n | +-+\n |\n | |\n | v\n\n +-+ <-- +-+ +-+\n |0| |2| <-- |3|\n +-+ --> +-+ +-+' dpr = mg.resolver scipy_sparse_matrix = ss.csr_matrix(np.array([[0, 1, 1, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 1, 0]])) x = dpr.wrappers.EdgeSet.ScipyEdgeSet(scipy_sparse_matrix) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] cdf = cudf.DataFrame({'Source': sources, 'Destination': destinations}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='Source', destination='Destination') intermediate = dpr.wrappers.EdgeSet.CuGraphEdgeSet(g) y = dpr.translate(x, CuGraphEdgeSet) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraphEdgeSet)) == 1)
def test_scipy_edge_set_to_cugraph_edge_set(): '\n +-+\n ------> |1|\n | +-+\n |\n | |\n | v\n\n +-+ <-- +-+ +-+\n |0| |2| <-- |3|\n +-+ --> +-+ +-+' dpr = mg.resolver scipy_sparse_matrix = ss.csr_matrix(np.array([[0, 1, 1, 0], [0, 0, 1, 0], [1, 0, 0, 0], [0, 0, 1, 0]])) x = dpr.wrappers.EdgeSet.ScipyEdgeSet(scipy_sparse_matrix) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] cdf = cudf.DataFrame({'Source': sources, 'Destination': destinations}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='Source', destination='Destination') intermediate = dpr.wrappers.EdgeSet.CuGraphEdgeSet(g) y = dpr.translate(x, CuGraphEdgeSet) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraphEdgeSet)) == 1)<|docstring|>+-+ ------> |1| | +-+ | | | | v +-+ <-- +-+ +-+ |0| |2| <-- |3| +-+ --> +-+ +-+<|endoftext|>
35fd619a04a5bf379f3064a86e061f229d7f63bda50aeed5b3ceb62ceb16c95d
def test_scipy_edge_map_to_cugraph_edge_map(): '\n +-+\n ----9-> |1|\n | +-+\n |\n | |\n | 6\n | |\n | v\n\n +-+ <-7- +-+ +-+\n |0| |2| <-5- |3|\n +-+ -8-> +-+ +-+' dpr = mg.resolver scipy_sparse_matrix = ss.csr_matrix(np.array([[0, 9, 8, 0], [0, 0, 6, 0], [7, 0, 0, 0], [0, 0, 5, 0]])) x = dpr.wrappers.EdgeMap.ScipyEdgeMap(scipy_sparse_matrix) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] weights = [9, 8, 6, 7, 5] cdf = cudf.DataFrame({'source': sources, 'destination': destinations, 'weights': weights}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='source', destination='destination', edge_attr='weights') intermediate = dpr.wrappers.EdgeMap.CuGraphEdgeMap(g) y = dpr.translate(x, CuGraphEdgeMap) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraphEdgeMap)) == 1)
+-+ ----9-> |1| | +-+ | | | | 6 | | | v +-+ <-7- +-+ +-+ |0| |2| <-5- |3| +-+ -8-> +-+ +-+
metagraph_cuda/tests/translators/test_to_cugraph_translators.py
test_scipy_edge_map_to_cugraph_edge_map
jim22k/metagraph-cuda
0
python
def test_scipy_edge_map_to_cugraph_edge_map(): '\n +-+\n ----9-> |1|\n | +-+\n |\n | |\n | 6\n | |\n | v\n\n +-+ <-7- +-+ +-+\n |0| |2| <-5- |3|\n +-+ -8-> +-+ +-+' dpr = mg.resolver scipy_sparse_matrix = ss.csr_matrix(np.array([[0, 9, 8, 0], [0, 0, 6, 0], [7, 0, 0, 0], [0, 0, 5, 0]])) x = dpr.wrappers.EdgeMap.ScipyEdgeMap(scipy_sparse_matrix) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] weights = [9, 8, 6, 7, 5] cdf = cudf.DataFrame({'source': sources, 'destination': destinations, 'weights': weights}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='source', destination='destination', edge_attr='weights') intermediate = dpr.wrappers.EdgeMap.CuGraphEdgeMap(g) y = dpr.translate(x, CuGraphEdgeMap) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraphEdgeMap)) == 1)
def test_scipy_edge_map_to_cugraph_edge_map(): '\n +-+\n ----9-> |1|\n | +-+\n |\n | |\n | 6\n | |\n | v\n\n +-+ <-7- +-+ +-+\n |0| |2| <-5- |3|\n +-+ -8-> +-+ +-+' dpr = mg.resolver scipy_sparse_matrix = ss.csr_matrix(np.array([[0, 9, 8, 0], [0, 0, 6, 0], [7, 0, 0, 0], [0, 0, 5, 0]])) x = dpr.wrappers.EdgeMap.ScipyEdgeMap(scipy_sparse_matrix) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] weights = [9, 8, 6, 7, 5] cdf = cudf.DataFrame({'source': sources, 'destination': destinations, 'weights': weights}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='source', destination='destination', edge_attr='weights') intermediate = dpr.wrappers.EdgeMap.CuGraphEdgeMap(g) y = dpr.translate(x, CuGraphEdgeMap) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraphEdgeMap)) == 1)<|docstring|>+-+ ----9-> |1| | +-+ | | | | 6 | | | v +-+ <-7- +-+ +-+ |0| |2| <-5- |3| +-+ -8-> +-+ +-+<|endoftext|>
077c6178c18ee1e14676644dcf2b6984794d7592b8ff3d2e4178f6f26d462bcb
def test_unweighted_directed_networkx_to_cugraph(): '\n +-+\n ------> |1|\n | +-+\n |\n | |\n | v\n\n +-+ <-- +-+ +-+\n |0| |2| <-- |3|\n +-+ --> +-+ +-+' dpr = mg.resolver networkx_graph_data = [(0, 1), (0, 2), (2, 0), (1, 2), (3, 2)] networkx_graph_unwrapped = nx.DiGraph() networkx_graph_unwrapped.add_edges_from(networkx_graph_data) x = dpr.wrappers.Graph.NetworkXGraph(networkx_graph_unwrapped) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] cdf = cudf.DataFrame({'source': sources, 'destination': destinations}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='source', destination='destination') intermediate = dpr.wrappers.Graph.CuGraph(g, None) y = dpr.translate(x, CuGraph) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraph)) == 1)
+-+ ------> |1| | +-+ | | | | v +-+ <-- +-+ +-+ |0| |2| <-- |3| +-+ --> +-+ +-+
metagraph_cuda/tests/translators/test_to_cugraph_translators.py
test_unweighted_directed_networkx_to_cugraph
jim22k/metagraph-cuda
0
python
def test_unweighted_directed_networkx_to_cugraph(): '\n +-+\n ------> |1|\n | +-+\n |\n | |\n | v\n\n +-+ <-- +-+ +-+\n |0| |2| <-- |3|\n +-+ --> +-+ +-+' dpr = mg.resolver networkx_graph_data = [(0, 1), (0, 2), (2, 0), (1, 2), (3, 2)] networkx_graph_unwrapped = nx.DiGraph() networkx_graph_unwrapped.add_edges_from(networkx_graph_data) x = dpr.wrappers.Graph.NetworkXGraph(networkx_graph_unwrapped) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] cdf = cudf.DataFrame({'source': sources, 'destination': destinations}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='source', destination='destination') intermediate = dpr.wrappers.Graph.CuGraph(g, None) y = dpr.translate(x, CuGraph) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraph)) == 1)
def test_unweighted_directed_networkx_to_cugraph(): '\n +-+\n ------> |1|\n | +-+\n |\n | |\n | v\n\n +-+ <-- +-+ +-+\n |0| |2| <-- |3|\n +-+ --> +-+ +-+' dpr = mg.resolver networkx_graph_data = [(0, 1), (0, 2), (2, 0), (1, 2), (3, 2)] networkx_graph_unwrapped = nx.DiGraph() networkx_graph_unwrapped.add_edges_from(networkx_graph_data) x = dpr.wrappers.Graph.NetworkXGraph(networkx_graph_unwrapped) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] cdf = cudf.DataFrame({'source': sources, 'destination': destinations}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='source', destination='destination') intermediate = dpr.wrappers.Graph.CuGraph(g, None) y = dpr.translate(x, CuGraph) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraph)) == 1)<|docstring|>+-+ ------> |1| | +-+ | | | | v +-+ <-- +-+ +-+ |0| |2| <-- |3| +-+ --> +-+ +-+<|endoftext|>
586d5c4b8083f7b4bdb016d3380aa86880aa73e7118874aa9cc2b02c2e19caec
def test_weighted_directed_networkx_to_cugraph(): '\n +-+\n ----9-> |1|\n | +-+\n |\n | |\n | 6\n | |\n | v\n\n +-+ <-7- +-+ +-+\n |0| |2| <-5- |3|\n +-+ -8-> +-+ +-+' dpr = mg.resolver networkx_graph_data = [(0, 1, 9), (0, 2, 8), (2, 0, 7), (1, 2, 6), (3, 2, 5)] networkx_graph_unwrapped = nx.DiGraph() networkx_graph_unwrapped.add_weighted_edges_from(networkx_graph_data, weight='weight') x = dpr.wrappers.Graph.NetworkXGraph(networkx_graph_unwrapped) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] weights = [9, 8, 6, 7, 5] cdf = cudf.DataFrame({'source': sources, 'destination': destinations, 'weight': weights}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='source', destination='destination', edge_attr='weight') intermediate = dpr.wrappers.Graph.CuGraph(g, None) y = dpr.translate(x, CuGraph) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraph)) == 1)
+-+ ----9-> |1| | +-+ | | | | 6 | | | v +-+ <-7- +-+ +-+ |0| |2| <-5- |3| +-+ -8-> +-+ +-+
metagraph_cuda/tests/translators/test_to_cugraph_translators.py
test_weighted_directed_networkx_to_cugraph
jim22k/metagraph-cuda
0
python
def test_weighted_directed_networkx_to_cugraph(): '\n +-+\n ----9-> |1|\n | +-+\n |\n | |\n | 6\n | |\n | v\n\n +-+ <-7- +-+ +-+\n |0| |2| <-5- |3|\n +-+ -8-> +-+ +-+' dpr = mg.resolver networkx_graph_data = [(0, 1, 9), (0, 2, 8), (2, 0, 7), (1, 2, 6), (3, 2, 5)] networkx_graph_unwrapped = nx.DiGraph() networkx_graph_unwrapped.add_weighted_edges_from(networkx_graph_data, weight='weight') x = dpr.wrappers.Graph.NetworkXGraph(networkx_graph_unwrapped) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] weights = [9, 8, 6, 7, 5] cdf = cudf.DataFrame({'source': sources, 'destination': destinations, 'weight': weights}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='source', destination='destination', edge_attr='weight') intermediate = dpr.wrappers.Graph.CuGraph(g, None) y = dpr.translate(x, CuGraph) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraph)) == 1)
def test_weighted_directed_networkx_to_cugraph(): '\n +-+\n ----9-> |1|\n | +-+\n |\n | |\n | 6\n | |\n | v\n\n +-+ <-7- +-+ +-+\n |0| |2| <-5- |3|\n +-+ -8-> +-+ +-+' dpr = mg.resolver networkx_graph_data = [(0, 1, 9), (0, 2, 8), (2, 0, 7), (1, 2, 6), (3, 2, 5)] networkx_graph_unwrapped = nx.DiGraph() networkx_graph_unwrapped.add_weighted_edges_from(networkx_graph_data, weight='weight') x = dpr.wrappers.Graph.NetworkXGraph(networkx_graph_unwrapped) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] weights = [9, 8, 6, 7, 5] cdf = cudf.DataFrame({'source': sources, 'destination': destinations, 'weight': weights}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='source', destination='destination', edge_attr='weight') intermediate = dpr.wrappers.Graph.CuGraph(g, None) y = dpr.translate(x, CuGraph) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraph)) == 1)<|docstring|>+-+ ----9-> |1| | +-+ | | | | 6 | | | v +-+ <-7- +-+ +-+ |0| |2| <-5- |3| +-+ -8-> +-+ +-+<|endoftext|>
be23f74c7b065cb47b41b56bd0e61d4800817e66907baf22aa522d7f16310d37
def test_pandas_edge_set_to_cugraph_edge_set(): '\n +-+\n ------> |1|\n | +-+\n |\n | |\n | v\n\n +-+ <-- +-+ +-+\n |0| |2| <-- |3|\n +-+ --> +-+ +-+' dpr = mg.resolver pdf = pd.DataFrame({'src': (0, 0, 2, 1, 3), 'dst': (1, 2, 0, 2, 2)}) x = dpr.wrappers.EdgeSet.PandasEdgeSet(pdf, src_label='src', dst_label='dst', is_directed=True) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] cdf = cudf.DataFrame({'source': sources, 'destination': destinations}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='source', destination='destination') intermediate = dpr.wrappers.EdgeSet.CuGraphEdgeSet(g) y = dpr.translate(x, CuGraphEdgeSet) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraphEdgeSet)) == 1)
+-+ ------> |1| | +-+ | | | | v +-+ <-- +-+ +-+ |0| |2| <-- |3| +-+ --> +-+ +-+
metagraph_cuda/tests/translators/test_to_cugraph_translators.py
test_pandas_edge_set_to_cugraph_edge_set
jim22k/metagraph-cuda
0
python
def test_pandas_edge_set_to_cugraph_edge_set(): '\n +-+\n ------> |1|\n | +-+\n |\n | |\n | v\n\n +-+ <-- +-+ +-+\n |0| |2| <-- |3|\n +-+ --> +-+ +-+' dpr = mg.resolver pdf = pd.DataFrame({'src': (0, 0, 2, 1, 3), 'dst': (1, 2, 0, 2, 2)}) x = dpr.wrappers.EdgeSet.PandasEdgeSet(pdf, src_label='src', dst_label='dst', is_directed=True) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] cdf = cudf.DataFrame({'source': sources, 'destination': destinations}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='source', destination='destination') intermediate = dpr.wrappers.EdgeSet.CuGraphEdgeSet(g) y = dpr.translate(x, CuGraphEdgeSet) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraphEdgeSet)) == 1)
def test_pandas_edge_set_to_cugraph_edge_set(): '\n +-+\n ------> |1|\n | +-+\n |\n | |\n | v\n\n +-+ <-- +-+ +-+\n |0| |2| <-- |3|\n +-+ --> +-+ +-+' dpr = mg.resolver pdf = pd.DataFrame({'src': (0, 0, 2, 1, 3), 'dst': (1, 2, 0, 2, 2)}) x = dpr.wrappers.EdgeSet.PandasEdgeSet(pdf, src_label='src', dst_label='dst', is_directed=True) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] cdf = cudf.DataFrame({'source': sources, 'destination': destinations}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='source', destination='destination') intermediate = dpr.wrappers.EdgeSet.CuGraphEdgeSet(g) y = dpr.translate(x, CuGraphEdgeSet) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraphEdgeSet)) == 1)<|docstring|>+-+ ------> |1| | +-+ | | | | v +-+ <-- +-+ +-+ |0| |2| <-- |3| +-+ --> +-+ +-+<|endoftext|>
8a5452bcb4ca9dac3fc87705c29320679c0745ddb32206ebfd405704f279b4f0
def test_pandas_edge_map_to_cugraph_edge_map(): '\n +-+\n ----9-> |1|\n | +-+\n |\n | |\n | 6\n | |\n | v\n\n +-+ <-7- +-+ +-+\n |0| |2| <-5- |3|\n +-+ -8-> +-+ +-+' dpr = mg.resolver pdf = pd.DataFrame({'src': (0, 0, 2, 1, 3), 'dst': (1, 2, 0, 2, 2), 'w': (9, 8, 7, 6, 5)}) x = dpr.wrappers.EdgeMap.PandasEdgeMap(pdf, src_label='src', dst_label='dst', weight_label='w', is_directed=True) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] weights = [9, 8, 7, 6, 5] cdf = cudf.DataFrame({'source': sources, 'destination': destinations, 'weights': weights}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='source', destination='destination', edge_attr='weights') intermediate = dpr.wrappers.EdgeMap.CuGraphEdgeMap(g) y = dpr.translate(x, CuGraphEdgeMap) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraphEdgeMap)) == 1)
+-+ ----9-> |1| | +-+ | | | | 6 | | | v +-+ <-7- +-+ +-+ |0| |2| <-5- |3| +-+ -8-> +-+ +-+
metagraph_cuda/tests/translators/test_to_cugraph_translators.py
test_pandas_edge_map_to_cugraph_edge_map
jim22k/metagraph-cuda
0
python
def test_pandas_edge_map_to_cugraph_edge_map(): '\n +-+\n ----9-> |1|\n | +-+\n |\n | |\n | 6\n | |\n | v\n\n +-+ <-7- +-+ +-+\n |0| |2| <-5- |3|\n +-+ -8-> +-+ +-+' dpr = mg.resolver pdf = pd.DataFrame({'src': (0, 0, 2, 1, 3), 'dst': (1, 2, 0, 2, 2), 'w': (9, 8, 7, 6, 5)}) x = dpr.wrappers.EdgeMap.PandasEdgeMap(pdf, src_label='src', dst_label='dst', weight_label='w', is_directed=True) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] weights = [9, 8, 7, 6, 5] cdf = cudf.DataFrame({'source': sources, 'destination': destinations, 'weights': weights}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='source', destination='destination', edge_attr='weights') intermediate = dpr.wrappers.EdgeMap.CuGraphEdgeMap(g) y = dpr.translate(x, CuGraphEdgeMap) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraphEdgeMap)) == 1)
def test_pandas_edge_map_to_cugraph_edge_map(): '\n +-+\n ----9-> |1|\n | +-+\n |\n | |\n | 6\n | |\n | v\n\n +-+ <-7- +-+ +-+\n |0| |2| <-5- |3|\n +-+ -8-> +-+ +-+' dpr = mg.resolver pdf = pd.DataFrame({'src': (0, 0, 2, 1, 3), 'dst': (1, 2, 0, 2, 2), 'w': (9, 8, 7, 6, 5)}) x = dpr.wrappers.EdgeMap.PandasEdgeMap(pdf, src_label='src', dst_label='dst', weight_label='w', is_directed=True) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] weights = [9, 8, 7, 6, 5] cdf = cudf.DataFrame({'source': sources, 'destination': destinations, 'weights': weights}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='source', destination='destination', edge_attr='weights') intermediate = dpr.wrappers.EdgeMap.CuGraphEdgeMap(g) y = dpr.translate(x, CuGraphEdgeMap) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraphEdgeMap)) == 1)<|docstring|>+-+ ----9-> |1| | +-+ | | | | 6 | | | v +-+ <-7- +-+ +-+ |0| |2| <-5- |3| +-+ -8-> +-+ +-+<|endoftext|>
db2757a8eb2e338d77c5afa64be841560c965757c20a3403f71fbc42dc7147d1
def test_scipy_graph_to_cugraph_graph(): '\n +-+ +-+\n ------> |1| |4|\n | +-+ +-+\n |\n | |\n | v\n\n +-+ <-- +-+ +-+\n |0| |2| <-- |3|\n +-+ --> +-+ +-+\n ' dpr = mg.resolver scipy_sparse_matrix = ss.csr_matrix(np.array([[0, 1, 1, 0, 0], [0, 0, 1, 0, 0], [1, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0]], dtype=bool)) x = dpr.wrappers.Graph.ScipyGraph(scipy_sparse_matrix, np.arange(5)) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] cdf = cudf.DataFrame({'Source': sources, 'Destination': destinations}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='Source', destination='Destination') intermediate = dpr.wrappers.Graph.CuGraph(g, cudf.Series(range(5))) y = dpr.translate(x, CuGraph) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraph)) == 1)
+-+ +-+ ------> |1| |4| | +-+ +-+ | | | | v +-+ <-- +-+ +-+ |0| |2| <-- |3| +-+ --> +-+ +-+
metagraph_cuda/tests/translators/test_to_cugraph_translators.py
test_scipy_graph_to_cugraph_graph
jim22k/metagraph-cuda
0
python
def test_scipy_graph_to_cugraph_graph(): '\n +-+ +-+\n ------> |1| |4|\n | +-+ +-+\n |\n | |\n | v\n\n +-+ <-- +-+ +-+\n |0| |2| <-- |3|\n +-+ --> +-+ +-+\n ' dpr = mg.resolver scipy_sparse_matrix = ss.csr_matrix(np.array([[0, 1, 1, 0, 0], [0, 0, 1, 0, 0], [1, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0]], dtype=bool)) x = dpr.wrappers.Graph.ScipyGraph(scipy_sparse_matrix, np.arange(5)) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] cdf = cudf.DataFrame({'Source': sources, 'Destination': destinations}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='Source', destination='Destination') intermediate = dpr.wrappers.Graph.CuGraph(g, cudf.Series(range(5))) y = dpr.translate(x, CuGraph) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraph)) == 1)
def test_scipy_graph_to_cugraph_graph(): '\n +-+ +-+\n ------> |1| |4|\n | +-+ +-+\n |\n | |\n | v\n\n +-+ <-- +-+ +-+\n |0| |2| <-- |3|\n +-+ --> +-+ +-+\n ' dpr = mg.resolver scipy_sparse_matrix = ss.csr_matrix(np.array([[0, 1, 1, 0, 0], [0, 0, 1, 0, 0], [1, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0]], dtype=bool)) x = dpr.wrappers.Graph.ScipyGraph(scipy_sparse_matrix, np.arange(5)) sources = [0, 0, 1, 2, 3] destinations = [1, 2, 2, 0, 2] cdf = cudf.DataFrame({'Source': sources, 'Destination': destinations}) g = cugraph.DiGraph() g.from_cudf_edgelist(cdf, source='Source', destination='Destination') intermediate = dpr.wrappers.Graph.CuGraph(g, cudf.Series(range(5))) y = dpr.translate(x, CuGraph) dpr.assert_equal(y, intermediate) assert (len(dpr.plan.translate(x, CuGraph)) == 1)<|docstring|>+-+ +-+ ------> |1| |4| | +-+ +-+ | | | | v +-+ <-- +-+ +-+ |0| |2| <-- |3| +-+ --> +-+ +-+<|endoftext|>
ca93e22220258ce728459a5d5fbf63c9de1bacafa31099da17bc171b11870b1c
def __init__(self, host, key): '\n Requests are issued against an account specific domain\n ' if (not host.startswith('http')): self.host = ('https://' + host) else: self.host = host self.key = key self.contacts = Contacts(self)
Requests are issued against an account specific domain
activecampaign/client.py
__init__
bennylope/activecampaign
9
python
def __init__(self, host, key): '\n \n ' if (not host.startswith('http')): self.host = ('https://' + host) else: self.host = host self.key = key self.contacts = Contacts(self)
def __init__(self, host, key): '\n \n ' if (not host.startswith('http')): self.host = ('https://' + host) else: self.host = host self.key = key self.contacts = Contacts(self)<|docstring|>Requests are issued against an account specific domain<|endoftext|>
0880fe2a12fd464cdea7424098f56245d31cb1ac18d08e186d54bbc8f9b8a760
def request(self, method, action, data=None): '\n\n Args:\n method: the HTTP method to use as an uppercase string\n action: the name of the URL parameter defining the API action\n data: a list of tuples for the data. Used in lieu of a dictionary to allow\n for key duplication\n\n Returns:\n\n ' url = '{0}/admin/api.php'.format(self.host) params = [('api_action', action), ('api_key', self.key), ('api_output', 'json')] if (method in ('GET', 'DELETE')): if (data is not None): params += data data = [] else: data = data response = requests.request(method, url, params=params, data=data) return response
Args: method: the HTTP method to use as an uppercase string action: the name of the URL parameter defining the API action data: a list of tuples for the data. Used in lieu of a dictionary to allow for key duplication Returns:
activecampaign/client.py
request
bennylope/activecampaign
9
python
def request(self, method, action, data=None): '\n\n Args:\n method: the HTTP method to use as an uppercase string\n action: the name of the URL parameter defining the API action\n data: a list of tuples for the data. Used in lieu of a dictionary to allow\n for key duplication\n\n Returns:\n\n ' url = '{0}/admin/api.php'.format(self.host) params = [('api_action', action), ('api_key', self.key), ('api_output', 'json')] if (method in ('GET', 'DELETE')): if (data is not None): params += data data = [] else: data = data response = requests.request(method, url, params=params, data=data) return response
def request(self, method, action, data=None): '\n\n Args:\n method: the HTTP method to use as an uppercase string\n action: the name of the URL parameter defining the API action\n data: a list of tuples for the data. Used in lieu of a dictionary to allow\n for key duplication\n\n Returns:\n\n ' url = '{0}/admin/api.php'.format(self.host) params = [('api_action', action), ('api_key', self.key), ('api_output', 'json')] if (method in ('GET', 'DELETE')): if (data is not None): params += data data = [] else: data = data response = requests.request(method, url, params=params, data=data) return response<|docstring|>Args: method: the HTTP method to use as an uppercase string action: the name of the URL parameter defining the API action data: a list of tuples for the data. Used in lieu of a dictionary to allow for key duplication Returns:<|endoftext|>
6b374abf84061d85d61ee7fe7a276facfed1d8b31049239600df1455d37ab60d
def process_x(self, raw_x_batch): 'Pre-processes each batch of features\n before being fed to the model.' return self.client_dataset_processor.process_x(raw_x_batch)
Pre-processes each batch of features before being fed to the model.
sources/datasets/client_dataset_definitions/client_dataset.py
process_x
M4rukku/impact_of_non_iid_data_in_federated_learning
0
python
def process_x(self, raw_x_batch): 'Pre-processes each batch of features\n before being fed to the model.' return self.client_dataset_processor.process_x(raw_x_batch)
def process_x(self, raw_x_batch): 'Pre-processes each batch of features\n before being fed to the model.' return self.client_dataset_processor.process_x(raw_x_batch)<|docstring|>Pre-processes each batch of features before being fed to the model.<|endoftext|>
1f6e5d9d53073fe946859379e5aa8afe412e2a99195ceec06cefc6ef3757639d
def process_y(self, raw_y_batch): 'Pre-processes each batch of labels before being fed to the model.' return self.client_dataset_processor.process_y(raw_y_batch)
Pre-processes each batch of labels before being fed to the model.
sources/datasets/client_dataset_definitions/client_dataset.py
process_y
M4rukku/impact_of_non_iid_data_in_federated_learning
0
python
def process_y(self, raw_y_batch): return self.client_dataset_processor.process_y(raw_y_batch)
def process_y(self, raw_y_batch): return self.client_dataset_processor.process_y(raw_y_batch)<|docstring|>Pre-processes each batch of labels before being fed to the model.<|endoftext|>
1440063acfd3951c1f7f12b00b66164f318f1650dd44174c751c98b157e9cd1a
@property @throw_error_outside_context def training_data(self): 'Returns the Training Data as pair of arrays containing the samples x,\n and classification y' self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data
Returns the Training Data as pair of arrays containing the samples x, and classification y
sources/datasets/client_dataset_definitions/client_dataset.py
training_data
M4rukku/impact_of_non_iid_data_in_federated_learning
0
python
@property @throw_error_outside_context def training_data(self): 'Returns the Training Data as pair of arrays containing the samples x,\n and classification y' self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data
@property @throw_error_outside_context def training_data(self): 'Returns the Training Data as pair of arrays containing the samples x,\n and classification y' self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data<|docstring|>Returns the Training Data as pair of arrays containing the samples x, and classification y<|endoftext|>
cc70c459cc61755ca2a782826787d0a193832c89313f468f2f31651b2d6626e8
@property @throw_error_outside_context def training_data_x(self): 'Returns the Training Data as an array of samples' self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[0]
Returns the Training Data as an array of samples
sources/datasets/client_dataset_definitions/client_dataset.py
training_data_x
M4rukku/impact_of_non_iid_data_in_federated_learning
0
python
@property @throw_error_outside_context def training_data_x(self): self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[0]
@property @throw_error_outside_context def training_data_x(self): self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[0]<|docstring|>Returns the Training Data as an array of samples<|endoftext|>
61a02de4611988cca23f4e288199e369302319a8bd547d0e18e5fc08339b59d0
@property @throw_error_outside_context def training_data_y(self): 'Returns the Classifications for the Training Data as array' self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[1]
Returns the Classifications for the Training Data as array
sources/datasets/client_dataset_definitions/client_dataset.py
training_data_y
M4rukku/impact_of_non_iid_data_in_federated_learning
0
python
@property @throw_error_outside_context def training_data_y(self): self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[1]
@property @throw_error_outside_context def training_data_y(self): self._train_data = self._lazy_initialise_data(self._train_data, DatasetComponents.TRAIN) return self._train_data[1]<|docstring|>Returns the Classifications for the Training Data as array<|endoftext|>
cd25155f3f607cbc8a41ed90c84da721a3290e6c0078ab779ac8d0e591f60aa2
@property @throw_error_outside_context def test_data(self): 'Returns the Training Data as pair of arrays containing the samples x,\n and classification y' self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data
Returns the Training Data as pair of arrays containing the samples x, and classification y
sources/datasets/client_dataset_definitions/client_dataset.py
test_data
M4rukku/impact_of_non_iid_data_in_federated_learning
0
python
@property @throw_error_outside_context def test_data(self): 'Returns the Training Data as pair of arrays containing the samples x,\n and classification y' self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data
@property @throw_error_outside_context def test_data(self): 'Returns the Training Data as pair of arrays containing the samples x,\n and classification y' self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data<|docstring|>Returns the Training Data as pair of arrays containing the samples x, and classification y<|endoftext|>
1f50214012e84fac487affb753fda9f3187d9b9999fd5489fc4ef2c1cf04adaf
@property @throw_error_outside_context def test_data_x(self): 'Returns the Test Data as an array of samples' self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[0]
Returns the Test Data as an array of samples
sources/datasets/client_dataset_definitions/client_dataset.py
test_data_x
M4rukku/impact_of_non_iid_data_in_federated_learning
0
python
@property @throw_error_outside_context def test_data_x(self): self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[0]
@property @throw_error_outside_context def test_data_x(self): self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[0]<|docstring|>Returns the Test Data as an array of samples<|endoftext|>
2a8a272ba7ec06bab782a2da5cde907277f4f8fd2183ccdc545944efa51cae64
@property @throw_error_outside_context def test_data_y(self): 'Returns the Classifications for the Test Data as array' self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[1]
Returns the Classifications for the Test Data as array
sources/datasets/client_dataset_definitions/client_dataset.py
test_data_y
M4rukku/impact_of_non_iid_data_in_federated_learning
0
python
@property @throw_error_outside_context def test_data_y(self): self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[1]
@property @throw_error_outside_context def test_data_y(self): self._test_data = self._lazy_initialise_data(self._test_data, DatasetComponents.TEST) return self._test_data[1]<|docstring|>Returns the Classifications for the Test Data as array<|endoftext|>
3c91cd74748c94070c74360e0664b6c8c7d9dfa74aa10a28c729430c4e7c7e62
@property @throw_error_outside_context def validation_data(self): 'Returns the Validation Data as pair of arrays containing the\n samples x,\n and classification y' self._validation_data = self._lazy_initialise_data(self._validation_data, DatasetComponents.VALIDATION) return self._validation_data
Returns the Validation Data as pair of arrays containing the samples x, and classification y
sources/datasets/client_dataset_definitions/client_dataset.py
validation_data
M4rukku/impact_of_non_iid_data_in_federated_learning
0
python
@property @throw_error_outside_context def validation_data(self): 'Returns the Validation Data as pair of arrays containing the\n samples x,\n and classification y' self._validation_data = self._lazy_initialise_data(self._validation_data, DatasetComponents.VALIDATION) return self._validation_data
@property @throw_error_outside_context def validation_data(self): 'Returns the Validation Data as pair of arrays containing the\n samples x,\n and classification y' self._validation_data = self._lazy_initialise_data(self._validation_data, DatasetComponents.VALIDATION) return self._validation_data<|docstring|>Returns the Validation Data as pair of arrays containing the samples x, and classification y<|endoftext|>
9b2ee35dc3dc9a389efc6f0fd87f433cd6efd87041df5cb3b84b872ec58200bd
@property @throw_error_outside_context def validation_data_x(self): 'Returns the Validation Data as an array of samples' self._validation_data = self._lazy_initialise_data(self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[0]
Returns the Validation Data as an array of samples
sources/datasets/client_dataset_definitions/client_dataset.py
validation_data_x
M4rukku/impact_of_non_iid_data_in_federated_learning
0
python
@property @throw_error_outside_context def validation_data_x(self): self._validation_data = self._lazy_initialise_data(self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[0]
@property @throw_error_outside_context def validation_data_x(self): self._validation_data = self._lazy_initialise_data(self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[0]<|docstring|>Returns the Validation Data as an array of samples<|endoftext|>
1f1073b8dc375f852670699a0a31eaa7b1a6466589fdcf5d867e17c1e965a264
@property @throw_error_outside_context def validation_data_y(self): 'Returns the Classifications for the Validation Data as array' self._validation_data = self._lazy_initialise_data(self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[1]
Returns the Classifications for the Validation Data as array
sources/datasets/client_dataset_definitions/client_dataset.py
validation_data_y
M4rukku/impact_of_non_iid_data_in_federated_learning
0
python
@property @throw_error_outside_context def validation_data_y(self): self._validation_data = self._lazy_initialise_data(self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[1]
@property @throw_error_outside_context def validation_data_y(self): self._validation_data = self._lazy_initialise_data(self._validation_data, DatasetComponents.VALIDATION) return self._validation_data[1]<|docstring|>Returns the Classifications for the Validation Data as array<|endoftext|>
9e3e470f4cc7e455619fc66303b8d0c86f4e99e0b7b4cb291100cedde92d1031
def namedtuple_asdict(obj): '\n Serializing a nested namedtuple into a Python dict\n ' if (obj is None): return obj if hasattr(obj, '_asdict'): return OrderedDict(zip(obj._fields, (namedtuple_asdict(item) for item in obj))) if isinstance(obj, str): return obj if hasattr(obj, 'keys'): return OrderedDict(zip(obj.keys(), (namedtuple_asdict(item) for item in obj.values()))) if hasattr(obj, '__iter__'): return type(obj)((namedtuple_asdict(item) for item in obj)) return obj
Serializing a nested namedtuple into a Python dict
jsonrepo/record.py
namedtuple_asdict
romaryd/python-jsonrepo
1
python
def namedtuple_asdict(obj): '\n \n ' if (obj is None): return obj if hasattr(obj, '_asdict'): return OrderedDict(zip(obj._fields, (namedtuple_asdict(item) for item in obj))) if isinstance(obj, str): return obj if hasattr(obj, 'keys'): return OrderedDict(zip(obj.keys(), (namedtuple_asdict(item) for item in obj.values()))) if hasattr(obj, '__iter__'): return type(obj)((namedtuple_asdict(item) for item in obj)) return obj
def namedtuple_asdict(obj): '\n \n ' if (obj is None): return obj if hasattr(obj, '_asdict'): return OrderedDict(zip(obj._fields, (namedtuple_asdict(item) for item in obj))) if isinstance(obj, str): return obj if hasattr(obj, 'keys'): return OrderedDict(zip(obj.keys(), (namedtuple_asdict(item) for item in obj.values()))) if hasattr(obj, '__iter__'): return type(obj)((namedtuple_asdict(item) for item in obj)) return obj<|docstring|>Serializing a nested namedtuple into a Python dict<|endoftext|>
d0cf95bc7868761d5f3c22b395e8e5348a2a7d713e9f37f2cd859af3d195148b
@classmethod def from_json(cls, json_dump): '\n JSON deserialization\n ' raise NotImplementedError
JSON deserialization
jsonrepo/record.py
from_json
romaryd/python-jsonrepo
1
python
@classmethod def from_json(cls, json_dump): '\n \n ' raise NotImplementedError
@classmethod def from_json(cls, json_dump): '\n \n ' raise NotImplementedError<|docstring|>JSON deserialization<|endoftext|>
96b0aa63ceb7123a1c735e2cd2fcb311c224c20e55c705070db6f709e2495cae
def to_json(self): '\n JSON serialization\n ' raise NotImplementedError
JSON serialization
jsonrepo/record.py
to_json
romaryd/python-jsonrepo
1
python
def to_json(self): '\n \n ' raise NotImplementedError
def to_json(self): '\n \n ' raise NotImplementedError<|docstring|>JSON serialization<|endoftext|>
d637a900edf70880a2a93b0dd2827fe49a4f65110f4d03494359fef90d68250e
@classmethod def from_json(cls, json_dump): '\n How to get a context from a json dump\n ' context = cls() if (json_dump is None): return None ctxt = json.loads(json_dump) for k in ctxt: context[k] = ctxt[k] return context
How to get a context from a json dump
jsonrepo/record.py
from_json
romaryd/python-jsonrepo
1
python
@classmethod def from_json(cls, json_dump): '\n \n ' context = cls() if (json_dump is None): return None ctxt = json.loads(json_dump) for k in ctxt: context[k] = ctxt[k] return context
@classmethod def from_json(cls, json_dump): '\n \n ' context = cls() if (json_dump is None): return None ctxt = json.loads(json_dump) for k in ctxt: context[k] = ctxt[k] return context<|docstring|>How to get a context from a json dump<|endoftext|>
f0b2eea9406df40ed4eed341309e76d149c3b23de75a3fb26a3cfcec2315e7cb
def to_json(self): '\n JSON serialization\n ' return json.dumps(self.copy())
JSON serialization
jsonrepo/record.py
to_json
romaryd/python-jsonrepo
1
python
def to_json(self): '\n \n ' return json.dumps(self.copy())
def to_json(self): '\n \n ' return json.dumps(self.copy())<|docstring|>JSON serialization<|endoftext|>
324c65cad395b4b94dec47386ec6d6248e4131a542398b4d03d4f7af1a5b385d
@pytest.mark.parametrize('start, dest', [('1.1.1', '1.1.2'), ('1.1.2', '1.1.1'), ('1.1.1', '1.2.1'), ('1.1.1', '1.2.2'), ('1.1.2', '1.2.1')]) def test_semver_safe(start, dest): ' Tests semver safe loading' start = VersionInfo.parse(start) dest = VersionInfo.parse(dest) assert (semver_safe(start, dest) == True)
Tests semver safe loading
game_core/tests/quest/test_quest_semver.py
test_semver_safe
meseta/advent-of-code-2020
1
python
@pytest.mark.parametrize('start, dest', [('1.1.1', '1.1.2'), ('1.1.2', '1.1.1'), ('1.1.1', '1.2.1'), ('1.1.1', '1.2.2'), ('1.1.2', '1.2.1')]) def test_semver_safe(start, dest): ' ' start = VersionInfo.parse(start) dest = VersionInfo.parse(dest) assert (semver_safe(start, dest) == True)
@pytest.mark.parametrize('start, dest', [('1.1.1', '1.1.2'), ('1.1.2', '1.1.1'), ('1.1.1', '1.2.1'), ('1.1.1', '1.2.2'), ('1.1.2', '1.2.1')]) def test_semver_safe(start, dest): ' ' start = VersionInfo.parse(start) dest = VersionInfo.parse(dest) assert (semver_safe(start, dest) == True)<|docstring|>Tests semver safe loading<|endoftext|>
fd786b4d8aca31ef37b223e03719f012a688daf161b648980369759c96f75011
@pytest.mark.parametrize('start, dest', [('2.1.1', '1.1.1'), ('1.1.1', '2.1.1'), ('1.2.1', '1.1.1')]) def test_semver_unsafe(start, dest): ' Tests semver unsafe loading ' start = VersionInfo.parse(start) dest = VersionInfo.parse(dest) assert (semver_safe(start, dest) == False)
Tests semver unsafe loading
game_core/tests/quest/test_quest_semver.py
test_semver_unsafe
meseta/advent-of-code-2020
1
python
@pytest.mark.parametrize('start, dest', [('2.1.1', '1.1.1'), ('1.1.1', '2.1.1'), ('1.2.1', '1.1.1')]) def test_semver_unsafe(start, dest): ' ' start = VersionInfo.parse(start) dest = VersionInfo.parse(dest) assert (semver_safe(start, dest) == False)
@pytest.mark.parametrize('start, dest', [('2.1.1', '1.1.1'), ('1.1.1', '2.1.1'), ('1.2.1', '1.1.1')]) def test_semver_unsafe(start, dest): ' ' start = VersionInfo.parse(start) dest = VersionInfo.parse(dest) assert (semver_safe(start, dest) == False)<|docstring|>Tests semver unsafe loading<|endoftext|>
77548150a46dd1f641bf139da131d3e88c0e45da82cc831d7d4507bf616f25a9
def get_queue_manager_connection(config): '\n Get the queue manager connection\n ' if config.ssl: return get_ssl_connection(config) else: return get_normal_connection(config)
Get the queue manager connection
ibm_mq/datadog_checks/ibm_mq/connection.py
get_queue_manager_connection
tylerbenson/integrations-core
4
python
def get_queue_manager_connection(config): '\n \n ' if config.ssl: return get_ssl_connection(config) else: return get_normal_connection(config)
def get_queue_manager_connection(config): '\n \n ' if config.ssl: return get_ssl_connection(config) else: return get_normal_connection(config)<|docstring|>Get the queue manager connection<|endoftext|>
655e54acdb38319b3bcdaeeb317132e6e44403037f6e098fe11017544c0ef09e
def get_normal_connection(config): '\n Get the connection either with a username and password or without\n ' if (config.username and config.password): log.debug('connecting with username and password') queue_manager = pymqi.connect(config.queue_manager_name, config.channel, config.host_and_port, config.username, config.password) else: log.debug('connecting without a username and password') queue_manager = pymqi.connect(config.queue_manager_name, config.channel, config.host_and_port) return queue_manager
Get the connection either with a username and password or without
ibm_mq/datadog_checks/ibm_mq/connection.py
get_normal_connection
tylerbenson/integrations-core
4
python
def get_normal_connection(config): '\n \n ' if (config.username and config.password): log.debug('connecting with username and password') queue_manager = pymqi.connect(config.queue_manager_name, config.channel, config.host_and_port, config.username, config.password) else: log.debug('connecting without a username and password') queue_manager = pymqi.connect(config.queue_manager_name, config.channel, config.host_and_port) return queue_manager
def get_normal_connection(config): '\n \n ' if (config.username and config.password): log.debug('connecting with username and password') queue_manager = pymqi.connect(config.queue_manager_name, config.channel, config.host_and_port, config.username, config.password) else: log.debug('connecting without a username and password') queue_manager = pymqi.connect(config.queue_manager_name, config.channel, config.host_and_port) return queue_manager<|docstring|>Get the connection either with a username and password or without<|endoftext|>
21301dc2af321b208d0022d6690130b3e853bc0860e8d0eb6ddc6d39d75adbb1
def get_ssl_connection(config): '\n Get the connection with SSL\n ' cd = pymqi.CD() cd.ChannelName = config.channel cd.ConnectionName = config.host_and_port cd.ChannelType = pymqi.CMQC.MQCHT_CLNTCONN cd.TransportType = pymqi.CMQC.MQXPT_TCP cd.SSLCipherSpec = config.ssl_cipher_spec sco = pymqi.SCO() sco.KeyRepository = config.key_repo_location queue_manager = pymqi.QueueManager(None) queue_manager.connect_with_options(config.queue_manager, cd, sco) return queue_manager
Get the connection with SSL
ibm_mq/datadog_checks/ibm_mq/connection.py
get_ssl_connection
tylerbenson/integrations-core
4
python
def get_ssl_connection(config): '\n \n ' cd = pymqi.CD() cd.ChannelName = config.channel cd.ConnectionName = config.host_and_port cd.ChannelType = pymqi.CMQC.MQCHT_CLNTCONN cd.TransportType = pymqi.CMQC.MQXPT_TCP cd.SSLCipherSpec = config.ssl_cipher_spec sco = pymqi.SCO() sco.KeyRepository = config.key_repo_location queue_manager = pymqi.QueueManager(None) queue_manager.connect_with_options(config.queue_manager, cd, sco) return queue_manager
def get_ssl_connection(config): '\n \n ' cd = pymqi.CD() cd.ChannelName = config.channel cd.ConnectionName = config.host_and_port cd.ChannelType = pymqi.CMQC.MQCHT_CLNTCONN cd.TransportType = pymqi.CMQC.MQXPT_TCP cd.SSLCipherSpec = config.ssl_cipher_spec sco = pymqi.SCO() sco.KeyRepository = config.key_repo_location queue_manager = pymqi.QueueManager(None) queue_manager.connect_with_options(config.queue_manager, cd, sco) return queue_manager<|docstring|>Get the connection with SSL<|endoftext|>
f10136df4acadb01eec2094ec547276393fd49e391c67ac4d17833b752d35099
def rename_font(input, save_as, fontname=None, familyname=None, fullname=None, sfnt_ref=None, reps=(), clean_up=False, mono=False, remove=(), spaces=()): "\n Parameters\n ----------\n input : str\n Path to font to rename.\n fontname, familyname, fullname : str, str, str\n Font name parameters.\n save_as : str\n Save file path.\n sfnt_ref : str\n path to reference font with right SFNT section\n reps : tuple(tuple)\n replacements for SFNT section like: (('Roboto', 'Open'), ('STIX', 'STYX'))\n mono : bool\n set isFixedPitch flag to 1 and OS/2 PANOSE Proportion to Monospaced\n remove : iterable\n list of characters to delete form a font\n spaces : iterable\n list of characters to copy whitespace (U+0020) into\n " def _rep(obj): if isinstance(obj, str): for pair in reps: obj = obj.replace(pair[0], pair[1]) return obj elif isinstance(obj, tuple): t = tuple((_rep(o) for o in obj)) if (t[1] == 'PostScriptName'): t = tuple(([t[0], t[1], t[2].replace(' ', '')] + list(t[3:]))) return t else: raise RuntimeError('Tried to replace something that is not tuple/str superposition.') shutil.copy(input, save_as) if (sfnt_ref is not None): ref_font = fontforge.open(sfnt_ref) sfnt_names = _rep(ref_font.sfnt_names) ref_font.close() else: sfnt_names = () renamed = fontforge.open(save_as) renamed.sfnt_names = sfnt_names if (fontname is not None): renamed.fontname = fontname if (familyname is not None): renamed.familyname = familyname if (fullname is not None): renamed.fullname = fullname if mono: lst = list(renamed.os2_panose) lst[3] = 9 renamed.os2_panose = tuple(lst) if remove: for c in remove: renamed.selection[ord(c)] = True for i in renamed.selection.byGlyphs: renamed.removeGlyph(i) if spaces: renamed.selection.select(ord(' ')) renamed.copy() renamed.selection.none() for c in spaces: renamed.selection.select(ord(c)) renamed.paste() renamed.selection.none() if (save_as[(- 4):] == '.sfd'): renamed.save(save_as) else: renamed.generate(save_as) if clean_up: try: os.remove(input) except: 
pass renamed.close() if mono: if (os.name != 'nt'): pyexe = which('python3') if (not pyexe): pyexe = which('python') else: pyexe = which('python') if (not pyexe): raise RuntimeError("python executable wasn't found") print(subprocess.check_output([pyexe, p.join(here, 'setisFixedPitch-fonttools.py'), save_as]))
Parameters ---------- input : str Path to font to rename. fontname, familyname, fullname : str, str, str Font name parameters. save_as : str Save file path. sfnt_ref : str path to reference font with right SFNT section reps : tuple(tuple) replacements for SFNT section like: (('Roboto', 'Open'), ('STIX', 'STYX')) mono : bool set isFixedPitch flag to 1 and OS/2 PANOSE Proportion to Monospaced remove : iterable list of characters to delete form a font spaces : iterable list of characters to copy whitespace (U+0020) into
helper.py
rename_font
kiwi0fruit/open-fonts
30
python
def rename_font(input, save_as, fontname=None, familyname=None, fullname=None, sfnt_ref=None, reps=(), clean_up=False, mono=False, remove=(), spaces=()): "\n Parameters\n ----------\n input : str\n Path to font to rename.\n fontname, familyname, fullname : str, str, str\n Font name parameters.\n save_as : str\n Save file path.\n sfnt_ref : str\n path to reference font with right SFNT section\n reps : tuple(tuple)\n replacements for SFNT section like: (('Roboto', 'Open'), ('STIX', 'STYX'))\n mono : bool\n set isFixedPitch flag to 1 and OS/2 PANOSE Proportion to Monospaced\n remove : iterable\n list of characters to delete form a font\n spaces : iterable\n list of characters to copy whitespace (U+0020) into\n " def _rep(obj): if isinstance(obj, str): for pair in reps: obj = obj.replace(pair[0], pair[1]) return obj elif isinstance(obj, tuple): t = tuple((_rep(o) for o in obj)) if (t[1] == 'PostScriptName'): t = tuple(([t[0], t[1], t[2].replace(' ', )] + list(t[3:]))) return t else: raise RuntimeError('Tried to replace something that is not tuple/str superposition.') shutil.copy(input, save_as) if (sfnt_ref is not None): ref_font = fontforge.open(sfnt_ref) sfnt_names = _rep(ref_font.sfnt_names) ref_font.close() else: sfnt_names = () renamed = fontforge.open(save_as) renamed.sfnt_names = sfnt_names if (fontname is not None): renamed.fontname = fontname if (familyname is not None): renamed.familyname = familyname if (fullname is not None): renamed.fullname = fullname if mono: lst = list(renamed.os2_panose) lst[3] = 9 renamed.os2_panose = tuple(lst) if remove: for c in remove: renamed.selection[ord(c)] = True for i in renamed.selection.byGlyphs: renamed.removeGlyph(i) if spaces: renamed.selection.select(ord(' ')) renamed.copy() renamed.selection.none() for c in spaces: renamed.selection.select(ord(c)) renamed.paste() renamed.selection.none() if (save_as[(- 4):] == '.sfd'): renamed.save(save_as) else: renamed.generate(save_as) if clean_up: try: os.remove(input) except: 
pass renamed.close() if mono: if (os.name != 'nt'): pyexe = which('python3') if (not pyexe): pyexe = which('python') else: pyexe = which('python') if (not pyexe): raise RuntimeError("python executable wasn't found") print(subprocess.check_output([pyexe, p.join(here, 'setisFixedPitch-fonttools.py'), save_as]))
def rename_font(input, save_as, fontname=None, familyname=None, fullname=None, sfnt_ref=None, reps=(), clean_up=False, mono=False, remove=(), spaces=()): "\n Parameters\n ----------\n input : str\n Path to font to rename.\n fontname, familyname, fullname : str, str, str\n Font name parameters.\n save_as : str\n Save file path.\n sfnt_ref : str\n path to reference font with right SFNT section\n reps : tuple(tuple)\n replacements for SFNT section like: (('Roboto', 'Open'), ('STIX', 'STYX'))\n mono : bool\n set isFixedPitch flag to 1 and OS/2 PANOSE Proportion to Monospaced\n remove : iterable\n list of characters to delete form a font\n spaces : iterable\n list of characters to copy whitespace (U+0020) into\n " def _rep(obj): if isinstance(obj, str): for pair in reps: obj = obj.replace(pair[0], pair[1]) return obj elif isinstance(obj, tuple): t = tuple((_rep(o) for o in obj)) if (t[1] == 'PostScriptName'): t = tuple(([t[0], t[1], t[2].replace(' ', )] + list(t[3:]))) return t else: raise RuntimeError('Tried to replace something that is not tuple/str superposition.') shutil.copy(input, save_as) if (sfnt_ref is not None): ref_font = fontforge.open(sfnt_ref) sfnt_names = _rep(ref_font.sfnt_names) ref_font.close() else: sfnt_names = () renamed = fontforge.open(save_as) renamed.sfnt_names = sfnt_names if (fontname is not None): renamed.fontname = fontname if (familyname is not None): renamed.familyname = familyname if (fullname is not None): renamed.fullname = fullname if mono: lst = list(renamed.os2_panose) lst[3] = 9 renamed.os2_panose = tuple(lst) if remove: for c in remove: renamed.selection[ord(c)] = True for i in renamed.selection.byGlyphs: renamed.removeGlyph(i) if spaces: renamed.selection.select(ord(' ')) renamed.copy() renamed.selection.none() for c in spaces: renamed.selection.select(ord(c)) renamed.paste() renamed.selection.none() if (save_as[(- 4):] == '.sfd'): renamed.save(save_as) else: renamed.generate(save_as) if clean_up: try: os.remove(input) except: 
pass renamed.close() if mono: if (os.name != 'nt'): pyexe = which('python3') if (not pyexe): pyexe = which('python') else: pyexe = which('python') if (not pyexe): raise RuntimeError("python executable wasn't found") print(subprocess.check_output([pyexe, p.join(here, 'setisFixedPitch-fonttools.py'), save_as]))<|docstring|>Parameters ---------- input : str Path to font to rename. fontname, familyname, fullname : str, str, str Font name parameters. save_as : str Save file path. sfnt_ref : str path to reference font with right SFNT section reps : tuple(tuple) replacements for SFNT section like: (('Roboto', 'Open'), ('STIX', 'STYX')) mono : bool set isFixedPitch flag to 1 and OS/2 PANOSE Proportion to Monospaced remove : iterable list of characters to delete form a font spaces : iterable list of characters to copy whitespace (U+0020) into<|endoftext|>
cd0ca47bc642189dae7afe4f24ccc90c79a18b9361d7748bd035637f96a8a460
@staticmethod def get_schema(max_nesting_depth: Optional[int]=6, nesting_depth: int=0, nesting_list: List[str]=[], max_recursion_limit: Optional[int]=2, include_extension: Optional[bool]=False, extension_fields: Optional[List[str]]=['valueBoolean', 'valueCode', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueId', 'valueInteger', 'valuePositiveInt', 'valueString', 'valueTime', 'valueUnsignedInt', 'valueUri', 'valueUrl'], extension_depth: int=0, max_extension_depth: Optional[int]=2, include_modifierExtension: Optional[bool]=False) -> Union[(StructType, DataType)]: '\n A record of a clinical assessment performed to determine what problem(s) may\n affect the patient and before planning the treatments or management strategies\n that are best to manage a patient\'s condition. Assessments are often 1:1 with\n a clinical consultation / encounter, but this varies greatly depending on the\n clinical workflow. This resource is called "ClinicalImpression" rather than\n "ClinicalAssessment" to avoid confusion with the recording of assessment tools\n such as Apgar score.\n\n\n resourceType: This is a ClinicalImpression resource\n\n id: The logical id of the resource, as used in the URL for the resource. Once\n assigned, this value never changes.\n\n meta: The metadata about the resource. This is content that is maintained by the\n infrastructure. Changes to the content might not always be associated with\n version changes to the resource.\n\n implicitRules: A reference to a set of rules that were followed when the resource was\n constructed, and which must be understood when processing the content. Often,\n this is a reference to an implementation guide that defines the special rules\n along with other profiles etc.\n\n language: The base language in which the resource is written.\n\n text: A human-readable narrative that contains a summary of the resource and can be\n used to represent the content of the resource to a human. 
The narrative need\n not encode all the structured data, but is required to contain sufficient\n detail to make it "clinically safe" for a human to just read the narrative.\n Resource definitions may define what content should be represented in the\n narrative to ensure clinical safety.\n\n contained: These resources do not have an independent existence apart from the resource\n that contains them - they cannot be identified independently, and nor can they\n have their own independent transaction scope.\n\n extension: May be used to represent additional information that is not part of the basic\n definition of the resource. To make the use of extensions safe and manageable,\n there is a strict set of governance applied to the definition and use of\n extensions. Though any implementer can define an extension, there is a set of\n requirements that SHALL be met as part of the definition of the extension.\n\n modifierExtension: May be used to represent additional information that is not part of the basic\n definition of the resource and that modifies the understanding of the element\n that contains it and/or the understanding of the containing element\'s\n descendants. Usually modifier elements provide negation or qualification. To\n make the use of extensions safe and manageable, there is a strict set of\n governance applied to the definition and use of extensions. Though any\n implementer is allowed to define an extension, there is a set of requirements\n that SHALL be met as part of the definition of the extension. 
Applications\n processing a resource are required to check for modifier extensions.\n\n Modifier extensions SHALL NOT change the meaning of any elements on Resource\n or DomainResource (including cannot change the meaning of modifierExtension\n itself).\n\n identifier: Business identifiers assigned to this clinical impression by the performer or\n other systems which remain constant as the resource is updated and propagates\n from server to server.\n\n status: Identifies the workflow status of the assessment.\n\n statusReason: Captures the reason for the current state of the ClinicalImpression.\n\n code: Categorizes the type of clinical assessment performed.\n\n description: A summary of the context and/or cause of the assessment - why / where it was\n performed, and what patient events/status prompted it.\n\n subject: The patient or group of individuals assessed as part of this record.\n\n encounter: The Encounter during which this ClinicalImpression was created or to which the\n creation of this record is tightly associated.\n\n effectiveDateTime: The point in time or period over which the subject was assessed.\n\n effectivePeriod: The point in time or period over which the subject was assessed.\n\n date: Indicates when the documentation of the assessment was complete.\n\n assessor: The clinician performing the assessment.\n\n previous: A reference to the last assessment that was conducted on this patient.\n Assessments are often/usually ongoing in nature; a care provider (practitioner\n or team) will make new assessments on an ongoing basis as new data arises or\n the patient\'s conditions changes.\n\n problem: A list of the relevant problems/conditions for a patient.\n\n investigation: One or more sets of investigations (signs, symptoms, etc.). The actual\n grouping of investigations varies greatly depending on the type and context of\n the assessment. 
These investigations may include data generated during the\n assessment process, or data previously generated and recorded that is\n pertinent to the outcomes.\n\n protocol: Reference to a specific published clinical protocol that was followed during\n this assessment, and/or that provides evidence in support of the diagnosis.\n\n summary: A text summary of the investigations and the diagnosis.\n\n finding: Specific findings or diagnoses that were considered likely or relevant to\n ongoing treatment.\n\n prognosisCodeableConcept: Estimate of likely outcome.\n\n prognosisReference: RiskAssessment expressing likely outcome.\n\n supportingInfo: Information supporting the clinical impression.\n\n note: Commentary about the impression, typically recorded after the impression\n itself was made, though supplemental notes by the original author could also\n appear.\n\n ' from spark_fhir_schemas.r4.simple_types.id import idSchema from spark_fhir_schemas.r4.complex_types.meta import MetaSchema from spark_fhir_schemas.r4.simple_types.uri import uriSchema from spark_fhir_schemas.r4.simple_types.code import codeSchema from spark_fhir_schemas.r4.complex_types.narrative import NarrativeSchema from spark_fhir_schemas.r4.complex_types.resourcelist import ResourceListSchema from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema from spark_fhir_schemas.r4.complex_types.identifier import IdentifierSchema from spark_fhir_schemas.r4.complex_types.codeableconcept import CodeableConceptSchema from spark_fhir_schemas.r4.complex_types.reference import ReferenceSchema from spark_fhir_schemas.r4.complex_types.period import PeriodSchema from spark_fhir_schemas.r4.simple_types.datetime import dateTimeSchema from spark_fhir_schemas.r4.complex_types.clinicalimpression_investigation import ClinicalImpression_InvestigationSchema from spark_fhir_schemas.r4.complex_types.clinicalimpression_finding import ClinicalImpression_FindingSchema from 
spark_fhir_schemas.r4.complex_types.annotation import AnnotationSchema if ((max_recursion_limit and (nesting_list.count('ClinicalImpression') >= max_recursion_limit)) or (max_nesting_depth and (nesting_depth >= max_nesting_depth))): return StructType([StructField('id', StringType(), True)]) my_nesting_list: List[str] = (nesting_list + ['ClinicalImpression']) schema = StructType([StructField('resourceType', StringType(), True), StructField('id', idSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('meta', MetaSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('implicitRules', uriSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('language', codeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, 
include_modifierExtension=include_modifierExtension), True), StructField('text', NarrativeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('contained', ArrayType(ResourceListSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('extension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('modifierExtension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('identifier', ArrayType(IdentifierSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, 
extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('status', codeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('statusReason', CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('code', CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('description', StringType(), True), StructField('subject', ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('encounter', ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, 
max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('effectiveDateTime', TimestampType(), True), StructField('effectivePeriod', PeriodSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('date', dateTimeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('assessor', ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('previous', ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('problem', 
ArrayType(ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('investigation', ArrayType(ClinicalImpression_InvestigationSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('protocol', ArrayType(uriSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('summary', StringType(), True), StructField('finding', ArrayType(ClinicalImpression_FindingSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('prognosisCodeableConcept', ArrayType(CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, 
extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('prognosisReference', ArrayType(ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('supportingInfo', ArrayType(ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('note', ArrayType(AnnotationSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True)]) if (not include_extension): schema.fields = [(c if (c.name != 'extension') else StructField('extension', StringType(), True)) for c in schema.fields] if (not include_modifierExtension): schema.fields = [(c if (c.name != 'modifierExtension') else StructField('modifierExtension', StringType(), True)) for c in schema.fields] return schema
A record of a clinical assessment performed to determine what problem(s) may affect the patient and before planning the treatments or management strategies that are best to manage a patient's condition. Assessments are often 1:1 with a clinical consultation / encounter, but this varies greatly depending on the clinical workflow. This resource is called "ClinicalImpression" rather than "ClinicalAssessment" to avoid confusion with the recording of assessment tools such as Apgar score. resourceType: This is a ClinicalImpression resource id: The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes. meta: The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content might not always be associated with version changes to the resource. implicitRules: A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. Often, this is a reference to an implementation guide that defines the special rules along with other profiles etc. language: The base language in which the resource is written. text: A human-readable narrative that contains a summary of the resource and can be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. Resource definitions may define what content should be represented in the narrative to ensure clinical safety. contained: These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope. extension: May be used to represent additional information that is not part of the basic definition of the resource. 
To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. modifierExtension: May be used to represent additional information that is not part of the basic definition of the resource and that modifies the understanding of the element that contains it and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions. Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself). identifier: Business identifiers assigned to this clinical impression by the performer or other systems which remain constant as the resource is updated and propagates from server to server. status: Identifies the workflow status of the assessment. statusReason: Captures the reason for the current state of the ClinicalImpression. code: Categorizes the type of clinical assessment performed. description: A summary of the context and/or cause of the assessment - why / where it was performed, and what patient events/status prompted it. subject: The patient or group of individuals assessed as part of this record. encounter: The Encounter during which this ClinicalImpression was created or to which the creation of this record is tightly associated. effectiveDateTime: The point in time or period over which the subject was assessed. 
effectivePeriod: The point in time or period over which the subject was assessed. date: Indicates when the documentation of the assessment was complete. assessor: The clinician performing the assessment. previous: A reference to the last assessment that was conducted on this patient. Assessments are often/usually ongoing in nature; a care provider (practitioner or team) will make new assessments on an ongoing basis as new data arises or the patient's conditions changes. problem: A list of the relevant problems/conditions for a patient. investigation: One or more sets of investigations (signs, symptoms, etc.). The actual grouping of investigations varies greatly depending on the type and context of the assessment. These investigations may include data generated during the assessment process, or data previously generated and recorded that is pertinent to the outcomes. protocol: Reference to a specific published clinical protocol that was followed during this assessment, and/or that provides evidence in support of the diagnosis. summary: A text summary of the investigations and the diagnosis. finding: Specific findings or diagnoses that were considered likely or relevant to ongoing treatment. prognosisCodeableConcept: Estimate of likely outcome. prognosisReference: RiskAssessment expressing likely outcome. supportingInfo: Information supporting the clinical impression. note: Commentary about the impression, typically recorded after the impression itself was made, though supplemental notes by the original author could also appear.
spark_fhir_schemas/r4/resources/clinicalimpression.py
get_schema
imranq2/SparkFhirSchemas
2
python
@staticmethod
def get_schema(
    max_nesting_depth: Optional[int] = 6,
    nesting_depth: int = 0,
    nesting_list: List[str] = [],
    max_recursion_limit: Optional[int] = 2,
    include_extension: Optional[bool] = False,
    extension_fields: Optional[List[str]] = [
        "valueBoolean",
        "valueCode",
        "valueDate",
        "valueDateTime",
        "valueDecimal",
        "valueId",
        "valueInteger",
        "valuePositiveInt",
        "valueString",
        "valueTime",
        "valueUnsignedInt",
        "valueUri",
        "valueUrl",
    ],
    extension_depth: int = 0,
    max_extension_depth: Optional[int] = 2,
    include_modifierExtension: Optional[bool] = False,
) -> Union[StructType, DataType]:
    """
    Build the Spark schema for a FHIR R4 ClinicalImpression resource.

    A ClinicalImpression is a record of a clinical assessment performed to
    determine what problem(s) may affect the patient before planning the
    treatments or management strategies that are best to manage the
    patient's condition. (The resource is named "ClinicalImpression" rather
    than "ClinicalAssessment" to avoid confusion with assessment tools such
    as the Apgar score.)

    Recursion is bounded two ways: once ``nesting_list`` already contains
    ``max_recursion_limit`` occurrences of this resource, or ``nesting_depth``
    reaches ``max_nesting_depth``, a stub schema holding only an ``id``
    string column is returned instead of expanding further.

    Parameters:
        max_nesting_depth: hard cap on schema nesting before stubbing out.
        nesting_depth: current depth in the schema tree.
        nesting_list: names of ancestor schemas already expanded
            (read-only here; a new list is created for children).
        max_recursion_limit: max times this resource may recur in one path.
        include_extension: when False, ``extension`` columns are replaced by
            plain strings after the schema is built.
        extension_fields: extension value[x] fields to include when
            extensions are expanded.
        extension_depth / max_extension_depth: current and maximum depth of
            extension expansion.
        include_modifierExtension: when False, ``modifierExtension`` columns
            are replaced by plain strings after the schema is built.

    Returns:
        A ``StructType`` describing the resource (or a one-column stub
        when a recursion/nesting limit is hit).

    Fields (per the FHIR R4 specification):
        resourceType: always "ClinicalImpression".
        id: logical id of the resource.
        meta: metadata maintained by the infrastructure.
        implicitRules: rule set followed when the resource was constructed.
        language: base language of the resource.
        text: human-readable narrative summary.
        contained: resources with no independent existence.
        extension / modifierExtension: additional information; modifier
            extensions may change the meaning of the containing element.
        identifier: business identifiers for this impression.
        status: workflow status of the assessment.
        statusReason: reason for the current state.
        code: categorizes the type of clinical assessment performed.
        description: context/cause of the assessment.
        subject: patient or group assessed.
        encounter: Encounter this impression is associated with.
        effectiveDateTime / effectivePeriod: time (point or period) over
            which the subject was assessed.
        date: when documentation of the assessment was complete.
        assessor: clinician performing the assessment.
        previous: reference to the last assessment on this patient.
        problem: relevant problems/conditions for the patient.
        investigation: sets of investigations (signs, symptoms, etc.).
        protocol: published clinical protocol(s) followed.
        summary: text summary of the investigations and diagnosis.
        finding: findings/diagnoses considered likely or relevant.
        prognosisCodeableConcept / prognosisReference: likely outcome.
        supportingInfo: information supporting the impression.
        note: commentary about the impression.
    """
    from spark_fhir_schemas.r4.simple_types.id import idSchema
    from spark_fhir_schemas.r4.complex_types.meta import MetaSchema
    from spark_fhir_schemas.r4.simple_types.uri import uriSchema
    from spark_fhir_schemas.r4.simple_types.code import codeSchema
    from spark_fhir_schemas.r4.complex_types.narrative import NarrativeSchema
    from spark_fhir_schemas.r4.complex_types.resourcelist import ResourceListSchema
    from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
    from spark_fhir_schemas.r4.complex_types.identifier import IdentifierSchema
    from spark_fhir_schemas.r4.complex_types.codeableconcept import CodeableConceptSchema
    from spark_fhir_schemas.r4.complex_types.reference import ReferenceSchema
    from spark_fhir_schemas.r4.complex_types.period import PeriodSchema
    from spark_fhir_schemas.r4.simple_types.datetime import dateTimeSchema
    from spark_fhir_schemas.r4.complex_types.clinicalimpression_investigation import (
        ClinicalImpression_InvestigationSchema,
    )
    from spark_fhir_schemas.r4.complex_types.clinicalimpression_finding import (
        ClinicalImpression_FindingSchema,
    )
    from spark_fhir_schemas.r4.complex_types.annotation import AnnotationSchema

    # Stop expanding once this resource has recurred too often on the
    # current path, or the overall nesting depth limit is reached.
    if (
        max_recursion_limit
        and nesting_list.count("ClinicalImpression") >= max_recursion_limit
    ) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
        return StructType([StructField("id", StringType(), True)])
    my_nesting_list: List[str] = nesting_list + ["ClinicalImpression"]

    # Every nested schema call passes the same nine keyword arguments; the
    # only variation is the extension depth: scalar (non-array) children
    # descend one extension level (extension_depth + 1) while array
    # children keep the current extension_depth.
    def _nested(schema_cls, *, in_array: bool) -> DataType:
        return schema_cls.get_schema(
            max_nesting_depth=max_nesting_depth,
            nesting_depth=nesting_depth + 1,
            nesting_list=my_nesting_list,
            max_recursion_limit=max_recursion_limit,
            include_extension=include_extension,
            extension_fields=extension_fields,
            extension_depth=(
                extension_depth if in_array else extension_depth + 1
            ),
            max_extension_depth=max_extension_depth,
            include_modifierExtension=include_modifierExtension,
        )

    def _field(name: str, schema_cls) -> StructField:
        # A single nested complex/simple-typed column.
        return StructField(name, _nested(schema_cls, in_array=False), True)

    def _array_field(name: str, schema_cls) -> StructField:
        # A repeated (0..*) nested column.
        return StructField(
            name, ArrayType(_nested(schema_cls, in_array=True)), True
        )

    schema = StructType(
        [
            # This is a ClinicalImpression resource.
            StructField("resourceType", StringType(), True),
            _field("id", idSchema),
            _field("meta", MetaSchema),
            _field("implicitRules", uriSchema),
            _field("language", codeSchema),
            _field("text", NarrativeSchema),
            _array_field("contained", ResourceListSchema),
            _array_field("extension", ExtensionSchema),
            _array_field("modifierExtension", ExtensionSchema),
            _array_field("identifier", IdentifierSchema),
            _field("status", codeSchema),
            _field("statusReason", CodeableConceptSchema),
            _field("code", CodeableConceptSchema),
            StructField("description", StringType(), True),
            _field("subject", ReferenceSchema),
            _field("encounter", ReferenceSchema),
            StructField("effectiveDateTime", TimestampType(), True),
            _field("effectivePeriod", PeriodSchema),
            _field("date", dateTimeSchema),
            _field("assessor", ReferenceSchema),
            _field("previous", ReferenceSchema),
            _array_field("problem", ReferenceSchema),
            _array_field("investigation", ClinicalImpression_InvestigationSchema),
            _array_field("protocol", uriSchema),
            StructField("summary", StringType(), True),
            _array_field("finding", ClinicalImpression_FindingSchema),
            _array_field("prognosisCodeableConcept", CodeableConceptSchema),
            _array_field("prognosisReference", ReferenceSchema),
            _array_field("supportingInfo", ReferenceSchema),
            _array_field("note", AnnotationSchema),
        ]
    )
    # When extensions are excluded, collapse those columns to plain strings
    # (same for modifier extensions) rather than dropping them entirely.
    if not include_extension:
        schema.fields = [
            c
            if c.name != "extension"
            else StructField("extension", StringType(), True)
            for c in schema.fields
        ]
    if not include_modifierExtension:
        schema.fields = [
            c
            if c.name != "modifierExtension"
            else StructField("modifierExtension", StringType(), True)
            for c in schema.fields
        ]
    return schema
@staticmethod def get_schema(max_nesting_depth: Optional[int]=6, nesting_depth: int=0, nesting_list: List[str]=[], max_recursion_limit: Optional[int]=2, include_extension: Optional[bool]=False, extension_fields: Optional[List[str]]=['valueBoolean', 'valueCode', 'valueDate', 'valueDateTime', 'valueDecimal', 'valueId', 'valueInteger', 'valuePositiveInt', 'valueString', 'valueTime', 'valueUnsignedInt', 'valueUri', 'valueUrl'], extension_depth: int=0, max_extension_depth: Optional[int]=2, include_modifierExtension: Optional[bool]=False) -> Union[(StructType, DataType)]: '\n A record of a clinical assessment performed to determine what problem(s) may\n affect the patient and before planning the treatments or management strategies\n that are best to manage a patient\'s condition. Assessments are often 1:1 with\n a clinical consultation / encounter, but this varies greatly depending on the\n clinical workflow. This resource is called "ClinicalImpression" rather than\n "ClinicalAssessment" to avoid confusion with the recording of assessment tools\n such as Apgar score.\n\n\n resourceType: This is a ClinicalImpression resource\n\n id: The logical id of the resource, as used in the URL for the resource. Once\n assigned, this value never changes.\n\n meta: The metadata about the resource. This is content that is maintained by the\n infrastructure. Changes to the content might not always be associated with\n version changes to the resource.\n\n implicitRules: A reference to a set of rules that were followed when the resource was\n constructed, and which must be understood when processing the content. Often,\n this is a reference to an implementation guide that defines the special rules\n along with other profiles etc.\n\n language: The base language in which the resource is written.\n\n text: A human-readable narrative that contains a summary of the resource and can be\n used to represent the content of the resource to a human. 
The narrative need\n not encode all the structured data, but is required to contain sufficient\n detail to make it "clinically safe" for a human to just read the narrative.\n Resource definitions may define what content should be represented in the\n narrative to ensure clinical safety.\n\n contained: These resources do not have an independent existence apart from the resource\n that contains them - they cannot be identified independently, and nor can they\n have their own independent transaction scope.\n\n extension: May be used to represent additional information that is not part of the basic\n definition of the resource. To make the use of extensions safe and manageable,\n there is a strict set of governance applied to the definition and use of\n extensions. Though any implementer can define an extension, there is a set of\n requirements that SHALL be met as part of the definition of the extension.\n\n modifierExtension: May be used to represent additional information that is not part of the basic\n definition of the resource and that modifies the understanding of the element\n that contains it and/or the understanding of the containing element\'s\n descendants. Usually modifier elements provide negation or qualification. To\n make the use of extensions safe and manageable, there is a strict set of\n governance applied to the definition and use of extensions. Though any\n implementer is allowed to define an extension, there is a set of requirements\n that SHALL be met as part of the definition of the extension. 
Applications\n processing a resource are required to check for modifier extensions.\n\n Modifier extensions SHALL NOT change the meaning of any elements on Resource\n or DomainResource (including cannot change the meaning of modifierExtension\n itself).\n\n identifier: Business identifiers assigned to this clinical impression by the performer or\n other systems which remain constant as the resource is updated and propagates\n from server to server.\n\n status: Identifies the workflow status of the assessment.\n\n statusReason: Captures the reason for the current state of the ClinicalImpression.\n\n code: Categorizes the type of clinical assessment performed.\n\n description: A summary of the context and/or cause of the assessment - why / where it was\n performed, and what patient events/status prompted it.\n\n subject: The patient or group of individuals assessed as part of this record.\n\n encounter: The Encounter during which this ClinicalImpression was created or to which the\n creation of this record is tightly associated.\n\n effectiveDateTime: The point in time or period over which the subject was assessed.\n\n effectivePeriod: The point in time or period over which the subject was assessed.\n\n date: Indicates when the documentation of the assessment was complete.\n\n assessor: The clinician performing the assessment.\n\n previous: A reference to the last assessment that was conducted on this patient.\n Assessments are often/usually ongoing in nature; a care provider (practitioner\n or team) will make new assessments on an ongoing basis as new data arises or\n the patient\'s conditions changes.\n\n problem: A list of the relevant problems/conditions for a patient.\n\n investigation: One or more sets of investigations (signs, symptoms, etc.). The actual\n grouping of investigations varies greatly depending on the type and context of\n the assessment. 
These investigations may include data generated during the\n assessment process, or data previously generated and recorded that is\n pertinent to the outcomes.\n\n protocol: Reference to a specific published clinical protocol that was followed during\n this assessment, and/or that provides evidence in support of the diagnosis.\n\n summary: A text summary of the investigations and the diagnosis.\n\n finding: Specific findings or diagnoses that were considered likely or relevant to\n ongoing treatment.\n\n prognosisCodeableConcept: Estimate of likely outcome.\n\n prognosisReference: RiskAssessment expressing likely outcome.\n\n supportingInfo: Information supporting the clinical impression.\n\n note: Commentary about the impression, typically recorded after the impression\n itself was made, though supplemental notes by the original author could also\n appear.\n\n ' from spark_fhir_schemas.r4.simple_types.id import idSchema from spark_fhir_schemas.r4.complex_types.meta import MetaSchema from spark_fhir_schemas.r4.simple_types.uri import uriSchema from spark_fhir_schemas.r4.simple_types.code import codeSchema from spark_fhir_schemas.r4.complex_types.narrative import NarrativeSchema from spark_fhir_schemas.r4.complex_types.resourcelist import ResourceListSchema from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema from spark_fhir_schemas.r4.complex_types.identifier import IdentifierSchema from spark_fhir_schemas.r4.complex_types.codeableconcept import CodeableConceptSchema from spark_fhir_schemas.r4.complex_types.reference import ReferenceSchema from spark_fhir_schemas.r4.complex_types.period import PeriodSchema from spark_fhir_schemas.r4.simple_types.datetime import dateTimeSchema from spark_fhir_schemas.r4.complex_types.clinicalimpression_investigation import ClinicalImpression_InvestigationSchema from spark_fhir_schemas.r4.complex_types.clinicalimpression_finding import ClinicalImpression_FindingSchema from 
spark_fhir_schemas.r4.complex_types.annotation import AnnotationSchema if ((max_recursion_limit and (nesting_list.count('ClinicalImpression') >= max_recursion_limit)) or (max_nesting_depth and (nesting_depth >= max_nesting_depth))): return StructType([StructField('id', StringType(), True)]) my_nesting_list: List[str] = (nesting_list + ['ClinicalImpression']) schema = StructType([StructField('resourceType', StringType(), True), StructField('id', idSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('meta', MetaSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('implicitRules', uriSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('language', codeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, 
include_modifierExtension=include_modifierExtension), True), StructField('text', NarrativeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('contained', ArrayType(ResourceListSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('extension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('modifierExtension', ArrayType(ExtensionSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('identifier', ArrayType(IdentifierSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, 
extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('status', codeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('statusReason', CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('code', CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('description', StringType(), True), StructField('subject', ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('encounter', ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, 
max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('effectiveDateTime', TimestampType(), True), StructField('effectivePeriod', PeriodSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('date', dateTimeSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('assessor', ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('previous', ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=(extension_depth + 1), max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension), True), StructField('problem', 
ArrayType(ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('investigation', ArrayType(ClinicalImpression_InvestigationSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('protocol', ArrayType(uriSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('summary', StringType(), True), StructField('finding', ArrayType(ClinicalImpression_FindingSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('prognosisCodeableConcept', ArrayType(CodeableConceptSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, 
extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('prognosisReference', ArrayType(ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('supportingInfo', ArrayType(ReferenceSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True), StructField('note', ArrayType(AnnotationSchema.get_schema(max_nesting_depth=max_nesting_depth, nesting_depth=(nesting_depth + 1), nesting_list=my_nesting_list, max_recursion_limit=max_recursion_limit, include_extension=include_extension, extension_fields=extension_fields, extension_depth=extension_depth, max_extension_depth=max_extension_depth, include_modifierExtension=include_modifierExtension)), True)]) if (not include_extension): schema.fields = [(c if (c.name != 'extension') else StructField('extension', StringType(), True)) for c in schema.fields] if (not include_modifierExtension): schema.fields = [(c if (c.name != 'modifierExtension') else StructField('modifierExtension', StringType(), True)) for c in schema.fields] return schema<|docstring|>A record of a clinical assessment performed to determine what problem(s) may affect the patient and before planning the treatments or management strategies that are best to manage a patient's condition. 
Assessments are often 1:1 with a clinical consultation / encounter, but this varies greatly depending on the clinical workflow. This resource is called "ClinicalImpression" rather than "ClinicalAssessment" to avoid confusion with the recording of assessment tools such as Apgar score. resourceType: This is a ClinicalImpression resource id: The logical id of the resource, as used in the URL for the resource. Once assigned, this value never changes. meta: The metadata about the resource. This is content that is maintained by the infrastructure. Changes to the content might not always be associated with version changes to the resource. implicitRules: A reference to a set of rules that were followed when the resource was constructed, and which must be understood when processing the content. Often, this is a reference to an implementation guide that defines the special rules along with other profiles etc. language: The base language in which the resource is written. text: A human-readable narrative that contains a summary of the resource and can be used to represent the content of the resource to a human. The narrative need not encode all the structured data, but is required to contain sufficient detail to make it "clinically safe" for a human to just read the narrative. Resource definitions may define what content should be represented in the narrative to ensure clinical safety. contained: These resources do not have an independent existence apart from the resource that contains them - they cannot be identified independently, and nor can they have their own independent transaction scope. extension: May be used to represent additional information that is not part of the basic definition of the resource. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. 
Though any implementer can define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. modifierExtension: May be used to represent additional information that is not part of the basic definition of the resource and that modifies the understanding of the element that contains it and/or the understanding of the containing element's descendants. Usually modifier elements provide negation or qualification. To make the use of extensions safe and manageable, there is a strict set of governance applied to the definition and use of extensions. Though any implementer is allowed to define an extension, there is a set of requirements that SHALL be met as part of the definition of the extension. Applications processing a resource are required to check for modifier extensions. Modifier extensions SHALL NOT change the meaning of any elements on Resource or DomainResource (including cannot change the meaning of modifierExtension itself). identifier: Business identifiers assigned to this clinical impression by the performer or other systems which remain constant as the resource is updated and propagates from server to server. status: Identifies the workflow status of the assessment. statusReason: Captures the reason for the current state of the ClinicalImpression. code: Categorizes the type of clinical assessment performed. description: A summary of the context and/or cause of the assessment - why / where it was performed, and what patient events/status prompted it. subject: The patient or group of individuals assessed as part of this record. encounter: The Encounter during which this ClinicalImpression was created or to which the creation of this record is tightly associated. effectiveDateTime: The point in time or period over which the subject was assessed. effectivePeriod: The point in time or period over which the subject was assessed. date: Indicates when the documentation of the assessment was complete. 
assessor: The clinician performing the assessment. previous: A reference to the last assessment that was conducted on this patient. Assessments are often/usually ongoing in nature; a care provider (practitioner or team) will make new assessments on an ongoing basis as new data arises or the patient's conditions changes. problem: A list of the relevant problems/conditions for a patient. investigation: One or more sets of investigations (signs, symptoms, etc.). The actual grouping of investigations varies greatly depending on the type and context of the assessment. These investigations may include data generated during the assessment process, or data previously generated and recorded that is pertinent to the outcomes. protocol: Reference to a specific published clinical protocol that was followed during this assessment, and/or that provides evidence in support of the diagnosis. summary: A text summary of the investigations and the diagnosis. finding: Specific findings or diagnoses that were considered likely or relevant to ongoing treatment. prognosisCodeableConcept: Estimate of likely outcome. prognosisReference: RiskAssessment expressing likely outcome. supportingInfo: Information supporting the clinical impression. note: Commentary about the impression, typically recorded after the impression itself was made, though supplemental notes by the original author could also appear.<|endoftext|>
7826a2094140fa6117b4e6e6a8433d96bec0d4871037689ef3e713acf848f082
def connect(self, host, username, port=2200, key_filename=None): '\n Creates a connection to the remote server.\n\n :param host: Remote host\n :type host: String\n :param username: User name to connect to the remote host\n :type username: String\n :param port: Remote host port\n :type port: Number\n ' if (not host): raise ValueError('Host is missing') if (not username): raise ValueError('Username is missing') if (not port): raise ValueError('Missing port') self.host = host self.username = username self.port = port if (self.client is None): self.client = paramiko.SSHClient() self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) pkey = None if (key_filename is not None): pkey = _load_key(key_filename) self.client.connect(hostname=host, port=port, username=username, pkey=pkey) self.transport = self.client.get_transport() return (self.transport is not None)
Creates a connection to the remote server. :param host: Remote host :type host: String :param username: User name to connect to the remote host :type username: String :param port: Remote host port :type port: Number
src/azure-cli/azure/cli/command_modules/acs/acs_client.py
connect
ahelal/azure-cli
3,287
python
def connect(self, host, username, port=2200, key_filename=None): '\n Creates a connection to the remote server.\n\n :param host: Remote host\n :type host: String\n :param username: User name to connect to the remote host\n :type username: String\n :param port: Remote host port\n :type port: Number\n ' if (not host): raise ValueError('Host is missing') if (not username): raise ValueError('Username is missing') if (not port): raise ValueError('Missing port') self.host = host self.username = username self.port = port if (self.client is None): self.client = paramiko.SSHClient() self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) pkey = None if (key_filename is not None): pkey = _load_key(key_filename) self.client.connect(hostname=host, port=port, username=username, pkey=pkey) self.transport = self.client.get_transport() return (self.transport is not None)
def connect(self, host, username, port=2200, key_filename=None): '\n Creates a connection to the remote server.\n\n :param host: Remote host\n :type host: String\n :param username: User name to connect to the remote host\n :type username: String\n :param port: Remote host port\n :type port: Number\n ' if (not host): raise ValueError('Host is missing') if (not username): raise ValueError('Username is missing') if (not port): raise ValueError('Missing port') self.host = host self.username = username self.port = port if (self.client is None): self.client = paramiko.SSHClient() self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) pkey = None if (key_filename is not None): pkey = _load_key(key_filename) self.client.connect(hostname=host, port=port, username=username, pkey=pkey) self.transport = self.client.get_transport() return (self.transport is not None)<|docstring|>Creates a connection to the remote server. :param host: Remote host :type host: String :param username: User name to connect to the remote host :type username: String :param port: Remote host port :type port: Number<|endoftext|>
5b4da1d747d6575d9c3696abd3c43d87d8e430f6ebe6fa367f84947bf048ddbc
def run(self, command, background=False): '\n Runs a command on the remote host\n\n :param command: Command to run on the remote host\n :type command: String\n :param background: True to run it in a separate thread,\n False should be run in the foreground\n :type command: Boolean\n ' if background: t = threading.Thread(target=ACSClient._run_cmd, args=(self, command)) t.daemon = True t.start() return None return self._run_cmd(command)
Runs a command on the remote host :param command: Command to run on the remote host :type command: String :param background: True to run it in a separate thread, False should be run in the foreground :type command: Boolean
src/azure-cli/azure/cli/command_modules/acs/acs_client.py
run
ahelal/azure-cli
3,287
python
def run(self, command, background=False): '\n Runs a command on the remote host\n\n :param command: Command to run on the remote host\n :type command: String\n :param background: True to run it in a separate thread,\n False should be run in the foreground\n :type command: Boolean\n ' if background: t = threading.Thread(target=ACSClient._run_cmd, args=(self, command)) t.daemon = True t.start() return None return self._run_cmd(command)
def run(self, command, background=False): '\n Runs a command on the remote host\n\n :param command: Command to run on the remote host\n :type command: String\n :param background: True to run it in a separate thread,\n False should be run in the foreground\n :type command: Boolean\n ' if background: t = threading.Thread(target=ACSClient._run_cmd, args=(self, command)) t.daemon = True t.start() return None return self._run_cmd(command)<|docstring|>Runs a command on the remote host :param command: Command to run on the remote host :type command: String :param background: True to run it in a separate thread, False should be run in the foreground :type command: Boolean<|endoftext|>
2971f81c2d4b1824f573427c8f1228fd1775821aef512253dee3bf18d0113444
def _run_cmd(self, command): '\n Runs a command on the remote host\n\n :param command: Command to run on the remote host\n :type command: String\n ' if (not command): raise ValueError('Command is missing') (_, stdout, stderr) = self.client.exec_command(command) return (stdout, stderr)
Runs a command on the remote host :param command: Command to run on the remote host :type command: String
src/azure-cli/azure/cli/command_modules/acs/acs_client.py
_run_cmd
ahelal/azure-cli
3,287
python
def _run_cmd(self, command): '\n Runs a command on the remote host\n\n :param command: Command to run on the remote host\n :type command: String\n ' if (not command): raise ValueError('Command is missing') (_, stdout, stderr) = self.client.exec_command(command) return (stdout, stderr)
def _run_cmd(self, command): '\n Runs a command on the remote host\n\n :param command: Command to run on the remote host\n :type command: String\n ' if (not command): raise ValueError('Command is missing') (_, stdout, stderr) = self.client.exec_command(command) return (stdout, stderr)<|docstring|>Runs a command on the remote host :param command: Command to run on the remote host :type command: String<|endoftext|>
4cd82690da4ae11c6957322101627288bc420ed274bf6ad549c2b375fb72414e
def file_exists(self, file_path): '\n Checks if file on the remote exists\n\n :param file_path: Full path to the file on remote machine\n :type file_path: String\n ' if (not file_path): raise ValueError('Missing file path') if (self.transport is None): raise TypeError('Transport cannot be none') sftp = self.transport.open_sftp_client() result = None try: sftp.stat(file_path) result = True except IOError: result = False finally: sftp.close() return result
Checks if file on the remote exists :param file_path: Full path to the file on remote machine :type file_path: String
src/azure-cli/azure/cli/command_modules/acs/acs_client.py
file_exists
ahelal/azure-cli
3,287
python
def file_exists(self, file_path): '\n Checks if file on the remote exists\n\n :param file_path: Full path to the file on remote machine\n :type file_path: String\n ' if (not file_path): raise ValueError('Missing file path') if (self.transport is None): raise TypeError('Transport cannot be none') sftp = self.transport.open_sftp_client() result = None try: sftp.stat(file_path) result = True except IOError: result = False finally: sftp.close() return result
def file_exists(self, file_path): '\n Checks if file on the remote exists\n\n :param file_path: Full path to the file on remote machine\n :type file_path: String\n ' if (not file_path): raise ValueError('Missing file path') if (self.transport is None): raise TypeError('Transport cannot be none') sftp = self.transport.open_sftp_client() result = None try: sftp.stat(file_path) result = True except IOError: result = False finally: sftp.close() return result<|docstring|>Checks if file on the remote exists :param file_path: Full path to the file on remote machine :type file_path: String<|endoftext|>
8a5e093f46abbe1ef8db03f27903fee193badb0b8cfc50d3b12d6abbae92851a
def create_tunnel(self, remote_host, remote_port, local_port=0): '\n Creates a tunnel to the remote host\n\n :param remote_host: Remote host to tunnel to\n :type remote_host: String\n :param remote_port: Remote port to tunnel to\n :type remote_port: Number\n :param local_port: Local port. If set to 0, random local port is selected\n :type local_port: Number\n ' if (local_port == 0): local_port = self.get_available_local_port() with SSHTunnelForwarder((self.host, self.port), ssh_username=self.username, remote_bind_address=(remote_host, remote_port), local_bind_address=('0.0.0.0', local_port)): try: while True: sleep(1) except KeyboardInterrupt: pass
Creates a tunnel to the remote host :param remote_host: Remote host to tunnel to :type remote_host: String :param remote_port: Remote port to tunnel to :type remote_port: Number :param local_port: Local port. If set to 0, random local port is selected :type local_port: Number
src/azure-cli/azure/cli/command_modules/acs/acs_client.py
create_tunnel
ahelal/azure-cli
3,287
python
def create_tunnel(self, remote_host, remote_port, local_port=0): '\n Creates a tunnel to the remote host\n\n :param remote_host: Remote host to tunnel to\n :type remote_host: String\n :param remote_port: Remote port to tunnel to\n :type remote_port: Number\n :param local_port: Local port. If set to 0, random local port is selected\n :type local_port: Number\n ' if (local_port == 0): local_port = self.get_available_local_port() with SSHTunnelForwarder((self.host, self.port), ssh_username=self.username, remote_bind_address=(remote_host, remote_port), local_bind_address=('0.0.0.0', local_port)): try: while True: sleep(1) except KeyboardInterrupt: pass
def create_tunnel(self, remote_host, remote_port, local_port=0): '\n Creates a tunnel to the remote host\n\n :param remote_host: Remote host to tunnel to\n :type remote_host: String\n :param remote_port: Remote port to tunnel to\n :type remote_port: Number\n :param local_port: Local port. If set to 0, random local port is selected\n :type local_port: Number\n ' if (local_port == 0): local_port = self.get_available_local_port() with SSHTunnelForwarder((self.host, self.port), ssh_username=self.username, remote_bind_address=(remote_host, remote_port), local_bind_address=('0.0.0.0', local_port)): try: while True: sleep(1) except KeyboardInterrupt: pass<|docstring|>Creates a tunnel to the remote host :param remote_host: Remote host to tunnel to :type remote_host: String :param remote_port: Remote port to tunnel to :type remote_port: Number :param local_port: Local port. If set to 0, random local port is selected :type local_port: Number<|endoftext|>
f6ece3acafcede74360b1594ccd02052b65d76fb390bffa959cfdd260f082a21
@staticmethod def get_available_local_port(): '\n Gets a random, available local port\n ' s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind(('', 0)) s.listen(1) port = s.getsockname()[1] s.close() return port
Gets a random, available local port
src/azure-cli/azure/cli/command_modules/acs/acs_client.py
get_available_local_port
ahelal/azure-cli
3,287
python
@staticmethod def get_available_local_port(): '\n \n ' s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((, 0)) s.listen(1) port = s.getsockname()[1] s.close() return port
@staticmethod def get_available_local_port(): '\n \n ' s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.bind((, 0)) s.listen(1) port = s.getsockname()[1] s.close() return port<|docstring|>Gets a random, available local port<|endoftext|>
b15977e370274d962099e9f18312fe9a34d0ddbbc50f8d951edb40aa88315813
def __init__(self, blob, chunk_size=None, **download_kwargs): 'docstring note that download_kwargs also used for reload()' for kwarg in download_kwargs: if (kwarg not in VALID_DOWNLOAD_KWARGS): raise ValueError('BlobReader does not support keyword argument {}.'.format(kwarg)) self._blob = blob self._pos = 0 self._buffer = io.BytesIO() self._chunk_size = (chunk_size or blob.chunk_size or DEFAULT_CHUNK_SIZE) self._download_kwargs = download_kwargs
docstring note that download_kwargs also used for reload()
google/cloud/storage/fileio.py
__init__
smita2006/pythonstorageRepo
2
python
def __init__(self, blob, chunk_size=None, **download_kwargs): for kwarg in download_kwargs: if (kwarg not in VALID_DOWNLOAD_KWARGS): raise ValueError('BlobReader does not support keyword argument {}.'.format(kwarg)) self._blob = blob self._pos = 0 self._buffer = io.BytesIO() self._chunk_size = (chunk_size or blob.chunk_size or DEFAULT_CHUNK_SIZE) self._download_kwargs = download_kwargs
def __init__(self, blob, chunk_size=None, **download_kwargs): for kwarg in download_kwargs: if (kwarg not in VALID_DOWNLOAD_KWARGS): raise ValueError('BlobReader does not support keyword argument {}.'.format(kwarg)) self._blob = blob self._pos = 0 self._buffer = io.BytesIO() self._chunk_size = (chunk_size or blob.chunk_size or DEFAULT_CHUNK_SIZE) self._download_kwargs = download_kwargs<|docstring|>docstring note that download_kwargs also used for reload()<|endoftext|>
0a72eeea43503b911be2fb7f83a217ede9de405cfbaace44d87cf4c344e50295
def seek(self, pos, whence=0): 'Seek within the blob.\n\n This implementation of seek() uses knowledge of the blob size to\n validate that the reported position does not exceed the blob last byte.\n If the blob size is not already known it will call blob.reload().\n ' self._checkClosed() if (self._blob.size is None): self._blob.reload(**self._download_kwargs) initial_pos = self._pos if (whence == 0): self._pos = pos elif (whence == 1): self._pos += pos elif (whence == 2): self._pos = (self._blob.size + pos) if (whence not in {0, 1, 2}): raise ValueError('invalid whence value') if (self._pos > self._blob.size): self._pos = self._blob.size difference = (self._pos - initial_pos) new_buffer_pos = self._buffer.seek(difference, 1) if (new_buffer_pos != difference): self._buffer.seek(0) self._buffer.truncate(0) return self._pos
Seek within the blob. This implementation of seek() uses knowledge of the blob size to validate that the reported position does not exceed the blob last byte. If the blob size is not already known it will call blob.reload().
google/cloud/storage/fileio.py
seek
smita2006/pythonstorageRepo
2
python
def seek(self, pos, whence=0): 'Seek within the blob.\n\n This implementation of seek() uses knowledge of the blob size to\n validate that the reported position does not exceed the blob last byte.\n If the blob size is not already known it will call blob.reload().\n ' self._checkClosed() if (self._blob.size is None): self._blob.reload(**self._download_kwargs) initial_pos = self._pos if (whence == 0): self._pos = pos elif (whence == 1): self._pos += pos elif (whence == 2): self._pos = (self._blob.size + pos) if (whence not in {0, 1, 2}): raise ValueError('invalid whence value') if (self._pos > self._blob.size): self._pos = self._blob.size difference = (self._pos - initial_pos) new_buffer_pos = self._buffer.seek(difference, 1) if (new_buffer_pos != difference): self._buffer.seek(0) self._buffer.truncate(0) return self._pos
def seek(self, pos, whence=0): 'Seek within the blob.\n\n This implementation of seek() uses knowledge of the blob size to\n validate that the reported position does not exceed the blob last byte.\n If the blob size is not already known it will call blob.reload().\n ' self._checkClosed() if (self._blob.size is None): self._blob.reload(**self._download_kwargs) initial_pos = self._pos if (whence == 0): self._pos = pos elif (whence == 1): self._pos += pos elif (whence == 2): self._pos = (self._blob.size + pos) if (whence not in {0, 1, 2}): raise ValueError('invalid whence value') if (self._pos > self._blob.size): self._pos = self._blob.size difference = (self._pos - initial_pos) new_buffer_pos = self._buffer.seek(difference, 1) if (new_buffer_pos != difference): self._buffer.seek(0) self._buffer.truncate(0) return self._pos<|docstring|>Seek within the blob. This implementation of seek() uses knowledge of the blob size to validate that the reported position does not exceed the blob last byte. If the blob size is not already known it will call blob.reload().<|endoftext|>
c50699e882c9212658c1321fd4ce49504a871c5cf07d5fd612e0a3850d5bcd5b
@property def _chunk_size(self): "Get the blob's default chunk size.\n\n :rtype: int or ``NoneType``\n :returns: The current blob's chunk size, if it is set.\n " return self.__chunk_size
Get the blob's default chunk size. :rtype: int or ``NoneType`` :returns: The current blob's chunk size, if it is set.
google/cloud/storage/fileio.py
_chunk_size
smita2006/pythonstorageRepo
2
python
@property def _chunk_size(self): "Get the blob's default chunk size.\n\n :rtype: int or ``NoneType``\n :returns: The current blob's chunk size, if it is set.\n " return self.__chunk_size
@property def _chunk_size(self): "Get the blob's default chunk size.\n\n :rtype: int or ``NoneType``\n :returns: The current blob's chunk size, if it is set.\n " return self.__chunk_size<|docstring|>Get the blob's default chunk size. :rtype: int or ``NoneType`` :returns: The current blob's chunk size, if it is set.<|endoftext|>
07be2141d02948ee9ea086db47a3e3f9a819fc1293e3625029991b461c1567c4
@_chunk_size.setter def _chunk_size(self, value): "Set the blob's default chunk size.\n\n :type value: int\n :param value: (Optional) The current blob's chunk size, if it is set.\n\n :raises: :class:`ValueError` if ``value`` is not ``None`` and is not a\n multiple of 256 KiB.\n " if ((value is not None) and (value > 0) and ((value % CHUNK_SIZE_MULTIPLE) != 0)): raise ValueError(('Chunk size must be a multiple of %d.' % CHUNK_SIZE_MULTIPLE)) self.__chunk_size = value
Set the blob's default chunk size. :type value: int :param value: (Optional) The current blob's chunk size, if it is set. :raises: :class:`ValueError` if ``value`` is not ``None`` and is not a multiple of 256 KiB.
google/cloud/storage/fileio.py
_chunk_size
smita2006/pythonstorageRepo
2
python
@_chunk_size.setter def _chunk_size(self, value): "Set the blob's default chunk size.\n\n :type value: int\n :param value: (Optional) The current blob's chunk size, if it is set.\n\n :raises: :class:`ValueError` if ``value`` is not ``None`` and is not a\n multiple of 256 KiB.\n " if ((value is not None) and (value > 0) and ((value % CHUNK_SIZE_MULTIPLE) != 0)): raise ValueError(('Chunk size must be a multiple of %d.' % CHUNK_SIZE_MULTIPLE)) self.__chunk_size = value
@_chunk_size.setter def _chunk_size(self, value): "Set the blob's default chunk size.\n\n :type value: int\n :param value: (Optional) The current blob's chunk size, if it is set.\n\n :raises: :class:`ValueError` if ``value`` is not ``None`` and is not a\n multiple of 256 KiB.\n " if ((value is not None) and (value > 0) and ((value % CHUNK_SIZE_MULTIPLE) != 0)): raise ValueError(('Chunk size must be a multiple of %d.' % CHUNK_SIZE_MULTIPLE)) self.__chunk_size = value<|docstring|>Set the blob's default chunk size. :type value: int :param value: (Optional) The current blob's chunk size, if it is set. :raises: :class:`ValueError` if ``value`` is not ``None`` and is not a multiple of 256 KiB.<|endoftext|>
981746303d0a92a5e52e03e298658522ae607eb73f6f1d58964af4af90c3b36b
def _upload_chunks_from_buffer(self, num_chunks): 'Upload a specified number of chunks.' if (not self._upload_and_transport): self._initiate_upload() (upload, transport) = self._upload_and_transport for _ in range(num_chunks): upload.transmit_next_chunk(transport) self._buffer.flush()
Upload a specified number of chunks.
google/cloud/storage/fileio.py
_upload_chunks_from_buffer
smita2006/pythonstorageRepo
2
python
def _upload_chunks_from_buffer(self, num_chunks): if (not self._upload_and_transport): self._initiate_upload() (upload, transport) = self._upload_and_transport for _ in range(num_chunks): upload.transmit_next_chunk(transport) self._buffer.flush()
def _upload_chunks_from_buffer(self, num_chunks): if (not self._upload_and_transport): self._initiate_upload() (upload, transport) = self._upload_and_transport for _ in range(num_chunks): upload.transmit_next_chunk(transport) self._buffer.flush()<|docstring|>Upload a specified number of chunks.<|endoftext|>
c2af0b72642f6a0addaac0244d62561a04561e6892bf56529760cb3e29fd008e
def write(self, b): 'Append to the end of the buffer without changing the position.' self._checkClosed() bookmark = self._buffer.tell() self._buffer.seek(0, io.SEEK_END) pos = self._buffer.write(b) self._buffer.seek(bookmark) return (self._cursor + pos)
Append to the end of the buffer without changing the position.
google/cloud/storage/fileio.py
write
smita2006/pythonstorageRepo
2
python
def write(self, b): self._checkClosed() bookmark = self._buffer.tell() self._buffer.seek(0, io.SEEK_END) pos = self._buffer.write(b) self._buffer.seek(bookmark) return (self._cursor + pos)
def write(self, b): self._checkClosed() bookmark = self._buffer.tell() self._buffer.seek(0, io.SEEK_END) pos = self._buffer.write(b) self._buffer.seek(bookmark) return (self._cursor + pos)<|docstring|>Append to the end of the buffer without changing the position.<|endoftext|>
07d59ddcd168c89084949d796fd9a30d77ac01c98948404aec0ed0b62f8d4049
def read(self, size=(- 1)): 'Read and move the cursor.' self._checkClosed() data = self._buffer.read(size) self._cursor += len(data) return data
Read and move the cursor.
google/cloud/storage/fileio.py
read
smita2006/pythonstorageRepo
2
python
def read(self, size=(- 1)): self._checkClosed() data = self._buffer.read(size) self._cursor += len(data) return data
def read(self, size=(- 1)): self._checkClosed() data = self._buffer.read(size) self._cursor += len(data) return data<|docstring|>Read and move the cursor.<|endoftext|>
4c2917e8dff633a53da11a40c5f25dde9686746dacfc33b2411eb7f66da1aefd
def flush(self): 'Delete already-read data (all data to the left of the position).' self._checkClosed() leftover = self._buffer.read() self._buffer.seek(0) self._buffer.truncate(0) self._buffer.write(leftover) self._buffer.seek(0)
Delete already-read data (all data to the left of the position).
google/cloud/storage/fileio.py
flush
smita2006/pythonstorageRepo
2
python
def flush(self): self._checkClosed() leftover = self._buffer.read() self._buffer.seek(0) self._buffer.truncate(0) self._buffer.write(leftover) self._buffer.seek(0)
def flush(self): self._checkClosed() leftover = self._buffer.read() self._buffer.seek(0) self._buffer.truncate(0) self._buffer.write(leftover) self._buffer.seek(0)<|docstring|>Delete already-read data (all data to the left of the position).<|endoftext|>
d58274ddbbe1367ed3fc050b39916f8e53d65dc2df686cbb3aca8d47e3d42ab2
def tell(self): 'Report how many bytes have been read from the buffer in total.' return self._cursor
Report how many bytes have been read from the buffer in total.
google/cloud/storage/fileio.py
tell
smita2006/pythonstorageRepo
2
python
def tell(self): return self._cursor
def tell(self): return self._cursor<|docstring|>Report how many bytes have been read from the buffer in total.<|endoftext|>
09ac7b9585d04b165a5d9d7cbb86cbe1b1d9baf6abfc694299cb6e37a115bdaf
def seek(self, pos): 'Seek to a position (backwards only) within the internal buffer.\n\n This implementation of seek() verifies that the seek destination is\n contained in _buffer. It will raise ValueError if the destination byte\n has already been purged from the buffer.\n\n The "whence" argument is not supported in this implementation.\n ' self._checkClosed() buffer_initial_pos = self._buffer.tell() difference = (pos - self._cursor) buffer_seek_result = self._buffer.seek(difference, io.SEEK_CUR) if ((not ((buffer_seek_result - buffer_initial_pos) == difference)) or (pos > self._cursor)): self._buffer.seek(buffer_initial_pos) raise ValueError('Cannot seek() to that value.') self._cursor = pos return self._cursor
Seek to a position (backwards only) within the internal buffer. This implementation of seek() verifies that the seek destination is contained in _buffer. It will raise ValueError if the destination byte has already been purged from the buffer. The "whence" argument is not supported in this implementation.
google/cloud/storage/fileio.py
seek
smita2006/pythonstorageRepo
2
python
def seek(self, pos): 'Seek to a position (backwards only) within the internal buffer.\n\n This implementation of seek() verifies that the seek destination is\n contained in _buffer. It will raise ValueError if the destination byte\n has already been purged from the buffer.\n\n The "whence" argument is not supported in this implementation.\n ' self._checkClosed() buffer_initial_pos = self._buffer.tell() difference = (pos - self._cursor) buffer_seek_result = self._buffer.seek(difference, io.SEEK_CUR) if ((not ((buffer_seek_result - buffer_initial_pos) == difference)) or (pos > self._cursor)): self._buffer.seek(buffer_initial_pos) raise ValueError('Cannot seek() to that value.') self._cursor = pos return self._cursor
def seek(self, pos): 'Seek to a position (backwards only) within the internal buffer.\n\n This implementation of seek() verifies that the seek destination is\n contained in _buffer. It will raise ValueError if the destination byte\n has already been purged from the buffer.\n\n The "whence" argument is not supported in this implementation.\n ' self._checkClosed() buffer_initial_pos = self._buffer.tell() difference = (pos - self._cursor) buffer_seek_result = self._buffer.seek(difference, io.SEEK_CUR) if ((not ((buffer_seek_result - buffer_initial_pos) == difference)) or (pos > self._cursor)): self._buffer.seek(buffer_initial_pos) raise ValueError('Cannot seek() to that value.') self._cursor = pos return self._cursor<|docstring|>Seek to a position (backwards only) within the internal buffer. This implementation of seek() verifies that the seek destination is contained in _buffer. It will raise ValueError if the destination byte has already been purged from the buffer. The "whence" argument is not supported in this implementation.<|endoftext|>
18eaa1b35ae49f1675b2e563f01790e9edfb57d0b57902ae353ba12e0e0c8cce
def __len__(self): 'Determine the size of the buffer by seeking to the end.' bookmark = self._buffer.tell() length = self._buffer.seek(0, io.SEEK_END) self._buffer.seek(bookmark) return length
Determine the size of the buffer by seeking to the end.
google/cloud/storage/fileio.py
__len__
smita2006/pythonstorageRepo
2
python
def __len__(self): bookmark = self._buffer.tell() length = self._buffer.seek(0, io.SEEK_END) self._buffer.seek(bookmark) return length
def __len__(self): bookmark = self._buffer.tell() length = self._buffer.seek(0, io.SEEK_END) self._buffer.seek(bookmark) return length<|docstring|>Determine the size of the buffer by seeking to the end.<|endoftext|>
a7c7013955949340a2603f05b2cc599577cfe7a4ccf10e9dd26935891f478a9b
def gs_quad_version_of_rz_array(r, z): 'Returns: (X,Q)' from numpy import size, zeros, ravel, shape, meshgrid, array n = size(r) X = zeros((n, 2)) X[(:, 0)] = ravel(r) X[(:, 1)] = ravel(z) n1 = shape(r)[0] n2 = shape(r)[1] (i1, i2) = meshgrid(range((n1 - 1)), range((n2 - 1)), indexing='ij') i1 = ravel(i1) i2 = ravel(i2) idx = (lambda i1, i2: ((n2 * i1) + i2)) Q = array([idx(i1, i2), idx(i1, (i2 + 1)), idx((i1 + 1), (i2 + 1)), idx((i1 + 1), i2)]).T return (X, Q)
Returns: (X,Q)
gs/gs_quad_version_of_rz_array.py
gs_quad_version_of_rz_array
achronist/python-grad-shafranov
0
python
def gs_quad_version_of_rz_array(r, z): from numpy import size, zeros, ravel, shape, meshgrid, array n = size(r) X = zeros((n, 2)) X[(:, 0)] = ravel(r) X[(:, 1)] = ravel(z) n1 = shape(r)[0] n2 = shape(r)[1] (i1, i2) = meshgrid(range((n1 - 1)), range((n2 - 1)), indexing='ij') i1 = ravel(i1) i2 = ravel(i2) idx = (lambda i1, i2: ((n2 * i1) + i2)) Q = array([idx(i1, i2), idx(i1, (i2 + 1)), idx((i1 + 1), (i2 + 1)), idx((i1 + 1), i2)]).T return (X, Q)
def gs_quad_version_of_rz_array(r, z): from numpy import size, zeros, ravel, shape, meshgrid, array n = size(r) X = zeros((n, 2)) X[(:, 0)] = ravel(r) X[(:, 1)] = ravel(z) n1 = shape(r)[0] n2 = shape(r)[1] (i1, i2) = meshgrid(range((n1 - 1)), range((n2 - 1)), indexing='ij') i1 = ravel(i1) i2 = ravel(i2) idx = (lambda i1, i2: ((n2 * i1) + i2)) Q = array([idx(i1, i2), idx(i1, (i2 + 1)), idx((i1 + 1), (i2 + 1)), idx((i1 + 1), i2)]).T return (X, Q)<|docstring|>Returns: (X,Q)<|endoftext|>
a266b9578f152d2f91d145d5d4187804d8c0a9ca54e22a910194e2524cf7d536
def test_init(token_data, expires_at): '\n Creating a MockRenewer with partial data (expires_at, access_token) results in\n errors.\n Either complete data or no partial data works.\n ' authorizer = MockRenewer(token_data, access_token=ACCESS_TOKEN, expires_at=expires_at) assert (authorizer.access_token == ACCESS_TOKEN) assert (authorizer.access_token != token_data['access_token']) authorizer = MockRenewer(token_data) assert (authorizer.access_token == token_data['access_token']) assert (authorizer.expires_at == token_data['expires_at_seconds']) with pytest.raises(exc.GlobusSDKUsageError): MockRenewer(token_data, access_token=ACCESS_TOKEN) with pytest.raises(exc.GlobusSDKUsageError): MockRenewer(token_data, expires_at=expires_at)
Creating a MockRenewer with partial data (expires_at, access_token) results in errors. Either complete data or no partial data works.
tests/unit/authorizers/test_renewing_authorizer.py
test_init
mh-globus/globus-sdk-python
47
python
def test_init(token_data, expires_at): '\n Creating a MockRenewer with partial data (expires_at, access_token) results in\n errors.\n Either complete data or no partial data works.\n ' authorizer = MockRenewer(token_data, access_token=ACCESS_TOKEN, expires_at=expires_at) assert (authorizer.access_token == ACCESS_TOKEN) assert (authorizer.access_token != token_data['access_token']) authorizer = MockRenewer(token_data) assert (authorizer.access_token == token_data['access_token']) assert (authorizer.expires_at == token_data['expires_at_seconds']) with pytest.raises(exc.GlobusSDKUsageError): MockRenewer(token_data, access_token=ACCESS_TOKEN) with pytest.raises(exc.GlobusSDKUsageError): MockRenewer(token_data, expires_at=expires_at)
def test_init(token_data, expires_at): '\n Creating a MockRenewer with partial data (expires_at, access_token) results in\n errors.\n Either complete data or no partial data works.\n ' authorizer = MockRenewer(token_data, access_token=ACCESS_TOKEN, expires_at=expires_at) assert (authorizer.access_token == ACCESS_TOKEN) assert (authorizer.access_token != token_data['access_token']) authorizer = MockRenewer(token_data) assert (authorizer.access_token == token_data['access_token']) assert (authorizer.expires_at == token_data['expires_at_seconds']) with pytest.raises(exc.GlobusSDKUsageError): MockRenewer(token_data, access_token=ACCESS_TOKEN) with pytest.raises(exc.GlobusSDKUsageError): MockRenewer(token_data, expires_at=expires_at)<|docstring|>Creating a MockRenewer with partial data (expires_at, access_token) results in errors. Either complete data or no partial data works.<|endoftext|>
3f0c85c8e3a28a579ff6a455f3abeab15c8f8aa65327deba7ad555568fa8bbc5
def test_get_new_access_token(authorizer, token_data, on_refresh): '\n Calls get_new_acces token, confirms that the mock _get_token_data\n is used and that the mock on_refresh function is called.\n ' original_hash = authorizer._access_token_hash authorizer._get_new_access_token() assert (authorizer.access_token == token_data['access_token']) assert (authorizer.expires_at == token_data['expires_at_seconds']) assert (authorizer._access_token_hash != original_hash) on_refresh.assert_called_once()
Calls get_new_acces token, confirms that the mock _get_token_data is used and that the mock on_refresh function is called.
tests/unit/authorizers/test_renewing_authorizer.py
test_get_new_access_token
mh-globus/globus-sdk-python
47
python
def test_get_new_access_token(authorizer, token_data, on_refresh): '\n Calls get_new_acces token, confirms that the mock _get_token_data\n is used and that the mock on_refresh function is called.\n ' original_hash = authorizer._access_token_hash authorizer._get_new_access_token() assert (authorizer.access_token == token_data['access_token']) assert (authorizer.expires_at == token_data['expires_at_seconds']) assert (authorizer._access_token_hash != original_hash) on_refresh.assert_called_once()
def test_get_new_access_token(authorizer, token_data, on_refresh): '\n Calls get_new_acces token, confirms that the mock _get_token_data\n is used and that the mock on_refresh function is called.\n ' original_hash = authorizer._access_token_hash authorizer._get_new_access_token() assert (authorizer.access_token == token_data['access_token']) assert (authorizer.expires_at == token_data['expires_at_seconds']) assert (authorizer._access_token_hash != original_hash) on_refresh.assert_called_once()<|docstring|>Calls get_new_acces token, confirms that the mock _get_token_data is used and that the mock on_refresh function is called.<|endoftext|>
d900bd748e44c6c79219f97dcef54a7b0fa01e206fd0a01a655e968140e68658
def test_ensure_valid_token_ok(authorizer): '\n Confirms nothing is done before the access_token expires,\n ' authorizer.ensure_valid_token() assert (authorizer.access_token == ACCESS_TOKEN)
Confirms nothing is done before the access_token expires,
tests/unit/authorizers/test_renewing_authorizer.py
test_ensure_valid_token_ok
mh-globus/globus-sdk-python
47
python
def test_ensure_valid_token_ok(authorizer): '\n \n ' authorizer.ensure_valid_token() assert (authorizer.access_token == ACCESS_TOKEN)
def test_ensure_valid_token_ok(authorizer): '\n \n ' authorizer.ensure_valid_token() assert (authorizer.access_token == ACCESS_TOKEN)<|docstring|>Confirms nothing is done before the access_token expires,<|endoftext|>
23cddbd85255f42bd488b77eab7ff18e95183f5a67d81c40489e60ed1ab74595
def test_ensure_valid_token_expired(expired_authorizer, token_data): '\n Confirms a new access_token is gotten after expiration\n ' expired_authorizer.ensure_valid_token() assert (expired_authorizer.access_token == token_data['access_token']) assert (expired_authorizer.expires_at == token_data['expires_at_seconds'])
Confirms a new access_token is gotten after expiration
tests/unit/authorizers/test_renewing_authorizer.py
test_ensure_valid_token_expired
mh-globus/globus-sdk-python
47
python
def test_ensure_valid_token_expired(expired_authorizer, token_data): '\n \n ' expired_authorizer.ensure_valid_token() assert (expired_authorizer.access_token == token_data['access_token']) assert (expired_authorizer.expires_at == token_data['expires_at_seconds'])
def test_ensure_valid_token_expired(expired_authorizer, token_data): '\n \n ' expired_authorizer.ensure_valid_token() assert (expired_authorizer.access_token == token_data['access_token']) assert (expired_authorizer.expires_at == token_data['expires_at_seconds'])<|docstring|>Confirms a new access_token is gotten after expiration<|endoftext|>
6abd7c1c1ab0ced4bc3347a210ddef83422f6678da54416a69156b25c75211d3
def test_ensure_valid_token_no_token(authorizer, token_data): '\n Confirms a new access_token is gotten if the old one is set to None\n ' authorizer.access_token = None authorizer.ensure_valid_token() assert (authorizer.access_token == token_data['access_token']) assert (authorizer.expires_at == token_data['expires_at_seconds'])
Confirms a new access_token is gotten if the old one is set to None
tests/unit/authorizers/test_renewing_authorizer.py
test_ensure_valid_token_no_token
mh-globus/globus-sdk-python
47
python
def test_ensure_valid_token_no_token(authorizer, token_data): '\n \n ' authorizer.access_token = None authorizer.ensure_valid_token() assert (authorizer.access_token == token_data['access_token']) assert (authorizer.expires_at == token_data['expires_at_seconds'])
def test_ensure_valid_token_no_token(authorizer, token_data): '\n \n ' authorizer.access_token = None authorizer.ensure_valid_token() assert (authorizer.access_token == token_data['access_token']) assert (authorizer.expires_at == token_data['expires_at_seconds'])<|docstring|>Confirms a new access_token is gotten if the old one is set to None<|endoftext|>
96d454ab544f07b221557fc5d50628e27d3bae20619e5a3b8abe62eb824f6087
def test_ensure_valid_token_no_expiration(authorizer, token_data): '\n Confirms a new access_token is gotten if expires_at is set to None\n ' authorizer.expires_at = None authorizer.ensure_valid_token() assert (authorizer.access_token == token_data['access_token']) assert (authorizer.expires_at == token_data['expires_at_seconds'])
Confirms a new access_token is gotten if expires_at is set to None
tests/unit/authorizers/test_renewing_authorizer.py
test_ensure_valid_token_no_expiration
mh-globus/globus-sdk-python
47
python
def test_ensure_valid_token_no_expiration(authorizer, token_data): '\n \n ' authorizer.expires_at = None authorizer.ensure_valid_token() assert (authorizer.access_token == token_data['access_token']) assert (authorizer.expires_at == token_data['expires_at_seconds'])
def test_ensure_valid_token_no_expiration(authorizer, token_data): '\n \n ' authorizer.expires_at = None authorizer.ensure_valid_token() assert (authorizer.access_token == token_data['access_token']) assert (authorizer.expires_at == token_data['expires_at_seconds'])<|docstring|>Confirms a new access_token is gotten if expires_at is set to None<|endoftext|>
f184f2611c89372ec92f998f13905baa3de42c9267a388099f9d1c4b76425f11
def test_get_authorization_header(authorizer): '\n Gets authorization header, confirms expected value\n ' assert (authorizer.get_authorization_header() == ('Bearer ' + ACCESS_TOKEN))
Gets authorization header, confirms expected value
tests/unit/authorizers/test_renewing_authorizer.py
test_get_authorization_header
mh-globus/globus-sdk-python
47
python
def test_get_authorization_header(authorizer): '\n \n ' assert (authorizer.get_authorization_header() == ('Bearer ' + ACCESS_TOKEN))
def test_get_authorization_header(authorizer): '\n \n ' assert (authorizer.get_authorization_header() == ('Bearer ' + ACCESS_TOKEN))<|docstring|>Gets authorization header, confirms expected value<|endoftext|>
499fd1adbbbb2b12dc35f44044e67f35bd512e826686756a54720141fbbc7e6c
def test_get_authorization_header_expired(expired_authorizer, token_data): '\n Sets the access_token to be expired, then gets authorization header\n Confirms header value uses the new access_token.\n ' assert (expired_authorizer.get_authorization_header() == ('Bearer ' + token_data['access_token']))
Sets the access_token to be expired, then gets authorization header Confirms header value uses the new access_token.
tests/unit/authorizers/test_renewing_authorizer.py
test_get_authorization_header_expired
mh-globus/globus-sdk-python
47
python
def test_get_authorization_header_expired(expired_authorizer, token_data): '\n Sets the access_token to be expired, then gets authorization header\n Confirms header value uses the new access_token.\n ' assert (expired_authorizer.get_authorization_header() == ('Bearer ' + token_data['access_token']))
def test_get_authorization_header_expired(expired_authorizer, token_data): '\n Sets the access_token to be expired, then gets authorization header\n Confirms header value uses the new access_token.\n ' assert (expired_authorizer.get_authorization_header() == ('Bearer ' + token_data['access_token']))<|docstring|>Sets the access_token to be expired, then gets authorization header Confirms header value uses the new access_token.<|endoftext|>
2d91c8929f3a84a5fc013c9cd0e33952fab7111e595416ef5daba7a0dd553e45
def test_get_authorization_header_no_token(authorizer, token_data): '\n Sets the access_token to None, then gets authorization header\n Confirms header value uses the new access_token.\n ' authorizer.access_token = None assert (authorizer.get_authorization_header() == ('Bearer ' + token_data['access_token']))
Sets the access_token to None, then gets authorization header Confirms header value uses the new access_token.
tests/unit/authorizers/test_renewing_authorizer.py
test_get_authorization_header_no_token
mh-globus/globus-sdk-python
47
python
def test_get_authorization_header_no_token(authorizer, token_data): '\n Sets the access_token to None, then gets authorization header\n Confirms header value uses the new access_token.\n ' authorizer.access_token = None assert (authorizer.get_authorization_header() == ('Bearer ' + token_data['access_token']))
def test_get_authorization_header_no_token(authorizer, token_data): '\n Sets the access_token to None, then gets authorization header\n Confirms header value uses the new access_token.\n ' authorizer.access_token = None assert (authorizer.get_authorization_header() == ('Bearer ' + token_data['access_token']))<|docstring|>Sets the access_token to None, then gets authorization header Confirms header value uses the new access_token.<|endoftext|>
81cf833c3095fb2526822d109a647dc55882c9de0e1960cfe5832529cf6dfa5f
def test_get_authorization_header_no_expires(authorizer, token_data): '\n Sets expires_at to None, then gets authorization header\n Confirms header value uses the new access_token.\n ' authorizer.expires_at = None assert (authorizer.get_authorization_header() == ('Bearer ' + token_data['access_token']))
Sets expires_at to None, then gets authorization header Confirms header value uses the new access_token.
tests/unit/authorizers/test_renewing_authorizer.py
test_get_authorization_header_no_expires
mh-globus/globus-sdk-python
47
python
def test_get_authorization_header_no_expires(authorizer, token_data): '\n Sets expires_at to None, then gets authorization header\n Confirms header value uses the new access_token.\n ' authorizer.expires_at = None assert (authorizer.get_authorization_header() == ('Bearer ' + token_data['access_token']))
def test_get_authorization_header_no_expires(authorizer, token_data): '\n Sets expires_at to None, then gets authorization header\n Confirms header value uses the new access_token.\n ' authorizer.expires_at = None assert (authorizer.get_authorization_header() == ('Bearer ' + token_data['access_token']))<|docstring|>Sets expires_at to None, then gets authorization header Confirms header value uses the new access_token.<|endoftext|>
0ee1af694f3305666dd48e0b437e98ffcc7357af4845ec020ed318ecd5806212
def test_handle_missing_authorization(authorizer): '\n Confirms that RenewingAuthorizers will attempt to fix 401s\n by treating their existing access_token as expired\n ' assert authorizer.handle_missing_authorization() assert (authorizer.expires_at is None)
Confirms that RenewingAuthorizers will attempt to fix 401s by treating their existing access_token as expired
tests/unit/authorizers/test_renewing_authorizer.py
test_handle_missing_authorization
mh-globus/globus-sdk-python
47
python
def test_handle_missing_authorization(authorizer): '\n Confirms that RenewingAuthorizers will attempt to fix 401s\n by treating their existing access_token as expired\n ' assert authorizer.handle_missing_authorization() assert (authorizer.expires_at is None)
def test_handle_missing_authorization(authorizer): '\n Confirms that RenewingAuthorizers will attempt to fix 401s\n by treating their existing access_token as expired\n ' assert authorizer.handle_missing_authorization() assert (authorizer.expires_at is None)<|docstring|>Confirms that RenewingAuthorizers will attempt to fix 401s by treating their existing access_token as expired<|endoftext|>
f904304a5d0ebbf98566a431ddabd6f256a7a8741b4ef80b3994eaff89f5a5a5
def data_json(self, pretty=False): 'Returns the data as a valid JSON string.' if pretty: return json.dumps(self.data, sort_keys=True, indent=4, separators=(',', ': ')) else: return json.dumps(self.data)
Returns the data as a valid JSON string.
ThreadFixProApi/ThreadFixProResponse.py
data_json
denimgroup/threadfix-python-api
1
python
def data_json(self, pretty=False): if pretty: return json.dumps(self.data, sort_keys=True, indent=4, separators=(',', ': ')) else: return json.dumps(self.data)
def data_json(self, pretty=False): if pretty: return json.dumps(self.data, sort_keys=True, indent=4, separators=(',', ': ')) else: return json.dumps(self.data)<|docstring|>Returns the data as a valid JSON string.<|endoftext|>
6a4d6b935cb784471478b7f838c9078d514aa29a7ce065990c48cf813bc9abc4
def _wait_for_node(config, host, check_function, retries, delay): '\n Polls the node until the check_function check passes.\n\n :param host: The target host on which to run the check\n :param check_function: called to determine node state\n :param retries: The number of times to retry the health check. Defaults to 10\n :param delay: A delay in seconds to wait before polling again. Defaults to 6 seconds.\n :return: True when the node is determined to be up. If the retries are exhausted, returns False\n ' attempts = 0 while (attempts < retries): if check_function(config, host): return True else: time.sleep(delay) attempts = (attempts + 1) return False
Polls the node until the check_function check passes. :param host: The target host on which to run the check :param check_function: called to determine node state :param retries: The number of times to retry the health check. Defaults to 10 :param delay: A delay in seconds to wait before polling again. Defaults to 6 seconds. :return: True when the node is determined to be up. If the retries are exhausted, returns False
medusa/cassandra_utils.py
_wait_for_node
bolteddown/cassandra-medusa
0
python
def _wait_for_node(config, host, check_function, retries, delay): '\n Polls the node until the check_function check passes.\n\n :param host: The target host on which to run the check\n :param check_function: called to determine node state\n :param retries: The number of times to retry the health check. Defaults to 10\n :param delay: A delay in seconds to wait before polling again. Defaults to 6 seconds.\n :return: True when the node is determined to be up. If the retries are exhausted, returns False\n ' attempts = 0 while (attempts < retries): if check_function(config, host): return True else: time.sleep(delay) attempts = (attempts + 1) return False
def _wait_for_node(config, host, check_function, retries, delay): '\n Polls the node until the check_function check passes.\n\n :param host: The target host on which to run the check\n :param check_function: called to determine node state\n :param retries: The number of times to retry the health check. Defaults to 10\n :param delay: A delay in seconds to wait before polling again. Defaults to 6 seconds.\n :return: True when the node is determined to be up. If the retries are exhausted, returns False\n ' attempts = 0 while (attempts < retries): if check_function(config, host): return True else: time.sleep(delay) attempts = (attempts + 1) return False<|docstring|>Polls the node until the check_function check passes. :param host: The target host on which to run the check :param check_function: called to determine node state :param retries: The number of times to retry the health check. Defaults to 10 :param delay: A delay in seconds to wait before polling again. Defaults to 6 seconds. :return: True when the node is determined to be up. If the retries are exhausted, returns False<|endoftext|>
4523dc428a9a410a05734effb664b1d7e39ef290086b2a2d4776504debfe8f76
def is_node_up(config, host): '\n Calls nodetool statusbinary, nodetool statusthrift or both. This function checks the output returned from nodetool\n and not the return code. There could be a normal return code of zero when the node is an unhealthy state and not\n accepting requests.\n\n :param health_check: Supported values are cql, thrift, and all. The latter will perform both checks. Defaults to\n cql.\n :param host: The target host on which to perform the check\n :return: True if the node is accepting requests, False otherwise. If both cql and thrift are checked, then the node\n must be ready to accept requests for both in order for the health check to be successful.\n ' health_check = config.checks.health_check if (int(config.cassandra.is_ccm) == 1): args = ['ccm', 'node1', 'nodetool'] if (health_check == 'thrift'): return is_ccm_up(args, 'statusthrift') elif (health_check == 'all'): return (is_ccm_up(list(args), 'statusbinary') and is_ccm_up(list(args), 'statusthrift')) else: return is_ccm_up(args, 'statusbinary') else: cassandra = Cassandra(config.cassandra) native_port = cassandra.native_port rpc_port = cassandra.rpc_port nc_timeout = 10 args = ['timeout', str(nc_timeout), 'nc', '-zv', host] if (health_check == 'thrift'): return is_cassandra_up(args, rpc_port) elif (health_check == 'all'): return (is_cassandra_up(list(args), rpc_port) and is_cassandra_up(list(args), native_port)) else: return is_cassandra_up(args, native_port)
Calls nodetool statusbinary, nodetool statusthrift or both. This function checks the output returned from nodetool and not the return code. There could be a normal return code of zero when the node is an unhealthy state and not accepting requests. :param health_check: Supported values are cql, thrift, and all. The latter will perform both checks. Defaults to cql. :param host: The target host on which to perform the check :return: True if the node is accepting requests, False otherwise. If both cql and thrift are checked, then the node must be ready to accept requests for both in order for the health check to be successful.
medusa/cassandra_utils.py
is_node_up
bolteddown/cassandra-medusa
0
python
def is_node_up(config, host): '\n Calls nodetool statusbinary, nodetool statusthrift or both. This function checks the output returned from nodetool\n and not the return code. There could be a normal return code of zero when the node is an unhealthy state and not\n accepting requests.\n\n :param health_check: Supported values are cql, thrift, and all. The latter will perform both checks. Defaults to\n cql.\n :param host: The target host on which to perform the check\n :return: True if the node is accepting requests, False otherwise. If both cql and thrift are checked, then the node\n must be ready to accept requests for both in order for the health check to be successful.\n ' health_check = config.checks.health_check if (int(config.cassandra.is_ccm) == 1): args = ['ccm', 'node1', 'nodetool'] if (health_check == 'thrift'): return is_ccm_up(args, 'statusthrift') elif (health_check == 'all'): return (is_ccm_up(list(args), 'statusbinary') and is_ccm_up(list(args), 'statusthrift')) else: return is_ccm_up(args, 'statusbinary') else: cassandra = Cassandra(config.cassandra) native_port = cassandra.native_port rpc_port = cassandra.rpc_port nc_timeout = 10 args = ['timeout', str(nc_timeout), 'nc', '-zv', host] if (health_check == 'thrift'): return is_cassandra_up(args, rpc_port) elif (health_check == 'all'): return (is_cassandra_up(list(args), rpc_port) and is_cassandra_up(list(args), native_port)) else: return is_cassandra_up(args, native_port)
def is_node_up(config, host): '\n Calls nodetool statusbinary, nodetool statusthrift or both. This function checks the output returned from nodetool\n and not the return code. There could be a normal return code of zero when the node is an unhealthy state and not\n accepting requests.\n\n :param health_check: Supported values are cql, thrift, and all. The latter will perform both checks. Defaults to\n cql.\n :param host: The target host on which to perform the check\n :return: True if the node is accepting requests, False otherwise. If both cql and thrift are checked, then the node\n must be ready to accept requests for both in order for the health check to be successful.\n ' health_check = config.checks.health_check if (int(config.cassandra.is_ccm) == 1): args = ['ccm', 'node1', 'nodetool'] if (health_check == 'thrift'): return is_ccm_up(args, 'statusthrift') elif (health_check == 'all'): return (is_ccm_up(list(args), 'statusbinary') and is_ccm_up(list(args), 'statusthrift')) else: return is_ccm_up(args, 'statusbinary') else: cassandra = Cassandra(config.cassandra) native_port = cassandra.native_port rpc_port = cassandra.rpc_port nc_timeout = 10 args = ['timeout', str(nc_timeout), 'nc', '-zv', host] if (health_check == 'thrift'): return is_cassandra_up(args, rpc_port) elif (health_check == 'all'): return (is_cassandra_up(list(args), rpc_port) and is_cassandra_up(list(args), native_port)) else: return is_cassandra_up(args, native_port)<|docstring|>Calls nodetool statusbinary, nodetool statusthrift or both. This function checks the output returned from nodetool and not the return code. There could be a normal return code of zero when the node is an unhealthy state and not accepting requests. :param health_check: Supported values are cql, thrift, and all. The latter will perform both checks. Defaults to cql. :param host: The target host on which to perform the check :return: True if the node is accepting requests, False otherwise. 
If both cql and thrift are checked, then the node must be ready to accept requests for both in order for the health check to be successful.<|endoftext|>
6fd6a134ed9d7c4550c5acd359d746fde50232eb8169a17f4e26e033f0bb2ed2
def new_session(self, retry=False): '\n Creates a new CQL session. If retry is True then attempt to create a CQL session with retry logic. The max\n number of retries is currently hard coded at 5 and the delay between attempts is also hard coded at 5 sec. If\n no session can be created after the max retries is reached, an exception is raised.\n ' cluster = Cluster(contact_points=self._ip_addresses, auth_provider=self._auth_provider, execution_profiles=self._execution_profiles, ssl_context=self._ssl_context) if retry: max_retries = 5 attempts = 0 delay = 5 while (attempts < max_retries): try: session = cluster.connect() return CqlSession(session, self._cassandra_config.resolve_ip_addresses) except Exception as e: logging.debug('Failed to create session', exc_info=e) time.sleep(delay) attempts = (attempts + 1) raise Exception('Could not establish CQL session after {attempts}'.format(attempts=attempts)) else: session = cluster.connect() return CqlSession(session, self._cassandra_config.resolve_ip_addresses)
Creates a new CQL session. If retry is True then attempt to create a CQL session with retry logic. The max number of retries is currently hard coded at 5 and the delay between attempts is also hard coded at 5 sec. If no session can be created after the max retries is reached, an exception is raised.
medusa/cassandra_utils.py
new_session
bolteddown/cassandra-medusa
0
python
def new_session(self, retry=False): '\n Creates a new CQL session. If retry is True then attempt to create a CQL session with retry logic. The max\n number of retries is currently hard coded at 5 and the delay between attempts is also hard coded at 5 sec. If\n no session can be created after the max retries is reached, an exception is raised.\n ' cluster = Cluster(contact_points=self._ip_addresses, auth_provider=self._auth_provider, execution_profiles=self._execution_profiles, ssl_context=self._ssl_context) if retry: max_retries = 5 attempts = 0 delay = 5 while (attempts < max_retries): try: session = cluster.connect() return CqlSession(session, self._cassandra_config.resolve_ip_addresses) except Exception as e: logging.debug('Failed to create session', exc_info=e) time.sleep(delay) attempts = (attempts + 1) raise Exception('Could not establish CQL session after {attempts}'.format(attempts=attempts)) else: session = cluster.connect() return CqlSession(session, self._cassandra_config.resolve_ip_addresses)
def new_session(self, retry=False): '\n Creates a new CQL session. If retry is True then attempt to create a CQL session with retry logic. The max\n number of retries is currently hard coded at 5 and the delay between attempts is also hard coded at 5 sec. If\n no session can be created after the max retries is reached, an exception is raised.\n ' cluster = Cluster(contact_points=self._ip_addresses, auth_provider=self._auth_provider, execution_profiles=self._execution_profiles, ssl_context=self._ssl_context) if retry: max_retries = 5 attempts = 0 delay = 5 while (attempts < max_retries): try: session = cluster.connect() return CqlSession(session, self._cassandra_config.resolve_ip_addresses) except Exception as e: logging.debug('Failed to create session', exc_info=e) time.sleep(delay) attempts = (attempts + 1) raise Exception('Could not establish CQL session after {attempts}'.format(attempts=attempts)) else: session = cluster.connect() return CqlSession(session, self._cassandra_config.resolve_ip_addresses)<|docstring|>Creates a new CQL session. If retry is True then attempt to create a CQL session with retry logic. The max number of retries is currently hard coded at 5 and the delay between attempts is also hard coded at 5 sec. If no session can be created after the max retries is reached, an exception is raised.<|endoftext|>
c9c32a51c92f38a341edebdca6313943c3a6a4ba860fd77de08f12bd9b1b27ba
def helper_function(input1, inputs2=None): '<Description>\n\n Assumptions:\n <any assumptions>\n\n Source:\n <source>\n\n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n\n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n\n Properties Used:\n N/A\n ' var1 = input1.var1 var2 = inputs.var2 var3 = (var1 * var2) magic = np.log(var3) output = Data() output.magic = magic output.var3 = var3 return output
<Description> Assumptions: <any assumptions> Source: <source> Inputs: <input1> <units> <input2> <units> .. Outputs: <output1> <units> <output2> <units> .. Properties Used: N/A
SUAVE/SUAVE-2.5.0/templates/Example_Callable_Attribute.py
helper_function
Vinicius-Tanigawa/Undergraduate-Research-Project
0
python
def helper_function(input1, inputs2=None): '<Description>\n\n Assumptions:\n <any assumptions>\n\n Source:\n <source>\n\n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n\n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n\n Properties Used:\n N/A\n ' var1 = input1.var1 var2 = inputs.var2 var3 = (var1 * var2) magic = np.log(var3) output = Data() output.magic = magic output.var3 = var3 return output
def helper_function(input1, inputs2=None): '<Description>\n\n Assumptions:\n <any assumptions>\n\n Source:\n <source>\n\n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n\n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n\n Properties Used:\n N/A\n ' var1 = input1.var1 var2 = inputs.var2 var3 = (var1 * var2) magic = np.log(var3) output = Data() output.magic = magic output.var3 = var3 return output<|docstring|><Description> Assumptions: <any assumptions> Source: <source> Inputs: <input1> <units> <input2> <units> .. Outputs: <output1> <units> <output2> <units> .. Properties Used: N/A<|endoftext|>
ead0098dfa73a8ccb352b73d4aac38b04b87de4ce7acc9b16ada30741c46f756
def __defaults__(self): '<Description>\n\n Assumptions:\n <any assumptions>\n\n Source:\n <source>\n\n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n\n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n\n Properties Used:\n <property1> <units>\n <property2> <units>\n ..\n ' self.area = None self.taper = None
<Description> Assumptions: <any assumptions> Source: <source> Inputs: <input1> <units> <input2> <units> .. Outputs: <output1> <units> <output2> <units> .. Properties Used: <property1> <units> <property2> <units> ..
SUAVE/SUAVE-2.5.0/templates/Example_Callable_Attribute.py
__defaults__
Vinicius-Tanigawa/Undergraduate-Research-Project
0
python
def __defaults__(self): '<Description>\n\n Assumptions:\n <any assumptions>\n\n Source:\n <source>\n\n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n\n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n\n Properties Used:\n <property1> <units>\n <property2> <units>\n ..\n ' self.area = None self.taper = None
def __defaults__(self): '<Description>\n\n Assumptions:\n <any assumptions>\n\n Source:\n <source>\n\n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n\n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n\n Properties Used:\n <property1> <units>\n <property2> <units>\n ..\n ' self.area = None self.taper = None<|docstring|><Description> Assumptions: <any assumptions> Source: <source> Inputs: <input1> <units> <input2> <units> .. Outputs: <output1> <units> <output2> <units> .. Properties Used: <property1> <units> <property2> <units> ..<|endoftext|>
c3f53acc51c6278b3671c05effa9d21ce22f34c890c05978bc556e4e30147126
def __check__(self): '<Description>\n\n Assumptions:\n <any assumptions>\n\n Source:\n <source>\n\n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n\n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n\n Properties Used:\n <property1> <units>\n <property2> <units>\n ..\n ' if (self.taper == 10): self.area = 20
<Description> Assumptions: <any assumptions> Source: <source> Inputs: <input1> <units> <input2> <units> .. Outputs: <output1> <units> <output2> <units> .. Properties Used: <property1> <units> <property2> <units> ..
SUAVE/SUAVE-2.5.0/templates/Example_Callable_Attribute.py
__check__
Vinicius-Tanigawa/Undergraduate-Research-Project
0
python
def __check__(self): '<Description>\n\n Assumptions:\n <any assumptions>\n\n Source:\n <source>\n\n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n\n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n\n Properties Used:\n <property1> <units>\n <property2> <units>\n ..\n ' if (self.taper == 10): self.area = 20
def __check__(self): '<Description>\n\n Assumptions:\n <any assumptions>\n\n Source:\n <source>\n\n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n\n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n\n Properties Used:\n <property1> <units>\n <property2> <units>\n ..\n ' if (self.taper == 10): self.area = 20<|docstring|><Description> Assumptions: <any assumptions> Source: <source> Inputs: <input1> <units> <input2> <units> .. Outputs: <output1> <units> <output2> <units> .. Properties Used: <property1> <units> <property2> <units> ..<|endoftext|>
962f447e214a61bc069aa8f22c954f8917ab4e570f4a436952c3017f6f20e169
def __call__(self, input1, input2=None): '<Description>\n\n Assumptions:\n <any assumptions>\n\n Source:\n <source>\n\n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n\n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n\n Properties Used:\n <property1> <units>\n <property2> <units>\n ..\n ' var1 = input1.var1 var2 = inputs.var2 var3 = (var1 * var2) magic = np.log(var3) output = Data() output.magic = magic output.var3 = var3 return output
<Description> Assumptions: <any assumptions> Source: <source> Inputs: <input1> <units> <input2> <units> .. Outputs: <output1> <units> <output2> <units> .. Properties Used: <property1> <units> <property2> <units> ..
SUAVE/SUAVE-2.5.0/templates/Example_Callable_Attribute.py
__call__
Vinicius-Tanigawa/Undergraduate-Research-Project
0
python
def __call__(self, input1, input2=None): '<Description>\n\n Assumptions:\n <any assumptions>\n\n Source:\n <source>\n\n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n\n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n\n Properties Used:\n <property1> <units>\n <property2> <units>\n ..\n ' var1 = input1.var1 var2 = inputs.var2 var3 = (var1 * var2) magic = np.log(var3) output = Data() output.magic = magic output.var3 = var3 return output
def __call__(self, input1, input2=None): '<Description>\n\n Assumptions:\n <any assumptions>\n\n Source:\n <source>\n\n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n\n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n\n Properties Used:\n <property1> <units>\n <property2> <units>\n ..\n ' var1 = input1.var1 var2 = inputs.var2 var3 = (var1 * var2) magic = np.log(var3) output = Data() output.magic = magic output.var3 = var3 return output<|docstring|><Description> Assumptions: <any assumptions> Source: <source> Inputs: <input1> <units> <input2> <units> .. Outputs: <output1> <units> <output2> <units> .. Properties Used: <property1> <units> <property2> <units> ..<|endoftext|>
cd0f0cfcb5848b4703e217385c4019d264439b740b6abf9c09d3035a11ae0d4b
def do_this(input1, input2=None): '<Description>\n\n Assumptions:\n <any assumptions>\n\n Source:\n <source>\n\n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n\n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n\n Properties Used:\n <property1> <units>\n <property2> <units>\n ..\n ' var1 = input1.var1 var2 = inputs.var2 var3 = (var1 * var2) magic = np.log(var3) output = Data() output.magic = magic output.var3 = var3 return output
<Description> Assumptions: <any assumptions> Source: <source> Inputs: <input1> <units> <input2> <units> .. Outputs: <output1> <units> <output2> <units> .. Properties Used: <property1> <units> <property2> <units> ..
SUAVE/SUAVE-2.5.0/templates/Example_Callable_Attribute.py
do_this
Vinicius-Tanigawa/Undergraduate-Research-Project
0
python
def do_this(input1, input2=None): '<Description>\n\n Assumptions:\n <any assumptions>\n\n Source:\n <source>\n\n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n\n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n\n Properties Used:\n <property1> <units>\n <property2> <units>\n ..\n ' var1 = input1.var1 var2 = inputs.var2 var3 = (var1 * var2) magic = np.log(var3) output = Data() output.magic = magic output.var3 = var3 return output
def do_this(input1, input2=None): '<Description>\n\n Assumptions:\n <any assumptions>\n\n Source:\n <source>\n\n Inputs:\n <input1> <units>\n <input2> <units>\n ..\n\n Outputs:\n <output1> <units>\n <output2> <units>\n ..\n\n Properties Used:\n <property1> <units>\n <property2> <units>\n ..\n ' var1 = input1.var1 var2 = inputs.var2 var3 = (var1 * var2) magic = np.log(var3) output = Data() output.magic = magic output.var3 = var3 return output<|docstring|><Description> Assumptions: <any assumptions> Source: <source> Inputs: <input1> <units> <input2> <units> .. Outputs: <output1> <units> <output2> <units> .. Properties Used: <property1> <units> <property2> <units> ..<|endoftext|>
988a811c0ece006dea7c3ad8d8bab62449ef28210d5172615195bdb3e01d6dd6
@receiver(post_save, sender=settings.AUTH_USER_MODEL) def create_auth_token(sender, instance=None, created=False, **kwargs): 'Automatically generate an API key when a user is created, then create Agent.' if created: api_token = Token.objects.create(user=instance) if (instance.is_superuser is False): Agent.objects.create(scan_agent=instance, api_token=api_token)
Automatically generate an API key when a user is created, then create Agent.
master/django_scantron/models.py
create_auth_token
hdsnc1473/scantron
2
python
@receiver(post_save, sender=settings.AUTH_USER_MODEL) def create_auth_token(sender, instance=None, created=False, **kwargs): if created: api_token = Token.objects.create(user=instance) if (instance.is_superuser is False): Agent.objects.create(scan_agent=instance, api_token=api_token)
@receiver(post_save, sender=settings.AUTH_USER_MODEL) def create_auth_token(sender, instance=None, created=False, **kwargs): if created: api_token = Token.objects.create(user=instance) if (instance.is_superuser is False): Agent.objects.create(scan_agent=instance, api_token=api_token)<|docstring|>Automatically generate an API key when a user is created, then create Agent.<|endoftext|>
c4befed939b02a27d1cc11f52d09ed5c1d978b32f0d09642f046df3b6e2d94e7
def clean(self): 'Checks for any invalid IPs, IP subnets, or FQDNs in the targets and excluded_targets fields.' target_extractor = extract_targets.TargetExtractor(targets_string=self.targets, private_ips_allowed=True, sort_targets=True) targets_dict = target_extractor.targets_dict if targets_dict['invalid_targets']: invalid_targets = ','.join(target_extractor.targets_dict['invalid_targets']) raise ValidationError(f'Invalid targets provided: {invalid_targets}') self.targets = targets_dict['as_nmap'] target_extractor = extract_targets.TargetExtractor(targets_string=self.excluded_targets, private_ips_allowed=True, sort_targets=True) targets_dict = target_extractor.targets_dict if targets_dict['invalid_targets']: invalid_targets = ','.join(target_extractor.targets_dict['invalid_targets']) raise ValidationError(f'Invalid excluded targets provided: {invalid_targets}') self.excluded_targets = targets_dict['as_nmap'] if (self.email_scan_alerts and (not self.email_alert_address)): raise ValidationError(f"Provide an email address if enabling 'Email scan alerts'")
Checks for any invalid IPs, IP subnets, or FQDNs in the targets and excluded_targets fields.
master/django_scantron/models.py
clean
hdsnc1473/scantron
2
python
def clean(self): target_extractor = extract_targets.TargetExtractor(targets_string=self.targets, private_ips_allowed=True, sort_targets=True) targets_dict = target_extractor.targets_dict if targets_dict['invalid_targets']: invalid_targets = ','.join(target_extractor.targets_dict['invalid_targets']) raise ValidationError(f'Invalid targets provided: {invalid_targets}') self.targets = targets_dict['as_nmap'] target_extractor = extract_targets.TargetExtractor(targets_string=self.excluded_targets, private_ips_allowed=True, sort_targets=True) targets_dict = target_extractor.targets_dict if targets_dict['invalid_targets']: invalid_targets = ','.join(target_extractor.targets_dict['invalid_targets']) raise ValidationError(f'Invalid excluded targets provided: {invalid_targets}') self.excluded_targets = targets_dict['as_nmap'] if (self.email_scan_alerts and (not self.email_alert_address)): raise ValidationError(f"Provide an email address if enabling 'Email scan alerts'")
def clean(self): target_extractor = extract_targets.TargetExtractor(targets_string=self.targets, private_ips_allowed=True, sort_targets=True) targets_dict = target_extractor.targets_dict if targets_dict['invalid_targets']: invalid_targets = ','.join(target_extractor.targets_dict['invalid_targets']) raise ValidationError(f'Invalid targets provided: {invalid_targets}') self.targets = targets_dict['as_nmap'] target_extractor = extract_targets.TargetExtractor(targets_string=self.excluded_targets, private_ips_allowed=True, sort_targets=True) targets_dict = target_extractor.targets_dict if targets_dict['invalid_targets']: invalid_targets = ','.join(target_extractor.targets_dict['invalid_targets']) raise ValidationError(f'Invalid excluded targets provided: {invalid_targets}') self.excluded_targets = targets_dict['as_nmap'] if (self.email_scan_alerts and (not self.email_alert_address)): raise ValidationError(f"Provide an email address if enabling 'Email scan alerts'")<|docstring|>Checks for any invalid IPs, IP subnets, or FQDNs in the targets and excluded_targets fields.<|endoftext|>
5e1ddc802eb11c4fe241f2eea468310d72eda6f5721bf46a0e35d28aca2f0d69
def _show_stream_items(self, stream_vis, stream_items): 'Paint the given stream_items in to visualizer. If visualizer is dirty then return False else True.\n ' vals = self._extract_vals(stream_items) if (not len(vals)): return True stream_vis.series += vals self.clear_artists(stream_vis) (n, bins, stream_vis.bars_artists) = stream_vis.ax.hist(stream_vis.series, bins=stream_vis.bins, normed=stream_vis.normed, color=stream_vis.color, edgecolor=stream_vis.edge_color, histtype=stream_vis.histtype, alpha=stream_vis.opacity, linewidth=stream_vis.linewidth) stream_vis.ax.set_xticks(bins) return False
Paint the given stream_items in to visualizer. If visualizer is dirty then return False else True.
tensorwatch/mpl/histogram.py
_show_stream_items
NunoEdgarGFlowHub/tensorwatch
3,453
python
def _show_stream_items(self, stream_vis, stream_items): '\n ' vals = self._extract_vals(stream_items) if (not len(vals)): return True stream_vis.series += vals self.clear_artists(stream_vis) (n, bins, stream_vis.bars_artists) = stream_vis.ax.hist(stream_vis.series, bins=stream_vis.bins, normed=stream_vis.normed, color=stream_vis.color, edgecolor=stream_vis.edge_color, histtype=stream_vis.histtype, alpha=stream_vis.opacity, linewidth=stream_vis.linewidth) stream_vis.ax.set_xticks(bins) return False
def _show_stream_items(self, stream_vis, stream_items): '\n ' vals = self._extract_vals(stream_items) if (not len(vals)): return True stream_vis.series += vals self.clear_artists(stream_vis) (n, bins, stream_vis.bars_artists) = stream_vis.ax.hist(stream_vis.series, bins=stream_vis.bins, normed=stream_vis.normed, color=stream_vis.color, edgecolor=stream_vis.edge_color, histtype=stream_vis.histtype, alpha=stream_vis.opacity, linewidth=stream_vis.linewidth) stream_vis.ax.set_xticks(bins) return False<|docstring|>Paint the given stream_items in to visualizer. If visualizer is dirty then return False else True.<|endoftext|>
5cf2151ce0dac8bae49ced839fe6cb2c075566dec122ccc67bb81362ed1f6dd2
def get_all_state_managers(conf): '\n @param conf - An instance of Config class\n Reads the config for requested state managers.\n Instantiates them, start and then return them.\n ' state_managers = [] state_managers.extend(get_all_zk_state_managers(conf)) state_managers.extend(get_all_file_state_managers(conf)) return state_managers
@param conf - An instance of Config class Reads the config for requested state managers. Instantiates them, start and then return them.
heron/statemgrs/src/python/statemanagerfactory.py
get_all_state_managers
NunoEdgarGFlowHub/heron
1
python
def get_all_state_managers(conf): '\n @param conf - An instance of Config class\n Reads the config for requested state managers.\n Instantiates them, start and then return them.\n ' state_managers = [] state_managers.extend(get_all_zk_state_managers(conf)) state_managers.extend(get_all_file_state_managers(conf)) return state_managers
def get_all_state_managers(conf): '\n @param conf - An instance of Config class\n Reads the config for requested state managers.\n Instantiates them, start and then return them.\n ' state_managers = [] state_managers.extend(get_all_zk_state_managers(conf)) state_managers.extend(get_all_file_state_managers(conf)) return state_managers<|docstring|>@param conf - An instance of Config class Reads the config for requested state managers. Instantiates them, start and then return them.<|endoftext|>
0ed1c1418bbd33a8c0b22eba1299711850e29c971669419bd58501afe7aa434c
def get_all_zk_state_managers(conf): '\n Connects to all the zookeeper state_managers and returns\n the connected state_managers instances.\n ' state_managers = [] state_locations = conf.get_state_locations_of_type('zookeeper') for location in state_locations: name = location['name'] hostport = location['hostport'] host = None port = None if (':' in hostport): hostportlist = hostport.split(':') if (len(hostportlist) == 2): host = hostportlist[0] port = int(hostportlist[1]) if ((not host) or (not port)): raise Exception(("Hostport for %s must be of the format 'host:port'." % name)) tunnelhost = location['tunnelhost'] rootpath = location['rootpath'] LOG.info(((((('Connecting to zk hostport: ' + host) + ':') + str(port)) + ' rootpath: ') + rootpath)) state_manager = ZkStateManager(name, host, port, rootpath, tunnelhost) try: state_manager.start() except Exception as e: LOG.error('Exception while connecting to state_manager.') traceback.print_exc() state_managers.append(state_manager) return state_managers
Connects to all the zookeeper state_managers and returns the connected state_managers instances.
heron/statemgrs/src/python/statemanagerfactory.py
get_all_zk_state_managers
NunoEdgarGFlowHub/heron
1
python
def get_all_zk_state_managers(conf): '\n Connects to all the zookeeper state_managers and returns\n the connected state_managers instances.\n ' state_managers = [] state_locations = conf.get_state_locations_of_type('zookeeper') for location in state_locations: name = location['name'] hostport = location['hostport'] host = None port = None if (':' in hostport): hostportlist = hostport.split(':') if (len(hostportlist) == 2): host = hostportlist[0] port = int(hostportlist[1]) if ((not host) or (not port)): raise Exception(("Hostport for %s must be of the format 'host:port'." % name)) tunnelhost = location['tunnelhost'] rootpath = location['rootpath'] LOG.info(((((('Connecting to zk hostport: ' + host) + ':') + str(port)) + ' rootpath: ') + rootpath)) state_manager = ZkStateManager(name, host, port, rootpath, tunnelhost) try: state_manager.start() except Exception as e: LOG.error('Exception while connecting to state_manager.') traceback.print_exc() state_managers.append(state_manager) return state_managers
def get_all_zk_state_managers(conf): '\n Connects to all the zookeeper state_managers and returns\n the connected state_managers instances.\n ' state_managers = [] state_locations = conf.get_state_locations_of_type('zookeeper') for location in state_locations: name = location['name'] hostport = location['hostport'] host = None port = None if (':' in hostport): hostportlist = hostport.split(':') if (len(hostportlist) == 2): host = hostportlist[0] port = int(hostportlist[1]) if ((not host) or (not port)): raise Exception(("Hostport for %s must be of the format 'host:port'." % name)) tunnelhost = location['tunnelhost'] rootpath = location['rootpath'] LOG.info(((((('Connecting to zk hostport: ' + host) + ':') + str(port)) + ' rootpath: ') + rootpath)) state_manager = ZkStateManager(name, host, port, rootpath, tunnelhost) try: state_manager.start() except Exception as e: LOG.error('Exception while connecting to state_manager.') traceback.print_exc() state_managers.append(state_manager) return state_managers<|docstring|>Connects to all the zookeeper state_managers and returns the connected state_managers instances.<|endoftext|>
7325910d7b0ccca81344fb662c5d2fc36af3243bbb82b43ad5165fcb950d3f85
def get_all_file_state_managers(conf): '\n Returns all the file state_managers.\n ' state_managers = [] state_locations = conf.get_state_locations_of_type('file') for location in state_locations: name = location['name'] rootpath = os.path.expanduser(location['rootpath']) LOG.info(('Connecting to file state with rootpath: ' + rootpath)) state_manager = FileStateManager(name, rootpath) try: state_manager.start() except Exception as e: LOG.error('Exception while connecting to state_manager.') traceback.print_exc() state_managers.append(state_manager) return state_managers
Returns all the file state_managers.
heron/statemgrs/src/python/statemanagerfactory.py
get_all_file_state_managers
NunoEdgarGFlowHub/heron
1
python
def get_all_file_state_managers(conf): '\n \n ' state_managers = [] state_locations = conf.get_state_locations_of_type('file') for location in state_locations: name = location['name'] rootpath = os.path.expanduser(location['rootpath']) LOG.info(('Connecting to file state with rootpath: ' + rootpath)) state_manager = FileStateManager(name, rootpath) try: state_manager.start() except Exception as e: LOG.error('Exception while connecting to state_manager.') traceback.print_exc() state_managers.append(state_manager) return state_managers
def get_all_file_state_managers(conf): '\n \n ' state_managers = [] state_locations = conf.get_state_locations_of_type('file') for location in state_locations: name = location['name'] rootpath = os.path.expanduser(location['rootpath']) LOG.info(('Connecting to file state with rootpath: ' + rootpath)) state_manager = FileStateManager(name, rootpath) try: state_manager.start() except Exception as e: LOG.error('Exception while connecting to state_manager.') traceback.print_exc() state_managers.append(state_manager) return state_managers<|docstring|>Returns all the file state_managers.<|endoftext|>
25ab3d5714233796fc48fdbcc86258320b3b41192b729a5a28c874dbcbf7a8b6
def __init__(self, filename): '\n Create a new logger with an open file handle to the given filename.\n Will overwrite the file if it already exists.\n :param filename:\n ' self.filename = filename self.file = open(self.filename, 'w')
Create a new logger with an open file handle to the given filename. Will overwrite the file if it already exists. :param filename:
utility/logger.py
__init__
theBraindonor/chicago-crime-arrests
1
python
def __init__(self, filename): '\n Create a new logger with an open file handle to the given filename.\n Will overwrite the file if it already exists.\n :param filename:\n ' self.filename = filename self.file = open(self.filename, 'w')
def __init__(self, filename): '\n Create a new logger with an open file handle to the given filename.\n Will overwrite the file if it already exists.\n :param filename:\n ' self.filename = filename self.file = open(self.filename, 'w')<|docstring|>Create a new logger with an open file handle to the given filename. Will overwrite the file if it already exists. :param filename:<|endoftext|>
c3b8bea475e2d8df4490c42b2269296b8b0bd538d65891ab5dcfc8bb18e3d119
def close(self): '\n Close the file handle of the logger.\n :return:\n ' self.file.close()
Close the file handle of the logger. :return:
utility/logger.py
close
theBraindonor/chicago-crime-arrests
1
python
def close(self): '\n Close the file handle of the logger.\n :return:\n ' self.file.close()
def close(self): '\n Close the file handle of the logger.\n :return:\n ' self.file.close()<|docstring|>Close the file handle of the logger. :return:<|endoftext|>
15b0584658a7e724060be4b25f8d3549dcb7b311eb60c5a2ea8468d68ed3abf7
def log(self, message): '\n Log a message. Will update pandas formatting to provide extra-wide display.\n :param message:\n :return:\n ' with pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', 1024, 'display.max_colwidth', 256): self.file.write(('%s\n' % message)) print(message)
Log a message. Will update pandas formatting to provide extra-wide display. :param message: :return:
utility/logger.py
log
theBraindonor/chicago-crime-arrests
1
python
def log(self, message): '\n Log a message. Will update pandas formatting to provide extra-wide display.\n :param message:\n :return:\n ' with pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', 1024, 'display.max_colwidth', 256): self.file.write(('%s\n' % message)) print(message)
def log(self, message): '\n Log a message. Will update pandas formatting to provide extra-wide display.\n :param message:\n :return:\n ' with pd.option_context('display.max_rows', None, 'display.max_columns', None, 'display.width', 1024, 'display.max_colwidth', 256): self.file.write(('%s\n' % message)) print(message)<|docstring|>Log a message. Will update pandas formatting to provide extra-wide display. :param message: :return:<|endoftext|>
ff13c87c01eaaf580ec5a05703ab0a41f2ee117ed41353c6a4ba5e75b22d0732
def time_log(self, message): '\n Log a message with a timestamp.\n :param message:\n :return:\n ' self.log(('[%s] %s' % (str(datetime.now()), message)))
Log a message with a timestamp. :param message: :return:
utility/logger.py
time_log
theBraindonor/chicago-crime-arrests
1
python
def time_log(self, message): '\n Log a message with a timestamp.\n :param message:\n :return:\n ' self.log(('[%s] %s' % (str(datetime.now()), message)))
def time_log(self, message): '\n Log a message with a timestamp.\n :param message:\n :return:\n ' self.log(('[%s] %s' % (str(datetime.now()), message)))<|docstring|>Log a message with a timestamp. :param message: :return:<|endoftext|>
dd8f4f814b6c06c4b9cc64e26fa482993d613f3d5dc5217e1acb1b3ab508d6d9
@property def value(self): 'The current analog value on the pin, as an integer from 0..65535 (inclusive)' return self._seesaw.analog_read(self._pin)
The current analog value on the pin, as an integer from 0..65535 (inclusive)
adafruit_seesaw/analoginput.py
value
mcauser/Adafruit_CircuitPython_seesaw
1
python
@property def value(self): return self._seesaw.analog_read(self._pin)
@property def value(self): return self._seesaw.analog_read(self._pin)<|docstring|>The current analog value on the pin, as an integer from 0..65535 (inclusive)<|endoftext|>