body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
2f28f798805a2eb791fa4d05e48ea51a2ddce133a70f8662ad4e1039c28834b4
def GetObject(self, object_ref): 'Get the object metadata of a GCS object.\n\n Args:\n object_ref: A proto message of the object to fetch. Only the bucket and\n name need be set.\n\n Raises:\n HttpError:\n If the responses status is not 2xx or 404.\n\n Returns:\n The object if it exists otherwise None.\n ' return self._GetObject(object_ref)
Get the object metadata of a GCS object. Args: object_ref: A proto message of the object to fetch. Only the bucket and name need be set. Raises: HttpError: If the responses status is not 2xx or 404. Returns: The object if it exists otherwise None.
lib/googlecloudsdk/api_lib/dataproc/storage_helpers.py
GetObject
google-cloud-sdk-unofficial/google-cloud-sdk
2
python
def GetObject(self, object_ref): 'Get the object metadata of a GCS object.\n\n Args:\n object_ref: A proto message of the object to fetch. Only the bucket and\n name need be set.\n\n Raises:\n HttpError:\n If the responses status is not 2xx or 404.\n\n Returns:\n The object if it exists otherwise None.\n ' return self._GetObject(object_ref)
def GetObject(self, object_ref): 'Get the object metadata of a GCS object.\n\n Args:\n object_ref: A proto message of the object to fetch. Only the bucket and\n name need be set.\n\n Raises:\n HttpError:\n If the responses status is not 2xx or 404.\n\n Returns:\n The object if it exists otherwise None.\n ' return self._GetObject(object_ref)<|docstring|>Get the object metadata of a GCS object. Args: object_ref: A proto message of the object to fetch. Only the bucket and name need be set. Raises: HttpError: If the responses status is not 2xx or 404. Returns: The object if it exists otherwise None.<|endoftext|>
8a9aed726ac1241a25bb5a91c2f95e840e3ecfdcd24dbcd14b6b6bcb656af385
def BuildObjectStream(self, stream, object_ref): "Build an apitools Download from a stream and a GCS object reference.\n\n Note: This will always succeed, but HttpErrors with downloading will be\n raised when the download's methods are called.\n\n Args:\n stream: An Stream-like object that implements write(<string>) to write\n into.\n object_ref: A proto message of the object to fetch. Only the bucket and\n name need be set.\n\n Returns:\n The download.\n " download = transfer.Download.FromStream(stream, total_size=object_ref.size, auto_transfer=False) self._GetObject(object_ref, download=download) return download
Build an apitools Download from a stream and a GCS object reference. Note: This will always succeed, but HttpErrors with downloading will be raised when the download's methods are called. Args: stream: An Stream-like object that implements write(<string>) to write into. object_ref: A proto message of the object to fetch. Only the bucket and name need be set. Returns: The download.
lib/googlecloudsdk/api_lib/dataproc/storage_helpers.py
BuildObjectStream
google-cloud-sdk-unofficial/google-cloud-sdk
2
python
def BuildObjectStream(self, stream, object_ref): "Build an apitools Download from a stream and a GCS object reference.\n\n Note: This will always succeed, but HttpErrors with downloading will be\n raised when the download's methods are called.\n\n Args:\n stream: An Stream-like object that implements write(<string>) to write\n into.\n object_ref: A proto message of the object to fetch. Only the bucket and\n name need be set.\n\n Returns:\n The download.\n " download = transfer.Download.FromStream(stream, total_size=object_ref.size, auto_transfer=False) self._GetObject(object_ref, download=download) return download
def BuildObjectStream(self, stream, object_ref): "Build an apitools Download from a stream and a GCS object reference.\n\n Note: This will always succeed, but HttpErrors with downloading will be\n raised when the download's methods are called.\n\n Args:\n stream: An Stream-like object that implements write(<string>) to write\n into.\n object_ref: A proto message of the object to fetch. Only the bucket and\n name need be set.\n\n Returns:\n The download.\n " download = transfer.Download.FromStream(stream, total_size=object_ref.size, auto_transfer=False) self._GetObject(object_ref, download=download) return download<|docstring|>Build an apitools Download from a stream and a GCS object reference. Note: This will always succeed, but HttpErrors with downloading will be raised when the download's methods are called. Args: stream: An Stream-like object that implements write(<string>) to write into. object_ref: A proto message of the object to fetch. Only the bucket and name need be set. Returns: The download.<|endoftext|>
1bf56263cba28e09f7d20c71d8d443b076ab21e46e4965c3e13ca94e816e818a
def __init__(self, path, storage_client=None): 'Construct a StorageObjectSeriesStream for a specific gcs path.\n\n Args:\n path: A GCS object prefix which will be the base of the objects used to\n communicate across the channel.\n storage_client: a StorageClient for accessing GCS.\n\n Returns:\n The constructed stream.\n ' self._base_path = path self._gcs = (storage_client or StorageClient()) self._open = True self._current_object_index = 0 self._current_object_pos = 0
Construct a StorageObjectSeriesStream for a specific gcs path. Args: path: A GCS object prefix which will be the base of the objects used to communicate across the channel. storage_client: a StorageClient for accessing GCS. Returns: The constructed stream.
lib/googlecloudsdk/api_lib/dataproc/storage_helpers.py
__init__
google-cloud-sdk-unofficial/google-cloud-sdk
2
python
def __init__(self, path, storage_client=None): 'Construct a StorageObjectSeriesStream for a specific gcs path.\n\n Args:\n path: A GCS object prefix which will be the base of the objects used to\n communicate across the channel.\n storage_client: a StorageClient for accessing GCS.\n\n Returns:\n The constructed stream.\n ' self._base_path = path self._gcs = (storage_client or StorageClient()) self._open = True self._current_object_index = 0 self._current_object_pos = 0
def __init__(self, path, storage_client=None): 'Construct a StorageObjectSeriesStream for a specific gcs path.\n\n Args:\n path: A GCS object prefix which will be the base of the objects used to\n communicate across the channel.\n storage_client: a StorageClient for accessing GCS.\n\n Returns:\n The constructed stream.\n ' self._base_path = path self._gcs = (storage_client or StorageClient()) self._open = True self._current_object_index = 0 self._current_object_pos = 0<|docstring|>Construct a StorageObjectSeriesStream for a specific gcs path. Args: path: A GCS object prefix which will be the base of the objects used to communicate across the channel. storage_client: a StorageClient for accessing GCS. Returns: The constructed stream.<|endoftext|>
7f73d17e6007a5af5784e47b7f8dc908e14e62f06b2a254759f040d1c404f46f
@property def open(self): 'Whether the stream is open.' return self._open
Whether the stream is open.
lib/googlecloudsdk/api_lib/dataproc/storage_helpers.py
open
google-cloud-sdk-unofficial/google-cloud-sdk
2
python
@property def open(self): return self._open
@property def open(self): return self._open<|docstring|>Whether the stream is open.<|endoftext|>
4bcc104add5a8b3b906651708abee3aeeb771eefdd3fdaa68de7a090fcb33f71
def Close(self): 'Close the stream.' self._open = False
Close the stream.
lib/googlecloudsdk/api_lib/dataproc/storage_helpers.py
Close
google-cloud-sdk-unofficial/google-cloud-sdk
2
python
def Close(self): self._open = False
def Close(self): self._open = False<|docstring|>Close the stream.<|endoftext|>
6bcb38eea67a2c96b9c0104668478d7a6b2da9a5ce5ae4b820e98af532db5db0
def _GetObject(self, i): 'Get the ith object in the series.' path = '{0}.{1:09d}'.format(self._base_path, i) return self._gcs.GetObject(GetObjectRef(path, self._gcs.messages))
Get the ith object in the series.
lib/googlecloudsdk/api_lib/dataproc/storage_helpers.py
_GetObject
google-cloud-sdk-unofficial/google-cloud-sdk
2
python
def _GetObject(self, i): path = '{0}.{1:09d}'.format(self._base_path, i) return self._gcs.GetObject(GetObjectRef(path, self._gcs.messages))
def _GetObject(self, i): path = '{0}.{1:09d}'.format(self._base_path, i) return self._gcs.GetObject(GetObjectRef(path, self._gcs.messages))<|docstring|>Get the ith object in the series.<|endoftext|>
b3d19a33a4cd25b0c32a84b9eee95d0f514937be089c8b6d05698ea7eb5c8085
def ReadIntoWritable(self, writable, n=sys.maxsize): "Read from this stream into a writable.\n\n Reads at most n bytes, or until it sees there is not a next object in the\n series. This will block for the duration of each object's download,\n and possibly indefinitely if new objects are being added to the channel\n frequently enough.\n\n Args:\n writable: The stream-like object that implements write(<string>) to\n write into.\n n: A maximum number of bytes to read. Defaults to sys.maxsize\n (usually ~4 GB).\n\n Raises:\n ValueError: If the stream is closed or objects in the series are\n detected to shrink.\n\n Returns:\n The number of bytes read.\n " self._AssertOpen() bytes_read = 0 object_info = None max_bytes_to_read = n while (bytes_read < max_bytes_to_read): next_object_info = self._GetObject((self._current_object_index + 1)) if ((not object_info) or next_object_info): try: object_info = self._GetObject(self._current_object_index) except apitools_exceptions.HttpError as error: log.warning('Failed to fetch GCS output:\n%s', error) break if (not object_info): break new_bytes_available = (object_info.size - self._current_object_pos) if (new_bytes_available < 0): raise ValueError('Object [{0}] shrunk.'.format(object_info.name)) if (object_info.size == 0): self.Close() break bytes_left_to_read = (max_bytes_to_read - bytes_read) new_bytes_to_read = min(bytes_left_to_read, new_bytes_available) if (new_bytes_to_read > 0): download = self._gcs.BuildObjectStream(writable, object_info) download.GetRange(self._current_object_pos, ((self._current_object_pos + new_bytes_to_read) - 1)) self._current_object_pos += new_bytes_to_read bytes_read += new_bytes_to_read object_finished = (next_object_info and (self._current_object_pos == object_info.size)) if object_finished: object_info = next_object_info self._current_object_index += 1 self._current_object_pos = 0 continue else: break return bytes_read
Read from this stream into a writable. Reads at most n bytes, or until it sees there is not a next object in the series. This will block for the duration of each object's download, and possibly indefinitely if new objects are being added to the channel frequently enough. Args: writable: The stream-like object that implements write(<string>) to write into. n: A maximum number of bytes to read. Defaults to sys.maxsize (usually ~4 GB). Raises: ValueError: If the stream is closed or objects in the series are detected to shrink. Returns: The number of bytes read.
lib/googlecloudsdk/api_lib/dataproc/storage_helpers.py
ReadIntoWritable
google-cloud-sdk-unofficial/google-cloud-sdk
2
python
def ReadIntoWritable(self, writable, n=sys.maxsize): "Read from this stream into a writable.\n\n Reads at most n bytes, or until it sees there is not a next object in the\n series. This will block for the duration of each object's download,\n and possibly indefinitely if new objects are being added to the channel\n frequently enough.\n\n Args:\n writable: The stream-like object that implements write(<string>) to\n write into.\n n: A maximum number of bytes to read. Defaults to sys.maxsize\n (usually ~4 GB).\n\n Raises:\n ValueError: If the stream is closed or objects in the series are\n detected to shrink.\n\n Returns:\n The number of bytes read.\n " self._AssertOpen() bytes_read = 0 object_info = None max_bytes_to_read = n while (bytes_read < max_bytes_to_read): next_object_info = self._GetObject((self._current_object_index + 1)) if ((not object_info) or next_object_info): try: object_info = self._GetObject(self._current_object_index) except apitools_exceptions.HttpError as error: log.warning('Failed to fetch GCS output:\n%s', error) break if (not object_info): break new_bytes_available = (object_info.size - self._current_object_pos) if (new_bytes_available < 0): raise ValueError('Object [{0}] shrunk.'.format(object_info.name)) if (object_info.size == 0): self.Close() break bytes_left_to_read = (max_bytes_to_read - bytes_read) new_bytes_to_read = min(bytes_left_to_read, new_bytes_available) if (new_bytes_to_read > 0): download = self._gcs.BuildObjectStream(writable, object_info) download.GetRange(self._current_object_pos, ((self._current_object_pos + new_bytes_to_read) - 1)) self._current_object_pos += new_bytes_to_read bytes_read += new_bytes_to_read object_finished = (next_object_info and (self._current_object_pos == object_info.size)) if object_finished: object_info = next_object_info self._current_object_index += 1 self._current_object_pos = 0 continue else: break return bytes_read
def ReadIntoWritable(self, writable, n=sys.maxsize): "Read from this stream into a writable.\n\n Reads at most n bytes, or until it sees there is not a next object in the\n series. This will block for the duration of each object's download,\n and possibly indefinitely if new objects are being added to the channel\n frequently enough.\n\n Args:\n writable: The stream-like object that implements write(<string>) to\n write into.\n n: A maximum number of bytes to read. Defaults to sys.maxsize\n (usually ~4 GB).\n\n Raises:\n ValueError: If the stream is closed or objects in the series are\n detected to shrink.\n\n Returns:\n The number of bytes read.\n " self._AssertOpen() bytes_read = 0 object_info = None max_bytes_to_read = n while (bytes_read < max_bytes_to_read): next_object_info = self._GetObject((self._current_object_index + 1)) if ((not object_info) or next_object_info): try: object_info = self._GetObject(self._current_object_index) except apitools_exceptions.HttpError as error: log.warning('Failed to fetch GCS output:\n%s', error) break if (not object_info): break new_bytes_available = (object_info.size - self._current_object_pos) if (new_bytes_available < 0): raise ValueError('Object [{0}] shrunk.'.format(object_info.name)) if (object_info.size == 0): self.Close() break bytes_left_to_read = (max_bytes_to_read - bytes_read) new_bytes_to_read = min(bytes_left_to_read, new_bytes_available) if (new_bytes_to_read > 0): download = self._gcs.BuildObjectStream(writable, object_info) download.GetRange(self._current_object_pos, ((self._current_object_pos + new_bytes_to_read) - 1)) self._current_object_pos += new_bytes_to_read bytes_read += new_bytes_to_read object_finished = (next_object_info and (self._current_object_pos == object_info.size)) if object_finished: object_info = next_object_info self._current_object_index += 1 self._current_object_pos = 0 continue else: break return bytes_read<|docstring|>Read from this stream into a writable. 
Reads at most n bytes, or until it sees there is not a next object in the series. This will block for the duration of each object's download, and possibly indefinitely if new objects are being added to the channel frequently enough. Args: writable: The stream-like object that implements write(<string>) to write into. n: A maximum number of bytes to read. Defaults to sys.maxsize (usually ~4 GB). Raises: ValueError: If the stream is closed or objects in the series are detected to shrink. Returns: The number of bytes read.<|endoftext|>
c83e8a86d4b70bdf97166b46ba847f99e7faadb3119365eefa4da217b02cf275
@classmethod def setUpClass(cls): 'Initialize the test schema once for all tests, and disable max diff limits.' cls.maxDiff = None cls.schema = get_schema()
Initialize the test schema once for all tests, and disable max diff limits.
graphql_compiler/tests/integration_tests/test_backends_integration.py
setUpClass
LWprogramming/graphql-compiler
0
python
@classmethod def setUpClass(cls): cls.maxDiff = None cls.schema = get_schema()
@classmethod def setUpClass(cls): cls.maxDiff = None cls.schema = get_schema()<|docstring|>Initialize the test schema once for all tests, and disable max diff limits.<|endoftext|>
e41b37de05895811da5ccbdf22435609739ace772640031450348063e4e06867
def assertResultsEqual(self, graphql_query, parameters, backend_name, expected_results): 'Assert that two lists of DB results are equal, independent of order.' backend_results = self.compile_and_run_query(graphql_query, parameters, backend_name) try: self.assertListEqual(sort_db_results(expected_results), sort_db_results(backend_results)) except AssertionError as error: args = [u'Failure for backend "{}": {}'.format(backend_name, error.args[0])] args.extend(error.args[1:]) error.args = tuple(args) raise
Assert that two lists of DB results are equal, independent of order.
graphql_compiler/tests/integration_tests/test_backends_integration.py
assertResultsEqual
LWprogramming/graphql-compiler
0
python
def assertResultsEqual(self, graphql_query, parameters, backend_name, expected_results): backend_results = self.compile_and_run_query(graphql_query, parameters, backend_name) try: self.assertListEqual(sort_db_results(expected_results), sort_db_results(backend_results)) except AssertionError as error: args = [u'Failure for backend "{}": {}'.format(backend_name, error.args[0])] args.extend(error.args[1:]) error.args = tuple(args) raise
def assertResultsEqual(self, graphql_query, parameters, backend_name, expected_results): backend_results = self.compile_and_run_query(graphql_query, parameters, backend_name) try: self.assertListEqual(sort_db_results(expected_results), sort_db_results(backend_results)) except AssertionError as error: args = [u'Failure for backend "{}": {}'.format(backend_name, error.args[0])] args.extend(error.args[1:]) error.args = tuple(args) raise<|docstring|>Assert that two lists of DB results are equal, independent of order.<|endoftext|>
6882f70fcae6d231bf0b3954ec3eb89a27b656793e3888b6fe1865d134854f4f
@classmethod def compile_and_run_query(cls, graphql_query, parameters, backend_name): 'Compiles and runs the graphql query with the supplied parameters against all backends.\n\n Args:\n graphql_query: str, GraphQL query string to run against every backend.\n parameters: Dict[str, Any], input parameters to the query.\n backend_name: str, the name of the test backend to get results from.\n\n Returns:\n List[Dict[str, Any]], backend results as a list of dictionaries.\n ' if (backend_name in SQL_BACKENDS): engine = cls.sql_backend_name_to_engine[backend_name] results = compile_and_run_sql_query(cls.schema, graphql_query, parameters, engine, cls.sql_metadata) elif (backend_name in MATCH_BACKENDS): results = compile_and_run_match_query(cls.schema, graphql_query, parameters, cls.graph_client) else: raise AssertionError(u'Unknown test backend {}.'.format(backend_name)) return results
Compiles and runs the graphql query with the supplied parameters against all backends. Args: graphql_query: str, GraphQL query string to run against every backend. parameters: Dict[str, Any], input parameters to the query. backend_name: str, the name of the test backend to get results from. Returns: List[Dict[str, Any]], backend results as a list of dictionaries.
graphql_compiler/tests/integration_tests/test_backends_integration.py
compile_and_run_query
LWprogramming/graphql-compiler
0
python
@classmethod def compile_and_run_query(cls, graphql_query, parameters, backend_name): 'Compiles and runs the graphql query with the supplied parameters against all backends.\n\n Args:\n graphql_query: str, GraphQL query string to run against every backend.\n parameters: Dict[str, Any], input parameters to the query.\n backend_name: str, the name of the test backend to get results from.\n\n Returns:\n List[Dict[str, Any]], backend results as a list of dictionaries.\n ' if (backend_name in SQL_BACKENDS): engine = cls.sql_backend_name_to_engine[backend_name] results = compile_and_run_sql_query(cls.schema, graphql_query, parameters, engine, cls.sql_metadata) elif (backend_name in MATCH_BACKENDS): results = compile_and_run_match_query(cls.schema, graphql_query, parameters, cls.graph_client) else: raise AssertionError(u'Unknown test backend {}.'.format(backend_name)) return results
@classmethod def compile_and_run_query(cls, graphql_query, parameters, backend_name): 'Compiles and runs the graphql query with the supplied parameters against all backends.\n\n Args:\n graphql_query: str, GraphQL query string to run against every backend.\n parameters: Dict[str, Any], input parameters to the query.\n backend_name: str, the name of the test backend to get results from.\n\n Returns:\n List[Dict[str, Any]], backend results as a list of dictionaries.\n ' if (backend_name in SQL_BACKENDS): engine = cls.sql_backend_name_to_engine[backend_name] results = compile_and_run_sql_query(cls.schema, graphql_query, parameters, engine, cls.sql_metadata) elif (backend_name in MATCH_BACKENDS): results = compile_and_run_match_query(cls.schema, graphql_query, parameters, cls.graph_client) else: raise AssertionError(u'Unknown test backend {}.'.format(backend_name)) return results<|docstring|>Compiles and runs the graphql query with the supplied parameters against all backends. Args: graphql_query: str, GraphQL query string to run against every backend. parameters: Dict[str, Any], input parameters to the query. backend_name: str, the name of the test backend to get results from. Returns: List[Dict[str, Any]], backend results as a list of dictionaries.<|endoftext|>
c4f2e20126dde8aadde0502d08a60ef117fdb4e2f8733fc33a86d025fd608e9f
def write_instance_to_example_files(instances, tokenizer, max_seq_length, max_predictions_per_seq, output_files): 'Create TF example files from `TrainingInstance`s.' writers = [] for output_file in output_files: writers.append(tf.python_io.TFRecordWriter(output_file)) writer_index = 0 total_written = 0 for (inst_index, instance) in enumerate(instances): input_ids = tokenizer.convert_tokens_to_ids(instance.tokens) input_mask = ([1] * len(input_ids)) segment_ids = list(instance.segment_ids) assert (len(input_ids) <= max_seq_length) while (len(input_ids) < max_seq_length): input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert (len(input_ids) == max_seq_length) assert (len(input_mask) == max_seq_length) assert (len(segment_ids) == max_seq_length) masked_lm_positions = list(instance.masked_lm_positions) masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels) masked_lm_weights = ([1.0] * len(masked_lm_ids)) while (len(masked_lm_positions) < max_predictions_per_seq): masked_lm_positions.append(0) masked_lm_ids.append(0) masked_lm_weights.append(0.0) next_sentence_label = (1 if instance.is_random_next else 0) features = collections.OrderedDict() features['input_ids'] = create_int_feature(input_ids) features['input_mask'] = create_int_feature(input_mask) features['segment_ids'] = create_int_feature(segment_ids) features['masked_lm_positions'] = create_int_feature(masked_lm_positions) features['masked_lm_ids'] = create_int_feature(masked_lm_ids) features['masked_lm_weights'] = create_float_feature(masked_lm_weights) features['next_sentence_labels'] = create_int_feature([next_sentence_label]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writers[writer_index].write(tf_example.SerializeToString()) writer_index = ((writer_index + 1) % len(writers)) total_written += 1 if (inst_index < 20): tf.logging.info('*** Example ***') tf.logging.info(('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in 
instance.tokens]))) for feature_name in features.keys(): feature = features[feature_name] values = [] if feature.int64_list.value: values = feature.int64_list.value elif feature.float_list.value: values = feature.float_list.value tf.logging.info(('%s: %s' % (feature_name, ' '.join([str(x) for x in values])))) for writer in writers: writer.close() tf.logging.info('Wrote %d total instances', total_written)
Create TF example files from `TrainingInstance`s.
create_pretraining_data.py
write_instance_to_example_files
zhouzhanzhao123/bert
32,077
python
def write_instance_to_example_files(instances, tokenizer, max_seq_length, max_predictions_per_seq, output_files): writers = [] for output_file in output_files: writers.append(tf.python_io.TFRecordWriter(output_file)) writer_index = 0 total_written = 0 for (inst_index, instance) in enumerate(instances): input_ids = tokenizer.convert_tokens_to_ids(instance.tokens) input_mask = ([1] * len(input_ids)) segment_ids = list(instance.segment_ids) assert (len(input_ids) <= max_seq_length) while (len(input_ids) < max_seq_length): input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert (len(input_ids) == max_seq_length) assert (len(input_mask) == max_seq_length) assert (len(segment_ids) == max_seq_length) masked_lm_positions = list(instance.masked_lm_positions) masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels) masked_lm_weights = ([1.0] * len(masked_lm_ids)) while (len(masked_lm_positions) < max_predictions_per_seq): masked_lm_positions.append(0) masked_lm_ids.append(0) masked_lm_weights.append(0.0) next_sentence_label = (1 if instance.is_random_next else 0) features = collections.OrderedDict() features['input_ids'] = create_int_feature(input_ids) features['input_mask'] = create_int_feature(input_mask) features['segment_ids'] = create_int_feature(segment_ids) features['masked_lm_positions'] = create_int_feature(masked_lm_positions) features['masked_lm_ids'] = create_int_feature(masked_lm_ids) features['masked_lm_weights'] = create_float_feature(masked_lm_weights) features['next_sentence_labels'] = create_int_feature([next_sentence_label]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writers[writer_index].write(tf_example.SerializeToString()) writer_index = ((writer_index + 1) % len(writers)) total_written += 1 if (inst_index < 20): tf.logging.info('*** Example ***') tf.logging.info(('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in instance.tokens]))) for feature_name in features.keys(): 
feature = features[feature_name] values = [] if feature.int64_list.value: values = feature.int64_list.value elif feature.float_list.value: values = feature.float_list.value tf.logging.info(('%s: %s' % (feature_name, ' '.join([str(x) for x in values])))) for writer in writers: writer.close() tf.logging.info('Wrote %d total instances', total_written)
def write_instance_to_example_files(instances, tokenizer, max_seq_length, max_predictions_per_seq, output_files): writers = [] for output_file in output_files: writers.append(tf.python_io.TFRecordWriter(output_file)) writer_index = 0 total_written = 0 for (inst_index, instance) in enumerate(instances): input_ids = tokenizer.convert_tokens_to_ids(instance.tokens) input_mask = ([1] * len(input_ids)) segment_ids = list(instance.segment_ids) assert (len(input_ids) <= max_seq_length) while (len(input_ids) < max_seq_length): input_ids.append(0) input_mask.append(0) segment_ids.append(0) assert (len(input_ids) == max_seq_length) assert (len(input_mask) == max_seq_length) assert (len(segment_ids) == max_seq_length) masked_lm_positions = list(instance.masked_lm_positions) masked_lm_ids = tokenizer.convert_tokens_to_ids(instance.masked_lm_labels) masked_lm_weights = ([1.0] * len(masked_lm_ids)) while (len(masked_lm_positions) < max_predictions_per_seq): masked_lm_positions.append(0) masked_lm_ids.append(0) masked_lm_weights.append(0.0) next_sentence_label = (1 if instance.is_random_next else 0) features = collections.OrderedDict() features['input_ids'] = create_int_feature(input_ids) features['input_mask'] = create_int_feature(input_mask) features['segment_ids'] = create_int_feature(segment_ids) features['masked_lm_positions'] = create_int_feature(masked_lm_positions) features['masked_lm_ids'] = create_int_feature(masked_lm_ids) features['masked_lm_weights'] = create_float_feature(masked_lm_weights) features['next_sentence_labels'] = create_int_feature([next_sentence_label]) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writers[writer_index].write(tf_example.SerializeToString()) writer_index = ((writer_index + 1) % len(writers)) total_written += 1 if (inst_index < 20): tf.logging.info('*** Example ***') tf.logging.info(('tokens: %s' % ' '.join([tokenization.printable_text(x) for x in instance.tokens]))) for feature_name in features.keys(): 
feature = features[feature_name] values = [] if feature.int64_list.value: values = feature.int64_list.value elif feature.float_list.value: values = feature.float_list.value tf.logging.info(('%s: %s' % (feature_name, ' '.join([str(x) for x in values])))) for writer in writers: writer.close() tf.logging.info('Wrote %d total instances', total_written)<|docstring|>Create TF example files from `TrainingInstance`s.<|endoftext|>
919ff872993a20ab18be6d8a1f054cbdb57bc5e1dc34871df8011f85d0d75ae3
def create_training_instances(input_files, tokenizer, max_seq_length, dupe_factor, short_seq_prob, masked_lm_prob, max_predictions_per_seq, rng): 'Create `TrainingInstance`s from raw text.' all_documents = [[]] for input_file in input_files: with tf.gfile.GFile(input_file, 'r') as reader: while True: line = tokenization.convert_to_unicode(reader.readline()) if (not line): break line = line.strip() if (not line): all_documents.append([]) tokens = tokenizer.tokenize(line) if tokens: all_documents[(- 1)].append(tokens) all_documents = [x for x in all_documents if x] rng.shuffle(all_documents) vocab_words = list(tokenizer.vocab.keys()) instances = [] for _ in range(dupe_factor): for document_index in range(len(all_documents)): instances.extend(create_instances_from_document(all_documents, document_index, max_seq_length, short_seq_prob, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)) rng.shuffle(instances) return instances
Create `TrainingInstance`s from raw text.
create_pretraining_data.py
create_training_instances
zhouzhanzhao123/bert
32,077
python
def create_training_instances(input_files, tokenizer, max_seq_length, dupe_factor, short_seq_prob, masked_lm_prob, max_predictions_per_seq, rng): all_documents = [[]] for input_file in input_files: with tf.gfile.GFile(input_file, 'r') as reader: while True: line = tokenization.convert_to_unicode(reader.readline()) if (not line): break line = line.strip() if (not line): all_documents.append([]) tokens = tokenizer.tokenize(line) if tokens: all_documents[(- 1)].append(tokens) all_documents = [x for x in all_documents if x] rng.shuffle(all_documents) vocab_words = list(tokenizer.vocab.keys()) instances = [] for _ in range(dupe_factor): for document_index in range(len(all_documents)): instances.extend(create_instances_from_document(all_documents, document_index, max_seq_length, short_seq_prob, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)) rng.shuffle(instances) return instances
def create_training_instances(input_files, tokenizer, max_seq_length, dupe_factor, short_seq_prob, masked_lm_prob, max_predictions_per_seq, rng): all_documents = [[]] for input_file in input_files: with tf.gfile.GFile(input_file, 'r') as reader: while True: line = tokenization.convert_to_unicode(reader.readline()) if (not line): break line = line.strip() if (not line): all_documents.append([]) tokens = tokenizer.tokenize(line) if tokens: all_documents[(- 1)].append(tokens) all_documents = [x for x in all_documents if x] rng.shuffle(all_documents) vocab_words = list(tokenizer.vocab.keys()) instances = [] for _ in range(dupe_factor): for document_index in range(len(all_documents)): instances.extend(create_instances_from_document(all_documents, document_index, max_seq_length, short_seq_prob, masked_lm_prob, max_predictions_per_seq, vocab_words, rng)) rng.shuffle(instances) return instances<|docstring|>Create `TrainingInstance`s from raw text.<|endoftext|>
8c9d20cc5b6560b7b3c0cec63b937e16b305bfa1f5a301d47551b7f3f80048b2
def create_instances_from_document(all_documents, document_index, max_seq_length, short_seq_prob, masked_lm_prob, max_predictions_per_seq, vocab_words, rng): 'Creates `TrainingInstance`s for a single document.' document = all_documents[document_index] max_num_tokens = (max_seq_length - 3) target_seq_length = max_num_tokens if (rng.random() < short_seq_prob): target_seq_length = rng.randint(2, max_num_tokens) instances = [] current_chunk = [] current_length = 0 i = 0 while (i < len(document)): segment = document[i] current_chunk.append(segment) current_length += len(segment) if ((i == (len(document) - 1)) or (current_length >= target_seq_length)): if current_chunk: a_end = 1 if (len(current_chunk) >= 2): a_end = rng.randint(1, (len(current_chunk) - 1)) tokens_a = [] for j in range(a_end): tokens_a.extend(current_chunk[j]) tokens_b = [] is_random_next = False if ((len(current_chunk) == 1) or (rng.random() < 0.5)): is_random_next = True target_b_length = (target_seq_length - len(tokens_a)) for _ in range(10): random_document_index = rng.randint(0, (len(all_documents) - 1)) if (random_document_index != document_index): break random_document = all_documents[random_document_index] random_start = rng.randint(0, (len(random_document) - 1)) for j in range(random_start, len(random_document)): tokens_b.extend(random_document[j]) if (len(tokens_b) >= target_b_length): break num_unused_segments = (len(current_chunk) - a_end) i -= num_unused_segments else: is_random_next = False for j in range(a_end, len(current_chunk)): tokens_b.extend(current_chunk[j]) truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng) assert (len(tokens_a) >= 1) assert (len(tokens_b) >= 1) tokens = [] segment_ids = [] tokens.append('[CLS]') segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append('[SEP]') segment_ids.append(0) for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append('[SEP]') segment_ids.append(1) (tokens, 
masked_lm_positions, masked_lm_labels) = create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng) instance = TrainingInstance(tokens=tokens, segment_ids=segment_ids, is_random_next=is_random_next, masked_lm_positions=masked_lm_positions, masked_lm_labels=masked_lm_labels) instances.append(instance) current_chunk = [] current_length = 0 i += 1 return instances
Creates `TrainingInstance`s for a single document.
create_pretraining_data.py
create_instances_from_document
zhouzhanzhao123/bert
32,077
python
def create_instances_from_document(all_documents, document_index, max_seq_length, short_seq_prob, masked_lm_prob, max_predictions_per_seq, vocab_words, rng): document = all_documents[document_index] max_num_tokens = (max_seq_length - 3) target_seq_length = max_num_tokens if (rng.random() < short_seq_prob): target_seq_length = rng.randint(2, max_num_tokens) instances = [] current_chunk = [] current_length = 0 i = 0 while (i < len(document)): segment = document[i] current_chunk.append(segment) current_length += len(segment) if ((i == (len(document) - 1)) or (current_length >= target_seq_length)): if current_chunk: a_end = 1 if (len(current_chunk) >= 2): a_end = rng.randint(1, (len(current_chunk) - 1)) tokens_a = [] for j in range(a_end): tokens_a.extend(current_chunk[j]) tokens_b = [] is_random_next = False if ((len(current_chunk) == 1) or (rng.random() < 0.5)): is_random_next = True target_b_length = (target_seq_length - len(tokens_a)) for _ in range(10): random_document_index = rng.randint(0, (len(all_documents) - 1)) if (random_document_index != document_index): break random_document = all_documents[random_document_index] random_start = rng.randint(0, (len(random_document) - 1)) for j in range(random_start, len(random_document)): tokens_b.extend(random_document[j]) if (len(tokens_b) >= target_b_length): break num_unused_segments = (len(current_chunk) - a_end) i -= num_unused_segments else: is_random_next = False for j in range(a_end, len(current_chunk)): tokens_b.extend(current_chunk[j]) truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng) assert (len(tokens_a) >= 1) assert (len(tokens_b) >= 1) tokens = [] segment_ids = [] tokens.append('[CLS]') segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append('[SEP]') segment_ids.append(0) for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append('[SEP]') segment_ids.append(1) (tokens, masked_lm_positions, masked_lm_labels) = 
create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng) instance = TrainingInstance(tokens=tokens, segment_ids=segment_ids, is_random_next=is_random_next, masked_lm_positions=masked_lm_positions, masked_lm_labels=masked_lm_labels) instances.append(instance) current_chunk = [] current_length = 0 i += 1 return instances
def create_instances_from_document(all_documents, document_index, max_seq_length, short_seq_prob, masked_lm_prob, max_predictions_per_seq, vocab_words, rng): document = all_documents[document_index] max_num_tokens = (max_seq_length - 3) target_seq_length = max_num_tokens if (rng.random() < short_seq_prob): target_seq_length = rng.randint(2, max_num_tokens) instances = [] current_chunk = [] current_length = 0 i = 0 while (i < len(document)): segment = document[i] current_chunk.append(segment) current_length += len(segment) if ((i == (len(document) - 1)) or (current_length >= target_seq_length)): if current_chunk: a_end = 1 if (len(current_chunk) >= 2): a_end = rng.randint(1, (len(current_chunk) - 1)) tokens_a = [] for j in range(a_end): tokens_a.extend(current_chunk[j]) tokens_b = [] is_random_next = False if ((len(current_chunk) == 1) or (rng.random() < 0.5)): is_random_next = True target_b_length = (target_seq_length - len(tokens_a)) for _ in range(10): random_document_index = rng.randint(0, (len(all_documents) - 1)) if (random_document_index != document_index): break random_document = all_documents[random_document_index] random_start = rng.randint(0, (len(random_document) - 1)) for j in range(random_start, len(random_document)): tokens_b.extend(random_document[j]) if (len(tokens_b) >= target_b_length): break num_unused_segments = (len(current_chunk) - a_end) i -= num_unused_segments else: is_random_next = False for j in range(a_end, len(current_chunk)): tokens_b.extend(current_chunk[j]) truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng) assert (len(tokens_a) >= 1) assert (len(tokens_b) >= 1) tokens = [] segment_ids = [] tokens.append('[CLS]') segment_ids.append(0) for token in tokens_a: tokens.append(token) segment_ids.append(0) tokens.append('[SEP]') segment_ids.append(0) for token in tokens_b: tokens.append(token) segment_ids.append(1) tokens.append('[SEP]') segment_ids.append(1) (tokens, masked_lm_positions, masked_lm_labels) = 
create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng) instance = TrainingInstance(tokens=tokens, segment_ids=segment_ids, is_random_next=is_random_next, masked_lm_positions=masked_lm_positions, masked_lm_labels=masked_lm_labels) instances.append(instance) current_chunk = [] current_length = 0 i += 1 return instances<|docstring|>Creates `TrainingInstance`s for a single document.<|endoftext|>
05563ba968330225abce52375d8adca660532dc7114a15e96500b46674da8987
def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng): 'Creates the predictions for the masked LM objective.' cand_indexes = [] for (i, token) in enumerate(tokens): if ((token == '[CLS]') or (token == '[SEP]')): continue if (FLAGS.do_whole_word_mask and (len(cand_indexes) >= 1) and token.startswith('##')): cand_indexes[(- 1)].append(i) else: cand_indexes.append([i]) rng.shuffle(cand_indexes) output_tokens = list(tokens) num_to_predict = min(max_predictions_per_seq, max(1, int(round((len(tokens) * masked_lm_prob))))) masked_lms = [] covered_indexes = set() for index_set in cand_indexes: if (len(masked_lms) >= num_to_predict): break if ((len(masked_lms) + len(index_set)) > num_to_predict): continue is_any_index_covered = False for index in index_set: if (index in covered_indexes): is_any_index_covered = True break if is_any_index_covered: continue for index in index_set: covered_indexes.add(index) masked_token = None if (rng.random() < 0.8): masked_token = '[MASK]' elif (rng.random() < 0.5): masked_token = tokens[index] else: masked_token = vocab_words[rng.randint(0, (len(vocab_words) - 1))] output_tokens[index] = masked_token masked_lms.append(MaskedLmInstance(index=index, label=tokens[index])) assert (len(masked_lms) <= num_to_predict) masked_lms = sorted(masked_lms, key=(lambda x: x.index)) masked_lm_positions = [] masked_lm_labels = [] for p in masked_lms: masked_lm_positions.append(p.index) masked_lm_labels.append(p.label) return (output_tokens, masked_lm_positions, masked_lm_labels)
Creates the predictions for the masked LM objective.
create_pretraining_data.py
create_masked_lm_predictions
zhouzhanzhao123/bert
32,077
python
def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng): cand_indexes = [] for (i, token) in enumerate(tokens): if ((token == '[CLS]') or (token == '[SEP]')): continue if (FLAGS.do_whole_word_mask and (len(cand_indexes) >= 1) and token.startswith('##')): cand_indexes[(- 1)].append(i) else: cand_indexes.append([i]) rng.shuffle(cand_indexes) output_tokens = list(tokens) num_to_predict = min(max_predictions_per_seq, max(1, int(round((len(tokens) * masked_lm_prob))))) masked_lms = [] covered_indexes = set() for index_set in cand_indexes: if (len(masked_lms) >= num_to_predict): break if ((len(masked_lms) + len(index_set)) > num_to_predict): continue is_any_index_covered = False for index in index_set: if (index in covered_indexes): is_any_index_covered = True break if is_any_index_covered: continue for index in index_set: covered_indexes.add(index) masked_token = None if (rng.random() < 0.8): masked_token = '[MASK]' elif (rng.random() < 0.5): masked_token = tokens[index] else: masked_token = vocab_words[rng.randint(0, (len(vocab_words) - 1))] output_tokens[index] = masked_token masked_lms.append(MaskedLmInstance(index=index, label=tokens[index])) assert (len(masked_lms) <= num_to_predict) masked_lms = sorted(masked_lms, key=(lambda x: x.index)) masked_lm_positions = [] masked_lm_labels = [] for p in masked_lms: masked_lm_positions.append(p.index) masked_lm_labels.append(p.label) return (output_tokens, masked_lm_positions, masked_lm_labels)
def create_masked_lm_predictions(tokens, masked_lm_prob, max_predictions_per_seq, vocab_words, rng): cand_indexes = [] for (i, token) in enumerate(tokens): if ((token == '[CLS]') or (token == '[SEP]')): continue if (FLAGS.do_whole_word_mask and (len(cand_indexes) >= 1) and token.startswith('##')): cand_indexes[(- 1)].append(i) else: cand_indexes.append([i]) rng.shuffle(cand_indexes) output_tokens = list(tokens) num_to_predict = min(max_predictions_per_seq, max(1, int(round((len(tokens) * masked_lm_prob))))) masked_lms = [] covered_indexes = set() for index_set in cand_indexes: if (len(masked_lms) >= num_to_predict): break if ((len(masked_lms) + len(index_set)) > num_to_predict): continue is_any_index_covered = False for index in index_set: if (index in covered_indexes): is_any_index_covered = True break if is_any_index_covered: continue for index in index_set: covered_indexes.add(index) masked_token = None if (rng.random() < 0.8): masked_token = '[MASK]' elif (rng.random() < 0.5): masked_token = tokens[index] else: masked_token = vocab_words[rng.randint(0, (len(vocab_words) - 1))] output_tokens[index] = masked_token masked_lms.append(MaskedLmInstance(index=index, label=tokens[index])) assert (len(masked_lms) <= num_to_predict) masked_lms = sorted(masked_lms, key=(lambda x: x.index)) masked_lm_positions = [] masked_lm_labels = [] for p in masked_lms: masked_lm_positions.append(p.index) masked_lm_labels.append(p.label) return (output_tokens, masked_lm_positions, masked_lm_labels)<|docstring|>Creates the predictions for the masked LM objective.<|endoftext|>
cce5f951b2e8216c9ff5e27a7a75fbdb30c82292d30c5b4f7227e9b637d97537
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng): 'Truncates a pair of sequences to a maximum sequence length.' while True: total_length = (len(tokens_a) + len(tokens_b)) if (total_length <= max_num_tokens): break trunc_tokens = (tokens_a if (len(tokens_a) > len(tokens_b)) else tokens_b) assert (len(trunc_tokens) >= 1) if (rng.random() < 0.5): del trunc_tokens[0] else: trunc_tokens.pop()
Truncates a pair of sequences to a maximum sequence length.
create_pretraining_data.py
truncate_seq_pair
zhouzhanzhao123/bert
32,077
python
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng): while True: total_length = (len(tokens_a) + len(tokens_b)) if (total_length <= max_num_tokens): break trunc_tokens = (tokens_a if (len(tokens_a) > len(tokens_b)) else tokens_b) assert (len(trunc_tokens) >= 1) if (rng.random() < 0.5): del trunc_tokens[0] else: trunc_tokens.pop()
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng): while True: total_length = (len(tokens_a) + len(tokens_b)) if (total_length <= max_num_tokens): break trunc_tokens = (tokens_a if (len(tokens_a) > len(tokens_b)) else tokens_b) assert (len(trunc_tokens) >= 1) if (rng.random() < 0.5): del trunc_tokens[0] else: trunc_tokens.pop()<|docstring|>Truncates a pair of sequences to a maximum sequence length.<|endoftext|>
10f20db247413f4c041de70b96784680abecba8c94aedbae3556018c31a4db68
def Add(self, exportLinetypeKey, exportLinetypeInfo): '\n Add(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey,exportLinetypeInfo: ExportLinetypeInfo)\n\n Inserts a (key,info) pair into Export line type table.\n\n \n\n exportLinetypeKey: The export line type Key to be added.\n\n exportLinetypeInfo: The export line type info to be added.\n ' pass
Add(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey,exportLinetypeInfo: ExportLinetypeInfo) Inserts a (key,info) pair into Export line type table. exportLinetypeKey: The export line type Key to be added. exportLinetypeInfo: The export line type info to be added.
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
Add
htlcnn/ironpython-stubs
182
python
def Add(self, exportLinetypeKey, exportLinetypeInfo): '\n Add(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey,exportLinetypeInfo: ExportLinetypeInfo)\n\n Inserts a (key,info) pair into Export line type table.\n\n \n\n exportLinetypeKey: The export line type Key to be added.\n\n exportLinetypeInfo: The export line type info to be added.\n ' pass
def Add(self, exportLinetypeKey, exportLinetypeInfo): '\n Add(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey,exportLinetypeInfo: ExportLinetypeInfo)\n\n Inserts a (key,info) pair into Export line type table.\n\n \n\n exportLinetypeKey: The export line type Key to be added.\n\n exportLinetypeInfo: The export line type info to be added.\n ' pass<|docstring|>Add(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey,exportLinetypeInfo: ExportLinetypeInfo) Inserts a (key,info) pair into Export line type table. exportLinetypeKey: The export line type Key to be added. exportLinetypeInfo: The export line type info to be added.<|endoftext|>
f731f983719e6748939e3798db77f3ea6c36a803d32461a4d08e3aa4aaba8edf
def Clear(self): '\n Clear(self: ExportLinetypeTable)\n\n Removes all contents stored in Export line type table.\n ' pass
Clear(self: ExportLinetypeTable) Removes all contents stored in Export line type table.
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
Clear
htlcnn/ironpython-stubs
182
python
def Clear(self): '\n Clear(self: ExportLinetypeTable)\n\n Removes all contents stored in Export line type table.\n ' pass
def Clear(self): '\n Clear(self: ExportLinetypeTable)\n\n Removes all contents stored in Export line type table.\n ' pass<|docstring|>Clear(self: ExportLinetypeTable) Removes all contents stored in Export line type table.<|endoftext|>
92d2346461d1bd2619eeebbbaefdbf04a0db2b9820e1b649bbd12e005a4aaba0
def ContainsKey(self, exportLinetypeKey): '\n ContainsKey(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey) -> bool\n\n \n\n Checks whether a pattern key exists in the table.\n\n \n\n exportLinetypeKey: The export line type key.\n\n Returns: True if the line type exists in the table.\n ' pass
ContainsKey(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey) -> bool Checks whether a pattern key exists in the table. exportLinetypeKey: The export line type key. Returns: True if the line type exists in the table.
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
ContainsKey
htlcnn/ironpython-stubs
182
python
def ContainsKey(self, exportLinetypeKey): '\n ContainsKey(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey) -> bool\n\n \n\n Checks whether a pattern key exists in the table.\n\n \n\n exportLinetypeKey: The export line type key.\n\n Returns: True if the line type exists in the table.\n ' pass
def ContainsKey(self, exportLinetypeKey): '\n ContainsKey(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey) -> bool\n\n \n\n Checks whether a pattern key exists in the table.\n\n \n\n exportLinetypeKey: The export line type key.\n\n Returns: True if the line type exists in the table.\n ' pass<|docstring|>ContainsKey(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey) -> bool Checks whether a pattern key exists in the table. exportLinetypeKey: The export line type key. Returns: True if the line type exists in the table.<|endoftext|>
a9148a4bd644a1d309ea6198090d96dab928c924157b3688a5d68cd27f1a6093
def Dispose(self): ' Dispose(self: ExportLinetypeTable) ' pass
Dispose(self: ExportLinetypeTable)
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
Dispose
htlcnn/ironpython-stubs
182
python
def Dispose(self): ' ' pass
def Dispose(self): ' ' pass<|docstring|>Dispose(self: ExportLinetypeTable)<|endoftext|>
6a5304b8cb2c5510ab76e21568991940c9a2e2845c294f90510f201d105e2156
def GetEnumerator(self): '\n GetEnumerator(self: ExportLinetypeTable) -> IEnumerator[KeyValuePair[ExportLinetypeKey,ExportLinetypeInfo]]\n\n \n\n Returns an enumerator that iterates through a collection.\n\n Returns: An IEnumerator object that can be used to iterate through the collection.\n ' pass
GetEnumerator(self: ExportLinetypeTable) -> IEnumerator[KeyValuePair[ExportLinetypeKey,ExportLinetypeInfo]] Returns an enumerator that iterates through a collection. Returns: An IEnumerator object that can be used to iterate through the collection.
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
GetEnumerator
htlcnn/ironpython-stubs
182
python
def GetEnumerator(self): '\n GetEnumerator(self: ExportLinetypeTable) -> IEnumerator[KeyValuePair[ExportLinetypeKey,ExportLinetypeInfo]]\n\n \n\n Returns an enumerator that iterates through a collection.\n\n Returns: An IEnumerator object that can be used to iterate through the collection.\n ' pass
def GetEnumerator(self): '\n GetEnumerator(self: ExportLinetypeTable) -> IEnumerator[KeyValuePair[ExportLinetypeKey,ExportLinetypeInfo]]\n\n \n\n Returns an enumerator that iterates through a collection.\n\n Returns: An IEnumerator object that can be used to iterate through the collection.\n ' pass<|docstring|>GetEnumerator(self: ExportLinetypeTable) -> IEnumerator[KeyValuePair[ExportLinetypeKey,ExportLinetypeInfo]] Returns an enumerator that iterates through a collection. Returns: An IEnumerator object that can be used to iterate through the collection.<|endoftext|>
67c1bba99071edcc12675758e174e0c3ce67806c711485b4711baafda71f8532
def GetExportLinetypeInfo(self, exportLinetypeKey): '\n GetExportLinetypeInfo(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey) -> ExportLinetypeInfo\n\n \n\n Gets a copy of the ExportLinetypeInfo corresponding to the given \n\n ExportLinetypeKey.\n\n \n\n \n\n exportLinetypeKey: The export line type Key.\n\n Returns: Returns the line type info for this key.\n ' pass
GetExportLinetypeInfo(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey) -> ExportLinetypeInfo Gets a copy of the ExportLinetypeInfo corresponding to the given ExportLinetypeKey. exportLinetypeKey: The export line type Key. Returns: Returns the line type info for this key.
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
GetExportLinetypeInfo
htlcnn/ironpython-stubs
182
python
def GetExportLinetypeInfo(self, exportLinetypeKey): '\n GetExportLinetypeInfo(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey) -> ExportLinetypeInfo\n\n \n\n Gets a copy of the ExportLinetypeInfo corresponding to the given \n\n ExportLinetypeKey.\n\n \n\n \n\n exportLinetypeKey: The export line type Key.\n\n Returns: Returns the line type info for this key.\n ' pass
def GetExportLinetypeInfo(self, exportLinetypeKey): '\n GetExportLinetypeInfo(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey) -> ExportLinetypeInfo\n\n \n\n Gets a copy of the ExportLinetypeInfo corresponding to the given \n\n ExportLinetypeKey.\n\n \n\n \n\n exportLinetypeKey: The export line type Key.\n\n Returns: Returns the line type info for this key.\n ' pass<|docstring|>GetExportLinetypeInfo(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey) -> ExportLinetypeInfo Gets a copy of the ExportLinetypeInfo corresponding to the given ExportLinetypeKey. exportLinetypeKey: The export line type Key. Returns: Returns the line type info for this key.<|endoftext|>
03d16965e144ce96997901ecef32a629065e6290b7f33d7e9b2bd1ede930eeb7
def GetKeys(self): '\n GetKeys(self: ExportLinetypeTable) -> IList[ExportLinetypeKey]\n\n \n\n Gets all the keys stored in the map.\n\n Returns: The keys.\n ' pass
GetKeys(self: ExportLinetypeTable) -> IList[ExportLinetypeKey] Gets all the keys stored in the map. Returns: The keys.
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
GetKeys
htlcnn/ironpython-stubs
182
python
def GetKeys(self): '\n GetKeys(self: ExportLinetypeTable) -> IList[ExportLinetypeKey]\n\n \n\n Gets all the keys stored in the map.\n\n Returns: The keys.\n ' pass
def GetKeys(self): '\n GetKeys(self: ExportLinetypeTable) -> IList[ExportLinetypeKey]\n\n \n\n Gets all the keys stored in the map.\n\n Returns: The keys.\n ' pass<|docstring|>GetKeys(self: ExportLinetypeTable) -> IList[ExportLinetypeKey] Gets all the keys stored in the map. Returns: The keys.<|endoftext|>
1c91ff5bf52d89f6860c7689fe6fee4f5eb176710fa144e253198281370a4b64
def GetLinetypeTableIterator(self): '\n GetLinetypeTableIterator(self: ExportLinetypeTable) -> ExportLinetypeTableIterator\n\n \n\n Returns a ExportLinetypeTableIterator that iterates through the collection.\n\n Returns: A ExportLinetypeTableIterator object that can be used to iterate through \n\n key-value pairs in the collection.\n ' pass
GetLinetypeTableIterator(self: ExportLinetypeTable) -> ExportLinetypeTableIterator Returns a ExportLinetypeTableIterator that iterates through the collection. Returns: A ExportLinetypeTableIterator object that can be used to iterate through key-value pairs in the collection.
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
GetLinetypeTableIterator
htlcnn/ironpython-stubs
182
python
def GetLinetypeTableIterator(self): '\n GetLinetypeTableIterator(self: ExportLinetypeTable) -> ExportLinetypeTableIterator\n\n \n\n Returns a ExportLinetypeTableIterator that iterates through the collection.\n\n Returns: A ExportLinetypeTableIterator object that can be used to iterate through \n\n key-value pairs in the collection.\n ' pass
def GetLinetypeTableIterator(self): '\n GetLinetypeTableIterator(self: ExportLinetypeTable) -> ExportLinetypeTableIterator\n\n \n\n Returns a ExportLinetypeTableIterator that iterates through the collection.\n\n Returns: A ExportLinetypeTableIterator object that can be used to iterate through \n\n key-value pairs in the collection.\n ' pass<|docstring|>GetLinetypeTableIterator(self: ExportLinetypeTable) -> ExportLinetypeTableIterator Returns a ExportLinetypeTableIterator that iterates through the collection. Returns: A ExportLinetypeTableIterator object that can be used to iterate through key-value pairs in the collection.<|endoftext|>
ffa77950cb1b25e14d454b5e03839ae09913ab64ccd603875597947a929d4f76
def GetValues(self): '\n GetValues(self: ExportLinetypeTable) -> IList[ExportLinetypeInfo]\n\n \n\n Returns all the values stored in the map.\n\n Returns: The info.\n ' pass
GetValues(self: ExportLinetypeTable) -> IList[ExportLinetypeInfo] Returns all the values stored in the map. Returns: The info.
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
GetValues
htlcnn/ironpython-stubs
182
python
def GetValues(self): '\n GetValues(self: ExportLinetypeTable) -> IList[ExportLinetypeInfo]\n\n \n\n Returns all the values stored in the map.\n\n Returns: The info.\n ' pass
def GetValues(self): '\n GetValues(self: ExportLinetypeTable) -> IList[ExportLinetypeInfo]\n\n \n\n Returns all the values stored in the map.\n\n Returns: The info.\n ' pass<|docstring|>GetValues(self: ExportLinetypeTable) -> IList[ExportLinetypeInfo] Returns all the values stored in the map. Returns: The info.<|endoftext|>
b41d3bf6862ad4936723a2e5bd41e7d9b7e0ce6df2e40bdb82fb670d953d445c
def ReleaseUnmanagedResources(self, *args): ' ReleaseUnmanagedResources(self: ExportLinetypeTable,disposing: bool) ' pass
ReleaseUnmanagedResources(self: ExportLinetypeTable,disposing: bool)
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
ReleaseUnmanagedResources
htlcnn/ironpython-stubs
182
python
def ReleaseUnmanagedResources(self, *args): ' ' pass
def ReleaseUnmanagedResources(self, *args): ' ' pass<|docstring|>ReleaseUnmanagedResources(self: ExportLinetypeTable,disposing: bool)<|endoftext|>
570967465ea53ef537331519fe0319762dd00740ae551e1694235cac40de952e
def Remove(self, exportLinetypeKey): '\n Remove(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey)\n\n Removes the pair (key,info) corresponding to the given ExportLinetypeKey.\n\n \n\n exportLinetypeKey: The export line type key\n ' pass
Remove(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey) Removes the pair (key,info) corresponding to the given ExportLinetypeKey. exportLinetypeKey: The export line type key
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
Remove
htlcnn/ironpython-stubs
182
python
def Remove(self, exportLinetypeKey): '\n Remove(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey)\n\n Removes the pair (key,info) corresponding to the given ExportLinetypeKey.\n\n \n\n exportLinetypeKey: The export line type key\n ' pass
def Remove(self, exportLinetypeKey): '\n Remove(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey)\n\n Removes the pair (key,info) corresponding to the given ExportLinetypeKey.\n\n \n\n exportLinetypeKey: The export line type key\n ' pass<|docstring|>Remove(self: ExportLinetypeTable,exportLinetypeKey: ExportLinetypeKey) Removes the pair (key,info) corresponding to the given ExportLinetypeKey. exportLinetypeKey: The export line type key<|endoftext|>
b944abb12e70fbd666b49a4246473e60dd44baa16514de42cd68f71a936dc1f3
def __add__(self, *args): ' x.__add__(y) <==> x+y ' pass
x.__add__(y) <==> x+y
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
__add__
htlcnn/ironpython-stubs
182
python
def __add__(self, *args): ' ' pass
def __add__(self, *args): ' ' pass<|docstring|>x.__add__(y) <==> x+y<|endoftext|>
9a4e38de8322f87c935eebff2e9b70d3f9170dda410d4c00c631430c4b83cdae
def __contains__(self, *args): ' __contains__[KeyValuePair[ExportLinetypeKey,ExportLinetypeInfo]](enumerable: IEnumerable[KeyValuePair[ExportLinetypeKey,ExportLinetypeInfo]],value: KeyValuePair[ExportLinetypeKey,ExportLinetypeInfo]) -> bool ' pass
__contains__[KeyValuePair[ExportLinetypeKey,ExportLinetypeInfo]](enumerable: IEnumerable[KeyValuePair[ExportLinetypeKey,ExportLinetypeInfo]],value: KeyValuePair[ExportLinetypeKey,ExportLinetypeInfo]) -> bool
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
__contains__
htlcnn/ironpython-stubs
182
python
def __contains__(self, *args): ' ' pass
def __contains__(self, *args): ' ' pass<|docstring|>__contains__[KeyValuePair[ExportLinetypeKey,ExportLinetypeInfo]](enumerable: IEnumerable[KeyValuePair[ExportLinetypeKey,ExportLinetypeInfo]],value: KeyValuePair[ExportLinetypeKey,ExportLinetypeInfo]) -> bool<|endoftext|>
91dccc0e30eae0de7422b530148c81d58465353736df52af7f5166947963de51
def __enter__(self, *args): ' __enter__(self: IDisposable) -> object ' pass
__enter__(self: IDisposable) -> object
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
__enter__
htlcnn/ironpython-stubs
182
python
def __enter__(self, *args): ' ' pass
def __enter__(self, *args): ' ' pass<|docstring|>__enter__(self: IDisposable) -> object<|endoftext|>
c8943e5ece59897a88105cce884ed30ac7b28eececb0afe96eb7d0274b80284a
def __exit__(self, *args): ' __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) ' pass
__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
__exit__
htlcnn/ironpython-stubs
182
python
def __exit__(self, *args): ' ' pass
def __exit__(self, *args): ' ' pass<|docstring|>__exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object)<|endoftext|>
66a55fa3e1972659026ecc5d95aa71c13251168d9deedbe2a57ac24432e95b57
def __getitem__(self, *args): ' x.__getitem__(y) <==> x[y] ' pass
x.__getitem__(y) <==> x[y]
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
__getitem__
htlcnn/ironpython-stubs
182
python
def __getitem__(self, *args): ' ' pass
def __getitem__(self, *args): ' ' pass<|docstring|>x.__getitem__(y) <==> x[y]<|endoftext|>
32b5271afcd5ecc37febb67dd854fa2d1b2c4c68b2c41d2ec119d33157e9bbaa
def __init__(self, *args): ' x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature ' pass
x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
__init__
htlcnn/ironpython-stubs
182
python
def __init__(self, *args): ' ' pass
def __init__(self, *args): ' ' pass<|docstring|>x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature<|endoftext|>
4f39caa98ded7dabe727a1bb58c2b211df42710089574e89c4cc4c9aeff25649
def __iter__(self, *args): ' __iter__(self: IEnumerable) -> object ' pass
__iter__(self: IEnumerable) -> object
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
__iter__
htlcnn/ironpython-stubs
182
python
def __iter__(self, *args): ' ' pass
def __iter__(self, *args): ' ' pass<|docstring|>__iter__(self: IEnumerable) -> object<|endoftext|>
746ad82e56350ce0365d6c7705d304bec5ee6fa3615ee6dcbcff4ffb0e8e54d3
def __repr__(self, *args): ' __repr__(self: object) -> str ' pass
__repr__(self: object) -> str
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
__repr__
htlcnn/ironpython-stubs
182
python
def __repr__(self, *args): ' ' pass
def __repr__(self, *args): ' ' pass<|docstring|>__repr__(self: object) -> str<|endoftext|>
c20330807bff72a1be2fbb5ba33391f524a4100c47ea96b7bf33de14ba63011d
def __setitem__(self, *args): ' x.__setitem__(i,y) <==> x[i]= ' pass
x.__setitem__(i,y) <==> x[i]=
release/stubs.min/Autodesk/Revit/DB/__init___parts/ExportLinetypeTable.py
__setitem__
htlcnn/ironpython-stubs
182
python
def __setitem__(self, *args): ' ' pass
def __setitem__(self, *args): ' ' pass<|docstring|>x.__setitem__(i,y) <==> x[i]=<|endoftext|>
bfa5ec28aefd33b61f8d71ffa7b41580a5139d2d4bbb0b82855e6227edbfe16b
def get(self): '\n Answer to GET requests.\n\n Redirects to search view.\n ' self.redirect('search', status=301)
Answer to GET requests. Redirects to search view.
rubberband/handlers/fe/compare.py
get
ambros-gleixner/rubberband
4
python
def get(self): '\n Answer to GET requests.\n\n Redirects to search view.\n ' self.redirect('search', status=301)
def get(self): '\n Answer to GET requests.\n\n Redirects to search view.\n ' self.redirect('search', status=301)<|docstring|>Answer to GET requests. Redirects to search view.<|endoftext|>
9f3a849750a580c804ee355924273485de9be94cbab8ba574b904c864c5be4c7
def post(self): '\n Answer to POST requests.\n\n Gets called when the user clicks on "change comparison base".\n Organizes instances and constructs url.\n Redirects to CompareView.get().\n ' compares = list(self.request.arguments.keys()) if ('_xsrf' in compares): compares.remove('_xsrf') if ('compare' in compares): compares.remove('compare') base = self.get_argument('base', None) if ((base is not None) and (len(compares) == 1)): raise HTTPError(status_code=400, msg='Please select at least 1 Testrun to compare to.') elif ((base is None) and (len(compares) <= 1)): raise HTTPError(status_code=400, log_message='Please select at least 2 Testruns to compare.') if base: compares.remove('base') if (base in compares): compares.remove(base) else: base = compares.pop(0) cmp_string = ','.join(compares) next_url = '{}/result/{}?compare={}'.format(self.application.base_url, base, cmp_string) self.redirect(next_url)
Answer to POST requests. Gets called when the user clicks on "change comparison base". Organizes instances and constructs url. Redirects to CompareView.get().
rubberband/handlers/fe/compare.py
post
ambros-gleixner/rubberband
4
python
def post(self): '\n Answer to POST requests.\n\n Gets called when the user clicks on "change comparison base".\n Organizes instances and constructs url.\n Redirects to CompareView.get().\n ' compares = list(self.request.arguments.keys()) if ('_xsrf' in compares): compares.remove('_xsrf') if ('compare' in compares): compares.remove('compare') base = self.get_argument('base', None) if ((base is not None) and (len(compares) == 1)): raise HTTPError(status_code=400, msg='Please select at least 1 Testrun to compare to.') elif ((base is None) and (len(compares) <= 1)): raise HTTPError(status_code=400, log_message='Please select at least 2 Testruns to compare.') if base: compares.remove('base') if (base in compares): compares.remove(base) else: base = compares.pop(0) cmp_string = ','.join(compares) next_url = '{}/result/{}?compare={}'.format(self.application.base_url, base, cmp_string) self.redirect(next_url)
def post(self): '\n Answer to POST requests.\n\n Gets called when the user clicks on "change comparison base".\n Organizes instances and constructs url.\n Redirects to CompareView.get().\n ' compares = list(self.request.arguments.keys()) if ('_xsrf' in compares): compares.remove('_xsrf') if ('compare' in compares): compares.remove('compare') base = self.get_argument('base', None) if ((base is not None) and (len(compares) == 1)): raise HTTPError(status_code=400, msg='Please select at least 1 Testrun to compare to.') elif ((base is None) and (len(compares) <= 1)): raise HTTPError(status_code=400, log_message='Please select at least 2 Testruns to compare.') if base: compares.remove('base') if (base in compares): compares.remove(base) else: base = compares.pop(0) cmp_string = ','.join(compares) next_url = '{}/result/{}?compare={}'.format(self.application.base_url, base, cmp_string) self.redirect(next_url)<|docstring|>Answer to POST requests. Gets called when the user clicks on "change comparison base". Organizes instances and constructs url. Redirects to CompareView.get().<|endoftext|>
fe08f5df93266120829cded35075a0f023adeb5075fea372ee1ef4cec6528c11
def __init__(self, parent: 'RainCloudy', controller_id: str, index: int, faucets: (list[dict[(str, Any)]] | None)=None): '\n Initialize RainCloudy Controller object.\n\n :param parent: RainCloudy parent object\n :param controller_id: Control Unit ID\n :param valve_id: Value Unit ID assigned controller\n :type parent: RainCloudy object\n :type controller_id: string\n :type valve_id: string\n :return: RainCloudyController object\n :rtype: RainCloudyController object\n ' self._parent = parent self.home = parent.html['home'] self._controller_id = controller_id self.index = index self.attributes: dict[(str, Any)] = {} self._faucets = self._create_faucets(faucets)
Initialize RainCloudy Controller object. :param parent: RainCloudy parent object :param controller_id: Control Unit ID :param valve_id: Value Unit ID assigned controller :type parent: RainCloudy object :type controller_id: string :type valve_id: string :return: RainCloudyController object :rtype: RainCloudyController object
raincloudy/aio/controller.py
__init__
tchellomello/raincloudy
16
python
def __init__(self, parent: 'RainCloudy', controller_id: str, index: int, faucets: (list[dict[(str, Any)]] | None)=None): '\n Initialize RainCloudy Controller object.\n\n :param parent: RainCloudy parent object\n :param controller_id: Control Unit ID\n :param valve_id: Value Unit ID assigned controller\n :type parent: RainCloudy object\n :type controller_id: string\n :type valve_id: string\n :return: RainCloudyController object\n :rtype: RainCloudyController object\n ' self._parent = parent self.home = parent.html['home'] self._controller_id = controller_id self.index = index self.attributes: dict[(str, Any)] = {} self._faucets = self._create_faucets(faucets)
def __init__(self, parent: 'RainCloudy', controller_id: str, index: int, faucets: (list[dict[(str, Any)]] | None)=None): '\n Initialize RainCloudy Controller object.\n\n :param parent: RainCloudy parent object\n :param controller_id: Control Unit ID\n :param valve_id: Value Unit ID assigned controller\n :type parent: RainCloudy object\n :type controller_id: string\n :type valve_id: string\n :return: RainCloudyController object\n :rtype: RainCloudyController object\n ' self._parent = parent self.home = parent.html['home'] self._controller_id = controller_id self.index = index self.attributes: dict[(str, Any)] = {} self._faucets = self._create_faucets(faucets)<|docstring|>Initialize RainCloudy Controller object. :param parent: RainCloudy parent object :param controller_id: Control Unit ID :param valve_id: Value Unit ID assigned controller :type parent: RainCloudy object :type controller_id: string :type valve_id: string :return: RainCloudyController object :rtype: RainCloudyController object<|endoftext|>
461c4803ee9e56fd77f6cbbd7b8b2fe163bd1c45032b4f9c0e3d4d2a1b4f20a2
def _create_faucets(self, faucets: (list[dict] | None)) -> list[RainCloudyFaucetCore]: 'Assign RainCloudyFaucet objects to self._faucets.' if (not faucets): raise TypeError('Controller does not have a faucet assigned.') return [RainCloudyFaucet(self._parent, self, faucet['serial'], index, faucet['zones']) for (index, faucet) in enumerate(faucets)]
Assign RainCloudyFaucet objects to self._faucets.
raincloudy/aio/controller.py
_create_faucets
tchellomello/raincloudy
16
python
def _create_faucets(self, faucets: (list[dict] | None)) -> list[RainCloudyFaucetCore]: if (not faucets): raise TypeError('Controller does not have a faucet assigned.') return [RainCloudyFaucet(self._parent, self, faucet['serial'], index, faucet['zones']) for (index, faucet) in enumerate(faucets)]
def _create_faucets(self, faucets: (list[dict] | None)) -> list[RainCloudyFaucetCore]: if (not faucets): raise TypeError('Controller does not have a faucet assigned.') return [RainCloudyFaucet(self._parent, self, faucet['serial'], index, faucet['zones']) for (index, faucet) in enumerate(faucets)]<|docstring|>Assign RainCloudyFaucet objects to self._faucets.<|endoftext|>
83362c1afd1cdcc97f73593dedc95398732b0779347339a5c4681cca817a3e04
def __repr__(self) -> str: 'Object representation.' try: return f'<{self.__class__.__name__}: {self.name}>' except AttributeError: return f'<{self.__class__.__name__}: {self.id}>'
Object representation.
raincloudy/aio/controller.py
__repr__
tchellomello/raincloudy
16
python
def __repr__(self) -> str: try: return f'<{self.__class__.__name__}: {self.name}>' except AttributeError: return f'<{self.__class__.__name__}: {self.id}>'
def __repr__(self) -> str: try: return f'<{self.__class__.__name__}: {self.name}>' except AttributeError: return f'<{self.__class__.__name__}: {self.id}>'<|docstring|>Object representation.<|endoftext|>
8dca6fa079374798a598d5c022f17f51949d06750faac38bdb34ed2a3faefa25
async def update(self) -> None: 'Call 1 method to update zone attributes.' (await asyncio.gather(*[faucet.update() for faucet in self._faucets]))
Call 1 method to update zone attributes.
raincloudy/aio/controller.py
update
tchellomello/raincloudy
16
python
async def update(self) -> None: (await asyncio.gather(*[faucet.update() for faucet in self._faucets]))
async def update(self) -> None: (await asyncio.gather(*[faucet.update() for faucet in self._faucets]))<|docstring|>Call 1 method to update zone attributes.<|endoftext|>
101f7c83382f5aa03a58be14e6a40fb1acc52420549bf724807462cfa91556c1
@property def serial(self) -> str: 'Return controller id.' return self._controller_id
Return controller id.
raincloudy/aio/controller.py
serial
tchellomello/raincloudy
16
python
@property def serial(self) -> str: return self._controller_id
@property def serial(self) -> str: return self._controller_id<|docstring|>Return controller id.<|endoftext|>
315e2f77d973bdb802a3d7a2476f2a90f5c0dd279e307daae349bf38b448cb17
@property def id(self) -> str: 'Return controller id.' return self._controller_id
Return controller id.
raincloudy/aio/controller.py
id
tchellomello/raincloudy
16
python
@property def id(self) -> str: return self._controller_id
@property def id(self) -> str: return self._controller_id<|docstring|>Return controller id.<|endoftext|>
8da1ac37a5fab46b945b459d910941538fcb616b82dba3bc101faf5377905fe5
@property def name(self) -> (str | None): 'Return controller name.' return find_controller_or_faucet_name(self._parent.html['home'], 'controller', self.index)
Return controller name.
raincloudy/aio/controller.py
name
tchellomello/raincloudy
16
python
@property def name(self) -> (str | None): return find_controller_or_faucet_name(self._parent.html['home'], 'controller', self.index)
@property def name(self) -> (str | None): return find_controller_or_faucet_name(self._parent.html['home'], 'controller', self.index)<|docstring|>Return controller name.<|endoftext|>
f8efdc74fda5a56bf989be9d3e2b9c9dc00ebfbafcc07a4fb7592c0dc93614fb
async def update_name(self, value) -> None: 'Set a new name to controller.' data = {'select_controller': self.index, '_set_controller_name': 'Set Name', 'controller_name': value} (await self._parent.post(data, url=SETUP_ENDPOINT, referer=SETUP_ENDPOINT))
Set a new name to controller.
raincloudy/aio/controller.py
update_name
tchellomello/raincloudy
16
python
async def update_name(self, value) -> None: data = {'select_controller': self.index, '_set_controller_name': 'Set Name', 'controller_name': value} (await self._parent.post(data, url=SETUP_ENDPOINT, referer=SETUP_ENDPOINT))
async def update_name(self, value) -> None: data = {'select_controller': self.index, '_set_controller_name': 'Set Name', 'controller_name': value} (await self._parent.post(data, url=SETUP_ENDPOINT, referer=SETUP_ENDPOINT))<|docstring|>Set a new name to controller.<|endoftext|>
e3231806b9f7f2f72026f498a5e6fb516d6a8f6f5596863bca46fcfbb67bc1d3
@property def status(self) -> str: 'Return controller status.' return self.attributes['controller_status']
Return controller status.
raincloudy/aio/controller.py
status
tchellomello/raincloudy
16
python
@property def status(self) -> str: return self.attributes['controller_status']
@property def status(self) -> str: return self.attributes['controller_status']<|docstring|>Return controller status.<|endoftext|>
7ab8dfd0b5f5ea3e8ff3389309e12e6ebbf2ef3db9f4e9d994bc47008fd1078b
@property def current_time(self) -> str: 'Return controller current time.' return self.attributes['current_time']
Return controller current time.
raincloudy/aio/controller.py
current_time
tchellomello/raincloudy
16
python
@property def current_time(self) -> str: return self.attributes['current_time']
@property def current_time(self) -> str: return self.attributes['current_time']<|docstring|>Return controller current time.<|endoftext|>
b3b5a909538e997821298856411cb0dca79b9cfd3c12bc2c9fba016bf330dc9c
@property def faucets(self) -> list[RainCloudyFaucetCore]: 'Show current linked faucet.' if hasattr(self, '_faucets'): return self._faucets raise AttributeError('There are no faucets assigned.')
Show current linked faucet.
raincloudy/aio/controller.py
faucets
tchellomello/raincloudy
16
python
@property def faucets(self) -> list[RainCloudyFaucetCore]: if hasattr(self, '_faucets'): return self._faucets raise AttributeError('There are no faucets assigned.')
@property def faucets(self) -> list[RainCloudyFaucetCore]: if hasattr(self, '_faucets'): return self._faucets raise AttributeError('There are no faucets assigned.')<|docstring|>Show current linked faucet.<|endoftext|>
4d2ceab7b10dd7a566b0716b69d38824829a7cde2312f77b071308e47a19e116
def _write_err(text): 'Default writer' if (not text.strip()): return if (not text.endswith('\n')): text += '\n' sys.stderr.write(text)
Default writer
friendly_traceback/config.py
_write_err
alexmojaki/friendly-traceback
0
python
def _write_err(text): if (not text.strip()): return if (not text.endswith('\n')): text += '\n' sys.stderr.write(text)
def _write_err(text): if (not text.strip()): return if (not text.endswith('\n')): text += '\n' sys.stderr.write(text)<|docstring|>Default writer<|endoftext|>
0898350cd67a917ebf650d54ee83bf049a4895b04b550b67a21bd260e0266119
def show_traceback_info_again(self): 'If has not been cleared, write the traceback info again, using\n the default stream.\n\n This is intended to be used when a user changes the verbosity\n level and wishes to see a traceback reexplained without having\n to execute the code again.\n ' _ = current_lang.translate if (not self.saved_info): print(_('Nothing to show: no exception recorded.')) return explanation = self.formatter(self.saved_info[(- 1)], include=self.include) self.write_err(explanation) self.write_err('\n')
If has not been cleared, write the traceback info again, using the default stream. This is intended to be used when a user changes the verbosity level and wishes to see a traceback reexplained without having to execute the code again.
friendly_traceback/config.py
show_traceback_info_again
alexmojaki/friendly-traceback
0
python
def show_traceback_info_again(self): 'If has not been cleared, write the traceback info again, using\n the default stream.\n\n This is intended to be used when a user changes the verbosity\n level and wishes to see a traceback reexplained without having\n to execute the code again.\n ' _ = current_lang.translate if (not self.saved_info): print(_('Nothing to show: no exception recorded.')) return explanation = self.formatter(self.saved_info[(- 1)], include=self.include) self.write_err(explanation) self.write_err('\n')
def show_traceback_info_again(self): 'If has not been cleared, write the traceback info again, using\n the default stream.\n\n This is intended to be used when a user changes the verbosity\n level and wishes to see a traceback reexplained without having\n to execute the code again.\n ' _ = current_lang.translate if (not self.saved_info): print(_('Nothing to show: no exception recorded.')) return explanation = self.formatter(self.saved_info[(- 1)], include=self.include) self.write_err(explanation) self.write_err('\n')<|docstring|>If has not been cleared, write the traceback info again, using the default stream. This is intended to be used when a user changes the verbosity level and wishes to see a traceback reexplained without having to execute the code again.<|endoftext|>
4a0a1c52b884da96e98dc8b96588fd9494ae0e3df1cdf3beddcad52fec5547c3
def capture(self, txt): 'Captures the output instead of writing to stderr.' self._captured.append(txt)
Captures the output instead of writing to stderr.
friendly_traceback/config.py
capture
alexmojaki/friendly-traceback
0
python
def capture(self, txt): self._captured.append(txt)
def capture(self, txt): self._captured.append(txt)<|docstring|>Captures the output instead of writing to stderr.<|endoftext|>
d395d5874e22abff57b9060db3f809eb042660f0f37c5662ca911a8280a4046e
def get_captured(self, flush=True): 'Returns the result of captured output as a string' result = ''.join(self._captured) if flush: self._captured.clear() return result
Returns the result of captured output as a string
friendly_traceback/config.py
get_captured
alexmojaki/friendly-traceback
0
python
def get_captured(self, flush=True): result = .join(self._captured) if flush: self._captured.clear() return result
def get_captured(self, flush=True): result = .join(self._captured) if flush: self._captured.clear() return result<|docstring|>Returns the result of captured output as a string<|endoftext|>
9748b8944a91aae32f8dda0834cf697dfefb0bf2ba325e961c8353d619706e94
def set_lang(self, lang): 'Sets the language and, if it is not the current language\n and a traceback exists, the information is recompiled for the\n new target language.\n ' if (lang == self.lang): return current_lang.install(lang) self.lang = lang if self.saved_info: if (not self.friendly): debug_helper.log("Problem: saved_info includes content but friendly doesn't.") self.friendly[(- 1)].recompile_info() self.friendly[(- 1)].info['lang'] = lang
Sets the language and, if it is not the current language and a traceback exists, the information is recompiled for the new target language.
friendly_traceback/config.py
set_lang
alexmojaki/friendly-traceback
0
python
def set_lang(self, lang): 'Sets the language and, if it is not the current language\n and a traceback exists, the information is recompiled for the\n new target language.\n ' if (lang == self.lang): return current_lang.install(lang) self.lang = lang if self.saved_info: if (not self.friendly): debug_helper.log("Problem: saved_info includes content but friendly doesn't.") self.friendly[(- 1)].recompile_info() self.friendly[(- 1)].info['lang'] = lang
def set_lang(self, lang): 'Sets the language and, if it is not the current language\n and a traceback exists, the information is recompiled for the\n new target language.\n ' if (lang == self.lang): return current_lang.install(lang) self.lang = lang if self.saved_info: if (not self.friendly): debug_helper.log("Problem: saved_info includes content but friendly doesn't.") self.friendly[(- 1)].recompile_info() self.friendly[(- 1)].info['lang'] = lang<|docstring|>Sets the language and, if it is not the current language and a traceback exists, the information is recompiled for the new target language.<|endoftext|>
18eb40484ace09e1d74f139df117020621dd4a6fab87a210b27095f95c4058f0
def install_gettext(self, lang): 'Sets the current language for gettext.' current_lang.install(lang) self.lang = lang
Sets the current language for gettext.
friendly_traceback/config.py
install_gettext
alexmojaki/friendly-traceback
0
python
def install_gettext(self, lang): current_lang.install(lang) self.lang = lang
def install_gettext(self, lang): current_lang.install(lang) self.lang = lang<|docstring|>Sets the current language for gettext.<|endoftext|>
91bd6d4cb2c8166a41030386693c4217d47784fbe24daca3ec009f53b33e8746
def set_formatter(self, formatter=None): 'Sets the default formatter. If no argument is given, the default\n formatter is used.\n ' if ((formatter is None) or (formatter == 'repl')): self.formatter = base_formatters.repl elif (formatter == 'docs'): self.formatter = base_formatters.docs elif isinstance(formatter, str): self.write_err(f'''Unknown formatter: {formatter} ''') self.formatter = base_formatters.repl else: self.formatter = formatter
Sets the default formatter. If no argument is given, the default formatter is used.
friendly_traceback/config.py
set_formatter
alexmojaki/friendly-traceback
0
python
def set_formatter(self, formatter=None): 'Sets the default formatter. If no argument is given, the default\n formatter is used.\n ' if ((formatter is None) or (formatter == 'repl')): self.formatter = base_formatters.repl elif (formatter == 'docs'): self.formatter = base_formatters.docs elif isinstance(formatter, str): self.write_err(f'Unknown formatter: {formatter} ') self.formatter = base_formatters.repl else: self.formatter = formatter
def set_formatter(self, formatter=None): 'Sets the default formatter. If no argument is given, the default\n formatter is used.\n ' if ((formatter is None) or (formatter == 'repl')): self.formatter = base_formatters.repl elif (formatter == 'docs'): self.formatter = base_formatters.docs elif isinstance(formatter, str): self.write_err(f'Unknown formatter: {formatter} ') self.formatter = base_formatters.repl else: self.formatter = formatter<|docstring|>Sets the default formatter. If no argument is given, the default formatter is used.<|endoftext|>
fef639b62ea149ec3b4db36a542dbd6fcc6ff07400f7df15b6c13c8d89ba172d
def install(self, lang=None, redirect=None, include='explain'): "Replaces sys.excepthook by friendly's own version." _ = current_lang.translate if (lang is not None): self.install_gettext(lang) if (redirect is not None): self.set_redirect(redirect=redirect) if (include != self.include): self.set_include(include) sys.excepthook = self.exception_hook self.installed = True
Replaces sys.excepthook by friendly's own version.
friendly_traceback/config.py
install
alexmojaki/friendly-traceback
0
python
def install(self, lang=None, redirect=None, include='explain'): _ = current_lang.translate if (lang is not None): self.install_gettext(lang) if (redirect is not None): self.set_redirect(redirect=redirect) if (include != self.include): self.set_include(include) sys.excepthook = self.exception_hook self.installed = True
def install(self, lang=None, redirect=None, include='explain'): _ = current_lang.translate if (lang is not None): self.install_gettext(lang) if (redirect is not None): self.set_redirect(redirect=redirect) if (include != self.include): self.set_include(include) sys.excepthook = self.exception_hook self.installed = True<|docstring|>Replaces sys.excepthook by friendly's own version.<|endoftext|>
e7e9cfe3dfafd6042e25045870dcbecbff5287aaa15662ac45f98f32eaaee029
def uninstall(self): 'Resets sys.excepthook to the Python default.' self.installed = False sys.excepthook = sys.__excepthook__
Resets sys.excepthook to the Python default.
friendly_traceback/config.py
uninstall
alexmojaki/friendly-traceback
0
python
def uninstall(self): self.installed = False sys.excepthook = sys.__excepthook__
def uninstall(self): self.installed = False sys.excepthook = sys.__excepthook__<|docstring|>Resets sys.excepthook to the Python default.<|endoftext|>
fb4481675d957155dad7b66e7ea7a73b48faf51ca6c8fbbdeb3a4986c749d125
def set_redirect(self, redirect=None): 'Sets where the output is redirected.' if (redirect == 'capture'): self.write_err = self.capture elif (redirect is not None): self.write_err = redirect else: self.write_err = _write_err
Sets where the output is redirected.
friendly_traceback/config.py
set_redirect
alexmojaki/friendly-traceback
0
python
def set_redirect(self, redirect=None): if (redirect == 'capture'): self.write_err = self.capture elif (redirect is not None): self.write_err = redirect else: self.write_err = _write_err
def set_redirect(self, redirect=None): if (redirect == 'capture'): self.write_err = self.capture elif (redirect is not None): self.write_err = redirect else: self.write_err = _write_err<|docstring|>Sets where the output is redirected.<|endoftext|>
3bf53d02239320e2ea3a7139b40299353f7ad9c4823ba66400304404f8e6fae2
def explain_traceback(self, redirect=None): 'Replaces a standard traceback by a friendlier one, giving more\n information about a given exception than a standard traceback.\n Note that this excludes SystemExit and KeyboardInterrupt which\n are re-raised.\n\n By default, the output goes to sys.stderr or to some other stream\n set to be the default by another API call. However, if\n redirect = some_stream\n is specified, the output goes to that stream, but without changing\n the global settings.\n ' _ = current_lang.translate (etype, value, tb) = sys.exc_info() if (etype is None): print(_('Nothing to show: no exception recorded.')) return self.exception_hook(etype, value, tb, redirect=redirect)
Replaces a standard traceback by a friendlier one, giving more information about a given exception than a standard traceback. Note that this excludes SystemExit and KeyboardInterrupt which are re-raised. By default, the output goes to sys.stderr or to some other stream set to be the default by another API call. However, if redirect = some_stream is specified, the output goes to that stream, but without changing the global settings.
friendly_traceback/config.py
explain_traceback
alexmojaki/friendly-traceback
0
python
def explain_traceback(self, redirect=None): 'Replaces a standard traceback by a friendlier one, giving more\n information about a given exception than a standard traceback.\n Note that this excludes SystemExit and KeyboardInterrupt which\n are re-raised.\n\n By default, the output goes to sys.stderr or to some other stream\n set to be the default by another API call. However, if\n redirect = some_stream\n is specified, the output goes to that stream, but without changing\n the global settings.\n ' _ = current_lang.translate (etype, value, tb) = sys.exc_info() if (etype is None): print(_('Nothing to show: no exception recorded.')) return self.exception_hook(etype, value, tb, redirect=redirect)
def explain_traceback(self, redirect=None): 'Replaces a standard traceback by a friendlier one, giving more\n information about a given exception than a standard traceback.\n Note that this excludes SystemExit and KeyboardInterrupt which\n are re-raised.\n\n By default, the output goes to sys.stderr or to some other stream\n set to be the default by another API call. However, if\n redirect = some_stream\n is specified, the output goes to that stream, but without changing\n the global settings.\n ' _ = current_lang.translate (etype, value, tb) = sys.exc_info() if (etype is None): print(_('Nothing to show: no exception recorded.')) return self.exception_hook(etype, value, tb, redirect=redirect)<|docstring|>Replaces a standard traceback by a friendlier one, giving more information about a given exception than a standard traceback. Note that this excludes SystemExit and KeyboardInterrupt which are re-raised. By default, the output goes to sys.stderr or to some other stream set to be the default by another API call. However, if redirect = some_stream is specified, the output goes to that stream, but without changing the global settings.<|endoftext|>
3428709f11584e72cbcdee557b15d7ea3066fa91a0d5e714321c4852f304386a
def exception_hook(self, etype, value, tb, redirect=None): 'Replaces a standard traceback by a friendlier one,\n except for SystemExit and KeyboardInterrupt which\n are re-raised.\n\n The values of the required arguments are typically the following:\n\n etype, value, tb = sys.exc_info()\n\n By default, the output goes to sys.stderr or to some other stream\n set to be the default by another API call. However, if\n redirect = some_stream\n is specified, the output goes to that stream for this call,\n but the session settings is restored afterwards.\n ' if (etype.__name__ == 'SystemExit'): raise SystemExit(str(value)) if (etype.__name__ == 'KeyboardInterrupt'): raise KeyboardInterrupt(str(value)) saved_current_redirect = None if (redirect is not None): saved_current_redirect = self.write_err self.set_redirect(redirect=redirect) try: self.friendly.append(core.FriendlyTraceback(etype, value, tb)) self.friendly[(- 1)].compile_info() info = self.friendly[(- 1)].info info['lang'] = self.lang self.saved_info.append(info) explanation = self.formatter(info, include=self.include) except Exception as e: debug_helper.log('Exception raised in exception_hook().') try: debug_helper.log(self.friendly[(- 1)].tb_data.filename) except Exception: pass debug_helper.log_error(e) return self.write_err(explanation) if (hasattr(explanation, 'endswith') and (not explanation.endswith('\n'))): self.write_err('\n') if (saved_current_redirect is not None): self.set_redirect(redirect=saved_current_redirect)
Replaces a standard traceback by a friendlier one, except for SystemExit and KeyboardInterrupt which are re-raised. The values of the required arguments are typically the following: etype, value, tb = sys.exc_info() By default, the output goes to sys.stderr or to some other stream set to be the default by another API call. However, if redirect = some_stream is specified, the output goes to that stream for this call, but the session settings is restored afterwards.
friendly_traceback/config.py
exception_hook
alexmojaki/friendly-traceback
0
python
def exception_hook(self, etype, value, tb, redirect=None):
    """Replaces a standard traceback by a friendlier one,
    except for SystemExit and KeyboardInterrupt which
    are re-raised.

    The values of the required arguments are typically the following:

        etype, value, tb = sys.exc_info()

    By default, the output goes to sys.stderr or to some other stream
    set to be the default by another API call.  However, if
    redirect = some_stream
    is specified, the output goes to that stream for this call,
    but the session settings are restored afterwards.
    """
    # SystemExit / KeyboardInterrupt are deliberately not explained:
    # re-raise them so normal interpreter shutdown / Ctrl-C behaviour
    # is preserved.
    if (etype.__name__ == 'SystemExit'):
        raise SystemExit(str(value))
    if (etype.__name__ == 'KeyboardInterrupt'):
        raise KeyboardInterrupt(str(value))
    # Temporarily redirect the output for this call only; remember the
    # current writer so it can be restored at the end.
    saved_current_redirect = None
    if (redirect is not None):
        saved_current_redirect = self.write_err
        self.set_redirect(redirect=redirect)
    try:
        # Build and record the friendly traceback, then format it using
        # the session's language and inclusion settings.
        self.friendly.append(core.FriendlyTraceback(etype, value, tb))
        self.friendly[(- 1)].compile_info()
        info = self.friendly[(- 1)].info
        info['lang'] = self.lang
        self.saved_info.append(info)
        explanation = self.formatter(info, include=self.include)
    except Exception as e:
        # A failure here is a bug in friendly-traceback itself: log as
        # much context as possible and bail out without printing.
        debug_helper.log('Exception raised in exception_hook().')
        try:
            debug_helper.log(self.friendly[(- 1)].tb_data.filename)
        except Exception:
            pass
        debug_helper.log_error(e)
        return
    self.write_err(explanation)
    # Ensure the output ends with a newline (a formatter may omit it;
    # 'explanation' is presumably a str, hence the hasattr guard).
    if (hasattr(explanation, 'endswith') and (not explanation.endswith('\n'))):
        self.write_err('\n')
    # Restore the previous output stream if we redirected above.
    if (saved_current_redirect is not None):
        self.set_redirect(redirect=saved_current_redirect)
5d3d36dba477f6e1852fb052bd12b9fdec484062e46948d0a843ba809261d9a7
def __init__(self, hostnames=None, ip=None): 'V1HostAlias - a model defined in Swagger' self._hostnames = None self._ip = None self.discriminator = None if (hostnames is not None): self.hostnames = hostnames if (ip is not None): self.ip = ip
V1HostAlias - a model defined in Swagger
aiokubernetes/models/v1_host_alias.py
__init__
tantioch/aiokubernetes
24
python
def __init__(self, hostnames=None, ip=None):
    """V1HostAlias - a model defined in Swagger"""
    # Backing fields start out unset; the public attributes go through
    # the property setters below.
    self._hostnames = None
    self._ip = None
    self.discriminator = None
    # Only assign attributes that were actually provided.
    for attr_name, supplied in (('hostnames', hostnames), ('ip', ip)):
        if supplied is not None:
            setattr(self, attr_name, supplied)
5956eff08a7fc6c59202f1fc230646226a93c31a65bcc7ce2414ab100b625246
@property def hostnames(self): 'Gets the hostnames of this V1HostAlias. # noqa: E501\n\n Hostnames for the above IP address. # noqa: E501\n\n :return: The hostnames of this V1HostAlias. # noqa: E501\n :rtype: list[str]\n ' return self._hostnames
Gets the hostnames of this V1HostAlias. # noqa: E501 Hostnames for the above IP address. # noqa: E501 :return: The hostnames of this V1HostAlias. # noqa: E501 :rtype: list[str]
aiokubernetes/models/v1_host_alias.py
hostnames
tantioch/aiokubernetes
24
python
@property
def hostnames(self):
    """Hostnames for the above IP address.  # noqa: E501

    :return: The hostnames of this V1HostAlias. # noqa: E501
    :rtype: list[str]
    """
    return self._hostnames
8215eabe73dfb462f8e2d5b2f29063376e690ee2effec40bf6616b58e7f552a3
@hostnames.setter def hostnames(self, hostnames): 'Sets the hostnames of this V1HostAlias.\n\n Hostnames for the above IP address. # noqa: E501\n\n :param hostnames: The hostnames of this V1HostAlias. # noqa: E501\n :type: list[str]\n ' self._hostnames = hostnames
Sets the hostnames of this V1HostAlias. Hostnames for the above IP address. # noqa: E501 :param hostnames: The hostnames of this V1HostAlias. # noqa: E501 :type: list[str]
aiokubernetes/models/v1_host_alias.py
hostnames
tantioch/aiokubernetes
24
python
@hostnames.setter
def hostnames(self, hostnames):
    """Store the hostnames for the above IP address.  # noqa: E501

    :param hostnames: The hostnames of this V1HostAlias. # noqa: E501
    :type: list[str]
    """
    self._hostnames = hostnames
7051209df6e5c244baa9b59cdd40c672cd3225ff9b2c15e90698563fd5c8df82
@property def ip(self): 'Gets the ip of this V1HostAlias. # noqa: E501\n\n IP address of the host file entry. # noqa: E501\n\n :return: The ip of this V1HostAlias. # noqa: E501\n :rtype: str\n ' return self._ip
Gets the ip of this V1HostAlias. # noqa: E501 IP address of the host file entry. # noqa: E501 :return: The ip of this V1HostAlias. # noqa: E501 :rtype: str
aiokubernetes/models/v1_host_alias.py
ip
tantioch/aiokubernetes
24
python
@property
def ip(self):
    """IP address of the host file entry.  # noqa: E501

    :return: The ip of this V1HostAlias. # noqa: E501
    :rtype: str
    """
    return self._ip
3a85b93b867a4f2b16634b0f8f4f059666b1b7ae570e824a05a766de48c28de6
@ip.setter def ip(self, ip): 'Sets the ip of this V1HostAlias.\n\n IP address of the host file entry. # noqa: E501\n\n :param ip: The ip of this V1HostAlias. # noqa: E501\n :type: str\n ' self._ip = ip
Sets the ip of this V1HostAlias. IP address of the host file entry. # noqa: E501 :param ip: The ip of this V1HostAlias. # noqa: E501 :type: str
aiokubernetes/models/v1_host_alias.py
ip
tantioch/aiokubernetes
24
python
@ip.setter
def ip(self, ip):
    """Store the IP address of the host file entry.  # noqa: E501

    :param ip: The ip of this V1HostAlias. # noqa: E501
    :type: str
    """
    self._ip = ip
1570c4018b2c60e62db83457514b9e7d113af3fae3212fc5f09308caae8a7a34
def to_dict(self): 'Returns the model properties as a dict' result = {} for (attr, _) in self.swagger_types.items(): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value)) elif hasattr(value, 'to_dict'): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items())) else: result[attr] = value return result
Returns the model properties as a dict
aiokubernetes/models/v1_host_alias.py
to_dict
tantioch/aiokubernetes
24
python
def to_dict(self):
    """Return the model properties as a dict.

    Nested models (anything exposing ``to_dict``) are serialised
    recursively, inside both lists and dict values.
    """
    serialised = {}
    for prop in self.swagger_types:
        current = getattr(self, prop)
        if isinstance(current, list):
            serialised[prop] = [
                item.to_dict() if hasattr(item, 'to_dict') else item
                for item in current
            ]
        elif hasattr(current, 'to_dict'):
            serialised[prop] = current.to_dict()
        elif isinstance(current, dict):
            serialised[prop] = {
                key: (val.to_dict() if hasattr(val, 'to_dict') else val)
                for key, val in current.items()
            }
        else:
            serialised[prop] = current
    return serialised
cbb19eaa2fc8a113d9e32f924ef280a7e97563f8915f94f65dab438997af2e99
def to_str(self): 'Returns the string representation of the model' return pprint.pformat(self.to_dict())
Returns the string representation of the model
aiokubernetes/models/v1_host_alias.py
to_str
tantioch/aiokubernetes
24
python
def to_str(self):
    """Return the string representation of the model."""
    as_dict = self.to_dict()
    return pprint.pformat(as_dict)
772243a2c2b3261a9b954d07aaf295e3c1242a579a495e2d6a5679c677861703
def __repr__(self): 'For `print` and `pprint`' return self.to_str()
For `print` and `pprint`
aiokubernetes/models/v1_host_alias.py
__repr__
tantioch/aiokubernetes
24
python
def __repr__(self):
    """Representation used by `print` and `pprint`."""
    return self.to_str()
007bf7950ebcd2e3a7071ac44f7f174c22d056077a266f444cb4a9358aa6b301
def __eq__(self, other): 'Returns true if both objects are equal' if (not isinstance(other, V1HostAlias)): return False return (self.__dict__ == other.__dict__)
Returns true if both objects are equal
aiokubernetes/models/v1_host_alias.py
__eq__
tantioch/aiokubernetes
24
python
def __eq__(self, other):
    """Return True when *other* is a V1HostAlias with identical state."""
    return isinstance(other, V1HostAlias) and (self.__dict__ == other.__dict__)
43dc6740163eb9fc1161d09cb2208a64c7ad0cc8d9c8637ac3264522d3ec7e42
def __ne__(self, other): 'Returns true if both objects are not equal' return (not (self == other))
Returns true if both objects are not equal
aiokubernetes/models/v1_host_alias.py
__ne__
tantioch/aiokubernetes
24
python
def __ne__(self, other):
    """Return True when the two objects are not equal."""
    equal = (self == other)
    return not equal
6e7d4b401e8c735383cf9e4e6c6f831867f9e84956ec8d72977b794a5a417302
def validate_ticket_name(name): '\n Validates that a given ticket name is valid\n :param name: The ticket name\n :return: True if name is valid\n ' if (not name.isalnum()): return 'Name must have alphanumeric characters only.' if (len(name) > 60): return 'Name must be less than 60 characters.' return False
Validates that a given ticket name is valid :param name: The ticket name :return: True if name is valid
qa327/utils.py
validate_ticket_name
jacob-seiler/seetgeek
0
python
def validate_ticket_name(name):
    """
    Validates that a given ticket name is valid.

    :param name: The ticket name
    :return: False if the name is valid, otherwise an error message string
    """
    # NOTE: str.isalnum() also rejects the empty string and any spaces.
    if not name.isalnum():
        return 'Name must have alphanumeric characters only.'
    # Names of exactly 60 characters are accepted (only > 60 is rejected).
    if len(name) > 60:
        return 'Name must be less than 60 characters.'
    return False
cc02dba32f2e748722ffe5ecd5a565dddc543de5e6d5fee627811e1ed3d40b72
def validate_ticket(name, quantity, price, date): '\n Validates that all ticket data is correct\n :param name: The ticket name\n :param quantity: The ticket quantity\n :param price: The ticket price\n :param date: The ticket experation date\n :return: False if all data is valid, error message if not\n ' error = validate_ticket_name(name) if (error == False): error = validate_ticket_quantity(quantity) if (error == False): error = validate_ticket_price(price) if (error == False): error = validate_ticket_date(date) return error
Validates that all ticket data is correct :param name: The ticket name :param quantity: The ticket quantity :param price: The ticket price :param date: The ticket expiration date :return: False if all data is valid, error message if not
qa327/utils.py
validate_ticket
jacob-seiler/seetgeek
0
python
def validate_ticket(name, quantity, price, date):
    """
    Validates that all ticket data is correct.

    :param name: The ticket name
    :param quantity: The ticket quantity
    :param price: The ticket price
    :param date: The ticket expiration date
    :return: False if all data is valid, error message if not
    """
    checks = (
        (validate_ticket_name, name),
        (validate_ticket_quantity, quantity),
        (validate_ticket_price, price),
        (validate_ticket_date, date),
    )
    error = False
    # Stop at the first validator that reports a problem.
    for validator, value in checks:
        error = validator(value)
        if error != False:
            break
    return error
28f8146528740c43a06aa8582d10308df6c9936a6b016f79048020b031565696
def math_plot(): '\n 繪圖練習\n ' num = 6 styles = ['r-', 'g-*', 'b-o', 'y-x', 'c-^', 'm-+', 'k-d'] legends = ['log2', 'linear', '線性對數', 'squaur', 'cubic', '3 order', 'factorial'] x_data = [x for x in range(1, (num + 1))] y_data1 = [log2(y) for y in range(1, (num + 1))] y_data2 = [y for y in range(1, (num + 1))] y_data3 = [(y * log2(y)) for y in range(1, (num + 1))] y_data4 = [(y ** 2) for y in range(1, (num + 1))] y_data5 = [(y ** 3) for y in range(1, (num + 1))] y_data6 = [(3 ** y) for y in range(1, (num + 1))] y_data7 = [factorial(y) for y in range(1, (num + 1))] y_datas = [y_data1, y_data2, y_data3, y_data4, y_data5, y_data6, y_data7] for (index, y_data) in enumerate(y_datas): pyplot.plot(x_data, y_data, styles[index]) pyplot.legend(legends) pyplot.xticks(numpy.arange(0, 8, step=1)) pyplot.yticks(numpy.arange(0, 751, step=50)) pyplot.show()
繪圖練習
Day16-20/practice_code/R_Day_16.py
math_plot
reic/groupLearning-Python-100-Days
4
python
def math_plot():
    """Plotting practice: draw several growth-rate curves on one figure."""
    count = 6
    line_styles = ['r-', 'g-*', 'b-o', 'y-x', 'c-^', 'm-+', 'k-d']
    labels = ['log2', 'linear', '線性對數', 'squaur', 'cubic', '3 order', 'factorial']
    xs = [x for x in range(1, (count + 1))]
    # One growth function per curve, in the same order as styles/labels.
    growth_fns = [
        log2,
        lambda v: v,
        lambda v: (v * log2(v)),
        lambda v: (v ** 2),
        lambda v: (v ** 3),
        lambda v: (3 ** v),
        factorial,
    ]
    for fn, style in zip(growth_fns, line_styles):
        pyplot.plot(xs, [fn(v) for v in xs], style)
    pyplot.legend(labels)
    pyplot.xticks(numpy.arange(0, 8, step=1))
    pyplot.yticks(numpy.arange(0, 751, step=50))
    pyplot.show()
94da6d1400dfc31cbdae020bd778d73e9e644e1bf02b00946518d9c780ac2b48
@property def inputs(self): '\n Function that the ContentToTextRead operator defines while returning graph inputs\n\n :returns: Inputs to the node of the Auto tagging graph\n DS_DATA_HOME: a localpath where the folders get created\n localpathTocontentMeta: path to content meta\n pathTocredentials: path to config file with credentials \n\n ' return {'DS_DATA_HOME': ReadDaggitTask_Folderpath(self.node.inputs[0]), 'localpathTocontentMeta': ReadDaggitTask_Folderpath(self.node.inputs[1]), 'pathTocredentials': ReadDaggitTask_Folderpath(self.node.inputs[2])}
Function that the ContentToTextRead operator defines while returning graph inputs :returns: Inputs to the node of the Auto tagging graph DS_DATA_HOME: a localpath where the folders get created localpathTocontentMeta: path to content meta pathTocredentials: path to config file with credentials
src/main/python/daggit/contrib/sunbird/nodes/contenttagging.py
inputs
aleenaraj/sunbird-ml-workbench
14
python
@property
def inputs(self):
    """Graph inputs consumed by the ContentToTextRead operator.

    :returns: Inputs to the node of the auto-tagging graph:
        DS_DATA_HOME: a local path where the folders get created
        localpathTocontentMeta: path to the content meta
        pathTocredentials: path to the config file with credentials
    """
    node_inputs = self.node.inputs
    return {
        'DS_DATA_HOME': ReadDaggitTask_Folderpath(node_inputs[0]),
        'localpathTocontentMeta': ReadDaggitTask_Folderpath(node_inputs[1]),
        'pathTocredentials': ReadDaggitTask_Folderpath(node_inputs[2]),
    }
7862ca065b51559c1be07ca2f59515e58313020cb1ebce20e4729e830c427726
@property def outputs(self): '\n Function that the ContentToTextRead operator defines while returning graph outputs\n\n :returns: Returns the path to timestamp folder in which auto tagging results get generated\n\n ' return {'timestamp_folder': File_Txt(self.node.outputs[0])}
Function that the ContentToTextRead operator defines while returning graph outputs :returns: Returns the path to timestamp folder in which auto tagging results get generated
src/main/python/daggit/contrib/sunbird/nodes/contenttagging.py
outputs
aleenaraj/sunbird-ml-workbench
14
python
@property
def outputs(self):
    """Graph outputs produced by the ContentToTextRead operator.

    :returns: dict holding the path to the timestamp folder in which
        the auto-tagging results get generated.
    """
    first_output = self.node.outputs[0]
    return {'timestamp_folder': File_Txt(first_output)}
638db49d8021a71bad9d71b059579d38f6d67bec7f9e9218b284b868de7c7efa
def run(self, range_start, range_end, num_of_processes, content_type):
    """
    This is the main method to derive when creating an operator.  This
    takes in the parameters, runs the text enrichment pipeline and writes
    back the path to the timestamp folder with the content id and its
    enriched text to an h5 file that gets saved as an intermediate result.
    """
    # Resolve the working directory and credentials file from graph inputs.
    DS_DATA_HOME = self.inputs['DS_DATA_HOME'].read_loc()
    pathTocredentials = self.inputs['pathTocredentials'].read_loc()
    # Every run gets its own timestamped output folder.
    timestr = time.strftime('%Y%m%d-%H%M%S')
    path_to_timestamp_folder = os.path.join(DS_DATA_HOME, timestr)
    content_to_text_path = os.path.join(path_to_timestamp_folder, 'content_to_text')
    if (not os.path.exists(content_to_text_path)):
        os.makedirs(content_to_text_path)
        print('content_to_text: ', content_to_text_path)
    # Move the content meta into the timestamp folder and load it.
    contentmeta_path = self.inputs['localpathTocontentMeta'].read_loc()
    shutil.move(contentmeta_path, os.path.join(path_to_timestamp_folder, os.path.split(contentmeta_path)[1]))
    moved_contentmeta_path = os.path.join(path_to_timestamp_folder, os.path.split(contentmeta_path)[1])
    content_meta = pd.read_csv(moved_contentmeta_path)
    # Derive a content type per row when the column is missing; this is
    # best-effort — failures leave NaN, which is filtered out just below.
    if ('derived_contentType' not in list(content_meta.columns)):
        content_meta['derived_contentType'] = np.nan
        for (row_ind, artifact_url) in enumerate(content_meta['artifactUrl']):
            try:
                content_meta['derived_contentType'][row_ind] = identify_contentType(artifact_url)
            except BaseException:
                pass
    content_meta = content_meta[pd.notnull(content_meta['derived_contentType'])]
    content_meta.reset_index(inplace=True, drop=True)
    print(self.outputs['timestamp_folder'].location_specify())
    oldwd = os.getcwd()
    # Columns that must be present for the pipeline to proceed.
    contentMeta_mandatory_fields = ['artifactUrl', 'derived_contentType', 'downloadUrl', 'gradeLevel', 'identifier', 'language', 'subject', 'graph_id', 'nodeType', 'objectType', 'node_id']
    assert df_feature_check(content_meta, contentMeta_mandatory_fields)
    logging.info('CTT_CONTENT_TO_TEXT_START')
    # Drop a spurious '0' index column (presumably left over from a
    # previous csv dump — TODO confirm against the meta producer).
    if (content_meta.columns[0] == '0'):
        content_meta = content_meta.drop('0', axis=1)
    # De-duplicate rows pointing at the same artifact and drop rows
    # without an artifactUrl at all.
    if (list(content_meta[content_meta.duplicated(['artifactUrl'], keep=False)]['artifactUrl']) != []):
        content_meta.drop_duplicates(subset='artifactUrl', inplace=True)
        content_meta.reset_index(drop=True, inplace=True)
    content_meta.dropna(subset=['artifactUrl'], inplace=True)
    content_meta.reset_index(drop=True, inplace=True)
    start = time.time()
    logging.info(('Contents detected in the content meta: ' + str(len(content_meta))))
    logging.info('----Running Content_to_Text for contents from {0} to {1}:'.format(range_start, range_end))
    logging.info('time started: {0}'.format(start))
    content_meta.reset_index(drop=True, inplace=True)
    # 'START'/'END' sentinels select the full row range.
    if (range_start == 'START'):
        range_start = 0
    if (range_end == 'END'):
        range_end = len(content_meta)
    logging.info('CTT_Config: content_meta from {0} to {1} created in: {2}'.format(range_start, range_end, content_to_text_path))
    print('Number of processes: ', num_of_processes)
    # Locate Google credentials: first via the config file, then falling
    # back to the GOOGLE_APPLICATION_CREDENTIALS environment variable.
    status = False
    if os.path.exists(pathTocredentials):
        try:
            config = configparser.ConfigParser(allow_no_value=True)
            config.read(pathTocredentials)
            status = True
            try:
                path_to_googlecred = config['google application credentials']['GOOGLE_APPLICATION_CREDENTIALS']
                with open(path_to_googlecred, 'r') as cred_json:
                    GOOGLE_APPLICATION_CREDENTIALS = cred_json.read()
            except BaseException:
                logging.info('Invalid GOOGLE_APPLICATION_CREDENTIALS in config.')
                logging.info('***Checking for GOOGLE_APPLICATION_CREDENTIALS environment variable')
                status = False
        except BaseException:
            logging.info('Invalid config file')
            logging.info('***Checking for GOOGLE_APPLICATION_CREDENTIALS environment variable')
    if (not status):
        try:
            GOOGLE_APPLICATION_CREDENTIALS = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
            with open(GOOGLE_APPLICATION_CREDENTIALS, 'r') as f:
                GOOGLE_APPLICATION_CREDENTIALS = f.read()
        except BaseException:
            GOOGLE_APPLICATION_CREDENTIALS = ''
            logging.info('Not a valid google credential')
    # Enrich every content row in [range_start, range_end).
    result = [multimodal_text_enrichment(i, timestr, content_meta, content_type, content_to_text_path, GOOGLE_APPLICATION_CREDENTIALS) for i in range(range_start, range_end)]
    print(result)
    os.chdir(oldwd)
    print('Current directory c2t: ', os.getcwd())
    print('timestamp_folder path:', path_to_timestamp_folder)
    self.outputs['timestamp_folder'].write(path_to_timestamp_folder)
This is the main method to derive when creating an operator. This takes in the parameters, runs the text enrichment pipeline and writes back the path to the timestamp folder with the content id and its enriched text to an h5 file that gets saved as an intermediate result
src/main/python/daggit/contrib/sunbird/nodes/contenttagging.py
run
aleenaraj/sunbird-ml-workbench
14
python
def run(self, range_start, range_end, num_of_processes, content_type):
    """
    Main entry point of the ContentToTextRead operator.

    Runs the text-enrichment pipeline for content-meta rows in
    [range_start, range_end) and writes the path of the timestamp folder
    (which holds the per-content enriched text) to the graph output.

    :param range_start: first row index to process, or 'START' for 0
    :param range_end: one past the last row index, or 'END' for all rows
    :param num_of_processes: requested parallelism (currently only printed)
    :param content_type: content-type configuration forwarded to the
        enrichment step
    """
    # Resolve the working directory and credentials file from graph inputs.
    DS_DATA_HOME = self.inputs['DS_DATA_HOME'].read_loc()
    pathTocredentials = self.inputs['pathTocredentials'].read_loc()
    # Every run gets its own timestamped output folder.
    timestr = time.strftime('%Y%m%d-%H%M%S')
    path_to_timestamp_folder = os.path.join(DS_DATA_HOME, timestr)
    content_to_text_path = os.path.join(path_to_timestamp_folder, 'content_to_text')
    if (not os.path.exists(content_to_text_path)):
        os.makedirs(content_to_text_path)
        print('content_to_text: ', content_to_text_path)
    # Move the content meta into the timestamp folder and load it.
    contentmeta_path = self.inputs['localpathTocontentMeta'].read_loc()
    shutil.move(contentmeta_path, os.path.join(path_to_timestamp_folder, os.path.split(contentmeta_path)[1]))
    moved_contentmeta_path = os.path.join(path_to_timestamp_folder, os.path.split(contentmeta_path)[1])
    content_meta = pd.read_csv(moved_contentmeta_path)
    # Derive a content type per row when the column is missing; failures
    # leave NaN, which is filtered out just below.
    if ('derived_contentType' not in list(content_meta.columns)):
        content_meta['derived_contentType'] = np.nan
        for (row_ind, artifact_url) in enumerate(content_meta['artifactUrl']):
            try:
                content_meta['derived_contentType'][row_ind] = identify_contentType(artifact_url)
            except BaseException:
                pass
    content_meta = content_meta[pd.notnull(content_meta['derived_contentType'])]
    content_meta.reset_index(inplace=True, drop=True)
    print(self.outputs['timestamp_folder'].location_specify())
    oldwd = os.getcwd()
    # Columns that must be present for the pipeline to proceed.
    contentMeta_mandatory_fields = ['artifactUrl', 'derived_contentType', 'downloadUrl', 'gradeLevel', 'identifier', 'language', 'subject', 'graph_id', 'nodeType', 'objectType', 'node_id']
    assert df_feature_check(content_meta, contentMeta_mandatory_fields)
    logging.info('CTT_CONTENT_TO_TEXT_START')
    # Drop a spurious '0' index column left over from a previous csv dump.
    if (content_meta.columns[0] == '0'):
        content_meta = content_meta.drop('0', axis=1)
    # De-duplicate rows pointing at the same artifact and drop rows
    # without an artifactUrl at all.
    if (list(content_meta[content_meta.duplicated(['artifactUrl'], keep=False)]['artifactUrl']) != []):
        content_meta.drop_duplicates(subset='artifactUrl', inplace=True)
        content_meta.reset_index(drop=True, inplace=True)
    content_meta.dropna(subset=['artifactUrl'], inplace=True)
    content_meta.reset_index(drop=True, inplace=True)
    start = time.time()
    logging.info(('Contents detected in the content meta: ' + str(len(content_meta))))
    logging.info('----Running Content_to_Text for contents from {0} to {1}:'.format(range_start, range_end))
    logging.info('time started: {0}'.format(start))
    content_meta.reset_index(drop=True, inplace=True)
    # 'START'/'END' sentinels select the full row range.
    if (range_start == 'START'):
        range_start = 0
    if (range_end == 'END'):
        range_end = len(content_meta)
    logging.info('CTT_Config: content_meta from {0} to {1} created in: {2}'.format(range_start, range_end, content_to_text_path))
    print('Number of processes: ', num_of_processes)
    # Locate Google credentials: first via the config file, then falling
    # back to the GOOGLE_APPLICATION_CREDENTIALS environment variable.
    status = False
    if os.path.exists(pathTocredentials):
        try:
            config = configparser.ConfigParser(allow_no_value=True)
            config.read(pathTocredentials)
            status = True
            try:
                path_to_googlecred = config['google application credentials']['GOOGLE_APPLICATION_CREDENTIALS']
                with open(path_to_googlecred, 'r') as cred_json:
                    GOOGLE_APPLICATION_CREDENTIALS = cred_json.read()
            except BaseException:
                logging.info('Invalid GOOGLE_APPLICATION_CREDENTIALS in config.')
                logging.info('***Checking for GOOGLE_APPLICATION_CREDENTIALS environment variable')
                status = False
        except BaseException:
            logging.info('Invalid config file')
            logging.info('***Checking for GOOGLE_APPLICATION_CREDENTIALS environment variable')
    if (not status):
        try:
            GOOGLE_APPLICATION_CREDENTIALS = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
            with open(GOOGLE_APPLICATION_CREDENTIALS, 'r') as f:
                GOOGLE_APPLICATION_CREDENTIALS = f.read()
        except BaseException:
            # BUG FIX: this copy assigned the return value of logging.info
            # (always None) to the credentials; the fallback must be the
            # empty string, with the failure logged separately.
            GOOGLE_APPLICATION_CREDENTIALS = ''
            logging.info('Not a valid google credential')
    # Enrich every content row in [range_start, range_end).
    result = [multimodal_text_enrichment(i, timestr, content_meta, content_type, content_to_text_path, GOOGLE_APPLICATION_CREDENTIALS) for i in range(range_start, range_end)]
    print(result)
    os.chdir(oldwd)
    print('Current directory c2t: ', os.getcwd())
    print('timestamp_folder path:', path_to_timestamp_folder)
    self.outputs['timestamp_folder'].write(path_to_timestamp_folder)
def run(self, range_start, range_end, num_of_processes, content_type): '\n This is the main method to derive when creating an operator. This takes in the parameters, \n runs text enrichment pipline and writes back the path to the \n timestamp folder with the content id and its enriched text to an h5 file that gets saved as an intermediate result \n\n ' DS_DATA_HOME = self.inputs['DS_DATA_HOME'].read_loc() pathTocredentials = self.inputs['pathTocredentials'].read_loc() timestr = time.strftime('%Y%m%d-%H%M%S') path_to_timestamp_folder = os.path.join(DS_DATA_HOME, timestr) content_to_text_path = os.path.join(path_to_timestamp_folder, 'content_to_text') if (not os.path.exists(content_to_text_path)): os.makedirs(content_to_text_path) print('content_to_text: ', content_to_text_path) contentmeta_path = self.inputs['localpathTocontentMeta'].read_loc() shutil.move(contentmeta_path, os.path.join(path_to_timestamp_folder, os.path.split(contentmeta_path)[1])) moved_contentmeta_path = os.path.join(path_to_timestamp_folder, os.path.split(contentmeta_path)[1]) content_meta = pd.read_csv(moved_contentmeta_path) if ('derived_contentType' not in list(content_meta.columns)): content_meta['derived_contentType'] = np.nan for (row_ind, artifact_url) in enumerate(content_meta['artifactUrl']): try: content_meta['derived_contentType'][row_ind] = identify_contentType(artifact_url) except BaseException: pass content_meta = content_meta[pd.notnull(content_meta['derived_contentType'])] content_meta.reset_index(inplace=True, drop=True) print(self.outputs['timestamp_folder'].location_specify()) oldwd = os.getcwd() contentMeta_mandatory_fields = ['artifactUrl', 'derived_contentType', 'downloadUrl', 'gradeLevel', 'identifier', 'language', 'subject', 'graph_id', 'nodeType', 'objectType', 'node_id'] assert df_feature_check(content_meta, contentMeta_mandatory_fields) logging.info('CTT_CONTENT_TO_TEXT_START') if (content_meta.columns[0] == '0'): content_meta = content_meta.drop('0', axis=1) if 
(list(content_meta[content_meta.duplicated(['artifactUrl'], keep=False)]['artifactUrl']) != []): content_meta.drop_duplicates(subset='artifactUrl', inplace=True) content_meta.reset_index(drop=True, inplace=True) content_meta.dropna(subset=['artifactUrl'], inplace=True) content_meta.reset_index(drop=True, inplace=True) start = time.time() logging.info(('Contents detected in the content meta: ' + str(len(content_meta)))) logging.info('----Running Content_to_Text for contents from {0} to {1}:'.format(range_start, range_end)) logging.info('time started: {0}'.format(start)) content_meta.reset_index(drop=True, inplace=True) if (range_start == 'START'): range_start = 0 if (range_end == 'END'): range_end = len(content_meta) logging.info('CTT_Config: content_meta from {0} to {1} created in: {2}'.format(range_start, range_end, content_to_text_path)) print('Number of processes: ', num_of_processes) status = False if os.path.exists(pathTocredentials): try: config = configparser.ConfigParser(allow_no_value=True) config.read(pathTocredentials) status = True try: path_to_googlecred = config['google application credentials']['GOOGLE_APPLICATION_CREDENTIALS'] with open(path_to_googlecred, 'r') as cred_json: GOOGLE_APPLICATION_CREDENTIALS = cred_json.read() except BaseException: logging.info('Invalid GOOGLE_APPLICATION_CREDENTIALS in config.') logging.info('***Checking for GOOGLE_APPLICATION_CREDENTIALS environment variable') status = False except BaseException: logging.info('Invalid config file') logging.info('***Checking for GOOGLE_APPLICATION_CREDENTIALS environment variable') if (not status): try: GOOGLE_APPLICATION_CREDENTIALS = os.environ['GOOGLE_APPLICATION_CREDENTIALS'] with open(GOOGLE_APPLICATION_CREDENTIALS, 'r') as f: GOOGLE_APPLICATION_CREDENTIALS = f.read() except BaseException: GOOGLE_APPLICATION_CREDENTIALS = logging.info('Not a valid google credential') result = [multimodal_text_enrichment(i, timestr, content_meta, content_type, content_to_text_path, 
GOOGLE_APPLICATION_CREDENTIALS) for i in range(range_start, range_end)] print(result) os.chdir(oldwd) print('Current directory c2t: ', os.getcwd()) print('timestamp_folder path:', path_to_timestamp_folder) self.outputs['timestamp_folder'].write(path_to_timestamp_folder)<|docstring|>This is the main method to derive when creating an operator. This takes in the parameters, runs text enrichment pipline and writes back the path to the timestamp folder with the content id and its enriched text to an h5 file that gets saved as an intermediate result<|endoftext|>
f4a834c9ff4f1e3d831d8c1f4205cae750ec31d8ec27891cb2506a9f4da8565d
def get_sample_compiled_module(target_dir): 'Support function that returns a TFLite compiled module' base_url = 'https://storage.googleapis.com/download.tensorflow.org/models' model_url = 'mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz' model_file = download_and_untar('{}/{}'.format(base_url, model_url), 'mobilenet_v1_1.0_224_quant.tflite', temp_dir=target_dir) return tvmc.compiler.compile_model(model_file, target='llvm')
Support function that returns a TFLite compiled module
tests/python/driver/tvmc/conftest.py
get_sample_compiled_module
limenghao/incubator-tvm
40
python
def get_sample_compiled_module(target_dir): base_url = 'https://storage.googleapis.com/download.tensorflow.org/models' model_url = 'mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz' model_file = download_and_untar('{}/{}'.format(base_url, model_url), 'mobilenet_v1_1.0_224_quant.tflite', temp_dir=target_dir) return tvmc.compiler.compile_model(model_file, target='llvm')
def get_sample_compiled_module(target_dir): base_url = 'https://storage.googleapis.com/download.tensorflow.org/models' model_url = 'mobilenet_v1_2018_08_02/mobilenet_v1_1.0_224_quant.tgz' model_file = download_and_untar('{}/{}'.format(base_url, model_url), 'mobilenet_v1_1.0_224_quant.tflite', temp_dir=target_dir) return tvmc.compiler.compile_model(model_file, target='llvm')<|docstring|>Support function that returns a TFLite compiled module<|endoftext|>
4a33e56b23dd1d962b2de7ee7f903781d345729b2b3b9fc35a2e6bdddb116e2d
def __virtual__(): '\n Only load if gem module is available in __salt__\n ' return ('gem.list' in __salt__)
Only load if gem module is available in __salt__
salt/states/gem.py
__virtual__
TiteiKo/salt
2
python
def __virtual__(): '\n \n ' return ('gem.list' in __salt__)
def __virtual__(): '\n \n ' return ('gem.list' in __salt__)<|docstring|>Only load if gem module is available in __salt__<|endoftext|>
bf95f4ae4f5b79c1221ac52193d991ad5100f93488c99db12789bb841e40b31f
def installed(name, ruby=None, runas=None, user=None, version=None, rdoc=False, ri=False): "\n Make sure that a gem is installed.\n\n name\n The name of the gem to install\n\n ruby: None\n For RVM or rbenv installations: the ruby version and gemset to target.\n\n runas: None\n The user under which to run the ``gem`` command\n\n .. deprecated:: 0.17.0\n\n user: None\n The user under which to run the ``gem`` command\n\n .. versionadded:: 0.17.0\n\n version : None\n Specify the version to install for the gem.\n Doesn't play nice with multiple gems at once\n\n rdoc : False\n Generate RDoc documentation for the gem(s).\n\n ri : False\n Generate RI documentation for the gem(s).\n " ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} salt.utils.warn_until('Lithium', "Please remove 'runas' support at this stage. 'user' support was added in 0.17.0", _dont_call_warnings=True) if runas: ret.setdefault('warnings', []).append("The 'runas' argument is being deprecated in favor of 'user', please update your state files.") if ((user is not None) and (runas is not None)): ret.setdefault('warnings', []).append("Passed both the 'runas' and 'user' arguments. Please don't. 'runas' is being ignored in favor of 'user'.") runas = None elif (runas is not None): user = runas runas = None gems = __salt__['gem.list'](name, ruby, runas=user) if ((name in gems) and (version is not None) and (version in gems[name])): ret['result'] = True ret['comment'] = 'Gem is already installed.' return ret elif ((name in gems) and (version is None)): ret['result'] = True ret['comment'] = 'Gem is already installed.' return ret if __opts__['test']: ret['comment'] = 'The gem {0} would have been installed'.format(name) return ret if __salt__['gem.install'](name, ruby=ruby, runas=user, version=version, rdoc=rdoc, ri=ri): ret['result'] = True ret['changes'][name] = 'Installed' ret['comment'] = 'Gem was successfully installed' else: ret['result'] = False ret['comment'] = 'Could not install gem.' 
return ret
Make sure that a gem is installed. name The name of the gem to install ruby: None For RVM or rbenv installations: the ruby version and gemset to target. runas: None The user under which to run the ``gem`` command .. deprecated:: 0.17.0 user: None The user under which to run the ``gem`` command .. versionadded:: 0.17.0 version : None Specify the version to install for the gem. Doesn't play nice with multiple gems at once rdoc : False Generate RDoc documentation for the gem(s). ri : False Generate RI documentation for the gem(s).
salt/states/gem.py
installed
TiteiKo/salt
2
python
def installed(name, ruby=None, runas=None, user=None, version=None, rdoc=False, ri=False): "\n Make sure that a gem is installed.\n\n name\n The name of the gem to install\n\n ruby: None\n For RVM or rbenv installations: the ruby version and gemset to target.\n\n runas: None\n The user under which to run the ``gem`` command\n\n .. deprecated:: 0.17.0\n\n user: None\n The user under which to run the ``gem`` command\n\n .. versionadded:: 0.17.0\n\n version : None\n Specify the version to install for the gem.\n Doesn't play nice with multiple gems at once\n\n rdoc : False\n Generate RDoc documentation for the gem(s).\n\n ri : False\n Generate RI documentation for the gem(s).\n " ret = {'name': name, 'result': None, 'comment': , 'changes': {}} salt.utils.warn_until('Lithium', "Please remove 'runas' support at this stage. 'user' support was added in 0.17.0", _dont_call_warnings=True) if runas: ret.setdefault('warnings', []).append("The 'runas' argument is being deprecated in favor of 'user', please update your state files.") if ((user is not None) and (runas is not None)): ret.setdefault('warnings', []).append("Passed both the 'runas' and 'user' arguments. Please don't. 'runas' is being ignored in favor of 'user'.") runas = None elif (runas is not None): user = runas runas = None gems = __salt__['gem.list'](name, ruby, runas=user) if ((name in gems) and (version is not None) and (version in gems[name])): ret['result'] = True ret['comment'] = 'Gem is already installed.' return ret elif ((name in gems) and (version is None)): ret['result'] = True ret['comment'] = 'Gem is already installed.' return ret if __opts__['test']: ret['comment'] = 'The gem {0} would have been installed'.format(name) return ret if __salt__['gem.install'](name, ruby=ruby, runas=user, version=version, rdoc=rdoc, ri=ri): ret['result'] = True ret['changes'][name] = 'Installed' ret['comment'] = 'Gem was successfully installed' else: ret['result'] = False ret['comment'] = 'Could not install gem.' 
return ret
def installed(name, ruby=None, runas=None, user=None, version=None, rdoc=False, ri=False): "\n Make sure that a gem is installed.\n\n name\n The name of the gem to install\n\n ruby: None\n For RVM or rbenv installations: the ruby version and gemset to target.\n\n runas: None\n The user under which to run the ``gem`` command\n\n .. deprecated:: 0.17.0\n\n user: None\n The user under which to run the ``gem`` command\n\n .. versionadded:: 0.17.0\n\n version : None\n Specify the version to install for the gem.\n Doesn't play nice with multiple gems at once\n\n rdoc : False\n Generate RDoc documentation for the gem(s).\n\n ri : False\n Generate RI documentation for the gem(s).\n " ret = {'name': name, 'result': None, 'comment': , 'changes': {}} salt.utils.warn_until('Lithium', "Please remove 'runas' support at this stage. 'user' support was added in 0.17.0", _dont_call_warnings=True) if runas: ret.setdefault('warnings', []).append("The 'runas' argument is being deprecated in favor of 'user', please update your state files.") if ((user is not None) and (runas is not None)): ret.setdefault('warnings', []).append("Passed both the 'runas' and 'user' arguments. Please don't. 'runas' is being ignored in favor of 'user'.") runas = None elif (runas is not None): user = runas runas = None gems = __salt__['gem.list'](name, ruby, runas=user) if ((name in gems) and (version is not None) and (version in gems[name])): ret['result'] = True ret['comment'] = 'Gem is already installed.' return ret elif ((name in gems) and (version is None)): ret['result'] = True ret['comment'] = 'Gem is already installed.' return ret if __opts__['test']: ret['comment'] = 'The gem {0} would have been installed'.format(name) return ret if __salt__['gem.install'](name, ruby=ruby, runas=user, version=version, rdoc=rdoc, ri=ri): ret['result'] = True ret['changes'][name] = 'Installed' ret['comment'] = 'Gem was successfully installed' else: ret['result'] = False ret['comment'] = 'Could not install gem.' 
return ret<|docstring|>Make sure that a gem is installed. name The name of the gem to install ruby: None For RVM or rbenv installations: the ruby version and gemset to target. runas: None The user under which to run the ``gem`` command .. deprecated:: 0.17.0 user: None The user under which to run the ``gem`` command .. versionadded:: 0.17.0 version : None Specify the version to install for the gem. Doesn't play nice with multiple gems at once rdoc : False Generate RDoc documentation for the gem(s). ri : False Generate RI documentation for the gem(s).<|endoftext|>
3f21c83dc5a4806fe1b5eb3784aae61a69687b41eb7a4f94dcfa85bb79f7328e
def removed(name, ruby=None, runas=None, user=None): '\n Make sure that a gem is not installed.\n\n name\n The name of the gem to uninstall\n\n ruby: None\n For RVM or rbenv installations: the ruby version and gemset to target.\n\n runas: None\n The user under which to run the ``gem`` command\n\n .. deprecated:: 0.17.0\n\n user: None\n The user under which to run the ``gem`` command\n\n .. versionadded:: 0.17.0\n ' ret = {'name': name, 'result': None, 'comment': '', 'changes': {}} salt.utils.warn_until('Lithium', "Please remove 'runas' support at this stage. 'user' support was added in 0.17.0", _dont_call_warnings=True) if runas: ret.setdefault('warnings', []).append("The 'runas' argument is being deprecated in favor of 'user', please update your state files.") if ((user is not None) and (runas is not None)): ret.setdefault('warnings', []).append("Passed both the 'runas' and 'user' arguments. Please don't. 'runas' is being ignored in favor of 'user'.") runas = None elif (runas is not None): user = runas runas = None if (name not in __salt__['gem.list'](name, ruby, runas=user)): ret['result'] = True ret['comment'] = 'Gem is not installed.' return ret if __opts__['test']: ret['comment'] = 'The gem {0} would have been removed'.format(name) return ret if __salt__['gem.uninstall'](name, ruby, runas=user): ret['result'] = True ret['changes'][name] = 'Removed' ret['comment'] = 'Gem was successfully removed.' else: ret['result'] = False ret['comment'] = 'Could not remove gem.' return ret
Make sure that a gem is not installed. name The name of the gem to uninstall ruby: None For RVM or rbenv installations: the ruby version and gemset to target. runas: None The user under which to run the ``gem`` command .. deprecated:: 0.17.0 user: None The user under which to run the ``gem`` command .. versionadded:: 0.17.0
salt/states/gem.py
removed
TiteiKo/salt
2
python
def removed(name, ruby=None, runas=None, user=None): '\n Make sure that a gem is not installed.\n\n name\n The name of the gem to uninstall\n\n ruby: None\n For RVM or rbenv installations: the ruby version and gemset to target.\n\n runas: None\n The user under which to run the ``gem`` command\n\n .. deprecated:: 0.17.0\n\n user: None\n The user under which to run the ``gem`` command\n\n .. versionadded:: 0.17.0\n ' ret = {'name': name, 'result': None, 'comment': , 'changes': {}} salt.utils.warn_until('Lithium', "Please remove 'runas' support at this stage. 'user' support was added in 0.17.0", _dont_call_warnings=True) if runas: ret.setdefault('warnings', []).append("The 'runas' argument is being deprecated in favor of 'user', please update your state files.") if ((user is not None) and (runas is not None)): ret.setdefault('warnings', []).append("Passed both the 'runas' and 'user' arguments. Please don't. 'runas' is being ignored in favor of 'user'.") runas = None elif (runas is not None): user = runas runas = None if (name not in __salt__['gem.list'](name, ruby, runas=user)): ret['result'] = True ret['comment'] = 'Gem is not installed.' return ret if __opts__['test']: ret['comment'] = 'The gem {0} would have been removed'.format(name) return ret if __salt__['gem.uninstall'](name, ruby, runas=user): ret['result'] = True ret['changes'][name] = 'Removed' ret['comment'] = 'Gem was successfully removed.' else: ret['result'] = False ret['comment'] = 'Could not remove gem.' return ret
def removed(name, ruby=None, runas=None, user=None): '\n Make sure that a gem is not installed.\n\n name\n The name of the gem to uninstall\n\n ruby: None\n For RVM or rbenv installations: the ruby version and gemset to target.\n\n runas: None\n The user under which to run the ``gem`` command\n\n .. deprecated:: 0.17.0\n\n user: None\n The user under which to run the ``gem`` command\n\n .. versionadded:: 0.17.0\n ' ret = {'name': name, 'result': None, 'comment': , 'changes': {}} salt.utils.warn_until('Lithium', "Please remove 'runas' support at this stage. 'user' support was added in 0.17.0", _dont_call_warnings=True) if runas: ret.setdefault('warnings', []).append("The 'runas' argument is being deprecated in favor of 'user', please update your state files.") if ((user is not None) and (runas is not None)): ret.setdefault('warnings', []).append("Passed both the 'runas' and 'user' arguments. Please don't. 'runas' is being ignored in favor of 'user'.") runas = None elif (runas is not None): user = runas runas = None if (name not in __salt__['gem.list'](name, ruby, runas=user)): ret['result'] = True ret['comment'] = 'Gem is not installed.' return ret if __opts__['test']: ret['comment'] = 'The gem {0} would have been removed'.format(name) return ret if __salt__['gem.uninstall'](name, ruby, runas=user): ret['result'] = True ret['changes'][name] = 'Removed' ret['comment'] = 'Gem was successfully removed.' else: ret['result'] = False ret['comment'] = 'Could not remove gem.' return ret<|docstring|>Make sure that a gem is not installed. name The name of the gem to uninstall ruby: None For RVM or rbenv installations: the ruby version and gemset to target. runas: None The user under which to run the ``gem`` command .. deprecated:: 0.17.0 user: None The user under which to run the ``gem`` command .. versionadded:: 0.17.0<|endoftext|>
4ed67a02a0ee6d450403526f400fb86f6226d2cb04134e95299d28e9c77615d6
def init_pixels(self): '\n Sets up the pixel array\n ' (self._max_strand, self._max_fixture, self._max_pixel) = self.scene().get_matrix_extents() self._pixel_buffer = BufferUtils.create_buffer()
Sets up the pixel array
lib/raw_preset.py
init_pixels
ikea-lisp-code/firemix
2
python
def init_pixels(self): '\n \n ' (self._max_strand, self._max_fixture, self._max_pixel) = self.scene().get_matrix_extents() self._pixel_buffer = BufferUtils.create_buffer()
def init_pixels(self): '\n \n ' (self._max_strand, self._max_fixture, self._max_pixel) = self.scene().get_matrix_extents() self._pixel_buffer = BufferUtils.create_buffer()<|docstring|>Sets up the pixel array<|endoftext|>
75e5bd5aa920b1f648408482a0ad958e88f7902cf79e434f1ca6d3523128c226
def draw(self, dt): '\n Override this method to define per-pixel behavior.\n Write output to self._pixel_buffer\n ' pass
Override this method to define per-pixel behavior. Write output to self._pixel_buffer
lib/raw_preset.py
draw
ikea-lisp-code/firemix
2
python
def draw(self, dt): '\n Override this method to define per-pixel behavior.\n Write output to self._pixel_buffer\n ' pass
def draw(self, dt): '\n Override this method to define per-pixel behavior.\n Write output to self._pixel_buffer\n ' pass<|docstring|>Override this method to define per-pixel behavior. Write output to self._pixel_buffer<|endoftext|>
e18d4b5b98a368a49ee63b4ceb59deeeaf9394eefdae510ac158f6c323ee3599
def current_color(self, address): '\n Returns the current color of a pixel in RGB float\n address is a tuple of (strand, fixture, pixel)\n ' return self._pixel_buffer[address]
Returns the current color of a pixel in RGB float address is a tuple of (strand, fixture, pixel)
lib/raw_preset.py
current_color
ikea-lisp-code/firemix
2
python
def current_color(self, address): '\n Returns the current color of a pixel in RGB float\n address is a tuple of (strand, fixture, pixel)\n ' return self._pixel_buffer[address]
def current_color(self, address): '\n Returns the current color of a pixel in RGB float\n address is a tuple of (strand, fixture, pixel)\n ' return self._pixel_buffer[address]<|docstring|>Returns the current color of a pixel in RGB float address is a tuple of (strand, fixture, pixel)<|endoftext|>
3e5d60411f4676f0b845022f496ff5816366e3c68eb74bb64da5371c3a7b126e
def setAllHLS(self, hues, luminances, saturations): '\n Sets the entire buffer, assuming an input list.\n ' self._pixel_buffer[(:, 0)] = hues self._pixel_buffer[(:, 1)] = luminances self._pixel_buffer[(:, 2)] = saturations
Sets the entire buffer, assuming an input list.
lib/raw_preset.py
setAllHLS
ikea-lisp-code/firemix
2
python
def setAllHLS(self, hues, luminances, saturations): '\n \n ' self._pixel_buffer[(:, 0)] = hues self._pixel_buffer[(:, 1)] = luminances self._pixel_buffer[(:, 2)] = saturations
def setAllHLS(self, hues, luminances, saturations): '\n \n ' self._pixel_buffer[(:, 0)] = hues self._pixel_buffer[(:, 1)] = luminances self._pixel_buffer[(:, 2)] = saturations<|docstring|>Sets the entire buffer, assuming an input list.<|endoftext|>
326ad57639050440be3bf12c7e777bcc1132f54e2f2f136c6723bb5914fb5069
def get_buffer(self): '\n Used by Mixer to render output\n ' return self._pixel_buffer
Used by Mixer to render output
lib/raw_preset.py
get_buffer
ikea-lisp-code/firemix
2
python
def get_buffer(self): '\n \n ' return self._pixel_buffer
def get_buffer(self): '\n \n ' return self._pixel_buffer<|docstring|>Used by Mixer to render output<|endoftext|>
9d77e781ef248ea96efe040dd020f647bc4b0d276ca12de4655e6297f2413c09
def tick(self, dt): '\n Unlike tick() in Preset, this method applies pixel_behavior to all pixels.\n ' for parameter in self._parameters.values(): parameter.tick(dt) self.draw(dt) self._ticks += 1
Unlike tick() in Preset, this method applies pixel_behavior to all pixels.
lib/raw_preset.py
tick
ikea-lisp-code/firemix
2
python
def tick(self, dt): '\n \n ' for parameter in self._parameters.values(): parameter.tick(dt) self.draw(dt) self._ticks += 1
def tick(self, dt): '\n \n ' for parameter in self._parameters.values(): parameter.tick(dt) self.draw(dt) self._ticks += 1<|docstring|>Unlike tick() in Preset, this method applies pixel_behavior to all pixels.<|endoftext|>
eba6f5697ee42f3618d62b44e73fa8962544e0c3e6801a4fd3b5983b17109193
def CorynebacteriumGlutamicum(directed: bool=False, verbose: int=2, cache_path: str='graphs/string', **additional_graph_kwargs: Dict) -> EnsmallenGraph: 'Return new instance of the Corynebacterium glutamicum graph.\n\n The graph is automatically retrieved from the STRING repository. \n\n\t\n\n Parameters\n -------------------\n directed: bool = False,\n Wether to load the graph as directed or undirected.\n By default false.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache_path: str = "graphs",\n Where to store the downloaded graphs.\n additional_graph_kwargs: Dict,\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Corynebacterium glutamicum graph.\n\n\tReport\n\t---------------------\n\tAt the time of rendering these methods (please see datetime below), the graph\n\thad the following characteristics:\n\t\n\tDatetime: 2021-02-02 19:58:58.312842\n\t\n\tThe undirected graph Corynebacterium glutamicum has 3028 nodes and 181633\n\tweighted edges, of which none are self-loops. The graph is dense as it\n\thas a density of 0.03963 and has 12 connected components, where the component\n\twith most nodes has 2996 nodes and the component with the least nodes has\n\t2 nodes. The graph median node degree is 93, the mean node degree is 119.97,\n\tand the node degree mode is 3. 
The top 5 most central nodes are 196627.cg1525\n\t(degree 1130), 196627.cg0957 (degree 904), 196627.cg2743 (degree 902),\n\t196627.cg3178 (degree 811) and 196627.cg0703 (degree 787).\n\t\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t\n\n\tUsage example\n\t----------------------\n\tThe usage of this graph is relatively straightforward:\n\t\n\t.. code:: python\n\t\n\t # First import the function to retrieve the graph from the datasets\n\t from ensmallen_graph.datasets.string import CorynebacteriumGlutamicum\n\t\n\t # Then load the graph\n\t graph = CorynebacteriumGlutamicum()\n\t\n\t # Finally, you can do anything with it, for instance, compute its report:\n\t print(graph)\n\t\n\t # If you need to run a link prediction task with validation,\n\t # you can split the graph using a connected holdout as follows:\n\t train_graph, validation_graph = graph.connected_holdout(\n\t # You can use an 80/20 split the holdout, for example.\n\t train_size=0.8,\n\t # The random state is used to reproduce the holdout.\n\t random_state=42,\n\t # Wether to show a loading bar.\n\t verbose=True\n\t )\n\t\n\t # Remember that, if you need, you can enable the memory-time trade-offs:\n\t train_graph.enable(\n\t vector_sources=True,\n\t vector_destinations=True,\n\t vector_outbounds=True\n\t )\n\t\n\t # Consider using the methods made available in the Embiggen package\n\t # to run 
graph embedding or link prediction tasks.\n ' return AutomaticallyRetrievedGraph(graph_name='CorynebacteriumGlutamicum', dataset='string', directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()
Return new instance of the Corynebacterium glutamicum graph. The graph is automatically retrieved from the STRING repository. Parameters ------------------- directed: bool = False, Wether to load the graph as directed or undirected. By default false. verbose: int = 2, Wether to show loading bars during the retrieval and building of the graph. cache_path: str = "graphs", Where to store the downloaded graphs. additional_graph_kwargs: Dict, Additional graph kwargs. Returns ----------------------- Instace of Corynebacterium glutamicum graph. Report --------------------- At the time of rendering these methods (please see datetime below), the graph had the following characteristics: Datetime: 2021-02-02 19:58:58.312842 The undirected graph Corynebacterium glutamicum has 3028 nodes and 181633 weighted edges, of which none are self-loops. The graph is dense as it has a density of 0.03963 and has 12 connected components, where the component with most nodes has 2996 nodes and the component with the least nodes has 2 nodes. The graph median node degree is 93, the mean node degree is 119.97, and the node degree mode is 3. The top 5 most central nodes are 196627.cg1525 (degree 1130), 196627.cg0957 (degree 904), 196627.cg2743 (degree 902), 196627.cg3178 (degree 811) and 196627.cg0703 (degree 787). 
References --------------------- Please cite the following if you use the data: @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.string import CorynebacteriumGlutamicum # Then load the graph graph = CorynebacteriumGlutamicum() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks.
bindings/python/ensmallen_graph/datasets/string/corynebacteriumglutamicum.py
CorynebacteriumGlutamicum
caufieldjh/ensmallen_graph
0
python
def CorynebacteriumGlutamicum(directed: bool=False, verbose: int=2, cache_path: str='graphs/string', **additional_graph_kwargs: Dict) -> EnsmallenGraph: 'Return new instance of the Corynebacterium glutamicum graph.\n\n The graph is automatically retrieved from the STRING repository. \n\n\t\n\n Parameters\n -------------------\n directed: bool = False,\n Wether to load the graph as directed or undirected.\n By default false.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache_path: str = "graphs",\n Where to store the downloaded graphs.\n additional_graph_kwargs: Dict,\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Corynebacterium glutamicum graph.\n\n\tReport\n\t---------------------\n\tAt the time of rendering these methods (please see datetime below), the graph\n\thad the following characteristics:\n\t\n\tDatetime: 2021-02-02 19:58:58.312842\n\t\n\tThe undirected graph Corynebacterium glutamicum has 3028 nodes and 181633\n\tweighted edges, of which none are self-loops. The graph is dense as it\n\thas a density of 0.03963 and has 12 connected components, where the component\n\twith most nodes has 2996 nodes and the component with the least nodes has\n\t2 nodes. The graph median node degree is 93, the mean node degree is 119.97,\n\tand the node degree mode is 3. 
The top 5 most central nodes are 196627.cg1525\n\t(degree 1130), 196627.cg0957 (degree 904), 196627.cg2743 (degree 902),\n\t196627.cg3178 (degree 811) and 196627.cg0703 (degree 787).\n\t\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t\n\n\tUsage example\n\t----------------------\n\tThe usage of this graph is relatively straightforward:\n\t\n\t.. code:: python\n\t\n\t # First import the function to retrieve the graph from the datasets\n\t from ensmallen_graph.datasets.string import CorynebacteriumGlutamicum\n\t\n\t # Then load the graph\n\t graph = CorynebacteriumGlutamicum()\n\t\n\t # Finally, you can do anything with it, for instance, compute its report:\n\t print(graph)\n\t\n\t # If you need to run a link prediction task with validation,\n\t # you can split the graph using a connected holdout as follows:\n\t train_graph, validation_graph = graph.connected_holdout(\n\t # You can use an 80/20 split the holdout, for example.\n\t train_size=0.8,\n\t # The random state is used to reproduce the holdout.\n\t random_state=42,\n\t # Wether to show a loading bar.\n\t verbose=True\n\t )\n\t\n\t # Remember that, if you need, you can enable the memory-time trade-offs:\n\t train_graph.enable(\n\t vector_sources=True,\n\t vector_destinations=True,\n\t vector_outbounds=True\n\t )\n\t\n\t # Consider using the methods made available in the Embiggen package\n\t # to run 
graph embedding or link prediction tasks.\n ' return AutomaticallyRetrievedGraph(graph_name='CorynebacteriumGlutamicum', dataset='string', directed=directed, verbose=verbose, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()
def CorynebacteriumGlutamicum(directed: bool = False, verbose: int = 2,
                              cache_path: str = 'graphs/string',
                              **additional_graph_kwargs: Dict) -> EnsmallenGraph:
    """Return new instance of the Corynebacterium glutamicum graph.

    The graph is automatically retrieved from the STRING repository.

    Parameters
    -------------------
    directed: bool = False,
        Whether to load the graph as directed or undirected.
        By default false.
    verbose: int = 2,
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache_path: str = "graphs",
        Where to store the downloaded graphs.
    additional_graph_kwargs: Dict,
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of Corynebacterium glutamicum graph.

    Report
    ---------------------
    At the time of rendering these methods (please see datetime below), the
    graph had the following characteristics:

    Datetime: 2021-02-02 19:58:58.312842

    The undirected graph Corynebacterium glutamicum has 3028 nodes and
    181633 weighted edges, of which none are self-loops. The graph is dense
    as it has a density of 0.03963 and has 12 connected components, where
    the component with most nodes has 2996 nodes and the component with the
    least nodes has 2 nodes. The graph median node degree is 93, the mean
    node degree is 119.97, and the node degree mode is 3. The top 5 most
    central nodes are 196627.cg1525 (degree 1130), 196627.cg0957 (degree
    904), 196627.cg2743 (degree 902), 196627.cg3178 (degree 811) and
    196627.cg0703 (degree 787).

    References
    ---------------------
    Please cite the following if you use the data:

    @article{szklarczyk2019string,
        title={STRING v11: protein--protein association networks with
               increased coverage, supporting functional discovery in
               genome-wide experimental datasets},
        author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and
                Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime
                and Simonovic, Milan and Doncheva, Nadezhda T and Morris,
                John H and Bork, Peer and others},
        journal={Nucleic acids research},
        volume={47},
        number={D1},
        pages={D607--D613},
        year={2019},
        publisher={Oxford University Press}
    }

    Usage example
    ----------------------
    The usage of this graph is relatively straightforward:

    .. code:: python

        # First import the function to retrieve the graph from the datasets
        from ensmallen_graph.datasets.string import CorynebacteriumGlutamicum

        # Then load the graph
        graph = CorynebacteriumGlutamicum()

        # Finally, you can do anything with it, for instance, compute its
        # report:
        print(graph)

        # If you need to run a link prediction task with validation,
        # you can split the graph using a connected holdout as follows:
        train_graph, validation_graph = graph.connected_holdout(
            # You can use an 80/20 split the holdout, for example.
            train_size=0.8,
            # The random state is used to reproduce the holdout.
            random_state=42,
            # Whether to show a loading bar.
            verbose=True
        )

        # Remember that, if you need, you can enable the memory-time
        # trade-offs:
        train_graph.enable(
            vector_sources=True,
            vector_destinations=True,
            vector_outbounds=True
        )

        # Consider using the methods made available in the Embiggen package
        # to run graph embedding or link prediction tasks.
    """
    return AutomaticallyRetrievedGraph(
        graph_name='CorynebacteriumGlutamicum',
        dataset='string',
        directed=directed,
        verbose=verbose,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs
    )()
References --------------------- Please cite the following if you use the data: @article{szklarczyk2019string, title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets}, author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others}, journal={Nucleic acids research}, volume={47}, number={D1}, pages={D607--D613}, year={2019}, publisher={Oxford University Press} } Usage example ---------------------- The usage of this graph is relatively straightforward: .. code:: python # First import the function to retrieve the graph from the datasets from ensmallen_graph.datasets.string import CorynebacteriumGlutamicum # Then load the graph graph = CorynebacteriumGlutamicum() # Finally, you can do anything with it, for instance, compute its report: print(graph) # If you need to run a link prediction task with validation, # you can split the graph using a connected holdout as follows: train_graph, validation_graph = graph.connected_holdout( # You can use an 80/20 split the holdout, for example. train_size=0.8, # The random state is used to reproduce the holdout. random_state=42, # Wether to show a loading bar. verbose=True ) # Remember that, if you need, you can enable the memory-time trade-offs: train_graph.enable( vector_sources=True, vector_destinations=True, vector_outbounds=True ) # Consider using the methods made available in the Embiggen package # to run graph embedding or link prediction tasks.<|endoftext|>
67825db8ba2ad723a30ab7a578dfccf7304e7954286360202abe9db7d59bd15f
def split_property(prop):
    """Extract key street address information from the full address.

    Args:
        prop: Full address string, e.g. ``"12 Example Street, Hamilton"``.

    Returns:
        list: ``[street_number, street_name]``, or a single-element list
        when the street address contains no space.
    """
    # Everything before the first ", " is the street-address portion.
    # str.partition handles the no-comma case gracefully: the previous
    # find()-based slice returned -1 there and silently dropped the last
    # character of the address.
    street_address = prop.partition(', ')[0]
    # Split the leading street number off from the street name.
    return street_address.split(' ', maxsplit=1)
Extract key street address information from the full address.
rateslookup.py
split_property
timbledum/tmscraper
1
python
def split_property(prop): street_address = prop[:prop.find(', ')] return street_address.split(' ', maxsplit=1)
def split_property(prop): street_address = prop[:prop.find(', ')] return street_address.split(' ', maxsplit=1)<|docstring|>Extract key street address information from the full address.<|endoftext|>
608a3d2d3301d589edd5ab9670111bd4b371c23b4a1e83a906d73f3c1b45d004
def get_rates(prop):
    """Scrape the rates information from the Hamilton Council website."""
    number, street = split_property(prop)
    search_params = {
        'searchType': 'StreetAddress',
        'streetNumber': number,
        'streetName': street,
    }
    # Run the address search and parse the results listing.
    search_page = requests.get(settings.rates_url, params=search_params)
    search_soup = BeautifulSoup(search_page.text, 'html.parser')
    results = search_soup.find(attrs={'class': 'form-results'})
    try:
        link = results.find('a')['href']
    except TypeError:
        # No anchor in the results block means the property was not found.
        print('No RV found :(')
        return 'No RV found.'
    # Follow the result link to the property page and read off the value
    # that follows the "Capital value" label.
    detail_page = requests.get(link)
    detail_soup = BeautifulSoup(detail_page.text, 'html.parser')
    label = detail_soup.find(string='Capital value')
    return label.next_element.next_element.text
Scrape the rates information from the Hamilton Council website.
rateslookup.py
get_rates
timbledum/tmscraper
1
python
def get_rates(prop): (number, street) = split_property(prop) prop_params = {'searchType': 'StreetAddress', 'streetNumber': number, 'streetName': street} r = requests.get(settings.rates_url, params=prop_params) r_bs = BeautifulSoup(r.text, 'html.parser') results = r_bs.find(attrs={'class': 'form-results'}) try: link = results.find('a')['href'] except TypeError: print('No RV found :(') return 'No RV found.' prop = requests.get(link) rates_bs = BeautifulSoup(prop.text, 'html.parser') label = rates_bs.find(string='Capital value') rates = label.next_element.next_element.text return rates
def get_rates(prop): (number, street) = split_property(prop) prop_params = {'searchType': 'StreetAddress', 'streetNumber': number, 'streetName': street} r = requests.get(settings.rates_url, params=prop_params) r_bs = BeautifulSoup(r.text, 'html.parser') results = r_bs.find(attrs={'class': 'form-results'}) try: link = results.find('a')['href'] except TypeError: print('No RV found :(') return 'No RV found.' prop = requests.get(link) rates_bs = BeautifulSoup(prop.text, 'html.parser') label = rates_bs.find(string='Capital value') rates = label.next_element.next_element.text return rates<|docstring|>Scrape the rates information from the Hamilton Council website.<|endoftext|>
6c5825ae20b0650a5ad2a33d001502c2b943ec7a8dd16a8eaa6ce6bf2f4db1dd
def to_int(toks):
    """Parser action for converting strings of digits to int."""
    # pyparsing hands over the matched tokens; only the first is relevant.
    digits = toks[0]
    return int(digits)
Parser action for converting strings of digits to int.
campbellsoup/parsers.py
to_int
NBOCampbellToets/CampbellSoup
0
python
def to_int(toks): ' ' return int(toks[0])
def to_int(toks): ' ' return int(toks[0])<|docstring|>Parser action for converting strings of digits to int.<|endoftext|>
c8755a14de9bf4a12e88e88a5ebc15985c44b79223d5fa4c33d978d6c8503be2
def twoOrMore(parserElement):
    """Returns a parser that matches `parserElement` twice or more."""
    # One mandatory match followed by at least one further repetition.
    at_least_one_more = pp.OneOrMore(parserElement)
    return parserElement + at_least_one_more
returns a parser that matches `parserElement` twice or more.
campbellsoup/parsers.py
twoOrMore
NBOCampbellToets/CampbellSoup
0
python
def twoOrMore(parserElement): ' ' return (parserElement + pp.OneOrMore(parserElement))
def twoOrMore(parserElement): ' ' return (parserElement + pp.OneOrMore(parserElement))<|docstring|>returns a parser that matches `parserElement` twice or more.<|endoftext|>
de4db5afe49192847c7c48681bc3b73f1b21f69239a3330aef0ec65de80bb484
def w_is_typed(tokens):
    """Check whether a sequence of commands includes a type specifier."""
    # Any one of these commands makes the question explicitly typed.
    type_specifiers = ('type', 'answerblock', 'drawbox', 'answerfigure')
    return any(specifier in tokens for specifier in type_specifiers)
Check whether a sequence of commands includes a type specifier.
campbellsoup/parsers.py
w_is_typed
NBOCampbellToets/CampbellSoup
0
python
def w_is_typed(tokens): ' ' return (('type' in tokens) or ('answerblock' in tokens) or ('drawbox' in tokens) or ('answerfigure' in tokens))
def w_is_typed(tokens): ' ' return (('type' in tokens) or ('answerblock' in tokens) or ('drawbox' in tokens) or ('answerfigure' in tokens))<|docstring|>Check whether a sequence of commands includes a type specifier.<|endoftext|>
6e689943d2cfadad3a55d3d18088a9f3c8e1ebe9494f09df29704e42ed9574fd
def p_parse(toks):
    """Parsing action for applying p_question_group to a nested token."""
    # Re-run the question-group grammar on the nested raw-text token.
    nested_text = toks[0]
    return p_question_group.parseString(nested_text)
Parsing action for applying p_question_group to a nested token.
campbellsoup/parsers.py
p_parse
NBOCampbellToets/CampbellSoup
0
python
def p_parse(toks): ' ' return p_question_group.parseString(toks[0])
def p_parse(toks): ' ' return p_question_group.parseString(toks[0])<|docstring|>Parsing action for applying p_question_group to a nested token.<|endoftext|>
98ebacee78a4c086b3018e9380bac9145890a6d4ec6668bbe90a8f691b5aeaa4
def debug_all():
    """Helper function for the developer: call .setDebug() on all parsers."""
    import sys
    current_module = sys.modules[__name__]
    # Walk every module attribute and enable debugging on each parser.
    for attribute_name in dir(current_module):
        attribute = getattr(current_module, attribute_name)
        if isinstance(attribute, pp.ParserElement):
            attribute.setDebug()
Helper function for the developer: call .setDebug() on all parsers.
campbellsoup/parsers.py
debug_all
NBOCampbellToets/CampbellSoup
0
python
def debug_all(): ' ' import sys this_module = sys.modules[__name__] for name in dir(this_module): candidate = getattr(this_module, name) if isinstance(candidate, pp.ParserElement): candidate.setDebug()
def debug_all(): ' ' import sys this_module = sys.modules[__name__] for name in dir(this_module): candidate = getattr(this_module, name) if isinstance(candidate, pp.ParserElement): candidate.setDebug()<|docstring|>Helper function for the developer: call .setDebug() on all parsers.<|endoftext|>
d3faba5cf89f2a9ce3a42fc093973a7826c20bb0035b46e3a7c2d47a52b6cfd1
def __init__(self, grid2dfile, filename, name=None):
    """Convert grid2d file into a temporary hdf5 file for reducing memory
    load.

    Args:
        grid2dfile: grid2d file object to save.
        filename (str): Path to where file should be saved (recommended
            it be a temporary dir).
        name (str): Name of layer; if None, will use filename minus the
            extension, or if a multihazard grid2d object, each layer will
            have its own name.
    """
    filename1, file_ext = os.path.splitext(filename)
    if file_ext != '.hdf5':
        # Force the .hdf5 extension so readers can find the file later.
        filename = filename1 + '.hdf5'
        print('Changed extension from %s to .hdf5' % (file_ext,))
    # Blosc at moderate level keeps the temp file small at low CPU cost.
    filters = tables.Filters(complevel=5, complib='blosc')
    with tables.open_file(filename, mode='w') as self.tempfile:
        self.gdict = grid2dfile.getGeoDict()
        # isinstance (rather than the previous type() equality check) also
        # accepts ShakeGrid subclasses.
        if isinstance(grid2dfile, ShakeGrid):
            # Multihazard grid: store each layer as its own carray.
            for layer in grid2dfile.getLayerNames():
                filldat = grid2dfile.getLayer(layer).getData()
                self.tempfile.create_carray(self.tempfile.root, name=layer,
                                            obj=filldat, filters=filters)
            self.shakedict = grid2dfile.getShakeDict()
            self.edict = grid2dfile.getEventDict()
        else:
            if name is None:
                # Default the single layer's name to the file's base name.
                name = os.path.basename(filename1)
            filldat = grid2dfile.getData()
            self.tempfile.create_carray(self.tempfile.root, name=name,
                                        obj=filldat, filters=filters)
    # NOTE(review): self.tempfile now refers to a handle the with-block has
    # already closed; readers reopen the file by path (see getSlice).
    self.filename = os.path.abspath(filename)
Convert grid2d file into a temporary hdf5 file for reducing memory load. Args: grid2dfile: grid2d file object to save filename (str): Path to where file should be saved (recommended it be a temporary dir). name (str): Name of layer, if None, will use filename minus the extension, or if a multihazard grid2d object, each layer will have its own name.
gfail/temphdf.py
__init__
mhearne-usgs/groundfailure
9
python
def __init__(self, grid2dfile, filename, name=None): '\n Convert grid2d file into a temporary hdf5 file for reducing memory\n load.\n\n Args:\n grid2dfile: grid2d file object to save\n filename (str): Path to where file should be saved (recommended\n it be a temporary dir).\n name (str): Name of layer, if None, will use filename minus the\n extension, or if a multihazard grid2d object, each layer will\n have its own name.\n ' (filename1, file_ext) = os.path.splitext(filename) if (file_ext != '.hdf5'): filename = (filename1 + '.hdf5') print(('Changed extension from %s to .hdf5' % (file_ext,))) filters = tables.Filters(complevel=5, complib='blosc') with tables.open_file(filename, mode='w') as self.tempfile: self.gdict = grid2dfile.getGeoDict() if (type(grid2dfile) == ShakeGrid): for layer in grid2dfile.getLayerNames(): filldat = grid2dfile.getLayer(layer).getData() self.tempfile.create_carray(self.tempfile.root, name=layer, obj=filldat, filters=filters) self.shakedict = grid2dfile.getShakeDict() self.edict = grid2dfile.getEventDict() else: if (name is None): name = os.path.basename(filename1) filldat = grid2dfile.getData() self.tempfile.create_carray(self.tempfile.root, name=name, obj=filldat, filters=filters) self.filename = os.path.abspath(filename)
def __init__(self, grid2dfile, filename, name=None): '\n Convert grid2d file into a temporary hdf5 file for reducing memory\n load.\n\n Args:\n grid2dfile: grid2d file object to save\n filename (str): Path to where file should be saved (recommended\n it be a temporary dir).\n name (str): Name of layer, if None, will use filename minus the\n extension, or if a multihazard grid2d object, each layer will\n have its own name.\n ' (filename1, file_ext) = os.path.splitext(filename) if (file_ext != '.hdf5'): filename = (filename1 + '.hdf5') print(('Changed extension from %s to .hdf5' % (file_ext,))) filters = tables.Filters(complevel=5, complib='blosc') with tables.open_file(filename, mode='w') as self.tempfile: self.gdict = grid2dfile.getGeoDict() if (type(grid2dfile) == ShakeGrid): for layer in grid2dfile.getLayerNames(): filldat = grid2dfile.getLayer(layer).getData() self.tempfile.create_carray(self.tempfile.root, name=layer, obj=filldat, filters=filters) self.shakedict = grid2dfile.getShakeDict() self.edict = grid2dfile.getEventDict() else: if (name is None): name = os.path.basename(filename1) filldat = grid2dfile.getData() self.tempfile.create_carray(self.tempfile.root, name=name, obj=filldat, filters=filters) self.filename = os.path.abspath(filename)<|docstring|>Convert grid2d file into a temporary hdf5 file for reducing memory load. Args: grid2dfile: grid2d file object to save filename (str): Path to where file should be saved (recommended it be a temporary dir). name (str): Name of layer, if None, will use filename minus the extension, or if a multihazard grid2d object, each layer will have its own name.<|endoftext|>
81eca51b947250ff8f9de17b439a7bd5f9adf186748798cdef7f8d83faf7ee51
def getFilepath(self):
    """Return the path of the backing hdf5 file."""
    return self.filename
Return file path.
gfail/temphdf.py
getFilepath
mhearne-usgs/groundfailure
9
python
def getFilepath(self): '\n \n ' return self.filename
def getFilepath(self): '\n \n ' return self.filename<|docstring|>Return file path.<|endoftext|>
3b3d082d8d67ea7b51ac1289270c6027d0353d54e16a238862a94411b763d19e
def getGeoDict(self):
    """Return the geodictionary describing the stored grid."""
    return self.gdict
Return geodictionary.
gfail/temphdf.py
getGeoDict
mhearne-usgs/groundfailure
9
python
def getGeoDict(self): '\n \n ' return self.gdict
def getGeoDict(self): '\n \n ' return self.gdict<|docstring|>Return geodictionary.<|endoftext|>
584a0c6eb8a0d72540a7044ac7e74bcbc0da707a2751c1ec0737f26fb80f8c39
def getShakeDict(self):
    """Return shake dictionary if it exists, otherwise None."""
    try:
        return self.shakedict
    except Exception as err:
        # Only ShakeGrid-sourced files carry a shake dictionary.
        print(err)
        print('no shake dictionary found')
        return None
Return shake dictionary if it exists.
gfail/temphdf.py
getShakeDict
mhearne-usgs/groundfailure
9
python
def getShakeDict(self): '\n \n ' try: return self.shakedict except Exception as e: print(e) print('no shake dictionary found') return None
def getShakeDict(self): '\n \n ' try: return self.shakedict except Exception as e: print(e) print('no shake dictionary found') return None<|docstring|>Return shake dictionary if it exists.<|endoftext|>
92fdd637b676e07c4f267f14136771f3ecb3f6c686991ad4693ac79cd8ed67fc
def getEventDict(self):
    """Return event dictionary if it exists, otherwise None."""
    try:
        return self.edict
    except Exception as err:
        # Only ShakeGrid-sourced files carry an event dictionary.
        print(err)
        print('no event dictionary found')
        return None
Return event dictionary if it exists.
gfail/temphdf.py
getEventDict
mhearne-usgs/groundfailure
9
python
def getEventDict(self): '\n \n ' try: return self.edict except Exception as e: print(e) print('no event dictionary found') return None
def getEventDict(self): '\n \n ' try: return self.edict except Exception as e: print(e) print('no event dictionary found') return None<|docstring|>Return event dictionary if it exists.<|endoftext|>
730e42b980f48d640ac331462e962d3eab1e4b75dc0bbd13473fa373f37548ea
def getSlice(self, rowstart=None, rowend=None, colstart=None, colend=None,
             name=None):
    """Return specified slice of data.

    Args:
        rowstart (int, None): Starting row index (inclusive); if None (or
            the historical sentinel -1), will start at 0.
        rowend (int, None): Ending row index (exclusive); if None (or -1),
            will end at last row.
        colstart (int, None): Starting column index (inclusive); if None
            (or -1), will start at 0.
        colend (int, None): Ending column index (exclusive); if None (or
            -1), will end at last column.
        name (str): Name of layer/child name to return; defaults to the
            file's base name without extension.

    Returns:
        numpy array of data.
    """
    if name is None:
        name, _ = os.path.splitext(os.path.basename(self.getFilepath()))

    def _bound(index):
        # None and -1 both mean "open ended" (-1 preserved from the old
        # string-based implementation); anything else is coerced to int.
        if index is None:
            return None
        index = int(index)
        return None if index == -1 else index

    rows = slice(_bound(rowstart), _bound(rowend))
    cols = slice(_bound(colstart), _bound(colend))
    with tables.open_file(self.filename, mode='r') as file1:
        try:
            # getattr + slice objects replace the previous eval() of a
            # string-built expression, which was needlessly unsafe and
            # broke for any negative index containing "-1" (e.g. -12 was
            # silently turned into 2 by a str.replace on the index string).
            node = getattr(file1.root, name)
            return node[rows, cols]
        except Exception as e:
            raise Exception(e)
Return specified slice of data. Args: rowstart (int, None): Starting row index (inclusive), if None, will start at 0. rowend (int, None): Ending row index (exclusive), if None, will end at last row. colstart (int, None): Starting column index (inclusive), if None, will start at 0. colend (int, None): Ending column index (exclusive), if None, will end at last row. name (str): Name of layer/child name to return. Returns: numpy array of data.
gfail/temphdf.py
getSlice
mhearne-usgs/groundfailure
9
python
def getSlice(self, rowstart=None, rowend=None, colstart=None, colend=None, name=None): '\n Return specified slice of data.\n\n Args:\n rowstart (int, None): Starting row index (inclusive), if None, will\n start at 0.\n rowend (int, None): Ending row index (exclusive), if None, will\n end at last row.\n colstart (int, None): Starting column index (inclusive), if None,\n will start at 0.\n colend (int, None): Ending column index (exclusive), if None, will\n end at last row.\n name (str): Name of layer/child name to return.\n\n Returns:\n numpy array of data.\n ' if (name is None): (name, ext) = os.path.splitext(os.path.basename(self.getFilepath())) if (rowstart is None): rowstart = else: rowstart = int(rowstart) if (rowend is None): rowend = else: rowend = int(rowend) if (colstart is None): colstart = else: colstart = int(colstart) if (colend is None): colend = else: colend = int(colend) indstr = ('%s:%s, %s:%s' % (rowstart, rowend, colstart, colend)) indstr = indstr.replace('-1', ) with tables.open_file(self.filename, mode='r') as file1: try: dataslice = eval(('file1.root.%s[%s]' % (name, indstr))) return dataslice except Exception as e: raise Exception(e) return
def getSlice(self, rowstart=None, rowend=None, colstart=None, colend=None, name=None): '\n Return specified slice of data.\n\n Args:\n rowstart (int, None): Starting row index (inclusive), if None, will\n start at 0.\n rowend (int, None): Ending row index (exclusive), if None, will\n end at last row.\n colstart (int, None): Starting column index (inclusive), if None,\n will start at 0.\n colend (int, None): Ending column index (exclusive), if None, will\n end at last row.\n name (str): Name of layer/child name to return.\n\n Returns:\n numpy array of data.\n ' if (name is None): (name, ext) = os.path.splitext(os.path.basename(self.getFilepath())) if (rowstart is None): rowstart = else: rowstart = int(rowstart) if (rowend is None): rowend = else: rowend = int(rowend) if (colstart is None): colstart = else: colstart = int(colstart) if (colend is None): colend = else: colend = int(colend) indstr = ('%s:%s, %s:%s' % (rowstart, rowend, colstart, colend)) indstr = indstr.replace('-1', ) with tables.open_file(self.filename, mode='r') as file1: try: dataslice = eval(('file1.root.%s[%s]' % (name, indstr))) return dataslice except Exception as e: raise Exception(e) return<|docstring|>Return specified slice of data. Args: rowstart (int, None): Starting row index (inclusive), if None, will start at 0. rowend (int, None): Ending row index (exclusive), if None, will end at last row. colstart (int, None): Starting column index (inclusive), if None, will start at 0. colend (int, None): Ending column index (exclusive), if None, will end at last row. name (str): Name of layer/child name to return. Returns: numpy array of data.<|endoftext|>