body_hash
stringlengths
64
64
body
stringlengths
23
109k
docstring
stringlengths
1
57k
path
stringlengths
4
198
name
stringlengths
1
115
repository_name
stringlengths
7
111
repository_stars
float64
0
191k
lang
stringclasses
1 value
body_without_docstring
stringlengths
14
108k
unified
stringlengths
45
133k
9397005a087bcbdc622d4112a3d4bd0ab2157f3cbb4938139ec4a028ac87226c
def get_region_wise_buckets(self): '\n Fetch all buckets in all regions.\n ' try: buckets = self.list_existing_buckets() if buckets: region_wise_bucket = {} for bucket in buckets: region = self.get_bucket_region(bucket) if (region in region_wise_bucket): region_wise_bucket[region].append(bucket) else: region_wise_bucket[region] = [bucket] return region_wise_bucket else: return {} except Exception as e: self.logger.error(e) return {}
Fetch all buckets in all regions.
lib/awsLib/S3.py
get_region_wise_buckets
umang-cb/TAF
9
python
def get_region_wise_buckets(self): '\n \n ' try: buckets = self.list_existing_buckets() if buckets: region_wise_bucket = {} for bucket in buckets: region = self.get_bucket_region(bucket) if (region in region_wise_bucket): region_wise_bucket[region].append(bucket) else: region_wise_bucket[region] = [bucket] return region_wise_bucket else: return {} except Exception as e: self.logger.error(e) return {}
def get_region_wise_buckets(self): '\n \n ' try: buckets = self.list_existing_buckets() if buckets: region_wise_bucket = {} for bucket in buckets: region = self.get_bucket_region(bucket) if (region in region_wise_bucket): region_wise_bucket[region].append(bucket) else: region_wise_bucket[region] = [bucket] return region_wise_bucket else: return {} except Exception as e: self.logger.error(e) return {}<|docstring|>Fetch all buckets in all regions.<|endoftext|>
3e679084fb53fdfa89a859e269c4ea76973f860d3e838ebf71726a270d52b029
def get_bucket_region(self, bucket_name): '\n Gets the region where the bucket is located\n ' try: response = self.s3_client.list_buckets(Bucket=bucket_name) if response: if (response['LocationConstraint'] == None): return 'us-east-1' else: return response['LocationConstraint'] else: return '' except Exception as e: self.logger.error(e) return ''
Gets the region where the bucket is located
lib/awsLib/S3.py
get_bucket_region
umang-cb/TAF
9
python
def get_bucket_region(self, bucket_name): '\n \n ' try: response = self.s3_client.list_buckets(Bucket=bucket_name) if response: if (response['LocationConstraint'] == None): return 'us-east-1' else: return response['LocationConstraint'] else: return except Exception as e: self.logger.error(e) return
def get_bucket_region(self, bucket_name): '\n \n ' try: response = self.s3_client.list_buckets(Bucket=bucket_name) if response: if (response['LocationConstraint'] == None): return 'us-east-1' else: return response['LocationConstraint'] else: return except Exception as e: self.logger.error(e) return <|docstring|>Gets the region where the bucket is located<|endoftext|>
98a7bb25266ddd849cd20252a4e4269054151428ca5ee335b597f9b3a280a90f
def upload_file(self, bucket_name, source_path, destination_path): '\n Uploads a file to bucket specified.\n :param bucket_name: name of the bucket where file has to be uploaded.\n :param source_path: path of the file to be uploaded.\n :param destination_path: path relative to aws bucket. If only file name is specified\n then file will be loaded in root folder relative to bucket.\n :return: True/False\n ' try: response = self.s3_resource.Bucket(bucket_name).upload_file(source_path, destination_path) if (not response): return True else: return False except Exception as e: self.logger.error(e) return False
Uploads a file to bucket specified. :param bucket_name: name of the bucket where file has to be uploaded. :param source_path: path of the file to be uploaded. :param destination_path: path relative to aws bucket. If only file name is specified then file will be loaded in root folder relative to bucket. :return: True/False
lib/awsLib/S3.py
upload_file
umang-cb/TAF
9
python
def upload_file(self, bucket_name, source_path, destination_path): '\n Uploads a file to bucket specified.\n :param bucket_name: name of the bucket where file has to be uploaded.\n :param source_path: path of the file to be uploaded.\n :param destination_path: path relative to aws bucket. If only file name is specified\n then file will be loaded in root folder relative to bucket.\n :return: True/False\n ' try: response = self.s3_resource.Bucket(bucket_name).upload_file(source_path, destination_path) if (not response): return True else: return False except Exception as e: self.logger.error(e) return False
def upload_file(self, bucket_name, source_path, destination_path): '\n Uploads a file to bucket specified.\n :param bucket_name: name of the bucket where file has to be uploaded.\n :param source_path: path of the file to be uploaded.\n :param destination_path: path relative to aws bucket. If only file name is specified\n then file will be loaded in root folder relative to bucket.\n :return: True/False\n ' try: response = self.s3_resource.Bucket(bucket_name).upload_file(source_path, destination_path) if (not response): return True else: return False except Exception as e: self.logger.error(e) return False<|docstring|>Uploads a file to bucket specified. :param bucket_name: name of the bucket where file has to be uploaded. :param source_path: path of the file to be uploaded. :param destination_path: path relative to aws bucket. If only file name is specified then file will be loaded in root folder relative to bucket. :return: True/False<|endoftext|>
eaf06910785f20123b4b878be63d75effbcbdee74f11428b43c9db0c5ee5c00b
def upload_large_file(self, bucket_name, source_path, destination_path, multipart_threshold=((1024 * 1024) * 8), max_concurrency=10, multipart_chunksize=((1024 * 1024) * 8), use_threads=True): '\n Uploads a large file to bucket specified.\n :param bucket_name: name of the bucket where file has to be uploaded.\n :param source_path: path of the file to be uploaded.\n :param destination_path: path relative to aws bucket. If only file name is specified\n then file will be loaded in root folder relative to bucket.\n :param multipart_threshold: The transfer size threshold.\n :param max_concurrency: The maximum number of threads that will be\n making requests to perform a transfer. If ``use_threads`` is\n set to ``False``, the value provided is ignored as the transfer\n will only ever use the main thread.\n :param multipart_chunksize: The partition size of each part for a\n multipart transfer.\n :param use_threads: If True, threads will be used when performing\n S3 transfers. If False, no threads will be used in\n performing transfers\n :return: True/False\n ' '\n WARNING : Please use this function if you want to upload a large file only (ex - file above 10 MB, \n again this value is only subjective), as this API call to AWS is charged extra.\n ' try: config = TransferConfig(multipart_threshold=multipart_threshold, max_concurrency=max_concurrency, multipart_chunksize=multipart_chunksize, use_threads=use_threads) response = self.s3_resource.Bucket(bucket_name).upload_file(source_path, destination_path, Config=config) if (not response): return True else: return False except Exception as e: self.logger.error(e) return False
Uploads a large file to bucket specified. :param bucket_name: name of the bucket where file has to be uploaded. :param source_path: path of the file to be uploaded. :param destination_path: path relative to aws bucket. If only file name is specified then file will be loaded in root folder relative to bucket. :param multipart_threshold: The transfer size threshold. :param max_concurrency: The maximum number of threads that will be making requests to perform a transfer. If ``use_threads`` is set to ``False``, the value provided is ignored as the transfer will only ever use the main thread. :param multipart_chunksize: The partition size of each part for a multipart transfer. :param use_threads: If True, threads will be used when performing S3 transfers. If False, no threads will be used in performing transfers :return: True/False
lib/awsLib/S3.py
upload_large_file
umang-cb/TAF
9
python
def upload_large_file(self, bucket_name, source_path, destination_path, multipart_threshold=((1024 * 1024) * 8), max_concurrency=10, multipart_chunksize=((1024 * 1024) * 8), use_threads=True): '\n Uploads a large file to bucket specified.\n :param bucket_name: name of the bucket where file has to be uploaded.\n :param source_path: path of the file to be uploaded.\n :param destination_path: path relative to aws bucket. If only file name is specified\n then file will be loaded in root folder relative to bucket.\n :param multipart_threshold: The transfer size threshold.\n :param max_concurrency: The maximum number of threads that will be\n making requests to perform a transfer. If ``use_threads`` is\n set to ``False``, the value provided is ignored as the transfer\n will only ever use the main thread.\n :param multipart_chunksize: The partition size of each part for a\n multipart transfer.\n :param use_threads: If True, threads will be used when performing\n S3 transfers. If False, no threads will be used in\n performing transfers\n :return: True/False\n ' '\n WARNING : Please use this function if you want to upload a large file only (ex - file above 10 MB, \n again this value is only subjective), as this API call to AWS is charged extra.\n ' try: config = TransferConfig(multipart_threshold=multipart_threshold, max_concurrency=max_concurrency, multipart_chunksize=multipart_chunksize, use_threads=use_threads) response = self.s3_resource.Bucket(bucket_name).upload_file(source_path, destination_path, Config=config) if (not response): return True else: return False except Exception as e: self.logger.error(e) return False
def upload_large_file(self, bucket_name, source_path, destination_path, multipart_threshold=((1024 * 1024) * 8), max_concurrency=10, multipart_chunksize=((1024 * 1024) * 8), use_threads=True): '\n Uploads a large file to bucket specified.\n :param bucket_name: name of the bucket where file has to be uploaded.\n :param source_path: path of the file to be uploaded.\n :param destination_path: path relative to aws bucket. If only file name is specified\n then file will be loaded in root folder relative to bucket.\n :param multipart_threshold: The transfer size threshold.\n :param max_concurrency: The maximum number of threads that will be\n making requests to perform a transfer. If ``use_threads`` is\n set to ``False``, the value provided is ignored as the transfer\n will only ever use the main thread.\n :param multipart_chunksize: The partition size of each part for a\n multipart transfer.\n :param use_threads: If True, threads will be used when performing\n S3 transfers. If False, no threads will be used in\n performing transfers\n :return: True/False\n ' '\n WARNING : Please use this function if you want to upload a large file only (ex - file above 10 MB, \n again this value is only subjective), as this API call to AWS is charged extra.\n ' try: config = TransferConfig(multipart_threshold=multipart_threshold, max_concurrency=max_concurrency, multipart_chunksize=multipart_chunksize, use_threads=use_threads) response = self.s3_resource.Bucket(bucket_name).upload_file(source_path, destination_path, Config=config) if (not response): return True else: return False except Exception as e: self.logger.error(e) return False<|docstring|>Uploads a large file to bucket specified. :param bucket_name: name of the bucket where file has to be uploaded. :param source_path: path of the file to be uploaded. :param destination_path: path relative to aws bucket. If only file name is specified then file will be loaded in root folder relative to bucket. 
:param multipart_threshold: The transfer size threshold. :param max_concurrency: The maximum number of threads that will be making requests to perform a transfer. If ``use_threads`` is set to ``False``, the value provided is ignored as the transfer will only ever use the main thread. :param multipart_chunksize: The partition size of each part for a multipart transfer. :param use_threads: If True, threads will be used when performing S3 transfers. If False, no threads will be used in performing transfers :return: True/False<|endoftext|>
5be83905dbac7f4adec2eda8683042ea20d668266e206c498fae8ae227868315
def perform(self, data): '\n Parses the given request data and returns a matching response header.\n ' key = self._build_web_socket_accept_from_request_header(data.decode('utf-8')) return self._build_response_header(key)
Parses the given request data and returns a matching response header.
WebSocket/Handshake.py
perform
Cottin/BrowserREPL-for-Sublime
32
python
def perform(self, data): '\n \n ' key = self._build_web_socket_accept_from_request_header(data.decode('utf-8')) return self._build_response_header(key)
def perform(self, data): '\n \n ' key = self._build_web_socket_accept_from_request_header(data.decode('utf-8')) return self._build_response_header(key)<|docstring|>Parses the given request data and returns a matching response header.<|endoftext|>
ee71dd7d48306988994792e91b41c09d50a40b12eb055b656b10a070b73ffd83
def _build_web_socket_accept_from_request_header(self, header): '\n Parses the response header and builds a sec web socket accept.\n ' search_term = 'Sec-WebSocket-Key: ' start = (header.find(search_term) + len(search_term)) end = header.find('\r\n', start) key = header[start:end] guid = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11' key = (key + guid).encode('utf-8') sha1 = hashlib.sha1(key).digest() return base64.b64encode(sha1)
Parses the response header and builds a sec web socket accept.
WebSocket/Handshake.py
_build_web_socket_accept_from_request_header
Cottin/BrowserREPL-for-Sublime
32
python
def _build_web_socket_accept_from_request_header(self, header): '\n \n ' search_term = 'Sec-WebSocket-Key: ' start = (header.find(search_term) + len(search_term)) end = header.find('\r\n', start) key = header[start:end] guid = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11' key = (key + guid).encode('utf-8') sha1 = hashlib.sha1(key).digest() return base64.b64encode(sha1)
def _build_web_socket_accept_from_request_header(self, header): '\n \n ' search_term = 'Sec-WebSocket-Key: ' start = (header.find(search_term) + len(search_term)) end = header.find('\r\n', start) key = header[start:end] guid = '258EAFA5-E914-47DA-95CA-C5AB0DC85B11' key = (key + guid).encode('utf-8') sha1 = hashlib.sha1(key).digest() return base64.b64encode(sha1)<|docstring|>Parses the response header and builds a sec web socket accept.<|endoftext|>
32654773c14170ddf2b497565dceb7a95869d6b430b6cf6a46da460e93b04eaa
def _build_response_header(self, key): '\n Builds the response header containing the given key.\n ' return str(((((('HTTP/1.1 101 Switching Protocols\r\n' + 'Upgrade: websocket\r\n') + 'Connection: Upgrade\r\n') + 'Sec-WebSocket-Accept: ') + key.decode('utf-8')) + '\r\n\r\n'))
Builds the response header containing the given key.
WebSocket/Handshake.py
_build_response_header
Cottin/BrowserREPL-for-Sublime
32
python
def _build_response_header(self, key): '\n \n ' return str(((((('HTTP/1.1 101 Switching Protocols\r\n' + 'Upgrade: websocket\r\n') + 'Connection: Upgrade\r\n') + 'Sec-WebSocket-Accept: ') + key.decode('utf-8')) + '\r\n\r\n'))
def _build_response_header(self, key): '\n \n ' return str(((((('HTTP/1.1 101 Switching Protocols\r\n' + 'Upgrade: websocket\r\n') + 'Connection: Upgrade\r\n') + 'Sec-WebSocket-Accept: ') + key.decode('utf-8')) + '\r\n\r\n'))<|docstring|>Builds the response header containing the given key.<|endoftext|>
b3fe5ebca447393db9dc6aebf147e7c3a7f0319d3220edf6c0c7ab4a76e8359f
@group.command('get') @click.argument('id', type=int) @click.pass_context def get(ctx, id): 'Gets a single record from the table.' record = model.get(id) if (not record): ctx.fail(click.style(f'No record found with id "{id}".', fg='red')) click.echo(record.to_json())
Gets a single record from the table.
rfidsecuritysvc/cli/guest.py
get
bcurnow/rfid-security-svc
0
python
@group.command('get') @click.argument('id', type=int) @click.pass_context def get(ctx, id): record = model.get(id) if (not record): ctx.fail(click.style(f'No record found with id "{id}".', fg='red')) click.echo(record.to_json())
@group.command('get') @click.argument('id', type=int) @click.pass_context def get(ctx, id): record = model.get(id) if (not record): ctx.fail(click.style(f'No record found with id "{id}".', fg='red')) click.echo(record.to_json())<|docstring|>Gets a single record from the table.<|endoftext|>
0307e9c17897a35ee853097b1d412805d9c60eec2d9c149cecdb56beff7bd256
@group.command('list') def list(): 'List all the records in the table.' for i in model.list(): click.echo(i.to_json())
List all the records in the table.
rfidsecuritysvc/cli/guest.py
list
bcurnow/rfid-security-svc
0
python
@group.command('list') def list(): for i in model.list(): click.echo(i.to_json())
@group.command('list') def list(): for i in model.list(): click.echo(i.to_json())<|docstring|>List all the records in the table.<|endoftext|>
0587dcc4d6f624e7c278758ce8724e5814b135c91b086fe48f2d26e2dfa20a35
@group.command('create') @click.argument('first_name') @click.argument('last_name') @click.argument('sound', type=int, required=False) @click.argument('color', type=int, required=False) @click.pass_context def create(ctx, first_name, last_name, sound, color): 'Manually adds a record to the table.' try: model.create(first_name, last_name, sound, color) ctx.invoke(list) except exception.DuplicateGuestError: ctx.fail(click.style(f'Record with first_name "{first_name}" and last_name "{last_name}" already exists.', fg='red'))
Manually adds a record to the table.
rfidsecuritysvc/cli/guest.py
create
bcurnow/rfid-security-svc
0
python
@group.command('create') @click.argument('first_name') @click.argument('last_name') @click.argument('sound', type=int, required=False) @click.argument('color', type=int, required=False) @click.pass_context def create(ctx, first_name, last_name, sound, color): try: model.create(first_name, last_name, sound, color) ctx.invoke(list) except exception.DuplicateGuestError: ctx.fail(click.style(f'Record with first_name "{first_name}" and last_name "{last_name}" already exists.', fg='red'))
@group.command('create') @click.argument('first_name') @click.argument('last_name') @click.argument('sound', type=int, required=False) @click.argument('color', type=int, required=False) @click.pass_context def create(ctx, first_name, last_name, sound, color): try: model.create(first_name, last_name, sound, color) ctx.invoke(list) except exception.DuplicateGuestError: ctx.fail(click.style(f'Record with first_name "{first_name}" and last_name "{last_name}" already exists.', fg='red'))<|docstring|>Manually adds a record to the table.<|endoftext|>
f618e2a779cda06c22ac560b655eaa282ef5551662697d46559179fbe83b0d38
@group.command('delete') @click.argument('id', type=int) @click.pass_context def delete(ctx, id): 'Manually deletes a record from the table.' click.echo(click.style(f'{model.delete(id)} record(s) deleted.', bg='green', fg='black')) ctx.invoke(list)
Manually deletes a record from the table.
rfidsecuritysvc/cli/guest.py
delete
bcurnow/rfid-security-svc
0
python
@group.command('delete') @click.argument('id', type=int) @click.pass_context def delete(ctx, id): click.echo(click.style(f'{model.delete(id)} record(s) deleted.', bg='green', fg='black')) ctx.invoke(list)
@group.command('delete') @click.argument('id', type=int) @click.pass_context def delete(ctx, id): click.echo(click.style(f'{model.delete(id)} record(s) deleted.', bg='green', fg='black')) ctx.invoke(list)<|docstring|>Manually deletes a record from the table.<|endoftext|>
756701c45e61b45cfddc2409751ce904757c55e95afab8b5fb83e39f32d74bd0
@group.command('update') @click.argument('id', type=int) @click.argument('first_name') @click.argument('last_name') @click.argument('sound', type=int, required=False) @click.argument('color', type=int, required=False) @click.pass_context def update(ctx, id, first_name, last_name, sound, color): 'Manually updates a record in the table.' try: model.update(id, first_name, last_name, sound, color) click.echo(click.style('Record updated.', bg='green', fg='black')) ctx.invoke(list) except exception.GuestNotFoundError: ctx.fail(click.style(f'Record with id "{id}" does not exist.', fg='red'))
Manually updates a record in the table.
rfidsecuritysvc/cli/guest.py
update
bcurnow/rfid-security-svc
0
python
@group.command('update') @click.argument('id', type=int) @click.argument('first_name') @click.argument('last_name') @click.argument('sound', type=int, required=False) @click.argument('color', type=int, required=False) @click.pass_context def update(ctx, id, first_name, last_name, sound, color): try: model.update(id, first_name, last_name, sound, color) click.echo(click.style('Record updated.', bg='green', fg='black')) ctx.invoke(list) except exception.GuestNotFoundError: ctx.fail(click.style(f'Record with id "{id}" does not exist.', fg='red'))
@group.command('update') @click.argument('id', type=int) @click.argument('first_name') @click.argument('last_name') @click.argument('sound', type=int, required=False) @click.argument('color', type=int, required=False) @click.pass_context def update(ctx, id, first_name, last_name, sound, color): try: model.update(id, first_name, last_name, sound, color) click.echo(click.style('Record updated.', bg='green', fg='black')) ctx.invoke(list) except exception.GuestNotFoundError: ctx.fail(click.style(f'Record with id "{id}" does not exist.', fg='red'))<|docstring|>Manually updates a record in the table.<|endoftext|>
e0232aecbd67cc7d4490a3e598b4252f88f783b737a276f9898880eb733c16d4
def setStyleSheet(stylesheetname): 'Set stylesheet from the _stylesheets resource (from https://github.com/Alexhuszagh/BreezeStyleSheets).\n\n NOT USED BECAUSE THIS IS UNUSABLE!\n ' if (stylesheetname == 'dark'): ss = qdarkstyle.load_stylesheet(palette=qdarkstyle.DarkPalette) elif (stylesheetname == 'light'): ss = qdarkstyle.load_stylesheet(palette=qdarkstyle.LightPalette) else: ss = '' qw.QApplication.instance().setStyleSheet(ss) settings = qc.QSettings() settings.setValue('theme', stylesheetname)
Set stylesheet from the _stylesheets resource (from https://github.com/Alexhuszagh/BreezeStyleSheets). NOT USED BECAUSE THIS IS UNUSABLE!
argos/utility.py
setStyleSheet
subhacom/argos
1
python
def setStyleSheet(stylesheetname): 'Set stylesheet from the _stylesheets resource (from https://github.com/Alexhuszagh/BreezeStyleSheets).\n\n NOT USED BECAUSE THIS IS UNUSABLE!\n ' if (stylesheetname == 'dark'): ss = qdarkstyle.load_stylesheet(palette=qdarkstyle.DarkPalette) elif (stylesheetname == 'light'): ss = qdarkstyle.load_stylesheet(palette=qdarkstyle.LightPalette) else: ss = qw.QApplication.instance().setStyleSheet(ss) settings = qc.QSettings() settings.setValue('theme', stylesheetname)
def setStyleSheet(stylesheetname): 'Set stylesheet from the _stylesheets resource (from https://github.com/Alexhuszagh/BreezeStyleSheets).\n\n NOT USED BECAUSE THIS IS UNUSABLE!\n ' if (stylesheetname == 'dark'): ss = qdarkstyle.load_stylesheet(palette=qdarkstyle.DarkPalette) elif (stylesheetname == 'light'): ss = qdarkstyle.load_stylesheet(palette=qdarkstyle.LightPalette) else: ss = qw.QApplication.instance().setStyleSheet(ss) settings = qc.QSettings() settings.setValue('theme', stylesheetname)<|docstring|>Set stylesheet from the _stylesheets resource (from https://github.com/Alexhuszagh/BreezeStyleSheets). NOT USED BECAUSE THIS IS UNUSABLE!<|endoftext|>
daf772d391934f3ddd7119145d9d5dcbf114daea403da3841e227d83a40802bd
def init(): 'Initialize logging and Qt settings.' qc.QCoreApplication.setOrganizationName('NIH') qc.QCoreApplication.setOrganizationDomain('nih.gov') qc.QCoreApplication.setApplicationName('Argos') settings = qc.QSettings() logging.basicConfig(stream=sys.stdout, format='%(asctime)s p=%(processName)s[%(process)d] t=%(threadName)s[%(thread)d] %(filename)s#%(lineno)d:%(funcName)s: %(message)s', level=logging.DEBUG) return settings
Initialize logging and Qt settings.
argos/utility.py
init
subhacom/argos
1
python
def init(): qc.QCoreApplication.setOrganizationName('NIH') qc.QCoreApplication.setOrganizationDomain('nih.gov') qc.QCoreApplication.setApplicationName('Argos') settings = qc.QSettings() logging.basicConfig(stream=sys.stdout, format='%(asctime)s p=%(processName)s[%(process)d] t=%(threadName)s[%(thread)d] %(filename)s#%(lineno)d:%(funcName)s: %(message)s', level=logging.DEBUG) return settings
def init(): qc.QCoreApplication.setOrganizationName('NIH') qc.QCoreApplication.setOrganizationDomain('nih.gov') qc.QCoreApplication.setApplicationName('Argos') settings = qc.QSettings() logging.basicConfig(stream=sys.stdout, format='%(asctime)s p=%(processName)s[%(process)d] t=%(threadName)s[%(thread)d] %(filename)s#%(lineno)d:%(funcName)s: %(message)s', level=logging.DEBUG) return settings<|docstring|>Initialize logging and Qt settings.<|endoftext|>
55f0b8db79084862ece37333226c378168a9d5e9407796701d3831b806e80d61
def to_qpolygon(points, scale=1.0): 'Convert a sequence of (x, y) points into a `qg.QPolygonF`.' return qg.QPolygonF([qc.QPointF((p0 * scale), (p1 * scale)) for (p0, p1) in points])
Convert a sequence of (x, y) points into a `qg.QPolygonF`.
argos/utility.py
to_qpolygon
subhacom/argos
1
python
def to_qpolygon(points, scale=1.0): return qg.QPolygonF([qc.QPointF((p0 * scale), (p1 * scale)) for (p0, p1) in points])
def to_qpolygon(points, scale=1.0): return qg.QPolygonF([qc.QPointF((p0 * scale), (p1 * scale)) for (p0, p1) in points])<|docstring|>Convert a sequence of (x, y) points into a `qg.QPolygonF`.<|endoftext|>
843aceec090182b793f1707a2b7fdd365f4e8111ed3b590452d6b0dc66ef8b82
def cond_bbox_overlap(ra, rb, min_iou): 'Check if IoU of axis-aligned bounding boxes overlap.\n\n Parameters\n ----------\n ra, rb: array like\n Rectangles specified as (x, y, w, h)\n min_iou: flat\n Minimum value of IoU to consider overlap.\n\n Returns\n -------\n bool\n True if `ra` and `rb` have IoU >= `min_iou`. False otherwise.\n ' return (rect_iou(ra, rb) >= min_iou)
Check if IoU of axis-aligned bounding boxes overlap. Parameters ---------- ra, rb: array like Rectangles specified as (x, y, w, h) min_iou: flat Minimum value of IoU to consider overlap. Returns ------- bool True if `ra` and `rb` have IoU >= `min_iou`. False otherwise.
argos/utility.py
cond_bbox_overlap
subhacom/argos
1
python
def cond_bbox_overlap(ra, rb, min_iou): 'Check if IoU of axis-aligned bounding boxes overlap.\n\n Parameters\n ----------\n ra, rb: array like\n Rectangles specified as (x, y, w, h)\n min_iou: flat\n Minimum value of IoU to consider overlap.\n\n Returns\n -------\n bool\n True if `ra` and `rb` have IoU >= `min_iou`. False otherwise.\n ' return (rect_iou(ra, rb) >= min_iou)
def cond_bbox_overlap(ra, rb, min_iou): 'Check if IoU of axis-aligned bounding boxes overlap.\n\n Parameters\n ----------\n ra, rb: array like\n Rectangles specified as (x, y, w, h)\n min_iou: flat\n Minimum value of IoU to consider overlap.\n\n Returns\n -------\n bool\n True if `ra` and `rb` have IoU >= `min_iou`. False otherwise.\n ' return (rect_iou(ra, rb) >= min_iou)<|docstring|>Check if IoU of axis-aligned bounding boxes overlap. Parameters ---------- ra, rb: array like Rectangles specified as (x, y, w, h) min_iou: flat Minimum value of IoU to consider overlap. Returns ------- bool True if `ra` and `rb` have IoU >= `min_iou`. False otherwise.<|endoftext|>
53069f0e7d8dc4d9aae9e49455c9204f014c619a1fef4be8b10758496c7572b0
def cond_minrect_overlap(ra, rb, min_iou): 'Check if IoU of minimum area (rotated) bounding rectangles is at least\n `min_iou`.\n\n Parameters\n ----------\n ra: array like\n First rectangle defined by the coordinates of four corners.\n rb: array like\n Second rectangle defined by the coordinates of four corners.\n min_iou: float\n Minimum overlap defined by intersection over union of bounding boxes.\n\n Returns\n -------\n bool\n True if area of overlap is greater or equal to `min_iou`.\n ' (area_i, _) = cv2.intersectConvexConvex(ra, rb) area_u = ((cv2.contourArea(ra) + cv2.contourArea(rb)) - area_i) return (area_i >= (min_iou * area_u))
Check if IoU of minimum area (rotated) bounding rectangles is at least `min_iou`. Parameters ---------- ra: array like First rectangle defined by the coordinates of four corners. rb: array like Second rectangle defined by the coordinates of four corners. min_iou: float Minimum overlap defined by intersection over union of bounding boxes. Returns ------- bool True if area of overlap is greater or equal to `min_iou`.
argos/utility.py
cond_minrect_overlap
subhacom/argos
1
python
def cond_minrect_overlap(ra, rb, min_iou): 'Check if IoU of minimum area (rotated) bounding rectangles is at least\n `min_iou`.\n\n Parameters\n ----------\n ra: array like\n First rectangle defined by the coordinates of four corners.\n rb: array like\n Second rectangle defined by the coordinates of four corners.\n min_iou: float\n Minimum overlap defined by intersection over union of bounding boxes.\n\n Returns\n -------\n bool\n True if area of overlap is greater or equal to `min_iou`.\n ' (area_i, _) = cv2.intersectConvexConvex(ra, rb) area_u = ((cv2.contourArea(ra) + cv2.contourArea(rb)) - area_i) return (area_i >= (min_iou * area_u))
def cond_minrect_overlap(ra, rb, min_iou): 'Check if IoU of minimum area (rotated) bounding rectangles is at least\n `min_iou`.\n\n Parameters\n ----------\n ra: array like\n First rectangle defined by the coordinates of four corners.\n rb: array like\n Second rectangle defined by the coordinates of four corners.\n min_iou: float\n Minimum overlap defined by intersection over union of bounding boxes.\n\n Returns\n -------\n bool\n True if area of overlap is greater or equal to `min_iou`.\n ' (area_i, _) = cv2.intersectConvexConvex(ra, rb) area_u = ((cv2.contourArea(ra) + cv2.contourArea(rb)) - area_i) return (area_i >= (min_iou * area_u))<|docstring|>Check if IoU of minimum area (rotated) bounding rectangles is at least `min_iou`. Parameters ---------- ra: array like First rectangle defined by the coordinates of four corners. rb: array like Second rectangle defined by the coordinates of four corners. min_iou: float Minimum overlap defined by intersection over union of bounding boxes. Returns ------- bool True if area of overlap is greater or equal to `min_iou`.<|endoftext|>
0e4069cf6319e20be1e4a437d9cdc9715c7f522c9f419a017a3beb7d3609cb2e
def cond_proximity(points_a, points_b, min_dist):
    """Check if the proximity of two arrays of points is more than `min_dist`.

    To take the shape of the object into account, I use the following measure
    of distance:
    scale the distance between centres of mass by the geometric mean of the
    square roots of the second moments.

    (x1 - x2) / sqrt(sigma_1_x * sigma_2_x)
    (y1 - y2) / sqrt(sigma_1_y * sigma_2_y)

    Parameters
    ----------
    points_a: array like
        Sequence of points
    points_b: array like
        Sequence of points
    min_dist: float
        Minimum distance.

    Returns
    -------
    bool
        `True` if the centres of mass (mean position) of `points_a` and
        `points_b` are closer than `min_dist`, `False` otherwise.
    """
    # Product of the per-axis standard deviations acts as the squared
    # scale factor for each axis.
    scale = np.std(points_a, axis=0) * np.std(points_b, axis=0)
    delta = np.mean(points_a, axis=0) - np.mean(points_b, axis=0)
    dx2 = delta[0] ** 2 / scale[0]
    dy2 = delta[1] ** 2 / scale[1]
    return dx2 + dy2 < min_dist ** 2
Check if the proximity of two arrays of points is more than `min_dist`. To take the shape of the object into account, I use the following measure of distance: scale the distance between centres of mass by the geometric mean of the square roots of the second moments. (x1 - x2) / sqrt(sigma_1_x * sigma_2_x) (y1 - y2) / sqrt(sigma_1_y * sigma_2_y) Parameters ---------- points_a: array like Sequence of points points_b: array like Sequence of points min_dist: float Minimum distance. Returns ------- bool `True` if the centres of mass (mean position) of `points_a` and `points_b` are closer than `min_dist`, `False` otherwise.
argos/utility.py
cond_proximity
subhacom/argos
1
python
def cond_proximity(points_a, points_b, min_dist): 'Check if the proximity of two arrays of points is more than `min_dist`.\n\n To take the shape of the object into account, I use the following measure\n of distance:\n scale the distance between centres of mass by the geometric mean of the\n square roots of the second moments.\n\n (x1 - x2) / sqrt(sigma_1_x * sigma_2_x)\n (y1 - y2) / sqrt(sigma_1_y * sigma_2_y)\n\n Parameters\n ----------\n points_a: array like\n Sequence of points\n points_b: array like\n Sequence of points\n min_dist: float\n Minimum distance.\n\n Returns\n -------\n bool\n `True` if the centres of mass (mean position) of `points_a` and\n `points_b` are closer than `min_dist`, `False` otherwise.\n ' sigma = (np.std(points_a, axis=0) * np.std(points_b, axis=0)) dx2 = (((np.mean(points_a[(:, 0)]) - np.mean(points_b[(:, 0)])) ** 2) / sigma[0]) dy2 = (((np.mean(points_a[(:, 1)]) - np.mean(points_b[(:, 1)])) ** 2) / sigma[1]) return ((dx2 + dy2) < (min_dist ** 2))
def cond_proximity(points_a, points_b, min_dist): 'Check if the proximity of two arrays of points is more than `min_dist`.\n\n To take the shape of the object into account, I use the following measure\n of distance:\n scale the distance between centres of mass by the geometric mean of the\n square roots of the second moments.\n\n (x1 - x2) / sqrt(sigma_1_x * sigma_2_x)\n (y1 - y2) / sqrt(sigma_1_y * sigma_2_y)\n\n Parameters\n ----------\n points_a: array like\n Sequence of points\n points_b: array like\n Sequence of points\n min_dist: float\n Minimum distance.\n\n Returns\n -------\n bool\n `True` if the centres of mass (mean position) of `points_a` and\n `points_b` are closer than `min_dist`, `False` otherwise.\n ' sigma = (np.std(points_a, axis=0) * np.std(points_b, axis=0)) dx2 = (((np.mean(points_a[(:, 0)]) - np.mean(points_b[(:, 0)])) ** 2) / sigma[0]) dy2 = (((np.mean(points_a[(:, 1)]) - np.mean(points_b[(:, 1)])) ** 2) / sigma[1]) return ((dx2 + dy2) < (min_dist ** 2))<|docstring|>Check if the proximity of two arrays of points is more than `min_dist`. To take the shape of the object into account, I use the following measure of distance: scale the distance between centres of mass by the geometric mean of the square roots of the second moments. (x1 - x2) / sqrt(sigma_1_x * sigma_2_x) (y1 - y2) / sqrt(sigma_1_y * sigma_2_y) Parameters ---------- points_a: array like Sequence of points points_b: array like Sequence of points min_dist: float Minimum distance. Returns ------- bool `True` if the centres of mass (mean position) of `points_a` and `points_b` are closer than `min_dist`, `False` otherwise.<|endoftext|>
9162c9d0949fafaa8ccca8e1b854819d01093d37771bc71d88741aae66309892
def cv2qimage(frame: np.ndarray, copy: bool = False) -> qg.QImage:
    """Convert BGR/gray/bw frame from array into QImage.

    OpenCV reads images into 2D or 3D matrix. This function converts it into
    Qt QImage.

    Parameters
    ----------
    frame: numpy.ndarray
        Input image data as a 2D (black and white, gray) or 3D (color, OpenCV
        reads images in BGR instead of RGB format) array.
    copy: bool, default False
        If True make a copy of the image data.

    Returns
    -------
    qg.QImage
        Converted image.

    Raises
    ------
    ValueError
        If `frame` is neither a 2D array nor a 3D array with 3 channels.
    """
    if len(frame.shape) == 3 and frame.shape[2] == 3:
        # OpenCV stores color as BGR; Qt expects RGB byte order.
        img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        h, w, c = img.shape
        qimg = qg.QImage(img.tobytes(), w, h, w * c, qg.QImage.Format_RGB888)
    elif len(frame.shape) == 2:
        h, w = frame.shape
        qimg = qg.QImage(frame.tobytes(), w, h, w * 1,
                         qg.QImage.Format_Grayscale8)
    else:
        # Previously an unsupported shape fell through to an
        # UnboundLocalError on `qimg`; fail with a clear message instead.
        raise ValueError(f'Unsupported frame shape: {frame.shape}')
    if copy:
        return qimg.copy()
    return qimg
Convert BGR/gray/bw frame from array into QImage". OpenCV reads images into 2D or 3D matrix. This function converts it into Qt QImage. Parameters ---------- frame: numpy.ndarray Input image data as a 2D (black and white, gray() or 3D (color, OpenCV reads images in BGR instead of RGB format) array. copy: bool, default False If True Make a copy of the image data. Returns ------- qg.QImage Converted image.
argos/utility.py
cv2qimage
subhacom/argos
1
python
def cv2qimage(frame: np.ndarray, copy: bool=False) -> qg.QImage: 'Convert BGR/gray/bw frame from array into QImage".\n\n OpenCV reads images into 2D or 3D matrix. This function converts it into\n Qt QImage.\n\n Parameters\n ----------\n frame: numpy.ndarray\n Input image data as a 2D (black and white, gray() or 3D (color, OpenCV\n reads images in BGR instead of RGB format) array.\n copy: bool, default False\n If True Make a copy of the image data.\n\n Returns\n -------\n qg.QImage\n Converted image.\n ' if ((len(frame.shape) == 3) and (frame.shape[2] == 3)): img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) (h, w, c) = img.shape qimg = qg.QImage(img.tobytes(), w, h, (w * c), qg.QImage.Format_RGB888) elif (len(frame.shape) == 2): (h, w) = frame.shape qimg = qg.QImage(frame.tobytes(), w, h, (w * 1), qg.QImage.Format_Grayscale8) if copy: return qimg.copy() return qimg
def cv2qimage(frame: np.ndarray, copy: bool=False) -> qg.QImage: 'Convert BGR/gray/bw frame from array into QImage".\n\n OpenCV reads images into 2D or 3D matrix. This function converts it into\n Qt QImage.\n\n Parameters\n ----------\n frame: numpy.ndarray\n Input image data as a 2D (black and white, gray() or 3D (color, OpenCV\n reads images in BGR instead of RGB format) array.\n copy: bool, default False\n If True Make a copy of the image data.\n\n Returns\n -------\n qg.QImage\n Converted image.\n ' if ((len(frame.shape) == 3) and (frame.shape[2] == 3)): img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) (h, w, c) = img.shape qimg = qg.QImage(img.tobytes(), w, h, (w * c), qg.QImage.Format_RGB888) elif (len(frame.shape) == 2): (h, w) = frame.shape qimg = qg.QImage(frame.tobytes(), w, h, (w * 1), qg.QImage.Format_Grayscale8) if copy: return qimg.copy() return qimg<|docstring|>Convert BGR/gray/bw frame from array into QImage". OpenCV reads images into 2D or 3D matrix. This function converts it into Qt QImage. Parameters ---------- frame: numpy.ndarray Input image data as a 2D (black and white, gray() or 3D (color, OpenCV reads images in BGR instead of RGB format) array. copy: bool, default False If True Make a copy of the image data. Returns ------- qg.QImage Converted image.<|endoftext|>
4257d44bcaeff862fce5adfe1e4b8754e818df60f1a812108c1b399d5a6c8107
def match_bboxes(id_bboxes: dict, new_bboxes: np.ndarray,
                 boxtype: OutlineStyle,
                 metric: DistanceMetric = DistanceMetric.euclidean,
                 max_dist: float = 10000
                 ) -> Tuple[Dict[int, int], Set[int], Set[int]]:
    """Match the rectangular bounding boxes in `new_bboxes` to the closest
    object in the `id_bboxes` dictionary.

    Parameters
    ----------
    id_bboxes: dict[int, np.ndarray]
        Mapping ids to bounding boxes
    new_bboxes: np.ndarray
        Array of new bounding boxes to be matched to those in ``id_bboxes``.
    boxtype: {OutlineStyle.bbox, OutlineStyle.minrect}
        Type of bounding box to match.
    max_dist: int, default 10000
        Anything that is more than this distance from all of the bboxes in
        ``id_bboxes`` are put in the unmatched list
    metric: {DistanceMetric.euclidean, DistanceMetric.iou}
        `DistanceMetric.euclidean` for Euclidean distance between centers of
        the boxes. `DistanceMetric.iou` for area of intersection over union of
        the boxes.

    Returns
    -------
    matched: dict[int, int]
        Mapping keys in ``id_bboxes`` to bbox indices in ``new_bboxes`` that
        are closest.
    new_unmatched: set[int]
        Set of indices into `bboxes` that did not match anything in
        ``id_bboxes``
    old_unmatched: set[int]
        Set of keys in ``id_bboxes`` whose corresponding bbox values did not
        match anything in ``bboxes``.
    """
    if len(id_bboxes) == 0:
        # Nothing to match against: every new bbox is unmatched.
        # Fixed: the old code returned `{}` (an empty dict) for
        # `old_unmatched` although the documented type is a set.
        return {}, set(range(len(new_bboxes))), set()
    labels = list(id_bboxes.keys())
    bboxes = np.array(np.rint(list(id_bboxes.values())), dtype=np.int_)
    dist_matrix = pairwise_distance(new_bboxes, bboxes, boxtype=boxtype,
                                    metric=metric)
    # Hungarian algorithm: globally optimal one-to-one assignment.
    row_ind, col_ind = optimize.linear_sum_assignment(dist_matrix)
    if metric == DistanceMetric.euclidean:
        # Threshold is squared here, suggesting pairwise_distance returns
        # squared Euclidean distances -- TODO confirm against
        # pairwise_distance implementation.
        max_dist *= max_dist
    result = [(row, labels[col], (labels[col], row))
              for row, col in zip(row_ind, col_ind)
              if dist_matrix[row, col] < max_dist]
    if len(result) > 0:
        good_rows, good_cols, matched = zip(*result)
        good_rows = set(good_rows)
        good_cols = set(good_cols)
        matched = dict(matched)
        new_unmatched = set(range(len(new_bboxes))) - good_rows
        old_unmatched = set(id_bboxes.keys()) - good_cols
    else:
        matched = {}
        new_unmatched = set(range(len(new_bboxes)))
        old_unmatched = set(id_bboxes.keys())
    return matched, new_unmatched, old_unmatched
Match the rectangular bounding boxes in `new_bboxes` to the closest object in the `id_bboxes` dictionary. Parameters ---------- id_bboxes: dict[int, np.ndarray] Mapping ids to bounding boxes new_bboxes: np.ndarray Array of new bounding boxes to be matched to those in ``id_bboxes``. boxtype: {OutlineStyle.bbox, OutlineStyle.minrect} Type of bounding box to match. max_dist: int, default 10000 Anything that is more than this distance from all of the bboxes in ``id_bboxes`` are put in the unmatched list metric: {DistanceMetric.euclidean, DistanceMetric.iou} `DistanceMetric.euclidean` for Euclidean distance between centers of the boxes. `DistanceMetric.iou` for area of inetersection over union of the boxes, Returns ------- matched: dict[int, int] Mapping keys in ``id_bboxes`` to bbox indices in ``new_bboxes`` that are closest. new_unmatched: set[int] Set of indices into `bboxes` that did not match anything in ``id_bboxes`` old_unmatched: set[int] Set of keys in ``id_bboxes`` whose corresponding bbox values did not match anything in ``bboxes``.
argos/utility.py
match_bboxes
subhacom/argos
1
python
def match_bboxes(id_bboxes: dict, new_bboxes: np.ndarray, boxtype: OutlineStyle, metric: DistanceMetric=DistanceMetric.euclidean, max_dist: float=10000) -> Tuple[(Dict[(int, int)], Set[int], Set[int])]: 'Match the rectangular bounding boxes in `new_bboxes` to the closest\n object in the `id_bboxes` dictionary.\n\n Parameters\n ----------\n id_bboxes: dict[int, np.ndarray]\n Mapping ids to bounding boxes\n new_bboxes: np.ndarray\n Array of new bounding boxes to be matched to those in ``id_bboxes``.\n boxtype: {OutlineStyle.bbox, OutlineStyle.minrect}\n Type of bounding box to match.\n max_dist: int, default 10000\n Anything that is more than this distance from all of the bboxes in\n ``id_bboxes`` are put in the unmatched list\n metric: {DistanceMetric.euclidean, DistanceMetric.iou}\n `DistanceMetric.euclidean` for Euclidean distance between centers of the\n boxes. `DistanceMetric.iou` for area of inetersection over union of the\n boxes,\n\n Returns\n -------\n matched: dict[int, int]\n Mapping keys in ``id_bboxes`` to bbox indices in ``new_bboxes`` that are\n closest.\n new_unmatched: set[int]\n Set of indices into `bboxes` that did not match anything in\n ``id_bboxes``\n old_unmatched: set[int]\n Set of keys in ``id_bboxes`` whose corresponding bbox values did not\n match anything in ``bboxes``.\n ' if (len(id_bboxes) == 0): return ({}, set(range(len(new_bboxes))), {}) labels = list(id_bboxes.keys()) bboxes = np.array(np.rint(list(id_bboxes.values())), dtype=np.int_) dist_matrix = pairwise_distance(new_bboxes, bboxes, boxtype=boxtype, metric=metric) (row_ind, col_ind) = optimize.linear_sum_assignment(dist_matrix) if (metric == DistanceMetric.euclidean): max_dist *= max_dist result = [(row, labels[col], (labels[col], row)) for (row, col) in zip(row_ind, col_ind) if (dist_matrix[(row, col)] < max_dist)] if (len(result) > 0): (good_rows, good_cols, matched) = zip(*result) good_rows = set(good_rows) good_cols = set(good_cols) matched = dict(matched) new_unmatched = 
(set(range(len(new_bboxes))) - good_rows) old_unmatched = (set(id_bboxes.keys()) - good_cols) else: matched = {} new_unmatched = set(range(len(new_bboxes))) old_unmatched = set(id_bboxes.keys()) return (matched, new_unmatched, old_unmatched)
def match_bboxes(id_bboxes: dict, new_bboxes: np.ndarray, boxtype: OutlineStyle, metric: DistanceMetric=DistanceMetric.euclidean, max_dist: float=10000) -> Tuple[(Dict[(int, int)], Set[int], Set[int])]: 'Match the rectangular bounding boxes in `new_bboxes` to the closest\n object in the `id_bboxes` dictionary.\n\n Parameters\n ----------\n id_bboxes: dict[int, np.ndarray]\n Mapping ids to bounding boxes\n new_bboxes: np.ndarray\n Array of new bounding boxes to be matched to those in ``id_bboxes``.\n boxtype: {OutlineStyle.bbox, OutlineStyle.minrect}\n Type of bounding box to match.\n max_dist: int, default 10000\n Anything that is more than this distance from all of the bboxes in\n ``id_bboxes`` are put in the unmatched list\n metric: {DistanceMetric.euclidean, DistanceMetric.iou}\n `DistanceMetric.euclidean` for Euclidean distance between centers of the\n boxes. `DistanceMetric.iou` for area of inetersection over union of the\n boxes,\n\n Returns\n -------\n matched: dict[int, int]\n Mapping keys in ``id_bboxes`` to bbox indices in ``new_bboxes`` that are\n closest.\n new_unmatched: set[int]\n Set of indices into `bboxes` that did not match anything in\n ``id_bboxes``\n old_unmatched: set[int]\n Set of keys in ``id_bboxes`` whose corresponding bbox values did not\n match anything in ``bboxes``.\n ' if (len(id_bboxes) == 0): return ({}, set(range(len(new_bboxes))), {}) labels = list(id_bboxes.keys()) bboxes = np.array(np.rint(list(id_bboxes.values())), dtype=np.int_) dist_matrix = pairwise_distance(new_bboxes, bboxes, boxtype=boxtype, metric=metric) (row_ind, col_ind) = optimize.linear_sum_assignment(dist_matrix) if (metric == DistanceMetric.euclidean): max_dist *= max_dist result = [(row, labels[col], (labels[col], row)) for (row, col) in zip(row_ind, col_ind) if (dist_matrix[(row, col)] < max_dist)] if (len(result) > 0): (good_rows, good_cols, matched) = zip(*result) good_rows = set(good_rows) good_cols = set(good_cols) matched = dict(matched) new_unmatched = 
(set(range(len(new_bboxes))) - good_rows) old_unmatched = (set(id_bboxes.keys()) - good_cols) else: matched = {} new_unmatched = set(range(len(new_bboxes))) old_unmatched = set(id_bboxes.keys()) return (matched, new_unmatched, old_unmatched)<|docstring|>Match the rectangular bounding boxes in `new_bboxes` to the closest object in the `id_bboxes` dictionary. Parameters ---------- id_bboxes: dict[int, np.ndarray] Mapping ids to bounding boxes new_bboxes: np.ndarray Array of new bounding boxes to be matched to those in ``id_bboxes``. boxtype: {OutlineStyle.bbox, OutlineStyle.minrect} Type of bounding box to match. max_dist: int, default 10000 Anything that is more than this distance from all of the bboxes in ``id_bboxes`` are put in the unmatched list metric: {DistanceMetric.euclidean, DistanceMetric.iou} `DistanceMetric.euclidean` for Euclidean distance between centers of the boxes. `DistanceMetric.iou` for area of inetersection over union of the boxes, Returns ------- matched: dict[int, int] Mapping keys in ``id_bboxes`` to bbox indices in ``new_bboxes`` that are closest. new_unmatched: set[int] Set of indices into `bboxes` that did not match anything in ``id_bboxes`` old_unmatched: set[int] Set of keys in ``id_bboxes`` whose corresponding bbox values did not match anything in ``bboxes``.<|endoftext|>
9c41626a4bf8c4719ecccfb2bba3a8ece732e1d102b05af526fbb71f75bdf923
def reconnect(signal, newhandler=None, oldhandler=None):
    """Disconnect PyQt signal from oldhandler and connect to newhandler.

    Repeatedly disconnects `oldhandler` (or all handlers if `oldhandler` is
    None) until the signal raises TypeError, signalling that nothing is left
    to disconnect, then connects `newhandler` if one is given.
    """
    connected = True
    while connected:
        try:
            if oldhandler is None:
                signal.disconnect()
            else:
                signal.disconnect(oldhandler)
        except TypeError:
            # Qt raises TypeError once there is no matching connection left.
            connected = False
    if newhandler is not None:
        signal.connect(newhandler)
Disconnect PyQt signal from oldhandler and connect to newhandler
argos/utility.py
reconnect
subhacom/argos
1
python
def reconnect(signal, newhandler=None, oldhandler=None): while True: try: if (oldhandler is not None): signal.disconnect(oldhandler) else: signal.disconnect() except TypeError: break if (newhandler is not None): signal.connect(newhandler)
def reconnect(signal, newhandler=None, oldhandler=None): while True: try: if (oldhandler is not None): signal.disconnect(oldhandler) else: signal.disconnect() except TypeError: break if (newhandler is not None): signal.connect(newhandler)<|docstring|>Disconnect PyQt signal from oldhandler and connect to newhandler<|endoftext|>
b6e83f7a9c0e62632e3be6b6aa2ce2865cc70ae27ad48a5ca42be29c0456f0c9
def make_color(num: int) -> Tuple[int]:
    """Create a random color based on number.

    The provided number is passed through the murmur hash function in order
    to generate bytes which are somewhat apart from each other. The three
    least significant byte values are taken as r, g, and b.

    Parameters
    ----------
    num: int
        number to use as hash key

    Returns
    -------
    bytes[3]
        (r, g, b) values
    """
    hashed = murmurhash3_32(num, positive=True)
    as_bytes = hashed.to_bytes(8, 'little')
    # The three lowest bytes become the (r, g, b) channels.
    return as_bytes[:3]
Create a random color based on number. The provided number is passed through the murmur hash function in order to generate bytes which are somewhat apart from each other. The three least significant byte values are taken as r, g, and b. Parameters ---------- num: int number to use as hash key Returns ------- bytes[3] (r, g, b) values
argos/utility.py
make_color
subhacom/argos
1
python
def make_color(num: int) -> Tuple[int]: 'Create a random color based on number.\n\n The provided number is passed through the murmur hash function in order\n to generate bytes which are somewhat apart from each other. The three least\n significant byte values are taken as r, g, and b.\n\n Parameters\n ----------\n num: int\n number to use as hash key\n\n Returns\n -------\n bytes[3]\n (r, g, b) values\n\n ' val = murmurhash3_32(num, positive=True).to_bytes(8, 'little') return val[:3]
def make_color(num: int) -> Tuple[int]: 'Create a random color based on number.\n\n The provided number is passed through the murmur hash function in order\n to generate bytes which are somewhat apart from each other. The three least\n significant byte values are taken as r, g, and b.\n\n Parameters\n ----------\n num: int\n number to use as hash key\n\n Returns\n -------\n bytes[3]\n (r, g, b) values\n\n ' val = murmurhash3_32(num, positive=True).to_bytes(8, 'little') return val[:3]<|docstring|>Create a random color based on number. The provided number is passed through the murmur hash function in order to generate bytes which are somewhat apart from each other. The three least significant byte values are taken as r, g, and b. Parameters ---------- num: int number to use as hash key Returns ------- bytes[3] (r, g, b) values<|endoftext|>
079790c218ea818fc5969476e6c333ca37802b432ca80370527cda4429797a5c
def get_cmap_color(num, maxnum, cmap):
    """Get rgb based on specified colormap `cmap` for index `num` where the
    total range of values is (0, maxnum].

    Parameters
    ----------
    num: real number
        Position into colormap.
    maxnum: real number
        Normalize `num` by this value.
    cmap: str
        Name of colormap

    Returns
    -------
    tuple: (r, g, b)
        The red, green and blue value for the color at position `num`/`maxnum`
        in the (0, 1) range of the colormap.
    """
    rgba = cm.get_cmap(cmap)(float(num) / maxnum)
    # Fixed: the old code returned a generator expression even though the
    # docstring promises a tuple; clamp each channel into [0, 255].
    return tuple(max(0, min(255, floor(v * 256))) for v in rgba[:3])
Get rgb based on specified colormap `cmap` for index `num` where the total range of values is (0, maxnum]. Parameters ---------- num: real number Position into colormap. maxnum: real number Normalize `num` by this value. cmap: str Name of colormap Returns ------- tuple: (r, g, b) The red, green and blue value for the color at position `num`/`maxnum` in the (0, 1) range of the colormap.
argos/utility.py
get_cmap_color
subhacom/argos
1
python
def get_cmap_color(num, maxnum, cmap): 'Get rgb based on specified colormap `cmap` for index `num` where the\n total range of values is (0, maxnum].\n\n Parameters\n ----------\n num: real number\n Position into colormap.\n maxnum: real number\n Normalize `num` by this value.\n cmap: str\n Name of colormap\n\n Returns\n -------\n tuple: (r, g, b)\n The red, green and blue value for the color at position `num`/`maxnum`\n in the (0, 1) range of the colormap.\n ' rgba = cm.get_cmap(cmap)((float(num) / maxnum)) int_rgb = (max(0, min(255, floor((v * 256)))) for v in rgba[:3]) return int_rgb
def get_cmap_color(num, maxnum, cmap): 'Get rgb based on specified colormap `cmap` for index `num` where the\n total range of values is (0, maxnum].\n\n Parameters\n ----------\n num: real number\n Position into colormap.\n maxnum: real number\n Normalize `num` by this value.\n cmap: str\n Name of colormap\n\n Returns\n -------\n tuple: (r, g, b)\n The red, green and blue value for the color at position `num`/`maxnum`\n in the (0, 1) range of the colormap.\n ' rgba = cm.get_cmap(cmap)((float(num) / maxnum)) int_rgb = (max(0, min(255, floor((v * 256)))) for v in rgba[:3]) return int_rgb<|docstring|>Get rgb based on specified colormap `cmap` for index `num` where the total range of values is (0, maxnum]. Parameters ---------- num: real number Position into colormap. maxnum: real number Normalize `num` by this value. cmap: str Name of colormap Returns ------- tuple: (r, g, b) The red, green and blue value for the color at position `num`/`maxnum` in the (0, 1) range of the colormap.<|endoftext|>
29cea57d5853fa01f4f57192b7398bd33dfec26e4bcb0a74638a9868e6f36357
def extract_frames(vidfile, nframes, scale=1.0, outdir='.', random=False):
    """Extract `nframes` frames from `vidfile` into `outdir`.

    Parameters
    ----------
    vidfile: str
        Path of the video file to read.
    nframes: int
        Number of frames to extract.
    scale: float, default 1.0
        Factor by which each saved frame is scaled.
    outdir: str, default '.'
        Directory in which the frames are saved as PNG files.
    random: bool, default False
        If True, pick `nframes` random frames; otherwise pick evenly spaced
        frames.
    """
    cap = cv2.VideoCapture(vidfile)
    fname = os.path.basename(vidfile)
    prefix = fname.rpartition('.')[0]
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    idx = np.arange(frame_count, dtype=int)
    if frame_count > nframes:
        if random:
            np.random.shuffle(idx)
            idx = idx[:nframes]
        else:
            # max(1, ...) guards against a zero step (ValueError in slicing)
            # if nframes exceeds frame_count despite the check above.
            step = max(1, frame_count // nframes)
            idx = idx[::step]
    idx = sorted(idx)
    ii = 0   # index of the frame just read from the video
    jj = 0   # position in the sorted list of wanted frame indices
    # Fixed: stop once all wanted frames are written; the old loop kept
    # indexing `idx[jj]` past the end and raised IndexError.
    while cap.isOpened() and jj < len(idx):
        ret, frame = cap.read()
        if frame is None:
            break
        if idx[jj] == ii:
            size = (int(frame.shape[1] * scale), int(frame.shape[0] * scale))
            # Fixed: the interpolation flag must be passed by keyword; the
            # third positional argument of cv2.resize is `dst`, not
            # `interpolation`.
            if scale < 1:
                frame = cv2.resize(frame, size, interpolation=cv2.INTER_AREA)
            elif scale > 1:
                frame = cv2.resize(frame, size, interpolation=cv2.INTER_CUBIC)
            cv2.imwrite(os.path.join(outdir, f'{prefix}_{idx[jj]:06d}.png'),
                        frame)
            jj += 1
        ii += 1
    cap.release()
Extract `nframes` frames from `vidfile` into `outdir`
argos/utility.py
extract_frames
subhacom/argos
1
python
def extract_frames(vidfile, nframes, scale=1.0, outdir='.', random=False): cap = cv2.VideoCapture(vidfile) fname = os.path.basename(vidfile) prefix = fname.rpartition('.')[0] frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) idx = np.arange(frame_count, dtype=int) if (frame_count < nframes): if random: np.random.shuffle(idx) idx = idx[:nframes] else: step = (frame_count // nframes) idx = idx[::step] idx = sorted(idx) ii = 0 jj = 0 while cap.isOpened(): (ret, frame) = cap.read() if (frame is None): break if (idx[jj] == ii): size = (int((frame.shape[1] * scale)), int((frame.shape[0] * scale))) if (scale < 1): frame = cv2.resize(frame, size, cv2.INTER_AREA) elif (scale > 1): frame = cv2.resize(frame, size, cv2.INTER_CUBIC) cv2.imwrite(os.path.join(outdir, f'{prefix}_{idx[jj]:06d}.png'), frame) jj += 1 ii += 1 cap.release()
def extract_frames(vidfile, nframes, scale=1.0, outdir='.', random=False): cap = cv2.VideoCapture(vidfile) fname = os.path.basename(vidfile) prefix = fname.rpartition('.')[0] frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) idx = np.arange(frame_count, dtype=int) if (frame_count < nframes): if random: np.random.shuffle(idx) idx = idx[:nframes] else: step = (frame_count // nframes) idx = idx[::step] idx = sorted(idx) ii = 0 jj = 0 while cap.isOpened(): (ret, frame) = cap.read() if (frame is None): break if (idx[jj] == ii): size = (int((frame.shape[1] * scale)), int((frame.shape[0] * scale))) if (scale < 1): frame = cv2.resize(frame, size, cv2.INTER_AREA) elif (scale > 1): frame = cv2.resize(frame, size, cv2.INTER_CUBIC) cv2.imwrite(os.path.join(outdir, f'{prefix}_{idx[jj]:06d}.png'), frame) jj += 1 ii += 1 cap.release()<|docstring|>Extract `nframes` frames from `vidfile` into `outdir`<|endoftext|>
722c1a6e5a3cbe11725e839f5883dae6ed2bcda2a2edea4aa31a58d10166bc0b
def points2rect(p0: np.ndarray, p1: np.ndarray) -> np.ndarray:
    """Convert diagonally opposite vertices into (x, y, w, h) format
    rectangle.

    Parameters
    ----------
    p0: np.ndarray
        (x, y) coordinate of one corner.
    p1: np.ndarray
        (x, y) coordinate of the diagonally opposite corner.

    Returns
    -------
    np.ndarray:
        Rectangle (x_topleft, y_topleft, width, height) with diagonal
        corners `p0` and `p1`. This will work with both
        top-left - bottom-right and bottom-left - top-right diagonals.
    """
    # Fixed docstring: it previously mentioned scaling by a `scale`
    # parameter that does not exist in the signature.
    x = (p0[0], p1[0])
    y = (p0[1], p1[1])
    xleft = min(x)
    w = max(x) - xleft
    ytop = min(y)
    h = max(y) - ytop
    return np.array((xleft, ytop, w, h))
Convert diagonally opposite vertices into (x, y, w, h) format rectangle. Returns ------- np.ndarray: Rectangle with diagonal corners `p0` and `p1` after scaling by `scale`. This will work with both top-left - bottom-right and bottom-left - top-right diagonals.
argos/utility.py
points2rect
subhacom/argos
1
python
def points2rect(p0: np.ndarray, p1: np.ndarray) -> np.ndarray: 'Convert diagonally opposite vertices into (x, y, w, h) format\n rectangle.\n\n Returns\n -------\n np.ndarray:\n Rectangle with diagonal corners `p0` and `p1` after scaling\n by `scale`. This will work with both top-left - bottom-right\n and bottom-left - top-right diagonals.\n\n ' x = (p0[0], p1[0]) y = (p0[1], p1[1]) xleft = min(x) w = (max(x) - xleft) ytop = min(y) h = (max(y) - ytop) return np.array((xleft, ytop, w, h))
def points2rect(p0: np.ndarray, p1: np.ndarray) -> np.ndarray: 'Convert diagonally opposite vertices into (x, y, w, h) format\n rectangle.\n\n Returns\n -------\n np.ndarray:\n Rectangle with diagonal corners `p0` and `p1` after scaling\n by `scale`. This will work with both top-left - bottom-right\n and bottom-left - top-right diagonals.\n\n ' x = (p0[0], p1[0]) y = (p0[1], p1[1]) xleft = min(x) w = (max(x) - xleft) ytop = min(y) h = (max(y) - ytop) return np.array((xleft, ytop, w, h))<|docstring|>Convert diagonally opposite vertices into (x, y, w, h) format rectangle. Returns ------- np.ndarray: Rectangle with diagonal corners `p0` and `p1` after scaling by `scale`. This will work with both top-left - bottom-right and bottom-left - top-right diagonals.<|endoftext|>
e318297c9e10ab5cfc40fd53917f9305f80e2a7baaf2f5601d421acc98e10767
def rect2points(rect: np.ndarray) -> np.ndarray:
    """Convert topleft, width, height format rectangle into four
    anti-clockwise vertices."""
    x0, y0 = rect[0], rect[1]
    x1 = rect[0] + rect[2]
    y1 = rect[1] + rect[3]
    # Corners in the order: topleft, bottomleft, bottomright, topright.
    return np.array([[x0, y0], [x0, y1], [x1, y1], [x1, y0]])
Convert topleft, width, height format rectangle into four anti-clockwise vertices
argos/utility.py
rect2points
subhacom/argos
1
python
def rect2points(rect: np.ndarray) -> np.ndarray: 'Convert topleft, width, height format rectangle into four anti-clockwise\n vertices' return np.vstack([rect[:2], (rect[0], (rect[1] + rect[3])), (rect[:2] + rect[2:]), ((rect[0] + rect[2]), rect[1])])
def rect2points(rect: np.ndarray) -> np.ndarray: 'Convert topleft, width, height format rectangle into four anti-clockwise\n vertices' return np.vstack([rect[:2], (rect[0], (rect[1] + rect[3])), (rect[:2] + rect[2:]), ((rect[0] + rect[2]), rect[1])])<|docstring|>Convert topleft, width, height format rectangle into four anti-clockwise vertices<|endoftext|>
6103ca89e4023ad97e3ca970520f7a08aad46e9339511e96c74188b41a26f936
def tlwh2xyrh(rect):
    """Convert top-left, width, height into center, aspect ratio, height."""
    x, y, w, h = rect[0], rect[1], rect[2], rect[3]
    return np.array((x + w / 2.0, y + h / 2.0, w / float(h), h))
Convert top-left, width, height into center, aspect ratio, height
argos/utility.py
tlwh2xyrh
subhacom/argos
1
python
def tlwh2xyrh(rect): return np.array(((rect[0] + (rect[2] / 2.0)), (rect[1] + (rect[3] / 2.0)), (rect[2] / float(rect[3])), rect[3]))
def tlwh2xyrh(rect): return np.array(((rect[0] + (rect[2] / 2.0)), (rect[1] + (rect[3] / 2.0)), (rect[2] / float(rect[3])), rect[3]))<|docstring|>Convert top-left, width, height into center, aspect ratio, height<|endoftext|>
79f59dddbdf1c173a781ab6a717957dca935351571c90489ed77ba240cecfa89
def xyrh2tlwh(rect: np.ndarray) -> np.ndarray:
    """Convert centre, aspect ratio, height into top-left, width, height
    format"""
    # Width recovered from the aspect ratio and height.
    width = rect[2] * rect[3]
    topleft_x = rect[0] - width / 2.0
    topleft_y = rect[1] - rect[3] / 2.0
    return np.asanyarray((topleft_x, topleft_y, width, rect[3]), dtype=int)
Convert centre, aspect ratio, height into top-left, width, height format
argos/utility.py
xyrh2tlwh
subhacom/argos
1
python
def xyrh2tlwh(rect: np.ndarray) -> np.ndarray: 'Convert centre, aspect ratio, height into top-left, width, height\n format' w = (rect[2] * rect[3]) return np.asanyarray(((rect[0] - (w / 2.0)), (rect[1] - (rect[3] / 2.0)), w, rect[3]), dtype=int)
def xyrh2tlwh(rect: np.ndarray) -> np.ndarray: 'Convert centre, aspect ratio, height into top-left, width, height\n format' w = (rect[2] * rect[3]) return np.asanyarray(((rect[0] - (w / 2.0)), (rect[1] - (rect[3] / 2.0)), w, rect[3]), dtype=int)<|docstring|>Convert centre, aspect ratio, height into top-left, width, height format<|endoftext|>
2b790ef145f906d90010cba2903a414e12239485b24ca2afe436ab625776dc7c
def rect_intersection(ra: np.ndarray, rb: np.ndarray) -> np.ndarray: 'Find if two axis-aligned rectangles intersect.\n\n This runs almost 50 times faster than Polygon intersection in shapely.\n and ~5 times faster than cv2.intersectConvexConvex.\n\n Parameters\n ----------\n ra: np.ndarray\n rb: np.ndarray\n Rectangles specified as (x, y, w, h) where (x, y) is the coordinate\n of the lower left corner, w and h are width and height.\n\n Returns\n -------\n np.ndarray\n (x, y, dx, dy) specifying the overlap rectangle. If there is no\n overlap, all entries are 0.\n ' ret = np.zeros((4,), dtype=int) (xa, ya, wa, ha) = ra (xb, yb, wb, hb) = rb x = max(xa, xb) y = max(ya, yb) dx = (min((xa + wa), (xb + wb)) - x) dy = (min((ya + ha), (yb + hb)) - y) if ((dx > 0) and (dy > 0)): ret[:] = (x, y, dx, dy) return ret
Find if two axis-aligned rectangles intersect. This runs almost 50 times faster than Polygon intersection in shapely. and ~5 times faster than cv2.intersectConvexConvex. Parameters ---------- ra: np.ndarray rb: np.ndarray Rectangles specified as (x, y, w, h) where (x, y) is the coordinate of the lower left corner, w and h are width and height. Returns ------- np.ndarray (x, y, dx, dy) specifying the overlap rectangle. If there is no overlap, all entries are 0.
argos/utility.py
rect_intersection
subhacom/argos
1
python
def rect_intersection(ra: np.ndarray, rb: np.ndarray) -> np.ndarray: 'Find if two axis-aligned rectangles intersect.\n\n This runs almost 50 times faster than Polygon intersection in shapely.\n and ~5 times faster than cv2.intersectConvexConvex.\n\n Parameters\n ----------\n ra: np.ndarray\n rb: np.ndarray\n Rectangles specified as (x, y, w, h) where (x, y) is the coordinate\n of the lower left corner, w and h are width and height.\n\n Returns\n -------\n np.ndarray\n (x, y, dx, dy) specifying the overlap rectangle. If there is no\n overlap, all entries are 0.\n ' ret = np.zeros((4,), dtype=int) (xa, ya, wa, ha) = ra (xb, yb, wb, hb) = rb x = max(xa, xb) y = max(ya, yb) dx = (min((xa + wa), (xb + wb)) - x) dy = (min((ya + ha), (yb + hb)) - y) if ((dx > 0) and (dy > 0)): ret[:] = (x, y, dx, dy) return ret
def rect_intersection(ra: np.ndarray, rb: np.ndarray) -> np.ndarray: 'Find if two axis-aligned rectangles intersect.\n\n This runs almost 50 times faster than Polygon intersection in shapely.\n and ~5 times faster than cv2.intersectConvexConvex.\n\n Parameters\n ----------\n ra: np.ndarray\n rb: np.ndarray\n Rectangles specified as (x, y, w, h) where (x, y) is the coordinate\n of the lower left corner, w and h are width and height.\n\n Returns\n -------\n np.ndarray\n (x, y, dx, dy) specifying the overlap rectangle. If there is no\n overlap, all entries are 0.\n ' ret = np.zeros((4,), dtype=int) (xa, ya, wa, ha) = ra (xb, yb, wb, hb) = rb x = max(xa, xb) y = max(ya, yb) dx = (min((xa + wa), (xb + wb)) - x) dy = (min((ya + ha), (yb + hb)) - y) if ((dx > 0) and (dy > 0)): ret[:] = (x, y, dx, dy) return ret<|docstring|>Find if two axis-aligned rectangles intersect. This runs almost 50 times faster than Polygon intersection in shapely. and ~5 times faster than cv2.intersectConvexConvex. Parameters ---------- ra: np.ndarray rb: np.ndarray Rectangles specified as (x, y, w, h) where (x, y) is the coordinate of the lower left corner, w and h are width and height. Returns ------- np.ndarray (x, y, dx, dy) specifying the overlap rectangle. If there is no overlap, all entries are 0.<|endoftext|>
63deae5b5d49aa1e6ea915995b4b869f93d910a437976f90c70cc394bc409c7e
def rect_iou(ra: np.ndarray, rb: np.ndarray) -> float: 'Compute Intersection over Union of two axis-aligned rectangles.\n\n This is the ratio of the are of intersection to the area of the union\n of the two rectangles.\n\n Parameters\n ----------\n ra: np.ndarray\n rb: np.ndarray\n Axis aligned rectangles specified as (x, y, w, h) where (x, y) is\n the position of the lower left corner, w and h are width and height.\n\n Returns\n -------\n float\n The Intersection over Union of two rectangles.\n ' (x, y, dx, dy) = rect_intersection(ra, rb) area_i = (dx * dy) area_u = (((ra[2] * ra[3]) + (rb[2] * rb[3])) - area_i) if ((area_u <= 0) or (area_i < 0)): raise ValueError('Area not positive') ret = ((1.0 * area_i) / area_u) if (np.isinf(ret) or np.isnan(ret) or (ret < 0)): raise ValueError('Invalid intersection') return ret
Compute Intersection over Union of two axis-aligned rectangles. This is the ratio of the are of intersection to the area of the union of the two rectangles. Parameters ---------- ra: np.ndarray rb: np.ndarray Axis aligned rectangles specified as (x, y, w, h) where (x, y) is the position of the lower left corner, w and h are width and height. Returns ------- float The Intersection over Union of two rectangles.
argos/utility.py
rect_iou
subhacom/argos
1
python
def rect_iou(ra: np.ndarray, rb: np.ndarray) -> float: 'Compute Intersection over Union of two axis-aligned rectangles.\n\n This is the ratio of the are of intersection to the area of the union\n of the two rectangles.\n\n Parameters\n ----------\n ra: np.ndarray\n rb: np.ndarray\n Axis aligned rectangles specified as (x, y, w, h) where (x, y) is\n the position of the lower left corner, w and h are width and height.\n\n Returns\n -------\n float\n The Intersection over Union of two rectangles.\n ' (x, y, dx, dy) = rect_intersection(ra, rb) area_i = (dx * dy) area_u = (((ra[2] * ra[3]) + (rb[2] * rb[3])) - area_i) if ((area_u <= 0) or (area_i < 0)): raise ValueError('Area not positive') ret = ((1.0 * area_i) / area_u) if (np.isinf(ret) or np.isnan(ret) or (ret < 0)): raise ValueError('Invalid intersection') return ret
def rect_iou(ra: np.ndarray, rb: np.ndarray) -> float: 'Compute Intersection over Union of two axis-aligned rectangles.\n\n This is the ratio of the are of intersection to the area of the union\n of the two rectangles.\n\n Parameters\n ----------\n ra: np.ndarray\n rb: np.ndarray\n Axis aligned rectangles specified as (x, y, w, h) where (x, y) is\n the position of the lower left corner, w and h are width and height.\n\n Returns\n -------\n float\n The Intersection over Union of two rectangles.\n ' (x, y, dx, dy) = rect_intersection(ra, rb) area_i = (dx * dy) area_u = (((ra[2] * ra[3]) + (rb[2] * rb[3])) - area_i) if ((area_u <= 0) or (area_i < 0)): raise ValueError('Area not positive') ret = ((1.0 * area_i) / area_u) if (np.isinf(ret) or np.isnan(ret) or (ret < 0)): raise ValueError('Invalid intersection') return ret<|docstring|>Compute Intersection over Union of two axis-aligned rectangles. This is the ratio of the are of intersection to the area of the union of the two rectangles. Parameters ---------- ra: np.ndarray rb: np.ndarray Axis aligned rectangles specified as (x, y, w, h) where (x, y) is the position of the lower left corner, w and h are width and height. Returns ------- float The Intersection over Union of two rectangles.<|endoftext|>
3c20ec57fdc3e590f284ddd05ec65bb23f8f5a1ed5fc4ae1382060dd3a0430b7
def rect_ios(ra: np.ndarray, rb: np.ndarray) -> float: 'Compute intersection over area of smaller of two axis-aligned\n rectangles.\n\n This is the ratio of the area of intersection to the area of the smaller\n of the two rectangles.\n\n Parameters\n ----------\n ra: np.ndarray\n rb: np.ndarray\n Axis aligned rectangles specified as (x, y, w, h) where (x, y) is\n the position of the lower left corner, w and h are width and height.\n\n Returns\n -------\n float\n The Intersection over area of the smaller of two rectangles.\n ' (x, y, dx, dy) = rect_intersection(ra, rb) area_i = (dx * dy) area_a = (ra[2] * ra[3]) area_b = (rb[2] * rb[3]) if ((area_i < 0) or (area_a <= 0) or (area_b <= 0)): raise ValueError('Area not positive') ret = (area_i / min(area_a, area_b)) if (np.isinf(ret) or np.isnan(ret) or (ret < 0)): raise ValueError('Invalid intersection') return ret
Compute intersection over area of smaller of two axis-aligned rectangles. This is the ratio of the area of intersection to the area of the smaller of the two rectangles. Parameters ---------- ra: np.ndarray rb: np.ndarray Axis aligned rectangles specified as (x, y, w, h) where (x, y) is the position of the lower left corner, w and h are width and height. Returns ------- float The Intersection over area of the smaller of two rectangles.
argos/utility.py
rect_ios
subhacom/argos
1
python
def rect_ios(ra: np.ndarray, rb: np.ndarray) -> float: 'Compute intersection over area of smaller of two axis-aligned\n rectangles.\n\n This is the ratio of the area of intersection to the area of the smaller\n of the two rectangles.\n\n Parameters\n ----------\n ra: np.ndarray\n rb: np.ndarray\n Axis aligned rectangles specified as (x, y, w, h) where (x, y) is\n the position of the lower left corner, w and h are width and height.\n\n Returns\n -------\n float\n The Intersection over area of the smaller of two rectangles.\n ' (x, y, dx, dy) = rect_intersection(ra, rb) area_i = (dx * dy) area_a = (ra[2] * ra[3]) area_b = (rb[2] * rb[3]) if ((area_i < 0) or (area_a <= 0) or (area_b <= 0)): raise ValueError('Area not positive') ret = (area_i / min(area_a, area_b)) if (np.isinf(ret) or np.isnan(ret) or (ret < 0)): raise ValueError('Invalid intersection') return ret
def rect_ios(ra: np.ndarray, rb: np.ndarray) -> float: 'Compute intersection over area of smaller of two axis-aligned\n rectangles.\n\n This is the ratio of the area of intersection to the area of the smaller\n of the two rectangles.\n\n Parameters\n ----------\n ra: np.ndarray\n rb: np.ndarray\n Axis aligned rectangles specified as (x, y, w, h) where (x, y) is\n the position of the lower left corner, w and h are width and height.\n\n Returns\n -------\n float\n The Intersection over area of the smaller of two rectangles.\n ' (x, y, dx, dy) = rect_intersection(ra, rb) area_i = (dx * dy) area_a = (ra[2] * ra[3]) area_b = (rb[2] * rb[3]) if ((area_i < 0) or (area_a <= 0) or (area_b <= 0)): raise ValueError('Area not positive') ret = (area_i / min(area_a, area_b)) if (np.isinf(ret) or np.isnan(ret) or (ret < 0)): raise ValueError('Invalid intersection') return ret<|docstring|>Compute intersection over area of smaller of two axis-aligned rectangles. This is the ratio of the area of intersection to the area of the smaller of the two rectangles. Parameters ---------- ra: np.ndarray rb: np.ndarray Axis aligned rectangles specified as (x, y, w, h) where (x, y) is the position of the lower left corner, w and h are width and height. Returns ------- float The Intersection over area of the smaller of two rectangles.<|endoftext|>
c06d778958fda6fbf798a47ab04600c58848a452a6f7482d816da2ad4e874bf3
def pairwise_distance(new_bboxes: np.ndarray, bboxes: np.ndarray, boxtype: OutlineStyle, metric: DistanceMetric) -> np.ndarray: 'Computes the distance between all pairs of rectangles.\n\n Parameters\n ----------\n new_bboxes: np.ndarray\n Array of bounding boxes, each row as (x, y, w, h)\n bboxes: np.ndarray\n Array of bounding boxes, each row as (x, y, w, h)\n boxtype: {OutlineStyle.bbox, OulineStyle.minrect}\n OutlineStyle.bbox for axis aligned rectangle bounding box or\n OulineStyle.minrect for minimum area rotated rectangle\n metric: {DistanceMetric.euclidean, DistanceMetric.iou}\n When `DistanceMetric.euclidean`, the squared Euclidean distance is\n used (calculating square root is expensive and unnecessary. If\n `DistanceMetric.iou`, use the area of intersection divided by the\n area of union.\n\n Returns\n --------\n np.ndarray\n Row ``ii``, column ``jj`` contains the computed distance between\n ``new_bboxes[ii]`` and ``bboxes[jj]``.\n ' dist = np.zeros((new_bboxes.shape[0], bboxes.shape[0]), dtype=np.float) if (metric == DistanceMetric.euclidean): centers = (bboxes[(:, :2)] + (bboxes[(:, 2:)] * 0.5)) new_centers = (new_bboxes[(:, :2)] + (new_bboxes[(:, 2:)] * 0.5)) for ii in range(len(new_bboxes)): for jj in range(len(bboxes)): dist[(ii, jj)] = np.sum(((new_centers[ii] - centers[jj]) ** 2)) elif (metric == DistanceMetric.iou): if (boxtype == OutlineStyle.bbox): for ii in range(len(new_bboxes)): for jj in range(len(bboxes)): dist[(ii, jj)] = (1.0 - rect_iou(bboxes[jj], new_bboxes[ii])) else: raise NotImplementedError('Only handling axis-aligned bounding boxes') elif ((metric == DistanceMetric.ios) and (boxtype == OutlineStyle.bbox)): for ii in range(len(new_bboxes)): for jj in range(len(bboxes)): dist[(ii, jj)] = (1.0 - rect_ios(bboxes[jj], new_bboxes[ii])) else: raise NotImplementedError(f'Unknown metric {metric}') return dist
Computes the distance between all pairs of rectangles. Parameters ---------- new_bboxes: np.ndarray Array of bounding boxes, each row as (x, y, w, h) bboxes: np.ndarray Array of bounding boxes, each row as (x, y, w, h) boxtype: {OutlineStyle.bbox, OulineStyle.minrect} OutlineStyle.bbox for axis aligned rectangle bounding box or OulineStyle.minrect for minimum area rotated rectangle metric: {DistanceMetric.euclidean, DistanceMetric.iou} When `DistanceMetric.euclidean`, the squared Euclidean distance is used (calculating square root is expensive and unnecessary. If `DistanceMetric.iou`, use the area of intersection divided by the area of union. Returns -------- np.ndarray Row ``ii``, column ``jj`` contains the computed distance between ``new_bboxes[ii]`` and ``bboxes[jj]``.
argos/utility.py
pairwise_distance
subhacom/argos
1
python
def pairwise_distance(new_bboxes: np.ndarray, bboxes: np.ndarray, boxtype: OutlineStyle, metric: DistanceMetric) -> np.ndarray: 'Computes the distance between all pairs of rectangles.\n\n Parameters\n ----------\n new_bboxes: np.ndarray\n Array of bounding boxes, each row as (x, y, w, h)\n bboxes: np.ndarray\n Array of bounding boxes, each row as (x, y, w, h)\n boxtype: {OutlineStyle.bbox, OulineStyle.minrect}\n OutlineStyle.bbox for axis aligned rectangle bounding box or\n OulineStyle.minrect for minimum area rotated rectangle\n metric: {DistanceMetric.euclidean, DistanceMetric.iou}\n When `DistanceMetric.euclidean`, the squared Euclidean distance is\n used (calculating square root is expensive and unnecessary. If\n `DistanceMetric.iou`, use the area of intersection divided by the\n area of union.\n\n Returns\n --------\n np.ndarray\n Row ``ii``, column ``jj`` contains the computed distance between\n ``new_bboxes[ii]`` and ``bboxes[jj]``.\n ' dist = np.zeros((new_bboxes.shape[0], bboxes.shape[0]), dtype=np.float) if (metric == DistanceMetric.euclidean): centers = (bboxes[(:, :2)] + (bboxes[(:, 2:)] * 0.5)) new_centers = (new_bboxes[(:, :2)] + (new_bboxes[(:, 2:)] * 0.5)) for ii in range(len(new_bboxes)): for jj in range(len(bboxes)): dist[(ii, jj)] = np.sum(((new_centers[ii] - centers[jj]) ** 2)) elif (metric == DistanceMetric.iou): if (boxtype == OutlineStyle.bbox): for ii in range(len(new_bboxes)): for jj in range(len(bboxes)): dist[(ii, jj)] = (1.0 - rect_iou(bboxes[jj], new_bboxes[ii])) else: raise NotImplementedError('Only handling axis-aligned bounding boxes') elif ((metric == DistanceMetric.ios) and (boxtype == OutlineStyle.bbox)): for ii in range(len(new_bboxes)): for jj in range(len(bboxes)): dist[(ii, jj)] = (1.0 - rect_ios(bboxes[jj], new_bboxes[ii])) else: raise NotImplementedError(f'Unknown metric {metric}') return dist
def pairwise_distance(new_bboxes: np.ndarray, bboxes: np.ndarray, boxtype: OutlineStyle, metric: DistanceMetric) -> np.ndarray: 'Computes the distance between all pairs of rectangles.\n\n Parameters\n ----------\n new_bboxes: np.ndarray\n Array of bounding boxes, each row as (x, y, w, h)\n bboxes: np.ndarray\n Array of bounding boxes, each row as (x, y, w, h)\n boxtype: {OutlineStyle.bbox, OulineStyle.minrect}\n OutlineStyle.bbox for axis aligned rectangle bounding box or\n OulineStyle.minrect for minimum area rotated rectangle\n metric: {DistanceMetric.euclidean, DistanceMetric.iou}\n When `DistanceMetric.euclidean`, the squared Euclidean distance is\n used (calculating square root is expensive and unnecessary. If\n `DistanceMetric.iou`, use the area of intersection divided by the\n area of union.\n\n Returns\n --------\n np.ndarray\n Row ``ii``, column ``jj`` contains the computed distance between\n ``new_bboxes[ii]`` and ``bboxes[jj]``.\n ' dist = np.zeros((new_bboxes.shape[0], bboxes.shape[0]), dtype=np.float) if (metric == DistanceMetric.euclidean): centers = (bboxes[(:, :2)] + (bboxes[(:, 2:)] * 0.5)) new_centers = (new_bboxes[(:, :2)] + (new_bboxes[(:, 2:)] * 0.5)) for ii in range(len(new_bboxes)): for jj in range(len(bboxes)): dist[(ii, jj)] = np.sum(((new_centers[ii] - centers[jj]) ** 2)) elif (metric == DistanceMetric.iou): if (boxtype == OutlineStyle.bbox): for ii in range(len(new_bboxes)): for jj in range(len(bboxes)): dist[(ii, jj)] = (1.0 - rect_iou(bboxes[jj], new_bboxes[ii])) else: raise NotImplementedError('Only handling axis-aligned bounding boxes') elif ((metric == DistanceMetric.ios) and (boxtype == OutlineStyle.bbox)): for ii in range(len(new_bboxes)): for jj in range(len(bboxes)): dist[(ii, jj)] = (1.0 - rect_ios(bboxes[jj], new_bboxes[ii])) else: raise NotImplementedError(f'Unknown metric {metric}') return dist<|docstring|>Computes the distance between all pairs of rectangles. 
Parameters ---------- new_bboxes: np.ndarray Array of bounding boxes, each row as (x, y, w, h) bboxes: np.ndarray Array of bounding boxes, each row as (x, y, w, h) boxtype: {OutlineStyle.bbox, OulineStyle.minrect} OutlineStyle.bbox for axis aligned rectangle bounding box or OulineStyle.minrect for minimum area rotated rectangle metric: {DistanceMetric.euclidean, DistanceMetric.iou} When `DistanceMetric.euclidean`, the squared Euclidean distance is used (calculating square root is expensive and unnecessary. If `DistanceMetric.iou`, use the area of intersection divided by the area of union. Returns -------- np.ndarray Row ``ii``, column ``jj`` contains the computed distance between ``new_bboxes[ii]`` and ``bboxes[jj]``.<|endoftext|>
7e51b637c95bb695d2f45aba5b76fce7014eb9c2e5eb180af3c712f237df530a
def forward(self, x): '\n Feed forward the model.\n \n Args:\n x (torch.Tensor): Input data.\n \n Raises:\n -\n\n Returns:\n x (torch.Tensor): Output of the feed forward execution.\n \n ' x = self.bn1(self.pool(F.relu(self.conv1(x)))) x = self.bn2(self.pool(F.relu(self.conv2(x)))) x = self.bn3(self.pool(F.relu(self.conv3(x)))) x = self.bn4(self.pool(F.relu(self.conv4(x)))) x = self.bn5(self.pool(F.relu(self.conv5(x)))) x = self.bn6(self.pool(F.relu(self.conv6(x)))) x = x.view((- 1), ((512 * 4) * 4)) x = self.dropout(x) x = F.relu(self.fc1(x)) x = self.dropout(x) x = F.relu(self.fc2(x)) x = self.dropout(x) x = F.relu(self.fc3(x)) x = self.dropout(x) x = self.fc4(x) return x
Feed forward the model. Args: x (torch.Tensor): Input data. Raises: - Returns: x (torch.Tensor): Output of the feed forward execution.
project_cnn/cnn.py
forward
vsaveris/deep-learning
0
python
def forward(self, x): '\n Feed forward the model.\n \n Args:\n x (torch.Tensor): Input data.\n \n Raises:\n -\n\n Returns:\n x (torch.Tensor): Output of the feed forward execution.\n \n ' x = self.bn1(self.pool(F.relu(self.conv1(x)))) x = self.bn2(self.pool(F.relu(self.conv2(x)))) x = self.bn3(self.pool(F.relu(self.conv3(x)))) x = self.bn4(self.pool(F.relu(self.conv4(x)))) x = self.bn5(self.pool(F.relu(self.conv5(x)))) x = self.bn6(self.pool(F.relu(self.conv6(x)))) x = x.view((- 1), ((512 * 4) * 4)) x = self.dropout(x) x = F.relu(self.fc1(x)) x = self.dropout(x) x = F.relu(self.fc2(x)) x = self.dropout(x) x = F.relu(self.fc3(x)) x = self.dropout(x) x = self.fc4(x) return x
def forward(self, x): '\n Feed forward the model.\n \n Args:\n x (torch.Tensor): Input data.\n \n Raises:\n -\n\n Returns:\n x (torch.Tensor): Output of the feed forward execution.\n \n ' x = self.bn1(self.pool(F.relu(self.conv1(x)))) x = self.bn2(self.pool(F.relu(self.conv2(x)))) x = self.bn3(self.pool(F.relu(self.conv3(x)))) x = self.bn4(self.pool(F.relu(self.conv4(x)))) x = self.bn5(self.pool(F.relu(self.conv5(x)))) x = self.bn6(self.pool(F.relu(self.conv6(x)))) x = x.view((- 1), ((512 * 4) * 4)) x = self.dropout(x) x = F.relu(self.fc1(x)) x = self.dropout(x) x = F.relu(self.fc2(x)) x = self.dropout(x) x = F.relu(self.fc3(x)) x = self.dropout(x) x = self.fc4(x) return x<|docstring|>Feed forward the model. Args: x (torch.Tensor): Input data. Raises: - Returns: x (torch.Tensor): Output of the feed forward execution.<|endoftext|>
d63c4e4a7766eac736902b263222214c728574cff69a606ed05f4ed38ed07a50
def __init__(self, max_failures=None): "Creates a new Benchmark.\n\n Args:\n max_failures: The number of story run's failures before bailing\n from executing subsequent page runs. If None, we never bail.\n " self._expectations = None self._max_failures = max_failures
Creates a new Benchmark. Args: max_failures: The number of story run's failures before bailing from executing subsequent page runs. If None, we never bail.
telemetry/telemetry/benchmark.py
__init__
tdresser/catapult-csm
4
python
def __init__(self, max_failures=None): "Creates a new Benchmark.\n\n Args:\n max_failures: The number of story run's failures before bailing\n from executing subsequent page runs. If None, we never bail.\n " self._expectations = None self._max_failures = max_failures
def __init__(self, max_failures=None): "Creates a new Benchmark.\n\n Args:\n max_failures: The number of story run's failures before bailing\n from executing subsequent page runs. If None, we never bail.\n " self._expectations = None self._max_failures = max_failures<|docstring|>Creates a new Benchmark. Args: max_failures: The number of story run's failures before bailing from executing subsequent page runs. If None, we never bail.<|endoftext|>
5fd62d2674a4f650614f6551fac62abde71d5bf933148d14fe09bce598a79b46
@classmethod def ShouldDisable(cls, possible_browser): 'Override this method to disable a benchmark under specific conditions.\n\n Supports logic too complex for simple Enabled and Disabled decorators.\n Decorators are still respected in cases where this function returns False.\n ' return False
Override this method to disable a benchmark under specific conditions. Supports logic too complex for simple Enabled and Disabled decorators. Decorators are still respected in cases where this function returns False.
telemetry/telemetry/benchmark.py
ShouldDisable
tdresser/catapult-csm
4
python
@classmethod def ShouldDisable(cls, possible_browser): 'Override this method to disable a benchmark under specific conditions.\n\n Supports logic too complex for simple Enabled and Disabled decorators.\n Decorators are still respected in cases where this function returns False.\n ' return False
@classmethod def ShouldDisable(cls, possible_browser): 'Override this method to disable a benchmark under specific conditions.\n\n Supports logic too complex for simple Enabled and Disabled decorators.\n Decorators are still respected in cases where this function returns False.\n ' return False<|docstring|>Override this method to disable a benchmark under specific conditions. Supports logic too complex for simple Enabled and Disabled decorators. Decorators are still respected in cases where this function returns False.<|endoftext|>
75ff77d8db41f683c3a71e34a6d7a96e8346c8309e740d18703ab36708de81cb
def Run(self, finder_options): 'Do not override this method.' return story_runner.RunBenchmark(self, finder_options)
Do not override this method.
telemetry/telemetry/benchmark.py
Run
tdresser/catapult-csm
4
python
def Run(self, finder_options): return story_runner.RunBenchmark(self, finder_options)
def Run(self, finder_options): return story_runner.RunBenchmark(self, finder_options)<|docstring|>Do not override this method.<|endoftext|>
629f54e84d42aae4504f16e9f25bc0efcfba3c9b6a95b5dfc34acb49ca3fb67b
@classmethod def ShouldTearDownStateAfterEachStoryRun(cls): 'Override to specify whether to tear down state after each story run.\n\n Tearing down all states after each story run, e.g., clearing profiles,\n stopping the browser, stopping local server, etc. So the browser will not be\n reused among multiple stories. This is particularly useful to get the\n startup part of launching the browser in each story.\n\n This should only be used by TimelineBasedMeasurement (TBM) benchmarks, but\n not by PageTest based benchmarks.\n ' return True
Override to specify whether to tear down state after each story run. Tearing down all states after each story run, e.g., clearing profiles, stopping the browser, stopping local server, etc. So the browser will not be reused among multiple stories. This is particularly useful to get the startup part of launching the browser in each story. This should only be used by TimelineBasedMeasurement (TBM) benchmarks, but not by PageTest based benchmarks.
telemetry/telemetry/benchmark.py
ShouldTearDownStateAfterEachStoryRun
tdresser/catapult-csm
4
python
@classmethod def ShouldTearDownStateAfterEachStoryRun(cls): 'Override to specify whether to tear down state after each story run.\n\n Tearing down all states after each story run, e.g., clearing profiles,\n stopping the browser, stopping local server, etc. So the browser will not be\n reused among multiple stories. This is particularly useful to get the\n startup part of launching the browser in each story.\n\n This should only be used by TimelineBasedMeasurement (TBM) benchmarks, but\n not by PageTest based benchmarks.\n ' return True
@classmethod def ShouldTearDownStateAfterEachStoryRun(cls): 'Override to specify whether to tear down state after each story run.\n\n Tearing down all states after each story run, e.g., clearing profiles,\n stopping the browser, stopping local server, etc. So the browser will not be\n reused among multiple stories. This is particularly useful to get the\n startup part of launching the browser in each story.\n\n This should only be used by TimelineBasedMeasurement (TBM) benchmarks, but\n not by PageTest based benchmarks.\n ' return True<|docstring|>Override to specify whether to tear down state after each story run. Tearing down all states after each story run, e.g., clearing profiles, stopping the browser, stopping local server, etc. So the browser will not be reused among multiple stories. This is particularly useful to get the startup part of launching the browser in each story. This should only be used by TimelineBasedMeasurement (TBM) benchmarks, but not by PageTest based benchmarks.<|endoftext|>
a5acf980952e56ae225a697cdb7c369bb35071fcc07205298bd0d2097e8bcc01
@classmethod def ShouldTearDownStateAfterEachStorySetRun(cls): 'Override to specify whether to tear down state after each story set run.\n\n Defaults to True in order to reset the state and make individual story set\n repeats more independent of each other. The intended effect is to average\n out noise in measurements between repeats.\n\n Long running benchmarks willing to stess test the browser and have it run\n for long periods of time may switch this value to False.\n\n This should only be used by TimelineBasedMeasurement (TBM) benchmarks, but\n not by PageTest based benchmarks.\n ' return True
Override to specify whether to tear down state after each story set run. Defaults to True in order to reset the state and make individual story set repeats more independent of each other. The intended effect is to average out noise in measurements between repeats. Long running benchmarks willing to stess test the browser and have it run for long periods of time may switch this value to False. This should only be used by TimelineBasedMeasurement (TBM) benchmarks, but not by PageTest based benchmarks.
telemetry/telemetry/benchmark.py
ShouldTearDownStateAfterEachStorySetRun
tdresser/catapult-csm
4
python
@classmethod def ShouldTearDownStateAfterEachStorySetRun(cls): 'Override to specify whether to tear down state after each story set run.\n\n Defaults to True in order to reset the state and make individual story set\n repeats more independent of each other. The intended effect is to average\n out noise in measurements between repeats.\n\n Long running benchmarks willing to stess test the browser and have it run\n for long periods of time may switch this value to False.\n\n This should only be used by TimelineBasedMeasurement (TBM) benchmarks, but\n not by PageTest based benchmarks.\n ' return True
@classmethod def ShouldTearDownStateAfterEachStorySetRun(cls): 'Override to specify whether to tear down state after each story set run.\n\n Defaults to True in order to reset the state and make individual story set\n repeats more independent of each other. The intended effect is to average\n out noise in measurements between repeats.\n\n Long running benchmarks willing to stess test the browser and have it run\n for long periods of time may switch this value to False.\n\n This should only be used by TimelineBasedMeasurement (TBM) benchmarks, but\n not by PageTest based benchmarks.\n ' return True<|docstring|>Override to specify whether to tear down state after each story set run. Defaults to True in order to reset the state and make individual story set repeats more independent of each other. The intended effect is to average out noise in measurements between repeats. Long running benchmarks willing to stess test the browser and have it run for long periods of time may switch this value to False. This should only be used by TimelineBasedMeasurement (TBM) benchmarks, but not by PageTest based benchmarks.<|endoftext|>
a6b1043e15350620b13c6a410fb8d6ecaa3cd208f83df7c34208c294d3a5ac79
def SetupBenchmarkDefaultTraceRerunOptions(self, tbm_options): 'Setup tracing categories associated with default trace option.'
Setup tracing categories associated with default trace option.
telemetry/telemetry/benchmark.py
SetupBenchmarkDefaultTraceRerunOptions
tdresser/catapult-csm
4
python
def SetupBenchmarkDefaultTraceRerunOptions(self, tbm_options):
def SetupBenchmarkDefaultTraceRerunOptions(self, tbm_options): <|docstring|>Setup tracing categories associated with default trace option.<|endoftext|>
3ed4aa6d7a31e78d1d0f790463b50401944852b3bb60bfbbbfdbecbfb8d8bb7d
def SetupBenchmarkDebugTraceRerunOptions(self, tbm_options): 'Setup tracing categories associated with debug trace option.'
Setup tracing categories associated with debug trace option.
telemetry/telemetry/benchmark.py
SetupBenchmarkDebugTraceRerunOptions
tdresser/catapult-csm
4
python
def SetupBenchmarkDebugTraceRerunOptions(self, tbm_options):
def SetupBenchmarkDebugTraceRerunOptions(self, tbm_options): <|docstring|>Setup tracing categories associated with debug trace option.<|endoftext|>
6a69a453a5ca09a3bbd54d43b2a601f8bfa5f5509a7d27168cfa0c1e0fc35e2d
@classmethod def ValueCanBeAddedPredicate(cls, value, is_first_result): 'Returns whether |value| can be added to the test results.\n\n Override this method to customize the logic of adding values to test\n results.\n\n Args:\n value: a value.Value instance (except failure.FailureValue,\n skip.SkipValue or trace.TraceValue which will always be added).\n is_first_result: True if |value| is the first result for its\n corresponding story.\n\n Returns:\n True if |value| should be added to the test results.\n Otherwise, it returns False.\n ' return True
Returns whether |value| can be added to the test results. Override this method to customize the logic of adding values to test results. Args: value: a value.Value instance (except failure.FailureValue, skip.SkipValue or trace.TraceValue which will always be added). is_first_result: True if |value| is the first result for its corresponding story. Returns: True if |value| should be added to the test results. Otherwise, it returns False.
telemetry/telemetry/benchmark.py
ValueCanBeAddedPredicate
tdresser/catapult-csm
4
python
@classmethod def ValueCanBeAddedPredicate(cls, value, is_first_result): 'Returns whether |value| can be added to the test results.\n\n Override this method to customize the logic of adding values to test\n results.\n\n Args:\n value: a value.Value instance (except failure.FailureValue,\n skip.SkipValue or trace.TraceValue which will always be added).\n is_first_result: True if |value| is the first result for its\n corresponding story.\n\n Returns:\n True if |value| should be added to the test results.\n Otherwise, it returns False.\n ' return True
@classmethod def ValueCanBeAddedPredicate(cls, value, is_first_result): 'Returns whether |value| can be added to the test results.\n\n Override this method to customize the logic of adding values to test\n results.\n\n Args:\n value: a value.Value instance (except failure.FailureValue,\n skip.SkipValue or trace.TraceValue which will always be added).\n is_first_result: True if |value| is the first result for its\n corresponding story.\n\n Returns:\n True if |value| should be added to the test results.\n Otherwise, it returns False.\n ' return True<|docstring|>Returns whether |value| can be added to the test results. Override this method to customize the logic of adding values to test results. Args: value: a value.Value instance (except failure.FailureValue, skip.SkipValue or trace.TraceValue which will always be added). is_first_result: True if |value| is the first result for its corresponding story. Returns: True if |value| should be added to the test results. Otherwise, it returns False.<|endoftext|>
6ae0d6a0e40c308ebbd9c421c068d6f21aa07c21aee071c360e7b7757754bd76
def CustomizeBrowserOptions(self, options): 'Add browser options that are required by this benchmark.'
Add browser options that are required by this benchmark.
telemetry/telemetry/benchmark.py
CustomizeBrowserOptions
tdresser/catapult-csm
4
python
def CustomizeBrowserOptions(self, options):
def CustomizeBrowserOptions(self, options): <|docstring|>Add browser options that are required by this benchmark.<|endoftext|>
5cb77061689bbe1373429100ef98442e1dc23806f029ff1449e3fe40ccfe20a9
def GetBugComponents(self): "Returns a GenericSet Diagnostic containing the benchmark's Monorail\n component.\n\n Returns:\n GenericSet Diagnostic with the benchmark's bug component name\n " benchmark_component = decorators.GetComponent(self) component_diagnostic_value = ([benchmark_component] if benchmark_component else []) return histogram.GenericSet(component_diagnostic_value)
Returns a GenericSet Diagnostic containing the benchmark's Monorail component. Returns: GenericSet Diagnostic with the benchmark's bug component name
telemetry/telemetry/benchmark.py
GetBugComponents
tdresser/catapult-csm
4
python
def GetBugComponents(self): "Returns a GenericSet Diagnostic containing the benchmark's Monorail\n component.\n\n Returns:\n GenericSet Diagnostic with the benchmark's bug component name\n " benchmark_component = decorators.GetComponent(self) component_diagnostic_value = ([benchmark_component] if benchmark_component else []) return histogram.GenericSet(component_diagnostic_value)
def GetBugComponents(self): "Returns a GenericSet Diagnostic containing the benchmark's Monorail\n component.\n\n Returns:\n GenericSet Diagnostic with the benchmark's bug component name\n " benchmark_component = decorators.GetComponent(self) component_diagnostic_value = ([benchmark_component] if benchmark_component else []) return histogram.GenericSet(component_diagnostic_value)<|docstring|>Returns a GenericSet Diagnostic containing the benchmark's Monorail component. Returns: GenericSet Diagnostic with the benchmark's bug component name<|endoftext|>
cc5ccae8c05c03041076feeeb98a1a0ade2fe3bc63f9b481ba2075e3676d9805
def GetOwners(self): "Returns a Generic Diagnostic containing the benchmark's owners' emails\n in a list.\n\n Returns:\n Diagnostic with a list of the benchmark's owners' emails\n " return histogram.GenericSet((decorators.GetEmails(self) or []))
Returns a Generic Diagnostic containing the benchmark's owners' emails in a list. Returns: Diagnostic with a list of the benchmark's owners' emails
telemetry/telemetry/benchmark.py
GetOwners
tdresser/catapult-csm
4
python
def GetOwners(self): "Returns a Generic Diagnostic containing the benchmark's owners' emails\n in a list.\n\n Returns:\n Diagnostic with a list of the benchmark's owners' emails\n " return histogram.GenericSet((decorators.GetEmails(self) or []))
def GetOwners(self): "Returns a Generic Diagnostic containing the benchmark's owners' emails\n in a list.\n\n Returns:\n Diagnostic with a list of the benchmark's owners' emails\n " return histogram.GenericSet((decorators.GetEmails(self) or []))<|docstring|>Returns a Generic Diagnostic containing the benchmark's owners' emails in a list. Returns: Diagnostic with a list of the benchmark's owners' emails<|endoftext|>
b05ca62487cd660107a7dc1f9adb4c64574ddb9c2f6f8f9258a9fdf650d11b3f
@decorators.Deprecated(2017, 7, 29, 'Use CreateCoreTimelineBasedMeasurementOptions instead.') def CreateTimelineBasedMeasurementOptions(self): 'See CreateCoreTimelineBasedMeasurementOptions.' return self.CreateCoreTimelineBasedMeasurementOptions()
See CreateCoreTimelineBasedMeasurementOptions.
telemetry/telemetry/benchmark.py
CreateTimelineBasedMeasurementOptions
tdresser/catapult-csm
4
python
@decorators.Deprecated(2017, 7, 29, 'Use CreateCoreTimelineBasedMeasurementOptions instead.') def CreateTimelineBasedMeasurementOptions(self): return self.CreateCoreTimelineBasedMeasurementOptions()
@decorators.Deprecated(2017, 7, 29, 'Use CreateCoreTimelineBasedMeasurementOptions instead.') def CreateTimelineBasedMeasurementOptions(self): return self.CreateCoreTimelineBasedMeasurementOptions()<|docstring|>See CreateCoreTimelineBasedMeasurementOptions.<|endoftext|>
64c1af7336b67175c5b3025e77ac1caf6bf0a88f3faff197c2a9c49d3bff18d7
def CreateCoreTimelineBasedMeasurementOptions(self): 'Return the base TimelineBasedMeasurementOptions for this Benchmark.\n\n Additional chrome and atrace categories can be appended when running the\n benchmark with the --extra-chrome-categories and --extra-atrace-categories\n flags.\n\n Override this method to configure a TimelineBasedMeasurement benchmark. If\n this is not a TimelineBasedMeasurement benchmark, override CreatePageTest\n for PageTest tests. Do not override both methods.\n ' return timeline_based_measurement.Options()
Return the base TimelineBasedMeasurementOptions for this Benchmark. Additional chrome and atrace categories can be appended when running the benchmark with the --extra-chrome-categories and --extra-atrace-categories flags. Override this method to configure a TimelineBasedMeasurement benchmark. If this is not a TimelineBasedMeasurement benchmark, override CreatePageTest for PageTest tests. Do not override both methods.
telemetry/telemetry/benchmark.py
CreateCoreTimelineBasedMeasurementOptions
tdresser/catapult-csm
4
python
def CreateCoreTimelineBasedMeasurementOptions(self): 'Return the base TimelineBasedMeasurementOptions for this Benchmark.\n\n Additional chrome and atrace categories can be appended when running the\n benchmark with the --extra-chrome-categories and --extra-atrace-categories\n flags.\n\n Override this method to configure a TimelineBasedMeasurement benchmark. If\n this is not a TimelineBasedMeasurement benchmark, override CreatePageTest\n for PageTest tests. Do not override both methods.\n ' return timeline_based_measurement.Options()
def CreateCoreTimelineBasedMeasurementOptions(self): 'Return the base TimelineBasedMeasurementOptions for this Benchmark.\n\n Additional chrome and atrace categories can be appended when running the\n benchmark with the --extra-chrome-categories and --extra-atrace-categories\n flags.\n\n Override this method to configure a TimelineBasedMeasurement benchmark. If\n this is not a TimelineBasedMeasurement benchmark, override CreatePageTest\n for PageTest tests. Do not override both methods.\n ' return timeline_based_measurement.Options()<|docstring|>Return the base TimelineBasedMeasurementOptions for this Benchmark. Additional chrome and atrace categories can be appended when running the benchmark with the --extra-chrome-categories and --extra-atrace-categories flags. Override this method to configure a TimelineBasedMeasurement benchmark. If this is not a TimelineBasedMeasurement benchmark, override CreatePageTest for PageTest tests. Do not override both methods.<|endoftext|>
93374d170e4df7453d061296ffac3c5eb2ac7d1c2ed299cf59d5990a1e3b2afe
def _GetTimelineBasedMeasurementOptions(self, options): 'Return all timeline based measurements for the curren benchmark run.\n\n This includes the benchmark-configured measurements in\n CreateCoreTimelineBasedMeasurementOptions as well as the user-flag-\n configured options from --extra-chrome-categories and\n --extra-atrace-categories.\n ' tbm_options = None assert (not (class_util.IsMethodOverridden(Benchmark, self.__class__, 'CreateTimelineBasedMeasurementOptions') and class_util.IsMethodOverridden(Benchmark, self.__class__, 'CreateCoreTimelineBasedMeasurementOptions'))), 'Benchmarks should override CreateCoreTimelineBasedMeasurementOptions and NOT also CreateTimelineBasedMeasurementOptions.' if class_util.IsMethodOverridden(Benchmark, self.__class__, 'CreateCoreTimelineBasedMeasurementOptions'): tbm_options = self.CreateCoreTimelineBasedMeasurementOptions() else: tbm_options = self.CreateTimelineBasedMeasurementOptions() if (options and options.extra_chrome_categories): assert tbm_options.config.enable_chrome_trace, 'This benchmark does not support Chrome tracing.' tbm_options.config.chrome_trace_config.category_filter.AddFilterString(options.extra_chrome_categories) if (options and options.extra_atrace_categories): tbm_options.config.enable_atrace_trace = True categories = tbm_options.config.atrace_config.categories if (type(categories) != list): categories = categories.split(',') for category in options.extra_atrace_categories.split(','): if (category not in categories): categories.append(category) tbm_options.config.atrace_config.categories = categories return tbm_options
Return all timeline based measurements for the curren benchmark run. This includes the benchmark-configured measurements in CreateCoreTimelineBasedMeasurementOptions as well as the user-flag- configured options from --extra-chrome-categories and --extra-atrace-categories.
telemetry/telemetry/benchmark.py
_GetTimelineBasedMeasurementOptions
tdresser/catapult-csm
4
python
def _GetTimelineBasedMeasurementOptions(self, options): 'Return all timeline based measurements for the curren benchmark run.\n\n This includes the benchmark-configured measurements in\n CreateCoreTimelineBasedMeasurementOptions as well as the user-flag-\n configured options from --extra-chrome-categories and\n --extra-atrace-categories.\n ' tbm_options = None assert (not (class_util.IsMethodOverridden(Benchmark, self.__class__, 'CreateTimelineBasedMeasurementOptions') and class_util.IsMethodOverridden(Benchmark, self.__class__, 'CreateCoreTimelineBasedMeasurementOptions'))), 'Benchmarks should override CreateCoreTimelineBasedMeasurementOptions and NOT also CreateTimelineBasedMeasurementOptions.' if class_util.IsMethodOverridden(Benchmark, self.__class__, 'CreateCoreTimelineBasedMeasurementOptions'): tbm_options = self.CreateCoreTimelineBasedMeasurementOptions() else: tbm_options = self.CreateTimelineBasedMeasurementOptions() if (options and options.extra_chrome_categories): assert tbm_options.config.enable_chrome_trace, 'This benchmark does not support Chrome tracing.' tbm_options.config.chrome_trace_config.category_filter.AddFilterString(options.extra_chrome_categories) if (options and options.extra_atrace_categories): tbm_options.config.enable_atrace_trace = True categories = tbm_options.config.atrace_config.categories if (type(categories) != list): categories = categories.split(',') for category in options.extra_atrace_categories.split(','): if (category not in categories): categories.append(category) tbm_options.config.atrace_config.categories = categories return tbm_options
def _GetTimelineBasedMeasurementOptions(self, options): 'Return all timeline based measurements for the curren benchmark run.\n\n This includes the benchmark-configured measurements in\n CreateCoreTimelineBasedMeasurementOptions as well as the user-flag-\n configured options from --extra-chrome-categories and\n --extra-atrace-categories.\n ' tbm_options = None assert (not (class_util.IsMethodOverridden(Benchmark, self.__class__, 'CreateTimelineBasedMeasurementOptions') and class_util.IsMethodOverridden(Benchmark, self.__class__, 'CreateCoreTimelineBasedMeasurementOptions'))), 'Benchmarks should override CreateCoreTimelineBasedMeasurementOptions and NOT also CreateTimelineBasedMeasurementOptions.' if class_util.IsMethodOverridden(Benchmark, self.__class__, 'CreateCoreTimelineBasedMeasurementOptions'): tbm_options = self.CreateCoreTimelineBasedMeasurementOptions() else: tbm_options = self.CreateTimelineBasedMeasurementOptions() if (options and options.extra_chrome_categories): assert tbm_options.config.enable_chrome_trace, 'This benchmark does not support Chrome tracing.' tbm_options.config.chrome_trace_config.category_filter.AddFilterString(options.extra_chrome_categories) if (options and options.extra_atrace_categories): tbm_options.config.enable_atrace_trace = True categories = tbm_options.config.atrace_config.categories if (type(categories) != list): categories = categories.split(',') for category in options.extra_atrace_categories.split(','): if (category not in categories): categories.append(category) tbm_options.config.atrace_config.categories = categories return tbm_options<|docstring|>Return all timeline based measurements for the curren benchmark run. This includes the benchmark-configured measurements in CreateCoreTimelineBasedMeasurementOptions as well as the user-flag- configured options from --extra-chrome-categories and --extra-atrace-categories.<|endoftext|>
822dc929c75e6d936303d8164c5cbdd089010993f40ef92e8809f1a01a277e86
def CreatePageTest(self, options): 'Return the PageTest for this Benchmark.\n\n Override this method for PageTest tests.\n Override, CreateCoreTimelineBasedMeasurementOptions to configure\n TimelineBasedMeasurement tests. Do not override both methods.\n\n Args:\n options: a browser_options.BrowserFinderOptions instance\n Returns:\n |test()| if |test| is a PageTest class.\n Otherwise, a TimelineBasedMeasurement instance.\n ' is_page_test = issubclass(self.test, legacy_page_test.LegacyPageTest) is_tbm = (self.test == timeline_based_measurement.TimelineBasedMeasurement) if ((not is_page_test) and (not is_tbm)): raise TypeError(('"%s" is not a PageTest or a TimelineBasedMeasurement.' % self.test.__name__)) if is_page_test: return self.test() opts = self._GetTimelineBasedMeasurementOptions(options) self.SetupTraceRerunOptions(options, opts) return timeline_based_measurement.TimelineBasedMeasurement(opts)
Return the PageTest for this Benchmark. Override this method for PageTest tests. Override, CreateCoreTimelineBasedMeasurementOptions to configure TimelineBasedMeasurement tests. Do not override both methods. Args: options: a browser_options.BrowserFinderOptions instance Returns: |test()| if |test| is a PageTest class. Otherwise, a TimelineBasedMeasurement instance.
telemetry/telemetry/benchmark.py
CreatePageTest
tdresser/catapult-csm
4
python
def CreatePageTest(self, options): 'Return the PageTest for this Benchmark.\n\n Override this method for PageTest tests.\n Override, CreateCoreTimelineBasedMeasurementOptions to configure\n TimelineBasedMeasurement tests. Do not override both methods.\n\n Args:\n options: a browser_options.BrowserFinderOptions instance\n Returns:\n |test()| if |test| is a PageTest class.\n Otherwise, a TimelineBasedMeasurement instance.\n ' is_page_test = issubclass(self.test, legacy_page_test.LegacyPageTest) is_tbm = (self.test == timeline_based_measurement.TimelineBasedMeasurement) if ((not is_page_test) and (not is_tbm)): raise TypeError(('"%s" is not a PageTest or a TimelineBasedMeasurement.' % self.test.__name__)) if is_page_test: return self.test() opts = self._GetTimelineBasedMeasurementOptions(options) self.SetupTraceRerunOptions(options, opts) return timeline_based_measurement.TimelineBasedMeasurement(opts)
def CreatePageTest(self, options): 'Return the PageTest for this Benchmark.\n\n Override this method for PageTest tests.\n Override, CreateCoreTimelineBasedMeasurementOptions to configure\n TimelineBasedMeasurement tests. Do not override both methods.\n\n Args:\n options: a browser_options.BrowserFinderOptions instance\n Returns:\n |test()| if |test| is a PageTest class.\n Otherwise, a TimelineBasedMeasurement instance.\n ' is_page_test = issubclass(self.test, legacy_page_test.LegacyPageTest) is_tbm = (self.test == timeline_based_measurement.TimelineBasedMeasurement) if ((not is_page_test) and (not is_tbm)): raise TypeError(('"%s" is not a PageTest or a TimelineBasedMeasurement.' % self.test.__name__)) if is_page_test: return self.test() opts = self._GetTimelineBasedMeasurementOptions(options) self.SetupTraceRerunOptions(options, opts) return timeline_based_measurement.TimelineBasedMeasurement(opts)<|docstring|>Return the PageTest for this Benchmark. Override this method for PageTest tests. Override, CreateCoreTimelineBasedMeasurementOptions to configure TimelineBasedMeasurement tests. Do not override both methods. Args: options: a browser_options.BrowserFinderOptions instance Returns: |test()| if |test| is a PageTest class. Otherwise, a TimelineBasedMeasurement instance.<|endoftext|>
174a8e0f885cb59701a70e6d9c6583c509cb45232308ee9a57a722b05952cebb
def CreateStorySet(self, options): 'Creates the instance of StorySet used to run the benchmark.\n\n Can be overridden by subclasses.\n ' del options if (not self.page_set): raise NotImplementedError('This test has no "page_set" attribute.') return self.page_set()
Creates the instance of StorySet used to run the benchmark. Can be overridden by subclasses.
telemetry/telemetry/benchmark.py
CreateStorySet
tdresser/catapult-csm
4
python
def CreateStorySet(self, options): 'Creates the instance of StorySet used to run the benchmark.\n\n Can be overridden by subclasses.\n ' del options if (not self.page_set): raise NotImplementedError('This test has no "page_set" attribute.') return self.page_set()
def CreateStorySet(self, options): 'Creates the instance of StorySet used to run the benchmark.\n\n Can be overridden by subclasses.\n ' del options if (not self.page_set): raise NotImplementedError('This test has no "page_set" attribute.') return self.page_set()<|docstring|>Creates the instance of StorySet used to run the benchmark. Can be overridden by subclasses.<|endoftext|>
92b8d21cd9417ab984be8db1a6cbcdb65378408e02421767e918cabfb60f058d
def InitializeExpectations(self): 'Returns StoryExpectation object.\n\n This is a wrapper for GetExpectations. The user overrides GetExpectatoins\n in the benchmark class to have it use the correct expectations. This is what\n story_runner.py uses to get the expectations.\n ' if (not self._expectations): self._expectations = self.GetExpectations() return self._expectations
Returns StoryExpectation object. This is a wrapper for GetExpectations. The user overrides GetExpectatoins in the benchmark class to have it use the correct expectations. This is what story_runner.py uses to get the expectations.
telemetry/telemetry/benchmark.py
InitializeExpectations
tdresser/catapult-csm
4
python
def InitializeExpectations(self): 'Returns StoryExpectation object.\n\n This is a wrapper for GetExpectations. The user overrides GetExpectatoins\n in the benchmark class to have it use the correct expectations. This is what\n story_runner.py uses to get the expectations.\n ' if (not self._expectations): self._expectations = self.GetExpectations() return self._expectations
def InitializeExpectations(self): 'Returns StoryExpectation object.\n\n This is a wrapper for GetExpectations. The user overrides GetExpectatoins\n in the benchmark class to have it use the correct expectations. This is what\n story_runner.py uses to get the expectations.\n ' if (not self._expectations): self._expectations = self.GetExpectations() return self._expectations<|docstring|>Returns StoryExpectation object. This is a wrapper for GetExpectations. The user overrides GetExpectatoins in the benchmark class to have it use the correct expectations. This is what story_runner.py uses to get the expectations.<|endoftext|>
50ebc9f767c89afba63885c56534ac140da4e6c2b0a504e83b10b26db32ebbc1
def GetExpectations(self): 'Returns a StoryExpectation object.\n\n This object is used to determine what stories are disabled. This needs to be\n overridden by the subclass. It defaults to an empty expectations object.\n ' return expectations.StoryExpectations()
Returns a StoryExpectation object. This object is used to determine what stories are disabled. This needs to be overridden by the subclass. It defaults to an empty expectations object.
telemetry/telemetry/benchmark.py
GetExpectations
tdresser/catapult-csm
4
python
def GetExpectations(self): 'Returns a StoryExpectation object.\n\n This object is used to determine what stories are disabled. This needs to be\n overridden by the subclass. It defaults to an empty expectations object.\n ' return expectations.StoryExpectations()
def GetExpectations(self): 'Returns a StoryExpectation object.\n\n This object is used to determine what stories are disabled. This needs to be\n overridden by the subclass. It defaults to an empty expectations object.\n ' return expectations.StoryExpectations()<|docstring|>Returns a StoryExpectation object. This object is used to determine what stories are disabled. This needs to be overridden by the subclass. It defaults to an empty expectations object.<|endoftext|>
39d7d43fd0b0afd7854c2a6b2a95b38c4f61eef7de499355e7b0a042a332e61f
def _dct(self, x, y, u, v, n): ' calculate discrete cosine transformation ' a = tf.math.cos((((((2 * x) + 1) * u) * math.pi) / (2 * n))) b = tf.math.cos((((((2 * y) + 1) * v) * math.pi) / (2 * n))) return (a * b)
calculate discrete cosine transformation
noise/dct.py
_dct
marco-willi/HiDDeN-tensorflow
0
python
def _dct(self, x, y, u, v, n): ' ' a = tf.math.cos((((((2 * x) + 1) * u) * math.pi) / (2 * n))) b = tf.math.cos((((((2 * y) + 1) * v) * math.pi) / (2 * n))) return (a * b)
def _dct(self, x, y, u, v, n): ' ' a = tf.math.cos((((((2 * x) + 1) * u) * math.pi) / (2 * n))) b = tf.math.cos((((((2 * y) + 1) * v) * math.pi) / (2 * n))) return (a * b)<|docstring|>calculate discrete cosine transformation<|endoftext|>
ec782feb3b2aeb26a58072239867258f606fdf1460b65bb664c0aecaa28116b6
def _dct_kernel(self, n, normalize): ' Build DCT 2D Convolutional Kernels ' full_kernel = ((n * n), (n * n)) G = np.zeros(shape=full_kernel) for x in range(0, n): for y in range(0, n): for u in range(0, n): for v in range(0, n): val = self._dct(x, y, u, v, n) if normalize: val *= self._normalize(u, v) x_coord = ((n * u) + v) y_coord = ((n * x) + y) G[(x_coord, y_coord)] = val G = tf.cast(tf.Variable(G), tf.float32) G_filter = tf.reshape(G, shape=((n * n), n, n)) G_filter_conv = tf.transpose(G_filter, perm=[1, 2, 0]) G_filter_conv = tf.expand_dims(G_filter_conv, 2) return G_filter_conv
Build DCT 2D Convolutional Kernels
noise/dct.py
_dct_kernel
marco-willi/HiDDeN-tensorflow
0
python
def _dct_kernel(self, n, normalize): ' ' full_kernel = ((n * n), (n * n)) G = np.zeros(shape=full_kernel) for x in range(0, n): for y in range(0, n): for u in range(0, n): for v in range(0, n): val = self._dct(x, y, u, v, n) if normalize: val *= self._normalize(u, v) x_coord = ((n * u) + v) y_coord = ((n * x) + y) G[(x_coord, y_coord)] = val G = tf.cast(tf.Variable(G), tf.float32) G_filter = tf.reshape(G, shape=((n * n), n, n)) G_filter_conv = tf.transpose(G_filter, perm=[1, 2, 0]) G_filter_conv = tf.expand_dims(G_filter_conv, 2) return G_filter_conv
def _dct_kernel(self, n, normalize): ' ' full_kernel = ((n * n), (n * n)) G = np.zeros(shape=full_kernel) for x in range(0, n): for y in range(0, n): for u in range(0, n): for v in range(0, n): val = self._dct(x, y, u, v, n) if normalize: val *= self._normalize(u, v) x_coord = ((n * u) + v) y_coord = ((n * x) + y) G[(x_coord, y_coord)] = val G = tf.cast(tf.Variable(G), tf.float32) G_filter = tf.reshape(G, shape=((n * n), n, n)) G_filter_conv = tf.transpose(G_filter, perm=[1, 2, 0]) G_filter_conv = tf.expand_dims(G_filter_conv, 2) return G_filter_conv<|docstring|>Build DCT 2D Convolutional Kernels<|endoftext|>
7cf8167c04e082aac4064119b06dbffb380389ba6abe2b81253578b928bb7997
def _mask_filters(self, res_channel, mask): ' Mask filters according to mask ' mask = tf.reshape(mask, shape=(res_channel.shape[(- 1)],)) mask = tf.cast(mask, tf.float32) return tf.multiply(res_channel, mask)
Mask filters according to mask
noise/dct.py
_mask_filters
marco-willi/HiDDeN-tensorflow
0
python
def _mask_filters(self, res_channel, mask): ' ' mask = tf.reshape(mask, shape=(res_channel.shape[(- 1)],)) mask = tf.cast(mask, tf.float32) return tf.multiply(res_channel, mask)
def _mask_filters(self, res_channel, mask): ' ' mask = tf.reshape(mask, shape=(res_channel.shape[(- 1)],)) mask = tf.cast(mask, tf.float32) return tf.multiply(res_channel, mask)<|docstring|>Mask filters according to mask<|endoftext|>
de2c70aaa28d3d34c5f5d6044db74df2c4a20f5dfdbc91e173709b224e28bda6
def __call__(self, inputs, masks=None): '\n Args:\n inputs: tensor (batch, x, y, n x n, c)\n masks: list of c (n x n) binary masks\n ' n_channels = inputs.shape[(- 1)] if (masks is not None): assert (len(masks) == n_channels), 'length of masks ({}) must equal n_channels ({})'.format(len(masks), n_channels) res = list() splits = tf.split(inputs, n_channels, (- 1)) for (i, split) in enumerate(splits): res_channel = super(DCT2D, self).__call__(split) if (masks is not None): res_channel = self._mask_filters(res_channel, masks[i]) res.append(res_channel) return tf.concat(res, (- 1))
Args: inputs: tensor (batch, x, y, n x n, c) masks: list of c (n x n) binary masks
noise/dct.py
__call__
marco-willi/HiDDeN-tensorflow
0
python
def __call__(self, inputs, masks=None): '\n Args:\n inputs: tensor (batch, x, y, n x n, c)\n masks: list of c (n x n) binary masks\n ' n_channels = inputs.shape[(- 1)] if (masks is not None): assert (len(masks) == n_channels), 'length of masks ({}) must equal n_channels ({})'.format(len(masks), n_channels) res = list() splits = tf.split(inputs, n_channels, (- 1)) for (i, split) in enumerate(splits): res_channel = super(DCT2D, self).__call__(split) if (masks is not None): res_channel = self._mask_filters(res_channel, masks[i]) res.append(res_channel) return tf.concat(res, (- 1))
def __call__(self, inputs, masks=None): '\n Args:\n inputs: tensor (batch, x, y, n x n, c)\n masks: list of c (n x n) binary masks\n ' n_channels = inputs.shape[(- 1)] if (masks is not None): assert (len(masks) == n_channels), 'length of masks ({}) must equal n_channels ({})'.format(len(masks), n_channels) res = list() splits = tf.split(inputs, n_channels, (- 1)) for (i, split) in enumerate(splits): res_channel = super(DCT2D, self).__call__(split) if (masks is not None): res_channel = self._mask_filters(res_channel, masks[i]) res.append(res_channel) return tf.concat(res, (- 1))<|docstring|>Args: inputs: tensor (batch, x, y, n x n, c) masks: list of c (n x n) binary masks<|endoftext|>
d4c8b303c540695dd315f2badb30d4ac09827d6f58b13cba03b00c2743997c95
def __init__(self, path: str): 'Initializes Dotfile class.' self.path = Path(path) self.local_base = 'dotfiles' self.absolute = self._get_absolute(self.path) self.category = self._get_path_category(self.path) self.factory = DotfileHandlerFactory()
Initializes Dotfile class.
handlers/dotfile_handler.py
__init__
tomislavperich/nomad
0
python
def __init__(self, path: str): self.path = Path(path) self.local_base = 'dotfiles' self.absolute = self._get_absolute(self.path) self.category = self._get_path_category(self.path) self.factory = DotfileHandlerFactory()
def __init__(self, path: str): self.path = Path(path) self.local_base = 'dotfiles' self.absolute = self._get_absolute(self.path) self.category = self._get_path_category(self.path) self.factory = DotfileHandlerFactory()<|docstring|>Initializes Dotfile class.<|endoftext|>
b9308d241608bfcf34912c3e2b58bcecfd290df3037b805d9269f1a6565c0f70
def _get_absolute(self, path: Path) -> Path: 'Resolves given path to absolute.\n\n Args:\n path: Path to be resolved.\n\n Returns:\n Path: resolved, absolute Path.\n ' return path.expanduser().absolute()
Resolves given path to absolute. Args: path: Path to be resolved. Returns: Path: resolved, absolute Path.
handlers/dotfile_handler.py
_get_absolute
tomislavperich/nomad
0
python
def _get_absolute(self, path: Path) -> Path: 'Resolves given path to absolute.\n\n Args:\n path: Path to be resolved.\n\n Returns:\n Path: resolved, absolute Path.\n ' return path.expanduser().absolute()
def _get_absolute(self, path: Path) -> Path: 'Resolves given path to absolute.\n\n Args:\n path: Path to be resolved.\n\n Returns:\n Path: resolved, absolute Path.\n ' return path.expanduser().absolute()<|docstring|>Resolves given path to absolute. Args: path: Path to be resolved. Returns: Path: resolved, absolute Path.<|endoftext|>
41036983710b76e93935139e71a60a2c505b43df5bdd45d86fb1eecab11c8bcb
def _get_path_type(self, path: Path) -> str: 'Determines path type.\n\n Determines whether the path is a file or a directory.\n\n Args:\n path: Path to the dotfile.\n\n Returns:\n str: A string indicating path type.\n ' if path.is_dir(): return 'dir' elif path.is_file(): return 'file' else: raise FileNotFoundError(f'File {path} not found')
Determines path type. Determines whether the path is a file or a directory. Args: path: Path to the dotfile. Returns: str: A string indicating path type.
handlers/dotfile_handler.py
_get_path_type
tomislavperich/nomad
0
python
def _get_path_type(self, path: Path) -> str: 'Determines path type.\n\n Determines whether the path is a file or a directory.\n\n Args:\n path: Path to the dotfile.\n\n Returns:\n str: A string indicating path type.\n ' if path.is_dir(): return 'dir' elif path.is_file(): return 'file' else: raise FileNotFoundError(f'File {path} not found')
def _get_path_type(self, path: Path) -> str: 'Determines path type.\n\n Determines whether the path is a file or a directory.\n\n Args:\n path: Path to the dotfile.\n\n Returns:\n str: A string indicating path type.\n ' if path.is_dir(): return 'dir' elif path.is_file(): return 'file' else: raise FileNotFoundError(f'File {path} not found')<|docstring|>Determines path type. Determines whether the path is a file or a directory. Args: path: Path to the dotfile. Returns: str: A string indicating path type.<|endoftext|>
399801753458cb20dc810294a6a1f0bf814448e73cd77bde5c1d7cc5c9f999ad
def _get_path_category(self, path: Path) -> str: 'Determines path category.\n\n Determines path category for placing files locally.\n\n Args:\n path: Path str to determine category of.\n\n Returns:\n str: Category in which file belongs.\n ' if str(path).startswith('/'): return 'global' elif str(path).startswith('~'): return 'local' return 'custom'
Determines path category. Determines path category for placing files locally. Args: path: Path str to determine category of. Returns: str: Category in which file belongs.
handlers/dotfile_handler.py
_get_path_category
tomislavperich/nomad
0
python
def _get_path_category(self, path: Path) -> str: 'Determines path category.\n\n Determines path category for placing files locally.\n\n Args:\n path: Path str to determine category of.\n\n Returns:\n str: Category in which file belongs.\n ' if str(path).startswith('/'): return 'global' elif str(path).startswith('~'): return 'local' return 'custom'
def _get_path_category(self, path: Path) -> str: 'Determines path category.\n\n Determines path category for placing files locally.\n\n Args:\n path: Path str to determine category of.\n\n Returns:\n str: Category in which file belongs.\n ' if str(path).startswith('/'): return 'global' elif str(path).startswith('~'): return 'local' return 'custom'<|docstring|>Determines path category. Determines path category for placing files locally. Args: path: Path str to determine category of. Returns: str: Category in which file belongs.<|endoftext|>
ea17f4763927a62e42190ee67b04f1ea64e787d38a1f197e3070423bda34f999
def _get_local_dest(self, path: Path) -> Path: 'Gets local destination for copying.\n\n Gets local destination based on source path.\n\n Args:\n path: Path to build destination path from.\n\n Returns:\n str: Path pointing to local destination.\n ' dest = '' if str(path).startswith('~'): path = path.relative_to('~') if (self.category == 'global'): dest = f'{self.local_base}/global/{path}' elif (self.category == 'local'): dest = f'{self.local_base}/local/{path}' else: dest = f'{self.local_base}/custom/{path}' return Path(dest)
Gets local destination for copying. Gets local destination based on source path. Args: path: Path to build destination path from. Returns: str: Path pointing to local destination.
handlers/dotfile_handler.py
_get_local_dest
tomislavperich/nomad
0
python
def _get_local_dest(self, path: Path) -> Path: 'Gets local destination for copying.\n\n Gets local destination based on source path.\n\n Args:\n path: Path to build destination path from.\n\n Returns:\n str: Path pointing to local destination.\n ' dest = if str(path).startswith('~'): path = path.relative_to('~') if (self.category == 'global'): dest = f'{self.local_base}/global/{path}' elif (self.category == 'local'): dest = f'{self.local_base}/local/{path}' else: dest = f'{self.local_base}/custom/{path}' return Path(dest)
def _get_local_dest(self, path: Path) -> Path: 'Gets local destination for copying.\n\n Gets local destination based on source path.\n\n Args:\n path: Path to build destination path from.\n\n Returns:\n str: Path pointing to local destination.\n ' dest = if str(path).startswith('~'): path = path.relative_to('~') if (self.category == 'global'): dest = f'{self.local_base}/global/{path}' elif (self.category == 'local'): dest = f'{self.local_base}/local/{path}' else: dest = f'{self.local_base}/custom/{path}' return Path(dest)<|docstring|>Gets local destination for copying. Gets local destination based on source path. Args: path: Path to build destination path from. Returns: str: Path pointing to local destination.<|endoftext|>
04fd5c28098981d21b976189fd5999ffd8abc36fd7f73c4ae5da40c87acc5afe
def _get_local_src(self, path: Path) -> Path: 'Gets local source path for copying.\n\n Gets local source path based on passed source path.\n\n Args:\n path: Path to build local source path from.\n\n Returns:\n str: Path pointing to local source.\n ' src = '' if str(path).startswith('~'): path = Path(str(path).replace('~/', '')) if (self.category == 'global'): src = f'{self.local_base}/global{path}' elif (self.category == 'local'): src = f'{self.local_base}/local/{path}' else: src = f'{self.local_base}/custom/{path}' return Path(src)
Gets local source path for copying. Gets local source path based on passed source path. Args: path: Path to build local source path from. Returns: str: Path pointing to local source.
handlers/dotfile_handler.py
_get_local_src
tomislavperich/nomad
0
python
def _get_local_src(self, path: Path) -> Path: 'Gets local source path for copying.\n\n Gets local source path based on passed source path.\n\n Args:\n path: Path to build local source path from.\n\n Returns:\n str: Path pointing to local source.\n ' src = if str(path).startswith('~'): path = Path(str(path).replace('~/', )) if (self.category == 'global'): src = f'{self.local_base}/global{path}' elif (self.category == 'local'): src = f'{self.local_base}/local/{path}' else: src = f'{self.local_base}/custom/{path}' return Path(src)
def _get_local_src(self, path: Path) -> Path: 'Gets local source path for copying.\n\n Gets local source path based on passed source path.\n\n Args:\n path: Path to build local source path from.\n\n Returns:\n str: Path pointing to local source.\n ' src = if str(path).startswith('~'): path = Path(str(path).replace('~/', )) if (self.category == 'global'): src = f'{self.local_base}/global{path}' elif (self.category == 'local'): src = f'{self.local_base}/local/{path}' else: src = f'{self.local_base}/custom/{path}' return Path(src)<|docstring|>Gets local source path for copying. Gets local source path based on passed source path. Args: path: Path to build local source path from. Returns: str: Path pointing to local source.<|endoftext|>
fb9e53da6997cce51e771d8128e10662a634172611edf77c99990b23e390e4c5
def update(self) -> None: 'Fetches dotfiles from given path' destination = self._get_local_dest(self.path) try: path_type = self._get_path_type(self.absolute) handler = self.factory.get_handler(path_type) handler.update(self.absolute, destination) except Exception as e: print(f'[!] Skipping {self.path}: {e}')
Fetches dotfiles from given path
handlers/dotfile_handler.py
update
tomislavperich/nomad
0
python
def update(self) -> None: destination = self._get_local_dest(self.path) try: path_type = self._get_path_type(self.absolute) handler = self.factory.get_handler(path_type) handler.update(self.absolute, destination) except Exception as e: print(f'[!] Skipping {self.path}: {e}')
def update(self) -> None: destination = self._get_local_dest(self.path) try: path_type = self._get_path_type(self.absolute) handler = self.factory.get_handler(path_type) handler.update(self.absolute, destination) except Exception as e: print(f'[!] Skipping {self.path}: {e}')<|docstring|>Fetches dotfiles from given path<|endoftext|>
c8957070e3e0443a3296d401c8289fbe82d199f851c18db6a8ceaf5296281123
def bootstrap(self, backup: bool, overwrite: bool) -> None: 'Bootstraps dotfiles to given path.' src = self._get_local_src(self.path) try: path_type = self._get_path_type(src) handler = self.factory.get_handler(path_type) handler.bootstrap(src, self.absolute, backup, overwrite) except Exception as e: print(f'[!] Skipping {self.path}: {e}')
Bootstraps dotfiles to given path.
handlers/dotfile_handler.py
bootstrap
tomislavperich/nomad
0
python
def bootstrap(self, backup: bool, overwrite: bool) -> None: src = self._get_local_src(self.path) try: path_type = self._get_path_type(src) handler = self.factory.get_handler(path_type) handler.bootstrap(src, self.absolute, backup, overwrite) except Exception as e: print(f'[!] Skipping {self.path}: {e}')
def bootstrap(self, backup: bool, overwrite: bool) -> None: src = self._get_local_src(self.path) try: path_type = self._get_path_type(src) handler = self.factory.get_handler(path_type) handler.bootstrap(src, self.absolute, backup, overwrite) except Exception as e: print(f'[!] Skipping {self.path}: {e}')<|docstring|>Bootstraps dotfiles to given path.<|endoftext|>
ed10c555baa875ac2a4f7283f5c79b74fd9e5df622c6aa89314f6e3fbc0a988f
def is_matrix_spd(matrix: np.ndarray) -> bool: '\n Mengembalikan True jika matriks\n input adalah definit positif simetris.\n Mengembalikan False sebaliknya.\n >>> import numpy as np\n >>> dimension = 3\n >>> set_matrix = create_spd_matrix(dimension)\n >>> is_matrix_spd(set_matrix)\n True\n ' assert (np.shape(matrix)[0] == np.shape(matrix)[1]) if (np.allclose(matrix, matrix.T) is False): return False (eigen_value, _) = np.linalg.eigh(matrix) return bool(np.all((eigen_value > 0)))
Mengembalikan True jika matriks input adalah definit positif simetris. Mengembalikan False sebaliknya. >>> import numpy as np >>> dimension = 3 >>> set_matrix = create_spd_matrix(dimension) >>> is_matrix_spd(set_matrix) True
implementation/linear_algebra/conjugate_gradient.py
is_matrix_spd
reskimulud/Python
79
python
def is_matrix_spd(matrix: np.ndarray) -> bool: '\n Mengembalikan True jika matriks\n input adalah definit positif simetris.\n Mengembalikan False sebaliknya.\n >>> import numpy as np\n >>> dimension = 3\n >>> set_matrix = create_spd_matrix(dimension)\n >>> is_matrix_spd(set_matrix)\n True\n ' assert (np.shape(matrix)[0] == np.shape(matrix)[1]) if (np.allclose(matrix, matrix.T) is False): return False (eigen_value, _) = np.linalg.eigh(matrix) return bool(np.all((eigen_value > 0)))
def is_matrix_spd(matrix: np.ndarray) -> bool: '\n Mengembalikan True jika matriks\n input adalah definit positif simetris.\n Mengembalikan False sebaliknya.\n >>> import numpy as np\n >>> dimension = 3\n >>> set_matrix = create_spd_matrix(dimension)\n >>> is_matrix_spd(set_matrix)\n True\n ' assert (np.shape(matrix)[0] == np.shape(matrix)[1]) if (np.allclose(matrix, matrix.T) is False): return False (eigen_value, _) = np.linalg.eigh(matrix) return bool(np.all((eigen_value > 0)))<|docstring|>Mengembalikan True jika matriks input adalah definit positif simetris. Mengembalikan False sebaliknya. >>> import numpy as np >>> dimension = 3 >>> set_matrix = create_spd_matrix(dimension) >>> is_matrix_spd(set_matrix) True<|endoftext|>
56db85872710d8f7f9713dc4cc45b3b4d5792ba73338c60cde7f82cd88123e22
def create_spd_matrix(dimension: int) -> Any: '\n Mengembalikan matriks definit positif\n simetris yang diberi dimensi.\n ' random_matrix = np.random.randn(dimension, dimension) spd_matrix = np.dot(random_matrix, random_matrix.T) assert is_matrix_spd(spd_matrix) return spd_matrix
Mengembalikan matriks definit positif simetris yang diberi dimensi.
implementation/linear_algebra/conjugate_gradient.py
create_spd_matrix
reskimulud/Python
79
python
def create_spd_matrix(dimension: int) -> Any: '\n Mengembalikan matriks definit positif\n simetris yang diberi dimensi.\n ' random_matrix = np.random.randn(dimension, dimension) spd_matrix = np.dot(random_matrix, random_matrix.T) assert is_matrix_spd(spd_matrix) return spd_matrix
def create_spd_matrix(dimension: int) -> Any: '\n Mengembalikan matriks definit positif\n simetris yang diberi dimensi.\n ' random_matrix = np.random.randn(dimension, dimension) spd_matrix = np.dot(random_matrix, random_matrix.T) assert is_matrix_spd(spd_matrix) return spd_matrix<|docstring|>Mengembalikan matriks definit positif simetris yang diberi dimensi.<|endoftext|>
2bd3bfcb05fcd6b744b3fbc600736b8214683fffdd214ea36d9ab1bff3e14635
def conjugate_gradient(spd_matrix, load_vector, max_iterations=1000, tol=1e-08): '\n return solusi linear sistem np.dot(spd_matrix, x) = b\n >>> import numpy as np\n >>> spd_matrix_1= np.array([\n ... [8.73256573, -5.02034289, -2.68709226],\n ... [-5.02034289, 3.78188322, 0.91980451],\n ... [-2.68709226, 0.91980451, 1.94746467]])\n >>> b = np.array([\n ... [-5.80872761],\n ... [ 3.23807431],\n ... [ 1.95381422]])\n >>> conjugate_gradient(spd_matrix_1, b)\n array([[-0.63114139],\n [-0.01561498],\n [ 0.13979294]])\n ' assert (np.shape(spd_matrix)[0] == np.shape(spd_matrix)[1]) assert (np.shape(load_vector)[0] == np.shape(spd_matrix)[0]) assert is_matrix_spd(spd_matrix) x0 = np.zeros((np.shape(load_vector)[0], 1)) r0 = np.copy(load_vector) p0 = np.copy(r0) error_residual = 1000000000.0 error_x_solution = 1000000000.0 error = 1000000000.0 iterations = 0 while (error > tol): w = np.dot(spd_matrix, p0) alpha = (np.dot(r0.T, r0) / np.dot(p0.T, w)) x = (x0 + (alpha * p0)) r = (r0 - (alpha * w)) beta = (np.dot(r.T, r) / np.dot(r0.T, r0)) p = (r + (beta * p0)) error_residual = np.linalg.norm((r - r0)) error_x_solution = np.linalg.norm((x - x0)) error = np.maximum(error_residual, error_x_solution) x0 = np.copy(x) r0 = np.copy(r) p0 = np.copy(p) iterations += 1 if (iterations > max_iterations): break return x
return solusi linear sistem np.dot(spd_matrix, x) = b >>> import numpy as np >>> spd_matrix_1= np.array([ ... [8.73256573, -5.02034289, -2.68709226], ... [-5.02034289, 3.78188322, 0.91980451], ... [-2.68709226, 0.91980451, 1.94746467]]) >>> b = np.array([ ... [-5.80872761], ... [ 3.23807431], ... [ 1.95381422]]) >>> conjugate_gradient(spd_matrix_1, b) array([[-0.63114139], [-0.01561498], [ 0.13979294]])
implementation/linear_algebra/conjugate_gradient.py
conjugate_gradient
reskimulud/Python
79
python
def conjugate_gradient(spd_matrix, load_vector, max_iterations=1000, tol=1e-08): '\n return solusi linear sistem np.dot(spd_matrix, x) = b\n >>> import numpy as np\n >>> spd_matrix_1= np.array([\n ... [8.73256573, -5.02034289, -2.68709226],\n ... [-5.02034289, 3.78188322, 0.91980451],\n ... [-2.68709226, 0.91980451, 1.94746467]])\n >>> b = np.array([\n ... [-5.80872761],\n ... [ 3.23807431],\n ... [ 1.95381422]])\n >>> conjugate_gradient(spd_matrix_1, b)\n array([[-0.63114139],\n [-0.01561498],\n [ 0.13979294]])\n ' assert (np.shape(spd_matrix)[0] == np.shape(spd_matrix)[1]) assert (np.shape(load_vector)[0] == np.shape(spd_matrix)[0]) assert is_matrix_spd(spd_matrix) x0 = np.zeros((np.shape(load_vector)[0], 1)) r0 = np.copy(load_vector) p0 = np.copy(r0) error_residual = 1000000000.0 error_x_solution = 1000000000.0 error = 1000000000.0 iterations = 0 while (error > tol): w = np.dot(spd_matrix, p0) alpha = (np.dot(r0.T, r0) / np.dot(p0.T, w)) x = (x0 + (alpha * p0)) r = (r0 - (alpha * w)) beta = (np.dot(r.T, r) / np.dot(r0.T, r0)) p = (r + (beta * p0)) error_residual = np.linalg.norm((r - r0)) error_x_solution = np.linalg.norm((x - x0)) error = np.maximum(error_residual, error_x_solution) x0 = np.copy(x) r0 = np.copy(r) p0 = np.copy(p) iterations += 1 if (iterations > max_iterations): break return x
def conjugate_gradient(spd_matrix, load_vector, max_iterations=1000, tol=1e-08): '\n return solusi linear sistem np.dot(spd_matrix, x) = b\n >>> import numpy as np\n >>> spd_matrix_1= np.array([\n ... [8.73256573, -5.02034289, -2.68709226],\n ... [-5.02034289, 3.78188322, 0.91980451],\n ... [-2.68709226, 0.91980451, 1.94746467]])\n >>> b = np.array([\n ... [-5.80872761],\n ... [ 3.23807431],\n ... [ 1.95381422]])\n >>> conjugate_gradient(spd_matrix_1, b)\n array([[-0.63114139],\n [-0.01561498],\n [ 0.13979294]])\n ' assert (np.shape(spd_matrix)[0] == np.shape(spd_matrix)[1]) assert (np.shape(load_vector)[0] == np.shape(spd_matrix)[0]) assert is_matrix_spd(spd_matrix) x0 = np.zeros((np.shape(load_vector)[0], 1)) r0 = np.copy(load_vector) p0 = np.copy(r0) error_residual = 1000000000.0 error_x_solution = 1000000000.0 error = 1000000000.0 iterations = 0 while (error > tol): w = np.dot(spd_matrix, p0) alpha = (np.dot(r0.T, r0) / np.dot(p0.T, w)) x = (x0 + (alpha * p0)) r = (r0 - (alpha * w)) beta = (np.dot(r.T, r) / np.dot(r0.T, r0)) p = (r + (beta * p0)) error_residual = np.linalg.norm((r - r0)) error_x_solution = np.linalg.norm((x - x0)) error = np.maximum(error_residual, error_x_solution) x0 = np.copy(x) r0 = np.copy(r) p0 = np.copy(p) iterations += 1 if (iterations > max_iterations): break return x<|docstring|>return solusi linear sistem np.dot(spd_matrix, x) = b >>> import numpy as np >>> spd_matrix_1= np.array([ ... [8.73256573, -5.02034289, -2.68709226], ... [-5.02034289, 3.78188322, 0.91980451], ... [-2.68709226, 0.91980451, 1.94746467]]) >>> b = np.array([ ... [-5.80872761], ... [ 3.23807431], ... [ 1.95381422]]) >>> conjugate_gradient(spd_matrix_1, b) array([[-0.63114139], [-0.01561498], [ 0.13979294]])<|endoftext|>
ad95a5993472ee314211e221c4d69dccffa1e154072e4f7c109f4adeef8279b6
def testing_conjugate_gradient() -> None: '\n >>> testing_conjugate_gradient()\n ' dimension = 3 spd_matrix = create_spd_matrix(dimension) x_true = np.random.randn(dimension, 1) b = np.dot(spd_matrix, x_true) x_numpy = np.linalg.solve(spd_matrix, b) x_conjugate_gradient = conjugate_gradient(spd_matrix, b) assert (np.linalg.norm((x_numpy - x_true)) <= 1e-06) assert (np.linalg.norm((x_conjugate_gradient - x_true)) <= 1e-06)
>>> testing_conjugate_gradient()
implementation/linear_algebra/conjugate_gradient.py
testing_conjugate_gradient
reskimulud/Python
79
python
def testing_conjugate_gradient() -> None: '\n \n ' dimension = 3 spd_matrix = create_spd_matrix(dimension) x_true = np.random.randn(dimension, 1) b = np.dot(spd_matrix, x_true) x_numpy = np.linalg.solve(spd_matrix, b) x_conjugate_gradient = conjugate_gradient(spd_matrix, b) assert (np.linalg.norm((x_numpy - x_true)) <= 1e-06) assert (np.linalg.norm((x_conjugate_gradient - x_true)) <= 1e-06)
def testing_conjugate_gradient() -> None: '\n \n ' dimension = 3 spd_matrix = create_spd_matrix(dimension) x_true = np.random.randn(dimension, 1) b = np.dot(spd_matrix, x_true) x_numpy = np.linalg.solve(spd_matrix, b) x_conjugate_gradient = conjugate_gradient(spd_matrix, b) assert (np.linalg.norm((x_numpy - x_true)) <= 1e-06) assert (np.linalg.norm((x_conjugate_gradient - x_true)) <= 1e-06)<|docstring|>>>> testing_conjugate_gradient()<|endoftext|>
67a58621ee17448f1e63bc14b6c02f17319ef350d6f2d8cc4bda1e82dc7af658
def smallest_size_at_least(height, width, resize_min): 'Computes new shape with the smallest side equal to `smallest_side`.\n\n Computes new shape with the smallest side equal to `smallest_side` while\n preserving the original aspect ratio.\n\n Args:\n height: an int32 scalar tensor indicating the current height.\n width: an int32 scalar tensor indicating the current width.\n resize_min: A python integer or scalar `Tensor` indicating the size of\n the smallest side after resize.\n\n Returns:\n new_height: an int32 scalar tensor indicating the new height.\n new_width: an int32 scalar tensor indicating the new width.\n ' resize_min = tf.cast(resize_min, tf.float32) (height, width) = (tf.cast(height, tf.float32), tf.cast(width, tf.float32)) smaller_dim = tf.minimum(height, width) scale_ratio = (resize_min / smaller_dim) new_height = tf.cast(tf.round((height * scale_ratio)), tf.int32) new_width = tf.cast(tf.round((width * scale_ratio)), tf.int32) return (new_height, new_width)
Computes new shape with the smallest side equal to `smallest_side`. Computes new shape with the smallest side equal to `smallest_side` while preserving the original aspect ratio. Args: height: an int32 scalar tensor indicating the current height. width: an int32 scalar tensor indicating the current width. resize_min: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: new_height: an int32 scalar tensor indicating the new height. new_width: an int32 scalar tensor indicating the new width.
dataset/preprocess_dataset.py
smallest_size_at_least
bolide2006/r329_aipu
0
python
def smallest_size_at_least(height, width, resize_min): 'Computes new shape with the smallest side equal to `smallest_side`.\n\n Computes new shape with the smallest side equal to `smallest_side` while\n preserving the original aspect ratio.\n\n Args:\n height: an int32 scalar tensor indicating the current height.\n width: an int32 scalar tensor indicating the current width.\n resize_min: A python integer or scalar `Tensor` indicating the size of\n the smallest side after resize.\n\n Returns:\n new_height: an int32 scalar tensor indicating the new height.\n new_width: an int32 scalar tensor indicating the new width.\n ' resize_min = tf.cast(resize_min, tf.float32) (height, width) = (tf.cast(height, tf.float32), tf.cast(width, tf.float32)) smaller_dim = tf.minimum(height, width) scale_ratio = (resize_min / smaller_dim) new_height = tf.cast(tf.round((height * scale_ratio)), tf.int32) new_width = tf.cast(tf.round((width * scale_ratio)), tf.int32) return (new_height, new_width)
def smallest_size_at_least(height, width, resize_min): 'Computes new shape with the smallest side equal to `smallest_side`.\n\n Computes new shape with the smallest side equal to `smallest_side` while\n preserving the original aspect ratio.\n\n Args:\n height: an int32 scalar tensor indicating the current height.\n width: an int32 scalar tensor indicating the current width.\n resize_min: A python integer or scalar `Tensor` indicating the size of\n the smallest side after resize.\n\n Returns:\n new_height: an int32 scalar tensor indicating the new height.\n new_width: an int32 scalar tensor indicating the new width.\n ' resize_min = tf.cast(resize_min, tf.float32) (height, width) = (tf.cast(height, tf.float32), tf.cast(width, tf.float32)) smaller_dim = tf.minimum(height, width) scale_ratio = (resize_min / smaller_dim) new_height = tf.cast(tf.round((height * scale_ratio)), tf.int32) new_width = tf.cast(tf.round((width * scale_ratio)), tf.int32) return (new_height, new_width)<|docstring|>Computes new shape with the smallest side equal to `smallest_side`. Computes new shape with the smallest side equal to `smallest_side` while preserving the original aspect ratio. Args: height: an int32 scalar tensor indicating the current height. width: an int32 scalar tensor indicating the current width. resize_min: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: new_height: an int32 scalar tensor indicating the new height. new_width: an int32 scalar tensor indicating the new width.<|endoftext|>
a7ae15b1c6e25d6748c1a69cec955f72803ace5684400043e05bd92a86945e8f
def resize_image(image, height, width, method='BILINEAR'): 'Simple wrapper around tf.resize_images.\n\n This is primarily to make sure we use the same `ResizeMethod` and other\n details each time.\n\n Args:\n image: A 3-D image `Tensor`.\n height: The target height for the resized image.\n width: The target width for the resized image.\n\n Returns:\n resized_image: A 3-D tensor containing the resized image. The first two\n dimensions have the shape [height, width].\n ' resize_func = (tf.image.ResizeMethod.NEAREST_NEIGHBOR if (method == 'NEAREST') else tf.image.ResizeMethod.BILINEAR) return tf.image.resize_images(image, [height, width], method=resize_func, align_corners=False)
Simple wrapper around tf.resize_images. This is primarily to make sure we use the same `ResizeMethod` and other details each time. Args: image: A 3-D image `Tensor`. height: The target height for the resized image. width: The target width for the resized image. Returns: resized_image: A 3-D tensor containing the resized image. The first two dimensions have the shape [height, width].
dataset/preprocess_dataset.py
resize_image
bolide2006/r329_aipu
0
python
def resize_image(image, height, width, method='BILINEAR'): 'Simple wrapper around tf.resize_images.\n\n This is primarily to make sure we use the same `ResizeMethod` and other\n details each time.\n\n Args:\n image: A 3-D image `Tensor`.\n height: The target height for the resized image.\n width: The target width for the resized image.\n\n Returns:\n resized_image: A 3-D tensor containing the resized image. The first two\n dimensions have the shape [height, width].\n ' resize_func = (tf.image.ResizeMethod.NEAREST_NEIGHBOR if (method == 'NEAREST') else tf.image.ResizeMethod.BILINEAR) return tf.image.resize_images(image, [height, width], method=resize_func, align_corners=False)
def resize_image(image, height, width, method='BILINEAR'): 'Simple wrapper around tf.resize_images.\n\n This is primarily to make sure we use the same `ResizeMethod` and other\n details each time.\n\n Args:\n image: A 3-D image `Tensor`.\n height: The target height for the resized image.\n width: The target width for the resized image.\n\n Returns:\n resized_image: A 3-D tensor containing the resized image. The first two\n dimensions have the shape [height, width].\n ' resize_func = (tf.image.ResizeMethod.NEAREST_NEIGHBOR if (method == 'NEAREST') else tf.image.ResizeMethod.BILINEAR) return tf.image.resize_images(image, [height, width], method=resize_func, align_corners=False)<|docstring|>Simple wrapper around tf.resize_images. This is primarily to make sure we use the same `ResizeMethod` and other details each time. Args: image: A 3-D image `Tensor`. height: The target height for the resized image. width: The target width for the resized image. Returns: resized_image: A 3-D tensor containing the resized image. The first two dimensions have the shape [height, width].<|endoftext|>
cc4dd2d1c3afe57dceba709c14567bb6ca56b9801700d9e4451b6491d601bc48
def aspect_preserving_resize(image, resize_min, channels=3, method='BILINEAR'): 'Resize images preserving the original aspect ratio.\n\n Args:\n image: A 3-D image `Tensor`.\n resize_min: A python integer or scalar `Tensor` indicating the size of\n the smallest side after resize.\n\n Returns:\n resized_image: A 3-D tensor containing the resized image.\n ' shape = tf.shape(image) (height, width) = (shape[0], shape[1]) (new_height, new_width) = smallest_size_at_least(height, width, resize_min) return resize_image(image, new_height, new_width, method)
Resize images preserving the original aspect ratio. Args: image: A 3-D image `Tensor`. resize_min: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: resized_image: A 3-D tensor containing the resized image.
dataset/preprocess_dataset.py
aspect_preserving_resize
bolide2006/r329_aipu
0
python
def aspect_preserving_resize(image, resize_min, channels=3, method='BILINEAR'): 'Resize images preserving the original aspect ratio.\n\n Args:\n image: A 3-D image `Tensor`.\n resize_min: A python integer or scalar `Tensor` indicating the size of\n the smallest side after resize.\n\n Returns:\n resized_image: A 3-D tensor containing the resized image.\n ' shape = tf.shape(image) (height, width) = (shape[0], shape[1]) (new_height, new_width) = smallest_size_at_least(height, width, resize_min) return resize_image(image, new_height, new_width, method)
def aspect_preserving_resize(image, resize_min, channels=3, method='BILINEAR'): 'Resize images preserving the original aspect ratio.\n\n Args:\n image: A 3-D image `Tensor`.\n resize_min: A python integer or scalar `Tensor` indicating the size of\n the smallest side after resize.\n\n Returns:\n resized_image: A 3-D tensor containing the resized image.\n ' shape = tf.shape(image) (height, width) = (shape[0], shape[1]) (new_height, new_width) = smallest_size_at_least(height, width, resize_min) return resize_image(image, new_height, new_width, method)<|docstring|>Resize images preserving the original aspect ratio. Args: image: A 3-D image `Tensor`. resize_min: A python integer or scalar `Tensor` indicating the size of the smallest side after resize. Returns: resized_image: A 3-D tensor containing the resized image.<|endoftext|>
c310ea0577d8785bcf5b7961cfa0d754ab325588584bf72e938cd98ef17db505
def central_crop(image, crop_height, crop_width, channels=3): 'Performs central crops of the given image list.\n\n Args:\n image: a 3-D image tensor\n crop_height: the height of the image following the crop.\n crop_width: the width of the image following the crop.\n\n Returns:\n 3-D tensor with cropped image.\n ' shape = tf.shape(image) (height, width) = (shape[0], shape[1]) amount_to_be_cropped_h = (height - crop_height) crop_top = (amount_to_be_cropped_h // 2) amount_to_be_cropped_w = (width - crop_width) crop_left = (amount_to_be_cropped_w // 2) size_assertion = tf.Assert(tf.logical_and(tf.greater_equal(height, crop_height), tf.greater_equal(width, crop_width)), ['Crop size greater than the image size.']) with tf.control_dependencies([size_assertion]): if (channels == 1): image = tf.squeeze(image) crop_start = [crop_top, crop_left] crop_shape = [crop_height, crop_width] elif (channels >= 3): crop_start = [crop_top, crop_left, 0] crop_shape = [crop_height, crop_width, (- 1)] image = tf.slice(image, crop_start, crop_shape) return tf.reshape(image, [crop_height, crop_width, (- 1)])
Performs central crops of the given image list. Args: image: a 3-D image tensor crop_height: the height of the image following the crop. crop_width: the width of the image following the crop. Returns: 3-D tensor with cropped image.
dataset/preprocess_dataset.py
central_crop
bolide2006/r329_aipu
0
python
def central_crop(image, crop_height, crop_width, channels=3): 'Performs central crops of the given image list.\n\n Args:\n image: a 3-D image tensor\n crop_height: the height of the image following the crop.\n crop_width: the width of the image following the crop.\n\n Returns:\n 3-D tensor with cropped image.\n ' shape = tf.shape(image) (height, width) = (shape[0], shape[1]) amount_to_be_cropped_h = (height - crop_height) crop_top = (amount_to_be_cropped_h // 2) amount_to_be_cropped_w = (width - crop_width) crop_left = (amount_to_be_cropped_w // 2) size_assertion = tf.Assert(tf.logical_and(tf.greater_equal(height, crop_height), tf.greater_equal(width, crop_width)), ['Crop size greater than the image size.']) with tf.control_dependencies([size_assertion]): if (channels == 1): image = tf.squeeze(image) crop_start = [crop_top, crop_left] crop_shape = [crop_height, crop_width] elif (channels >= 3): crop_start = [crop_top, crop_left, 0] crop_shape = [crop_height, crop_width, (- 1)] image = tf.slice(image, crop_start, crop_shape) return tf.reshape(image, [crop_height, crop_width, (- 1)])
def central_crop(image, crop_height, crop_width, channels=3): 'Performs central crops of the given image list.\n\n Args:\n image: a 3-D image tensor\n crop_height: the height of the image following the crop.\n crop_width: the width of the image following the crop.\n\n Returns:\n 3-D tensor with cropped image.\n ' shape = tf.shape(image) (height, width) = (shape[0], shape[1]) amount_to_be_cropped_h = (height - crop_height) crop_top = (amount_to_be_cropped_h // 2) amount_to_be_cropped_w = (width - crop_width) crop_left = (amount_to_be_cropped_w // 2) size_assertion = tf.Assert(tf.logical_and(tf.greater_equal(height, crop_height), tf.greater_equal(width, crop_width)), ['Crop size greater than the image size.']) with tf.control_dependencies([size_assertion]): if (channels == 1): image = tf.squeeze(image) crop_start = [crop_top, crop_left] crop_shape = [crop_height, crop_width] elif (channels >= 3): crop_start = [crop_top, crop_left, 0] crop_shape = [crop_height, crop_width, (- 1)] image = tf.slice(image, crop_start, crop_shape) return tf.reshape(image, [crop_height, crop_width, (- 1)])<|docstring|>Performs central crops of the given image list. Args: image: a 3-D image tensor crop_height: the height of the image following the crop. crop_width: the width of the image following the crop. Returns: 3-D tensor with cropped image.<|endoftext|>
3a1ff86ccbe563ab787b43a78ada8d16a1fefbef6b032b1a10ce5aadcae55203
def _block_diag(arrays): ' Create block-diagonal matrix from `arrays`. ' result = None for arr in arrays: arr[(arr == (- 0))] = 0 if (result is None): result = arr else: (r_rows, r_cols) = result.shape (a_rows, a_cols) = arr.shape result = np.vstack((np.hstack((result, np.zeros((r_rows, a_cols)))), np.hstack((np.zeros((a_rows, r_cols)), arr)))) return result
Create block-diagonal matrix from `arrays`.
openmdao.lib/src/openmdao/lib/geometry/stl_group.py
_block_diag
mjfwest/OpenMDAO-Framework
69
python
def _block_diag(arrays): ' ' result = None for arr in arrays: arr[(arr == (- 0))] = 0 if (result is None): result = arr else: (r_rows, r_cols) = result.shape (a_rows, a_cols) = arr.shape result = np.vstack((np.hstack((result, np.zeros((r_rows, a_cols)))), np.hstack((np.zeros((a_rows, r_cols)), arr)))) return result
def _block_diag(arrays): ' ' result = None for arr in arrays: arr[(arr == (- 0))] = 0 if (result is None): result = arr else: (r_rows, r_cols) = result.shape (a_rows, a_cols) = arr.shape result = np.vstack((np.hstack((result, np.zeros((r_rows, a_cols)))), np.hstack((np.zeros((a_rows, r_cols)), arr)))) return result<|docstring|>Create block-diagonal matrix from `arrays`.<|endoftext|>
ff79cbf4e166fa28984c0b436788c4b62d15f24b47a0cb7cfa7c3c62be996ff6
def _build_io(self): " returns a dictionary of io sets key'd to component names" self.comp_param_count = {} params = [] for comp in self._comps: name = comp.name if isinstance(comp, Body): val = comp.delta_C[(:, 0)] meta = {'value': val, 'iotype': 'in', 'shape': val.shape, 'desc': 'axial location of control points for the ffd'} tup = ((name, 'X'), meta) params.append(tup) n_X = val.shape[0] val = comp.delta_C[(:, 1)] meta = {'value': val, 'iotype': 'in', 'shape': val.shape, 'desc': 'radial location of control points for the ffd'} tup = ((name, 'R'), meta) params.append(tup) n_R = val.shape[0] self.comp_param_count[comp] = (n_X, n_R) else: val = comp.delta_Cc[(:, 0)] meta = {'value': val, 'iotype': 'in', 'shape': val.shape, 'desc': 'axial location of the control points for the centerline of the shell'} tup = ((name, 'X'), meta) params.append(tup) n_X = val.shape[0] val = comp.delta_Cc[(:, 1)] meta = {'value': val, 'iotype': 'in', 'shape': val.shape, 'desc': 'radial location of the control points for the centerline of the shell'} tup = ((name, 'R'), meta) params.append(tup) n_R = val.shape[0] val = comp.delta_Ct[(:, 1)] meta = {'value': val, 'iotype': 'in', 'shape': val.shape, 'desc': 'thickness of the shell at each axial station'} tup = ((name, 'thickness'), meta) params.append(tup) n_T = val.shape[0] self.comp_param_count[comp] = (n_X, n_R, n_T) points = [] triangles = [] i_offset = 0 n_controls = 0 for comp in self._comps: n_controls += sum(self.comp_param_count[comp]) if isinstance(comp, Body): points.extend(comp.stl.points) size = len(points) triangles.extend((comp.stl.triangles + i_offset)) i_offset = size else: points.extend(comp.outer_stl.points) size = len(points) triangles.extend((comp.outer_stl.triangles + i_offset)) i_offset = size points.extend(comp.inner_stl.points) size = len(points) triangles.extend((comp.inner_stl.triangles + i_offset)) i_offset = size self.points = np.array(points) self.n_controls = n_controls self.n_points = len(points) 
self.triangles = np.array(triangles) self.n_triangles = len(triangles) return params
returns a dictionary of io sets key'd to component names
openmdao.lib/src/openmdao/lib/geometry/stl_group.py
_build_io
mjfwest/OpenMDAO-Framework
69
python
def _build_io(self): " " self.comp_param_count = {} params = [] for comp in self._comps: name = comp.name if isinstance(comp, Body): val = comp.delta_C[(:, 0)] meta = {'value': val, 'iotype': 'in', 'shape': val.shape, 'desc': 'axial location of control points for the ffd'} tup = ((name, 'X'), meta) params.append(tup) n_X = val.shape[0] val = comp.delta_C[(:, 1)] meta = {'value': val, 'iotype': 'in', 'shape': val.shape, 'desc': 'radial location of control points for the ffd'} tup = ((name, 'R'), meta) params.append(tup) n_R = val.shape[0] self.comp_param_count[comp] = (n_X, n_R) else: val = comp.delta_Cc[(:, 0)] meta = {'value': val, 'iotype': 'in', 'shape': val.shape, 'desc': 'axial location of the control points for the centerline of the shell'} tup = ((name, 'X'), meta) params.append(tup) n_X = val.shape[0] val = comp.delta_Cc[(:, 1)] meta = {'value': val, 'iotype': 'in', 'shape': val.shape, 'desc': 'radial location of the control points for the centerline of the shell'} tup = ((name, 'R'), meta) params.append(tup) n_R = val.shape[0] val = comp.delta_Ct[(:, 1)] meta = {'value': val, 'iotype': 'in', 'shape': val.shape, 'desc': 'thickness of the shell at each axial station'} tup = ((name, 'thickness'), meta) params.append(tup) n_T = val.shape[0] self.comp_param_count[comp] = (n_X, n_R, n_T) points = [] triangles = [] i_offset = 0 n_controls = 0 for comp in self._comps: n_controls += sum(self.comp_param_count[comp]) if isinstance(comp, Body): points.extend(comp.stl.points) size = len(points) triangles.extend((comp.stl.triangles + i_offset)) i_offset = size else: points.extend(comp.outer_stl.points) size = len(points) triangles.extend((comp.outer_stl.triangles + i_offset)) i_offset = size points.extend(comp.inner_stl.points) size = len(points) triangles.extend((comp.inner_stl.triangles + i_offset)) i_offset = size self.points = np.array(points) self.n_controls = n_controls self.n_points = len(points) self.triangles = np.array(triangles) self.n_triangles = 
len(triangles) return params
def _build_io(self): " " self.comp_param_count = {} params = [] for comp in self._comps: name = comp.name if isinstance(comp, Body): val = comp.delta_C[(:, 0)] meta = {'value': val, 'iotype': 'in', 'shape': val.shape, 'desc': 'axial location of control points for the ffd'} tup = ((name, 'X'), meta) params.append(tup) n_X = val.shape[0] val = comp.delta_C[(:, 1)] meta = {'value': val, 'iotype': 'in', 'shape': val.shape, 'desc': 'radial location of control points for the ffd'} tup = ((name, 'R'), meta) params.append(tup) n_R = val.shape[0] self.comp_param_count[comp] = (n_X, n_R) else: val = comp.delta_Cc[(:, 0)] meta = {'value': val, 'iotype': 'in', 'shape': val.shape, 'desc': 'axial location of the control points for the centerline of the shell'} tup = ((name, 'X'), meta) params.append(tup) n_X = val.shape[0] val = comp.delta_Cc[(:, 1)] meta = {'value': val, 'iotype': 'in', 'shape': val.shape, 'desc': 'radial location of the control points for the centerline of the shell'} tup = ((name, 'R'), meta) params.append(tup) n_R = val.shape[0] val = comp.delta_Ct[(:, 1)] meta = {'value': val, 'iotype': 'in', 'shape': val.shape, 'desc': 'thickness of the shell at each axial station'} tup = ((name, 'thickness'), meta) params.append(tup) n_T = val.shape[0] self.comp_param_count[comp] = (n_X, n_R, n_T) points = [] triangles = [] i_offset = 0 n_controls = 0 for comp in self._comps: n_controls += sum(self.comp_param_count[comp]) if isinstance(comp, Body): points.extend(comp.stl.points) size = len(points) triangles.extend((comp.stl.triangles + i_offset)) i_offset = size else: points.extend(comp.outer_stl.points) size = len(points) triangles.extend((comp.outer_stl.triangles + i_offset)) i_offset = size points.extend(comp.inner_stl.points) size = len(points) triangles.extend((comp.inner_stl.triangles + i_offset)) i_offset = size self.points = np.array(points) self.n_controls = n_controls self.n_points = len(points) self.triangles = np.array(triangles) self.n_triangles = 
len(triangles) return params<|docstring|>returns a dictionary of io sets key'd to component names<|endoftext|>
f7e5f6d23caf396cb9460746104596114c189b19f69055fea78af7f0019beaa3
def deform(self, **kwargs): ' deforms the geometry applying the new locations for the control points, given by body name' for (name, delta_C) in kwargs.iteritems(): i = self._i_comps[name] comp = self._comps[i] if isinstance(comp, Body): comp.deform(delta_C) else: comp.deform(*delta_C) self.list_parameters()
deforms the geometry applying the new locations for the control points, given by body name
openmdao.lib/src/openmdao/lib/geometry/stl_group.py
deform
mjfwest/OpenMDAO-Framework
69
python
def deform(self, **kwargs): ' ' for (name, delta_C) in kwargs.iteritems(): i = self._i_comps[name] comp = self._comps[i] if isinstance(comp, Body): comp.deform(delta_C) else: comp.deform(*delta_C) self.list_parameters()
def deform(self, **kwargs): ' ' for (name, delta_C) in kwargs.iteritems(): i = self._i_comps[name] comp = self._comps[i] if isinstance(comp, Body): comp.deform(delta_C) else: comp.deform(*delta_C) self.list_parameters()<|docstring|>deforms the geometry applying the new locations for the control points, given by body name<|endoftext|>
c5db9bc63ae298da32a9ba56af4ddce26a2cb526e5d556ce3d39261ee7c37ecc
def _build_ascii_stl(self, facets): 'returns a list of ascii lines for the stl file ' lines = ['solid ffd_geom'] for facet in facets: lines.append(ASCII_FACET.format(face=facet)) lines.append('endsolid ffd_geom') return lines
returns a list of ascii lines for the stl file
openmdao.lib/src/openmdao/lib/geometry/stl_group.py
_build_ascii_stl
mjfwest/OpenMDAO-Framework
69
python
def _build_ascii_stl(self, facets): ' ' lines = ['solid ffd_geom'] for facet in facets: lines.append(ASCII_FACET.format(face=facet)) lines.append('endsolid ffd_geom') return lines
def _build_ascii_stl(self, facets): ' ' lines = ['solid ffd_geom'] for facet in facets: lines.append(ASCII_FACET.format(face=facet)) lines.append('endsolid ffd_geom') return lines<|docstring|>returns a list of ascii lines for the stl file<|endoftext|>
af9fc9e8e7c236fe488e3f9954d6d4e61cd81fcb76b6496ca7f435abd094325e
def _build_binary_stl(self, facets): 'returns a string of binary binary data for the stl file' lines = [struct.pack(BINARY_HEADER, b'Binary STL Writer', len(facets))] for facet in facets: facet = list(facet) facet.append(0) lines.append(struct.pack(BINARY_FACET, *facet)) return lines
returns a string of binary binary data for the stl file
openmdao.lib/src/openmdao/lib/geometry/stl_group.py
_build_binary_stl
mjfwest/OpenMDAO-Framework
69
python
def _build_binary_stl(self, facets): lines = [struct.pack(BINARY_HEADER, b'Binary STL Writer', len(facets))] for facet in facets: facet = list(facet) facet.append(0) lines.append(struct.pack(BINARY_FACET, *facet)) return lines
def _build_binary_stl(self, facets): lines = [struct.pack(BINARY_HEADER, b'Binary STL Writer', len(facets))] for facet in facets: facet = list(facet) facet.append(0) lines.append(struct.pack(BINARY_FACET, *facet)) return lines<|docstring|>returns a string of binary binary data for the stl file<|endoftext|>
0e778363d2721445b57cf5c63aaf783969b7ba8acfc16b547a2b04ce42313777
def writeSTL(self, file_name, ascii=False): 'outputs an STL file' facets = [] for comp in self._comps: if isinstance(comp, Body): facets.extend(comp.stl.get_facets()) else: facets.extend(comp.outer_stl.get_facets()) facets.extend(comp.inner_stl.get_facets()) f = open(file_name, 'w') if ascii: lines = self._build_ascii_stl(facets) f.write('\n'.join(lines)) else: data = self._build_binary_stl(facets) f.write(''.join(data)) f.close()
outputs an STL file
openmdao.lib/src/openmdao/lib/geometry/stl_group.py
writeSTL
mjfwest/OpenMDAO-Framework
69
python
def writeSTL(self, file_name, ascii=False): facets = [] for comp in self._comps: if isinstance(comp, Body): facets.extend(comp.stl.get_facets()) else: facets.extend(comp.outer_stl.get_facets()) facets.extend(comp.inner_stl.get_facets()) f = open(file_name, 'w') if ascii: lines = self._build_ascii_stl(facets) f.write('\n'.join(lines)) else: data = self._build_binary_stl(facets) f.write(.join(data)) f.close()
def writeSTL(self, file_name, ascii=False): facets = [] for comp in self._comps: if isinstance(comp, Body): facets.extend(comp.stl.get_facets()) else: facets.extend(comp.outer_stl.get_facets()) facets.extend(comp.inner_stl.get_facets()) f = open(file_name, 'w') if ascii: lines = self._build_ascii_stl(facets) f.write('\n'.join(lines)) else: data = self._build_binary_stl(facets) f.write(.join(data)) f.close()<|docstring|>outputs an STL file<|endoftext|>
216a23bd1431c8cf120baf9c515f620ae9752bc52b2c421a64be8d68d45ad77e
def writeFEPOINT(self, stream): 'writes out a new FEPOINT file with the given name, using the supplied points.\n derivs is of size (3,len(points),len(control_points)), giving matricies of\n X,Y,Z drivatives\n\n jacobian should have a shape of (len(points),len(control_points))' self.provideJ() lines = ['TITLE = "FFD_geom"'] var_line = 'VARIABLES = "X" "Y" "Z" "ID" ' deriv_X_names = [] deriv_R_names = [] deriv_T_names = [] deriv_tmpl = string.Template('"dx_d${name}_${type}$i" "dy_d${name}_${type}$i" "dz_d${name}_${type}$i"') for comp in self._comps: if isinstance(comp, Body): deriv_X_names.extend([deriv_tmpl.substitute({'name': comp.name, 'i': str(i), 'type': 'X'}) for i in xrange(0, comp.n_controls)]) deriv_R_names.extend([deriv_tmpl.substitute({'name': comp.name, 'i': str(i), 'type': 'R'}) for i in xrange(0, comp.n_controls)]) else: deriv_X_names.extend([deriv_tmpl.substitute({'name': comp.name, 'i': str(i), 'type': 'X'}) for i in xrange(0, comp.n_c_controls)]) deriv_R_names.extend([deriv_tmpl.substitute({'name': comp.name, 'i': str(i), 'type': 'R'}) for i in xrange(0, comp.n_c_controls)]) deriv_T_names.extend([deriv_tmpl.substitute({'name': comp.name, 'i': str(i), 'type': 'T'}) for i in xrange(0, comp.n_t_controls)]) var_line += ' '.join(deriv_X_names) var_line += ' '.join(deriv_R_names) var_line += ' '.join(deriv_T_names) lines.append(var_line) lines.append(('ZONE T = group0, I = %d, J = %d, F=FEPOINT' % (self.n_points, self.n_triangles))) nx = (3 * self.dXqdC.shape[1]) nr = (3 * self.dYqdCr.shape[1]) nt = (3 * self.dYqdCt.shape[1]) j_cols = ((nx + nr) + nt) for (i, p) in enumerate(self.points): line = ('%.8f %.8f %.8f %d ' % (p[0], p[1], p[2], (i + 1))) deriv_values = np.zeros((j_cols,)) deriv_values[:nx:3] = self.dXqdC[i] deriv_values[(nx + 1):(nx + nr):3] = self.dYqdCr[i] deriv_values[(nx + 2):(nx + nr):3] = self.dZqdCr[i] deriv_values[((nx + nr) + 1)::3] = self.dYqdCt[i] deriv_values[((nx + nr) + 2)::3] = self.dZqdCt[i] line += ' '.join(np.char.mod('%.8f', 
deriv_values)) lines.append(line) for tri in self.triangles: line = ('%d %d %d %d' % ((tri[0] + 1), (tri[1] + 1), (tri[2] + 1), (tri[2] + 1))) lines.append(line) needs_close = False if isinstance(stream, basestring): stream = open(stream, 'w') needs_close = True ((print >> stream), '\n'.join(lines)) if needs_close: stream.close()
writes out a new FEPOINT file with the given name, using the supplied points. derivs is of size (3,len(points),len(control_points)), giving matricies of X,Y,Z drivatives jacobian should have a shape of (len(points),len(control_points))
openmdao.lib/src/openmdao/lib/geometry/stl_group.py
writeFEPOINT
mjfwest/OpenMDAO-Framework
69
python
def writeFEPOINT(self, stream): 'writes out a new FEPOINT file with the given name, using the supplied points.\n derivs is of size (3,len(points),len(control_points)), giving matricies of\n X,Y,Z drivatives\n\n jacobian should have a shape of (len(points),len(control_points))' self.provideJ() lines = ['TITLE = "FFD_geom"'] var_line = 'VARIABLES = "X" "Y" "Z" "ID" ' deriv_X_names = [] deriv_R_names = [] deriv_T_names = [] deriv_tmpl = string.Template('"dx_d${name}_${type}$i" "dy_d${name}_${type}$i" "dz_d${name}_${type}$i"') for comp in self._comps: if isinstance(comp, Body): deriv_X_names.extend([deriv_tmpl.substitute({'name': comp.name, 'i': str(i), 'type': 'X'}) for i in xrange(0, comp.n_controls)]) deriv_R_names.extend([deriv_tmpl.substitute({'name': comp.name, 'i': str(i), 'type': 'R'}) for i in xrange(0, comp.n_controls)]) else: deriv_X_names.extend([deriv_tmpl.substitute({'name': comp.name, 'i': str(i), 'type': 'X'}) for i in xrange(0, comp.n_c_controls)]) deriv_R_names.extend([deriv_tmpl.substitute({'name': comp.name, 'i': str(i), 'type': 'R'}) for i in xrange(0, comp.n_c_controls)]) deriv_T_names.extend([deriv_tmpl.substitute({'name': comp.name, 'i': str(i), 'type': 'T'}) for i in xrange(0, comp.n_t_controls)]) var_line += ' '.join(deriv_X_names) var_line += ' '.join(deriv_R_names) var_line += ' '.join(deriv_T_names) lines.append(var_line) lines.append(('ZONE T = group0, I = %d, J = %d, F=FEPOINT' % (self.n_points, self.n_triangles))) nx = (3 * self.dXqdC.shape[1]) nr = (3 * self.dYqdCr.shape[1]) nt = (3 * self.dYqdCt.shape[1]) j_cols = ((nx + nr) + nt) for (i, p) in enumerate(self.points): line = ('%.8f %.8f %.8f %d ' % (p[0], p[1], p[2], (i + 1))) deriv_values = np.zeros((j_cols,)) deriv_values[:nx:3] = self.dXqdC[i] deriv_values[(nx + 1):(nx + nr):3] = self.dYqdCr[i] deriv_values[(nx + 2):(nx + nr):3] = self.dZqdCr[i] deriv_values[((nx + nr) + 1)::3] = self.dYqdCt[i] deriv_values[((nx + nr) + 2)::3] = self.dZqdCt[i] line += ' '.join(np.char.mod('%.8f', 
deriv_values)) lines.append(line) for tri in self.triangles: line = ('%d %d %d %d' % ((tri[0] + 1), (tri[1] + 1), (tri[2] + 1), (tri[2] + 1))) lines.append(line) needs_close = False if isinstance(stream, basestring): stream = open(stream, 'w') needs_close = True ((print >> stream), '\n'.join(lines)) if needs_close: stream.close()
def writeFEPOINT(self, stream): 'writes out a new FEPOINT file with the given name, using the supplied points.\n derivs is of size (3,len(points),len(control_points)), giving matricies of\n X,Y,Z drivatives\n\n jacobian should have a shape of (len(points),len(control_points))' self.provideJ() lines = ['TITLE = "FFD_geom"'] var_line = 'VARIABLES = "X" "Y" "Z" "ID" ' deriv_X_names = [] deriv_R_names = [] deriv_T_names = [] deriv_tmpl = string.Template('"dx_d${name}_${type}$i" "dy_d${name}_${type}$i" "dz_d${name}_${type}$i"') for comp in self._comps: if isinstance(comp, Body): deriv_X_names.extend([deriv_tmpl.substitute({'name': comp.name, 'i': str(i), 'type': 'X'}) for i in xrange(0, comp.n_controls)]) deriv_R_names.extend([deriv_tmpl.substitute({'name': comp.name, 'i': str(i), 'type': 'R'}) for i in xrange(0, comp.n_controls)]) else: deriv_X_names.extend([deriv_tmpl.substitute({'name': comp.name, 'i': str(i), 'type': 'X'}) for i in xrange(0, comp.n_c_controls)]) deriv_R_names.extend([deriv_tmpl.substitute({'name': comp.name, 'i': str(i), 'type': 'R'}) for i in xrange(0, comp.n_c_controls)]) deriv_T_names.extend([deriv_tmpl.substitute({'name': comp.name, 'i': str(i), 'type': 'T'}) for i in xrange(0, comp.n_t_controls)]) var_line += ' '.join(deriv_X_names) var_line += ' '.join(deriv_R_names) var_line += ' '.join(deriv_T_names) lines.append(var_line) lines.append(('ZONE T = group0, I = %d, J = %d, F=FEPOINT' % (self.n_points, self.n_triangles))) nx = (3 * self.dXqdC.shape[1]) nr = (3 * self.dYqdCr.shape[1]) nt = (3 * self.dYqdCt.shape[1]) j_cols = ((nx + nr) + nt) for (i, p) in enumerate(self.points): line = ('%.8f %.8f %.8f %d ' % (p[0], p[1], p[2], (i + 1))) deriv_values = np.zeros((j_cols,)) deriv_values[:nx:3] = self.dXqdC[i] deriv_values[(nx + 1):(nx + nr):3] = self.dYqdCr[i] deriv_values[(nx + 2):(nx + nr):3] = self.dZqdCr[i] deriv_values[((nx + nr) + 1)::3] = self.dYqdCt[i] deriv_values[((nx + nr) + 2)::3] = self.dZqdCt[i] line += ' '.join(np.char.mod('%.8f', 
deriv_values)) lines.append(line) for tri in self.triangles: line = ('%d %d %d %d' % ((tri[0] + 1), (tri[1] + 1), (tri[2] + 1), (tri[2] + 1))) lines.append(line) needs_close = False if isinstance(stream, basestring): stream = open(stream, 'w') needs_close = True ((print >> stream), '\n'.join(lines)) if needs_close: stream.close()<|docstring|>writes out a new FEPOINT file with the given name, using the supplied points. derivs is of size (3,len(points),len(control_points)), giving matricies of X,Y,Z drivatives jacobian should have a shape of (len(points),len(control_points))<|endoftext|>
c871307e65e0083e315b3a1d7272819f622f8fdf13ed7bf2898586ab97578234
def read_corpus(): '读取语料,每行一个json\n ' while True: with open(corpus_path) as f: for l in f: (yield json.loads(l))
读取语料,每行一个json
simbert_sim.py
read_corpus
baokui/simbert
0
python
def read_corpus(): '\n ' while True: with open(corpus_path) as f: for l in f: (yield json.loads(l))
def read_corpus(): '\n ' while True: with open(corpus_path) as f: for l in f: (yield json.loads(l))<|docstring|>读取语料,每行一个json<|endoftext|>
1e7348a5eab099d2db86c8d08fb91ac1269df85f39b3d4c19ec02aa6322236b5
def truncate(text): '截断句子\n ' (seps, strips) = (u'\n。!?!?;;,, ', u';;,, ') return text_segmentate(text, (maxlen - 2), seps, strips)[0]
截断句子
simbert_sim.py
truncate
baokui/simbert
0
python
def truncate(text): '\n ' (seps, strips) = (u'\n。!?!?;;,, ', u';;,, ') return text_segmentate(text, (maxlen - 2), seps, strips)[0]
def truncate(text): '\n ' (seps, strips) = (u'\n。!?!?;;,, ', u';;,, ') return text_segmentate(text, (maxlen - 2), seps, strips)[0]<|docstring|>截断句子<|endoftext|>
7e508cbc47524de233c45cbb92c0624f1eb324eed1c5e0ba6eae83e92493fcb4
def gen_synonyms(text, n=100, k=20): '"含义: 产生sent的n个相似句,然后返回最相似的k个。\n 做法:用seq2seq生成,并用encoder算相似度并排序。\n 效果:\n >>> gen_synonyms(u\'微信和支付宝哪个好?\')\n [\n u\'微信和支付宝,哪个好?\',\n u\'微信和支付宝哪个好\',\n u\'支付宝和微信哪个好\',\n u\'支付宝和微信哪个好啊\',\n u\'微信和支付宝那个好用?\',\n u\'微信和支付宝哪个好用\',\n u\'支付宝和微信那个更好\',\n u\'支付宝和微信哪个好用\',\n u\'微信和支付宝用起来哪个好?\',\n u\'微信和支付宝选哪个好\',\n ]\n ' r = synonyms_generator.generate(text, n) r = [i for i in set(r) if (i != text)] r = ([text] + r) (X, S) = ([], []) for t in r: (x, s) = tokenizer.encode(t) X.append(x) S.append(s) X = sequence_padding(X) S = sequence_padding(S) Z = encoder.predict([X, S]) Z /= ((Z ** 2).sum(axis=1, keepdims=True) ** 0.5) argsort = np.dot(Z[1:], (- Z[0])).argsort() return [r[(i + 1)] for i in argsort[:k]]
"含义: 产生sent的n个相似句,然后返回最相似的k个。 做法:用seq2seq生成,并用encoder算相似度并排序。 效果: >>> gen_synonyms(u'微信和支付宝哪个好?') [ u'微信和支付宝,哪个好?', u'微信和支付宝哪个好', u'支付宝和微信哪个好', u'支付宝和微信哪个好啊', u'微信和支付宝那个好用?', u'微信和支付宝哪个好用', u'支付宝和微信那个更好', u'支付宝和微信哪个好用', u'微信和支付宝用起来哪个好?', u'微信和支付宝选哪个好', ]
simbert_sim.py
gen_synonyms
baokui/simbert
0
python
def gen_synonyms(text, n=100, k=20): '"含义: 产生sent的n个相似句,然后返回最相似的k个。\n 做法:用seq2seq生成,并用encoder算相似度并排序。\n 效果:\n >>> gen_synonyms(u\'微信和支付宝哪个好?\')\n [\n u\'微信和支付宝,哪个好?\',\n u\'微信和支付宝哪个好\',\n u\'支付宝和微信哪个好\',\n u\'支付宝和微信哪个好啊\',\n u\'微信和支付宝那个好用?\',\n u\'微信和支付宝哪个好用\',\n u\'支付宝和微信那个更好\',\n u\'支付宝和微信哪个好用\',\n u\'微信和支付宝用起来哪个好?\',\n u\'微信和支付宝选哪个好\',\n ]\n ' r = synonyms_generator.generate(text, n) r = [i for i in set(r) if (i != text)] r = ([text] + r) (X, S) = ([], []) for t in r: (x, s) = tokenizer.encode(t) X.append(x) S.append(s) X = sequence_padding(X) S = sequence_padding(S) Z = encoder.predict([X, S]) Z /= ((Z ** 2).sum(axis=1, keepdims=True) ** 0.5) argsort = np.dot(Z[1:], (- Z[0])).argsort() return [r[(i + 1)] for i in argsort[:k]]
def gen_synonyms(text, n=100, k=20): '"含义: 产生sent的n个相似句,然后返回最相似的k个。\n 做法:用seq2seq生成,并用encoder算相似度并排序。\n 效果:\n >>> gen_synonyms(u\'微信和支付宝哪个好?\')\n [\n u\'微信和支付宝,哪个好?\',\n u\'微信和支付宝哪个好\',\n u\'支付宝和微信哪个好\',\n u\'支付宝和微信哪个好啊\',\n u\'微信和支付宝那个好用?\',\n u\'微信和支付宝哪个好用\',\n u\'支付宝和微信那个更好\',\n u\'支付宝和微信哪个好用\',\n u\'微信和支付宝用起来哪个好?\',\n u\'微信和支付宝选哪个好\',\n ]\n ' r = synonyms_generator.generate(text, n) r = [i for i in set(r) if (i != text)] r = ([text] + r) (X, S) = ([], []) for t in r: (x, s) = tokenizer.encode(t) X.append(x) S.append(s) X = sequence_padding(X) S = sequence_padding(S) Z = encoder.predict([X, S]) Z /= ((Z ** 2).sum(axis=1, keepdims=True) ** 0.5) argsort = np.dot(Z[1:], (- Z[0])).argsort() return [r[(i + 1)] for i in argsort[:k]]<|docstring|>"含义: 产生sent的n个相似句,然后返回最相似的k个。 做法:用seq2seq生成,并用encoder算相似度并排序。 效果: >>> gen_synonyms(u'微信和支付宝哪个好?') [ u'微信和支付宝,哪个好?', u'微信和支付宝哪个好', u'支付宝和微信哪个好', u'支付宝和微信哪个好啊', u'微信和支付宝那个好用?', u'微信和支付宝哪个好用', u'支付宝和微信那个更好', u'支付宝和微信哪个好用', u'微信和支付宝用起来哪个好?', u'微信和支付宝选哪个好', ]<|endoftext|>
f59f99a5db95da73b745c5b952df56506003c1b800f44856d61f5ed2a7a13cd7
def just_show(): '随机观察一些样本的效果\n ' S = random.sample(TrnData, k=10) for s in S: try: print('###########################') print('------------------') print((u'原句子:%s' % s['input'])) print(u'同义句子:') r = gen_synonyms(s['click'], 10, 10) for rr in r: print(rr) print('------------------') print((u'原句子:%s' % s['click'][0])) print(u'同义句子:') r = gen_synonyms(s['click'][0], 10, 10) for rr in r: print(rr) except: pass
随机观察一些样本的效果
simbert_sim.py
just_show
baokui/simbert
0
python
def just_show(): '\n ' S = random.sample(TrnData, k=10) for s in S: try: print('###########################') print('------------------') print((u'原句子:%s' % s['input'])) print(u'同义句子:') r = gen_synonyms(s['click'], 10, 10) for rr in r: print(rr) print('------------------') print((u'原句子:%s' % s['click'][0])) print(u'同义句子:') r = gen_synonyms(s['click'][0], 10, 10) for rr in r: print(rr) except: pass
def just_show(): '\n ' S = random.sample(TrnData, k=10) for s in S: try: print('###########################') print('------------------') print((u'原句子:%s' % s['input'])) print(u'同义句子:') r = gen_synonyms(s['click'], 10, 10) for rr in r: print(rr) print('------------------') print((u'原句子:%s' % s['click'][0])) print(u'同义句子:') r = gen_synonyms(s['click'][0], 10, 10) for rr in r: print(rr) except: pass<|docstring|>随机观察一些样本的效果<|endoftext|>
b8a658d3665433788cf321b0219acf08f3dfdf610eedd6f775df0c71c33e922f
def __init__(__self__, *, vpc_id: pulumi.Input[str], assign_ipv6_address_on_creation: Optional[pulumi.Input[bool]]=None, availability_zone: Optional[pulumi.Input[str]]=None, availability_zone_id: Optional[pulumi.Input[str]]=None, cidr_block: Optional[pulumi.Input[str]]=None, enable_dns64: Optional[pulumi.Input[bool]]=None, ipv6_cidr_block: Optional[pulumi.Input[str]]=None, ipv6_native: Optional[pulumi.Input[bool]]=None, map_public_ip_on_launch: Optional[pulumi.Input[bool]]=None, outpost_arn: Optional[pulumi.Input[str]]=None, private_dns_name_options_on_launch: Optional[pulumi.Input['PrivateDnsNameOptionsOnLaunchPropertiesArgs']]=None, tags: Optional[pulumi.Input[Sequence[pulumi.Input['SubnetTagArgs']]]]=None): '\n The set of arguments for constructing a Subnet resource.\n ' pulumi.set(__self__, 'vpc_id', vpc_id) if (assign_ipv6_address_on_creation is not None): pulumi.set(__self__, 'assign_ipv6_address_on_creation', assign_ipv6_address_on_creation) if (availability_zone is not None): pulumi.set(__self__, 'availability_zone', availability_zone) if (availability_zone_id is not None): pulumi.set(__self__, 'availability_zone_id', availability_zone_id) if (cidr_block is not None): pulumi.set(__self__, 'cidr_block', cidr_block) if (enable_dns64 is not None): pulumi.set(__self__, 'enable_dns64', enable_dns64) if (ipv6_cidr_block is not None): pulumi.set(__self__, 'ipv6_cidr_block', ipv6_cidr_block) if (ipv6_native is not None): pulumi.set(__self__, 'ipv6_native', ipv6_native) if (map_public_ip_on_launch is not None): pulumi.set(__self__, 'map_public_ip_on_launch', map_public_ip_on_launch) if (outpost_arn is not None): pulumi.set(__self__, 'outpost_arn', outpost_arn) if (private_dns_name_options_on_launch is not None): pulumi.set(__self__, 'private_dns_name_options_on_launch', private_dns_name_options_on_launch) if (tags is not None): pulumi.set(__self__, 'tags', tags)
The set of arguments for constructing a Subnet resource.
sdk/python/pulumi_aws_native/ec2/subnet.py
__init__
pulumi/pulumi-aws-native
29
python
def __init__(__self__, *, vpc_id: pulumi.Input[str], assign_ipv6_address_on_creation: Optional[pulumi.Input[bool]]=None, availability_zone: Optional[pulumi.Input[str]]=None, availability_zone_id: Optional[pulumi.Input[str]]=None, cidr_block: Optional[pulumi.Input[str]]=None, enable_dns64: Optional[pulumi.Input[bool]]=None, ipv6_cidr_block: Optional[pulumi.Input[str]]=None, ipv6_native: Optional[pulumi.Input[bool]]=None, map_public_ip_on_launch: Optional[pulumi.Input[bool]]=None, outpost_arn: Optional[pulumi.Input[str]]=None, private_dns_name_options_on_launch: Optional[pulumi.Input['PrivateDnsNameOptionsOnLaunchPropertiesArgs']]=None, tags: Optional[pulumi.Input[Sequence[pulumi.Input['SubnetTagArgs']]]]=None): '\n \n ' pulumi.set(__self__, 'vpc_id', vpc_id) if (assign_ipv6_address_on_creation is not None): pulumi.set(__self__, 'assign_ipv6_address_on_creation', assign_ipv6_address_on_creation) if (availability_zone is not None): pulumi.set(__self__, 'availability_zone', availability_zone) if (availability_zone_id is not None): pulumi.set(__self__, 'availability_zone_id', availability_zone_id) if (cidr_block is not None): pulumi.set(__self__, 'cidr_block', cidr_block) if (enable_dns64 is not None): pulumi.set(__self__, 'enable_dns64', enable_dns64) if (ipv6_cidr_block is not None): pulumi.set(__self__, 'ipv6_cidr_block', ipv6_cidr_block) if (ipv6_native is not None): pulumi.set(__self__, 'ipv6_native', ipv6_native) if (map_public_ip_on_launch is not None): pulumi.set(__self__, 'map_public_ip_on_launch', map_public_ip_on_launch) if (outpost_arn is not None): pulumi.set(__self__, 'outpost_arn', outpost_arn) if (private_dns_name_options_on_launch is not None): pulumi.set(__self__, 'private_dns_name_options_on_launch', private_dns_name_options_on_launch) if (tags is not None): pulumi.set(__self__, 'tags', tags)
def __init__(__self__, *, vpc_id: pulumi.Input[str], assign_ipv6_address_on_creation: Optional[pulumi.Input[bool]]=None, availability_zone: Optional[pulumi.Input[str]]=None, availability_zone_id: Optional[pulumi.Input[str]]=None, cidr_block: Optional[pulumi.Input[str]]=None, enable_dns64: Optional[pulumi.Input[bool]]=None, ipv6_cidr_block: Optional[pulumi.Input[str]]=None, ipv6_native: Optional[pulumi.Input[bool]]=None, map_public_ip_on_launch: Optional[pulumi.Input[bool]]=None, outpost_arn: Optional[pulumi.Input[str]]=None, private_dns_name_options_on_launch: Optional[pulumi.Input['PrivateDnsNameOptionsOnLaunchPropertiesArgs']]=None, tags: Optional[pulumi.Input[Sequence[pulumi.Input['SubnetTagArgs']]]]=None): '\n \n ' pulumi.set(__self__, 'vpc_id', vpc_id) if (assign_ipv6_address_on_creation is not None): pulumi.set(__self__, 'assign_ipv6_address_on_creation', assign_ipv6_address_on_creation) if (availability_zone is not None): pulumi.set(__self__, 'availability_zone', availability_zone) if (availability_zone_id is not None): pulumi.set(__self__, 'availability_zone_id', availability_zone_id) if (cidr_block is not None): pulumi.set(__self__, 'cidr_block', cidr_block) if (enable_dns64 is not None): pulumi.set(__self__, 'enable_dns64', enable_dns64) if (ipv6_cidr_block is not None): pulumi.set(__self__, 'ipv6_cidr_block', ipv6_cidr_block) if (ipv6_native is not None): pulumi.set(__self__, 'ipv6_native', ipv6_native) if (map_public_ip_on_launch is not None): pulumi.set(__self__, 'map_public_ip_on_launch', map_public_ip_on_launch) if (outpost_arn is not None): pulumi.set(__self__, 'outpost_arn', outpost_arn) if (private_dns_name_options_on_launch is not None): pulumi.set(__self__, 'private_dns_name_options_on_launch', private_dns_name_options_on_launch) if (tags is not None): pulumi.set(__self__, 'tags', tags)<|docstring|>The set of arguments for constructing a Subnet resource.<|endoftext|>
5393f4d8eb647a36fd4907361ce1dc12369303a9709eabff58c05c92ec331efa
@overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, assign_ipv6_address_on_creation: Optional[pulumi.Input[bool]]=None, availability_zone: Optional[pulumi.Input[str]]=None, availability_zone_id: Optional[pulumi.Input[str]]=None, cidr_block: Optional[pulumi.Input[str]]=None, enable_dns64: Optional[pulumi.Input[bool]]=None, ipv6_cidr_block: Optional[pulumi.Input[str]]=None, ipv6_native: Optional[pulumi.Input[bool]]=None, map_public_ip_on_launch: Optional[pulumi.Input[bool]]=None, outpost_arn: Optional[pulumi.Input[str]]=None, private_dns_name_options_on_launch: Optional[pulumi.Input[pulumi.InputType['PrivateDnsNameOptionsOnLaunchPropertiesArgs']]]=None, tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubnetTagArgs']]]]]=None, vpc_id: Optional[pulumi.Input[str]]=None, __props__=None): '\n Resource Type definition for AWS::EC2::Subnet\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n ' ...
Resource Type definition for AWS::EC2::Subnet :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource.
sdk/python/pulumi_aws_native/ec2/subnet.py
__init__
pulumi/pulumi-aws-native
29
python
@overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, assign_ipv6_address_on_creation: Optional[pulumi.Input[bool]]=None, availability_zone: Optional[pulumi.Input[str]]=None, availability_zone_id: Optional[pulumi.Input[str]]=None, cidr_block: Optional[pulumi.Input[str]]=None, enable_dns64: Optional[pulumi.Input[bool]]=None, ipv6_cidr_block: Optional[pulumi.Input[str]]=None, ipv6_native: Optional[pulumi.Input[bool]]=None, map_public_ip_on_launch: Optional[pulumi.Input[bool]]=None, outpost_arn: Optional[pulumi.Input[str]]=None, private_dns_name_options_on_launch: Optional[pulumi.Input[pulumi.InputType['PrivateDnsNameOptionsOnLaunchPropertiesArgs']]]=None, tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubnetTagArgs']]]]]=None, vpc_id: Optional[pulumi.Input[str]]=None, __props__=None): '\n Resource Type definition for AWS::EC2::Subnet\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n ' ...
@overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions]=None, assign_ipv6_address_on_creation: Optional[pulumi.Input[bool]]=None, availability_zone: Optional[pulumi.Input[str]]=None, availability_zone_id: Optional[pulumi.Input[str]]=None, cidr_block: Optional[pulumi.Input[str]]=None, enable_dns64: Optional[pulumi.Input[bool]]=None, ipv6_cidr_block: Optional[pulumi.Input[str]]=None, ipv6_native: Optional[pulumi.Input[bool]]=None, map_public_ip_on_launch: Optional[pulumi.Input[bool]]=None, outpost_arn: Optional[pulumi.Input[str]]=None, private_dns_name_options_on_launch: Optional[pulumi.Input[pulumi.InputType['PrivateDnsNameOptionsOnLaunchPropertiesArgs']]]=None, tags: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['SubnetTagArgs']]]]]=None, vpc_id: Optional[pulumi.Input[str]]=None, __props__=None): '\n Resource Type definition for AWS::EC2::Subnet\n\n :param str resource_name: The name of the resource.\n :param pulumi.ResourceOptions opts: Options for the resource.\n ' ...<|docstring|>Resource Type definition for AWS::EC2::Subnet :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource.<|endoftext|>
40e9c9542b6ddcc67d348ce0bd2d5d79bbf7e648aa338c5c43c85cce55317580
@overload def __init__(__self__, resource_name: str, args: SubnetArgs, opts: Optional[pulumi.ResourceOptions]=None): "\n Resource Type definition for AWS::EC2::Subnet\n\n :param str resource_name: The name of the resource.\n :param SubnetArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " ...
Resource Type definition for AWS::EC2::Subnet :param str resource_name: The name of the resource. :param SubnetArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource.
sdk/python/pulumi_aws_native/ec2/subnet.py
__init__
pulumi/pulumi-aws-native
29
python
@overload def __init__(__self__, resource_name: str, args: SubnetArgs, opts: Optional[pulumi.ResourceOptions]=None): "\n Resource Type definition for AWS::EC2::Subnet\n\n :param str resource_name: The name of the resource.\n :param SubnetArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " ...
@overload def __init__(__self__, resource_name: str, args: SubnetArgs, opts: Optional[pulumi.ResourceOptions]=None): "\n Resource Type definition for AWS::EC2::Subnet\n\n :param str resource_name: The name of the resource.\n :param SubnetArgs args: The arguments to use to populate this resource's properties.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " ...<|docstring|>Resource Type definition for AWS::EC2::Subnet :param str resource_name: The name of the resource. :param SubnetArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource.<|endoftext|>
860218a028505098e8be91473c27b8e76f21a1702fc4cfafa24e5118e426df09
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'Subnet': "\n Get an existing Subnet resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = SubnetArgs.__new__(SubnetArgs) __props__.__dict__['assign_ipv6_address_on_creation'] = None __props__.__dict__['availability_zone'] = None __props__.__dict__['availability_zone_id'] = None __props__.__dict__['cidr_block'] = None __props__.__dict__['enable_dns64'] = None __props__.__dict__['ipv6_cidr_block'] = None __props__.__dict__['ipv6_cidr_blocks'] = None __props__.__dict__['ipv6_native'] = None __props__.__dict__['map_public_ip_on_launch'] = None __props__.__dict__['network_acl_association_id'] = None __props__.__dict__['outpost_arn'] = None __props__.__dict__['private_dns_name_options_on_launch'] = None __props__.__dict__['subnet_id'] = None __props__.__dict__['tags'] = None __props__.__dict__['vpc_id'] = None return Subnet(resource_name, opts=opts, __props__=__props__)
Get an existing Subnet resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource.
sdk/python/pulumi_aws_native/ec2/subnet.py
get
pulumi/pulumi-aws-native
29
python
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'Subnet': "\n Get an existing Subnet resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = SubnetArgs.__new__(SubnetArgs) __props__.__dict__['assign_ipv6_address_on_creation'] = None __props__.__dict__['availability_zone'] = None __props__.__dict__['availability_zone_id'] = None __props__.__dict__['cidr_block'] = None __props__.__dict__['enable_dns64'] = None __props__.__dict__['ipv6_cidr_block'] = None __props__.__dict__['ipv6_cidr_blocks'] = None __props__.__dict__['ipv6_native'] = None __props__.__dict__['map_public_ip_on_launch'] = None __props__.__dict__['network_acl_association_id'] = None __props__.__dict__['outpost_arn'] = None __props__.__dict__['private_dns_name_options_on_launch'] = None __props__.__dict__['subnet_id'] = None __props__.__dict__['tags'] = None __props__.__dict__['vpc_id'] = None return Subnet(resource_name, opts=opts, __props__=__props__)
@staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions]=None) -> 'Subnet': "\n Get an existing Subnet resource's state with the given name, id, and optional extra\n properties used to qualify the lookup.\n\n :param str resource_name: The unique name of the resulting resource.\n :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.\n :param pulumi.ResourceOptions opts: Options for the resource.\n " opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = SubnetArgs.__new__(SubnetArgs) __props__.__dict__['assign_ipv6_address_on_creation'] = None __props__.__dict__['availability_zone'] = None __props__.__dict__['availability_zone_id'] = None __props__.__dict__['cidr_block'] = None __props__.__dict__['enable_dns64'] = None __props__.__dict__['ipv6_cidr_block'] = None __props__.__dict__['ipv6_cidr_blocks'] = None __props__.__dict__['ipv6_native'] = None __props__.__dict__['map_public_ip_on_launch'] = None __props__.__dict__['network_acl_association_id'] = None __props__.__dict__['outpost_arn'] = None __props__.__dict__['private_dns_name_options_on_launch'] = None __props__.__dict__['subnet_id'] = None __props__.__dict__['tags'] = None __props__.__dict__['vpc_id'] = None return Subnet(resource_name, opts=opts, __props__=__props__)<|docstring|>Get an existing Subnet resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource.<|endoftext|>
a9cdf47e586f64d1121407f57c9c5928c4754ed33b5cf70acd553cad78e8e102
@pytest.fixture(name='simple_sim') def sim_fixt(tmp_path): 'Pytest fixture for basic simulation class' dic = {'Grid': {'N': 2, 'r_min': 0, 'r_max': 1}, 'Clock': {'start_time': 0, 'end_time': 10, 'num_steps': 100}, 'Tools': {'ExampleTool': [{'custom_name': 'example'}, {'custom_name': 'example2'}]}, 'PhysicsModules': {'ExampleModule': {}}, 'Diagnostics': {'directory': f'{tmp_path}/default_output', 'clock': {}, 'ExampleDiagnostic': [{}, {}]}} return Simulation(dic)
Pytest fixture for basic simulation class
tests/test_core.py
sim_fixt
kpf59/turbopy
0
python
@pytest.fixture(name='simple_sim') def sim_fixt(tmp_path): dic = {'Grid': {'N': 2, 'r_min': 0, 'r_max': 1}, 'Clock': {'start_time': 0, 'end_time': 10, 'num_steps': 100}, 'Tools': {'ExampleTool': [{'custom_name': 'example'}, {'custom_name': 'example2'}]}, 'PhysicsModules': {'ExampleModule': {}}, 'Diagnostics': {'directory': f'{tmp_path}/default_output', 'clock': {}, 'ExampleDiagnostic': [{}, {}]}} return Simulation(dic)
@pytest.fixture(name='simple_sim') def sim_fixt(tmp_path): dic = {'Grid': {'N': 2, 'r_min': 0, 'r_max': 1}, 'Clock': {'start_time': 0, 'end_time': 10, 'num_steps': 100}, 'Tools': {'ExampleTool': [{'custom_name': 'example'}, {'custom_name': 'example2'}]}, 'PhysicsModules': {'ExampleModule': {}}, 'Diagnostics': {'directory': f'{tmp_path}/default_output', 'clock': {}, 'ExampleDiagnostic': [{}, {}]}} return Simulation(dic)<|docstring|>Pytest fixture for basic simulation class<|endoftext|>
ce514e70506a31ff3a1145ee3a83c0afe305b2020bd5b6f41c251bc529c59d26
def test_simulation_init_should_create_class_instance_when_called(simple_sim, tmp_path): 'Test init method for Simulation class' assert (simple_sim.physics_modules == []) assert (simple_sim.compute_tools == []) assert (simple_sim.diagnostics == []) assert (simple_sim.grid is None) assert (simple_sim.clock is None) assert (simple_sim.units is None) dic = {'Grid': {'N': 2, 'r_min': 0, 'r_max': 1}, 'Clock': {'start_time': 0, 'end_time': 10, 'num_steps': 100}, 'Tools': {'ExampleTool': [{'custom_name': 'example'}, {'custom_name': 'example2'}]}, 'PhysicsModules': {'ExampleModule': {}}, 'Diagnostics': {'directory': f'{tmp_path}/default_output', 'clock': {}, 'ExampleDiagnostic': [{}, {}]}} assert (simple_sim.input_data == dic)
Test init method for Simulation class
tests/test_core.py
test_simulation_init_should_create_class_instance_when_called
kpf59/turbopy
0
python
def test_simulation_init_should_create_class_instance_when_called(simple_sim, tmp_path): assert (simple_sim.physics_modules == []) assert (simple_sim.compute_tools == []) assert (simple_sim.diagnostics == []) assert (simple_sim.grid is None) assert (simple_sim.clock is None) assert (simple_sim.units is None) dic = {'Grid': {'N': 2, 'r_min': 0, 'r_max': 1}, 'Clock': {'start_time': 0, 'end_time': 10, 'num_steps': 100}, 'Tools': {'ExampleTool': [{'custom_name': 'example'}, {'custom_name': 'example2'}]}, 'PhysicsModules': {'ExampleModule': {}}, 'Diagnostics': {'directory': f'{tmp_path}/default_output', 'clock': {}, 'ExampleDiagnostic': [{}, {}]}} assert (simple_sim.input_data == dic)
def test_simulation_init_should_create_class_instance_when_called(simple_sim, tmp_path): assert (simple_sim.physics_modules == []) assert (simple_sim.compute_tools == []) assert (simple_sim.diagnostics == []) assert (simple_sim.grid is None) assert (simple_sim.clock is None) assert (simple_sim.units is None) dic = {'Grid': {'N': 2, 'r_min': 0, 'r_max': 1}, 'Clock': {'start_time': 0, 'end_time': 10, 'num_steps': 100}, 'Tools': {'ExampleTool': [{'custom_name': 'example'}, {'custom_name': 'example2'}]}, 'PhysicsModules': {'ExampleModule': {}}, 'Diagnostics': {'directory': f'{tmp_path}/default_output', 'clock': {}, 'ExampleDiagnostic': [{}, {}]}} assert (simple_sim.input_data == dic)<|docstring|>Test init method for Simulation class<|endoftext|>
ac3ddd15502e63e0fd74c58d2b882e59239f5eb7248fe8b28f09730d991007b4
def test_read_grid_from_input_should_set_grid_attr_when_called(simple_sim): 'Test read_grid_from_input method in Simulation class' simple_sim.read_grid_from_input() assert (simple_sim.grid.num_points == 2) assert (simple_sim.grid.r_min == 0) assert (simple_sim.grid.r_max == 1)
Test read_grid_from_input method in Simulation class
tests/test_core.py
test_read_grid_from_input_should_set_grid_attr_when_called
kpf59/turbopy
0
python
def test_read_grid_from_input_should_set_grid_attr_when_called(simple_sim): simple_sim.read_grid_from_input() assert (simple_sim.grid.num_points == 2) assert (simple_sim.grid.r_min == 0) assert (simple_sim.grid.r_max == 1)
def test_read_grid_from_input_should_set_grid_attr_when_called(simple_sim): simple_sim.read_grid_from_input() assert (simple_sim.grid.num_points == 2) assert (simple_sim.grid.r_min == 0) assert (simple_sim.grid.r_max == 1)<|docstring|>Test read_grid_from_input method in Simulation class<|endoftext|>
6385a4b251f5f8b667060a6310dced8e880a5fcbc4aec6e87505fb58d2dc76c1
def test_gridless_simulation(tmp_path): 'Test a gridless simulation' dic = {'Clock': {'start_time': 0, 'end_time': 10, 'num_steps': 100}, 'Tools': {'ExampleTool': [{'custom_name': 'example'}, {'custom_name': 'example2'}]}, 'PhysicsModules': {'ExampleModule': {}}, 'Diagnostics': {'directory': f'{tmp_path}/default_output', 'clock': {}, 'ExampleDiagnostic': [{}, {}]}} with warnings.catch_warnings(record=True) as w: sim = Simulation(dic) sim.run() assert (sim.clock is not None) assert (sim.grid is None) assert (len(w) == 1) assert (str(w[(- 1)].message) == 'No Grid Found.')
Test a gridless simulation
tests/test_core.py
test_gridless_simulation
kpf59/turbopy
0
python
def test_gridless_simulation(tmp_path): dic = {'Clock': {'start_time': 0, 'end_time': 10, 'num_steps': 100}, 'Tools': {'ExampleTool': [{'custom_name': 'example'}, {'custom_name': 'example2'}]}, 'PhysicsModules': {'ExampleModule': {}}, 'Diagnostics': {'directory': f'{tmp_path}/default_output', 'clock': {}, 'ExampleDiagnostic': [{}, {}]}} with warnings.catch_warnings(record=True) as w: sim = Simulation(dic) sim.run() assert (sim.clock is not None) assert (sim.grid is None) assert (len(w) == 1) assert (str(w[(- 1)].message) == 'No Grid Found.')
def test_gridless_simulation(tmp_path): dic = {'Clock': {'start_time': 0, 'end_time': 10, 'num_steps': 100}, 'Tools': {'ExampleTool': [{'custom_name': 'example'}, {'custom_name': 'example2'}]}, 'PhysicsModules': {'ExampleModule': {}}, 'Diagnostics': {'directory': f'{tmp_path}/default_output', 'clock': {}, 'ExampleDiagnostic': [{}, {}]}} with warnings.catch_warnings(record=True) as w: sim = Simulation(dic) sim.run() assert (sim.clock is not None) assert (sim.grid is None) assert (len(w) == 1) assert (str(w[(- 1)].message) == 'No Grid Found.')<|docstring|>Test a gridless simulation<|endoftext|>
9753007957265bd15a9565312cf39a26f82144f36ffe6a7d8158928d6f7be499
def test_read_clock_from_input_should_set_clock_attr_when_called(simple_sim): 'Test read_clock_from_input method in Simulation class' simple_sim.read_clock_from_input() assert (simple_sim.clock._owner == simple_sim) assert (simple_sim.clock.start_time == 0) assert (simple_sim.clock.time == 0) assert (simple_sim.clock.end_time == 10) assert (simple_sim.clock.this_step == 0) assert (simple_sim.clock.print_time is False) assert (simple_sim.clock.num_steps == 100) assert (simple_sim.clock.dt == 0.1) dic = {'Grid': {'N': 2, 'r_min': 0, 'r_max': 1}, 'Clock': {'start_time': 0, 'end_time': 10, 'dt': 0.2, 'print_time': True}} other_sim = Simulation(dic) other_sim.read_clock_from_input() assert (other_sim.clock.dt == 0.2) assert (other_sim.clock.num_steps == 50) assert (other_sim.clock.print_time is True)
Test read_clock_from_input method in Simulation class
tests/test_core.py
test_read_clock_from_input_should_set_clock_attr_when_called
kpf59/turbopy
0
python
def test_read_clock_from_input_should_set_clock_attr_when_called(simple_sim): simple_sim.read_clock_from_input() assert (simple_sim.clock._owner == simple_sim) assert (simple_sim.clock.start_time == 0) assert (simple_sim.clock.time == 0) assert (simple_sim.clock.end_time == 10) assert (simple_sim.clock.this_step == 0) assert (simple_sim.clock.print_time is False) assert (simple_sim.clock.num_steps == 100) assert (simple_sim.clock.dt == 0.1) dic = {'Grid': {'N': 2, 'r_min': 0, 'r_max': 1}, 'Clock': {'start_time': 0, 'end_time': 10, 'dt': 0.2, 'print_time': True}} other_sim = Simulation(dic) other_sim.read_clock_from_input() assert (other_sim.clock.dt == 0.2) assert (other_sim.clock.num_steps == 50) assert (other_sim.clock.print_time is True)
def test_read_clock_from_input_should_set_clock_attr_when_called(simple_sim): simple_sim.read_clock_from_input() assert (simple_sim.clock._owner == simple_sim) assert (simple_sim.clock.start_time == 0) assert (simple_sim.clock.time == 0) assert (simple_sim.clock.end_time == 10) assert (simple_sim.clock.this_step == 0) assert (simple_sim.clock.print_time is False) assert (simple_sim.clock.num_steps == 100) assert (simple_sim.clock.dt == 0.1) dic = {'Grid': {'N': 2, 'r_min': 0, 'r_max': 1}, 'Clock': {'start_time': 0, 'end_time': 10, 'dt': 0.2, 'print_time': True}} other_sim = Simulation(dic) other_sim.read_clock_from_input() assert (other_sim.clock.dt == 0.2) assert (other_sim.clock.num_steps == 50) assert (other_sim.clock.print_time is True)<|docstring|>Test read_clock_from_input method in Simulation class<|endoftext|>
0b42e10385c6626de1593ca9d75aa0986cd6f3519d54a64b7fe69e9f474303db
def test_read_tools_from_input_should_set_tools_attr_when_called(simple_sim): 'Test read_tools_from_input method in Simulation class' simple_sim.read_tools_from_input() assert (simple_sim.compute_tools[0]._owner == simple_sim) assert (simple_sim.compute_tools[0]._input_data == {'type': 'ExampleTool', 'custom_name': 'example'}) assert (simple_sim.compute_tools[1]._owner == simple_sim) assert (simple_sim.compute_tools[1]._input_data == {'type': 'ExampleTool', 'custom_name': 'example2'})
Test read_tools_from_input method in Simulation class
tests/test_core.py
test_read_tools_from_input_should_set_tools_attr_when_called
kpf59/turbopy
0
python
def test_read_tools_from_input_should_set_tools_attr_when_called(simple_sim): simple_sim.read_tools_from_input() assert (simple_sim.compute_tools[0]._owner == simple_sim) assert (simple_sim.compute_tools[0]._input_data == {'type': 'ExampleTool', 'custom_name': 'example'}) assert (simple_sim.compute_tools[1]._owner == simple_sim) assert (simple_sim.compute_tools[1]._input_data == {'type': 'ExampleTool', 'custom_name': 'example2'})
def test_read_tools_from_input_should_set_tools_attr_when_called(simple_sim): simple_sim.read_tools_from_input() assert (simple_sim.compute_tools[0]._owner == simple_sim) assert (simple_sim.compute_tools[0]._input_data == {'type': 'ExampleTool', 'custom_name': 'example'}) assert (simple_sim.compute_tools[1]._owner == simple_sim) assert (simple_sim.compute_tools[1]._input_data == {'type': 'ExampleTool', 'custom_name': 'example2'})<|docstring|>Test read_tools_from_input method in Simulation class<|endoftext|>
e8716d4bd20a853cbaf1e0c4dfccac1bfc2c7e924a74ca03eee7385606154cc9
def test_fundamental_cycle_should_advance_clock_when_called(simple_sim): 'Test fundamental_cycle method in Simulation class' simple_sim.read_clock_from_input() simple_sim.fundamental_cycle() assert (simple_sim.clock.this_step == 1) assert (simple_sim.clock.time == 0.1)
Test fundamental_cycle method in Simulation class
tests/test_core.py
test_fundamental_cycle_should_advance_clock_when_called
kpf59/turbopy
0
python
def test_fundamental_cycle_should_advance_clock_when_called(simple_sim): simple_sim.read_clock_from_input() simple_sim.fundamental_cycle() assert (simple_sim.clock.this_step == 1) assert (simple_sim.clock.time == 0.1)
def test_fundamental_cycle_should_advance_clock_when_called(simple_sim): simple_sim.read_clock_from_input() simple_sim.fundamental_cycle() assert (simple_sim.clock.this_step == 1) assert (simple_sim.clock.time == 0.1)<|docstring|>Test fundamental_cycle method in Simulation class<|endoftext|>
7baab71f04d314290fbd740cad044e2ef2ed6b7aba2a0dd75a4e6de1c8fee11d
def test_run_should_run_simulation_while_clock_is_running(simple_sim): 'Test run method in Simulation class' simple_sim.run() assert (simple_sim.clock.this_step == 100) assert (simple_sim.clock.time == 10)
Test run method in Simulation class
tests/test_core.py
test_run_should_run_simulation_while_clock_is_running
kpf59/turbopy
0
python
def test_run_should_run_simulation_while_clock_is_running(simple_sim): simple_sim.run() assert (simple_sim.clock.this_step == 100) assert (simple_sim.clock.time == 10)
def test_run_should_run_simulation_while_clock_is_running(simple_sim): simple_sim.run() assert (simple_sim.clock.this_step == 100) assert (simple_sim.clock.time == 10)<|docstring|>Test run method in Simulation class<|endoftext|>
7c761f8c98641eb8b2ffae88013fbb44b0c612fc83c046cdb41f86328d3a609e
def test_turn_back_should_turn_back_time_when_called(simple_sim): 'Test fundamental_cycle method in Simulation class' simple_sim.read_clock_from_input() simple_sim.fundamental_cycle() assert (simple_sim.clock.this_step == 1) assert (simple_sim.clock.time == 0.1) simple_sim.clock.turn_back() assert (simple_sim.clock.this_step == 0) assert (simple_sim.clock.time == 0)
Test fundamental_cycle method in Simulation class
tests/test_core.py
test_turn_back_should_turn_back_time_when_called
kpf59/turbopy
0
python
def test_turn_back_should_turn_back_time_when_called(simple_sim): simple_sim.read_clock_from_input() simple_sim.fundamental_cycle() assert (simple_sim.clock.this_step == 1) assert (simple_sim.clock.time == 0.1) simple_sim.clock.turn_back() assert (simple_sim.clock.this_step == 0) assert (simple_sim.clock.time == 0)
def test_turn_back_should_turn_back_time_when_called(simple_sim): simple_sim.read_clock_from_input() simple_sim.fundamental_cycle() assert (simple_sim.clock.this_step == 1) assert (simple_sim.clock.time == 0.1) simple_sim.clock.turn_back() assert (simple_sim.clock.this_step == 0) assert (simple_sim.clock.time == 0)<|docstring|>Test fundamental_cycle method in Simulation class<|endoftext|>
f157c4c9e9b1926bcbea6e5c1162010f564f9d906f3980c1240860cfaaa4305b
def test_read_modules_from_input_should_set_modules_attr_when_called(simple_sim): 'Test read_modules_from_input method in Simulation calss' simple_sim.read_modules_from_input() assert (simple_sim.physics_modules[0]._owner == simple_sim) assert (simple_sim.physics_modules[0]._input_data == {'name': 'ExampleModule'})
Test read_modules_from_input method in Simulation calss
tests/test_core.py
test_read_modules_from_input_should_set_modules_attr_when_called
kpf59/turbopy
0
python
def test_read_modules_from_input_should_set_modules_attr_when_called(simple_sim): simple_sim.read_modules_from_input() assert (simple_sim.physics_modules[0]._owner == simple_sim) assert (simple_sim.physics_modules[0]._input_data == {'name': 'ExampleModule'})
def test_read_modules_from_input_should_set_modules_attr_when_called(simple_sim): simple_sim.read_modules_from_input() assert (simple_sim.physics_modules[0]._owner == simple_sim) assert (simple_sim.physics_modules[0]._input_data == {'name': 'ExampleModule'})<|docstring|>Test read_modules_from_input method in Simulation calss<|endoftext|>
c582456a7cb6727d4ca58c557abeca02ebd9d416b66c367549499ee06224411c
def test_default_diagnostic_filename_is_generated_if_no_name_specified(simple_sim, tmp_path): 'Test read_diagnostic_from_input method in Simulation class' simple_sim.read_diagnostics_from_input() input_data = simple_sim.diagnostics[0]._input_data assert (input_data['directory'] == str(Path(f'{tmp_path}/default_output'))) assert (input_data['filename'] == str((Path(f'{tmp_path}/default_output') / Path('clock0.out'))))
Test read_diagnostic_from_input method in Simulation class
tests/test_core.py
test_default_diagnostic_filename_is_generated_if_no_name_specified
kpf59/turbopy
0
python
def test_default_diagnostic_filename_is_generated_if_no_name_specified(simple_sim, tmp_path): simple_sim.read_diagnostics_from_input() input_data = simple_sim.diagnostics[0]._input_data assert (input_data['directory'] == str(Path(f'{tmp_path}/default_output'))) assert (input_data['filename'] == str((Path(f'{tmp_path}/default_output') / Path('clock0.out'))))
def test_default_diagnostic_filename_is_generated_if_no_name_specified(simple_sim, tmp_path): simple_sim.read_diagnostics_from_input() input_data = simple_sim.diagnostics[0]._input_data assert (input_data['directory'] == str(Path(f'{tmp_path}/default_output'))) assert (input_data['filename'] == str((Path(f'{tmp_path}/default_output') / Path('clock0.out'))))<|docstring|>Test read_diagnostic_from_input method in Simulation class<|endoftext|>
ec8bf6b1776371d50dcd65efc62dbd843dcb27dda5380eda6350653725957d33
def test_default_diagnostic_filename_increments_for_multiple_diagnostics(simple_sim, tmp_path): 'Test read_diagnostic_from_input method in Simulation class' simple_sim.read_diagnostics_from_input() assert (simple_sim.diagnostics[0]._input_data['directory'] == str(Path(f'{tmp_path}/default_output'))) assert (simple_sim.diagnostics[0]._input_data['filename'] == str((Path(f'{tmp_path}/default_output') / Path('clock0.out')))) input_data = simple_sim.diagnostics[2]._input_data assert (input_data['directory'] == str(Path(f'{tmp_path}/default_output'))) assert (input_data['filename'] == str((Path(f'{tmp_path}/default_output') / Path('ExampleDiagnostic1.out'))))
Test read_diagnostic_from_input method in Simulation class
tests/test_core.py
test_default_diagnostic_filename_increments_for_multiple_diagnostics
kpf59/turbopy
0
python
def test_default_diagnostic_filename_increments_for_multiple_diagnostics(simple_sim, tmp_path): simple_sim.read_diagnostics_from_input() assert (simple_sim.diagnostics[0]._input_data['directory'] == str(Path(f'{tmp_path}/default_output'))) assert (simple_sim.diagnostics[0]._input_data['filename'] == str((Path(f'{tmp_path}/default_output') / Path('clock0.out')))) input_data = simple_sim.diagnostics[2]._input_data assert (input_data['directory'] == str(Path(f'{tmp_path}/default_output'))) assert (input_data['filename'] == str((Path(f'{tmp_path}/default_output') / Path('ExampleDiagnostic1.out'))))
def test_default_diagnostic_filename_increments_for_multiple_diagnostics(simple_sim, tmp_path): simple_sim.read_diagnostics_from_input() assert (simple_sim.diagnostics[0]._input_data['directory'] == str(Path(f'{tmp_path}/default_output'))) assert (simple_sim.diagnostics[0]._input_data['filename'] == str((Path(f'{tmp_path}/default_output') / Path('clock0.out')))) input_data = simple_sim.diagnostics[2]._input_data assert (input_data['directory'] == str(Path(f'{tmp_path}/default_output'))) assert (input_data['filename'] == str((Path(f'{tmp_path}/default_output') / Path('ExampleDiagnostic1.out'))))<|docstring|>Test read_diagnostic_from_input method in Simulation class<|endoftext|>
c410c9e7bd30ce1e65e3fb203df5b65d1409dcd42cd01b7ecd63972072b62cb5
@pytest.fixture(name='simple_grid') def grid_conf(): 'Pytest fixture for grid configuration dictionary' grid = {'N': 8, 'r_min': 0, 'r_max': 0.1} return Grid(grid)
Pytest fixture for grid configuration dictionary
tests/test_core.py
grid_conf
kpf59/turbopy
0
python
@pytest.fixture(name='simple_grid') def grid_conf(): grid = {'N': 8, 'r_min': 0, 'r_max': 0.1} return Grid(grid)
@pytest.fixture(name='simple_grid') def grid_conf(): grid = {'N': 8, 'r_min': 0, 'r_max': 0.1} return Grid(grid)<|docstring|>Pytest fixture for grid configuration dictionary<|endoftext|>
b5532f3e01c51dca5485ed938eeb3589408e45e4973c6df734e676125af8e33e
def test_grid_init(simple_grid): 'Test initialization of the Grid class' assert (simple_grid.r_min == 0.0) assert (simple_grid.r_max == 0.1)
Test initialization of the Grid class
tests/test_core.py
test_grid_init
kpf59/turbopy
0
python
def test_grid_init(simple_grid): assert (simple_grid.r_min == 0.0) assert (simple_grid.r_max == 0.1)
def test_grid_init(simple_grid): assert (simple_grid.r_min == 0.0) assert (simple_grid.r_max == 0.1)<|docstring|>Test initialization of the Grid class<|endoftext|>