ngram
listlengths
0
67.8k
[ "instance): # If we got here, instance.changed() is True, but all changes could", "def delete(self, name, id_): self._q().delete(name, id_) @capture_psycopg_error def selected_ids(self, this_name, wr, sort, limit):", "NAME == 'parent' we need to add 'children' key in data, based on", "instance.field_values(): if not field.stored(): continue # If val.stored() is None it should be", "relationship fields. This operation should reverse _remove_virtual_columns. ''' # Determine IDs pkey_name =", "to None. if val.stored() is None and field not in instance.changed_fields: continue data[field.name]", "ignore those parameters, they are applied later in Enigne.get). HOW IT SHOULD BE", "id_): return self.load_many(name, [id_])[0] @capture_psycopg_error def load_many(self, name, ids): if not ids: return", "complicated. HOW IT IS CURRENLY DONE 1. WR is split into two parts:", "not in instance.changed_fields: continue data[field.name] = val.stored() # 2. Add primary key value", "Table: {}, val: {}'.format(name, val)) else: old_val = val else: return val @capture_psycopg_error", "== val: raise exceptions.ProgrammingError('Pkey value generator returned twice the same value. \\ Table:", "# Whatever happened, it makes sense to run this operation again. raise exceptions.TransactionConflictRetriable()", "if f.name in table_columns] if not changed_columns: return # Create representation data =", "the database. This means * all values other than None * None values,", "data.items(): if key in self._q().table_columns(name): clean_data[key] = val return clean_data def _add_virtual_columns(self, this_name,", "an explicit way (probably via PUT). Generator is called repeatedly, until we find", "a different value by database default) from updated fields set to None. 
if", "sorted(set.intersection(*sets_of_ids)) @capture_psycopg_error def _select_objects(self, name, wr): ''' WR containst key-val pairs matching columns", "return d # PRIVATE METHODS def _remove_virtual_columns(self, name, data): ''' DATA contains all", "were never set before (and might be set to # a different value", "table with REL field stored on the other side 2. Intersection of IDs", "self._conn.rollback() @capture_psycopg_error def delete(self, name, id_): self._q().delete(name, id_) @capture_psycopg_error def selected_ids(self, this_name, wr,", "non-duplicated value. This might take long, if there were many PUT's, but next", "e: raise exceptions.e500(diag_2_msg(e.diag)) except psycopg2.Error as e: raise exceptions.e400(diag_2_msg(e.diag)) return wrapped class PGStorage(BaseStorage):", "# in the data model, but it seems a good idea to avoid", "is returned. This works well with * nextval(sequence) * any simmilar user-defined function", "as e: # Either transaction was not serializable, or some deadlock was detected.", "NAME matching WR. SORT and LIMIT are ignored (storages are allwed to ignore", "on the other side 2. Intersection of IDs from all selects is returned", "does nothing pass @capture_psycopg_error def commit(self): self._conn.commit() @capture_psycopg_error def rollback(self): self._conn.rollback() @capture_psycopg_error def", "name, id_): return self.load_many(name, [id_])[0] @capture_psycopg_error def load_many(self, name, ids): if not ids:", "of dictionaries from table NAME is returned. ''' return self._q().select(name, wr) @capture_psycopg_error def", "be stored on any table, so instead of WHEREs we might get some", "REL fields have information stored in THIS_NAME table but unfortunately REL field could", "PUBLIC INTERFACE @capture_psycopg_error def save(self, instance): # If we got here, instance.changed() is", "import BaseStorage from blargh.engine import dm from .query import Query from .... import", "and they need to be removed now. 
This operation should reverse _add_virtual_columns. '''", "INTERFACE @capture_psycopg_error def save(self, instance): # If we got here, instance.changed() is True,", "information stored in THIS_NAME table but unfortunately REL field could be stored on", "[f.name for f in instance.changed_fields if f.name in table_columns] if not changed_columns: return", "**kwargs): def diag_2_msg(diag): # print(diag.message_primary) return diag.message_primary # return \"{}\\n{}\".format(diag.message_primary, diag.message_detail) try: return", "need to be written to the database, but other are redundat (i.e. if", "becomes more complicated. HOW IT IS CURRENLY DONE 1. WR is split into", "if old_val == val: raise exceptions.ProgrammingError('Pkey value generator returned twice the same value.", "if generator returns twice the same value, exception is raised * maybe this", "BE DONE above * sorting in Engine.get and LIMIT is ignored because SORTing", "is None: raise exceptions.ProgrammingError(\"Unknown default pkey value for {}\".format(name)) old_val = None while", "to be written to the database, but other are redundat (i.e. if we", "columns (rel fields stored on the other side). In such case, # nothing", "and that should be after engine # was set up, so the data", "on relationship fields. This operation should reverse _remove_virtual_columns. ''' # Determine IDs pkey_name", "we have parent-child relationship probably child table has 'parent_id', and parent has no", "* tables have the same names as resources ''' from ..base_storage import BaseStorage", "value for {}\".format(name)) old_val = None while True: cursor = self._conn.cursor() cursor.execute(\"SELECT {}\".format(default_expr))", "in Engine.get and LIMIT is ignored because SORTing first is necesary. ''' model", "self._query # PUBLIC INTERFACE @capture_psycopg_error def save(self, instance): # If we got here,", "passing data model directly to storage. 
# Instead, self._query is built in lazy", "= model.pkey_field().name sets_of_ids.append(set([x[this_pkey_name] for x in this_table_objects])) # Other tables ids for other_name,", "[d[pkey_name] for d in stored_data] missing_ids = [id_ for id_ in ids if", "stored_data] missing_ids = [id_ for id_ in ids if id_ not in got_ids]", "@capture_psycopg_error def next_id(self, name): ''' If NAME primary key column has default value,", "if default_expr is None: raise exceptions.ProgrammingError(\"Unknown default pkey value for {}\".format(name)) old_val =", "is purely to avoid wrapping __init__ self._true_init(conn, schema, query_cls) @capture_psycopg_error def _true_init(self, conn,", "have parent-child relationship probably child table has 'parent_id', and parent has no 'children'", "psycopg2.extensions.STATUS_READY: conn.commit() conn.set_session(isolation_level='SERIALIZABLE', autocommit=False) conn.cursor().execute('''SET CONSTRAINTS ALL DEFERRED''') self._conn = conn self._schema =", "val.stored() is None and field not in instance.changed_fields: continue data[field.name] = val.stored() #", "database data = {} for field, val in instance.field_values(): if not field.stored(): continue", "name = field.name if field.rel and name not in data[0]: other_name = field.stores.name", "ids: return [] # Determine column name pkey_name = dm().object(name).pkey_field().name stored_data = self._select_objects(name,", "= self._conn.cursor() cursor.execute(\"SELECT {}\".format(default_expr)) val = cursor.fetchone()[0] if self._select_objects(name, {pkey_name: val}): if old_val", "to the database, but other are redundat (i.e. if we have relation parent-child,", "key in self._q().table_columns(name): clean_data[key] = val return clean_data def _add_virtual_columns(self, this_name, data): '''", "ignored. 
SORT is ignored because there is no way of implementing it different", "for field, val in instance.field_values(): if not field.stored(): continue # If val.stored() is", "database columns clean_data = self._remove_virtual_columns(instance.model.name, data) return clean_data @capture_psycopg_error def load(self, name, id_):", "from blargh.engine import dm from .query import Query from .... import exceptions import", "fields based on other tables. I.e. if we have parent-child relationship probably child", "DATA contains only values stored in table NAME. We need to fill relationship", "= cursor.fetchone()[0] if self._select_objects(name, {pkey_name: val}): if old_val == val: raise exceptions.ProgrammingError('Pkey value", "other are redundat (i.e. if we have relation parent-child, probably child table has", "need list of all primary keys. Those are available # in the data", "conn.commit() conn.set_session(isolation_level='SERIALIZABLE', autocommit=False) conn.cursor().execute('''SET CONSTRAINTS ALL DEFERRED''') self._conn = conn self._schema = schema", "None def _q(self): if self._query is None: self._query = self._query_cls(self._conn, self._schema, {o.name: o.pkey_field().name", "INSTANCE.update() -> they are in INSTANCE.changed_fields ''' # 1. Create dictionary with all", "wr): ''' WR containst key-val pairs matching columns in table NAME. List of", "sense to run this operation again. raise exceptions.TransactionConflictRetriable() except psycopg2.InterfaceError as e: raise", "time, it will probably be fast (if nextval(sequence) is used). Also: * if", "there is no way of implementing it different from both: * HOW IT", "interpreted as WHERE 2. SORT becomes ORDER BY 3. 
LIMIT becomes LIMIT and", "ids if id_ not in got_ids] raise exceptions.e404(object_name=name, object_id=missing_ids[0]) full_data = self._add_virtual_columns(name, stored_data)", "= {} other_selects = [] for key, val in wr.items(): if key in", "object_id=missing_ids[0]) full_data = self._add_virtual_columns(name, stored_data) return full_data @capture_psycopg_error def begin(self): # All necesary", "ASSUMPTIONS: * all tables are in a single schema * tables have the", "Return IDs from table NAME matching WR. SORT and LIMIT are ignored (storages", "None fields that were never set before (and might be set to #", "be easy if we assumed that all REL fields have information stored in", "query. That would be easy if we assumed that all REL fields have", "is ignored because SORTing first is necesary. ''' model = dm().object(this_name) # First,", "it makes sense to run this operation again. raise exceptions.TransactionConflictRetriable() except psycopg2.InterfaceError as", "because SORTing first is necesary. ''' model = dm().object(this_name) # First, split to", "of those need to be written to the database, but other are redundat", "has no 'children' column, so it might not be written) and they need", "(rel fields stored on the other side). In such case, # nothing is", "as resources ''' from ..base_storage import BaseStorage from blargh.engine import dm from .query", "other_selects: other_table_objects = self._select_objects(other_name, other_table_wr) sets_of_ids.append(set([x[other_fk_name] for x in other_table_objects])) # Final ids", "tables. I.e. 
if we have parent-child relationship probably child table has 'parent_id', and", "x in this_table_objects])) # Other tables ids for other_name, other_fk_name, other_table_wr in other_selects:", "are in a single schema * tables have the same names as resources", "deferrable constraints), # so begin does nothing pass @capture_psycopg_error def commit(self): self._conn.commit() @capture_psycopg_error", "in an explicit way (probably via PUT). Generator is called repeatedly, until we", "this operation again. raise exceptions.TransactionConflictRetriable() except psycopg2.InterfaceError as e: raise exceptions.e500(diag_2_msg(e.diag)) except psycopg2.Error", "exceptions.ProgrammingError('Pkey value generator returned twice the same value. \\ Table: {}, val: {}'.format(name,", "pkey_name = instance.model.pkey_field().name data[pkey_name] = instance.id() # 3. Remove keys not matching database", "exceptions.e400(diag_2_msg(e.diag)) return wrapped class PGStorage(BaseStorage): def __init__(self, conn, schema, query_cls=Query): # This construction", "self.load_many(name, [id_])[0] @capture_psycopg_error def load_many(self, name, ids): if not ids: return [] #", "dm().objects().values()}) return self._query # PUBLIC INTERFACE @capture_psycopg_error def save(self, instance): # If we", "in table_columns] if not changed_columns: return # Create representation data = self._write_repr(instance) #", "database columns changed. table_columns = self._q().table_columns(instance.model.name) changed_columns = [f.name for f in instance.changed_fields", "while True: cursor = self._conn.cursor() cursor.execute(\"SELECT {}\".format(default_expr)) val = cursor.fetchone()[0] if self._select_objects(name, {pkey_name:", "with REL field stored on the other side 2. Intersection of IDs from", "simmilar user-defined function If there is no default, an exception is raised. This", "# on \"virtual\" columns (rel fields stored on the other side). 
In such", "= [] for key, val in wr.items(): if key in self._q().table_columns(this_name): this_table_wr[key] =", "<reponame>johny-b/blargh ''' CURRENT ASSUMPTIONS: * all tables are in a single schema *", "PGStorage(BaseStorage): def __init__(self, conn, schema, query_cls=Query): # This construction is purely to avoid", "self._schema = schema # To initialize query instance we need list of all", "the other side 2. Intersection of IDs from all selects is returned 3.", "# Determine column name pkey_name = dm().object(name).pkey_field().name stored_data = self._select_objects(name, {pkey_name: ids}) if", "(by INSTANCE.update() -> they are in INSTANCE.changed_fields ''' # 1. Create dictionary with", "that should be written to the database data = {} for field, val", "now. This operation should reverse _add_virtual_columns. ''' clean_data = {} for key, val", "allwed to ignore those parameters, they are applied later in Enigne.get). HOW IT", "HOW IT SHOULD BE DONE 1. WR is interpreted as WHERE 2. SORT", "self._query = None def _q(self): if self._query is None: self._query = self._query_cls(self._conn, self._schema,", "{}, val: {}'.format(name, val)) else: old_val = val else: return val @capture_psycopg_error def", "child table has 'parent_id', and parent has no 'children' column, so if NAME", "val: {}'.format(name, val)) else: old_val = val else: return val @capture_psycopg_error def data(self):", "in data, based on relationship fields. This operation should reverse _remove_virtual_columns. ''' #", "def __init__(self, conn, schema, query_cls=Query): # This construction is purely to avoid wrapping", "representation data = self._write_repr(instance) # Save new value name = instance.model.name self._q().upsert(name, data)", "should reverse _add_virtual_columns. 
''' clean_data = {} for key, val in data.items(): if", "That would be easy if we assumed that all REL fields have information", "other_selects = [] for key, val in wr.items(): if key in self._q().table_columns(this_name): this_table_wr[key]", "should reverse _remove_virtual_columns. ''' # Determine IDs pkey_name = dm().object(this_name).pkey_field().name ids = [d[pkey_name]", "contains only values stored in table NAME. We need to fill relationship fields", "stored in THIS_NAME table but unfortunately REL field could be stored on any", "return [] # Determine column name pkey_name = dm().object(name).pkey_field().name stored_data = self._select_objects(name, {pkey_name:", "return val @capture_psycopg_error def data(self): d = {} for name, obj in dm().objects().items():", "''' Returns INSTANCE representation including all columns that will be written to the", "not in data[0]: other_name = field.stores.name other_field_name = field.other.name all_related = self._select_objects(other_name, {other_field_name:", "returned. ''' return self._q().select(name, wr) @capture_psycopg_error def next_id(self, name): ''' If NAME primary", "Also: * if generator returns twice the same value, exception is raised *", "returns twice the same value, exception is raised * maybe this could be", "generator returns twice the same value, exception is raised * maybe this could", "from .... import exceptions import psycopg2 def capture_psycopg_error(f): def wrapped(self, *args, **kwargs): def", "d # PRIVATE METHODS def _remove_virtual_columns(self, name, data): ''' DATA contains all \"possible\"", "changed. table_columns = self._q().table_columns(instance.model.name) changed_columns = [f.name for f in instance.changed_fields if f.name", "pkey_name) if default_expr is None: raise exceptions.ProgrammingError(\"Unknown default pkey value for {}\".format(name)) old_val", "all selects is returned 3. SORT and LIMIT are ignored. SORT is ignored", "None it should be written only if field changed. 
# This way we", "any simmilar user-defined function If there is no default, an exception is raised.", "import exceptions import psycopg2 def capture_psycopg_error(f): def wrapped(self, *args, **kwargs): def diag_2_msg(diag): #", "def selected_ids(self, this_name, wr, sort, limit): ''' Return IDs from table NAME matching", "Enigne.get). HOW IT SHOULD BE DONE 1. WR is interpreted as WHERE 2.", "[] for key, val in wr.items(): if key in self._q().table_columns(this_name): this_table_wr[key] = val", "necesary. ''' model = dm().object(this_name) # First, split to parts this_table_wr = {}", "= instance.model.name self._q().upsert(name, data) def _write_repr(self, instance): ''' Returns INSTANCE representation including all", "self._q().upsert(name, data) def _write_repr(self, instance): ''' Returns INSTANCE representation including all columns that", "other_field_name, {other_pkey_name: val})) # List of sets of ids, to be intersected later", "conn self._schema = schema # To initialize query instance we need list of", "the same names as resources ''' from ..base_storage import BaseStorage from blargh.engine import", "= conn self._schema = schema # To initialize query instance we need list", "if NAME == 'parent' we need to add 'children' key in data, based", "is no way of implementing it different from both: * HOW IT SHOULD", "in other_table_objects])) # Final ids return sorted(set.intersection(*sets_of_ids)) @capture_psycopg_error def _select_objects(self, name, wr): '''", "''' If NAME primary key column has default value, it is returned. This", "''' DATA contains only values stored in table NAME. We need to fill", "day we'll look for the biggest current ID and add 1. 
NOTE: Any", "# Save new value name = instance.model.name self._q().upsert(name, data) def _write_repr(self, instance): '''", "clean_data = self._remove_virtual_columns(instance.model.name, data) return clean_data @capture_psycopg_error def load(self, name, id_): return self.load_many(name,", "tables have the same names as resources ''' from ..base_storage import BaseStorage from", "one select for THIS_NAME table with all possible WHEREs * one select for", "only values stored in table NAME. We need to fill relationship fields based", "model.pkey_field().name sets_of_ids.append(set([x[this_pkey_name] for x in this_table_objects])) # Other tables ids for other_name, other_fk_name,", "== el[pkey_name]] related_ids = [x[related_pkey_name] for x in this_related] if field.multi: el[name] =", "REL field stored on the other side 2. Intersection of IDs from all", "pkey_name = dm().object(name).pkey_field().name default_expr = self._q().default_pkey_expr(name, pkey_name) if default_expr is None: raise exceptions.ProgrammingError(\"Unknown", "val}): if old_val == val: raise exceptions.ProgrammingError('Pkey value generator returned twice the same", "intersected later sets_of_ids = [] # This table ids this_table_objects = self._select_objects(this_name, this_table_wr)", "lazy way, when needed - and that should be after engine # was", "= field.other.name other_pkey_name = dm().object(other_name).pkey_field().name other_selects.append((other_name, other_field_name, {other_pkey_name: val})) # List of sets", "values other than None * None values, if they were explicitly set (by", "clean_data[key] = val return clean_data def _add_virtual_columns(self, this_name, data): ''' DATA contains only", "were many PUT's, but next time, it will probably be fast (if nextval(sequence)", "in got_ids] raise exceptions.e404(object_name=name, object_id=missing_ids[0]) full_data = self._add_virtual_columns(name, stored_data) return full_data @capture_psycopg_error def", "database default) from 
updated fields set to None. if val.stored() is None and", "no way of implementing it different from both: * HOW IT SHOULD BE", "INSTANCE.changed_fields ''' # 1. Create dictionary with all columns that should be written", "This way we distinguish None fields that were never set before (and might", "table has 'parent_id', and parent has no 'children' column, so if NAME ==", "INSTANCE representation including all columns that will be written to the database. This", "- we want to handle also other than nextval() defaults, i.e. dependant on", "= self._select_objects(other_name, {other_field_name: ids}) related_pkey_name = dm().object(other_name).pkey_field().name for el in data: this_related =", "pkey_name = dm().object(this_name).pkey_field().name ids = [d[pkey_name] for d in data] for field in", "table NAME matching WR. SORT and LIMIT are ignored (storages are allwed to", "dm().objects().items(): d[name] = self._q().dump_table(name, obj.pkey_field().name) return d # PRIVATE METHODS def _remove_virtual_columns(self, name,", "ids = [d[pkey_name] for d in data] for field in dm().object(this_name).fields(): name =", "have information stored in THIS_NAME table but unfortunately REL field could be stored", "fields have information stored in THIS_NAME table but unfortunately REL field could be", "has something like 'parent_id', but parent table has no 'children' column, so it", "Determine column name pkey_name = dm().object(name).pkey_field().name stored_data = self._select_objects(name, {pkey_name: ids}) if len(stored_data)", "We need to fill relationship fields based on other tables. I.e. if we", "is raised. This might change and one day we'll look for the biggest", "different from both: * HOW IT SHOULD BE DONE above * sorting in", "model, but it seems a good idea to avoid passing data model directly", "side 2. Intersection of IDs from all selects is returned 3. SORT and", "all columns that will be written to the database. 
This means * all", "is None: self._query = self._query_cls(self._conn, self._schema, {o.name: o.pkey_field().name for o in dm().objects().values()}) return", "in data) pkey_name = instance.model.pkey_field().name data[pkey_name] = instance.id() # 3. Remove keys not", "fields that were never set before (and might be set to # a", "sort, limit): ''' Return IDs from table NAME matching WR. SORT and LIMIT", "for {}\".format(name)) old_val = None while True: cursor = self._conn.cursor() cursor.execute(\"SELECT {}\".format(default_expr)) val", "if self._query is None: self._query = self._query_cls(self._conn, self._schema, {o.name: o.pkey_field().name for o in", "NAME is returned. ''' return self._q().select(name, wr) @capture_psycopg_error def next_id(self, name): ''' If", "commit(self): self._conn.commit() @capture_psycopg_error def rollback(self): self._conn.rollback() @capture_psycopg_error def delete(self, name, id_): self._q().delete(name, id_)", "None: raise exceptions.ProgrammingError(\"Unknown default pkey value for {}\".format(name)) old_val = None while True:", "like 'parent_id', but parent table has no 'children' column, so it might not", "distinguish None fields that were never set before (and might be set to", "= [x for x in all_related if x[other_field_name] == el[pkey_name]] related_ids = [x[related_pkey_name]", "is raised * maybe this could be done better? 
Note - we want", "in wr.items(): if key in self._q().table_columns(this_name): this_table_wr[key] = val else: field = model.field(key)", "and one day we'll look for the biggest current ID and add 1.", "name, ids): if not ids: return [] # Determine column name pkey_name =", "when needed - and that should be after engine # was set up,", "if we have parent-child relationship probably child table has 'parent_id', and parent has", "relationship probably child table has 'parent_id', and parent has no 'children' column, so", "ids return sorted(set.intersection(*sets_of_ids)) @capture_psycopg_error def _select_objects(self, name, wr): ''' WR containst key-val pairs", "if val.stored() is None and field not in instance.changed_fields: continue data[field.name] = val.stored()", "primary key value (if this is a fresh instance, it is already in", "field in dm().object(this_name).fields(): name = field.name if field.rel and name not in data[0]:", "related_pkey_name = dm().object(other_name).pkey_field().name for el in data: this_related = [x for x in", "have the same names as resources ''' from ..base_storage import BaseStorage from blargh.engine", "This works well with * nextval(sequence) * any simmilar user-defined function If there", "DEFERRED''') self._conn = conn self._schema = schema # To initialize query instance we", "for x in this_related] if field.multi: el[name] = related_ids else: el[name] = related_ids[0]", "self._query_cls(self._conn, self._schema, {o.name: o.pkey_field().name for o in dm().objects().values()}) return self._query # PUBLIC INTERFACE", "is returned. ''' return self._q().select(name, wr) @capture_psycopg_error def next_id(self, name): ''' If NAME", "based on other tables. I.e. if we have parent-child relationship probably child table", "key value (if this is a fresh instance, it is already in data)", "Generator is called repeatedly, until we find a non-duplicated value. 
This might take", "next_id(self, name): ''' If NAME primary key column has default value, it is", "in lazy way, when needed - and that should be after engine #", "is saved, because no database columns changed. table_columns = self._q().table_columns(instance.model.name) changed_columns = [f.name", "the other side). In such case, # nothing is saved, because no database", "set in __init__(autocommit, deferrable constraints), # so begin does nothing pass @capture_psycopg_error def", "written only if field changed. # This way we distinguish None fields that", "and name not in data[0]: other_name = field.stores.name other_field_name = field.other.name all_related =", "data model, but it seems a good idea to avoid passing data model", "engine # was set up, so the data model is available in dm()", "select for THIS_NAME table with all possible WHEREs * one select for each", "__init__(autocommit, deferrable constraints), # so begin does nothing pass @capture_psycopg_error def commit(self): self._conn.commit()", "This might change and one day we'll look for the biggest current ID", "key, val in wr.items(): if key in self._q().table_columns(this_name): this_table_wr[key] = val else: field", "id_) @capture_psycopg_error def selected_ids(self, this_name, wr, sort, limit): ''' Return IDs from table", "side). In such case, # nothing is saved, because no database columns changed.", "for field in dm().object(this_name).fields(): name = field.name if field.rel and name not in", "same names as resources ''' from ..base_storage import BaseStorage from blargh.engine import dm", "# return \"{}\\n{}\".format(diag.message_primary, diag.message_detail) try: return f(self, *args, **kwargs) except psycopg2.extensions.TransactionRollbackError as e:", "instance.changed_fields: continue data[field.name] = val.stored() # 2. Add primary key value (if this", "1. WR is interpreted as WHERE 2. SORT becomes ORDER BY 3. LIMIT", "table NAME. List of dictionaries from table NAME is returned. 
''' return self._q().select(name,", "@capture_psycopg_error def rollback(self): self._conn.rollback() @capture_psycopg_error def delete(self, name, id_): self._q().delete(name, id_) @capture_psycopg_error def", "* maybe this could be done better? Note - we want to handle", "a non-duplicated value. This might take long, if there were many PUT's, but", "def _remove_virtual_columns(self, name, data): ''' DATA contains all \"possible\" column values. Some of", "key-val pairs matching columns in table NAME. List of dictionaries from table NAME", "schema * tables have the same names as resources ''' from ..base_storage import", "long, if there were many PUT's, but next time, it will probably be", "value returned by any generator might be already taken, if client set it", "parameters, they are applied later in Enigne.get). HOW IT SHOULD BE DONE 1.", "no 'children' column, so if NAME == 'parent' we need to add 'children'", "ALL DEFERRED''') self._conn = conn self._schema = schema # To initialize query instance", "Note - we want to handle also other than nextval() defaults, i.e. dependant", "select for each joined table with REL field stored on the other side", "in INSTANCE.changed_fields ''' # 1. Create dictionary with all columns that should be", "= val return clean_data def _add_virtual_columns(self, this_name, data): ''' DATA contains only values", "joined table with REL field stored on the other side 2. Intersection of", "from ..base_storage import BaseStorage from blargh.engine import dm from .query import Query from", "# Final ids return sorted(set.intersection(*sets_of_ids)) @capture_psycopg_error def _select_objects(self, name, wr): ''' WR containst", "= self._remove_virtual_columns(instance.model.name, data) return clean_data @capture_psycopg_error def load(self, name, id_): return self.load_many(name, [id_])[0]", "''' return self._q().select(name, wr) @capture_psycopg_error def next_id(self, name): ''' If NAME primary key", "raised. 
This might change and one day we'll look for the biggest current", "''' WR containst key-val pairs matching columns in table NAME. List of dictionaries", "CONSTRAINTS ALL DEFERRED''') self._conn = conn self._schema = schema # To initialize query", "dm() function. self._query_cls = query_cls self._query = None def _q(self): if self._query is", "= dm().object(name).pkey_field().name default_expr = self._q().default_pkey_expr(name, pkey_name) if default_expr is None: raise exceptions.ProgrammingError(\"Unknown default", "ignored because SORTing first is necesary. ''' model = dm().object(this_name) # First, split", "applied later in Enigne.get). HOW IT SHOULD BE DONE 1. WR is interpreted", "in dm() function. self._query_cls = query_cls self._query = None def _q(self): if self._query", "reverse _add_virtual_columns. ''' clean_data = {} for key, val in data.items(): if key", "This operation should reverse _add_virtual_columns. ''' clean_data = {} for key, val in", "self._q().dump_table(name, obj.pkey_field().name) return d # PRIVATE METHODS def _remove_virtual_columns(self, name, data): ''' DATA", "table NAME. We need to fill relationship fields based on other tables. I.e.", "if not ids: return [] # Determine column name pkey_name = dm().object(name).pkey_field().name stored_data", "be removed now. This operation should reverse _add_virtual_columns. ''' clean_data = {} for", "split to parts this_table_wr = {} other_selects = [] for key, val in", "some deadlock was detected. # Whatever happened, it makes sense to run this", ".query import Query from .... import exceptions import psycopg2 def capture_psycopg_error(f): def wrapped(self,", "used). Also: * if generator returns twice the same value, exception is raised", "on \"virtual\" columns (rel fields stored on the other side). In such case,", "from both: * HOW IT SHOULD BE DONE above * sorting in Engine.get", "done better? Note - we want to handle also other than nextval() defaults,", "stored on the other side). 
In such case, # nothing is saved, because", "diag_2_msg(diag): # print(diag.message_primary) return diag.message_primary # return \"{}\\n{}\".format(diag.message_primary, diag.message_detail) try: return f(self, *args,", "keys. Those are available # in the data model, but it seems a", "not serializable, or some deadlock was detected. # Whatever happened, it makes sense", "better? Note - we want to handle also other than nextval() defaults, i.e.", "to handle also other than nextval() defaults, i.e. dependant on now(). ''' pkey_name", "is a fresh instance, it is already in data) pkey_name = instance.model.pkey_field().name data[pkey_name]", "LIMIT are ignored. SORT is ignored because there is no way of implementing", "as WHERE 2. SORT becomes ORDER BY 3. LIMIT becomes LIMIT and everything", "exceptions.e500(diag_2_msg(e.diag)) except psycopg2.Error as e: raise exceptions.e400(diag_2_msg(e.diag)) return wrapped class PGStorage(BaseStorage): def __init__(self,", "schema, query_cls): # Modify connection if conn.status is not psycopg2.extensions.STATUS_READY: conn.commit() conn.set_session(isolation_level='SERIALIZABLE', autocommit=False)", "val)) else: old_val = val else: return val @capture_psycopg_error def data(self): d =", "print(diag.message_primary) return diag.message_primary # return \"{}\\n{}\".format(diag.message_primary, diag.message_detail) try: return f(self, *args, **kwargs) except", "id_): self._q().delete(name, id_) @capture_psycopg_error def selected_ids(self, this_name, wr, sort, limit): ''' Return IDs", "this_name, wr, sort, limit): ''' Return IDs from table NAME matching WR. SORT", "and everything is processed in a single query. That would be easy if", "# Determine IDs pkey_name = dm().object(this_name).pkey_field().name ids = [d[pkey_name] for d in data]", "for x in other_table_objects])) # Final ids return sorted(set.intersection(*sets_of_ids)) @capture_psycopg_error def _select_objects(self, name,", "model is available in dm() function. 
self._query_cls = query_cls self._query = None def", "[id_])[0] @capture_psycopg_error def load_many(self, name, ids): if not ids: return [] # Determine", "NAME. List of dictionaries from table NAME is returned. ''' return self._q().select(name, wr)", "Query from .... import exceptions import psycopg2 def capture_psycopg_error(f): def wrapped(self, *args, **kwargs):", "is not psycopg2.extensions.STATUS_READY: conn.commit() conn.set_session(isolation_level='SERIALIZABLE', autocommit=False) conn.cursor().execute('''SET CONSTRAINTS ALL DEFERRED''') self._conn = conn", "purely to avoid wrapping __init__ self._true_init(conn, schema, query_cls) @capture_psycopg_error def _true_init(self, conn, schema,", "class PGStorage(BaseStorage): def __init__(self, conn, schema, query_cls=Query): # This construction is purely to", "has no 'children' column, so if NAME == 'parent' we need to add", "to run this operation again. raise exceptions.TransactionConflictRetriable() except psycopg2.InterfaceError as e: raise exceptions.e500(diag_2_msg(e.diag))", "containst key-val pairs matching columns in table NAME. List of dictionaries from table", "# nothing is saved, because no database columns changed. table_columns = self._q().table_columns(instance.model.name) changed_columns", "data = self._write_repr(instance) # Save new value name = instance.model.name self._q().upsert(name, data) def", "next time, it will probably be fast (if nextval(sequence) is used). Also: *", "self._conn.commit() @capture_psycopg_error def rollback(self): self._conn.rollback() @capture_psycopg_error def delete(self, name, id_): self._q().delete(name, id_) @capture_psycopg_error", "missing_ids = [id_ for id_ in ids if id_ not in got_ids] raise", "''' DATA contains all \"possible\" column values. 
Some of those need to be", "so it might not be written) and they need to be removed now.", "so if NAME == 'parent' we need to add 'children' key in data,", "to the database data = {} for field, val in instance.field_values(): if not", "key, val in data.items(): if key in self._q().table_columns(name): clean_data[key] = val return clean_data", "Create representation data = self._write_repr(instance) # Save new value name = instance.model.name self._q().upsert(name,", "table_columns] if not changed_columns: return # Create representation data = self._write_repr(instance) # Save", "2. SORT becomes ORDER BY 3. LIMIT becomes LIMIT and everything is processed", "parent has no 'children' column, so if NAME == 'parent' we need to", "# Other tables ids for other_name, other_fk_name, other_table_wr in other_selects: other_table_objects = self._select_objects(other_name,", "if client set it in an explicit way (probably via PUT). Generator is", "len(stored_data) != len(ids): got_ids = [d[pkey_name] for d in stored_data] missing_ids = [id_", "of IDs from all selects is returned 3. SORT and LIMIT are ignored.", "constraints), # so begin does nothing pass @capture_psycopg_error def commit(self): self._conn.commit() @capture_psycopg_error def", "DATA contains all \"possible\" column values. Some of those need to be written", "# print(diag.message_primary) return diag.message_primary # return \"{}\\n{}\".format(diag.message_primary, diag.message_detail) try: return f(self, *args, **kwargs)", "so instead of WHEREs we might get some JOINS and this becomes more", "could be stored on any table, so instead of WHEREs we might get", "= val else: field = model.field(key) other_name = field.stores.name other_field_name = field.other.name other_pkey_name", "on any table, so instead of WHEREs we might get some JOINS and", "to ignore those parameters, they are applied later in Enigne.get). 
HOW IT SHOULD", "conn.status is not psycopg2.extensions.STATUS_READY: conn.commit() conn.set_session(isolation_level='SERIALIZABLE', autocommit=False) conn.cursor().execute('''SET CONSTRAINTS ALL DEFERRED''') self._conn =", "self._query = self._query_cls(self._conn, self._schema, {o.name: o.pkey_field().name for o in dm().objects().values()}) return self._query #", "for o in dm().objects().values()}) return self._query # PUBLIC INTERFACE @capture_psycopg_error def save(self, instance):", "def load_many(self, name, ids): if not ids: return [] # Determine column name", "other_field_name = field.other.name other_pkey_name = dm().object(other_name).pkey_field().name other_selects.append((other_name, other_field_name, {other_pkey_name: val})) # List of", "Instead, self._query is built in lazy way, when needed - and that should", "serializable, or some deadlock was detected. # Whatever happened, it makes sense to", "key in self._q().table_columns(this_name): this_table_wr[key] = val else: field = model.field(key) other_name = field.stores.name", "for x in all_related if x[other_field_name] == el[pkey_name]] related_ids = [x[related_pkey_name] for x", "2. Add primary key value (if this is a fresh instance, it is", "save(self, instance): # If we got here, instance.changed() is True, but all changes", "any table, so instead of WHEREs we might get some JOINS and this", "other_name, other_fk_name, other_table_wr in other_selects: other_table_objects = self._select_objects(other_name, other_table_wr) sets_of_ids.append(set([x[other_fk_name] for x in", "{} other_selects = [] for key, val in wr.items(): if key in self._q().table_columns(this_name):", "construction is purely to avoid wrapping __init__ self._true_init(conn, schema, query_cls) @capture_psycopg_error def _true_init(self,", "built in lazy way, when needed - and that should be after engine", "''' model = dm().object(this_name) # First, split to parts this_table_wr = {} other_selects", "was detected. 
# Whatever happened, it makes sense to run this operation again.", "IDs pkey_name = dm().object(this_name).pkey_field().name ids = [d[pkey_name] for d in data] for field", "this_table_wr[key] = val else: field = model.field(key) other_name = field.stores.name other_field_name = field.other.name", "value generator returned twice the same value. \\ Table: {}, val: {}'.format(name, val))", "easy if we assumed that all REL fields have information stored in THIS_NAME", "other_pkey_name = dm().object(other_name).pkey_field().name other_selects.append((other_name, other_field_name, {other_pkey_name: val})) # List of sets of ids,", "__init__(self, conn, schema, query_cls=Query): # This construction is purely to avoid wrapping __init__", "returned 3. SORT and LIMIT are ignored. SORT is ignored because there is", "conn.set_session(isolation_level='SERIALIZABLE', autocommit=False) conn.cursor().execute('''SET CONSTRAINTS ALL DEFERRED''') self._conn = conn self._schema = schema #", "= [f.name for f in instance.changed_fields if f.name in table_columns] if not changed_columns:", "# All necesary things were set in __init__(autocommit, deferrable constraints), # so begin", "wr) @capture_psycopg_error def next_id(self, name): ''' If NAME primary key column has default", "necesary things were set in __init__(autocommit, deferrable constraints), # so begin does nothing", "fields set to None. if val.stored() is None and field not in instance.changed_fields:", "raised * maybe this could be done better? Note - we want to", "True: cursor = self._conn.cursor() cursor.execute(\"SELECT {}\".format(default_expr)) val = cursor.fetchone()[0] if self._select_objects(name, {pkey_name: val}):", "value name = instance.model.name self._q().upsert(name, data) def _write_repr(self, instance): ''' Returns INSTANCE representation", "full_data = self._add_virtual_columns(name, stored_data) return full_data @capture_psycopg_error def begin(self): # All necesary things", "_add_virtual_columns. 
''' clean_data = {} for key, val in data.items(): if key in", "makes sense to run this operation again. raise exceptions.TransactionConflictRetriable() except psycopg2.InterfaceError as e:", "conn, schema, query_cls=Query): # This construction is purely to avoid wrapping __init__ self._true_init(conn,", "for each joined table with REL field stored on the other side 2.", "later sets_of_ids = [] # This table ids this_table_objects = self._select_objects(this_name, this_table_wr) this_pkey_name", "child table has something like 'parent_id', but parent table has no 'children' column,", "d = {} for name, obj in dm().objects().items(): d[name] = self._q().dump_table(name, obj.pkey_field().name) return", "d in stored_data] missing_ids = [id_ for id_ in ids if id_ not", "cursor.execute(\"SELECT {}\".format(default_expr)) val = cursor.fetchone()[0] if self._select_objects(name, {pkey_name: val}): if old_val == val:", "current ID and add 1. NOTE: Any value returned by any generator might", "relationship fields based on other tables. I.e. if we have parent-child relationship probably", "continue # If val.stored() is None it should be written only if field", "obj.pkey_field().name) return d # PRIVATE METHODS def _remove_virtual_columns(self, name, data): ''' DATA contains", "* HOW IT SHOULD BE DONE above * sorting in Engine.get and LIMIT", "@capture_psycopg_error def save(self, instance): # If we got here, instance.changed() is True, but", "or some deadlock was detected. 
# Whatever happened, it makes sense to run", "not matching database columns clean_data = self._remove_virtual_columns(instance.model.name, data) return clean_data @capture_psycopg_error def load(self,", "for key, val in wr.items(): if key in self._q().table_columns(this_name): this_table_wr[key] = val else:", "# Modify connection if conn.status is not psycopg2.extensions.STATUS_READY: conn.commit() conn.set_session(isolation_level='SERIALIZABLE', autocommit=False) conn.cursor().execute('''SET CONSTRAINTS", "in this_related] if field.multi: el[name] = related_ids else: el[name] = related_ids[0] if related_ids", "all \"possible\" column values. Some of those need to be written to the", "primary keys. Those are available # in the data model, but it seems", "be written) and they need to be removed now. This operation should reverse", "they need to be removed now. This operation should reverse _add_virtual_columns. ''' clean_data", "other tables. I.e. if we have parent-child relationship probably child table has 'parent_id',", "values. Some of those need to be written to the database, but other", "schema, query_cls) @capture_psycopg_error def _true_init(self, conn, schema, query_cls): # Modify connection if conn.status", "* all values other than None * None values, if they were explicitly", "because there is no way of implementing it different from both: * HOW", "e: # Either transaction was not serializable, or some deadlock was detected. #", "o.pkey_field().name for o in dm().objects().values()}) return self._query # PUBLIC INTERFACE @capture_psycopg_error def save(self,", "self._q().table_columns(instance.model.name) changed_columns = [f.name for f in instance.changed_fields if f.name in table_columns] if", "I.e. if we have parent-child relationship probably child table has 'parent_id', and parent", "in Enigne.get). HOW IT SHOULD BE DONE 1. WR is interpreted as WHERE", "# 2. 
    @capture_psycopg_error
    def begin(self):
        # All necessary things were set in __init__ (autocommit, deferrable constraints),
        # so begin does nothing
        pass

    @capture_psycopg_error
    def commit(self):
        # Commit the current database transaction.
        self._conn.commit()

    @capture_psycopg_error
    def rollback(self):
        # Discard all changes made since the last commit.
        self._conn.rollback()

    @capture_psycopg_error
    def delete(self, name, id_):
        # Delete the row with primary key ID_ from table NAME.
        self._q().delete(name, id_)
''' model = dm().object(this_name) #", "might take long, if there were many PUT's, but next time, it will", "field.multi: el[name] = related_ids else: el[name] = related_ids[0] if related_ids else None return", "def capture_psycopg_error(f): def wrapped(self, *args, **kwargs): def diag_2_msg(diag): # print(diag.message_primary) return diag.message_primary #", "psycopg2 def capture_psycopg_error(f): def wrapped(self, *args, **kwargs): def diag_2_msg(diag): # print(diag.message_primary) return diag.message_primary", "= [d[pkey_name] for d in data] for field in dm().object(this_name).fields(): name = field.name", "they are in INSTANCE.changed_fields ''' # 1. Create dictionary with all columns that", "for name, obj in dm().objects().items(): d[name] = self._q().dump_table(name, obj.pkey_field().name) return d # PRIVATE", "in instance.field_values(): if not field.stored(): continue # If val.stored() is None it should", "things were set in __init__(autocommit, deferrable constraints), # so begin does nothing pass", "fresh instance, it is already in data) pkey_name = instance.model.pkey_field().name data[pkey_name] = instance.id()", "values stored in table NAME. We need to fill relationship fields based on", "= self._write_repr(instance) # Save new value name = instance.model.name self._q().upsert(name, data) def _write_repr(self,", "is True, but all changes could be made # on \"virtual\" columns (rel", "than nextval() defaults, i.e. dependant on now(). ''' pkey_name = dm().object(name).pkey_field().name default_expr =", "but unfortunately REL field could be stored on any table, so instead of", "in data.items(): if key in self._q().table_columns(name): clean_data[key] = val return clean_data def _add_virtual_columns(self,", "IT IS CURRENLY DONE 1. WR is split into two parts: * one", "x in other_table_objects])) # Final ids return sorted(set.intersection(*sets_of_ids)) @capture_psycopg_error def _select_objects(self, name, wr):", "dm from .query import Query from .... 
import exceptions import psycopg2 def capture_psycopg_error(f):", "updated fields set to None. if val.stored() is None and field not in", "and LIMIT are ignored (storages are allwed to ignore those parameters, they are", "it seems a good idea to avoid passing data model directly to storage.", "pairs matching columns in table NAME. List of dictionaries from table NAME is", "initialize query instance we need list of all primary keys. Those are available", "def data(self): d = {} for name, obj in dm().objects().items(): d[name] = self._q().dump_table(name,", "\\ Table: {}, val: {}'.format(name, val)) else: old_val = val else: return val", "SORT becomes ORDER BY 3. LIMIT becomes LIMIT and everything is processed in", "''' clean_data = {} for key, val in data.items(): if key in self._q().table_columns(name):", "are ignored (storages are allwed to ignore those parameters, they are applied later", "query instance we need list of all primary keys. Those are available #", "DONE 1. WR is interpreted as WHERE 2. SORT becomes ORDER BY 3.", "name = instance.model.name self._q().upsert(name, data) def _write_repr(self, instance): ''' Returns INSTANCE representation including", "-> they are in INSTANCE.changed_fields ''' # 1. Create dictionary with all columns", "each joined table with REL field stored on the other side 2. 
Intersection", "conn.cursor().execute('''SET CONSTRAINTS ALL DEFERRED''') self._conn = conn self._schema = schema # To initialize", "of implementing it different from both: * HOW IT SHOULD BE DONE above", "[d[pkey_name] for d in data] for field in dm().object(this_name).fields(): name = field.name if", "# List of sets of ids, to be intersected later sets_of_ids = []", "column, so it might not be written) and they need to be removed", "query_cls): # Modify connection if conn.status is not psycopg2.extensions.STATUS_READY: conn.commit() conn.set_session(isolation_level='SERIALIZABLE', autocommit=False) conn.cursor().execute('''SET", "up, so the data model is available in dm() function. self._query_cls = query_cls", "for THIS_NAME table with all possible WHEREs * one select for each joined", "limit): ''' Return IDs from table NAME matching WR. SORT and LIMIT are", "2. Intersection of IDs from all selects is returned 3. SORT and LIMIT", "from all selects is returned 3. SORT and LIMIT are ignored. SORT is", "PRIVATE METHODS def _remove_virtual_columns(self, name, data): ''' DATA contains all \"possible\" column values.", "if conn.status is not psycopg2.extensions.STATUS_READY: conn.commit() conn.set_session(isolation_level='SERIALIZABLE', autocommit=False) conn.cursor().execute('''SET CONSTRAINTS ALL DEFERRED''') self._conn", "and this becomes more complicated. HOW IT IS CURRENLY DONE 1. WR is", "in self._q().table_columns(this_name): this_table_wr[key] = val else: field = model.field(key) other_name = field.stores.name other_field_name", "_remove_virtual_columns. ''' # Determine IDs pkey_name = dm().object(this_name).pkey_field().name ids = [d[pkey_name] for d", "dependant on now(). 
''' pkey_name = dm().object(name).pkey_field().name default_expr = self._q().default_pkey_expr(name, pkey_name) if default_expr", "None values, if they were explicitly set (by INSTANCE.update() -> they are in", "parent table has no 'children' column, so it might not be written) and", "idea to avoid passing data model directly to storage. # Instead, self._query is", "psycopg2.InterfaceError as e: raise exceptions.e500(diag_2_msg(e.diag)) except psycopg2.Error as e: raise exceptions.e400(diag_2_msg(e.diag)) return wrapped", "in a single schema * tables have the same names as resources '''", "DONE above * sorting in Engine.get and LIMIT is ignored because SORTing first", "autocommit=False) conn.cursor().execute('''SET CONSTRAINTS ALL DEFERRED''') self._conn = conn self._schema = schema # To", "data, based on relationship fields. This operation should reverse _remove_virtual_columns. ''' # Determine", "el in data: this_related = [x for x in all_related if x[other_field_name] ==", "if key in self._q().table_columns(this_name): this_table_wr[key] = val else: field = model.field(key) other_name =", "@capture_psycopg_error def data(self): d = {} for name, obj in dm().objects().items(): d[name] =", "wr.items(): if key in self._q().table_columns(this_name): this_table_wr[key] = val else: field = model.field(key) other_name", "in self._q().table_columns(name): clean_data[key] = val return clean_data def _add_virtual_columns(self, this_name, data): ''' DATA", "those need to be written to the database, but other are redundat (i.e.", "to the database. This means * all values other than None * None", "this_table_objects = self._select_objects(this_name, this_table_wr) this_pkey_name = model.pkey_field().name sets_of_ids.append(set([x[this_pkey_name] for x in this_table_objects])) #", "* one select for each joined table with REL field stored on the", "explicitly set (by INSTANCE.update() -> they are in INSTANCE.changed_fields ''' # 1. 
Create", "= self._select_objects(name, {pkey_name: ids}) if len(stored_data) != len(ids): got_ids = [d[pkey_name] for d", "reverse _remove_virtual_columns. ''' # Determine IDs pkey_name = dm().object(this_name).pkey_field().name ids = [d[pkey_name] for", "WR is split into two parts: * one select for THIS_NAME table with", "will probably be fast (if nextval(sequence) is used). Also: * if generator returns", "wrapped(self, *args, **kwargs): def diag_2_msg(diag): # print(diag.message_primary) return diag.message_primary # return \"{}\\n{}\".format(diag.message_primary, diag.message_detail)", "THIS_NAME table with all possible WHEREs * one select for each joined table", "is no default, an exception is raised. This might change and one day", "other_table_objects = self._select_objects(other_name, other_table_wr) sets_of_ids.append(set([x[other_fk_name] for x in other_table_objects])) # Final ids return", "operation again. raise exceptions.TransactionConflictRetriable() except psycopg2.InterfaceError as e: raise exceptions.e500(diag_2_msg(e.diag)) except psycopg2.Error as", "are in INSTANCE.changed_fields ''' # 1. Create dictionary with all columns that should", "from table NAME is returned. ''' return self._q().select(name, wr) @capture_psycopg_error def next_id(self, name):", "be written to the database, but other are redundat (i.e. if we have", "key column has default value, it is returned. This works well with *", "wr, sort, limit): ''' Return IDs from table NAME matching WR. SORT and", "Whatever happened, it makes sense to run this operation again. raise exceptions.TransactionConflictRetriable() except", "it might not be written) and they need to be removed now. This", "# Either transaction was not serializable, or some deadlock was detected. 
# Whatever", "field.other.name all_related = self._select_objects(other_name, {other_field_name: ids}) related_pkey_name = dm().object(other_name).pkey_field().name for el in data:", "changed_columns: return # Create representation data = self._write_repr(instance) # Save new value name", "got_ids] raise exceptions.e404(object_name=name, object_id=missing_ids[0]) full_data = self._add_virtual_columns(name, stored_data) return full_data @capture_psycopg_error def begin(self):", "'parent' we need to add 'children' key in data, based on relationship fields.", "we'll look for the biggest current ID and add 1. NOTE: Any value", "different value by database default) from updated fields set to None. if val.stored()", "This construction is purely to avoid wrapping __init__ self._true_init(conn, schema, query_cls) @capture_psycopg_error def", "might get some JOINS and this becomes more complicated. HOW IT IS CURRENLY", "(i.e. if we have relation parent-child, probably child table has something like 'parent_id',", "# First, split to parts this_table_wr = {} other_selects = [] for key,", "= instance.model.pkey_field().name data[pkey_name] = instance.id() # 3. Remove keys not matching database columns", "Intersection of IDs from all selects is returned 3. 
SORT and LIMIT are", "Add primary key value (if this is a fresh instance, it is already", "all REL fields have information stored in THIS_NAME table but unfortunately REL field", "diag.message_primary # return \"{}\\n{}\".format(diag.message_primary, diag.message_detail) try: return f(self, *args, **kwargs) except psycopg2.extensions.TransactionRollbackError as", "be made # on \"virtual\" columns (rel fields stored on the other side).", "val else: return val @capture_psycopg_error def data(self): d = {} for name, obj", "'children' column, so it might not be written) and they need to be", "@capture_psycopg_error def commit(self): self._conn.commit() @capture_psycopg_error def rollback(self): self._conn.rollback() @capture_psycopg_error def delete(self, name, id_):", "''' # Determine IDs pkey_name = dm().object(this_name).pkey_field().name ids = [d[pkey_name] for d in", "this could be done better? Note - we want to handle also other", "*args, **kwargs): def diag_2_msg(diag): # print(diag.message_primary) return diag.message_primary # return \"{}\\n{}\".format(diag.message_primary, diag.message_detail) try:", "only if field changed. # This way we distinguish None fields that were", "This table ids this_table_objects = self._select_objects(this_name, this_table_wr) this_pkey_name = model.pkey_field().name sets_of_ids.append(set([x[this_pkey_name] for x", "we distinguish None fields that were never set before (and might be set", "table has no 'children' column, so it might not be written) and they", "1. WR is split into two parts: * one select for THIS_NAME table", "clean_data @capture_psycopg_error def load(self, name, id_): return self.load_many(name, [id_])[0] @capture_psycopg_error def load_many(self, name,", "might not be written) and they need to be removed now. This operation", "nothing is saved, because no database columns changed. 
table_columns = self._q().table_columns(instance.model.name) changed_columns =", "@capture_psycopg_error def _true_init(self, conn, schema, query_cls): # Modify connection if conn.status is not", "not in got_ids] raise exceptions.e404(object_name=name, object_id=missing_ids[0]) full_data = self._add_virtual_columns(name, stored_data) return full_data @capture_psycopg_error", "but it seems a good idea to avoid passing data model directly to", "that will be written to the database. This means * all values other", "everything is processed in a single query. That would be easy if we", "default) from updated fields set to None. if val.stored() is None and field", "are allwed to ignore those parameters, they are applied later in Enigne.get). HOW", "= [x[related_pkey_name] for x in this_related] if field.multi: el[name] = related_ids else: el[name]", "got_ids = [d[pkey_name] for d in stored_data] missing_ids = [id_ for id_ in", "# If val.stored() is None it should be written only if field changed.", "= dm().object(name).pkey_field().name stored_data = self._select_objects(name, {pkey_name: ids}) if len(stored_data) != len(ids): got_ids =", "default value, it is returned. This works well with * nextval(sequence) * any", "WHERE 2. SORT becomes ORDER BY 3. LIMIT becomes LIMIT and everything is", "\"{}\\n{}\".format(diag.message_primary, diag.message_detail) try: return f(self, *args, **kwargs) except psycopg2.extensions.TransactionRollbackError as e: # Either", "[x[related_pkey_name] for x in this_related] if field.multi: el[name] = related_ids else: el[name] =", "self._conn.cursor() cursor.execute(\"SELECT {}\".format(default_expr)) val = cursor.fetchone()[0] if self._select_objects(name, {pkey_name: val}): if old_val ==", "same value. \\ Table: {}, val: {}'.format(name, val)) else: old_val = val else:", "relation parent-child, probably child table has something like 'parent_id', but parent table has", "but other are redundat (i.e. 
if we have relation parent-child, probably child table", "might change and one day we'll look for the biggest current ID and", "operation should reverse _add_virtual_columns. ''' clean_data = {} for key, val in data.items():", "query_cls self._query = None def _q(self): if self._query is None: self._query = self._query_cls(self._conn,", "return self._query # PUBLIC INTERFACE @capture_psycopg_error def save(self, instance): # If we got", "1. NOTE: Any value returned by any generator might be already taken, if", "table has something like 'parent_id', but parent table has no 'children' column, so", "has 'parent_id', and parent has no 'children' column, so if NAME == 'parent'", "case, # nothing is saved, because no database columns changed. table_columns = self._q().table_columns(instance.model.name)", "field stored on the other side 2. Intersection of IDs from all selects", "instance we need list of all primary keys. Those are available # in", "name, wr): ''' WR containst key-val pairs matching columns in table NAME. List", "changed_columns = [f.name for f in instance.changed_fields if f.name in table_columns] if not", "If NAME primary key column has default value, it is returned. This works", "selects is returned 3. SORT and LIMIT are ignored. SORT is ignored because", "psycopg2.extensions.TransactionRollbackError as e: # Either transaction was not serializable, or some deadlock was", "dictionaries from table NAME is returned. ''' return self._q().select(name, wr) @capture_psycopg_error def next_id(self,", "WHEREs * one select for each joined table with REL field stored on", "available # in the data model, but it seems a good idea to", "capture_psycopg_error(f): def wrapped(self, *args, **kwargs): def diag_2_msg(diag): # print(diag.message_primary) return diag.message_primary # return", "if there were many PUT's, but next time, it will probably be fast", "to fill relationship fields based on other tables. I.e. if we have parent-child", "exception is raised. 
This might change and one day we'll look for the", "Other tables ids for other_name, other_fk_name, other_table_wr in other_selects: other_table_objects = self._select_objects(other_name, other_table_wr)", "good idea to avoid passing data model directly to storage. # Instead, self._query", "other side). In such case, # nothing is saved, because no database columns", "= field.name if field.rel and name not in data[0]: other_name = field.stores.name other_field_name", "load_many(self, name, ids): if not ids: return [] # Determine column name pkey_name", "parts: * one select for THIS_NAME table with all possible WHEREs * one", "exceptions.ProgrammingError(\"Unknown default pkey value for {}\".format(name)) old_val = None while True: cursor =", "table with all possible WHEREs * one select for each joined table with", "in table NAME. We need to fill relationship fields based on other tables.", "redundat (i.e. if we have relation parent-child, probably child table has something like", "could be done better? Note - we want to handle also other than", "data) return clean_data @capture_psycopg_error def load(self, name, id_): return self.load_many(name, [id_])[0] @capture_psycopg_error def", "= {} for name, obj in dm().objects().items(): d[name] = self._q().dump_table(name, obj.pkey_field().name) return d", "is None it should be written only if field changed. # This way", "NAME. We need to fill relationship fields based on other tables. I.e. if", "dm().object(name).pkey_field().name stored_data = self._select_objects(name, {pkey_name: ids}) if len(stored_data) != len(ids): got_ids = [d[pkey_name]", "= dm().object(this_name).pkey_field().name ids = [d[pkey_name] for d in data] for field in dm().object(this_name).fields():", "never set before (and might be set to # a different value by", "field, val in instance.field_values(): if not field.stored(): continue # If val.stored() is None", "matching WR. 
SORT and LIMIT are ignored (storages are allwed to ignore those", "and parent has no 'children' column, so if NAME == 'parent' we need", "pkey value for {}\".format(name)) old_val = None while True: cursor = self._conn.cursor() cursor.execute(\"SELECT", "resources ''' from ..base_storage import BaseStorage from blargh.engine import dm from .query import", "from table NAME matching WR. SORT and LIMIT are ignored (storages are allwed", "d in data] for field in dm().object(this_name).fields(): name = field.name if field.rel and", "from updated fields set to None. if val.stored() is None and field not", "database. This means * all values other than None * None values, if", "nextval() defaults, i.e. dependant on now(). ''' pkey_name = dm().object(name).pkey_field().name default_expr = self._q().default_pkey_expr(name,", "@capture_psycopg_error def selected_ids(self, this_name, wr, sort, limit): ''' Return IDs from table NAME", "list of all primary keys. Those are available # in the data model,", "single schema * tables have the same names as resources ''' from ..base_storage", "value by database default) from updated fields set to None. if val.stored() is", "it is already in data) pkey_name = instance.model.pkey_field().name data[pkey_name] = instance.id() # 3.", "wrapping __init__ self._true_init(conn, schema, query_cls) @capture_psycopg_error def _true_init(self, conn, schema, query_cls): # Modify", "be set to # a different value by database default) from updated fields", "[x for x in all_related if x[other_field_name] == el[pkey_name]] related_ids = [x[related_pkey_name] for", "of sets of ids, to be intersected later sets_of_ids = [] # This", "other_field_name = field.other.name all_related = self._select_objects(other_name, {other_field_name: ids}) related_pkey_name = dm().object(other_name).pkey_field().name for el", "maybe this could be done better? 
Note - we want to handle also", "= self._q().table_columns(instance.model.name) changed_columns = [f.name for f in instance.changed_fields if f.name in table_columns]", "SORT and LIMIT are ignored. SORT is ignored because there is no way", "so begin does nothing pass @capture_psycopg_error def commit(self): self._conn.commit() @capture_psycopg_error def rollback(self): self._conn.rollback()", "it different from both: * HOW IT SHOULD BE DONE above * sorting", "raise exceptions.e400(diag_2_msg(e.diag)) return wrapped class PGStorage(BaseStorage): def __init__(self, conn, schema, query_cls=Query): # This", "{}'.format(name, val)) else: old_val = val else: return val @capture_psycopg_error def data(self): d", "unfortunately REL field could be stored on any table, so instead of WHEREs", "conn, schema, query_cls): # Modify connection if conn.status is not psycopg2.extensions.STATUS_READY: conn.commit() conn.set_session(isolation_level='SERIALIZABLE',", "val.stored() is None it should be written only if field changed. # This", "they are applied later in Enigne.get). HOW IT SHOULD BE DONE 1. WR", "need to add 'children' key in data, based on relationship fields. This operation", "the same value. \\ Table: {}, val: {}'.format(name, val)) else: old_val = val", "via PUT). Generator is called repeatedly, until we find a non-duplicated value. This", "other_fk_name, other_table_wr in other_selects: other_table_objects = self._select_objects(other_name, other_table_wr) sets_of_ids.append(set([x[other_fk_name] for x in other_table_objects]))", "# Create representation data = self._write_repr(instance) # Save new value name = instance.model.name", "becomes LIMIT and everything is processed in a single query. 
That would be", "field.other.name other_pkey_name = dm().object(other_name).pkey_field().name other_selects.append((other_name, other_field_name, {other_pkey_name: val})) # List of sets of", "might be set to # a different value by database default) from updated", "tables are in a single schema * tables have the same names as", "fields. This operation should reverse _remove_virtual_columns. ''' # Determine IDs pkey_name = dm().object(this_name).pkey_field().name", "self._true_init(conn, schema, query_cls) @capture_psycopg_error def _true_init(self, conn, schema, query_cls): # Modify connection if", "BY 3. LIMIT becomes LIMIT and everything is processed in a single query.", "def load(self, name, id_): return self.load_many(name, [id_])[0] @capture_psycopg_error def load_many(self, name, ids): if", "# This construction is purely to avoid wrapping __init__ self._true_init(conn, schema, query_cls) @capture_psycopg_error", "already taken, if client set it in an explicit way (probably via PUT).", "contains all \"possible\" column values. Some of those need to be written to", "the biggest current ID and add 1. NOTE: Any value returned by any", "not field.stored(): continue # If val.stored() is None it should be written only", "both: * HOW IT SHOULD BE DONE above * sorting in Engine.get and", "new value name = instance.model.name self._q().upsert(name, data) def _write_repr(self, instance): ''' Returns INSTANCE", "on now(). ''' pkey_name = dm().object(name).pkey_field().name default_expr = self._q().default_pkey_expr(name, pkey_name) if default_expr is", "connection if conn.status is not psycopg2.extensions.STATUS_READY: conn.commit() conn.set_session(isolation_level='SERIALIZABLE', autocommit=False) conn.cursor().execute('''SET CONSTRAINTS ALL DEFERRED''')", "happened, it makes sense to run this operation again. 
raise exceptions.TransactionConflictRetriable() except psycopg2.InterfaceError", "data: this_related = [x for x in all_related if x[other_field_name] == el[pkey_name]] related_ids", "no database columns changed. table_columns = self._q().table_columns(instance.model.name) changed_columns = [f.name for f in", "possible WHEREs * one select for each joined table with REL field stored", "old_val = val else: return val @capture_psycopg_error def data(self): d = {} for", "get some JOINS and this becomes more complicated. HOW IT IS CURRENLY DONE", "dictionary with all columns that should be written to the database data =", "is called repeatedly, until we find a non-duplicated value. This might take long,", "from .query import Query from .... import exceptions import psycopg2 def capture_psycopg_error(f): def", "return clean_data def _add_virtual_columns(self, this_name, data): ''' DATA contains only values stored in", "run this operation again. raise exceptions.TransactionConflictRetriable() except psycopg2.InterfaceError as e: raise exceptions.e500(diag_2_msg(e.diag)) except", "more complicated. HOW IT IS CURRENLY DONE 1. WR is split into two", "{other_field_name: ids}) related_pkey_name = dm().object(other_name).pkey_field().name for el in data: this_related = [x for", "with all possible WHEREs * one select for each joined table with REL", "columns in table NAME. List of dictionaries from table NAME is returned. 
'''", "into two parts: * one select for THIS_NAME table with all possible WHEREs", "schema, query_cls=Query): # This construction is purely to avoid wrapping __init__ self._true_init(conn, schema,", "split into two parts: * one select for THIS_NAME table with all possible", "= [id_ for id_ in ids if id_ not in got_ids] raise exceptions.e404(object_name=name,", "of ids, to be intersected later sets_of_ids = [] # This table ids", "table_columns = self._q().table_columns(instance.model.name) changed_columns = [f.name for f in instance.changed_fields if f.name in", "to be removed now. This operation should reverse _add_virtual_columns. ''' clean_data = {}", "self._select_objects(name, {pkey_name: val}): if old_val == val: raise exceptions.ProgrammingError('Pkey value generator returned twice", "(if this is a fresh instance, it is already in data) pkey_name =", "table but unfortunately REL field could be stored on any table, so instead", "True, but all changes could be made # on \"virtual\" columns (rel fields", "val.stored() # 2. Add primary key value (if this is a fresh instance,", "return sorted(set.intersection(*sets_of_ids)) @capture_psycopg_error def _select_objects(self, name, wr): ''' WR containst key-val pairs matching", "i.e. dependant on now(). 
''' pkey_name = dm().object(name).pkey_field().name default_expr = self._q().default_pkey_expr(name, pkey_name) if", "any generator might be already taken, if client set it in an explicit", "generator might be already taken, if client set it in an explicit way", "probably child table has something like 'parent_id', but parent table has no 'children'", "return self.load_many(name, [id_])[0] @capture_psycopg_error def load_many(self, name, ids): if not ids: return []", "other_table_wr) sets_of_ids.append(set([x[other_fk_name] for x in other_table_objects])) # Final ids return sorted(set.intersection(*sets_of_ids)) @capture_psycopg_error def", "for d in data] for field in dm().object(this_name).fields(): name = field.name if field.rel", "changes could be made # on \"virtual\" columns (rel fields stored on the", "cursor.fetchone()[0] if self._select_objects(name, {pkey_name: val}): if old_val == val: raise exceptions.ProgrammingError('Pkey value generator", "This means * all values other than None * None values, if they", "other_table_wr in other_selects: other_table_objects = self._select_objects(other_name, other_table_wr) sets_of_ids.append(set([x[other_fk_name] for x in other_table_objects])) #", "table ids this_table_objects = self._select_objects(this_name, this_table_wr) this_pkey_name = model.pkey_field().name sets_of_ids.append(set([x[this_pkey_name] for x in", "3. SORT and LIMIT are ignored. SORT is ignored because there is no", "model = dm().object(this_name) # First, split to parts this_table_wr = {} other_selects =", "name pkey_name = dm().object(name).pkey_field().name stored_data = self._select_objects(name, {pkey_name: ids}) if len(stored_data) != len(ids):", "related_ids = [x[related_pkey_name] for x in this_related] if field.multi: el[name] = related_ids else:", "Engine.get and LIMIT is ignored because SORTing first is necesary. 
''' model =", "- and that should be after engine # was set up, so the", "if self._select_objects(name, {pkey_name: val}): if old_val == val: raise exceptions.ProgrammingError('Pkey value generator returned", "* sorting in Engine.get and LIMIT is ignored because SORTing first is necesary.", "@capture_psycopg_error def load(self, name, id_): return self.load_many(name, [id_])[0] @capture_psycopg_error def load_many(self, name, ids):", "\"virtual\" columns (rel fields stored on the other side). In such case, #", "name, id_): self._q().delete(name, id_) @capture_psycopg_error def selected_ids(self, this_name, wr, sort, limit): ''' Return", "removed now. This operation should reverse _add_virtual_columns. ''' clean_data = {} for key,", "Any value returned by any generator might be already taken, if client set", "name not in data[0]: other_name = field.stores.name other_field_name = field.other.name all_related = self._select_objects(other_name,", "len(ids): got_ids = [d[pkey_name] for d in stored_data] missing_ids = [id_ for id_", "be written only if field changed. # This way we distinguish None fields", "import psycopg2 def capture_psycopg_error(f): def wrapped(self, *args, **kwargs): def diag_2_msg(diag): # print(diag.message_primary) return", "LIMIT becomes LIMIT and everything is processed in a single query. That would", "field not in instance.changed_fields: continue data[field.name] = val.stored() # 2. Add primary key", "val else: field = model.field(key) other_name = field.stores.name other_field_name = field.other.name other_pkey_name =", "_remove_virtual_columns(self, name, data): ''' DATA contains all \"possible\" column values. Some of those", "data) pkey_name = instance.model.pkey_field().name data[pkey_name] = instance.id() # 3. Remove keys not matching", "all columns that should be written to the database data = {} for", "dm().object(this_name) # First, split to parts this_table_wr = {} other_selects = [] for", "data[pkey_name] = instance.id() # 3. 
Remove keys not matching database columns clean_data =", "def _true_init(self, conn, schema, query_cls): # Modify connection if conn.status is not psycopg2.extensions.STATUS_READY:", "sets_of_ids.append(set([x[other_fk_name] for x in other_table_objects])) # Final ids return sorted(set.intersection(*sets_of_ids)) @capture_psycopg_error def _select_objects(self,", "in all_related if x[other_field_name] == el[pkey_name]] related_ids = [x[related_pkey_name] for x in this_related]", "name, data): ''' DATA contains all \"possible\" column values. Some of those need", "be intersected later sets_of_ids = [] # This table ids this_table_objects = self._select_objects(this_name,", "based on relationship fields. This operation should reverse _remove_virtual_columns. ''' # Determine IDs", "if not field.stored(): continue # If val.stored() is None it should be written", "old_val == val: raise exceptions.ProgrammingError('Pkey value generator returned twice the same value. \\", "schema # To initialize query instance we need list of all primary keys.", "''' # 1. Create dictionary with all columns that should be written to", "is ignored because there is no way of implementing it different from both:", "available in dm() function. self._query_cls = query_cls self._query = None def _q(self): if", "ids for other_name, other_fk_name, other_table_wr in other_selects: other_table_objects = self._select_objects(other_name, other_table_wr) sets_of_ids.append(set([x[other_fk_name] for", "twice the same value, exception is raised * maybe this could be done", "column, so if NAME == 'parent' we need to add 'children' key in", "model directly to storage. # Instead, self._query is built in lazy way, when", "Returns INSTANCE representation including all columns that will be written to the database.", "we need list of all primary keys. Those are available # in the", "fields stored on the other side). 
In such case, # nothing is saved,", "that should be after engine # was set up, so the data model", "nextval(sequence) is used). Also: * if generator returns twice the same value, exception", "el[name] = related_ids else: el[name] = related_ids[0] if related_ids else None return data", "user-defined function If there is no default, an exception is raised. This might", "already in data) pkey_name = instance.model.pkey_field().name data[pkey_name] = instance.id() # 3. Remove keys", "raise exceptions.e500(diag_2_msg(e.diag)) except psycopg2.Error as e: raise exceptions.e400(diag_2_msg(e.diag)) return wrapped class PGStorage(BaseStorage): def", "way we distinguish None fields that were never set before (and might be", "def wrapped(self, *args, **kwargs): def diag_2_msg(diag): # print(diag.message_primary) return diag.message_primary # return \"{}\\n{}\".format(diag.message_primary,", "instance.model.pkey_field().name data[pkey_name] = instance.id() # 3. Remove keys not matching database columns clean_data", "__init__ self._true_init(conn, schema, query_cls) @capture_psycopg_error def _true_init(self, conn, schema, query_cls): # Modify connection", "def save(self, instance): # If we got here, instance.changed() is True, but all", "one day we'll look for the biggest current ID and add 1. 
NOTE:", "this_table_wr) this_pkey_name = model.pkey_field().name sets_of_ids.append(set([x[this_pkey_name] for x in this_table_objects])) # Other tables ids", "with * nextval(sequence) * any simmilar user-defined function If there is no default,", "stored_data) return full_data @capture_psycopg_error def begin(self): # All necesary things were set in", "NOTE: Any value returned by any generator might be already taken, if client", "return clean_data @capture_psycopg_error def load(self, name, id_): return self.load_many(name, [id_])[0] @capture_psycopg_error def load_many(self,", "= field.other.name all_related = self._select_objects(other_name, {other_field_name: ids}) related_pkey_name = dm().object(other_name).pkey_field().name for el in", "in a single query. That would be easy if we assumed that all", "function If there is no default, an exception is raised. This might change", "primary key column has default value, it is returned. This works well with", "sets_of_ids = [] # This table ids this_table_objects = self._select_objects(this_name, this_table_wr) this_pkey_name =", "operation should reverse _remove_virtual_columns. ''' # Determine IDs pkey_name = dm().object(this_name).pkey_field().name ids =", "were explicitly set (by INSTANCE.update() -> they are in INSTANCE.changed_fields ''' # 1.", "before (and might be set to # a different value by database default)", "None * None values, if they were explicitly set (by INSTANCE.update() -> they", "pass @capture_psycopg_error def commit(self): self._conn.commit() @capture_psycopg_error def rollback(self): self._conn.rollback() @capture_psycopg_error def delete(self, name,", "'parent_id', and parent has no 'children' column, so if NAME == 'parent' we", "..base_storage import BaseStorage from blargh.engine import dm from .query import Query from ....", "until we find a non-duplicated value. This might take long, if there were", "key in data, based on relationship fields. 
This operation should reverse _remove_virtual_columns. '''", "of WHEREs we might get some JOINS and this becomes more complicated. HOW", "raise exceptions.TransactionConflictRetriable() except psycopg2.InterfaceError as e: raise exceptions.e500(diag_2_msg(e.diag)) except psycopg2.Error as e: raise", "return wrapped class PGStorage(BaseStorage): def __init__(self, conn, schema, query_cls=Query): # This construction is", "# PRIVATE METHODS def _remove_virtual_columns(self, name, data): ''' DATA contains all \"possible\" column", "if we have relation parent-child, probably child table has something like 'parent_id', but", "keys not matching database columns clean_data = self._remove_virtual_columns(instance.model.name, data) return clean_data @capture_psycopg_error def", "names as resources ''' from ..base_storage import BaseStorage from blargh.engine import dm from", "are ignored. SORT is ignored because there is no way of implementing it", "_q(self): if self._query is None: self._query = self._query_cls(self._conn, self._schema, {o.name: o.pkey_field().name for o", "self._select_objects(other_name, other_table_wr) sets_of_ids.append(set([x[other_fk_name] for x in other_table_objects])) # Final ids return sorted(set.intersection(*sets_of_ids)) @capture_psycopg_error", "saved, because no database columns changed. table_columns = self._q().table_columns(instance.model.name) changed_columns = [f.name for", "WR containst key-val pairs matching columns in table NAME. List of dictionaries from", "IT SHOULD BE DONE 1. WR is interpreted as WHERE 2. 
SORT becomes", "id_ in ids if id_ not in got_ids] raise exceptions.e404(object_name=name, object_id=missing_ids[0]) full_data =", "{pkey_name: val}): if old_val == val: raise exceptions.ProgrammingError('Pkey value generator returned twice the", "is None and field not in instance.changed_fields: continue data[field.name] = val.stored() # 2.", "return f(self, *args, **kwargs) except psycopg2.extensions.TransactionRollbackError as e: # Either transaction was not", "the same value, exception is raised * maybe this could be done better?", "return \"{}\\n{}\".format(diag.message_primary, diag.message_detail) try: return f(self, *args, **kwargs) except psycopg2.extensions.TransactionRollbackError as e: #", "'children' key in data, based on relationship fields. This operation should reverse _remove_virtual_columns.", "ids}) if len(stored_data) != len(ids): got_ids = [d[pkey_name] for d in stored_data] missing_ids", "delete(self, name, id_): self._q().delete(name, id_) @capture_psycopg_error def selected_ids(self, this_name, wr, sort, limit): '''", "processed in a single query. That would be easy if we assumed that", "PUT). Generator is called repeatedly, until we find a non-duplicated value. This might", "because no database columns changed. 
table_columns = self._q().table_columns(instance.model.name) changed_columns = [f.name for f", "other_name = field.stores.name other_field_name = field.other.name other_pkey_name = dm().object(other_name).pkey_field().name other_selects.append((other_name, other_field_name, {other_pkey_name: val}))", "stored on any table, so instead of WHEREs we might get some JOINS", "HOW IT SHOULD BE DONE above * sorting in Engine.get and LIMIT is", "in dm().objects().items(): d[name] = self._q().dump_table(name, obj.pkey_field().name) return d # PRIVATE METHODS def _remove_virtual_columns(self,", "parent-child, probably child table has something like 'parent_id', but parent table has no", "full_data @capture_psycopg_error def begin(self): # All necesary things were set in __init__(autocommit, deferrable", "value, exception is raised * maybe this could be done better? Note -", "as e: raise exceptions.e500(diag_2_msg(e.diag)) except psycopg2.Error as e: raise exceptions.e400(diag_2_msg(e.diag)) return wrapped class", "f(self, *args, **kwargs) except psycopg2.extensions.TransactionRollbackError as e: # Either transaction was not serializable,", "probably child table has 'parent_id', and parent has no 'children' column, so if", "METHODS def _remove_virtual_columns(self, name, data): ''' DATA contains all \"possible\" column values. Some", "HOW IT IS CURRENLY DONE 1. WR is split into two parts: *", "needed - and that should be after engine # was set up, so", "def begin(self): # All necesary things were set in __init__(autocommit, deferrable constraints), #", "generator returned twice the same value. \\ Table: {}, val: {}'.format(name, val)) else:", "columns that should be written to the database data = {} for field,", "# To initialize query instance we need list of all primary keys. Those", "= {} for field, val in instance.field_values(): if not field.stored(): continue # If", "columns that will be written to the database. 
This means * all values", "dm().object(name).pkey_field().name default_expr = self._q().default_pkey_expr(name, pkey_name) if default_expr is None: raise exceptions.ProgrammingError(\"Unknown default pkey", "now(). ''' pkey_name = dm().object(name).pkey_field().name default_expr = self._q().default_pkey_expr(name, pkey_name) if default_expr is None:", "import Query from .... import exceptions import psycopg2 def capture_psycopg_error(f): def wrapped(self, *args,", "by any generator might be already taken, if client set it in an", "= dm().object(this_name) # First, split to parts this_table_wr = {} other_selects = []", "self._q().table_columns(this_name): this_table_wr[key] = val else: field = model.field(key) other_name = field.stores.name other_field_name =", "set up, so the data model is available in dm() function. self._query_cls =", "= self._q().dump_table(name, obj.pkey_field().name) return d # PRIVATE METHODS def _remove_virtual_columns(self, name, data): '''", "after engine # was set up, so the data model is available in", "on the other side). In such case, # nothing is saved, because no", "clean_data = {} for key, val in data.items(): if key in self._q().table_columns(name): clean_data[key]", "stored on the other side 2. Intersection of IDs from all selects is", "Final ids return sorted(set.intersection(*sets_of_ids)) @capture_psycopg_error def _select_objects(self, name, wr): ''' WR containst key-val", "* one select for THIS_NAME table with all possible WHEREs * one select", "written) and they need to be removed now. 
This operation should reverse _add_virtual_columns.", "except psycopg2.InterfaceError as e: raise exceptions.e500(diag_2_msg(e.diag)) except psycopg2.Error as e: raise exceptions.e400(diag_2_msg(e.diag)) return", "def rollback(self): self._conn.rollback() @capture_psycopg_error def delete(self, name, id_): self._q().delete(name, id_) @capture_psycopg_error def selected_ids(self,", "many PUT's, but next time, it will probably be fast (if nextval(sequence) is", "need to fill relationship fields based on other tables. I.e. if we have", "this_pkey_name = model.pkey_field().name sets_of_ids.append(set([x[this_pkey_name] for x in this_table_objects])) # Other tables ids for", "matching columns in table NAME. List of dictionaries from table NAME is returned.", "ids}) related_pkey_name = dm().object(other_name).pkey_field().name for el in data: this_related = [x for x", "self._select_objects(name, {pkey_name: ids}) if len(stored_data) != len(ids): got_ids = [d[pkey_name] for d in", "ID and add 1. NOTE: Any value returned by any generator might be", "except psycopg2.extensions.TransactionRollbackError as e: # Either transaction was not serializable, or some deadlock", "other side 2. Intersection of IDs from all selects is returned 3. SORT", "(probably via PUT). Generator is called repeatedly, until we find a non-duplicated value.", "# was set up, so the data model is available in dm() function.", "fast (if nextval(sequence) is used). Also: * if generator returns twice the same", "except psycopg2.Error as e: raise exceptions.e400(diag_2_msg(e.diag)) return wrapped class PGStorage(BaseStorage): def __init__(self, conn,", "val: raise exceptions.ProgrammingError('Pkey value generator returned twice the same value. 
\\ Table: {},", "nextval(sequence) * any simmilar user-defined function If there is no default, an exception", "{pkey_name: ids}) if len(stored_data) != len(ids): got_ids = [d[pkey_name] for d in stored_data]", "data) def _write_repr(self, instance): ''' Returns INSTANCE representation including all columns that will", "WHEREs we might get some JOINS and this becomes more complicated. HOW IT", "data model directly to storage. # Instead, self._query is built in lazy way,", "WR is interpreted as WHERE 2. SORT becomes ORDER BY 3. LIMIT becomes", "# 3. Remove keys not matching database columns clean_data = self._remove_virtual_columns(instance.model.name, data) return", "IDs from table NAME matching WR. SORT and LIMIT are ignored (storages are", "be written to the database data = {} for field, val in instance.field_values():", "def diag_2_msg(diag): # print(diag.message_primary) return diag.message_primary # return \"{}\\n{}\".format(diag.message_primary, diag.message_detail) try: return f(self,", "nothing pass @capture_psycopg_error def commit(self): self._conn.commit() @capture_psycopg_error def rollback(self): self._conn.rollback() @capture_psycopg_error def delete(self,", "= self._query_cls(self._conn, self._schema, {o.name: o.pkey_field().name for o in dm().objects().values()}) return self._query # PUBLIC", "database, but other are redundat (i.e. if we have relation parent-child, probably child", "1. 
Create dictionary with all columns that should be written to the database", "val in data.items(): if key in self._q().table_columns(name): clean_data[key] = val return clean_data def", "x in this_related] if field.multi: el[name] = related_ids else: el[name] = related_ids[0] if", "be already taken, if client set it in an explicit way (probably via", "if not changed_columns: return # Create representation data = self._write_repr(instance) # Save new", "should be written to the database data = {} for field, val in", "IT SHOULD BE DONE above * sorting in Engine.get and LIMIT is ignored", "In such case, # nothing is saved, because no database columns changed. table_columns", "@capture_psycopg_error def delete(self, name, id_): self._q().delete(name, id_) @capture_psycopg_error def selected_ids(self, this_name, wr, sort,", "instead of WHEREs we might get some JOINS and this becomes more complicated.", "First, split to parts this_table_wr = {} other_selects = [] for key, val", "first is necesary. ''' model = dm().object(this_name) # First, split to parts this_table_wr", "written to the database. This means * all values other than None *", "instance, it is already in data) pkey_name = instance.model.pkey_field().name data[pkey_name] = instance.id() #", "dm().object(this_name).fields(): name = field.name if field.rel and name not in data[0]: other_name =", "single query. That would be easy if we assumed that all REL fields", "Those are available # in the data model, but it seems a good", "be after engine # was set up, so the data model is available", "should be written only if field changed. # This way we distinguish None", "add 1. NOTE: Any value returned by any generator might be already taken,", "(if nextval(sequence) is used). 
Also: * if generator returns twice the same value,", "= {} for key, val in data.items(): if key in self._q().table_columns(name): clean_data[key] =", "# PUBLIC INTERFACE @capture_psycopg_error def save(self, instance): # If we got here, instance.changed()", "matching database columns clean_data = self._remove_virtual_columns(instance.model.name, data) return clean_data @capture_psycopg_error def load(self, name,", "name, obj in dm().objects().items(): d[name] = self._q().dump_table(name, obj.pkey_field().name) return d # PRIVATE METHODS", "works well with * nextval(sequence) * any simmilar user-defined function If there is", "it in an explicit way (probably via PUT). Generator is called repeatedly, until", "data = {} for field, val in instance.field_values(): if not field.stored(): continue #", "the database, but other are redundat (i.e. if we have relation parent-child, probably", "old_val = None while True: cursor = self._conn.cursor() cursor.execute(\"SELECT {}\".format(default_expr)) val = cursor.fetchone()[0]", "Either transaction was not serializable, or some deadlock was detected. # Whatever happened,", "with all columns that should be written to the database data = {}", "columns clean_data = self._remove_virtual_columns(instance.model.name, data) return clean_data @capture_psycopg_error def load(self, name, id_): return", "self._q().table_columns(name): clean_data[key] = val return clean_data def _add_virtual_columns(self, this_name, data): ''' DATA contains", "_add_virtual_columns(self, this_name, data): ''' DATA contains only values stored in table NAME. We", "begin does nothing pass @capture_psycopg_error def commit(self): self._conn.commit() @capture_psycopg_error def rollback(self): self._conn.rollback() @capture_psycopg_error", "in data: this_related = [x for x in all_related if x[other_field_name] == el[pkey_name]]", "IDs from all selects is returned 3. SORT and LIMIT are ignored. SORT", "List of dictionaries from table NAME is returned. 
''' return self._q().select(name, wr) @capture_psycopg_error", "val in instance.field_values(): if not field.stored(): continue # If val.stored() is None it", "set (by INSTANCE.update() -> they are in INSTANCE.changed_fields ''' # 1. Create dictionary", "was set up, so the data model is available in dm() function. self._query_cls", "other_name = field.stores.name other_field_name = field.other.name all_related = self._select_objects(other_name, {other_field_name: ids}) related_pkey_name =", "transaction was not serializable, or some deadlock was detected. # Whatever happened, it", "LIMIT and everything is processed in a single query. That would be easy", "''' pkey_name = dm().object(name).pkey_field().name default_expr = self._q().default_pkey_expr(name, pkey_name) if default_expr is None: raise", "BaseStorage from blargh.engine import dm from .query import Query from .... import exceptions", "return self._q().select(name, wr) @capture_psycopg_error def next_id(self, name): ''' If NAME primary key column", "not be written) and they need to be removed now. This operation should", "in data[0]: other_name = field.stores.name other_field_name = field.other.name all_related = self._select_objects(other_name, {other_field_name: ids})", "is processed in a single query. That would be easy if we assumed", "change and one day we'll look for the biggest current ID and add", "val})) # List of sets of ids, to be intersected later sets_of_ids =", "(storages are allwed to ignore those parameters, they are applied later in Enigne.get).", "all tables are in a single schema * tables have the same names", "all_related = self._select_objects(other_name, {other_field_name: ids}) related_pkey_name = dm().object(other_name).pkey_field().name for el in data: this_related", "None: self._query = self._query_cls(self._conn, self._schema, {o.name: o.pkey_field().name for o in dm().objects().values()}) return self._query", "for the biggest current ID and add 1. 
NOTE: Any value returned by", "we might get some JOINS and this becomes more complicated. HOW IT IS", "we want to handle also other than nextval() defaults, i.e. dependant on now().", "field.stores.name other_field_name = field.other.name all_related = self._select_objects(other_name, {other_field_name: ids}) related_pkey_name = dm().object(other_name).pkey_field().name for", "avoid wrapping __init__ self._true_init(conn, schema, query_cls) @capture_psycopg_error def _true_init(self, conn, schema, query_cls): #", "If we got here, instance.changed() is True, but all changes could be made", "instance.changed_fields if f.name in table_columns] if not changed_columns: return # Create representation data", "sets_of_ids.append(set([x[this_pkey_name] for x in this_table_objects])) # Other tables ids for other_name, other_fk_name, other_table_wr", "3. Remove keys not matching database columns clean_data = self._remove_virtual_columns(instance.model.name, data) return clean_data", "self._remove_virtual_columns(instance.model.name, data) return clean_data @capture_psycopg_error def load(self, name, id_): return self.load_many(name, [id_])[0] @capture_psycopg_error", "field.stores.name other_field_name = field.other.name other_pkey_name = dm().object(other_name).pkey_field().name other_selects.append((other_name, other_field_name, {other_pkey_name: val})) # List", "but parent table has no 'children' column, so it might not be written)", "self._q().select(name, wr) @capture_psycopg_error def next_id(self, name): ''' If NAME primary key column has", "same value, exception is raised * maybe this could be done better? Note", "it will probably be fast (if nextval(sequence) is used). Also: * if generator", "= dm().object(other_name).pkey_field().name for el in data: this_related = [x for x in all_related", "would be easy if we assumed that all REL fields have information stored", "repeatedly, until we find a non-duplicated value. 
This might take long, if there", "ids): if not ids: return [] # Determine column name pkey_name = dm().object(name).pkey_field().name", "for other_name, other_fk_name, other_table_wr in other_selects: other_table_objects = self._select_objects(other_name, other_table_wr) sets_of_ids.append(set([x[other_fk_name] for x", "SHOULD BE DONE 1. WR is interpreted as WHERE 2. SORT becomes ORDER", "for x in this_table_objects])) # Other tables ids for other_name, other_fk_name, other_table_wr in", "instance.id() # 3. Remove keys not matching database columns clean_data = self._remove_virtual_columns(instance.model.name, data)", "no default, an exception is raised. This might change and one day we'll", "Remove keys not matching database columns clean_data = self._remove_virtual_columns(instance.model.name, data) return clean_data @capture_psycopg_error", "pkey_name = dm().object(name).pkey_field().name stored_data = self._select_objects(name, {pkey_name: ids}) if len(stored_data) != len(ids): got_ids", "field.stored(): continue # If val.stored() is None it should be written only if", "to # a different value by database default) from updated fields set to", "This might take long, if there were many PUT's, but next time, it", "''' from ..base_storage import BaseStorage from blargh.engine import dm from .query import Query", "exception is raised * maybe this could be done better? Note - we", "CURRENT ASSUMPTIONS: * all tables are in a single schema * tables have", "could be made # on \"virtual\" columns (rel fields stored on the other", "else: return val @capture_psycopg_error def data(self): d = {} for name, obj in", "also other than nextval() defaults, i.e. dependant on now(). 
''' pkey_name = dm().object(name).pkey_field().name", "data(self): d = {} for name, obj in dm().objects().items(): d[name] = self._q().dump_table(name, obj.pkey_field().name)", "exceptions.e404(object_name=name, object_id=missing_ids[0]) full_data = self._add_virtual_columns(name, stored_data) return full_data @capture_psycopg_error def begin(self): # All", "is used). Also: * if generator returns twice the same value, exception is", "== 'parent' we need to add 'children' key in data, based on relationship", "* if generator returns twice the same value, exception is raised * maybe", "dm().object(other_name).pkey_field().name other_selects.append((other_name, other_field_name, {other_pkey_name: val})) # List of sets of ids, to be", "there were many PUT's, but next time, it will probably be fast (if", "this_name, data): ''' DATA contains only values stored in table NAME. We need", "it is returned. This works well with * nextval(sequence) * any simmilar user-defined", "written to the database data = {} for field, val in instance.field_values(): if", "taken, if client set it in an explicit way (probably via PUT). Generator", "data): ''' DATA contains only values stored in table NAME. We need to", "add 'children' key in data, based on relationship fields. This operation should reverse", "assumed that all REL fields have information stored in THIS_NAME table but unfortunately", "diag.message_detail) try: return f(self, *args, **kwargs) except psycopg2.extensions.TransactionRollbackError as e: # Either transaction", "JOINS and this becomes more complicated. HOW IT IS CURRENLY DONE 1. WR", "were set in __init__(autocommit, deferrable constraints), # so begin does nothing pass @capture_psycopg_error", "try: return f(self, *args, **kwargs) except psycopg2.extensions.TransactionRollbackError as e: # Either transaction was", "so the data model is available in dm() function. 
self._query_cls = query_cls self._query", "well with * nextval(sequence) * any simmilar user-defined function If there is no", "we need to add 'children' key in data, based on relationship fields. This", "columns changed. table_columns = self._q().table_columns(instance.model.name) changed_columns = [f.name for f in instance.changed_fields if", "values, if they were explicitly set (by INSTANCE.update() -> they are in INSTANCE.changed_fields", "take long, if there were many PUT's, but next time, it will probably", "returned by any generator might be already taken, if client set it in", "not psycopg2.extensions.STATUS_READY: conn.commit() conn.set_session(isolation_level='SERIALIZABLE', autocommit=False) conn.cursor().execute('''SET CONSTRAINTS ALL DEFERRED''') self._conn = conn self._schema", "stored in table NAME. We need to fill relationship fields based on other", "CURRENLY DONE 1. WR is split into two parts: * one select for", "ids, to be intersected later sets_of_ids = [] # This table ids this_table_objects", "SORT and LIMIT are ignored (storages are allwed to ignore those parameters, they", "column values. Some of those need to be written to the database, but", "in the data model, but it seems a good idea to avoid passing", "tables ids for other_name, other_fk_name, other_table_wr in other_selects: other_table_objects = self._select_objects(other_name, other_table_wr) sets_of_ids.append(set([x[other_fk_name]", "x[other_field_name] == el[pkey_name]] related_ids = [x[related_pkey_name] for x in this_related] if field.multi: el[name]", "be fast (if nextval(sequence) is used). Also: * if generator returns twice the", "function. self._query_cls = query_cls self._query = None def _q(self): if self._query is None:", "table, so instead of WHEREs we might get some JOINS and this becomes", "This operation should reverse _remove_virtual_columns. ''' # Determine IDs pkey_name = dm().object(this_name).pkey_field().name ids", "= val.stored() # 2. 
Add primary key value (if this is a fresh", "{o.name: o.pkey_field().name for o in dm().objects().values()}) return self._query # PUBLIC INTERFACE @capture_psycopg_error def", "def _q(self): if self._query is None: self._query = self._query_cls(self._conn, self._schema, {o.name: o.pkey_field().name for", "changed. # This way we distinguish None fields that were never set before", "here, instance.changed() is True, but all changes could be made # on \"virtual\"", "e: raise exceptions.e400(diag_2_msg(e.diag)) return wrapped class PGStorage(BaseStorage): def __init__(self, conn, schema, query_cls=Query): #", "need to be removed now. This operation should reverse _add_virtual_columns. ''' clean_data =", "avoid passing data model directly to storage. # Instead, self._query is built in", "id_ not in got_ids] raise exceptions.e404(object_name=name, object_id=missing_ids[0]) full_data = self._add_virtual_columns(name, stored_data) return full_data", "an exception is raised. This might change and one day we'll look for", "query_cls) @capture_psycopg_error def _true_init(self, conn, schema, query_cls): # Modify connection if conn.status is", "self._select_objects(this_name, this_table_wr) this_pkey_name = model.pkey_field().name sets_of_ids.append(set([x[this_pkey_name] for x in this_table_objects])) # Other tables", "If val.stored() is None it should be written only if field changed. #", "_true_init(self, conn, schema, query_cls): # Modify connection if conn.status is not psycopg2.extensions.STATUS_READY: conn.commit()", "if they were explicitly set (by INSTANCE.update() -> they are in INSTANCE.changed_fields '''", "**kwargs) except psycopg2.extensions.TransactionRollbackError as e: # Either transaction was not serializable, or some", "all changes could be made # on \"virtual\" columns (rel fields stored on", "in instance.changed_fields if f.name in table_columns] if not changed_columns: return # Create representation", "data model is available in dm() function. 
self._query_cls = query_cls self._query = None", "explicit way (probably via PUT). Generator is called repeatedly, until we find a", "by database default) from updated fields set to None. if val.stored() is None", "is split into two parts: * one select for THIS_NAME table with all", "might be already taken, if client set it in an explicit way (probably", "return full_data @capture_psycopg_error def begin(self): # All necesary things were set in __init__(autocommit,", "Modify connection if conn.status is not psycopg2.extensions.STATUS_READY: conn.commit() conn.set_session(isolation_level='SERIALIZABLE', autocommit=False) conn.cursor().execute('''SET CONSTRAINTS ALL", "again. raise exceptions.TransactionConflictRetriable() except psycopg2.InterfaceError as e: raise exceptions.e500(diag_2_msg(e.diag)) except psycopg2.Error as e:", "find a non-duplicated value. This might take long, if there were many PUT's,", "= instance.id() # 3. Remove keys not matching database columns clean_data = self._remove_virtual_columns(instance.model.name,", "o in dm().objects().values()}) return self._query # PUBLIC INTERFACE @capture_psycopg_error def save(self, instance): #", "default_expr = self._q().default_pkey_expr(name, pkey_name) if default_expr is None: raise exceptions.ProgrammingError(\"Unknown default pkey value", "ignored (storages are allwed to ignore those parameters, they are applied later in", "defaults, i.e. dependant on now(). ''' pkey_name = dm().object(name).pkey_field().name default_expr = self._q().default_pkey_expr(name, pkey_name)", "is available in dm() function. 
self._query_cls = query_cls self._query = None def _q(self):", "above * sorting in Engine.get and LIMIT is ignored because SORTing first is", "{other_pkey_name: val})) # List of sets of ids, to be intersected later sets_of_ids", "= None def _q(self): if self._query is None: self._query = self._query_cls(self._conn, self._schema, {o.name:", "for el in data: this_related = [x for x in all_related if x[other_field_name]", "def next_id(self, name): ''' If NAME primary key column has default value, it", "returned. This works well with * nextval(sequence) * any simmilar user-defined function If", "as e: raise exceptions.e400(diag_2_msg(e.diag)) return wrapped class PGStorage(BaseStorage): def __init__(self, conn, schema, query_cls=Query):", "seems a good idea to avoid passing data model directly to storage. #", "[] # Determine column name pkey_name = dm().object(name).pkey_field().name stored_data = self._select_objects(name, {pkey_name: ids})", "SORT is ignored because there is no way of implementing it different from", "but next time, it will probably be fast (if nextval(sequence) is used). Also:", "such case, # nothing is saved, because no database columns changed. table_columns =", "we find a non-duplicated value. This might take long, if there were many", "None. if val.stored() is None and field not in instance.changed_fields: continue data[field.name] =", "# Instead, self._query is built in lazy way, when needed - and that", "else: field = model.field(key) other_name = field.stores.name other_field_name = field.other.name other_pkey_name = dm().object(other_name).pkey_field().name", "for id_ in ids if id_ not in got_ids] raise exceptions.e404(object_name=name, object_id=missing_ids[0]) full_data", "fill relationship fields based on other tables. I.e. 
if we have parent-child relationship", "# If we got here, instance.changed() is True, but all changes could be", "data[0]: other_name = field.stores.name other_field_name = field.other.name all_related = self._select_objects(other_name, {other_field_name: ids}) related_pkey_name", "in __init__(autocommit, deferrable constraints), # so begin does nothing pass @capture_psycopg_error def commit(self):", "has default value, it is returned. This works well with * nextval(sequence) *", "this is a fresh instance, it is already in data) pkey_name = instance.model.pkey_field().name", "table NAME is returned. ''' return self._q().select(name, wr) @capture_psycopg_error def next_id(self, name): '''", "client set it in an explicit way (probably via PUT). Generator is called", "val = cursor.fetchone()[0] if self._select_objects(name, {pkey_name: val}): if old_val == val: raise exceptions.ProgrammingError('Pkey", "_select_objects(self, name, wr): ''' WR containst key-val pairs matching columns in table NAME.", "wrapped class PGStorage(BaseStorage): def __init__(self, conn, schema, query_cls=Query): # This construction is purely", "to avoid passing data model directly to storage. # Instead, self._query is built", "the data model is available in dm() function. self._query_cls = query_cls self._query =", "@capture_psycopg_error def _select_objects(self, name, wr): ''' WR containst key-val pairs matching columns in", "SHOULD BE DONE above * sorting in Engine.get and LIMIT is ignored because", "written to the database, but other are redundat (i.e. if we have relation", "becomes ORDER BY 3. LIMIT becomes LIMIT and everything is processed in a", "other_selects.append((other_name, other_field_name, {other_pkey_name: val})) # List of sets of ids, to be intersected", "Create dictionary with all columns that should be written to the database data", ".... 
import exceptions import psycopg2 def capture_psycopg_error(f): def wrapped(self, *args, **kwargs): def diag_2_msg(diag):", "default pkey value for {}\".format(name)) old_val = None while True: cursor = self._conn.cursor()", "for f in instance.changed_fields if f.name in table_columns] if not changed_columns: return #", "default, an exception is raised. This might change and one day we'll look", "other_table_objects])) # Final ids return sorted(set.intersection(*sets_of_ids)) @capture_psycopg_error def _select_objects(self, name, wr): ''' WR", "Determine IDs pkey_name = dm().object(this_name).pkey_field().name ids = [d[pkey_name] for d in data] for", "self._add_virtual_columns(name, stored_data) return full_data @capture_psycopg_error def begin(self): # All necesary things were set", "if x[other_field_name] == el[pkey_name]] related_ids = [x[related_pkey_name] for x in this_related] if field.multi:", "all values other than None * None values, if they were explicitly set", "''' Return IDs from table NAME matching WR. SORT and LIMIT are ignored", "column has default value, it is returned. This works well with * nextval(sequence)", "# 1. Create dictionary with all columns that should be written to the", "we have relation parent-child, probably child table has something like 'parent_id', but parent", "if field changed. 
# This way we distinguish None fields that were never", "set before (and might be set to # a different value by database", "= model.field(key) other_name = field.stores.name other_field_name = field.other.name other_pkey_name = dm().object(other_name).pkey_field().name other_selects.append((other_name, other_field_name,", "* any simmilar user-defined function If there is no default, an exception is", "{} for name, obj in dm().objects().items(): d[name] = self._q().dump_table(name, obj.pkey_field().name) return d #", "if len(stored_data) != len(ids): got_ids = [d[pkey_name] for d in stored_data] missing_ids =", "query_cls=Query): # This construction is purely to avoid wrapping __init__ self._true_init(conn, schema, query_cls)", "name): ''' If NAME primary key column has default value, it is returned.", "way, when needed - and that should be after engine # was set", "got here, instance.changed() is True, but all changes could be made # on", "To initialize query instance we need list of all primary keys. Those are", "else: old_val = val else: return val @capture_psycopg_error def data(self): d = {}", "to be intersected later sets_of_ids = [] # This table ids this_table_objects =", "ignored because there is no way of implementing it different from both: *", "way (probably via PUT). Generator is called repeatedly, until we find a non-duplicated", "the database data = {} for field, val in instance.field_values(): if not field.stored():", "is interpreted as WHERE 2. SORT becomes ORDER BY 3. LIMIT becomes LIMIT", "rollback(self): self._conn.rollback() @capture_psycopg_error def delete(self, name, id_): self._q().delete(name, id_) @capture_psycopg_error def selected_ids(self, this_name,", "= field.stores.name other_field_name = field.other.name other_pkey_name = dm().object(other_name).pkey_field().name other_selects.append((other_name, other_field_name, {other_pkey_name: val})) #", "those parameters, they are applied later in Enigne.get). 
HOW IT SHOULD BE DONE", "on other tables. I.e. if we have parent-child relationship probably child table has", "{} for key, val in data.items(): if key in self._q().table_columns(name): clean_data[key] = val", "= self._add_virtual_columns(name, stored_data) return full_data @capture_psycopg_error def begin(self): # All necesary things were", "representation including all columns that will be written to the database. This means", "return # Create representation data = self._write_repr(instance) # Save new value name =", "be done better? Note - we want to handle also other than nextval()", "implementing it different from both: * HOW IT SHOULD BE DONE above *", "twice the same value. \\ Table: {}, val: {}'.format(name, val)) else: old_val =", "a single schema * tables have the same names as resources ''' from", "we got here, instance.changed() is True, but all changes could be made #", "import dm from .query import Query from .... import exceptions import psycopg2 def", "for key, val in data.items(): if key in self._q().table_columns(name): clean_data[key] = val return", "instance): ''' Returns INSTANCE representation including all columns that will be written to", "\"possible\" column values. Some of those need to be written to the database,", "data): ''' DATA contains all \"possible\" column values. Some of those need to", "DONE 1. WR is split into two parts: * one select for THIS_NAME", "data] for field in dm().object(this_name).fields(): name = field.name if field.rel and name not", "{}\".format(name)) old_val = None while True: cursor = self._conn.cursor() cursor.execute(\"SELECT {}\".format(default_expr)) val =", "than None * None values, if they were explicitly set (by INSTANCE.update() ->", "stored_data = self._select_objects(name, {pkey_name: ids}) if len(stored_data) != len(ids): got_ids = [d[pkey_name] for", "called repeatedly, until we find a non-duplicated value. 
This might take long, if", "for d in stored_data] missing_ids = [id_ for id_ in ids if id_", "this_table_objects])) # Other tables ids for other_name, other_fk_name, other_table_wr in other_selects: other_table_objects =", "no 'children' column, so it might not be written) and they need to", "# a different value by database default) from updated fields set to None.", "biggest current ID and add 1. NOTE: Any value returned by any generator", "instance.changed() is True, but all changes could be made # on \"virtual\" columns", "PUT's, but next time, it will probably be fast (if nextval(sequence) is used).", "!= len(ids): got_ids = [d[pkey_name] for d in stored_data] missing_ids = [id_ for", "val @capture_psycopg_error def data(self): d = {} for name, obj in dm().objects().items(): d[name]", "handle also other than nextval() defaults, i.e. dependant on now(). ''' pkey_name =", "to parts this_table_wr = {} other_selects = [] for key, val in wr.items():", "return diag.message_primary # return \"{}\\n{}\".format(diag.message_primary, diag.message_detail) try: return f(self, *args, **kwargs) except psycopg2.extensions.TransactionRollbackError", "= self._select_objects(this_name, this_table_wr) this_pkey_name = model.pkey_field().name sets_of_ids.append(set([x[this_pkey_name] for x in this_table_objects])) # Other", "are applied later in Enigne.get). HOW IT SHOULD BE DONE 1. WR is", "will be written to the database. This means * all values other than", "field.rel and name not in data[0]: other_name = field.stores.name other_field_name = field.other.name all_related", "means * all values other than None * None values, if they were", "other than nextval() defaults, i.e. dependant on now(). ''' pkey_name = dm().object(name).pkey_field().name default_expr", "and LIMIT is ignored because SORTing first is necesary. ''' model = dm().object(this_name)", "and LIMIT are ignored. SORT is ignored because there is no way of", "returned twice the same value. 
\\ Table: {}, val: {}'.format(name, val)) else: old_val", "instance.model.name self._q().upsert(name, data) def _write_repr(self, instance): ''' Returns INSTANCE representation including all columns", "directly to storage. # Instead, self._query is built in lazy way, when needed", "field = model.field(key) other_name = field.stores.name other_field_name = field.other.name other_pkey_name = dm().object(other_name).pkey_field().name other_selects.append((other_name,", "all_related if x[other_field_name] == el[pkey_name]] related_ids = [x[related_pkey_name] for x in this_related] if", "= self._select_objects(other_name, other_table_wr) sets_of_ids.append(set([x[other_fk_name] for x in other_table_objects])) # Final ids return sorted(set.intersection(*sets_of_ids))", "val in wr.items(): if key in self._q().table_columns(this_name): this_table_wr[key] = val else: field =", "dm().object(this_name).pkey_field().name ids = [d[pkey_name] for d in data] for field in dm().object(this_name).fields(): name", "'children' column, so if NAME == 'parent' we need to add 'children' key", "column name pkey_name = dm().object(name).pkey_field().name stored_data = self._select_objects(name, {pkey_name: ids}) if len(stored_data) !=", "of all primary keys. Those are available # in the data model, but", "and field not in instance.changed_fields: continue data[field.name] = val.stored() # 2. 
Add primary", "in this_table_objects])) # Other tables ids for other_name, other_fk_name, other_table_wr in other_selects: other_table_objects", "all possible WHEREs * one select for each joined table with REL field", "other than None * None values, if they were explicitly set (by INSTANCE.update()", "psycopg2.Error as e: raise exceptions.e400(diag_2_msg(e.diag)) return wrapped class PGStorage(BaseStorage): def __init__(self, conn, schema,", "All necesary things were set in __init__(autocommit, deferrable constraints), # so begin does", "val return clean_data def _add_virtual_columns(self, this_name, data): ''' DATA contains only values stored", "made # on \"virtual\" columns (rel fields stored on the other side). In", "def _add_virtual_columns(self, this_name, data): ''' DATA contains only values stored in table NAME.", "[] # This table ids this_table_objects = self._select_objects(this_name, this_table_wr) this_pkey_name = model.pkey_field().name sets_of_ids.append(set([x[this_pkey_name]", "value, it is returned. This works well with * nextval(sequence) * any simmilar", "set to None. if val.stored() is None and field not in instance.changed_fields: continue", "[id_ for id_ in ids if id_ not in got_ids] raise exceptions.e404(object_name=name, object_id=missing_ids[0])", "List of sets of ids, to be intersected later sets_of_ids = [] #", "NAME primary key column has default value, it is returned. This works well", "load(self, name, id_): return self.load_many(name, [id_])[0] @capture_psycopg_error def load_many(self, name, ids): if not", "are redundat (i.e. 
if we have relation parent-child, probably child table has something", "they were explicitly set (by INSTANCE.update() -> they are in INSTANCE.changed_fields ''' #", "= val else: return val @capture_psycopg_error def data(self): d = {} for name,", "self._q().delete(name, id_) @capture_psycopg_error def selected_ids(self, this_name, wr, sort, limit): ''' Return IDs from", "= dm().object(other_name).pkey_field().name other_selects.append((other_name, other_field_name, {other_pkey_name: val})) # List of sets of ids, to", "this_related = [x for x in all_related if x[other_field_name] == el[pkey_name]] related_ids =", "sorting in Engine.get and LIMIT is ignored because SORTing first is necesary. '''", "BE DONE 1. WR is interpreted as WHERE 2. SORT becomes ORDER BY", "in THIS_NAME table but unfortunately REL field could be stored on any table,", "are available # in the data model, but it seems a good idea", "something like 'parent_id', but parent table has no 'children' column, so it might", "set to # a different value by database default) from updated fields set", "begin(self): # All necesary things were set in __init__(autocommit, deferrable constraints), # so", "None while True: cursor = self._conn.cursor() cursor.execute(\"SELECT {}\".format(default_expr)) val = cursor.fetchone()[0] if self._select_objects(name,", "= field.stores.name other_field_name = field.other.name all_related = self._select_objects(other_name, {other_field_name: ids}) related_pkey_name = dm().object(other_name).pkey_field().name", "that were never set before (and might be set to # a different", "WR. SORT and LIMIT are ignored (storages are allwed to ignore those parameters,", "storage. # Instead, self._query is built in lazy way, when needed - and", "probably be fast (if nextval(sequence) is used). Also: * if generator returns twice", "continue data[field.name] = val.stored() # 2. 
Add primary key value (if this is", "in dm().object(this_name).fields(): name = field.name if field.rel and name not in data[0]: other_name", "field changed. # This way we distinguish None fields that were never set", "# This table ids this_table_objects = self._select_objects(this_name, this_table_wr) this_pkey_name = model.pkey_field().name sets_of_ids.append(set([x[this_pkey_name] for", "if field.multi: el[name] = related_ids else: el[name] = related_ids[0] if related_ids else None", "= self._q().default_pkey_expr(name, pkey_name) if default_expr is None: raise exceptions.ProgrammingError(\"Unknown default pkey value for", "raise exceptions.ProgrammingError(\"Unknown default pkey value for {}\".format(name)) old_val = None while True: cursor", "self._query is built in lazy way, when needed - and that should be", "cursor = self._conn.cursor() cursor.execute(\"SELECT {}\".format(default_expr)) val = cursor.fetchone()[0] if self._select_objects(name, {pkey_name: val}): if", "@capture_psycopg_error def begin(self): # All necesary things were set in __init__(autocommit, deferrable constraints),", "@capture_psycopg_error def load_many(self, name, ids): if not ids: return [] # Determine column", "in instance.changed_fields: continue data[field.name] = val.stored() # 2. Add primary key value (if", "set it in an explicit way (probably via PUT). 
Generator is called repeatedly,", "* nextval(sequence) * any simmilar user-defined function If there is no default, an", "one select for each joined table with REL field stored on the other", "self._schema, {o.name: o.pkey_field().name for o in dm().objects().values()}) return self._query # PUBLIC INTERFACE @capture_psycopg_error", "self._query_cls = query_cls self._query = None def _q(self): if self._query is None: self._query", "# so begin does nothing pass @capture_psycopg_error def commit(self): self._conn.commit() @capture_psycopg_error def rollback(self):", "ids this_table_objects = self._select_objects(this_name, this_table_wr) this_pkey_name = model.pkey_field().name sets_of_ids.append(set([x[this_pkey_name] for x in this_table_objects]))", "way of implementing it different from both: * HOW IT SHOULD BE DONE", "to storage. # Instead, self._query is built in lazy way, when needed -", "''' CURRENT ASSUMPTIONS: * all tables are in a single schema * tables", "ORDER BY 3. LIMIT becomes LIMIT and everything is processed in a single", "IS CURRENLY DONE 1. WR is split into two parts: * one select", "later in Enigne.get). HOW IT SHOULD BE DONE 1. WR is interpreted as", "this_table_wr = {} other_selects = [] for key, val in wr.items(): if key", "x in all_related if x[other_field_name] == el[pkey_name]] related_ids = [x[related_pkey_name] for x in", "{}\".format(default_expr)) val = cursor.fetchone()[0] if self._select_objects(name, {pkey_name: val}): if old_val == val: raise", "self._query is None: self._query = self._query_cls(self._conn, self._schema, {o.name: o.pkey_field().name for o in dm().objects().values()})", "data[field.name] = val.stored() # 2. Add primary key value (if this is a", "should be after engine # was set up, so the data model is", "this_related] if field.multi: el[name] = related_ids else: el[name] = related_ids[0] if related_ids else", "deadlock was detected. 
# Whatever happened, it makes sense to run this operation", "= [d[pkey_name] for d in stored_data] missing_ids = [id_ for id_ in ids", "LIMIT are ignored (storages are allwed to ignore those parameters, they are applied", "d[name] = self._q().dump_table(name, obj.pkey_field().name) return d # PRIVATE METHODS def _remove_virtual_columns(self, name, data):", "value. \\ Table: {}, val: {}'.format(name, val)) else: old_val = val else: return", "_write_repr(self, instance): ''' Returns INSTANCE representation including all columns that will be written", "all primary keys. Those are available # in the data model, but it", "but all changes could be made # on \"virtual\" columns (rel fields stored", "= [] # This table ids this_table_objects = self._select_objects(this_name, this_table_wr) this_pkey_name = model.pkey_field().name", "field could be stored on any table, so instead of WHEREs we might", "was not serializable, or some deadlock was detected. # Whatever happened, it makes", "look for the biggest current ID and add 1. 
NOTE: Any value returned", "*args, **kwargs) except psycopg2.extensions.TransactionRollbackError as e: # Either transaction was not serializable, or", "in other_selects: other_table_objects = self._select_objects(other_name, other_table_wr) sets_of_ids.append(set([x[other_fk_name] for x in other_table_objects])) # Final", "we assumed that all REL fields have information stored in THIS_NAME table but", "REL field could be stored on any table, so instead of WHEREs we", "self._conn = conn self._schema = schema # To initialize query instance we need", "= schema # To initialize query instance we need list of all primary", "in ids if id_ not in got_ids] raise exceptions.e404(object_name=name, object_id=missing_ids[0]) full_data = self._add_virtual_columns(name,", "= None while True: cursor = self._conn.cursor() cursor.execute(\"SELECT {}\".format(default_expr)) val = cursor.fetchone()[0] if", "self._write_repr(instance) # Save new value name = instance.model.name self._q().upsert(name, data) def _write_repr(self, instance):", "in table NAME. List of dictionaries from table NAME is returned. ''' return", "dm().object(other_name).pkey_field().name for el in data: this_related = [x for x in all_related if", "blargh.engine import dm from .query import Query from .... import exceptions import psycopg2", "Some of those need to be written to the database, but other are", "If there is no default, an exception is raised. This might change and", "None and field not in instance.changed_fields: continue data[field.name] = val.stored() # 2. Add", "a good idea to avoid passing data model directly to storage. # Instead,", "detected. # Whatever happened, it makes sense to run this operation again. raise", "3. LIMIT becomes LIMIT and everything is processed in a single query. That", "it should be written only if field changed. # This way we distinguish", "some JOINS and this becomes more complicated. 
HOW IT IS CURRENLY DONE 1.", "{} for field, val in instance.field_values(): if not field.stored(): continue # If val.stored()", "a single query. That would be easy if we assumed that all REL", "and add 1. NOTE: Any value returned by any generator might be already", "obj in dm().objects().items(): d[name] = self._q().dump_table(name, obj.pkey_field().name) return d # PRIVATE METHODS def", "if we assumed that all REL fields have information stored in THIS_NAME table", "el[pkey_name]] related_ids = [x[related_pkey_name] for x in this_related] if field.multi: el[name] = related_ids", "clean_data def _add_virtual_columns(self, this_name, data): ''' DATA contains only values stored in table", "if id_ not in got_ids] raise exceptions.e404(object_name=name, object_id=missing_ids[0]) full_data = self._add_virtual_columns(name, stored_data) return", "to avoid wrapping __init__ self._true_init(conn, schema, query_cls) @capture_psycopg_error def _true_init(self, conn, schema, query_cls):", "field.name if field.rel and name not in data[0]: other_name = field.stores.name other_field_name =", "parts this_table_wr = {} other_selects = [] for key, val in wr.items(): if", "value. This might take long, if there were many PUT's, but next time,", "selected_ids(self, this_name, wr, sort, limit): ''' Return IDs from table NAME matching WR.", "not changed_columns: return # Create representation data = self._write_repr(instance) # Save new value", "'parent_id', but parent table has no 'children' column, so it might not be", "raise exceptions.e404(object_name=name, object_id=missing_ids[0]) full_data = self._add_virtual_columns(name, stored_data) return full_data @capture_psycopg_error def begin(self): #", "def _select_objects(self, name, wr): ''' WR containst key-val pairs matching columns in table", "is already in data) pkey_name = instance.model.pkey_field().name data[pkey_name] = instance.id() # 3. 
Remove", "that all REL fields have information stored in THIS_NAME table but unfortunately REL", "def _write_repr(self, instance): ''' Returns INSTANCE representation including all columns that will be", "self._select_objects(other_name, {other_field_name: ids}) related_pkey_name = dm().object(other_name).pkey_field().name for el in data: this_related = [x", "exceptions import psycopg2 def capture_psycopg_error(f): def wrapped(self, *args, **kwargs): def diag_2_msg(diag): # print(diag.message_primary)", "a fresh instance, it is already in data) pkey_name = instance.model.pkey_field().name data[pkey_name] =", "there is no default, an exception is raised. This might change and one", "two parts: * one select for THIS_NAME table with all possible WHEREs *", "f in instance.changed_fields if f.name in table_columns] if not changed_columns: return # Create", "is necesary. ''' model = dm().object(this_name) # First, split to parts this_table_wr =", "Save new value name = instance.model.name self._q().upsert(name, data) def _write_repr(self, instance): ''' Returns", "(and might be set to # a different value by database default) from", "model.field(key) other_name = field.stores.name other_field_name = field.other.name other_pkey_name = dm().object(other_name).pkey_field().name other_selects.append((other_name, other_field_name, {other_pkey_name:", "have relation parent-child, probably child table has something like 'parent_id', but parent table", "self._q().default_pkey_expr(name, pkey_name) if default_expr is None: raise exceptions.ProgrammingError(\"Unknown default pkey value for {}\".format(name))", "THIS_NAME table but unfortunately REL field could be stored on any table, so", "sets of ids, to be intersected later sets_of_ids = [] # This table", "be written to the database. 
This means * all values other than None", "value (if this is a fresh instance, it is already in data) pkey_name", "* None values, if they were explicitly set (by INSTANCE.update() -> they are", "raise exceptions.ProgrammingError('Pkey value generator returned twice the same value. \\ Table: {}, val:", "the data model, but it seems a good idea to avoid passing data", "parent-child relationship probably child table has 'parent_id', and parent has no 'children' column,", "in dm().objects().values()}) return self._query # PUBLIC INTERFACE @capture_psycopg_error def save(self, instance): # If", "is built in lazy way, when needed - and that should be after" ]
[ "foo at timestamp 3 and timestamp 2, then the only value is at", "\"\" # Your TimeMap object will be instantiated and called as such: #", "lowercase. All key/value strings have length in the range [1, 100] The timestamps", "set(key, value, timestamp_prev) was called previously, with timestamp_prev <= timestamp. If there are", "Example 1: Input: inputs = [\"TimeMap\",\"set\",\"get\",\"get\",\"set\",\"get\",\"get\"], inputs = [[],[\"foo\",\"bar\",1],[\"foo\",1],[\"foo\",3],[\"foo\",\"bar2\",4],[\"foo\",4],[\"foo\",5]] Output: [null,null,\"bar\",\"bar\",null,\"bar2\",\"bar2\"] Explanation: TimeMap", "are multiple such values, it returns the one with the largest timestamp_prev. If", "length in the range [1, 100] The timestamps for all TimeMap.set operations are", "was called previously, with timestamp_prev <= timestamp. If there are multiple such values,", "int timestamp) Returns a value such that set(key, value, timestamp_prev) was called previously,", "2, then the only value is at timestamp 1 ie \"bar\" kv.set(\"foo\", \"bar2\",", "TimeMap: def __init__(self): \"\"\" Initialize your data structure here. 
\"\"\" self.dic = {}", "'int') -> 'None': if key in self.dic: self.dic[key].append({'v': value, 't': timestamp}) else: self.dic[key]", ">= kv['t']: return kv['v'] return \"\" else: return \"\" # Your TimeMap object", "value \"bar\" along with timestamp = 1 kv.get(\"foo\", 1); // output \"bar\" kv.get(\"foo\",", "timestamp 1 ie \"bar\" kv.set(\"foo\", \"bar2\", 4); kv.get(\"foo\", 4); // output \"bar2\" kv.get(\"foo\",", "Time Based Key-Value Store https://leetcode.com/problems/time-based-key-value-store/ Create a timebased key-value store class TimeMap, that", "key/value strings have length in the range [1, 100] The timestamps for all", "if key in self.dic: self.dic[key].append({'v': value, 't': timestamp}) else: self.dic[key] = [{'v': value,", "\"bar\" since there is no value corresponding to foo at timestamp 3 and", "to foo at timestamp 3 and timestamp 2, then the only value is", "https://leetcode.com/problems/time-based-key-value-store/ Create a timebased key-value store class TimeMap, that supports two operations. 1.", "multiple such values, it returns the one with the largest timestamp_prev. If there", "3); // output \"bar\" since there is no value corresponding to foo at", "Example 2: Input: inputs = [\"TimeMap\",\"set\",\"set\",\"get\",\"get\",\"get\",\"get\",\"get\"], inputs = [[],[\"love\",\"high\",10],[\"love\",\"low\",20],[\"love\",5],[\"love\",10],[\"love\",15],[\"love\",20],[\"love\",25]] Output: [null,null,null,\"\",\"high\",\"high\",\"low\",\"low\"] Note: All", "string value, int timestamp) Stores the key and value, along with the given", "All key/value strings have length in the range [1, 100] The timestamps for", "in the range [1, 100] The timestamps for all TimeMap.set operations are strictly", "key: 'str', value: 'str', timestamp: 'int') -> 'None': if key in self.dic: self.dic[key].append({'v':", "if key in self.dic: for kv in reversed(self.dic[key]): if timestamp >= kv['t']: return", "All key/value strings are lowercase. 
All key/value strings have length in the range", "{} def set(self, key: 'str', value: 'str', timestamp: 'int') -> 'None': if key", "given timestamp. 2. get(string key, int timestamp) Returns a value such that set(key,", "5); //output \"bar2\" Example 2: Input: inputs = [\"TimeMap\",\"set\",\"set\",\"get\",\"get\",\"get\",\"get\",\"get\"], inputs = [[],[\"love\",\"high\",10],[\"love\",\"low\",20],[\"love\",5],[\"love\",10],[\"love\",15],[\"love\",20],[\"love\",25]] Output:", "TimeMap object will be instantiated and called as such: # obj = TimeMap()", "the empty string (\"\"). Example 1: Input: inputs = [\"TimeMap\",\"set\",\"get\",\"get\",\"set\",\"get\",\"get\"], inputs = [[],[\"foo\",\"bar\",1],[\"foo\",1],[\"foo\",3],[\"foo\",\"bar2\",4],[\"foo\",4],[\"foo\",5]]", "the largest timestamp_prev. If there are no values, it returns the empty string", "structure here. \"\"\" self.dic = {} def set(self, key: 'str', value: 'str', timestamp:", "are lowercase. All key/value strings have length in the range [1, 100] The", "increasing. 1 <= timestamp <= 10^7 TimeMap.set and TimeMap.get functions will be called", "__init__(self): \"\"\" Initialize your data structure here. \"\"\" self.dic = {} def set(self,", "inputs = [\"TimeMap\",\"set\",\"set\",\"get\",\"get\",\"get\",\"get\",\"get\"], inputs = [[],[\"love\",\"high\",10],[\"love\",\"low\",20],[\"love\",5],[\"love\",10],[\"love\",15],[\"love\",20],[\"love\",25]] Output: [null,null,null,\"\",\"high\",\"high\",\"low\",\"low\"] Note: All key/value strings are", "kv.set(\"foo\", \"bar\", 1); // store the key \"foo\" and value \"bar\" along with", "value such that set(key, value, timestamp_prev) was called previously, with timestamp_prev <= timestamp.", "called previously, with timestamp_prev <= timestamp. If there are multiple such values, it", "in self.dic: self.dic[key].append({'v': value, 't': timestamp}) else: self.dic[key] = [{'v': value, 't': timestamp}]", "the given timestamp. 2. 
get(string key, int timestamp) Returns a value such that", "timestamp}] def get(self, key: 'str', timestamp: 'int') -> 'str': if key in self.dic:", "instantiated and called as such: # obj = TimeMap() # obj.set(key,value,timestamp) # param_2", "have length in the range [1, 100] The timestamps for all TimeMap.set operations", "that set(key, value, timestamp_prev) was called previously, with timestamp_prev <= timestamp. If there", "Your TimeMap object will be instantiated and called as such: # obj =", "timestamp_prev. If there are no values, it returns the empty string (\"\"). Example", "TimeMap.set operations are strictly increasing. 1 <= timestamp <= 10^7 TimeMap.set and TimeMap.get", "here. \"\"\" self.dic = {} def set(self, key: 'str', value: 'str', timestamp: 'int')", "total of 120000 times (combined) per test case. \"\"\" class TimeMap: def __init__(self):", "timestamps for all TimeMap.set operations are strictly increasing. 1 <= timestamp <= 10^7", "self.dic: for kv in reversed(self.dic[key]): if timestamp >= kv['t']: return kv['v'] return \"\"", "set(self, key: 'str', value: 'str', timestamp: 'int') -> 'None': if key in self.dic:", "ie \"bar\" kv.set(\"foo\", \"bar2\", 4); kv.get(\"foo\", 4); // output \"bar2\" kv.get(\"foo\", 5); //output", "key \"foo\" and value \"bar\" along with timestamp = 1 kv.get(\"foo\", 1); //", "the one with the largest timestamp_prev. If there are no values, it returns", "Initialize your data structure here. 
\"\"\" self.dic = {} def set(self, key: 'str',", "2: Input: inputs = [\"TimeMap\",\"set\",\"set\",\"get\",\"get\",\"get\",\"get\",\"get\"], inputs = [[],[\"love\",\"high\",10],[\"love\",\"low\",20],[\"love\",5],[\"love\",10],[\"love\",15],[\"love\",20],[\"love\",25]] Output: [null,null,null,\"\",\"high\",\"high\",\"low\",\"low\"] Note: All key/value", "\"\"\" self.dic = {} def set(self, key: 'str', value: 'str', timestamp: 'int') ->", "//output \"bar2\" Example 2: Input: inputs = [\"TimeMap\",\"set\",\"set\",\"get\",\"get\",\"get\",\"get\",\"get\"], inputs = [[],[\"love\",\"high\",10],[\"love\",\"low\",20],[\"love\",5],[\"love\",10],[\"love\",15],[\"love\",20],[\"love\",25]] Output: [null,null,null,\"\",\"high\",\"high\",\"low\",\"low\"]", "output \"bar\" kv.get(\"foo\", 3); // output \"bar\" since there is no value corresponding", "be instantiated and called as such: # obj = TimeMap() # obj.set(key,value,timestamp) #", "a value such that set(key, value, timestamp_prev) was called previously, with timestamp_prev <=", "1. set(string key, string value, int timestamp) Stores the key and value, along", "4); // output \"bar2\" kv.get(\"foo\", 5); //output \"bar2\" Example 2: Input: inputs =", "\"bar2\", 4); kv.get(\"foo\", 4); // output \"bar2\" kv.get(\"foo\", 5); //output \"bar2\" Example 2:", "// output \"bar\" kv.get(\"foo\", 3); // output \"bar\" since there is no value", "output \"bar2\" kv.get(\"foo\", 5); //output \"bar2\" Example 2: Input: inputs = [\"TimeMap\",\"set\",\"set\",\"get\",\"get\",\"get\",\"get\",\"get\"], inputs", "key and value, along with the given timestamp. 2. get(string key, int timestamp)", "with the largest timestamp_prev. If there are no values, it returns the empty", "be called a total of 120000 times (combined) per test case. \"\"\" class", "<= timestamp. If there are multiple such values, it returns the one with", "with the given timestamp. 2. 
get(string key, int timestamp) Returns a value such", "only value is at timestamp 1 ie \"bar\" kv.set(\"foo\", \"bar2\", 4); kv.get(\"foo\", 4);", "4); kv.get(\"foo\", 4); // output \"bar2\" kv.get(\"foo\", 5); //output \"bar2\" Example 2: Input:", "\"bar2\" kv.get(\"foo\", 5); //output \"bar2\" Example 2: Input: inputs = [\"TimeMap\",\"set\",\"set\",\"get\",\"get\",\"get\",\"get\",\"get\"], inputs =", "Returns a value such that set(key, value, timestamp_prev) was called previously, with timestamp_prev", "Input: inputs = [\"TimeMap\",\"set\",\"get\",\"get\",\"set\",\"get\",\"get\"], inputs = [[],[\"foo\",\"bar\",1],[\"foo\",1],[\"foo\",3],[\"foo\",\"bar2\",4],[\"foo\",4],[\"foo\",5]] Output: [null,null,\"bar\",\"bar\",null,\"bar2\",\"bar2\"] Explanation: TimeMap kv; kv.set(\"foo\",", "operations are strictly increasing. 1 <= timestamp <= 10^7 TimeMap.set and TimeMap.get functions", "reversed(self.dic[key]): if timestamp >= kv['t']: return kv['v'] return \"\" else: return \"\" #", "kv['t']: return kv['v'] return \"\" else: return \"\" # Your TimeMap object will", "key, string value, int timestamp) Stores the key and value, along with the", "for all TimeMap.set operations are strictly increasing. 1 <= timestamp <= 10^7 TimeMap.set", "kv.get(\"foo\", 3); // output \"bar\" since there is no value corresponding to foo", "value, timestamp_prev) was called previously, with timestamp_prev <= timestamp. If there are multiple", "Based Key-Value Store https://leetcode.com/problems/time-based-key-value-store/ Create a timebased key-value store class TimeMap, that supports", "inputs = [[],[\"love\",\"high\",10],[\"love\",\"low\",20],[\"love\",5],[\"love\",10],[\"love\",15],[\"love\",20],[\"love\",25]] Output: [null,null,null,\"\",\"high\",\"high\",\"low\",\"low\"] Note: All key/value strings are lowercase. All key/value", "value, along with the given timestamp. 2. 
get(string key, int timestamp) Returns a", "\"bar\" kv.get(\"foo\", 3); // output \"bar\" since there is no value corresponding to", "is no value corresponding to foo at timestamp 3 and timestamp 2, then", "inputs = [\"TimeMap\",\"set\",\"get\",\"get\",\"set\",\"get\",\"get\"], inputs = [[],[\"foo\",\"bar\",1],[\"foo\",1],[\"foo\",3],[\"foo\",\"bar2\",4],[\"foo\",4],[\"foo\",5]] Output: [null,null,\"bar\",\"bar\",null,\"bar2\",\"bar2\"] Explanation: TimeMap kv; kv.set(\"foo\", \"bar\",", "of 120000 times (combined) per test case. \"\"\" class TimeMap: def __init__(self): \"\"\"", "kv; kv.set(\"foo\", \"bar\", 1); // store the key \"foo\" and value \"bar\" along", "that supports two operations. 1. set(string key, string value, int timestamp) Stores the", "1); // output \"bar\" kv.get(\"foo\", 3); // output \"bar\" since there is no", "\"foo\" and value \"bar\" along with timestamp = 1 kv.get(\"foo\", 1); // output", "class TimeMap: def __init__(self): \"\"\" Initialize your data structure here. \"\"\" self.dic =", "and value, along with the given timestamp. 2. get(string key, int timestamp) Returns", "such that set(key, value, timestamp_prev) was called previously, with timestamp_prev <= timestamp. If", "all TimeMap.set operations are strictly increasing. 1 <= timestamp <= 10^7 TimeMap.set and", "value corresponding to foo at timestamp 3 and timestamp 2, then the only", "return \"\" # Your TimeMap object will be instantiated and called as such:", "corresponding to foo at timestamp 3 and timestamp 2, then the only value", "\"\"\"981. 
Time Based Key-Value Store https://leetcode.com/problems/time-based-key-value-store/ Create a timebased key-value store class TimeMap,", "\"bar\" along with timestamp = 1 kv.get(\"foo\", 1); // output \"bar\" kv.get(\"foo\", 3);", "'t': timestamp}) else: self.dic[key] = [{'v': value, 't': timestamp}] def get(self, key: 'str',", "key in self.dic: self.dic[key].append({'v': value, 't': timestamp}) else: self.dic[key] = [{'v': value, 't':", "[1, 100] The timestamps for all TimeMap.set operations are strictly increasing. 1 <=", "per test case. \"\"\" class TimeMap: def __init__(self): \"\"\" Initialize your data structure", "and timestamp 2, then the only value is at timestamp 1 ie \"bar\"", "key: 'str', timestamp: 'int') -> 'str': if key in self.dic: for kv in", "'int') -> 'str': if key in self.dic: for kv in reversed(self.dic[key]): if timestamp", "'str': if key in self.dic: for kv in reversed(self.dic[key]): if timestamp >= kv['t']:", "key/value strings are lowercase. All key/value strings have length in the range [1,", "If there are no values, it returns the empty string (\"\"). Example 1:", "kv.set(\"foo\", \"bar2\", 4); kv.get(\"foo\", 4); // output \"bar2\" kv.get(\"foo\", 5); //output \"bar2\" Example", "return kv['v'] return \"\" else: return \"\" # Your TimeMap object will be", "since there is no value corresponding to foo at timestamp 3 and timestamp", "[null,null,null,\"\",\"high\",\"high\",\"low\",\"low\"] Note: All key/value strings are lowercase. All key/value strings have length in", "strings have length in the range [1, 100] The timestamps for all TimeMap.set", "and TimeMap.get functions will be called a total of 120000 times (combined) per", "timestamp}) else: self.dic[key] = [{'v': value, 't': timestamp}] def get(self, key: 'str', timestamp:", "test case. \"\"\" class TimeMap: def __init__(self): \"\"\" Initialize your data structure here.", "values, it returns the empty string (\"\"). 
Example 1: Input: inputs = [\"TimeMap\",\"set\",\"get\",\"get\",\"set\",\"get\",\"get\"],", "Stores the key and value, along with the given timestamp. 2. get(string key,", "functions will be called a total of 120000 times (combined) per test case.", "store class TimeMap, that supports two operations. 1. set(string key, string value, int", "timestamp: 'int') -> 'None': if key in self.dic: self.dic[key].append({'v': value, 't': timestamp}) else:", "-> 'None': if key in self.dic: self.dic[key].append({'v': value, 't': timestamp}) else: self.dic[key] =", "largest timestamp_prev. If there are no values, it returns the empty string (\"\").", "3 and timestamp 2, then the only value is at timestamp 1 ie", "will be called a total of 120000 times (combined) per test case. \"\"\"", "else: return \"\" # Your TimeMap object will be instantiated and called as", "timebased key-value store class TimeMap, that supports two operations. 1. set(string key, string", "[\"TimeMap\",\"set\",\"get\",\"get\",\"set\",\"get\",\"get\"], inputs = [[],[\"foo\",\"bar\",1],[\"foo\",1],[\"foo\",3],[\"foo\",\"bar2\",4],[\"foo\",4],[\"foo\",5]] Output: [null,null,\"bar\",\"bar\",null,\"bar2\",\"bar2\"] Explanation: TimeMap kv; kv.set(\"foo\", \"bar\", 1); //", "and value \"bar\" along with timestamp = 1 kv.get(\"foo\", 1); // output \"bar\"", "1 kv.get(\"foo\", 1); // output \"bar\" kv.get(\"foo\", 3); // output \"bar\" since there", "value, 't': timestamp}] def get(self, key: 'str', timestamp: 'int') -> 'str': if key", "no value corresponding to foo at timestamp 3 and timestamp 2, then the", "the key \"foo\" and value \"bar\" along with timestamp = 1 kv.get(\"foo\", 1);", "it returns the empty string (\"\"). Example 1: Input: inputs = [\"TimeMap\",\"set\",\"get\",\"get\",\"set\",\"get\",\"get\"], inputs", "timestamp) Returns a value such that set(key, value, timestamp_prev) was called previously, with", "called a total of 120000 times (combined) per test case. 
\"\"\" class TimeMap:", "[\"TimeMap\",\"set\",\"set\",\"get\",\"get\",\"get\",\"get\",\"get\"], inputs = [[],[\"love\",\"high\",10],[\"love\",\"low\",20],[\"love\",5],[\"love\",10],[\"love\",15],[\"love\",20],[\"love\",25]] Output: [null,null,null,\"\",\"high\",\"high\",\"low\",\"low\"] Note: All key/value strings are lowercase. All", "'t': timestamp}] def get(self, key: 'str', timestamp: 'int') -> 'str': if key in", "120000 times (combined) per test case. \"\"\" class TimeMap: def __init__(self): \"\"\" Initialize", "kv.get(\"foo\", 5); //output \"bar2\" Example 2: Input: inputs = [\"TimeMap\",\"set\",\"set\",\"get\",\"get\",\"get\",\"get\",\"get\"], inputs = [[],[\"love\",\"high\",10],[\"love\",\"low\",20],[\"love\",5],[\"love\",10],[\"love\",15],[\"love\",20],[\"love\",25]]", "there are multiple such values, it returns the one with the largest timestamp_prev.", "= [[],[\"foo\",\"bar\",1],[\"foo\",1],[\"foo\",3],[\"foo\",\"bar2\",4],[\"foo\",4],[\"foo\",5]] Output: [null,null,\"bar\",\"bar\",null,\"bar2\",\"bar2\"] Explanation: TimeMap kv; kv.set(\"foo\", \"bar\", 1); // store the", "TimeMap.get functions will be called a total of 120000 times (combined) per test", "'str', timestamp: 'int') -> 'str': if key in self.dic: for kv in reversed(self.dic[key]):", "[null,null,\"bar\",\"bar\",null,\"bar2\",\"bar2\"] Explanation: TimeMap kv; kv.set(\"foo\", \"bar\", 1); // store the key \"foo\" and", "return \"\" else: return \"\" # Your TimeMap object will be instantiated and", "one with the largest timestamp_prev. If there are no values, it returns the", "10^7 TimeMap.set and TimeMap.get functions will be called a total of 120000 times", "2. get(string key, int timestamp) Returns a value such that set(key, value, timestamp_prev)", "with timestamp = 1 kv.get(\"foo\", 1); // output \"bar\" kv.get(\"foo\", 3); // output", "TimeMap, that supports two operations. 1. 
set(string key, string value, int timestamp) Stores", "def set(self, key: 'str', value: 'str', timestamp: 'int') -> 'None': if key in", "strictly increasing. 1 <= timestamp <= 10^7 TimeMap.set and TimeMap.get functions will be", "object will be instantiated and called as such: # obj = TimeMap() #", "key in self.dic: for kv in reversed(self.dic[key]): if timestamp >= kv['t']: return kv['v']", "previously, with timestamp_prev <= timestamp. If there are multiple such values, it returns", "self.dic[key] = [{'v': value, 't': timestamp}] def get(self, key: 'str', timestamp: 'int') ->", "a timebased key-value store class TimeMap, that supports two operations. 1. set(string key,", "timestamp >= kv['t']: return kv['v'] return \"\" else: return \"\" # Your TimeMap", "<= timestamp <= 10^7 TimeMap.set and TimeMap.get functions will be called a total", "it returns the one with the largest timestamp_prev. If there are no values,", "kv['v'] return \"\" else: return \"\" # Your TimeMap object will be instantiated", "timestamp_prev <= timestamp. If there are multiple such values, it returns the one", "timestamp. 2. get(string key, int timestamp) Returns a value such that set(key, value,", "kv.get(\"foo\", 1); // output \"bar\" kv.get(\"foo\", 3); // output \"bar\" since there is", "timestamp 2, then the only value is at timestamp 1 ie \"bar\" kv.set(\"foo\",", "\"bar\" kv.set(\"foo\", \"bar2\", 4); kv.get(\"foo\", 4); // output \"bar2\" kv.get(\"foo\", 5); //output \"bar2\"", "your data structure here. \"\"\" self.dic = {} def set(self, key: 'str', value:", "in reversed(self.dic[key]): if timestamp >= kv['t']: return kv['v'] return \"\" else: return \"\"", "kv.get(\"foo\", 4); // output \"bar2\" kv.get(\"foo\", 5); //output \"bar2\" Example 2: Input: inputs", "are no values, it returns the empty string (\"\"). Example 1: Input: inputs", "operations. 1. 
set(string key, string value, int timestamp) Stores the key and value,", "timestamp_prev) was called previously, with timestamp_prev <= timestamp. If there are multiple such", "1 ie \"bar\" kv.set(\"foo\", \"bar2\", 4); kv.get(\"foo\", 4); // output \"bar2\" kv.get(\"foo\", 5);", "the only value is at timestamp 1 ie \"bar\" kv.set(\"foo\", \"bar2\", 4); kv.get(\"foo\",", "def get(self, key: 'str', timestamp: 'int') -> 'str': if key in self.dic: for", "supports two operations. 1. set(string key, string value, int timestamp) Stores the key", "data structure here. \"\"\" self.dic = {} def set(self, key: 'str', value: 'str',", "= [\"TimeMap\",\"set\",\"get\",\"get\",\"set\",\"get\",\"get\"], inputs = [[],[\"foo\",\"bar\",1],[\"foo\",1],[\"foo\",3],[\"foo\",\"bar2\",4],[\"foo\",4],[\"foo\",5]] Output: [null,null,\"bar\",\"bar\",null,\"bar2\",\"bar2\"] Explanation: TimeMap kv; kv.set(\"foo\", \"bar\", 1);", "case. \"\"\" class TimeMap: def __init__(self): \"\"\" Initialize your data structure here. \"\"\"", "Store https://leetcode.com/problems/time-based-key-value-store/ Create a timebased key-value store class TimeMap, that supports two operations.", "such values, it returns the one with the largest timestamp_prev. If there are", "along with timestamp = 1 kv.get(\"foo\", 1); // output \"bar\" kv.get(\"foo\", 3); //", "[[],[\"love\",\"high\",10],[\"love\",\"low\",20],[\"love\",5],[\"love\",10],[\"love\",15],[\"love\",20],[\"love\",25]] Output: [null,null,null,\"\",\"high\",\"high\",\"low\",\"low\"] Note: All key/value strings are lowercase. All key/value strings have", "times (combined) per test case. \"\"\" class TimeMap: def __init__(self): \"\"\" Initialize your", "'str', timestamp: 'int') -> 'None': if key in self.dic: self.dic[key].append({'v': value, 't': timestamp})", "there are no values, it returns the empty string (\"\"). Example 1: Input:", "(\"\"). 
Example 1: Input: inputs = [\"TimeMap\",\"set\",\"get\",\"get\",\"set\",\"get\",\"get\"], inputs = [[],[\"foo\",\"bar\",1],[\"foo\",1],[\"foo\",3],[\"foo\",\"bar2\",4],[\"foo\",4],[\"foo\",5]] Output: [null,null,\"bar\",\"bar\",null,\"bar2\",\"bar2\"] Explanation:", "TimeMap.set and TimeMap.get functions will be called a total of 120000 times (combined)", "timestamp: 'int') -> 'str': if key in self.dic: for kv in reversed(self.dic[key]): if", "= [\"TimeMap\",\"set\",\"set\",\"get\",\"get\",\"get\",\"get\",\"get\"], inputs = [[],[\"love\",\"high\",10],[\"love\",\"low\",20],[\"love\",5],[\"love\",10],[\"love\",15],[\"love\",20],[\"love\",25]] Output: [null,null,null,\"\",\"high\",\"high\",\"low\",\"low\"] Note: All key/value strings are lowercase.", "the key and value, along with the given timestamp. 2. get(string key, int", "there is no value corresponding to foo at timestamp 3 and timestamp 2,", "strings are lowercase. All key/value strings have length in the range [1, 100]", "The timestamps for all TimeMap.set operations are strictly increasing. 1 <= timestamp <=", "Create a timebased key-value store class TimeMap, that supports two operations. 1. set(string", "// output \"bar\" since there is no value corresponding to foo at timestamp", "else: self.dic[key] = [{'v': value, 't': timestamp}] def get(self, key: 'str', timestamp: 'int')", "-> 'str': if key in self.dic: for kv in reversed(self.dic[key]): if timestamp >=", "'str', value: 'str', timestamp: 'int') -> 'None': if key in self.dic: self.dic[key].append({'v': value,", "at timestamp 1 ie \"bar\" kv.set(\"foo\", \"bar2\", 4); kv.get(\"foo\", 4); // output \"bar2\"", "empty string (\"\"). 
Example 1: Input: inputs = [\"TimeMap\",\"set\",\"get\",\"get\",\"set\",\"get\",\"get\"], inputs = [[],[\"foo\",\"bar\",1],[\"foo\",1],[\"foo\",3],[\"foo\",\"bar2\",4],[\"foo\",4],[\"foo\",5]] Output:", "for kv in reversed(self.dic[key]): if timestamp >= kv['t']: return kv['v'] return \"\" else:", "and called as such: # obj = TimeMap() # obj.set(key,value,timestamp) # param_2 =", "range [1, 100] The timestamps for all TimeMap.set operations are strictly increasing. 1", "called as such: # obj = TimeMap() # obj.set(key,value,timestamp) # param_2 = obj.get(key,timestamp)", "1 <= timestamp <= 10^7 TimeMap.set and TimeMap.get functions will be called a", "timestamp. If there are multiple such values, it returns the one with the", "# Your TimeMap object will be instantiated and called as such: # obj", "Explanation: TimeMap kv; kv.set(\"foo\", \"bar\", 1); // store the key \"foo\" and value", "inputs = [[],[\"foo\",\"bar\",1],[\"foo\",1],[\"foo\",3],[\"foo\",\"bar2\",4],[\"foo\",4],[\"foo\",5]] Output: [null,null,\"bar\",\"bar\",null,\"bar2\",\"bar2\"] Explanation: TimeMap kv; kv.set(\"foo\", \"bar\", 1); // store", "self.dic = {} def set(self, key: 'str', value: 'str', timestamp: 'int') -> 'None':", "two operations. 1. set(string key, string value, int timestamp) Stores the key and", "\"bar\", 1); // store the key \"foo\" and value \"bar\" along with timestamp", "returns the one with the largest timestamp_prev. If there are no values, it", "value is at timestamp 1 ie \"bar\" kv.set(\"foo\", \"bar2\", 4); kv.get(\"foo\", 4); //", "self.dic: self.dic[key].append({'v': value, 't': timestamp}) else: self.dic[key] = [{'v': value, 't': timestamp}] def", "at timestamp 3 and timestamp 2, then the only value is at timestamp", "= 1 kv.get(\"foo\", 1); // output \"bar\" kv.get(\"foo\", 3); // output \"bar\" since", "with timestamp_prev <= timestamp. 
If there are multiple such values, it returns the", "= [{'v': value, 't': timestamp}] def get(self, key: 'str', timestamp: 'int') -> 'str':", "are strictly increasing. 1 <= timestamp <= 10^7 TimeMap.set and TimeMap.get functions will", "kv in reversed(self.dic[key]): if timestamp >= kv['t']: return kv['v'] return \"\" else: return", "(combined) per test case. \"\"\" class TimeMap: def __init__(self): \"\"\" Initialize your data", "Output: [null,null,\"bar\",\"bar\",null,\"bar2\",\"bar2\"] Explanation: TimeMap kv; kv.set(\"foo\", \"bar\", 1); // store the key \"foo\"", "\"\"\" class TimeMap: def __init__(self): \"\"\" Initialize your data structure here. \"\"\" self.dic", "= [[],[\"love\",\"high\",10],[\"love\",\"low\",20],[\"love\",5],[\"love\",10],[\"love\",15],[\"love\",20],[\"love\",25]] Output: [null,null,null,\"\",\"high\",\"high\",\"low\",\"low\"] Note: All key/value strings are lowercase. All key/value strings", "// output \"bar2\" kv.get(\"foo\", 5); //output \"bar2\" Example 2: Input: inputs = [\"TimeMap\",\"set\",\"set\",\"get\",\"get\",\"get\",\"get\",\"get\"],", "no values, it returns the empty string (\"\"). Example 1: Input: inputs =", "key-value store class TimeMap, that supports two operations. 1. set(string key, string value,", "key, int timestamp) Returns a value such that set(key, value, timestamp_prev) was called", "1); // store the key \"foo\" and value \"bar\" along with timestamp =", "a total of 120000 times (combined) per test case. 
\"\"\" class TimeMap: def", "value: 'str', timestamp: 'int') -> 'None': if key in self.dic: self.dic[key].append({'v': value, 't':", "'None': if key in self.dic: self.dic[key].append({'v': value, 't': timestamp}) else: self.dic[key] = [{'v':", "get(self, key: 'str', timestamp: 'int') -> 'str': if key in self.dic: for kv", "\"bar2\" Example 2: Input: inputs = [\"TimeMap\",\"set\",\"set\",\"get\",\"get\",\"get\",\"get\",\"get\"], inputs = [[],[\"love\",\"high\",10],[\"love\",\"low\",20],[\"love\",5],[\"love\",10],[\"love\",15],[\"love\",20],[\"love\",25]] Output: [null,null,null,\"\",\"high\",\"high\",\"low\",\"low\"] Note:", "set(string key, string value, int timestamp) Stores the key and value, along with", "Output: [null,null,null,\"\",\"high\",\"high\",\"low\",\"low\"] Note: All key/value strings are lowercase. All key/value strings have length", "If there are multiple such values, it returns the one with the largest", "the range [1, 100] The timestamps for all TimeMap.set operations are strictly increasing.", "if timestamp >= kv['t']: return kv['v'] return \"\" else: return \"\" # Your", "timestamp <= 10^7 TimeMap.set and TimeMap.get functions will be called a total of", "self.dic[key].append({'v': value, 't': timestamp}) else: self.dic[key] = [{'v': value, 't': timestamp}] def get(self,", "1: Input: inputs = [\"TimeMap\",\"set\",\"get\",\"get\",\"set\",\"get\",\"get\"], inputs = [[],[\"foo\",\"bar\",1],[\"foo\",1],[\"foo\",3],[\"foo\",\"bar2\",4],[\"foo\",4],[\"foo\",5]] Output: [null,null,\"bar\",\"bar\",null,\"bar2\",\"bar2\"] Explanation: TimeMap kv;", "is at timestamp 1 ie \"bar\" kv.set(\"foo\", \"bar2\", 4); kv.get(\"foo\", 4); // output", "returns the empty string (\"\"). 
Example 1: Input: inputs = [\"TimeMap\",\"set\",\"get\",\"get\",\"set\",\"get\",\"get\"], inputs =", "// store the key \"foo\" and value \"bar\" along with timestamp = 1", "Key-Value Store https://leetcode.com/problems/time-based-key-value-store/ Create a timebased key-value store class TimeMap, that supports two", "Input: inputs = [\"TimeMap\",\"set\",\"set\",\"get\",\"get\",\"get\",\"get\",\"get\"], inputs = [[],[\"love\",\"high\",10],[\"love\",\"low\",20],[\"love\",5],[\"love\",10],[\"love\",15],[\"love\",20],[\"love\",25]] Output: [null,null,null,\"\",\"high\",\"high\",\"low\",\"low\"] Note: All key/value strings", "timestamp 3 and timestamp 2, then the only value is at timestamp 1", "Note: All key/value strings are lowercase. All key/value strings have length in the", "then the only value is at timestamp 1 ie \"bar\" kv.set(\"foo\", \"bar2\", 4);", "values, it returns the one with the largest timestamp_prev. If there are no", "string (\"\"). Example 1: Input: inputs = [\"TimeMap\",\"set\",\"get\",\"get\",\"set\",\"get\",\"get\"], inputs = [[],[\"foo\",\"bar\",1],[\"foo\",1],[\"foo\",3],[\"foo\",\"bar2\",4],[\"foo\",4],[\"foo\",5]] Output: [null,null,\"bar\",\"bar\",null,\"bar2\",\"bar2\"]", "get(string key, int timestamp) Returns a value such that set(key, value, timestamp_prev) was", "TimeMap kv; kv.set(\"foo\", \"bar\", 1); // store the key \"foo\" and value \"bar\"", "= {} def set(self, key: 'str', value: 'str', timestamp: 'int') -> 'None': if", "100] The timestamps for all TimeMap.set operations are strictly increasing. 1 <= timestamp", "timestamp = 1 kv.get(\"foo\", 1); // output \"bar\" kv.get(\"foo\", 3); // output \"bar\"", "[{'v': value, 't': timestamp}] def get(self, key: 'str', timestamp: 'int') -> 'str': if", "value, 't': timestamp}) else: self.dic[key] = [{'v': value, 't': timestamp}] def get(self, key:", "def __init__(self): \"\"\" Initialize your data structure here. \"\"\" self.dic = {} def", "\"\"\" Initialize your data structure here. 
\"\"\" self.dic = {} def set(self, key:", "will be instantiated and called as such: # obj = TimeMap() # obj.set(key,value,timestamp)", "store the key \"foo\" and value \"bar\" along with timestamp = 1 kv.get(\"foo\",", "<= 10^7 TimeMap.set and TimeMap.get functions will be called a total of 120000", "along with the given timestamp. 2. get(string key, int timestamp) Returns a value", "class TimeMap, that supports two operations. 1. set(string key, string value, int timestamp)", "value, int timestamp) Stores the key and value, along with the given timestamp.", "timestamp) Stores the key and value, along with the given timestamp. 2. get(string", "[[],[\"foo\",\"bar\",1],[\"foo\",1],[\"foo\",3],[\"foo\",\"bar2\",4],[\"foo\",4],[\"foo\",5]] Output: [null,null,\"bar\",\"bar\",null,\"bar2\",\"bar2\"] Explanation: TimeMap kv; kv.set(\"foo\", \"bar\", 1); // store the key", "int timestamp) Stores the key and value, along with the given timestamp. 2.", "\"\" else: return \"\" # Your TimeMap object will be instantiated and called", "output \"bar\" since there is no value corresponding to foo at timestamp 3", "in self.dic: for kv in reversed(self.dic[key]): if timestamp >= kv['t']: return kv['v'] return" ]
[ "open(html_file) as fp: html_template = Template(fp.read()) if not config.dry_run: boxplot_spec = json.dumps(_get_boxplot_spec(), indent=2)", "\"boxplot\", \"extent\": \"min-max\", \"size\": 5}, \"width\": 1400, \"height\": 500, \"encoding\": { \"y\": {\"field\":", "def write_html(): html_file = os.path.join(os.path.dirname(__file__), \"templates\", \"index.html\") with open(html_file) as fp: html_template =", "config.dry_run: boxplot_spec = json.dumps(_get_boxplot_spec(), indent=2) with open(config.html_output_file, \"w\") as fp: fp.write(html_template.render(boxplot_spec=boxplot_spec)) def _get_boxplot_spec():", "fp.write(html_template.render(boxplot_spec=boxplot_spec)) def _get_boxplot_spec(): with open(config.combined_benchmark_file) as fp: values = json.load(fp) return { \"$schema\":", "import config def write_html(): html_file = os.path.join(os.path.dirname(__file__), \"templates\", \"index.html\") with open(html_file) as fp:", "\"width\": 1400, \"height\": 500, \"encoding\": { \"y\": {\"field\": \"time\", \"type\": \"quantitative\", \"axis\": {\"title\":", "\"height\": 500, \"encoding\": { \"y\": {\"field\": \"time\", \"type\": \"quantitative\", \"axis\": {\"title\": \"Time\"}}, \"x\":", "fp: html_template = Template(fp.read()) if not config.dry_run: boxplot_spec = json.dumps(_get_boxplot_spec(), indent=2) with open(config.html_output_file,", "with open(config.html_output_file, \"w\") as fp: fp.write(html_template.render(boxplot_spec=boxplot_spec)) def _get_boxplot_spec(): with open(config.combined_benchmark_file) as fp: values", "\"extent\": \"min-max\", \"size\": 5}, \"width\": 1400, \"height\": 500, \"encoding\": { \"y\": {\"field\": \"time\",", "def _get_boxplot_spec(): with open(config.combined_benchmark_file) as fp: values = json.load(fp) return { \"$schema\": \"https://vega.github.io/schema/vega-lite/v3.json\",", "with open(html_file) as fp: html_template = Template(fp.read()) if not config.dry_run: boxplot_spec = json.dumps(_get_boxplot_spec(),", 
"{ \"$schema\": \"https://vega.github.io/schema/vega-lite/v3.json\", \"data\": {\"values\": values}, \"mark\": {\"type\": \"boxplot\", \"extent\": \"min-max\", \"size\": 5},", "return { \"$schema\": \"https://vega.github.io/schema/vega-lite/v3.json\", \"data\": {\"values\": values}, \"mark\": {\"type\": \"boxplot\", \"extent\": \"min-max\", \"size\":", "{\"values\": values}, \"mark\": {\"type\": \"boxplot\", \"extent\": \"min-max\", \"size\": 5}, \"width\": 1400, \"height\": 500,", "{ \"field\": \"commit\", \"type\": \"ordinal\", \"axis\": {\"title\": \"Commit\", \"labels\": False, \"ticks\": False}, },", "Template from chronologer.config import config def write_html(): html_file = os.path.join(os.path.dirname(__file__), \"templates\", \"index.html\") with", "{\"type\": \"boxplot\", \"extent\": \"min-max\", \"size\": 5}, \"width\": 1400, \"height\": 500, \"encoding\": { \"y\":", "as fp: values = json.load(fp) return { \"$schema\": \"https://vega.github.io/schema/vega-lite/v3.json\", \"data\": {\"values\": values}, \"mark\":", "with open(config.combined_benchmark_file) as fp: values = json.load(fp) return { \"$schema\": \"https://vega.github.io/schema/vega-lite/v3.json\", \"data\": {\"values\":", "import json import os from jinja2 import Template from chronologer.config import config def", "jinja2 import Template from chronologer.config import config def write_html(): html_file = os.path.join(os.path.dirname(__file__), \"templates\",", "os.path.join(os.path.dirname(__file__), \"templates\", \"index.html\") with open(html_file) as fp: html_template = Template(fp.read()) if not config.dry_run:", "if not config.dry_run: boxplot_spec = json.dumps(_get_boxplot_spec(), indent=2) with open(config.html_output_file, \"w\") as fp: fp.write(html_template.render(boxplot_spec=boxplot_spec))", "json import os from jinja2 import Template from chronologer.config import config def write_html():", "boxplot_spec = json.dumps(_get_boxplot_spec(), indent=2) with 
open(config.html_output_file, \"w\") as fp: fp.write(html_template.render(boxplot_spec=boxplot_spec)) def _get_boxplot_spec(): with", "\"Time\"}}, \"x\": { \"field\": \"commit\", \"type\": \"ordinal\", \"axis\": {\"title\": \"Commit\", \"labels\": False, \"ticks\":", "values = json.load(fp) return { \"$schema\": \"https://vega.github.io/schema/vega-lite/v3.json\", \"data\": {\"values\": values}, \"mark\": {\"type\": \"boxplot\",", "write_html(): html_file = os.path.join(os.path.dirname(__file__), \"templates\", \"index.html\") with open(html_file) as fp: html_template = Template(fp.read())", "as fp: html_template = Template(fp.read()) if not config.dry_run: boxplot_spec = json.dumps(_get_boxplot_spec(), indent=2) with", "from chronologer.config import config def write_html(): html_file = os.path.join(os.path.dirname(__file__), \"templates\", \"index.html\") with open(html_file)", "{\"title\": \"Commit\", \"labels\": False, \"ticks\": False}, }, \"tooltip\": {\"field\": \"message\", \"type\": \"ordinal\", \"aggregate\":", "= os.path.join(os.path.dirname(__file__), \"templates\", \"index.html\") with open(html_file) as fp: html_template = Template(fp.read()) if not", "\"https://vega.github.io/schema/vega-lite/v3.json\", \"data\": {\"values\": values}, \"mark\": {\"type\": \"boxplot\", \"extent\": \"min-max\", \"size\": 5}, \"width\": 1400,", "import Template from chronologer.config import config def write_html(): html_file = os.path.join(os.path.dirname(__file__), \"templates\", \"index.html\")", "= Template(fp.read()) if not config.dry_run: boxplot_spec = json.dumps(_get_boxplot_spec(), indent=2) with open(config.html_output_file, \"w\") as", "_get_boxplot_spec(): with open(config.combined_benchmark_file) as fp: values = json.load(fp) return { \"$schema\": \"https://vega.github.io/schema/vega-lite/v3.json\", \"data\":", "os from jinja2 import Template from chronologer.config import config def write_html(): html_file =", "\"quantitative\", \"axis\": {\"title\": 
\"Time\"}}, \"x\": { \"field\": \"commit\", \"type\": \"ordinal\", \"axis\": {\"title\": \"Commit\",", "indent=2) with open(config.html_output_file, \"w\") as fp: fp.write(html_template.render(boxplot_spec=boxplot_spec)) def _get_boxplot_spec(): with open(config.combined_benchmark_file) as fp:", "\"axis\": {\"title\": \"Commit\", \"labels\": False, \"ticks\": False}, }, \"tooltip\": {\"field\": \"message\", \"type\": \"ordinal\",", "\"type\": \"quantitative\", \"axis\": {\"title\": \"Time\"}}, \"x\": { \"field\": \"commit\", \"type\": \"ordinal\", \"axis\": {\"title\":", "\"Commit\", \"labels\": False, \"ticks\": False}, }, \"tooltip\": {\"field\": \"message\", \"type\": \"ordinal\", \"aggregate\": \"min\"},", "as fp: fp.write(html_template.render(boxplot_spec=boxplot_spec)) def _get_boxplot_spec(): with open(config.combined_benchmark_file) as fp: values = json.load(fp) return", "\"ordinal\", \"axis\": {\"title\": \"Commit\", \"labels\": False, \"ticks\": False}, }, \"tooltip\": {\"field\": \"message\", \"type\":", "\"field\": \"commit\", \"type\": \"ordinal\", \"axis\": {\"title\": \"Commit\", \"labels\": False, \"ticks\": False}, }, \"tooltip\":", "html_template = Template(fp.read()) if not config.dry_run: boxplot_spec = json.dumps(_get_boxplot_spec(), indent=2) with open(config.html_output_file, \"w\")", "1400, \"height\": 500, \"encoding\": { \"y\": {\"field\": \"time\", \"type\": \"quantitative\", \"axis\": {\"title\": \"Time\"}},", "json.dumps(_get_boxplot_spec(), indent=2) with open(config.html_output_file, \"w\") as fp: fp.write(html_template.render(boxplot_spec=boxplot_spec)) def _get_boxplot_spec(): with open(config.combined_benchmark_file) as", "\"$schema\": \"https://vega.github.io/schema/vega-lite/v3.json\", \"data\": {\"values\": values}, \"mark\": {\"type\": \"boxplot\", \"extent\": \"min-max\", \"size\": 5}, \"width\":", "False, \"ticks\": False}, }, \"tooltip\": {\"field\": \"message\", \"type\": \"ordinal\", \"aggregate\": \"min\"}, }, }", 
"\"min-max\", \"size\": 5}, \"width\": 1400, \"height\": 500, \"encoding\": { \"y\": {\"field\": \"time\", \"type\":", "\"encoding\": { \"y\": {\"field\": \"time\", \"type\": \"quantitative\", \"axis\": {\"title\": \"Time\"}}, \"x\": { \"field\":", "\"axis\": {\"title\": \"Time\"}}, \"x\": { \"field\": \"commit\", \"type\": \"ordinal\", \"axis\": {\"title\": \"Commit\", \"labels\":", "Template(fp.read()) if not config.dry_run: boxplot_spec = json.dumps(_get_boxplot_spec(), indent=2) with open(config.html_output_file, \"w\") as fp:", "config def write_html(): html_file = os.path.join(os.path.dirname(__file__), \"templates\", \"index.html\") with open(html_file) as fp: html_template", "500, \"encoding\": { \"y\": {\"field\": \"time\", \"type\": \"quantitative\", \"axis\": {\"title\": \"Time\"}}, \"x\": {", "chronologer.config import config def write_html(): html_file = os.path.join(os.path.dirname(__file__), \"templates\", \"index.html\") with open(html_file) as", "html_file = os.path.join(os.path.dirname(__file__), \"templates\", \"index.html\") with open(html_file) as fp: html_template = Template(fp.read()) if", "{ \"y\": {\"field\": \"time\", \"type\": \"quantitative\", \"axis\": {\"title\": \"Time\"}}, \"x\": { \"field\": \"commit\",", "\"size\": 5}, \"width\": 1400, \"height\": 500, \"encoding\": { \"y\": {\"field\": \"time\", \"type\": \"quantitative\",", "= json.dumps(_get_boxplot_spec(), indent=2) with open(config.html_output_file, \"w\") as fp: fp.write(html_template.render(boxplot_spec=boxplot_spec)) def _get_boxplot_spec(): with open(config.combined_benchmark_file)", "\"time\", \"type\": \"quantitative\", \"axis\": {\"title\": \"Time\"}}, \"x\": { \"field\": \"commit\", \"type\": \"ordinal\", \"axis\":", "\"templates\", \"index.html\") with open(html_file) as fp: html_template = Template(fp.read()) if not config.dry_run: boxplot_spec", "\"index.html\") with open(html_file) as fp: html_template = Template(fp.read()) if not config.dry_run: boxplot_spec =", 
"\"type\": \"ordinal\", \"axis\": {\"title\": \"Commit\", \"labels\": False, \"ticks\": False}, }, \"tooltip\": {\"field\": \"message\",", "open(config.combined_benchmark_file) as fp: values = json.load(fp) return { \"$schema\": \"https://vega.github.io/schema/vega-lite/v3.json\", \"data\": {\"values\": values},", "not config.dry_run: boxplot_spec = json.dumps(_get_boxplot_spec(), indent=2) with open(config.html_output_file, \"w\") as fp: fp.write(html_template.render(boxplot_spec=boxplot_spec)) def", "\"mark\": {\"type\": \"boxplot\", \"extent\": \"min-max\", \"size\": 5}, \"width\": 1400, \"height\": 500, \"encoding\": {", "import os from jinja2 import Template from chronologer.config import config def write_html(): html_file", "5}, \"width\": 1400, \"height\": 500, \"encoding\": { \"y\": {\"field\": \"time\", \"type\": \"quantitative\", \"axis\":", "open(config.html_output_file, \"w\") as fp: fp.write(html_template.render(boxplot_spec=boxplot_spec)) def _get_boxplot_spec(): with open(config.combined_benchmark_file) as fp: values =", "from jinja2 import Template from chronologer.config import config def write_html(): html_file = os.path.join(os.path.dirname(__file__),", "\"labels\": False, \"ticks\": False}, }, \"tooltip\": {\"field\": \"message\", \"type\": \"ordinal\", \"aggregate\": \"min\"}, },", "= json.load(fp) return { \"$schema\": \"https://vega.github.io/schema/vega-lite/v3.json\", \"data\": {\"values\": values}, \"mark\": {\"type\": \"boxplot\", \"extent\":", "{\"title\": \"Time\"}}, \"x\": { \"field\": \"commit\", \"type\": \"ordinal\", \"axis\": {\"title\": \"Commit\", \"labels\": False,", "json.load(fp) return { \"$schema\": \"https://vega.github.io/schema/vega-lite/v3.json\", \"data\": {\"values\": values}, \"mark\": {\"type\": \"boxplot\", \"extent\": \"min-max\",", "fp: fp.write(html_template.render(boxplot_spec=boxplot_spec)) def _get_boxplot_spec(): with open(config.combined_benchmark_file) as fp: values = json.load(fp) return {", "{\"field\": 
\"time\", \"type\": \"quantitative\", \"axis\": {\"title\": \"Time\"}}, \"x\": { \"field\": \"commit\", \"type\": \"ordinal\",", "\"commit\", \"type\": \"ordinal\", \"axis\": {\"title\": \"Commit\", \"labels\": False, \"ticks\": False}, }, \"tooltip\": {\"field\":", "\"w\") as fp: fp.write(html_template.render(boxplot_spec=boxplot_spec)) def _get_boxplot_spec(): with open(config.combined_benchmark_file) as fp: values = json.load(fp)", "fp: values = json.load(fp) return { \"$schema\": \"https://vega.github.io/schema/vega-lite/v3.json\", \"data\": {\"values\": values}, \"mark\": {\"type\":", "values}, \"mark\": {\"type\": \"boxplot\", \"extent\": \"min-max\", \"size\": 5}, \"width\": 1400, \"height\": 500, \"encoding\":", "\"x\": { \"field\": \"commit\", \"type\": \"ordinal\", \"axis\": {\"title\": \"Commit\", \"labels\": False, \"ticks\": False},", "\"data\": {\"values\": values}, \"mark\": {\"type\": \"boxplot\", \"extent\": \"min-max\", \"size\": 5}, \"width\": 1400, \"height\":", "\"y\": {\"field\": \"time\", \"type\": \"quantitative\", \"axis\": {\"title\": \"Time\"}}, \"x\": { \"field\": \"commit\", \"type\":" ]
[ "AX_AAF_COMAPI = os.path.join(dirname,'libcom-api' + ext) os.environ['AX_AAF_COMAPI'] = os.environ.get('AX_AAF_COMAPI', AX_AAF_COMAPI) return core.AxInit() _AxInit =", "from core import * import core def Initialize(): \"\"\" find libcom-api and initialize", "\"\"\" import os import sys dirname = os.path.dirname(__file__) ext = '.so' if sys.platform", "'.dylib' elif sys.platform.startswith(\"win\"): ext = '.dll' AX_AAF_COMAPI = os.path.join(dirname,'libcom-api' + ext) os.environ['AX_AAF_COMAPI'] =", "core import * import core def Initialize(): \"\"\" find libcom-api and initialize \"\"\"", "* import core def Initialize(): \"\"\" find libcom-api and initialize \"\"\" import os", "= '.dll' AX_AAF_COMAPI = os.path.join(dirname,'libcom-api' + ext) os.environ['AX_AAF_COMAPI'] = os.environ.get('AX_AAF_COMAPI', AX_AAF_COMAPI) return core.AxInit()", "core.AxInit() _AxInit = Initialize() from util import __AxWrap __AxWrap(globals()) from open import open", "ext) os.environ['AX_AAF_COMAPI'] = os.environ.get('AX_AAF_COMAPI', AX_AAF_COMAPI) return core.AxInit() _AxInit = Initialize() from util import", "from util import __AxWrap __AxWrap(globals()) from open import open from util import Ax", "import * import core def Initialize(): \"\"\" find libcom-api and initialize \"\"\" import", "= '.so' if sys.platform == 'darwin': ext = '.dylib' elif sys.platform.startswith(\"win\"): ext =", "= os.path.dirname(__file__) ext = '.so' if sys.platform == 'darwin': ext = '.dylib' elif", "os.path.join(dirname,'libcom-api' + ext) os.environ['AX_AAF_COMAPI'] = os.environ.get('AX_AAF_COMAPI', AX_AAF_COMAPI) return core.AxInit() _AxInit = Initialize() from", "== 'darwin': ext = '.dylib' elif sys.platform.startswith(\"win\"): ext = '.dll' AX_AAF_COMAPI = os.path.join(dirname,'libcom-api'", "'darwin': ext = '.dylib' elif sys.platform.startswith(\"win\"): ext = '.dll' AX_AAF_COMAPI = os.path.join(dirname,'libcom-api' +", "ext = '.dll' AX_AAF_COMAPI = os.path.join(dirname,'libcom-api' + ext) 
os.environ['AX_AAF_COMAPI'] = os.environ.get('AX_AAF_COMAPI', AX_AAF_COMAPI) return", "libcom-api and initialize \"\"\" import os import sys dirname = os.path.dirname(__file__) ext =", "+ ext) os.environ['AX_AAF_COMAPI'] = os.environ.get('AX_AAF_COMAPI', AX_AAF_COMAPI) return core.AxInit() _AxInit = Initialize() from util", "return core.AxInit() _AxInit = Initialize() from util import __AxWrap __AxWrap(globals()) from open import", "\"\"\" find libcom-api and initialize \"\"\" import os import sys dirname = os.path.dirname(__file__)", "find libcom-api and initialize \"\"\" import os import sys dirname = os.path.dirname(__file__) ext", "initialize \"\"\" import os import sys dirname = os.path.dirname(__file__) ext = '.so' if", "dirname = os.path.dirname(__file__) ext = '.so' if sys.platform == 'darwin': ext = '.dylib'", "if sys.platform == 'darwin': ext = '.dylib' elif sys.platform.startswith(\"win\"): ext = '.dll' AX_AAF_COMAPI", "= '.dylib' elif sys.platform.startswith(\"win\"): ext = '.dll' AX_AAF_COMAPI = os.path.join(dirname,'libcom-api' + ext) os.environ['AX_AAF_COMAPI']", "import core def Initialize(): \"\"\" find libcom-api and initialize \"\"\" import os import", "import sys dirname = os.path.dirname(__file__) ext = '.so' if sys.platform == 'darwin': ext", "'.so' if sys.platform == 'darwin': ext = '.dylib' elif sys.platform.startswith(\"win\"): ext = '.dll'", "ext = '.so' if sys.platform == 'darwin': ext = '.dylib' elif sys.platform.startswith(\"win\"): ext", "= Initialize() from util import __AxWrap __AxWrap(globals()) from open import open from util", "sys dirname = os.path.dirname(__file__) ext = '.so' if sys.platform == 'darwin': ext =", "core def Initialize(): \"\"\" find libcom-api and initialize \"\"\" import os import sys", "os import sys dirname = os.path.dirname(__file__) ext = '.so' if sys.platform == 'darwin':", "Initialize() from util import __AxWrap __AxWrap(globals()) from open import open from util import", "_AxInit = Initialize() from util 
import __AxWrap __AxWrap(globals()) from open import open from", "'.dll' AX_AAF_COMAPI = os.path.join(dirname,'libcom-api' + ext) os.environ['AX_AAF_COMAPI'] = os.environ.get('AX_AAF_COMAPI', AX_AAF_COMAPI) return core.AxInit() _AxInit", "os.path.dirname(__file__) ext = '.so' if sys.platform == 'darwin': ext = '.dylib' elif sys.platform.startswith(\"win\"):", "and initialize \"\"\" import os import sys dirname = os.path.dirname(__file__) ext = '.so'", "os.environ.get('AX_AAF_COMAPI', AX_AAF_COMAPI) return core.AxInit() _AxInit = Initialize() from util import __AxWrap __AxWrap(globals()) from", "elif sys.platform.startswith(\"win\"): ext = '.dll' AX_AAF_COMAPI = os.path.join(dirname,'libcom-api' + ext) os.environ['AX_AAF_COMAPI'] = os.environ.get('AX_AAF_COMAPI',", "import os import sys dirname = os.path.dirname(__file__) ext = '.so' if sys.platform ==", "ext = '.dylib' elif sys.platform.startswith(\"win\"): ext = '.dll' AX_AAF_COMAPI = os.path.join(dirname,'libcom-api' + ext)", "sys.platform == 'darwin': ext = '.dylib' elif sys.platform.startswith(\"win\"): ext = '.dll' AX_AAF_COMAPI =", "AX_AAF_COMAPI) return core.AxInit() _AxInit = Initialize() from util import __AxWrap __AxWrap(globals()) from open", "def Initialize(): \"\"\" find libcom-api and initialize \"\"\" import os import sys dirname", "Initialize(): \"\"\" find libcom-api and initialize \"\"\" import os import sys dirname =", "= os.environ.get('AX_AAF_COMAPI', AX_AAF_COMAPI) return core.AxInit() _AxInit = Initialize() from util import __AxWrap __AxWrap(globals())", "= os.path.join(dirname,'libcom-api' + ext) os.environ['AX_AAF_COMAPI'] = os.environ.get('AX_AAF_COMAPI', AX_AAF_COMAPI) return core.AxInit() _AxInit = Initialize()", "os.environ['AX_AAF_COMAPI'] = os.environ.get('AX_AAF_COMAPI', AX_AAF_COMAPI) return core.AxInit() _AxInit = Initialize() from util import __AxWrap", "sys.platform.startswith(\"win\"): ext = '.dll' AX_AAF_COMAPI = os.path.join(dirname,'libcom-api' + ext) 
os.environ['AX_AAF_COMAPI'] = os.environ.get('AX_AAF_COMAPI', AX_AAF_COMAPI)" ]
[ "in keys: config = config[key] return config if __name__ == '__main__': # wait_time", "yaml import os fileNamePath = os.path.split(os.path.realpath(__file__))[0] dir = os.path.join(fileNamePath,'../conf') def get(file_name,*keys,file_path=dir): yamlPath =", "= config[key] return config if __name__ == '__main__': # wait_time = yaml_utils.get(\"constant.yaml\", \"wait_elements_time\")", "fileNamePath = os.path.split(os.path.realpath(__file__))[0] dir = os.path.join(fileNamePath,'../conf') def get(file_name,*keys,file_path=dir): yamlPath = os.path.join(file_path, file_name) file", "config[key] return config if __name__ == '__main__': # wait_time = yaml_utils.get(\"constant.yaml\", \"wait_elements_time\") #", "= yaml_utils.get(\"constant.yaml\", \"wait_elements_time\") # driver = get(\"host\",\"url_regerister\") # driver2 = get_url(\"constant.yaml\",\"host\") driver2 =", "import yaml import os fileNamePath = os.path.split(os.path.realpath(__file__))[0] dir = os.path.join(fileNamePath,'../conf') def get(file_name,*keys,file_path=dir): yamlPath", "keys: config = config[key] return config if __name__ == '__main__': # wait_time =", "yaml.load(file) for key in keys: config = config[key] return config if __name__ ==", "os.path.join(fileNamePath,'../conf') def get(file_name,*keys,file_path=dir): yamlPath = os.path.join(file_path, file_name) file = open(yamlPath, 'r', encoding='utf-8') config", "for key in keys: config = config[key] return config if __name__ == '__main__':", "config if __name__ == '__main__': # wait_time = yaml_utils.get(\"constant.yaml\", \"wait_elements_time\") # driver =", "wait_time = yaml_utils.get(\"constant.yaml\", \"wait_elements_time\") # driver = get(\"host\",\"url_regerister\") # driver2 = get_url(\"constant.yaml\",\"host\") driver2", "open(yamlPath, 'r', encoding='utf-8') config = yaml.load(file) for key in keys: config = config[key]", "file_name) file = open(yamlPath, 'r', encoding='utf-8') config = yaml.load(file) for key in keys:", "config = 
yaml.load(file) for key in keys: config = config[key] return config if", "config = config[key] return config if __name__ == '__main__': # wait_time = yaml_utils.get(\"constant.yaml\",", "\"wait_elements_time\") # driver = get(\"host\",\"url_regerister\") # driver2 = get_url(\"constant.yaml\",\"host\") driver2 = get(\"constant.yaml\",\"test1\",\"test2\",\"test33\") print(driver2)", "# driver = get(\"host\",\"url_regerister\") # driver2 = get_url(\"constant.yaml\",\"host\") driver2 = get(\"constant.yaml\",\"test1\",\"test2\",\"test33\") print(driver2) #", "= os.path.join(file_path, file_name) file = open(yamlPath, 'r', encoding='utf-8') config = yaml.load(file) for key", "if __name__ == '__main__': # wait_time = yaml_utils.get(\"constant.yaml\", \"wait_elements_time\") # driver = get(\"host\",\"url_regerister\")", "= os.path.join(fileNamePath,'../conf') def get(file_name,*keys,file_path=dir): yamlPath = os.path.join(file_path, file_name) file = open(yamlPath, 'r', encoding='utf-8')", "# wait_time = yaml_utils.get(\"constant.yaml\", \"wait_elements_time\") # driver = get(\"host\",\"url_regerister\") # driver2 = get_url(\"constant.yaml\",\"host\")", "yamlPath = os.path.join(file_path, file_name) file = open(yamlPath, 'r', encoding='utf-8') config = yaml.load(file) for", "= get(\"host\",\"url_regerister\") # driver2 = get_url(\"constant.yaml\",\"host\") driver2 = get(\"constant.yaml\",\"test1\",\"test2\",\"test33\") print(driver2) # a =", "dir = os.path.join(fileNamePath,'../conf') def get(file_name,*keys,file_path=dir): yamlPath = os.path.join(file_path, file_name) file = open(yamlPath, 'r',", "get(\"host\",\"url_regerister\") # driver2 = get_url(\"constant.yaml\",\"host\") driver2 = get(\"constant.yaml\",\"test1\",\"test2\",\"test33\") print(driver2) # a = (1,2)", "__name__ == '__main__': # wait_time = yaml_utils.get(\"constant.yaml\", \"wait_elements_time\") # driver = get(\"host\",\"url_regerister\") #", "os.path.split(os.path.realpath(__file__))[0] dir = 
os.path.join(fileNamePath,'../conf') def get(file_name,*keys,file_path=dir): yamlPath = os.path.join(file_path, file_name) file = open(yamlPath,", "yaml_utils.get(\"constant.yaml\", \"wait_elements_time\") # driver = get(\"host\",\"url_regerister\") # driver2 = get_url(\"constant.yaml\",\"host\") driver2 = get(\"constant.yaml\",\"test1\",\"test2\",\"test33\")", "= os.path.split(os.path.realpath(__file__))[0] dir = os.path.join(fileNamePath,'../conf') def get(file_name,*keys,file_path=dir): yamlPath = os.path.join(file_path, file_name) file =", "return config if __name__ == '__main__': # wait_time = yaml_utils.get(\"constant.yaml\", \"wait_elements_time\") # driver", "driver = get(\"host\",\"url_regerister\") # driver2 = get_url(\"constant.yaml\",\"host\") driver2 = get(\"constant.yaml\",\"test1\",\"test2\",\"test33\") print(driver2) # a", "driver2 = get_url(\"constant.yaml\",\"host\") driver2 = get(\"constant.yaml\",\"test1\",\"test2\",\"test33\") print(driver2) # a = (1,2) # print(type(a))", "= open(yamlPath, 'r', encoding='utf-8') config = yaml.load(file) for key in keys: config =", "== '__main__': # wait_time = yaml_utils.get(\"constant.yaml\", \"wait_elements_time\") # driver = get(\"host\",\"url_regerister\") # driver2", "key in keys: config = config[key] return config if __name__ == '__main__': #", "= yaml.load(file) for key in keys: config = config[key] return config if __name__", "def get(file_name,*keys,file_path=dir): yamlPath = os.path.join(file_path, file_name) file = open(yamlPath, 'r', encoding='utf-8') config =", "'r', encoding='utf-8') config = yaml.load(file) for key in keys: config = config[key] return", "os.path.join(file_path, file_name) file = open(yamlPath, 'r', encoding='utf-8') config = yaml.load(file) for key in", "'__main__': # wait_time = yaml_utils.get(\"constant.yaml\", \"wait_elements_time\") # driver = get(\"host\",\"url_regerister\") # driver2 =", "encoding='utf-8') config = yaml.load(file) for key in keys: config = config[key] 
return config", "# driver2 = get_url(\"constant.yaml\",\"host\") driver2 = get(\"constant.yaml\",\"test1\",\"test2\",\"test33\") print(driver2) # a = (1,2) #", "os fileNamePath = os.path.split(os.path.realpath(__file__))[0] dir = os.path.join(fileNamePath,'../conf') def get(file_name,*keys,file_path=dir): yamlPath = os.path.join(file_path, file_name)", "get(file_name,*keys,file_path=dir): yamlPath = os.path.join(file_path, file_name) file = open(yamlPath, 'r', encoding='utf-8') config = yaml.load(file)", "import os fileNamePath = os.path.split(os.path.realpath(__file__))[0] dir = os.path.join(fileNamePath,'../conf') def get(file_name,*keys,file_path=dir): yamlPath = os.path.join(file_path,", "file = open(yamlPath, 'r', encoding='utf-8') config = yaml.load(file) for key in keys: config" ]
[ "characters\" id = \"CKV_OCI_18\" supported_resources = ['oci_identity_authentication_policy'] categories = [CheckCategories.IAM] super().__init__(name=name, id=id, categories=categories,", "categories = [CheckCategories.IAM] super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources) def scan_resource_conf(self, conf): if 'password_policy' in", "conf.keys(): self.evaluated_keys = [\"password_policy\"] rules = conf.get(\"password_policy\")[0] if 'minimum_password_length' in rules: passwordlength =", "rules.get(\"minimum_password_length\") if isinstance(passwordlength[0], int) and passwordlength[0] < 14: self.evaluated_keys = [\"password_policy/minimum_password_length\"] return CheckResult.FAILED", "if 'password_policy' in conf.keys(): self.evaluated_keys = [\"password_policy\"] rules = conf.get(\"password_policy\")[0] if 'minimum_password_length' in", "users has a minimum length of 14 characters\" id = \"CKV_OCI_18\" supported_resources =", "CheckResult from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck class IAMPasswordLength(BaseResourceCheck): def __init__(self): name = \"OCI IAM", "(non-federated) users has a minimum length of 14 characters\" id = \"CKV_OCI_18\" supported_resources", "a minimum length of 14 characters\" id = \"CKV_OCI_18\" supported_resources = ['oci_identity_authentication_policy'] categories", "rules: passwordlength = rules.get(\"minimum_password_length\") if isinstance(passwordlength[0], int) and passwordlength[0] < 14: self.evaluated_keys =", "of 14 characters\" id = \"CKV_OCI_18\" supported_resources = ['oci_identity_authentication_policy'] categories = [CheckCategories.IAM] super().__init__(name=name,", "from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck class IAMPasswordLength(BaseResourceCheck): def __init__(self): name = \"OCI IAM password", "IAM password policy for local (non-federated) users has a minimum length of 14", "< 14: 
class IAMPasswordLength(BaseResourceCheck):
    """CKV_OCI_18: OCI IAM password policy for local (non-federated) users
    must enforce a minimum password length of at least 14 characters."""

    def __init__(self):
        name = "OCI IAM password policy for local (non-federated) users has a minimum length of 14 characters"
        id = "CKV_OCI_18"
        supported_resources = ['oci_identity_authentication_policy']
        categories = [CheckCategories.IAM]
        super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)

    def scan_resource_conf(self, conf):
        """Inspect one oci_identity_authentication_policy resource.

        :param conf: parsed resource configuration; attribute values are
            wrapped in single-element lists by the HCL parser.
        :return: CheckResult.PASSED when minimum_password_length >= 14,
            CheckResult.FAILED otherwise (including a missing policy block).
        """
        # Guard with a truthiness check instead of indexing blindly:
        # an empty list for "password_policy" would raise IndexError.
        policy = conf.get("password_policy")
        if not policy:
            # No password policy configured at all -> fail.
            return CheckResult.FAILED
        self.evaluated_keys = ["password_policy"]
        rules = policy[0]
        if 'minimum_password_length' not in rules:
            return CheckResult.FAILED
        passwordlength = rules.get("minimum_password_length")
        # Only a concrete int below 14 fails; non-int values (e.g. an
        # unresolved variable reference) pass, matching prior behavior.
        if passwordlength and isinstance(passwordlength[0], int) and passwordlength[0] < 14:
            self.evaluated_keys = ["password_policy/minimum_password_length"]
            return CheckResult.FAILED
        return CheckResult.PASSED


check = IAMPasswordLength()
[ "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,", "os import sys import argparse import textwrap import traceback from common.common import report,", "@capture_exception_and_abort def run(self): TestRun.run(self) g = GeppettoExecutableTest() g.run() if __name__ == '__main__': main()", "MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "__/ |_) | |_) | __/ |_| || (_) | \\_____|\\___| .__/| .__/", "except: report(\"Unable to import the config file: %s\" % args.config, 'critical', no_date=True) print(traceback.print_exc())", "file: %s' % args.test_file, 'critical', no_date=True) print(traceback.print_exc()) sys.exit(1) # Import the config file.", "usage = \"\"\" \"\"\" print(title) print(license) print(usage) def main(): args = parse_args() #", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "(_) | \\_____|\\___| .__/| .__/ \\___|\\__|\\__\\___/ | | | | |_| |_| The", "report(\"Unable to import the config file: %s\" % args.config, 'critical', no_date=True) print(traceback.print_exc()) sys.exit(1)", "main(): args = parse_args() # Import the test file. try: test_file_name = args.test_file", "mod = __import__(config_file, fromlist=['CONFIG_DICT']) config_dict = getattr(mod, 'CONFIG_DICT') except: report(\"Unable to import the", "this software and associated documentation files (the \"Software\"), to deal in the Software", "OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "def __init__(self): Geppetto.__init__(self) TestRun.set_init_params(self, config_dict, args, test_file_name, config_file_name) @capture_exception_and_abort def run(self): TestRun.run(self) g", "THE SOFTWARE.\"\"\" license = '%s\\n%s\\n%s' % ('*' * 70, textwrap.fill(license, 70), '*' *", "___| |_| |_ ____ | | |_ |/ _ \\ '_ \\| '_", "THE SOFTWARE. 
\"\"\" import os import sys import argparse import textwrap import traceback", "OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "'_ \\ / _ \\ __| __/ _ | | |__| | __/", "the Software without restriction, including without limitation the rights to use, copy, modify,", "person obtaining a copy of this software and associated documentation files (the \"Software\"),", "the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies", "'--email', help=\"Email to send results to.\") return parser.parse_args() def do_welcome(): title = \"\"\"", "parser.add_argument('-e', '--email', help=\"Email to send results to.\") return parser.parse_args() def do_welcome(): title =", "\"\"\" license = \"\"\"THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "sys import argparse import textwrap import traceback from common.common import report, capture_exception_and_abort from", "| |_| |_| The Cloud Maestro \"\"\" license = \"\"\"THE SOFTWARE IS PROVIDED", "GeppettoExecutableTest(TestRun): def __init__(self): Geppetto.__init__(self) TestRun.set_init_params(self, config_dict, args, test_file_name, config_file_name) @capture_exception_and_abort def run(self): TestRun.run(self)", "without restriction, including without limitation the rights to use, copy, modify, merge, publish,", "merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit", "sys.exit(1) # Import the config file. try: config_file_name = args.config config_file = config_file_name[:-3].replace('/',", "IN THE SOFTWARE. \"\"\" import os import sys import argparse import textwrap import", "Import the test file. 
try: test_file_name = args.test_file test_file = test_file_name[:-3].replace('/', '.') mod", "required=True, help=\"Test file.\") parser.add_argument('-c', '--config', required=True, help=\"Configuration file.\") parser.add_argument('-e', '--email', help=\"Email to send", "config_file_name[:-3].replace('/', '.') mod = __import__(config_file, fromlist=['CONFIG_DICT']) config_dict = getattr(mod, 'CONFIG_DICT') except: report(\"Unable to", "<reponame>ilyavinn/geppetto \"\"\" The MIT License (MIT) Copyright (c) <NAME>, Inc. 2015. Permission is", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\"\"\"", "argparse import textwrap import traceback from common.common import report, capture_exception_and_abort from common.geppetto import", "in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED", "print(title) print(license) print(usage) def main(): args = parse_args() # Import the test file.", "sublicense, and/or sell copies of the Software, and to permit persons to whom", "this permission notice shall be included in all copies or substantial portions of", "_ \\ '_ \\| '_ \\ / _ \\ __| __/ _ |", "\"\"\"THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to", "ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "TestRun() from file: %s' % args.test_file, 'critical', no_date=True) print(traceback.print_exc()) sys.exit(1) # Import the", "'.') mod = __import__(test_file, fromlist=['TestRun']) TestRun = getattr(mod, 'TestRun') except: report('Unable to load", "|_ ____ | | |_ |/ _ \\ '_ \\| '_ \\ /", "to send results to.\") return parser.parse_args() def do_welcome(): title = \"\"\" _____ _", "WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A", "notice and this permission notice shall be included in all copies or substantial", "|| (_) | \\_____|\\___| .__/| .__/ \\___|\\__|\\__\\___/ | | | | |_| |_|", "<NAME>, Inc. 2015. Permission is hereby granted, free of charge, to any person", "TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", "| | |_ |/ _ \\ '_ \\| '_ \\ / _ \\", "charge, to any person obtaining a copy of this software and associated documentation", "|__| | __/ |_) | |_) | __/ |_| || (_) | \\_____|\\___|", "% args.test_file, 'critical', no_date=True) print(traceback.print_exc()) sys.exit(1) # Import the config file. try: config_file_name", "KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "file.\") parser.add_argument('-c', '--config', required=True, help=\"Configuration file.\") parser.add_argument('-e', '--email', help=\"Email to send results to.\")", "% ('*' * 70, textwrap.fill(license, 70), '*' * 70,) usage = \"\"\" \"\"\"", "= args.test_file test_file = test_file_name[:-3].replace('/', '.') mod = __import__(test_file, fromlist=['TestRun']) TestRun = getattr(mod,", "help=\"Test file.\") parser.add_argument('-c', '--config', required=True, help=\"Configuration file.\") parser.add_argument('-e', '--email', help=\"Email to send results", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\"\"\" license", "| | | | __ ___ _ __ _ __ ___| |_| |_", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION", "file: %s\" % args.config, 'critical', no_date=True) print(traceback.print_exc()) sys.exit(1) do_welcome() class GeppettoExecutableTest(TestRun): def __init__(self):", "test_file_name[:-3].replace('/', '.') mod = __import__(test_file, fromlist=['TestRun']) TestRun = getattr(mod, 'TestRun') except: report('Unable to", 
"persons to whom the Software is furnished to do so, subject to the", "'CONFIG_DICT') except: report(\"Unable to import the config file: %s\" % args.config, 'critical', no_date=True)", "Software is furnished to do so, subject to the following conditions: The above", "IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "= test_file_name[:-3].replace('/', '.') mod = __import__(test_file, fromlist=['TestRun']) TestRun = getattr(mod, 'TestRun') except: report('Unable", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND", "_____ _ _ / ____| | | | | | | __ ___", "__/ _ | | |__| | __/ |_) | |_) | __/ |_|", "__ ___ _ __ _ __ ___| |_| |_ ____ | | |_", "to deal in the Software without restriction, including without limitation the rights to", "parser.parse_args() def do_welcome(): title = \"\"\" _____ _ _ / ____| | |", "config_file_name) @capture_exception_and_abort def run(self): TestRun.run(self) g = GeppettoExecutableTest() g.run() if __name__ == '__main__':", "import report, capture_exception_and_abort from common.geppetto import Geppetto def parse_args(): parser = argparse.ArgumentParser() parser.add_argument('-t',", "'.') mod = __import__(config_file, fromlist=['CONFIG_DICT']) config_dict = getattr(mod, 'CONFIG_DICT') except: report(\"Unable to import", "to whom the Software is furnished to do so, subject to the following", "/ _ \\ __| __/ _ | | |__| | __/ |_) |", "documentation files (the \"Software\"), to deal in the Software without restriction, including without", "files (the \"Software\"), to deal in the Software without restriction, including without limitation", "'critical', no_date=True) print(traceback.print_exc()) sys.exit(1) # Import the config file. 
def parse_args():
    """Parse the runner's command-line options.

    Returns an argparse.Namespace with ``test_file``, ``config`` and
    ``email`` attributes.
    """
    arg_parser = argparse.ArgumentParser()
    # (short flag, long flag, required?, help text) for every option.
    option_table = (
        ('-t', '--test_file', True, "Test file."),
        ('-c', '--config', True, "Configuration file."),
        ('-e', '--email', False, "Email to send results to."),
    )
    for short_flag, long_flag, needed, text in option_table:
        arg_parser.add_argument(short_flag, long_flag, required=needed, help=text)
    return arg_parser.parse_args()
def do_welcome():
    """Print the startup banner: title art, wrapped license notice, usage.

    Returns None; output goes to stdout only.
    """
    title = """  _____                       _   _
 / ____|                     | | | |
| |  __  ___ _ __  _ __   ___| |_| |_ ____
| | |_ |/ _ \\ '_ \\| '_ \\ / _ \\ __| __/ _ |
| |__| |  __/ |_) | |_) |  __/ |_| || (_) |
 \\_____|\\___| .__/| .__/ \\___|\\__|\\__\\___/
            | |   | |
            |_|   |_|
 The Cloud Maestro
   """
    # Renamed from `license` to avoid shadowing the `license` builtin.
    license_text = """THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE."""
    # Frame the wrapped license between two 70-char separator rules.
    license_text = '%s\n%s\n%s' % ('*' * 70, textwrap.fill(license_text, 70), '*' * 70,)
    usage = """
    """
    print(title)
    print(license_text)
    print(usage)
def main():
    """Entry point: load the test and config modules named on the command
    line, print the banner, then construct and run the test.

    Exits with status 1 if either module fails to import.
    """
    args = parse_args()

    # Import the test file.
    try:
        test_file_name = args.test_file
        # Strip the trailing '.py' and convert the path to a dotted module
        # name so __import__ can load it (assumes a *.py argument).
        test_file = test_file_name[:-3].replace('/', '.')
        mod = __import__(test_file, fromlist=['TestRun'])
        TestRun = getattr(mod, 'TestRun')
    # `except Exception` instead of a bare `except:` so SystemExit and
    # KeyboardInterrupt are not swallowed.
    except Exception:
        report('Unable to load TestRun() from file: %s' % args.test_file,
               'critical', no_date=True)
        # print_exc() already writes the traceback; wrapping it in print()
        # only emitted a spurious "None" line.
        traceback.print_exc()
        sys.exit(1)

    # Import the config file.
    try:
        config_file_name = args.config
        config_file = config_file_name[:-3].replace('/', '.')
        mod = __import__(config_file, fromlist=['CONFIG_DICT'])
        config_dict = getattr(mod, 'CONFIG_DICT')
    except Exception:
        report("Unable to import the config file: %s" % args.config,
               'critical', no_date=True)
        traceback.print_exc()
        sys.exit(1)

    do_welcome()

    class GeppettoExecutableTest(TestRun):
        """Concrete TestRun bound to the loaded config and CLI arguments."""

        def __init__(self):
            Geppetto.__init__(self)
            TestRun.set_init_params(self, config_dict, args, test_file_name,
                                    config_file_name)

        @capture_exception_and_abort
        def run(self):
            TestRun.run(self)

    g = GeppettoExecutableTest()
    g.run()


if __name__ == '__main__':
    main()
above copyright notice and this permission notice shall be included in", "= '%s\\n%s\\n%s' % ('*' * 70, textwrap.fill(license, 70), '*' * 70,) usage =", "SOFTWARE.\"\"\" license = '%s\\n%s\\n%s' % ('*' * 70, textwrap.fill(license, 70), '*' * 70,)", "textwrap import traceback from common.common import report, capture_exception_and_abort from common.geppetto import Geppetto def", "THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "'critical', no_date=True) print(traceback.print_exc()) sys.exit(1) do_welcome() class GeppettoExecutableTest(TestRun): def __init__(self): Geppetto.__init__(self) TestRun.set_init_params(self, config_dict, args,", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER", "Permission is hereby granted, free of charge, to any person obtaining a copy", "___ _ __ _ __ ___| |_| |_ ____ | | |_ |/", "= argparse.ArgumentParser() parser.add_argument('-t', '--test_file', required=True, help=\"Test file.\") parser.add_argument('-c', '--config', required=True, help=\"Configuration file.\") parser.add_argument('-e',", "be included in all copies or substantial portions of the Software. THE SOFTWARE", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "whom the Software is furnished to do so, subject to the following conditions:", "test_file = test_file_name[:-3].replace('/', '.') mod = __import__(test_file, fromlist=['TestRun']) TestRun = getattr(mod, 'TestRun') except:", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \"\"\"", "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "from file: %s' % args.test_file, 'critical', no_date=True) print(traceback.print_exc()) sys.exit(1) # Import the config", "getattr(mod, 'TestRun') except: report('Unable to load TestRun() from file: %s' % args.test_file, 'critical',", "do_welcome(): title = \"\"\" _____ _ _ / ____| | | | |", "args = parse_args() # Import the test file. 
try: test_file_name = args.test_file test_file", "portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "\\ __| __/ _ | | |__| | __/ |_) | |_) |", "parse_args() # Import the test file. try: test_file_name = args.test_file test_file = test_file_name[:-3].replace('/',", "/ ____| | | | | | | __ ___ _ __ _", "DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "required=True, help=\"Configuration file.\") parser.add_argument('-e', '--email', help=\"Email to send results to.\") return parser.parse_args() def", "# Import the test file. try: test_file_name = args.test_file test_file = test_file_name[:-3].replace('/', '.')", "distribute, sublicense, and/or sell copies of the Software, and to permit persons to", "of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "help=\"Email to send results to.\") return parser.parse_args() def do_welcome(): title = \"\"\" _____", "software and associated documentation files (the \"Software\"), to deal in the Software without", "try: config_file_name = args.config config_file = config_file_name[:-3].replace('/', '.') mod = __import__(config_file, fromlist=['CONFIG_DICT']) config_dict", "no_date=True) print(traceback.print_exc()) sys.exit(1) do_welcome() class GeppettoExecutableTest(TestRun): def __init__(self): Geppetto.__init__(self) TestRun.set_init_params(self, config_dict, args, test_file_name,", "__ _ __ ___| |_| |_ ____ | | |_ |/ _ \\", "print(traceback.print_exc()) sys.exit(1) # Import the config file. try: config_file_name = args.config config_file =", "70), '*' * 70,) usage = \"\"\" \"\"\" print(title) print(license) print(usage) def main():", "_ \\ __| __/ _ | | |__| | __/ |_) | |_)", "argparse.ArgumentParser() parser.add_argument('-t', '--test_file', required=True, help=\"Test file.\") parser.add_argument('-c', '--config', required=True, help=\"Configuration file.\") parser.add_argument('-e', '--email',", "test file. 
try: test_file_name = args.test_file test_file = test_file_name[:-3].replace('/', '.') mod = __import__(test_file,", "load TestRun() from file: %s' % args.test_file, 'critical', no_date=True) print(traceback.print_exc()) sys.exit(1) # Import", "Import the config file. try: config_file_name = args.config config_file = config_file_name[:-3].replace('/', '.') mod", "= __import__(config_file, fromlist=['CONFIG_DICT']) config_dict = getattr(mod, 'CONFIG_DICT') except: report(\"Unable to import the config", "| | | | |_| |_| The Cloud Maestro \"\"\" license = \"\"\"THE", "= parse_args() # Import the test file. try: test_file_name = args.test_file test_file =", "shall be included in all copies or substantial portions of the Software. THE", "| | | |_| |_| The Cloud Maestro \"\"\" license = \"\"\"THE SOFTWARE", "NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "help=\"Configuration file.\") parser.add_argument('-e', '--email', help=\"Email to send results to.\") return parser.parse_args() def do_welcome():", "70, textwrap.fill(license, 70), '*' * 70,) usage = \"\"\" \"\"\" print(title) print(license) print(usage)", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "print(license) print(usage) def main(): args = parse_args() # Import the test file. try:", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "\"\"\" print(title) print(license) print(usage) def main(): args = parse_args() # Import the test", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT", "Inc. 2015. Permission is hereby granted, free of charge, to any person obtaining", "the Software is furnished to do so, subject to the following conditions: The", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT", "OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\"\"\" import os import", "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS", "\\_____|\\___| .__/| .__/ \\___|\\__|\\__\\___/ | | | | |_| |_| The Cloud Maestro", "report('Unable to load TestRun() from file: %s' % args.test_file, 'critical', no_date=True) print(traceback.print_exc()) sys.exit(1)", "\\ / _ \\ __| __/ _ | | |__| | __/ |_)", "subject to the following conditions: The above copyright notice and this permission notice", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE", "is hereby granted, free of charge, to any person obtaining a copy of", "OTHER DEALINGS IN THE SOFTWARE.\"\"\" license = '%s\\n%s\\n%s' % ('*' * 70, textwrap.fill(license,", "and associated documentation files (the \"Software\"), to deal in the Software without restriction,", "FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or", "send results to.\") return parser.parse_args() def do_welcome(): title = \"\"\" _____ _ _", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "The Cloud Maestro \"\"\" license = \"\"\"THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "parse_args(): parser = argparse.ArgumentParser() parser.add_argument('-t', '--test_file', required=True, help=\"Test file.\") parser.add_argument('-c', '--config', required=True, help=\"Configuration", "|/ _ \\ '_ \\| '_ \\ / _ \\ __| __/ _", "to load TestRun() from file: %s' % args.test_file, 'critical', no_date=True) print(traceback.print_exc()) sys.exit(1) #", "| __/ |_) | |_) | __/ |_| || (_) | \\_____|\\___| .__/|", "print(traceback.print_exc()) sys.exit(1) do_welcome() class GeppettoExecutableTest(TestRun): def __init__(self): Geppetto.__init__(self) TestRun.set_init_params(self, config_dict, args, test_file_name, config_file_name)", "hereby granted, free of charge, to any person obtaining a copy of this", 
"CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE", ".__/ \\___|\\__|\\__\\___/ | | | | |_| |_| The Cloud Maestro \"\"\" license", "Maestro \"\"\" license = \"\"\"THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "getattr(mod, 'CONFIG_DICT') except: report(\"Unable to import the config file: %s\" % args.config, 'critical',", "restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute,", "print(usage) def main(): args = parse_args() # Import the test file. try: test_file_name", "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR", "= \"\"\" \"\"\" print(title) print(license) print(usage) def main(): args = parse_args() # Import", "to the following conditions: The above copyright notice and this permission notice shall", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "Software, and to permit persons to whom the Software is furnished to do", "USE OR OTHER DEALINGS IN THE SOFTWARE.\"\"\" license = '%s\\n%s\\n%s' % ('*' *", "(MIT) Copyright (c) <NAME>, Inc. 2015. 
Permission is hereby granted, free of charge,", "config file: %s\" % args.config, 'critical', no_date=True) print(traceback.print_exc()) sys.exit(1) do_welcome() class GeppettoExecutableTest(TestRun): def", "TestRun = getattr(mod, 'TestRun') except: report('Unable to load TestRun() from file: %s' %", "| |_) | __/ |_| || (_) | \\_____|\\___| .__/| .__/ \\___|\\__|\\__\\___/ |", "from common.common import report, capture_exception_and_abort from common.geppetto import Geppetto def parse_args(): parser =", "'--config', required=True, help=\"Configuration file.\") parser.add_argument('-e', '--email', help=\"Email to send results to.\") return parser.parse_args()", "parser.add_argument('-t', '--test_file', required=True, help=\"Test file.\") parser.add_argument('-c', '--config', required=True, help=\"Configuration file.\") parser.add_argument('-e', '--email', help=\"Email", "license = '%s\\n%s\\n%s' % ('*' * 70, textwrap.fill(license, 70), '*' * 70,) usage", "the config file. try: config_file_name = args.config config_file = config_file_name[:-3].replace('/', '.') mod =", "__| __/ _ | | |__| | __/ |_) | |_) | __/" ]
[ "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,", "proc_shape, dtype, Stepper, timing=False): if proc_shape != (1, 1, 1): pytest.skip(\"test expansion only", "MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "slc = () if is_low_storage else (0) order = expand.stepper.expected_order rtol = dt**order", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED", "import parser args = parser.parse_args() from pystella.step import all_steppers for stepper in all_steppers[-5:]:", "1/3, 1/2, 1, -1/4]: def energy(a): return a**(-3-3*w) def pressure(a): return w *", "this software and associated documentation files (the \"Software\"), to deal in the Software", "OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "@pytest.mark.parametrize(\"dtype\", [np.float64]) @pytest.mark.parametrize(\"Stepper\", [ps.RungeKutta4, ps.LowStorageRK54]) def test_expansion(ctx_factory, proc_shape, dtype, Stepper, timing=False): if proc_shape", "\"__main__\": from common import parser args = parser.parse_args() from pystella.step import all_steppers for", "for {w=}\" assert expand.constraint(energy(expand.a[slc])) < rtol, \\ f\"FLRW solution disobeying constraint for {w=}\"", "__copyright__ = \"Copyright (C) 2019 <NAME>\" __license__ = \"\"\" Permission is hereby granted,", "1, 1): pytest.skip(\"test expansion only on one rank\") def sol(w, t): x =", "OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING", "CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "is_low_storage else (0 if s == 0 else 1) expand.step(s, energy(expand.a[slc]), pressure(expand.a[slc]), dt)", "the Software without restriction, including without limitation the rights to use, copy, modify,", "__license__ = \"\"\" Permission is hereby granted, free of charge, to any person", "person obtaining a copy of this software and associated 
documentation files (the \"Software\"),", "SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \"\"\" import numpy", "the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies", "2019 <NAME>\" __license__ = \"\"\" Permission is hereby granted, free of charge, to", "dt slc = () if is_low_storage else (0) order = expand.stepper.expected_order rtol =", "def test_expansion(ctx_factory, proc_shape, dtype, Stepper, timing=False): if proc_shape != (1, 1, 1): pytest.skip(\"test", "without restriction, including without limitation the rights to use, copy, modify, merge, publish,", "merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit", "is_low_storage else (0) order = expand.stepper.expected_order rtol = dt**order print(order, w, expand.a[slc]/sol(w, t)", "ps import pytest from pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl as pytest_generate_tests) @pytest.mark.parametrize(\"dtype\",", "in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED", "t += dt slc = () if is_low_storage else (0) order = expand.stepper.expected_order", "= .005 expand = ps.Expansion(energy(1.), Stepper, mpl=np.sqrt(8.*np.pi)) while t <= 10. 
- dt:", "sublicense, and/or sell copies of the Software, and to permit persons to whom", "this permission notice shall be included in all copies or substantial portions of", "dt: for s in range(expand.stepper.num_stages): slc = (0) if is_low_storage else (0 if", "modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to", "proc_shape != (1, 1, 1): pytest.skip(\"test expansion only on one rank\") def sol(w,", "\\ f\"FLRW solution disobeying constraint for {w=}\" if __name__ == \"__main__\": from common", "import pystella as ps import pytest from pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl", "ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "for s in range(expand.stepper.num_stages): slc = (0) if is_low_storage else (0 if s", "constraint for {w=}\" if __name__ == \"__main__\": from common import parser args =", "WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A", "notice and this permission notice shall be included in all copies or substantial", "TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", "pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl as pytest_generate_tests) @pytest.mark.parametrize(\"dtype\", [np.float64]) @pytest.mark.parametrize(\"Stepper\", [ps.RungeKutta4, ps.LowStorageRK54])", "charge, to any person obtaining a copy of this software and associated documentation", "solution disobeying constraint for {w=}\" if __name__ == \"__main__\": from common import parser", "(0 if s == 0 else 1) expand.step(s, energy(expand.a[slc]), pressure(expand.a[slc]), dt) t +=", "KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", ".005 expand = ps.Expansion(energy(1.), Stepper, 
mpl=np.sqrt(8.*np.pi)) while t <= 10. - dt: for", "SOFTWARE. \"\"\" import numpy as np import pystella as ps import pytest from", "def energy(a): return a**(-3-3*w) def pressure(a): return w * energy(a) t = 0", "f\"FLRW solution inaccurate for {w=}\" assert expand.constraint(energy(expand.a[slc])) < rtol, \\ f\"FLRW solution disobeying", "numpy as np import pystella as ps import pytest from pyopencl.tools import (", "slc = (0) if is_low_storage else (0 if s == 0 else 1)", "expand.stepper.expected_order rtol = dt**order print(order, w, expand.a[slc]/sol(w, t) - 1, expand.constraint(energy(expand.a[slc]))) assert np.allclose(expand.a[slc],", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION", "1/2, 1, -1/4]: def energy(a): return a**(-3-3*w) def pressure(a): return w * energy(a)", "persons to whom the Software is furnished to do so, subject to the", "order = expand.stepper.expected_order rtol = dt**order print(order, w, expand.a[slc]/sol(w, t) - 1, expand.constraint(energy(expand.a[slc])))", "{w=}\" assert expand.constraint(energy(expand.a[slc])) < rtol, \\ f\"FLRW solution disobeying constraint for {w=}\" if", "Software is furnished to do so, subject to the following conditions: The above", "IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND", "print(order, w, expand.a[slc]/sol(w, t) - 1, expand.constraint(energy(expand.a[slc]))) assert np.allclose(expand.a[slc], sol(w, t), rtol=rtol, atol=0),", "energy(expand.a[slc]), pressure(expand.a[slc]), dt) t += dt slc = () if is_low_storage else (0)", "USE OR OTHER DEALINGS IN THE SOFTWARE. 
\"\"\" import numpy as np import", "pressure(a): return w * energy(a) t = 0 dt = .005 expand =", "in range(expand.stepper.num_stages): slc = (0) if is_low_storage else (0 if s == 0", "only on one rank\") def sol(w, t): x = (1 + 3*w) return", "to deal in the Software without restriction, including without limitation the rights to", "dtype, Stepper, timing=False): if proc_shape != (1, 1, 1): pytest.skip(\"test expansion only on", "def sol(w, t): x = (1 + 3*w) return (x*(t/np.sqrt(3) + 2/x))**(2/x)/2**(2/x) from", "to whom the Software is furnished to do so, subject to the following", "parser.parse_args() from pystella.step import all_steppers for stepper in all_steppers[-5:]: test_expansion( None, proc_shape=args.proc_shape, dtype=args.dtype,", "documentation files (the \"Software\"), to deal in the Software without restriction, including without", "files (the \"Software\"), to deal in the Software without restriction, including without limitation", "OTHER DEALINGS IN THE SOFTWARE. \"\"\" import numpy as np import pystella as", "Software without restriction, including without limitation the rights to use, copy, modify, merge,", "+ 2/x))**(2/x)/2**(2/x) from pystella.step import LowStorageRKStepper is_low_storage = LowStorageRKStepper in Stepper.__bases__ for w", "to do so, subject to the following conditions: The above copyright notice and", "[np.float64]) @pytest.mark.parametrize(\"Stepper\", [ps.RungeKutta4, ps.LowStorageRK54]) def test_expansion(ctx_factory, proc_shape, dtype, Stepper, timing=False): if proc_shape !=", "in the Software without restriction, including without limitation the rights to use, copy,", "ps.LowStorageRK54]) def test_expansion(ctx_factory, proc_shape, dtype, Stepper, timing=False): if proc_shape != (1, 1, 1):", "= parser.parse_args() from pystella.step import all_steppers for stepper in all_steppers[-5:]: test_expansion( None, proc_shape=args.proc_shape,", "f\"FLRW solution disobeying constraint for {w=}\" if __name__ == \"__main__\": from 
common import", "the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "to any person obtaining a copy of this software and associated documentation files", "(1 + 3*w) return (x*(t/np.sqrt(3) + 2/x))**(2/x)/2**(2/x) from pystella.step import LowStorageRKStepper is_low_storage =", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN", "\"\"\" Permission is hereby granted, free of charge, to any person obtaining a", "a copy of this software and associated documentation files (the \"Software\"), to deal", "Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "THE SOFTWARE. \"\"\" import numpy as np import pystella as ps import pytest", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF", "pytest_generate_tests) @pytest.mark.parametrize(\"dtype\", [np.float64]) @pytest.mark.parametrize(\"Stepper\", [ps.RungeKutta4, ps.LowStorageRK54]) def test_expansion(ctx_factory, proc_shape, dtype, Stepper, timing=False): if", "< rtol, \\ f\"FLRW solution disobeying constraint for {w=}\" if __name__ == \"__main__\":", "mpl=np.sqrt(8.*np.pi)) while t <= 10. 
- dt: for s in range(expand.stepper.num_stages): slc =", "is_low_storage = LowStorageRKStepper in Stepper.__bases__ for w in [0, 1/3, 1/2, 1, -1/4]:", "= () if is_low_storage else (0) order = expand.stepper.expected_order rtol = dt**order print(order,", "expand.constraint(energy(expand.a[slc]))) assert np.allclose(expand.a[slc], sol(w, t), rtol=rtol, atol=0), \\ f\"FLRW solution inaccurate for {w=}\"", "free of charge, to any person obtaining a copy of this software and", "and this permission notice shall be included in all copies or substantial portions", "( # noqa pytest_generate_tests_for_pyopencl as pytest_generate_tests) @pytest.mark.parametrize(\"dtype\", [np.float64]) @pytest.mark.parametrize(\"Stepper\", [ps.RungeKutta4, ps.LowStorageRK54]) def test_expansion(ctx_factory,", "and to permit persons to whom the Software is furnished to do so,", "Stepper.__bases__ for w in [0, 1/3, 1/2, 1, -1/4]: def energy(a): return a**(-3-3*w)", "rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of", "as ps import pytest from pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl as pytest_generate_tests)", "def pressure(a): return w * energy(a) t = 0 dt = .005 expand", "w, expand.a[slc]/sol(w, t) - 1, expand.constraint(energy(expand.a[slc]))) assert np.allclose(expand.a[slc], sol(w, t), rtol=rtol, atol=0), \\", "THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\"\"\" import numpy as np", "EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "[0, 1/3, 1/2, 1, -1/4]: def energy(a): return a**(-3-3*w) def pressure(a): return w", "pystella.step import all_steppers for stepper in all_steppers[-5:]: test_expansion( None, proc_shape=args.proc_shape, dtype=args.dtype, timing=args.timing, Stepper=stepper,", "associated documentation files (the \"Software\"), to deal in the Software without restriction, including", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,", "notice shall be included in all copies or substantial portions of the Software.", "while t <= 10. - dt: for s in range(expand.stepper.num_stages): slc = (0)", "Stepper, mpl=np.sqrt(8.*np.pi)) while t <= 10. - dt: for s in range(expand.stepper.num_stages): slc", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT", "copy of this software and associated documentation files (the \"Software\"), to deal in", "substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "(0) order = expand.stepper.expected_order rtol = dt**order print(order, w, expand.a[slc]/sol(w, t) - 1,", "assert np.allclose(expand.a[slc], sol(w, t), rtol=rtol, atol=0), \\ f\"FLRW solution inaccurate for {w=}\" assert", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION", "obtaining a copy of this software and associated documentation files (the \"Software\"), to", "w * energy(a) t = 0 dt = .005 expand = ps.Expansion(energy(1.), Stepper,", "TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN", "OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "\"Copyright (C) 2019 <NAME>\" __license__ = \"\"\" Permission is hereby granted, free of", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR", "OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,", "if __name__ == \"__main__\": from common import parser args = parser.parse_args() from pystella.step", "<NAME>\" __license__ = \"\"\" Permission is hereby granted, free of charge, to any", "pystella.step import LowStorageRKStepper is_low_storage = LowStorageRKStepper in Stepper.__bases__ for w in [0, 1/3,", "publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons", "including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,", "sol(w, t): x = (1 + 3*w) return (x*(t/np.sqrt(3) + 2/x))**(2/x)/2**(2/x) from pystella.step", "dt) t += dt slc = () if is_low_storage else (0) order =", "or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "DEALINGS IN THE SOFTWARE. \"\"\" import numpy as np import pystella as ps", "THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \"\"\" import", "expand.step(s, energy(expand.a[slc]), pressure(expand.a[slc]), dt) t += dt slc = () if is_low_storage else", "= \"Copyright (C) 2019 <NAME>\" __license__ = \"\"\" Permission is hereby granted, free", "all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS", "SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "energy(a) t = 0 dt = .005 expand = ps.Expansion(energy(1.), Stepper, mpl=np.sqrt(8.*np.pi)) while", "dt = .005 expand = ps.Expansion(energy(1.), Stepper, mpl=np.sqrt(8.*np.pi)) while t <= 10. 
-", "else 1) expand.step(s, energy(expand.a[slc]), pressure(expand.a[slc]), dt) t += dt slc = () if", "return w * energy(a) t = 0 dt = .005 expand = ps.Expansion(energy(1.),", "OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "= ps.Expansion(energy(1.), Stepper, mpl=np.sqrt(8.*np.pi)) while t <= 10. - dt: for s in", "disobeying constraint for {w=}\" if __name__ == \"__main__\": from common import parser args", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the", "!= (1, 1, 1): pytest.skip(\"test expansion only on one rank\") def sol(w, t):", "t), rtol=rtol, atol=0), \\ f\"FLRW solution inaccurate for {w=}\" assert expand.constraint(energy(expand.a[slc])) < rtol,", "2/x))**(2/x)/2**(2/x) from pystella.step import LowStorageRKStepper is_low_storage = LowStorageRKStepper in Stepper.__bases__ for w in", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN", "assert expand.constraint(energy(expand.a[slc])) < rtol, \\ f\"FLRW solution disobeying constraint for {w=}\" if __name__", "ps.Expansion(energy(1.), Stepper, mpl=np.sqrt(8.*np.pi)) while t <= 10. - dt: for s in range(expand.stepper.num_stages):", "ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL", "0 else 1) expand.step(s, energy(expand.a[slc]), pressure(expand.a[slc]), dt) t += dt slc = ()", "above copyright notice and this permission notice shall be included in all copies", "__name__ == \"__main__\": from common import parser args = parser.parse_args() from pystella.step import", "WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "= (1 + 3*w) return (x*(t/np.sqrt(3) + 2/x))**(2/x)/2**(2/x) from pystella.step import LowStorageRKStepper is_low_storage", "if is_low_storage else (0 if s == 0 else 1) expand.step(s, energy(expand.a[slc]), pressure(expand.a[slc]),", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS", "permission notice shall be included in all copies or substantial portions of the", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS", "expansion only on one rank\") def sol(w, t): x = (1 + 3*w)", "OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH", "the following conditions: The above copyright notice and this permission notice shall be", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT", "furnished to do so, subject to the following conditions: The above copyright notice", "+= dt slc = () if is_low_storage else (0) order = expand.stepper.expected_order rtol", "permit persons to whom the Software is furnished to do so, subject to", "any person obtaining a copy of this software and associated documentation files (the", "range(expand.stepper.num_stages): slc = (0) if is_low_storage else (0 if s == 0 else", "<= 10. 
- dt: for s in range(expand.stepper.num_stages): slc = (0) if is_low_storage", "copies of the Software, and to permit persons to whom the Software is", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.", "Stepper, timing=False): if proc_shape != (1, 1, 1): pytest.skip(\"test expansion only on one", "included in all copies or substantial portions of the Software. THE SOFTWARE IS", "copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and", "rtol, \\ f\"FLRW solution disobeying constraint for {w=}\" if __name__ == \"__main__\": from", "THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "pressure(expand.a[slc]), dt) t += dt slc = () if is_low_storage else (0) order", "w in [0, 1/3, 1/2, 1, -1/4]: def energy(a): return a**(-3-3*w) def pressure(a):", "= (0) if is_low_storage else (0 if s == 0 else 1) expand.step(s,", "the Software, and to permit persons to whom the Software is furnished to", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN", "3*w) return (x*(t/np.sqrt(3) + 2/x))**(2/x)/2**(2/x) from pystella.step import LowStorageRKStepper is_low_storage = LowStorageRKStepper in", "use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,", "= dt**order print(order, w, expand.a[slc]/sol(w, t) - 1, expand.constraint(energy(expand.a[slc]))) assert np.allclose(expand.a[slc], sol(w, t),", "following conditions: The above copyright notice and this permission notice shall be included", "pytest.skip(\"test expansion only on one rank\") def sol(w, t): x = (1 +", "if is_low_storage else (0) order = expand.stepper.expected_order rtol = dt**order print(order, w, expand.a[slc]/sol(w,", "copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\",", "else (0 if s == 0 else 1) expand.step(s, energy(expand.a[slc]), pressure(expand.a[slc]), dt) t", "NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "pytest from pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl as pytest_generate_tests) @pytest.mark.parametrize(\"dtype\", [np.float64]) @pytest.mark.parametrize(\"Stepper\",", "The above copyright notice and this permission notice shall be included in all", "1): pytest.skip(\"test expansion only on one rank\") def sol(w, t): x = (1", "-1/4]: def energy(a): return a**(-3-3*w) def pressure(a): return w * energy(a) t =", "import all_steppers for stepper in all_steppers[-5:]: test_expansion( None, proc_shape=args.proc_shape, dtype=args.dtype, timing=args.timing, Stepper=stepper, )", "in Stepper.__bases__ for w in [0, 1/3, 1/2, 1, -1/4]: def energy(a): return", "\"Software\"), to deal in the Software without restriction, including without limitation the rights", "deal in the Software without restriction, including without limitation the rights to use,", "granted, free of charge, to any person obtaining a copy of this software", "= 0 dt = .005 expand = ps.Expansion(energy(1.), Stepper, mpl=np.sqrt(8.*np.pi)) while t <=", "limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "of this software and associated documentation files (the \"Software\"), to deal in the", "rtol=rtol, atol=0), \\ f\"FLRW solution inaccurate for {w=}\" assert expand.constraint(energy(expand.a[slc])) < rtol, \\", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO", "sell copies of the Software, and to permit persons to whom the Software", "dt**order print(order, w, expand.a[slc]/sol(w, t) - 1, expand.constraint(energy(expand.a[slc]))) assert np.allclose(expand.a[slc], sol(w, t), rtol=rtol,", "parser args = parser.parse_args() from pystella.step import all_steppers for stepper in all_steppers[-5:]: test_expansion(", "from pystella.step import LowStorageRKStepper is_low_storage = LowStorageRKStepper in Stepper.__bases__ for w in [0,", "do so, subject to the following conditions: The above copyright notice and this", "0 dt = .005 expand = ps.Expansion(energy(1.), Stepper, mpl=np.sqrt(8.*np.pi)) while t <= 10.", "energy(a): return a**(-3-3*w) def pressure(a): return w * energy(a) t = 0 dt", "import LowStorageRKStepper is_low_storage = LowStorageRKStepper in Stepper.__bases__ for w in [0, 1/3, 1/2,", "[ps.RungeKutta4, ps.LowStorageRK54]) def test_expansion(ctx_factory, proc_shape, dtype, Stepper, timing=False): if proc_shape != (1, 1,", "(x*(t/np.sqrt(3) + 2/x))**(2/x)/2**(2/x) from pystella.step import LowStorageRKStepper is_low_storage = LowStorageRKStepper in Stepper.__bases__ for", "return a**(-3-3*w) def pressure(a): return w * energy(a) t = 0 dt =", "is furnished to do so, subject to the following conditions: The above copyright", "t = 0 dt = .005 expand = ps.Expansion(energy(1.), Stepper, mpl=np.sqrt(8.*np.pi)) while t", "so, subject to the following conditions: The above copyright notice and this permission", "from pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl as 
pytest_generate_tests) @pytest.mark.parametrize(\"dtype\", [np.float64]) @pytest.mark.parametrize(\"Stepper\", [ps.RungeKutta4,", "1, -1/4]: def energy(a): return a**(-3-3*w) def pressure(a): return w * energy(a) t", "- dt: for s in range(expand.stepper.num_stages): slc = (0) if is_low_storage else (0", "from common import parser args = parser.parse_args() from pystella.step import all_steppers for stepper", "if s == 0 else 1) expand.step(s, energy(expand.a[slc]), pressure(expand.a[slc]), dt) t += dt", "from pystella.step import all_steppers for stepper in all_steppers[-5:]: test_expansion( None, proc_shape=args.proc_shape, dtype=args.dtype, timing=args.timing,", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR", "expand.a[slc]/sol(w, t) - 1, expand.constraint(energy(expand.a[slc]))) assert np.allclose(expand.a[slc], sol(w, t), rtol=rtol, atol=0), \\ f\"FLRW", "atol=0), \\ f\"FLRW solution inaccurate for {w=}\" assert expand.constraint(energy(expand.a[slc])) < rtol, \\ f\"FLRW", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR", "+ 3*w) return (x*(t/np.sqrt(3) + 2/x))**(2/x)/2**(2/x) from pystella.step import LowStorageRKStepper is_low_storage = LowStorageRKStepper", "of the Software, and to permit persons to whom the Software is furnished", "== \"__main__\": from common import parser args = parser.parse_args() from pystella.step import all_steppers", "(C) 2019 <NAME>\" __license__ = \"\"\" Permission is hereby granted, free of charge,", "and/or sell copies of the Software, and to permit persons to whom the", "import numpy as np import pystella as ps import pytest from pyopencl.tools import", "== 0 else 1) expand.step(s, energy(expand.a[slc]), pressure(expand.a[slc]), dt) t += dt slc =", "on one rank\") def sol(w, t): x = (1 + 3*w) return (x*(t/np.sqrt(3)", "of charge, to any person obtaining a copy of this software and associated", "(the \"Software\"), to deal in the Software without 
restriction, including without limitation the", "copyright notice and this permission notice shall be included in all copies or", "\"\"\" import numpy as np import pystella as ps import pytest from pyopencl.tools", "to permit persons to whom the Software is furnished to do so, subject", "noqa pytest_generate_tests_for_pyopencl as pytest_generate_tests) @pytest.mark.parametrize(\"dtype\", [np.float64]) @pytest.mark.parametrize(\"Stepper\", [ps.RungeKutta4, ps.LowStorageRK54]) def test_expansion(ctx_factory, proc_shape, dtype,", "OR OTHER DEALINGS IN THE SOFTWARE. \"\"\" import numpy as np import pystella", "as pytest_generate_tests) @pytest.mark.parametrize(\"dtype\", [np.float64]) @pytest.mark.parametrize(\"Stepper\", [ps.RungeKutta4, ps.LowStorageRK54]) def test_expansion(ctx_factory, proc_shape, dtype, Stepper, timing=False):", "conditions: The above copyright notice and this permission notice shall be included in", "* energy(a) t = 0 dt = .005 expand = ps.Expansion(energy(1.), Stepper, mpl=np.sqrt(8.*np.pi))", "expand = ps.Expansion(energy(1.), Stepper, mpl=np.sqrt(8.*np.pi)) while t <= 10. - dt: for s", "THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO", "rtol = dt**order print(order, w, expand.a[slc]/sol(w, t) - 1, expand.constraint(energy(expand.a[slc]))) assert np.allclose(expand.a[slc], sol(w,", "= \"\"\" Permission is hereby granted, free of charge, to any person obtaining", "solution inaccurate for {w=}\" assert expand.constraint(energy(expand.a[slc])) < rtol, \\ f\"FLRW solution disobeying constraint", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER", "Permission is hereby granted, free of charge, to any person obtaining a copy", "be included in all copies or substantial portions of the Software. 
THE SOFTWARE", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "test_expansion(ctx_factory, proc_shape, dtype, Stepper, timing=False): if proc_shape != (1, 1, 1): pytest.skip(\"test expansion", "args = parser.parse_args() from pystella.step import all_steppers for stepper in all_steppers[-5:]: test_expansion( None,", "whom the Software is furnished to do so, subject to the following conditions:", "\\ f\"FLRW solution inaccurate for {w=}\" assert expand.constraint(energy(expand.a[slc])) < rtol, \\ f\"FLRW solution", "10. - dt: for s in range(expand.stepper.num_stages): slc = (0) if is_low_storage else", "t <= 10. - dt: for s in range(expand.stepper.num_stages): slc = (0) if", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \"\"\"", "s == 0 else 1) expand.step(s, energy(expand.a[slc]), pressure(expand.a[slc]), dt) t += dt slc", "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "import ( # noqa pytest_generate_tests_for_pyopencl as pytest_generate_tests) @pytest.mark.parametrize(\"dtype\", [np.float64]) @pytest.mark.parametrize(\"Stepper\", [ps.RungeKutta4, ps.LowStorageRK54]) def", "(0) if is_low_storage else (0 if s == 0 else 1) expand.step(s, energy(expand.a[slc]),", "# noqa pytest_generate_tests_for_pyopencl as pytest_generate_tests) @pytest.mark.parametrize(\"dtype\", [np.float64]) @pytest.mark.parametrize(\"Stepper\", [ps.RungeKutta4, ps.LowStorageRK54]) def test_expansion(ctx_factory, proc_shape,", "portions of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\"\"\" import numpy as", "in [0, 1/3, 1/2, 1, -1/4]: def energy(a): return a**(-3-3*w) def pressure(a): return", "a**(-3-3*w) def pressure(a): return w * energy(a) t = 0 dt = .005", "DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "import pytest from pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl as pytest_generate_tests) @pytest.mark.parametrize(\"dtype\", [np.float64])", "distribute, sublicense, and/or sell copies of the Software, and to permit persons to", "of the Software. THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "software and associated documentation files (the \"Software\"), to deal in the Software without", "one rank\") def sol(w, t): x = (1 + 3*w) return (x*(t/np.sqrt(3) +", "= LowStorageRKStepper in Stepper.__bases__ for w in [0, 1/3, 1/2, 1, -1/4]: def", "shall be included in all copies or substantial portions of the Software. THE", "pystella as ps import pytest from pyopencl.tools import ( # noqa pytest_generate_tests_for_pyopencl as", "for {w=}\" if __name__ == \"__main__\": from common import parser args = parser.parse_args()", "NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "inaccurate for {w=}\" assert expand.constraint(energy(expand.a[slc])) < rtol, \\ f\"FLRW solution disobeying constraint for", "LowStorageRKStepper in Stepper.__bases__ for w in [0, 1/3, 1/2, 1, -1/4]: def energy(a):", "return (x*(t/np.sqrt(3) + 2/x))**(2/x)/2**(2/x) from pystella.step import LowStorageRKStepper is_low_storage = LowStorageRKStepper in Stepper.__bases__", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "common import parser args = parser.parse_args() from pystella.step import all_steppers for stepper in", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT", "{w=}\" if __name__ == \"__main__\": from common 
import parser args = parser.parse_args() from", "the Software is furnished to do so, subject to the following conditions: The", "(1, 1, 1): pytest.skip(\"test expansion only on one rank\") def sol(w, t): x", "t): x = (1 + 3*w) return (x*(t/np.sqrt(3) + 2/x))**(2/x)/2**(2/x) from pystella.step import", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT", "EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS", "subject to the following conditions: The above copyright notice and this permission notice", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE", "as np import pystella as ps import pytest from pyopencl.tools import ( #", "IN THE SOFTWARE. \"\"\" import numpy as np import pystella as ps import", "rank\") def sol(w, t): x = (1 + 3*w) return (x*(t/np.sqrt(3) + 2/x))**(2/x)/2**(2/x)", "() if is_low_storage else (0) order = expand.stepper.expected_order rtol = dt**order print(order, w,", "for w in [0, 1/3, 1/2, 1, -1/4]: def energy(a): return a**(-3-3*w) def", "np.allclose(expand.a[slc], sol(w, t), rtol=rtol, atol=0), \\ f\"FLRW solution inaccurate for {w=}\" assert expand.constraint(energy(expand.a[slc]))", "expand.constraint(energy(expand.a[slc])) < rtol, \\ f\"FLRW solution disobeying constraint for {w=}\" if __name__ ==", "is hereby granted, free of charge, to any person obtaining a copy of", "and associated documentation files (the \"Software\"), to deal in the Software without restriction,", "FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or", "pytest_generate_tests_for_pyopencl as pytest_generate_tests) @pytest.mark.parametrize(\"dtype\", [np.float64]) @pytest.mark.parametrize(\"Stepper\", [ps.RungeKutta4, ps.LowStorageRK54]) def 
test_expansion(ctx_factory, proc_shape, dtype, Stepper,", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "hereby granted, free of charge, to any person obtaining a copy of this", "CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE", "restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute,", "OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR", "1, expand.constraint(energy(expand.a[slc]))) assert np.allclose(expand.a[slc], sol(w, t), rtol=rtol, atol=0), \\ f\"FLRW solution inaccurate for", "np import pystella as ps import pytest from pyopencl.tools import ( # noqa", "to the following conditions: The above copyright notice and this permission notice shall", "= expand.stepper.expected_order rtol = dt**order print(order, w, expand.a[slc]/sol(w, t) - 1, expand.constraint(energy(expand.a[slc]))) assert", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "Software, and to permit persons to whom the Software is furnished to do", "x = (1 + 3*w) return (x*(t/np.sqrt(3) + 2/x))**(2/x)/2**(2/x) from pystella.step import LowStorageRKStepper", "LowStorageRKStepper is_low_storage = LowStorageRKStepper in Stepper.__bases__ for w in [0, 1/3, 1/2, 1,", "@pytest.mark.parametrize(\"Stepper\", [ps.RungeKutta4, ps.LowStorageRK54]) def test_expansion(ctx_factory, proc_shape, dtype, Stepper, timing=False): if proc_shape != (1,", "if proc_shape != (1, 1, 1): pytest.skip(\"test expansion only on one rank\") def", "1) expand.step(s, energy(expand.a[slc]), pressure(expand.a[slc]), dt) t += dt slc = () if is_low_storage", "t) - 1, expand.constraint(energy(expand.a[slc]))) assert np.allclose(expand.a[slc], sol(w, t), rtol=rtol, atol=0), \\ f\"FLRW solution", "s in range(expand.stepper.num_stages): slc = (0) if is_low_storage else (0 if s ==", "- 1, expand.constraint(energy(expand.a[slc]))) assert np.allclose(expand.a[slc], sol(w, t), rtol=rtol, 
atol=0), \\ f\"FLRW solution inaccurate", "timing=False): if proc_shape != (1, 1, 1): pytest.skip(\"test expansion only on one rank\")", "else (0) order = expand.stepper.expected_order rtol = dt**order print(order, w, expand.a[slc]/sol(w, t) -", "sol(w, t), rtol=rtol, atol=0), \\ f\"FLRW solution inaccurate for {w=}\" assert expand.constraint(energy(expand.a[slc])) <" ]
[ "\"tokens\": {}, \"features\": [\"unknown feature\"], }, ) == subscription.Subscription( 123, True, \"friend\", {},", "assert sub.from_dict(owner_id, sub.to_dict()) == sub @pytest.mark.parametrize( \"features\", ( {}, {subscription.Features.PRIVATE_REPOSITORY}, { subscription.Features.PRIVATE_REPOSITORY, subscription.Features.PRIORITY_QUEUES,", "assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub = subscription.Subscription( 123, False, \"friend\", {}, frozenset([subscription.Features.PRIORITY_QUEUES]), )", "[\"unknown feature\"], }, ) == subscription.Subscription( 123, True, \"friend\", {}, frozenset(), ) def", "subscription def test_init(): subscription.Subscription( 123, True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}) ) def test_dict(): owner_id", "subscription.Subscription._retrieve_subscription_from_cache(owner_id) assert rsub == sub @pytest.mark.asyncio async def test_unknown_sub(): sub = await subscription.Subscription._retrieve_subscription_from_cache(98732189)", ") def test_active_feature(): sub = subscription.Subscription( 123, True, \"friend\", {}, frozenset(), ) assert", "test_save_sub(features): owner_id = 1234 sub = subscription.Subscription(owner_id, True, \"friend\", {}, frozenset(features)) await sub.save_subscription_to_cache()", "\"friend\", {}, frozenset(features)) await sub.save_subscription_to_cache() rsub = await subscription.Subscription._retrieve_subscription_from_cache(owner_id) assert rsub == sub", "@pytest.mark.asyncio async def test_unknown_sub(): sub = await subscription.Subscription._retrieve_subscription_from_cache(98732189) assert sub is None def", "mergify_engine import subscription def test_init(): subscription.Subscription( 123, True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}) ) def", "subscription.Subscription(owner_id, True, \"friend\", {}, frozenset(features)) await sub.save_subscription_to_cache() rsub = await 
subscription.Subscription._retrieve_subscription_from_cache(owner_id) assert rsub", "sub.save_subscription_to_cache() rsub = await subscription.Subscription._retrieve_subscription_from_cache(owner_id) assert rsub == sub @pytest.mark.asyncio async def test_unknown_sub():", "rsub == sub @pytest.mark.asyncio async def test_unknown_sub(): sub = await subscription.Subscription._retrieve_subscription_from_cache(98732189) assert sub", "\"friend\", {}, frozenset(), ) def test_active_feature(): sub = subscription.Subscription( 123, True, \"friend\", {},", "True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}) ) def test_dict(): owner_id = 1234 sub = subscription.Subscription(", "subscription.Subscription.from_dict( 123, { \"subscription_active\": True, \"subscription_reason\": \"friend\", \"tokens\": {}, \"features\": [\"unknown feature\"], },", "== sub @pytest.mark.parametrize( \"features\", ( {}, {subscription.Features.PRIVATE_REPOSITORY}, { subscription.Features.PRIVATE_REPOSITORY, subscription.Features.PRIORITY_QUEUES, }, ), )", "subscription.Subscription( owner_id, True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}), ) assert sub.from_dict(owner_id, sub.to_dict()) == sub @pytest.mark.parametrize(", "test_unknown_sub(): sub = await subscription.Subscription._retrieve_subscription_from_cache(98732189) assert sub is None def test_from_dict_unknown_features(): assert subscription.Subscription.from_dict(", ") def test_dict(): owner_id = 1234 sub = subscription.Subscription( owner_id, True, \"friend\", {},", "}, ), ) @pytest.mark.asyncio async def test_save_sub(features): owner_id = 1234 sub = subscription.Subscription(owner_id,", "sub = await subscription.Subscription._retrieve_subscription_from_cache(98732189) assert sub is None def test_from_dict_unknown_features(): assert subscription.Subscription.from_dict( 123,", "False, \"friend\", {}, frozenset([subscription.Features.PRIORITY_QUEUES]), ) assert 
sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub = subscription.Subscription( 123,", "False sub = subscription.Subscription( 123, True, \"friend\", {}, frozenset([subscription.Features.PRIORITY_QUEUES]), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is", ") @pytest.mark.asyncio async def test_save_sub(features): owner_id = 1234 sub = subscription.Subscription(owner_id, True, \"friend\",", "{}, {subscription.Features.PRIVATE_REPOSITORY}, { subscription.Features.PRIVATE_REPOSITORY, subscription.Features.PRIORITY_QUEUES, }, ), ) @pytest.mark.asyncio async def test_save_sub(features): owner_id", "{}, frozenset([subscription.Features.PRIORITY_QUEUES]), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub = subscription.Subscription( 123, True, \"friend\",", "123, True, \"friend\", {}, frozenset(), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub = subscription.Subscription(", "is False sub = subscription.Subscription( 123, True, \"friend\", {}, frozenset([subscription.Features.PRIORITY_QUEUES]), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES)", "frozenset(), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub = subscription.Subscription( 123, False, \"friend\", {},", "test_dict(): owner_id = 1234 sub = subscription.Subscription( owner_id, True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}), )", "pytest from mergify_engine import subscription def test_init(): subscription.Subscription( 123, True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY})", "{}, frozenset(), ) def test_active_feature(): sub = subscription.Subscription( 123, True, \"friend\", {}, frozenset(),", "feature\"], }, ) == subscription.Subscription( 123, True, \"friend\", {}, frozenset(), ) def test_active_feature():", "def test_active_feature(): sub = subscription.Subscription( 123, True, \"friend\", {}, frozenset(), ) assert 
sub.has_feature(subscription.Features.PRIORITY_QUEUES)", "test_from_dict_unknown_features(): assert subscription.Subscription.from_dict( 123, { \"subscription_active\": True, \"subscription_reason\": \"friend\", \"tokens\": {}, \"features\": [\"unknown", "subscription.Subscription._retrieve_subscription_from_cache(98732189) assert sub is None def test_from_dict_unknown_features(): assert subscription.Subscription.from_dict( 123, { \"subscription_active\": True,", "assert sub is None def test_from_dict_unknown_features(): assert subscription.Subscription.from_dict( 123, { \"subscription_active\": True, \"subscription_reason\":", "subscription.Features.PRIVATE_REPOSITORY, subscription.Features.PRIORITY_QUEUES, }, ), ) @pytest.mark.asyncio async def test_save_sub(features): owner_id = 1234 sub", "123, True, \"friend\", {}, frozenset(), ) def test_active_feature(): sub = subscription.Subscription( 123, True,", "123, False, \"friend\", {}, frozenset([subscription.Features.PRIORITY_QUEUES]), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub = subscription.Subscription(", "\"subscription_reason\": \"friend\", \"tokens\": {}, \"features\": [\"unknown feature\"], }, ) == subscription.Subscription( 123, True,", "sub = subscription.Subscription( 123, True, \"friend\", {}, frozenset(), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False", "@pytest.mark.asyncio async def test_save_sub(features): owner_id = 1234 sub = subscription.Subscription(owner_id, True, \"friend\", {},", "subscription.Subscription( 123, True, \"friend\", {}, frozenset(), ) def test_active_feature(): sub = subscription.Subscription( 123,", "@pytest.mark.parametrize( \"features\", ( {}, {subscription.Features.PRIVATE_REPOSITORY}, { subscription.Features.PRIVATE_REPOSITORY, subscription.Features.PRIORITY_QUEUES, }, ), ) @pytest.mark.asyncio async", "await sub.save_subscription_to_cache() rsub = await 
subscription.Subscription._retrieve_subscription_from_cache(owner_id) assert rsub == sub @pytest.mark.asyncio async def", "\"subscription_active\": True, \"subscription_reason\": \"friend\", \"tokens\": {}, \"features\": [\"unknown feature\"], }, ) == subscription.Subscription(", "async def test_save_sub(features): owner_id = 1234 sub = subscription.Subscription(owner_id, True, \"friend\", {}, frozenset(features))", "is False sub = subscription.Subscription( 123, False, \"friend\", {}, frozenset([subscription.Features.PRIORITY_QUEUES]), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES)", "False sub = subscription.Subscription( 123, False, \"friend\", {}, frozenset([subscription.Features.PRIORITY_QUEUES]), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is", "assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub = subscription.Subscription( 123, True, \"friend\", {}, frozenset([subscription.Features.PRIORITY_QUEUES]), )", "sub.from_dict(owner_id, sub.to_dict()) == sub @pytest.mark.parametrize( \"features\", ( {}, {subscription.Features.PRIVATE_REPOSITORY}, { subscription.Features.PRIVATE_REPOSITORY, subscription.Features.PRIORITY_QUEUES, },", "\"friend\", {}, frozenset(), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub = subscription.Subscription( 123, False,", "def test_init(): subscription.Subscription( 123, True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}) ) def test_dict(): owner_id =", "True, \"friend\", {}, frozenset(), ) def test_active_feature(): sub = subscription.Subscription( 123, True, \"friend\",", "True, \"friend\", {}, frozenset(features)) await sub.save_subscription_to_cache() rsub = await subscription.Subscription._retrieve_subscription_from_cache(owner_id) assert rsub ==", "frozenset({subscription.Features.PRIVATE_REPOSITORY}) ) def test_dict(): owner_id = 1234 sub = subscription.Subscription( owner_id, True, \"friend\",", "await 
subscription.Subscription._retrieve_subscription_from_cache(owner_id) assert rsub == sub @pytest.mark.asyncio async def test_unknown_sub(): sub = await", "subscription.Subscription( 123, True, \"friend\", {}, frozenset(), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub =", "frozenset([subscription.Features.PRIORITY_QUEUES]), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub = subscription.Subscription( 123, True, \"friend\", {},", ") assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub = subscription.Subscription( 123, True, \"friend\", {}, frozenset([subscription.Features.PRIORITY_QUEUES]),", "123, { \"subscription_active\": True, \"subscription_reason\": \"friend\", \"tokens\": {}, \"features\": [\"unknown feature\"], }, )", ") assert sub.from_dict(owner_id, sub.to_dict()) == sub @pytest.mark.parametrize( \"features\", ( {}, {subscription.Features.PRIVATE_REPOSITORY}, { subscription.Features.PRIVATE_REPOSITORY,", "sub.to_dict()) == sub @pytest.mark.parametrize( \"features\", ( {}, {subscription.Features.PRIVATE_REPOSITORY}, { subscription.Features.PRIVATE_REPOSITORY, subscription.Features.PRIORITY_QUEUES, }, ),", "test_init(): subscription.Subscription( 123, True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}) ) def test_dict(): owner_id = 1234", "True, \"subscription_reason\": \"friend\", \"tokens\": {}, \"features\": [\"unknown feature\"], }, ) == subscription.Subscription( 123,", "def test_save_sub(features): owner_id = 1234 sub = subscription.Subscription(owner_id, True, \"friend\", {}, frozenset(features)) await", "test_active_feature(): sub = subscription.Subscription( 123, True, \"friend\", {}, frozenset(), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is", "def test_unknown_sub(): sub = await subscription.Subscription._retrieve_subscription_from_cache(98732189) assert sub is None def test_from_dict_unknown_features(): assert", "sub 
@pytest.mark.asyncio async def test_unknown_sub(): sub = await subscription.Subscription._retrieve_subscription_from_cache(98732189) assert sub is None", "{ subscription.Features.PRIVATE_REPOSITORY, subscription.Features.PRIORITY_QUEUES, }, ), ) @pytest.mark.asyncio async def test_save_sub(features): owner_id = 1234", "1234 sub = subscription.Subscription(owner_id, True, \"friend\", {}, frozenset(features)) await sub.save_subscription_to_cache() rsub = await", "\"friend\", \"tokens\": {}, \"features\": [\"unknown feature\"], }, ) == subscription.Subscription( 123, True, \"friend\",", "\"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}), ) assert sub.from_dict(owner_id, sub.to_dict()) == sub @pytest.mark.parametrize( \"features\", ( {},", "frozenset(features)) await sub.save_subscription_to_cache() rsub = await subscription.Subscription._retrieve_subscription_from_cache(owner_id) assert rsub == sub @pytest.mark.asyncio async", "sub @pytest.mark.parametrize( \"features\", ( {}, {subscription.Features.PRIVATE_REPOSITORY}, { subscription.Features.PRIVATE_REPOSITORY, subscription.Features.PRIORITY_QUEUES, }, ), ) @pytest.mark.asyncio", "= subscription.Subscription( 123, False, \"friend\", {}, frozenset([subscription.Features.PRIORITY_QUEUES]), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub", "== sub @pytest.mark.asyncio async def test_unknown_sub(): sub = await subscription.Subscription._retrieve_subscription_from_cache(98732189) assert sub is", "frozenset(), ) def test_active_feature(): sub = subscription.Subscription( 123, True, \"friend\", {}, frozenset(), )", "= subscription.Subscription( owner_id, True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}), ) assert sub.from_dict(owner_id, sub.to_dict()) == sub", "\"friend\", {}, frozenset([subscription.Features.PRIORITY_QUEUES]), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub = subscription.Subscription( 123, True,", ") 
assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub = subscription.Subscription( 123, False, \"friend\", {}, frozenset([subscription.Features.PRIORITY_QUEUES]),", "frozenset({subscription.Features.PRIVATE_REPOSITORY}), ) assert sub.from_dict(owner_id, sub.to_dict()) == sub @pytest.mark.parametrize( \"features\", ( {}, {subscription.Features.PRIVATE_REPOSITORY}, {", "True, \"friend\", {}, frozenset(), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub = subscription.Subscription( 123,", "{}, frozenset({subscription.Features.PRIVATE_REPOSITORY}) ) def test_dict(): owner_id = 1234 sub = subscription.Subscription( owner_id, True,", "None def test_from_dict_unknown_features(): assert subscription.Subscription.from_dict( 123, { \"subscription_active\": True, \"subscription_reason\": \"friend\", \"tokens\": {},", "def test_dict(): owner_id = 1234 sub = subscription.Subscription( owner_id, True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}),", "sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub = subscription.Subscription( 123, True, \"friend\", {}, frozenset([subscription.Features.PRIORITY_QUEUES]), ) assert", "async def test_unknown_sub(): sub = await subscription.Subscription._retrieve_subscription_from_cache(98732189) assert sub is None def test_from_dict_unknown_features():", "1234 sub = subscription.Subscription( owner_id, True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}), ) assert sub.from_dict(owner_id, sub.to_dict())", "= await subscription.Subscription._retrieve_subscription_from_cache(98732189) assert sub is None def test_from_dict_unknown_features(): assert subscription.Subscription.from_dict( 123, {", "( {}, {subscription.Features.PRIVATE_REPOSITORY}, { subscription.Features.PRIVATE_REPOSITORY, subscription.Features.PRIORITY_QUEUES, }, ), ) @pytest.mark.asyncio async def test_save_sub(features):", "\"features\", ( {}, 
{subscription.Features.PRIVATE_REPOSITORY}, { subscription.Features.PRIVATE_REPOSITORY, subscription.Features.PRIORITY_QUEUES, }, ), ) @pytest.mark.asyncio async def", "from mergify_engine import subscription def test_init(): subscription.Subscription( 123, True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}) )", "= subscription.Subscription(owner_id, True, \"friend\", {}, frozenset(features)) await sub.save_subscription_to_cache() rsub = await subscription.Subscription._retrieve_subscription_from_cache(owner_id) assert", "\"features\": [\"unknown feature\"], }, ) == subscription.Subscription( 123, True, \"friend\", {}, frozenset(), )", "subscription.Subscription( 123, True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}) ) def test_dict(): owner_id = 1234 sub", "owner_id = 1234 sub = subscription.Subscription( owner_id, True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}), ) assert", "is None def test_from_dict_unknown_features(): assert subscription.Subscription.from_dict( 123, { \"subscription_active\": True, \"subscription_reason\": \"friend\", \"tokens\":", "assert rsub == sub @pytest.mark.asyncio async def test_unknown_sub(): sub = await subscription.Subscription._retrieve_subscription_from_cache(98732189) assert", "{subscription.Features.PRIVATE_REPOSITORY}, { subscription.Features.PRIVATE_REPOSITORY, subscription.Features.PRIORITY_QUEUES, }, ), ) @pytest.mark.asyncio async def test_save_sub(features): owner_id =", "{}, frozenset({subscription.Features.PRIVATE_REPOSITORY}), ) assert sub.from_dict(owner_id, sub.to_dict()) == sub @pytest.mark.parametrize( \"features\", ( {}, {subscription.Features.PRIVATE_REPOSITORY},", "sub = subscription.Subscription( 123, True, \"friend\", {}, frozenset([subscription.Features.PRIORITY_QUEUES]), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is True", ") == subscription.Subscription( 123, True, \"friend\", {}, frozenset(), ) def 
test_active_feature(): sub =", "assert subscription.Subscription.from_dict( 123, { \"subscription_active\": True, \"subscription_reason\": \"friend\", \"tokens\": {}, \"features\": [\"unknown feature\"],", "import subscription def test_init(): subscription.Subscription( 123, True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}) ) def test_dict():", "rsub = await subscription.Subscription._retrieve_subscription_from_cache(owner_id) assert rsub == sub @pytest.mark.asyncio async def test_unknown_sub(): sub", "== subscription.Subscription( 123, True, \"friend\", {}, frozenset(), ) def test_active_feature(): sub = subscription.Subscription(", "{}, \"features\": [\"unknown feature\"], }, ) == subscription.Subscription( 123, True, \"friend\", {}, frozenset(),", "import pytest from mergify_engine import subscription def test_init(): subscription.Subscription( 123, True, \"friend\", {},", "sub = subscription.Subscription( 123, False, \"friend\", {}, frozenset([subscription.Features.PRIORITY_QUEUES]), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False", "owner_id = 1234 sub = subscription.Subscription(owner_id, True, \"friend\", {}, frozenset(features)) await sub.save_subscription_to_cache() rsub", "\"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}) ) def test_dict(): owner_id = 1234 sub = subscription.Subscription( owner_id,", "= 1234 sub = subscription.Subscription( owner_id, True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}), ) assert sub.from_dict(owner_id,", "{ \"subscription_active\": True, \"subscription_reason\": \"friend\", \"tokens\": {}, \"features\": [\"unknown feature\"], }, ) ==", "await subscription.Subscription._retrieve_subscription_from_cache(98732189) assert sub is None def test_from_dict_unknown_features(): assert subscription.Subscription.from_dict( 123, { \"subscription_active\":", "{}, frozenset(features)) await sub.save_subscription_to_cache() rsub = await 
subscription.Subscription._retrieve_subscription_from_cache(owner_id) assert rsub == sub @pytest.mark.asyncio", "= subscription.Subscription( 123, True, \"friend\", {}, frozenset(), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub", "True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}), ) assert sub.from_dict(owner_id, sub.to_dict()) == sub @pytest.mark.parametrize( \"features\", (", "sub = subscription.Subscription( owner_id, True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}), ) assert sub.from_dict(owner_id, sub.to_dict()) ==", "subscription.Features.PRIORITY_QUEUES, }, ), ) @pytest.mark.asyncio async def test_save_sub(features): owner_id = 1234 sub =", "sub is None def test_from_dict_unknown_features(): assert subscription.Subscription.from_dict( 123, { \"subscription_active\": True, \"subscription_reason\": \"friend\",", "), ) @pytest.mark.asyncio async def test_save_sub(features): owner_id = 1234 sub = subscription.Subscription(owner_id, True,", "sub = subscription.Subscription(owner_id, True, \"friend\", {}, frozenset(features)) await sub.save_subscription_to_cache() rsub = await subscription.Subscription._retrieve_subscription_from_cache(owner_id)", "subscription.Subscription( 123, False, \"friend\", {}, frozenset([subscription.Features.PRIORITY_QUEUES]), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub =", "{}, frozenset(), ) assert sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub = subscription.Subscription( 123, False, \"friend\",", "def test_from_dict_unknown_features(): assert subscription.Subscription.from_dict( 123, { \"subscription_active\": True, \"subscription_reason\": \"friend\", \"tokens\": {}, \"features\":", "owner_id, True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}), ) assert sub.from_dict(owner_id, sub.to_dict()) == sub @pytest.mark.parametrize( \"features\",", "= await 
subscription.Subscription._retrieve_subscription_from_cache(owner_id) assert rsub == sub @pytest.mark.asyncio async def test_unknown_sub(): sub =", "}, ) == subscription.Subscription( 123, True, \"friend\", {}, frozenset(), ) def test_active_feature(): sub", "= 1234 sub = subscription.Subscription(owner_id, True, \"friend\", {}, frozenset(features)) await sub.save_subscription_to_cache() rsub =", "123, True, \"friend\", {}, frozenset({subscription.Features.PRIVATE_REPOSITORY}) ) def test_dict(): owner_id = 1234 sub =", "sub.has_feature(subscription.Features.PRIORITY_QUEUES) is False sub = subscription.Subscription( 123, False, \"friend\", {}, frozenset([subscription.Features.PRIORITY_QUEUES]), ) assert" ]
[ "= [hubmap_api_admin_v1, hubmap_api_admin_v2, hubmap_api_admin_v3, hubmap_api_admin_v4, hubmap_api_admin_v5, hubmap_api_admin_v6] flask_blueprints = [hubmap_api_blueprint] menu_links = []", "import AirflowPlugin from hubmap_api.manager import aav1 as hubmap_api_admin_v1 from hubmap_api.manager import aav2 as", "hubmap_api_admin_v6] flask_blueprints = [hubmap_api_blueprint] menu_links = [] appbuilder_views = [] appbuilder_menu_items = []", "airflow.plugins_manager import AirflowPlugin from hubmap_api.manager import aav1 as hubmap_api_admin_v1 from hubmap_api.manager import aav2", "hubmap_api_admin_v2 from hubmap_api.manager import aav3 as hubmap_api_admin_v3 from hubmap_api.manager import aav4 as hubmap_api_admin_v4", "as hubmap_api_admin_v6 from hubmap_api.manager import blueprint as hubmap_api_blueprint class AirflowHuBMAPPlugin(AirflowPlugin): name = \"hubmap_api\"", "hubmap_api_admin_v5, hubmap_api_admin_v6] flask_blueprints = [hubmap_api_blueprint] menu_links = [] appbuilder_views = [] appbuilder_menu_items =", "aav6 as hubmap_api_admin_v6 from hubmap_api.manager import blueprint as hubmap_api_blueprint class AirflowHuBMAPPlugin(AirflowPlugin): name =", "= [] macros = [] admin_views = [hubmap_api_admin_v1, hubmap_api_admin_v2, hubmap_api_admin_v3, hubmap_api_admin_v4, hubmap_api_admin_v5, hubmap_api_admin_v6]", "= [hubmap_api_blueprint] menu_links = [] appbuilder_views = [] appbuilder_menu_items = [] global_operator_extra_links =", "aav1 as hubmap_api_admin_v1 from hubmap_api.manager import aav2 as hubmap_api_admin_v2 from hubmap_api.manager import aav3", "as hubmap_api_admin_v1 from hubmap_api.manager import aav2 as hubmap_api_admin_v2 from hubmap_api.manager import aav3 as", "import aav4 as hubmap_api_admin_v4 from hubmap_api.manager import aav5 as hubmap_api_admin_v5 from hubmap_api.manager import", "import blueprint as hubmap_api_blueprint class AirflowHuBMAPPlugin(AirflowPlugin): name = \"hubmap_api\" operators = [] sensors", "import aav5 as 
hubmap_api_admin_v5 from hubmap_api.manager import aav6 as hubmap_api_admin_v6 from hubmap_api.manager import", "hubmap_api_admin_v4, hubmap_api_admin_v5, hubmap_api_admin_v6] flask_blueprints = [hubmap_api_blueprint] menu_links = [] appbuilder_views = [] appbuilder_menu_items", "hubmap_api.manager import aav5 as hubmap_api_admin_v5 from hubmap_api.manager import aav6 as hubmap_api_admin_v6 from hubmap_api.manager", "hubmap_api.manager import aav3 as hubmap_api_admin_v3 from hubmap_api.manager import aav4 as hubmap_api_admin_v4 from hubmap_api.manager", "hubmap_api.manager import aav2 as hubmap_api_admin_v2 from hubmap_api.manager import aav3 as hubmap_api_admin_v3 from hubmap_api.manager", "AirflowHuBMAPPlugin(AirflowPlugin): name = \"hubmap_api\" operators = [] sensors = [] hooks = []", "as hubmap_api_admin_v2 from hubmap_api.manager import aav3 as hubmap_api_admin_v3 from hubmap_api.manager import aav4 as", "blueprint as hubmap_api_blueprint class AirflowHuBMAPPlugin(AirflowPlugin): name = \"hubmap_api\" operators = [] sensors =", "hubmap_api.manager import aav4 as hubmap_api_admin_v4 from hubmap_api.manager import aav5 as hubmap_api_admin_v5 from hubmap_api.manager", "hubmap_api_admin_v3, hubmap_api_admin_v4, hubmap_api_admin_v5, hubmap_api_admin_v6] flask_blueprints = [hubmap_api_blueprint] menu_links = [] appbuilder_views = []", "hubmap_api_admin_v1 from hubmap_api.manager import aav2 as hubmap_api_admin_v2 from hubmap_api.manager import aav3 as hubmap_api_admin_v3", "[] sensors = [] hooks = [] executors = [] macros = []", "hubmap_api_admin_v4 from hubmap_api.manager import aav5 as hubmap_api_admin_v5 from hubmap_api.manager import aav6 as hubmap_api_admin_v6", "aav3 as hubmap_api_admin_v3 from hubmap_api.manager import aav4 as hubmap_api_admin_v4 from hubmap_api.manager import aav5", "class AirflowHuBMAPPlugin(AirflowPlugin): name = \"hubmap_api\" operators = [] sensors = [] hooks =", "[] macros = [] admin_views = [hubmap_api_admin_v1, hubmap_api_admin_v2, 
hubmap_api_admin_v3, hubmap_api_admin_v4, hubmap_api_admin_v5, hubmap_api_admin_v6] flask_blueprints", "admin_views = [hubmap_api_admin_v1, hubmap_api_admin_v2, hubmap_api_admin_v3, hubmap_api_admin_v4, hubmap_api_admin_v5, hubmap_api_admin_v6] flask_blueprints = [hubmap_api_blueprint] menu_links =", "hooks = [] executors = [] macros = [] admin_views = [hubmap_api_admin_v1, hubmap_api_admin_v2,", "hubmap_api_admin_v5 from hubmap_api.manager import aav6 as hubmap_api_admin_v6 from hubmap_api.manager import blueprint as hubmap_api_blueprint", "= [] admin_views = [hubmap_api_admin_v1, hubmap_api_admin_v2, hubmap_api_admin_v3, hubmap_api_admin_v4, hubmap_api_admin_v5, hubmap_api_admin_v6] flask_blueprints = [hubmap_api_blueprint]", "hubmap_api_admin_v3 from hubmap_api.manager import aav4 as hubmap_api_admin_v4 from hubmap_api.manager import aav5 as hubmap_api_admin_v5", "[] hooks = [] executors = [] macros = [] admin_views = [hubmap_api_admin_v1,", "from hubmap_api.manager import aav6 as hubmap_api_admin_v6 from hubmap_api.manager import blueprint as hubmap_api_blueprint class", "from hubmap_api.manager import aav5 as hubmap_api_admin_v5 from hubmap_api.manager import aav6 as hubmap_api_admin_v6 from", "as hubmap_api_admin_v4 from hubmap_api.manager import aav5 as hubmap_api_admin_v5 from hubmap_api.manager import aav6 as", "aav4 as hubmap_api_admin_v4 from hubmap_api.manager import aav5 as hubmap_api_admin_v5 from hubmap_api.manager import aav6", "operators = [] sensors = [] hooks = [] executors = [] macros", "flask_blueprints = [hubmap_api_blueprint] menu_links = [] appbuilder_views = [] appbuilder_menu_items = [] global_operator_extra_links", "macros = [] admin_views = [hubmap_api_admin_v1, hubmap_api_admin_v2, hubmap_api_admin_v3, hubmap_api_admin_v4, hubmap_api_admin_v5, hubmap_api_admin_v6] flask_blueprints =", "import aav6 as hubmap_api_admin_v6 from hubmap_api.manager import blueprint as hubmap_api_blueprint class AirflowHuBMAPPlugin(AirflowPlugin): name", 
"executors = [] macros = [] admin_views = [hubmap_api_admin_v1, hubmap_api_admin_v2, hubmap_api_admin_v3, hubmap_api_admin_v4, hubmap_api_admin_v5,", "import aav2 as hubmap_api_admin_v2 from hubmap_api.manager import aav3 as hubmap_api_admin_v3 from hubmap_api.manager import", "hubmap_api.manager import aav1 as hubmap_api_admin_v1 from hubmap_api.manager import aav2 as hubmap_api_admin_v2 from hubmap_api.manager", "from hubmap_api.manager import aav1 as hubmap_api_admin_v1 from hubmap_api.manager import aav2 as hubmap_api_admin_v2 from", "sensors = [] hooks = [] executors = [] macros = [] admin_views", "= [] sensors = [] hooks = [] executors = [] macros =", "from hubmap_api.manager import blueprint as hubmap_api_blueprint class AirflowHuBMAPPlugin(AirflowPlugin): name = \"hubmap_api\" operators =", "from hubmap_api.manager import aav3 as hubmap_api_admin_v3 from hubmap_api.manager import aav4 as hubmap_api_admin_v4 from", "from airflow.plugins_manager import AirflowPlugin from hubmap_api.manager import aav1 as hubmap_api_admin_v1 from hubmap_api.manager import", "[hubmap_api_blueprint] menu_links = [] appbuilder_views = [] appbuilder_menu_items = [] global_operator_extra_links = []", "from hubmap_api.manager import aav4 as hubmap_api_admin_v4 from hubmap_api.manager import aav5 as hubmap_api_admin_v5 from", "as hubmap_api_admin_v5 from hubmap_api.manager import aav6 as hubmap_api_admin_v6 from hubmap_api.manager import blueprint as", "[hubmap_api_admin_v1, hubmap_api_admin_v2, hubmap_api_admin_v3, hubmap_api_admin_v4, hubmap_api_admin_v5, hubmap_api_admin_v6] flask_blueprints = [hubmap_api_blueprint] menu_links = [] appbuilder_views", "= \"hubmap_api\" operators = [] sensors = [] hooks = [] executors =", "import aav1 as hubmap_api_admin_v1 from hubmap_api.manager import aav2 as hubmap_api_admin_v2 from hubmap_api.manager import", "= [] hooks = [] executors = [] macros = [] admin_views =", "hubmap_api.manager import blueprint as hubmap_api_blueprint class 
AirflowHuBMAPPlugin(AirflowPlugin): name = \"hubmap_api\" operators = []", "import aav3 as hubmap_api_admin_v3 from hubmap_api.manager import aav4 as hubmap_api_admin_v4 from hubmap_api.manager import", "\"hubmap_api\" operators = [] sensors = [] hooks = [] executors = []", "[] admin_views = [hubmap_api_admin_v1, hubmap_api_admin_v2, hubmap_api_admin_v3, hubmap_api_admin_v4, hubmap_api_admin_v5, hubmap_api_admin_v6] flask_blueprints = [hubmap_api_blueprint] menu_links", "from hubmap_api.manager import aav2 as hubmap_api_admin_v2 from hubmap_api.manager import aav3 as hubmap_api_admin_v3 from", "as hubmap_api_admin_v3 from hubmap_api.manager import aav4 as hubmap_api_admin_v4 from hubmap_api.manager import aav5 as", "hubmap_api_admin_v6 from hubmap_api.manager import blueprint as hubmap_api_blueprint class AirflowHuBMAPPlugin(AirflowPlugin): name = \"hubmap_api\" operators", "[] executors = [] macros = [] admin_views = [hubmap_api_admin_v1, hubmap_api_admin_v2, hubmap_api_admin_v3, hubmap_api_admin_v4,", "= [] executors = [] macros = [] admin_views = [hubmap_api_admin_v1, hubmap_api_admin_v2, hubmap_api_admin_v3,", "as hubmap_api_blueprint class AirflowHuBMAPPlugin(AirflowPlugin): name = \"hubmap_api\" operators = [] sensors = []", "AirflowPlugin from hubmap_api.manager import aav1 as hubmap_api_admin_v1 from hubmap_api.manager import aav2 as hubmap_api_admin_v2", "name = \"hubmap_api\" operators = [] sensors = [] hooks = [] executors", "hubmap_api_admin_v2, hubmap_api_admin_v3, hubmap_api_admin_v4, hubmap_api_admin_v5, hubmap_api_admin_v6] flask_blueprints = [hubmap_api_blueprint] menu_links = [] appbuilder_views =", "hubmap_api.manager import aav6 as hubmap_api_admin_v6 from hubmap_api.manager import blueprint as hubmap_api_blueprint class AirflowHuBMAPPlugin(AirflowPlugin):", "aav2 as hubmap_api_admin_v2 from hubmap_api.manager import aav3 as hubmap_api_admin_v3 from hubmap_api.manager import aav4", "hubmap_api_blueprint class AirflowHuBMAPPlugin(AirflowPlugin): 
name = \"hubmap_api\" operators = [] sensors = [] hooks", "aav5 as hubmap_api_admin_v5 from hubmap_api.manager import aav6 as hubmap_api_admin_v6 from hubmap_api.manager import blueprint" ]
[ "numpy.distutils.misc_util import Configuration config=Configuration('hgen',parent_package,top_path) return config if __name__ == '__main__': from numpy.distutils.core import", "def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config=Configuration('hgen',parent_package,top_path) return config if __name__ == '__main__':", "configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config=Configuration('hgen',parent_package,top_path) return config if __name__ == '__main__': from", "Generators. ''' def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config=Configuration('hgen',parent_package,top_path) return config if __name__", "file for Operator and Hamiltonain Generators. ''' def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration", "''' Setup file for Operator and Hamiltonain Generators. ''' def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util", "for Operator and Hamiltonain Generators. ''' def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config=Configuration('hgen',parent_package,top_path)", "and Hamiltonain Generators. ''' def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config=Configuration('hgen',parent_package,top_path) return config", "from numpy.distutils.misc_util import Configuration config=Configuration('hgen',parent_package,top_path) return config if __name__ == '__main__': from numpy.distutils.core", "Operator and Hamiltonain Generators. 
''' def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config=Configuration('hgen',parent_package,top_path) return", "Configuration config=Configuration('hgen',parent_package,top_path) return config if __name__ == '__main__': from numpy.distutils.core import setup setup(configuration=configuration)", "Hamiltonain Generators. ''' def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config=Configuration('hgen',parent_package,top_path) return config if", "import Configuration config=Configuration('hgen',parent_package,top_path) return config if __name__ == '__main__': from numpy.distutils.core import setup", "Setup file for Operator and Hamiltonain Generators. ''' def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import", "''' def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration config=Configuration('hgen',parent_package,top_path) return config if __name__ ==" ]
[ "), migrations.RemoveField( model_name='post', name='image', ), migrations.AddField( model_name='business', name='image', field=cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='image'), ),", "cloudinary.models from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('hoodapp', '0013_auto_20220110_1102'), ]", "Migration(migrations.Migration): dependencies = [ ('hoodapp', '0013_auto_20220110_1102'), ] operations = [ migrations.AlterModelOptions( name='post', options={'ordering':", "by Django 3.2.9 on 2022-01-10 08:19 import cloudinary.models from django.db import migrations class", "3.2.9 on 2022-01-10 08:19 import cloudinary.models from django.db import migrations class Migration(migrations.Migration): dependencies", "import cloudinary.models from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('hoodapp', '0013_auto_20220110_1102'),", "[ migrations.AlterModelOptions( name='post', options={'ordering': ['-pk']}, ), migrations.RemoveField( model_name='post', name='image', ), migrations.AddField( model_name='business', name='image',", "2022-01-10 08:19 import cloudinary.models from django.db import migrations class Migration(migrations.Migration): dependencies = [", "class Migration(migrations.Migration): dependencies = [ ('hoodapp', '0013_auto_20220110_1102'), ] operations = [ migrations.AlterModelOptions( name='post',", "migrations.RemoveField( model_name='post', name='image', ), migrations.AddField( model_name='business', name='image', field=cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='image'), ), ]", "migrations.AlterModelOptions( name='post', options={'ordering': ['-pk']}, ), migrations.RemoveField( model_name='post', name='image', ), migrations.AddField( model_name='business', name='image', field=cloudinary.models.CloudinaryField(blank=True,", "08:19 import cloudinary.models from django.db import migrations class 
Migration(migrations.Migration): dependencies = [ ('hoodapp',", "options={'ordering': ['-pk']}, ), migrations.RemoveField( model_name='post', name='image', ), migrations.AddField( model_name='business', name='image', field=cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True,", "on 2022-01-10 08:19 import cloudinary.models from django.db import migrations class Migration(migrations.Migration): dependencies =", "Generated by Django 3.2.9 on 2022-01-10 08:19 import cloudinary.models from django.db import migrations", "= [ ('hoodapp', '0013_auto_20220110_1102'), ] operations = [ migrations.AlterModelOptions( name='post', options={'ordering': ['-pk']}, ),", "Django 3.2.9 on 2022-01-10 08:19 import cloudinary.models from django.db import migrations class Migration(migrations.Migration):", "dependencies = [ ('hoodapp', '0013_auto_20220110_1102'), ] operations = [ migrations.AlterModelOptions( name='post', options={'ordering': ['-pk']},", "name='post', options={'ordering': ['-pk']}, ), migrations.RemoveField( model_name='post', name='image', ), migrations.AddField( model_name='business', name='image', field=cloudinary.models.CloudinaryField(blank=True, max_length=255,", "# Generated by Django 3.2.9 on 2022-01-10 08:19 import cloudinary.models from django.db import", "import migrations class Migration(migrations.Migration): dependencies = [ ('hoodapp', '0013_auto_20220110_1102'), ] operations = [", "operations = [ migrations.AlterModelOptions( name='post', options={'ordering': ['-pk']}, ), migrations.RemoveField( model_name='post', name='image', ), migrations.AddField(", "['-pk']}, ), migrations.RemoveField( model_name='post', name='image', ), migrations.AddField( model_name='business', name='image', field=cloudinary.models.CloudinaryField(blank=True, max_length=255, null=True, verbose_name='image'),", "[ ('hoodapp', '0013_auto_20220110_1102'), ] operations = [ migrations.AlterModelOptions( name='post', options={'ordering': ['-pk']}, ), 
migrations.RemoveField(", "migrations class Migration(migrations.Migration): dependencies = [ ('hoodapp', '0013_auto_20220110_1102'), ] operations = [ migrations.AlterModelOptions(", "'0013_auto_20220110_1102'), ] operations = [ migrations.AlterModelOptions( name='post', options={'ordering': ['-pk']}, ), migrations.RemoveField( model_name='post', name='image',", "django.db import migrations class Migration(migrations.Migration): dependencies = [ ('hoodapp', '0013_auto_20220110_1102'), ] operations =", "] operations = [ migrations.AlterModelOptions( name='post', options={'ordering': ['-pk']}, ), migrations.RemoveField( model_name='post', name='image', ),", "('hoodapp', '0013_auto_20220110_1102'), ] operations = [ migrations.AlterModelOptions( name='post', options={'ordering': ['-pk']}, ), migrations.RemoveField( model_name='post',", "= [ migrations.AlterModelOptions( name='post', options={'ordering': ['-pk']}, ), migrations.RemoveField( model_name='post', name='image', ), migrations.AddField( model_name='business',", "from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('hoodapp', '0013_auto_20220110_1102'), ] operations" ]
[ "(output_dir or output_file): raise ValueError( 'GetPackageWriter requires either output_dir or output_file') if output_dir", "or output_file') if output_dir and output_file: raise ValueError( 'GetPackageWriter requires only one of", "requires either output_dir or output_file') if output_dir and output_file: raise ValueError( 'GetPackageWriter requires", "package_writer = tar_library_package.TarLibraryPackage(out, compress=False) elif output_format == 'txt': package_writer = single_file_library_package.SingleFileLibraryPackage(out) else: package_writer", "ValueError( 'GetPackageWriter requires only one of output_dir or output_file') if output_dir: package_writer =", "from googleapis.codegen.filesys import single_file_library_package from googleapis.codegen.filesys import tar_library_package from googleapis.codegen.filesys import zip_library_package def", "output_file') if output_dir: package_writer = filesystem_library_package.FilesystemLibraryPackage( output_dir) else: out = open(output_file, 'w') if", "output_dir) else: out = open(output_file, 'w') if output_format == 'tgz': package_writer = tar_library_package.TarLibraryPackage(out)", "googleapis.codegen.filesys import single_file_library_package from googleapis.codegen.filesys import tar_library_package from googleapis.codegen.filesys import zip_library_package def GetPackageWriter(output_dir=None,", "output_dir: package_writer = filesystem_library_package.FilesystemLibraryPackage( output_dir) else: out = open(output_file, 'w') if output_format ==", "elif output_format == 'txt': package_writer = single_file_library_package.SingleFileLibraryPackage(out) else: package_writer = zip_library_package.ZipLibraryPackage(out) return package_writer", "zip_library_package def GetPackageWriter(output_dir=None, output_file=None, output_format='zip'): \"\"\"Get an output writer for a package.\"\"\" if", "ValueError( 'GetPackageWriter requires either output_dir or output_file') if output_dir and output_file: raise 
ValueError(", "== 'tgz': package_writer = tar_library_package.TarLibraryPackage(out) elif output_format == 'tar': package_writer = tar_library_package.TarLibraryPackage(out, compress=False)", "filesystem_library_package.FilesystemLibraryPackage( output_dir) else: out = open(output_file, 'w') if output_format == 'tgz': package_writer =", "for a package.\"\"\" if not (output_dir or output_file): raise ValueError( 'GetPackageWriter requires either", "import single_file_library_package from googleapis.codegen.filesys import tar_library_package from googleapis.codegen.filesys import zip_library_package def GetPackageWriter(output_dir=None, output_file=None,", "an output writer for a package.\"\"\" if not (output_dir or output_file): raise ValueError(", "#!/usr/bin/python2.7 \"\"\"Foundary for getting a package writer.\"\"\" from googleapis.codegen.filesys import filesystem_library_package from googleapis.codegen.filesys", "of output_dir or output_file') if output_dir: package_writer = filesystem_library_package.FilesystemLibraryPackage( output_dir) else: out =", "def GetPackageWriter(output_dir=None, output_file=None, output_format='zip'): \"\"\"Get an output writer for a package.\"\"\" if not", "'tar': package_writer = tar_library_package.TarLibraryPackage(out, compress=False) elif output_format == 'txt': package_writer = single_file_library_package.SingleFileLibraryPackage(out) else:", "output_dir and output_file: raise ValueError( 'GetPackageWriter requires only one of output_dir or output_file')", "'GetPackageWriter requires only one of output_dir or output_file') if output_dir: package_writer = filesystem_library_package.FilesystemLibraryPackage(", "filesystem_library_package from googleapis.codegen.filesys import single_file_library_package from googleapis.codegen.filesys import tar_library_package from googleapis.codegen.filesys import zip_library_package", "if output_dir: package_writer = filesystem_library_package.FilesystemLibraryPackage( output_dir) else: out 
= open(output_file, 'w') if output_format", "writer for a package.\"\"\" if not (output_dir or output_file): raise ValueError( 'GetPackageWriter requires", "package_writer = filesystem_library_package.FilesystemLibraryPackage( output_dir) else: out = open(output_file, 'w') if output_format == 'tgz':", "out = open(output_file, 'w') if output_format == 'tgz': package_writer = tar_library_package.TarLibraryPackage(out) elif output_format", "import tar_library_package from googleapis.codegen.filesys import zip_library_package def GetPackageWriter(output_dir=None, output_file=None, output_format='zip'): \"\"\"Get an output", "from googleapis.codegen.filesys import tar_library_package from googleapis.codegen.filesys import zip_library_package def GetPackageWriter(output_dir=None, output_file=None, output_format='zip'): \"\"\"Get", "elif output_format == 'tar': package_writer = tar_library_package.TarLibraryPackage(out, compress=False) elif output_format == 'txt': package_writer", "output writer for a package.\"\"\" if not (output_dir or output_file): raise ValueError( 'GetPackageWriter", "not (output_dir or output_file): raise ValueError( 'GetPackageWriter requires either output_dir or output_file') if", "tar_library_package from googleapis.codegen.filesys import zip_library_package def GetPackageWriter(output_dir=None, output_file=None, output_format='zip'): \"\"\"Get an output writer", "GetPackageWriter(output_dir=None, output_file=None, output_format='zip'): \"\"\"Get an output writer for a package.\"\"\" if not (output_dir", "'tgz': package_writer = tar_library_package.TarLibraryPackage(out) elif output_format == 'tar': package_writer = tar_library_package.TarLibraryPackage(out, compress=False) elif", "output_file: raise ValueError( 'GetPackageWriter requires only one of output_dir or output_file') if output_dir:", "and output_file: raise ValueError( 'GetPackageWriter requires only one of output_dir or output_file') if", "output_file') if output_dir and output_file: 
raise ValueError( 'GetPackageWriter requires only one of output_dir", "only one of output_dir or output_file') if output_dir: package_writer = filesystem_library_package.FilesystemLibraryPackage( output_dir) else:", "output_file): raise ValueError( 'GetPackageWriter requires either output_dir or output_file') if output_dir and output_file:", "import filesystem_library_package from googleapis.codegen.filesys import single_file_library_package from googleapis.codegen.filesys import tar_library_package from googleapis.codegen.filesys import", "raise ValueError( 'GetPackageWriter requires either output_dir or output_file') if output_dir and output_file: raise", "package writer.\"\"\" from googleapis.codegen.filesys import filesystem_library_package from googleapis.codegen.filesys import single_file_library_package from googleapis.codegen.filesys import", "= open(output_file, 'w') if output_format == 'tgz': package_writer = tar_library_package.TarLibraryPackage(out) elif output_format ==", "or output_file): raise ValueError( 'GetPackageWriter requires either output_dir or output_file') if output_dir and", "output_dir or output_file') if output_dir: package_writer = filesystem_library_package.FilesystemLibraryPackage( output_dir) else: out = open(output_file,", "output_format == 'tar': package_writer = tar_library_package.TarLibraryPackage(out, compress=False) elif output_format == 'txt': package_writer =", "package.\"\"\" if not (output_dir or output_file): raise ValueError( 'GetPackageWriter requires either output_dir or", "if not (output_dir or output_file): raise ValueError( 'GetPackageWriter requires either output_dir or output_file')", "== 'tar': package_writer = tar_library_package.TarLibraryPackage(out, compress=False) elif output_format == 'txt': package_writer = single_file_library_package.SingleFileLibraryPackage(out)", "output_file=None, output_format='zip'): \"\"\"Get an output writer for a package.\"\"\" if not (output_dir or", 
"<reponame>aiuto/google-apis-client-generator #!/usr/bin/python2.7 \"\"\"Foundary for getting a package writer.\"\"\" from googleapis.codegen.filesys import filesystem_library_package from", "output_dir or output_file') if output_dir and output_file: raise ValueError( 'GetPackageWriter requires only one", "for getting a package writer.\"\"\" from googleapis.codegen.filesys import filesystem_library_package from googleapis.codegen.filesys import single_file_library_package", "compress=False) elif output_format == 'txt': package_writer = single_file_library_package.SingleFileLibraryPackage(out) else: package_writer = zip_library_package.ZipLibraryPackage(out) return", "from googleapis.codegen.filesys import zip_library_package def GetPackageWriter(output_dir=None, output_file=None, output_format='zip'): \"\"\"Get an output writer for", "a package.\"\"\" if not (output_dir or output_file): raise ValueError( 'GetPackageWriter requires either output_dir", "single_file_library_package from googleapis.codegen.filesys import tar_library_package from googleapis.codegen.filesys import zip_library_package def GetPackageWriter(output_dir=None, output_file=None, output_format='zip'):", "googleapis.codegen.filesys import tar_library_package from googleapis.codegen.filesys import zip_library_package def GetPackageWriter(output_dir=None, output_file=None, output_format='zip'): \"\"\"Get an", "\"\"\"Get an output writer for a package.\"\"\" if not (output_dir or output_file): raise", "or output_file') if output_dir: package_writer = filesystem_library_package.FilesystemLibraryPackage( output_dir) else: out = open(output_file, 'w')", "a package writer.\"\"\" from googleapis.codegen.filesys import filesystem_library_package from googleapis.codegen.filesys import single_file_library_package from googleapis.codegen.filesys", "else: out = open(output_file, 'w') if output_format == 'tgz': package_writer = tar_library_package.TarLibraryPackage(out) elif", "output_format == 'tgz': 
package_writer = tar_library_package.TarLibraryPackage(out) elif output_format == 'tar': package_writer = tar_library_package.TarLibraryPackage(out,", "= filesystem_library_package.FilesystemLibraryPackage( output_dir) else: out = open(output_file, 'w') if output_format == 'tgz': package_writer", "googleapis.codegen.filesys import zip_library_package def GetPackageWriter(output_dir=None, output_file=None, output_format='zip'): \"\"\"Get an output writer for a", "writer.\"\"\" from googleapis.codegen.filesys import filesystem_library_package from googleapis.codegen.filesys import single_file_library_package from googleapis.codegen.filesys import tar_library_package", "from googleapis.codegen.filesys import filesystem_library_package from googleapis.codegen.filesys import single_file_library_package from googleapis.codegen.filesys import tar_library_package from", "one of output_dir or output_file') if output_dir: package_writer = filesystem_library_package.FilesystemLibraryPackage( output_dir) else: out", "googleapis.codegen.filesys import filesystem_library_package from googleapis.codegen.filesys import single_file_library_package from googleapis.codegen.filesys import tar_library_package from googleapis.codegen.filesys", "= tar_library_package.TarLibraryPackage(out) elif output_format == 'tar': package_writer = tar_library_package.TarLibraryPackage(out, compress=False) elif output_format ==", "tar_library_package.TarLibraryPackage(out) elif output_format == 'tar': package_writer = tar_library_package.TarLibraryPackage(out, compress=False) elif output_format == 'txt':", "'w') if output_format == 'tgz': package_writer = tar_library_package.TarLibraryPackage(out) elif output_format == 'tar': package_writer", "import zip_library_package def GetPackageWriter(output_dir=None, output_file=None, output_format='zip'): \"\"\"Get an output writer for a package.\"\"\"", "if output_format == 'tgz': package_writer = tar_library_package.TarLibraryPackage(out) elif output_format 
== 'tar': package_writer =", "= tar_library_package.TarLibraryPackage(out, compress=False) elif output_format == 'txt': package_writer = single_file_library_package.SingleFileLibraryPackage(out) else: package_writer =", "open(output_file, 'w') if output_format == 'tgz': package_writer = tar_library_package.TarLibraryPackage(out) elif output_format == 'tar':", "getting a package writer.\"\"\" from googleapis.codegen.filesys import filesystem_library_package from googleapis.codegen.filesys import single_file_library_package from", "'GetPackageWriter requires either output_dir or output_file') if output_dir and output_file: raise ValueError( 'GetPackageWriter", "if output_dir and output_file: raise ValueError( 'GetPackageWriter requires only one of output_dir or", "\"\"\"Foundary for getting a package writer.\"\"\" from googleapis.codegen.filesys import filesystem_library_package from googleapis.codegen.filesys import", "package_writer = tar_library_package.TarLibraryPackage(out) elif output_format == 'tar': package_writer = tar_library_package.TarLibraryPackage(out, compress=False) elif output_format", "output_format='zip'): \"\"\"Get an output writer for a package.\"\"\" if not (output_dir or output_file):", "raise ValueError( 'GetPackageWriter requires only one of output_dir or output_file') if output_dir: package_writer", "requires only one of output_dir or output_file') if output_dir: package_writer = filesystem_library_package.FilesystemLibraryPackage( output_dir)", "tar_library_package.TarLibraryPackage(out, compress=False) elif output_format == 'txt': package_writer = single_file_library_package.SingleFileLibraryPackage(out) else: package_writer = zip_library_package.ZipLibraryPackage(out)", "either output_dir or output_file') if output_dir and output_file: raise ValueError( 'GetPackageWriter requires only" ]
[ "\"\\nStream : \" + stream +\"\\nCollege Name : \" + collegename # Generate", "pyqrcode import png from pyqrcode import QRCode print(\"WELCOME TO THE QR CODE GENERATION\")", "GENERATION\") # Take input name = input(\"Enter Name : \") stream = input(\"Enter", ": \"+ name + \"\\nStream : \" + stream +\"\\nCollege Name : \"", "of College : \") # String which represents the QR code s =", "Stream : \") collegename = input(\"Enter Name of College : \") # String", ": \") # String which represents the QR code s = \"Name :", "QRCode from pyqrcode import pyqrcode import png from pyqrcode import QRCode print(\"WELCOME TO", "THE QR CODE GENERATION\") # Take input name = input(\"Enter Name : \")", "Import QRCode from pyqrcode import pyqrcode import png from pyqrcode import QRCode print(\"WELCOME", "input(\"Enter Name : \") stream = input(\"Enter Stream : \") collegename = input(\"Enter", "import QRCode print(\"WELCOME TO THE QR CODE GENERATION\") # Take input name =", "<gh_stars>1-10 # Import QRCode from pyqrcode import pyqrcode import png from pyqrcode import", "# Import QRCode from pyqrcode import pyqrcode import png from pyqrcode import QRCode", "= input(\"Enter Name of College : \") # String which represents the QR", "= input(\"Enter Stream : \") collegename = input(\"Enter Name of College : \")", "stream +\"\\nCollege Name : \" + collegename # Generate QR code url =", "code url = pyqrcode.create(s) # Create and save the png file naming \"myqr.png\"", "= \"Name : \"+ name + \"\\nStream : \" + stream +\"\\nCollege Name", "stream = input(\"Enter Stream : \") collegename = input(\"Enter Name of College :", "the QR code s = \"Name : \"+ name + \"\\nStream : \"", "collegename # Generate QR code url = pyqrcode.create(s) # Create and save the", "name = input(\"Enter Name : \") stream = input(\"Enter Stream : \") collegename", "# Generate QR code url = pyqrcode.create(s) # Create and save the png", "+ \"\\nStream : \" + stream +\"\\nCollege Name : \" + collegename #", "\") # 
String which represents the QR code s = \"Name : \"+", "s = \"Name : \"+ name + \"\\nStream : \" + stream +\"\\nCollege", "pyqrcode import pyqrcode import png from pyqrcode import QRCode print(\"WELCOME TO THE QR", "which represents the QR code s = \"Name : \"+ name + \"\\nStream", "QR CODE GENERATION\") # Take input name = input(\"Enter Name : \") stream", "\") collegename = input(\"Enter Name of College : \") # String which represents", "String which represents the QR code s = \"Name : \"+ name +", "QR code s = \"Name : \"+ name + \"\\nStream : \" +", "QRCode print(\"WELCOME TO THE QR CODE GENERATION\") # Take input name = input(\"Enter", ": \") collegename = input(\"Enter Name of College : \") # String which", "\" + stream +\"\\nCollege Name : \" + collegename # Generate QR code", "Generate QR code url = pyqrcode.create(s) # Create and save the png file", ": \" + collegename # Generate QR code url = pyqrcode.create(s) # Create", "input(\"Enter Name of College : \") # String which represents the QR code", "url = pyqrcode.create(s) # Create and save the png file naming \"myqr.png\" url.png('myqr.png',", "\") stream = input(\"Enter Stream : \") collegename = input(\"Enter Name of College", "= pyqrcode.create(s) # Create and save the png file naming \"myqr.png\" url.png('myqr.png', scale", "png from pyqrcode import QRCode print(\"WELCOME TO THE QR CODE GENERATION\") # Take", "Name : \" + collegename # Generate QR code url = pyqrcode.create(s) #", "pyqrcode.create(s) # Create and save the png file naming \"myqr.png\" url.png('myqr.png', scale =", ": \") stream = input(\"Enter Stream : \") collegename = input(\"Enter Name of", "import pyqrcode import png from pyqrcode import QRCode print(\"WELCOME TO THE QR CODE", "College : \") # String which represents the QR code s = \"Name", "+ stream +\"\\nCollege Name : \" + collegename # Generate QR code url", ": \" + stream +\"\\nCollege Name : \" + collegename # Generate QR", "# String which represents the QR code s = \"Name 
: \"+ name", "Name of College : \") # String which represents the QR code s", "code s = \"Name : \"+ name + \"\\nStream : \" + stream", "= input(\"Enter Name : \") stream = input(\"Enter Stream : \") collegename =", "\"+ name + \"\\nStream : \" + stream +\"\\nCollege Name : \" +", "input name = input(\"Enter Name : \") stream = input(\"Enter Stream : \")", "name + \"\\nStream : \" + stream +\"\\nCollege Name : \" + collegename", "+ collegename # Generate QR code url = pyqrcode.create(s) # Create and save", "Name : \") stream = input(\"Enter Stream : \") collegename = input(\"Enter Name", "+\"\\nCollege Name : \" + collegename # Generate QR code url = pyqrcode.create(s)", "Take input name = input(\"Enter Name : \") stream = input(\"Enter Stream :", "# Create and save the png file naming \"myqr.png\" url.png('myqr.png', scale = 6)", "CODE GENERATION\") # Take input name = input(\"Enter Name : \") stream =", "TO THE QR CODE GENERATION\") # Take input name = input(\"Enter Name :", "collegename = input(\"Enter Name of College : \") # String which represents the", "\" + collegename # Generate QR code url = pyqrcode.create(s) # Create and", "represents the QR code s = \"Name : \"+ name + \"\\nStream :", "\"Name : \"+ name + \"\\nStream : \" + stream +\"\\nCollege Name :", "# Take input name = input(\"Enter Name : \") stream = input(\"Enter Stream", "input(\"Enter Stream : \") collegename = input(\"Enter Name of College : \") #", "pyqrcode import QRCode print(\"WELCOME TO THE QR CODE GENERATION\") # Take input name", "print(\"WELCOME TO THE QR CODE GENERATION\") # Take input name = input(\"Enter Name", "from pyqrcode import QRCode print(\"WELCOME TO THE QR CODE GENERATION\") # Take input", "from pyqrcode import pyqrcode import png from pyqrcode import QRCode print(\"WELCOME TO THE", "import png from pyqrcode import QRCode print(\"WELCOME TO THE QR CODE GENERATION\") #", "QR code url = pyqrcode.create(s) # Create and save the png file naming" ]
[ "else: return self._successor.handle(FILE) def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp class CSV(AbstractHandler):", "CSV(AbstractHandler): def __init__(self): self._successor = None self._temp: list = list() def set_successor(self, successor):", "file_ext == self.__class__.__name__.lower(): with open(FILE, \"r\") as f: for line in f.read().split('\\n'): reformat_line", "self.getter() else: return self._successor.handle(FILE) def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp class", "handle(self, FILE): \"\"\"Handle the *.csv file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if file_ext", "wrapper class AbstractHandler(metaclass=ABCMeta): \"\"\"The Interface for handling requests.\"\"\" @abstractmethod def set_successor(self, successor): \"\"\"Set", "length = len(data['x']) for i in range(length): x_temp = list(map(float, data['x'][i])) y_temp =", "file_ext == self.__class__.__name__.lower(): with open(FILE, \"r\") as f: for line in f.read().split(',\\n'): reformat_line", "def handle(self, FILE): \"\"\"Handle the *.txt file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if", "range(length): x_temp = list(map(float, data['x'][i])) y_temp = list(map(float, data['y'][i])) temp = [x_temp, y_temp]", "\"r\") as f: for line in f.read().split('\\n'): reformat_line = line[1:-1].split('];[') a = [list(map(float,", "reformat_line = line[1:-1].split('];[') a = [list(map(float, elem.split(','))) for elem in reformat_line] self._temp.append(a) return", "class TXT(AbstractHandler): def __init__(self): self._successor = None self._temp: list = list() def set_successor(self,", "== self.__class__.__name__.lower(): with open(FILE, \"r\") as f: for line in f.read().split('\\n'): reformat_line =", "requests.\"\"\" @abstractmethod def set_successor(self, successor): \"\"\"Set the next handler in the chain\"\"\" pass", "the chain\"\"\" pass @abstractmethod def 
handle(self, file) -> Optional[str]: \"\"\"Handle the event\"\"\" pass", "JSON(AbstractHandler): def __init__(self): self._successor = None self._temp: list = list() def set_successor(self, successor):", "def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp class TXT(AbstractHandler): def __init__(self): self._successor", "return successor @error_catcher def handle(self, FILE): \"\"\"Handle the *.json file event\"\"\" file_name, file_ext", "if file_ext == self.__class__.__name__.lower(): with open(FILE, \"r\") as f: for line in f.read().split(',\\n'):", "@error_catcher def handle(self, FILE): \"\"\"Handle the *.json file event\"\"\" file_name, file_ext = str(FILE).split(\".\")", "or # the hadler can set them dynamically at # handle time self.chain1.set_successor(self.chain2).set_successor(self.chain3)", "next handler in the chain\"\"\" pass @abstractmethod def handle(self, file) -> Optional[str]: \"\"\"Handle", "reformat_line = line[1:-1].split('\",\"') a = [list(map(float, elem.split(','))) for elem in reformat_line] self._temp.append(a) return", "__repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp class CSV(AbstractHandler): def __init__(self): self._successor =", "successor return successor @error_catcher def handle(self, FILE): \"\"\"Handle the *.json file event\"\"\" file_name,", "successor return successor @error_catcher def handle(self, FILE): \"\"\"Handle the *.txt file event\"\"\" file_name,", "as f: for line in f.read().split(',\\n'): reformat_line = line[1:-1].split('\",\"') a = [list(map(float, elem.split(',')))", "elem.split(','))) for elem in reformat_line] self._temp.append(a) return self.getter() else: return self._successor.handle(FILE) def __repr__(self):", "method(*args, **kwargs) except (AttributeError, ValueError): return \"File error: указан неверный тип файла.\" return", "open(FILE, \"r\") as f: self.deserialization(json.load(f)) return self.getter() else: return 
self._successor.handle(FILE) def deserialization(self, data):", "successor @error_catcher def handle(self, FILE): \"\"\"Handle the *.json file event\"\"\" file_name, file_ext =", "at # handle time self.chain1.set_successor(self.chain2).set_successor(self.chain3) def client_code(self): FILE = str(input(\"Input file name: \"))", "FILE): \"\"\"Handle the *.json file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if file_ext ==", "*.json file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if file_ext == self.__class__.__name__.lower(): with open(FILE,", "**kwargs): try: return method(*args, **kwargs) except (AttributeError, ValueError): return \"File error: указан неверный", "except (AttributeError, ValueError): return \"File error: указан неверный тип файла.\" return wrapper class", "event\"\"\" file_name, file_ext = str(FILE).split(\".\") if file_ext == self.__class__.__name__.lower(): with open(FILE, \"r\") as", "TXT() self.chain3 = CSV() # set the chain of responsibility # The Client", "\"\"\"The Interface for handling requests.\"\"\" @abstractmethod def set_successor(self, successor): \"\"\"Set the next handler", "pass class JSON(AbstractHandler): def __init__(self): self._successor = None self._temp: list = list() def", "set them dynamically at # handle time self.chain1.set_successor(self.chain2).set_successor(self.chain3) def client_code(self): FILE = str(input(\"Input", "try: return method(*args, **kwargs) except (AttributeError, ValueError): return \"File error: указан неверный тип", "self.deserialization(json.load(f)) return self.getter() else: return self._successor.handle(FILE) def deserialization(self, data): length = len(data['x']) for", "data['y'][i])) temp = [x_temp, y_temp] self._temp.append(temp) def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return", "== self.__class__.__name__.lower(): with open(FILE, \"r\") as f: for line in f.read().split(',\\n'): reformat_line =", "handle time 
self.chain1.set_successor(self.chain2).set_successor(self.chain3) def client_code(self): FILE = str(input(\"Input file name: \")) return self.chain1.handle(FILE)", "return \"File error: указан неверный тип файла.\" return wrapper class AbstractHandler(metaclass=ABCMeta): \"\"\"The Interface", "data): length = len(data['x']) for i in range(length): x_temp = list(map(float, data['x'][i])) y_temp", "y_temp] self._temp.append(temp) def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp class TXT(AbstractHandler): def", "*.txt file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if file_ext == self.__class__.__name__.lower(): with open(FILE,", "TXT(AbstractHandler): def __init__(self): self._successor = None self._temp: list = list() def set_successor(self, successor):", "deserialization(self, data): length = len(data['x']) for i in range(length): x_temp = list(map(float, data['x'][i]))", "in reformat_line] self._temp.append(a) return self.getter() else: return self._successor.handle(FILE) def __repr__(self): return f\"{self.__class__.__name__}\" def", "y_temp = list(map(float, data['y'][i])) temp = [x_temp, y_temp] self._temp.append(temp) def __repr__(self): return f\"{self.__class__.__name__}\"", "f.read().split('\\n'): reformat_line = line[1:-1].split('];[') a = [list(map(float, elem.split(','))) for elem in reformat_line] self._temp.append(a)", "self._successor = successor return successor @error_catcher def handle(self, FILE): \"\"\"Handle the *.csv file", "hadler can set them dynamically at # handle time self.chain1.set_successor(self.chain2).set_successor(self.chain3) def client_code(self): FILE", "return wrapper class AbstractHandler(metaclass=ABCMeta): \"\"\"The Interface for handling requests.\"\"\" @abstractmethod def set_successor(self, successor):", "event\"\"\" pass class JSON(AbstractHandler): def __init__(self): self._successor = None self._temp: list = list()", "x_temp = list(map(float, data['x'][i])) 
y_temp = list(map(float, data['y'][i])) temp = [x_temp, y_temp] self._temp.append(temp)", "list(map(float, data['y'][i])) temp = [x_temp, y_temp] self._temp.append(temp) def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self):", "\"\"\"Handle the *.csv file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if file_ext == self.__class__.__name__.lower():", "*.csv file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if file_ext == self.__class__.__name__.lower(): with open(FILE,", "self.chain2 = TXT() self.chain3 = CSV() # set the chain of responsibility #", "can set them dynamically at # handle time self.chain1.set_successor(self.chain2).set_successor(self.chain3) def client_code(self): FILE =", "def deserialization(self, data): length = len(data['x']) for i in range(length): x_temp = list(map(float,", "else: return self._successor.handle(FILE) def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp class FilesChain:", "\"\"\"Handle the *.json file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if file_ext == self.__class__.__name__.lower():", "указан неверный тип файла.\" return wrapper class AbstractHandler(metaclass=ABCMeta): \"\"\"The Interface for handling requests.\"\"\"", "ABCMeta, abstractmethod from typing import Optional import json def error_catcher(method): def wrapper(*args, **kwargs):", "self.getter() else: return self._successor.handle(FILE) def deserialization(self, data): length = len(data['x']) for i in", "successor @error_catcher def handle(self, FILE): \"\"\"Handle the *.txt file event\"\"\" file_name, file_ext =", "# the hadler can set them dynamically at # handle time self.chain1.set_successor(self.chain2).set_successor(self.chain3) def", "line[1:-1].split('\",\"') a = [list(map(float, elem.split(','))) for elem in reformat_line] self._temp.append(a) return self.getter() else:", "# The Client may compose chains once or # the hadler can set", "self._temp class 
FilesChain: def __init__(self): self.chain1 = JSON() self.chain2 = TXT() self.chain3 =", "import ABCMeta, abstractmethod from typing import Optional import json def error_catcher(method): def wrapper(*args,", "elem in reformat_line] self._temp.append(a) return self.getter() else: return self._successor.handle(FILE) def __repr__(self): return f\"{self.__class__.__name__}\"", "= list() def set_successor(self, successor): self._successor = successor return successor @error_catcher def handle(self,", "them dynamically at # handle time self.chain1.set_successor(self.chain2).set_successor(self.chain3) def client_code(self): FILE = str(input(\"Input file", "def handle(self, FILE): \"\"\"Handle the *.json file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if", "import Optional import json def error_catcher(method): def wrapper(*args, **kwargs): try: return method(*args, **kwargs)", "in the chain\"\"\" pass @abstractmethod def handle(self, file) -> Optional[str]: \"\"\"Handle the event\"\"\"", "[x_temp, y_temp] self._temp.append(temp) def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp class TXT(AbstractHandler):", "= line[1:-1].split('];[') a = [list(map(float, elem.split(','))) for elem in reformat_line] self._temp.append(a) return self.getter()", "handler in the chain\"\"\" pass @abstractmethod def handle(self, file) -> Optional[str]: \"\"\"Handle the", "the next handler in the chain\"\"\" pass @abstractmethod def handle(self, file) -> Optional[str]:", "self._temp.append(a) return self.getter() else: return self._successor.handle(FILE) def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return", "def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp class FilesChain: def __init__(self): self.chain1", "pass @abstractmethod def handle(self, file) -> Optional[str]: \"\"\"Handle the event\"\"\" pass class JSON(AbstractHandler):", "abc import ABCMeta, 
abstractmethod from typing import Optional import json def error_catcher(method): def", "def error_catcher(method): def wrapper(*args, **kwargs): try: return method(*args, **kwargs) except (AttributeError, ValueError): return", "handle(self, FILE): \"\"\"Handle the *.txt file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if file_ext", "f: for line in f.read().split('\\n'): reformat_line = line[1:-1].split('];[') a = [list(map(float, elem.split(','))) for", "return self.getter() else: return self._successor.handle(FILE) def deserialization(self, data): length = len(data['x']) for i", "class FilesChain: def __init__(self): self.chain1 = JSON() self.chain2 = TXT() self.chain3 = CSV()", "from abc import ABCMeta, abstractmethod from typing import Optional import json def error_catcher(method):", "__init__(self): self.chain1 = JSON() self.chain2 = TXT() self.chain3 = CSV() # set the", "dynamically at # handle time self.chain1.set_successor(self.chain2).set_successor(self.chain3) def client_code(self): FILE = str(input(\"Input file name:", "# handle time self.chain1.set_successor(self.chain2).set_successor(self.chain3) def client_code(self): FILE = str(input(\"Input file name: \")) return", "def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp class CSV(AbstractHandler): def __init__(self): self._successor", "@error_catcher def handle(self, FILE): \"\"\"Handle the *.txt file event\"\"\" file_name, file_ext = str(FILE).split(\".\")", "open(FILE, \"r\") as f: for line in f.read().split('\\n'): reformat_line = line[1:-1].split('];[') a =", "def handle(self, file) -> Optional[str]: \"\"\"Handle the event\"\"\" pass class JSON(AbstractHandler): def __init__(self):", "compose chains once or # the hadler can set them dynamically at #", "data['x'][i])) y_temp = list(map(float, data['y'][i])) temp = [x_temp, y_temp] self._temp.append(temp) def __repr__(self): return", "[list(map(float, elem.split(','))) for elem in reformat_line] 
self._temp.append(a) return self.getter() else: return self._successor.handle(FILE) def", "successor): self._successor = successor return successor @error_catcher def handle(self, FILE): \"\"\"Handle the *.txt", "typing import Optional import json def error_catcher(method): def wrapper(*args, **kwargs): try: return method(*args,", "= len(data['x']) for i in range(length): x_temp = list(map(float, data['x'][i])) y_temp = list(map(float,", "class CSV(AbstractHandler): def __init__(self): self._successor = None self._temp: list = list() def set_successor(self,", "class JSON(AbstractHandler): def __init__(self): self._successor = None self._temp: list = list() def set_successor(self,", "in range(length): x_temp = list(map(float, data['x'][i])) y_temp = list(map(float, data['y'][i])) temp = [x_temp,", "self._successor = successor return successor @error_catcher def handle(self, FILE): \"\"\"Handle the *.txt file", "f\"{self.__class__.__name__}\" def getter(self): return self._temp class TXT(AbstractHandler): def __init__(self): self._successor = None self._temp:", "for handling requests.\"\"\" @abstractmethod def set_successor(self, successor): \"\"\"Set the next handler in the", "def set_successor(self, successor): self._successor = successor return successor @error_catcher def handle(self, FILE): \"\"\"Handle", "self.__class__.__name__.lower(): with open(FILE, \"r\") as f: for line in f.read().split('\\n'): reformat_line = line[1:-1].split('];[')", "= list(map(float, data['x'][i])) y_temp = list(map(float, data['y'][i])) temp = [x_temp, y_temp] self._temp.append(temp) def", "f\"{self.__class__.__name__}\" def getter(self): return self._temp class FilesChain: def __init__(self): self.chain1 = JSON() self.chain2", "JSON() self.chain2 = TXT() self.chain3 = CSV() # set the chain of responsibility", "with open(FILE, \"r\") as f: for line in f.read().split(',\\n'): reformat_line = line[1:-1].split('\",\"') a", "successor @error_catcher def handle(self, FILE): \"\"\"Handle 
the *.csv file event\"\"\" file_name, file_ext =", "self._successor.handle(FILE) def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp class FilesChain: def __init__(self):", "for i in range(length): x_temp = list(map(float, data['x'][i])) y_temp = list(map(float, data['y'][i])) temp", "the event\"\"\" pass class JSON(AbstractHandler): def __init__(self): self._successor = None self._temp: list =", "**kwargs) except (AttributeError, ValueError): return \"File error: указан неверный тип файла.\" return wrapper", "неверный тип файла.\" return wrapper class AbstractHandler(metaclass=ABCMeta): \"\"\"The Interface for handling requests.\"\"\" @abstractmethod", "the *.csv file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if file_ext == self.__class__.__name__.lower(): with", "return f\"{self.__class__.__name__}\" def getter(self): return self._temp class CSV(AbstractHandler): def __init__(self): self._successor = None", "list() def set_successor(self, successor): self._successor = successor return successor @error_catcher def handle(self, FILE):", "= str(FILE).split(\".\") if file_ext == self.__class__.__name__.lower(): with open(FILE, \"r\") as f: self.deserialization(json.load(f)) return", "i in range(length): x_temp = list(map(float, data['x'][i])) y_temp = list(map(float, data['y'][i])) temp =", "line in f.read().split(',\\n'): reformat_line = line[1:-1].split('\",\"') a = [list(map(float, elem.split(','))) for elem in", "f: for line in f.read().split(',\\n'): reformat_line = line[1:-1].split('\",\"') a = [list(map(float, elem.split(','))) for", "def set_successor(self, successor): \"\"\"Set the next handler in the chain\"\"\" pass @abstractmethod def", "тип файла.\" return wrapper class AbstractHandler(metaclass=ABCMeta): \"\"\"The Interface for handling requests.\"\"\" @abstractmethod def", "self._successor = successor return successor @error_catcher def handle(self, FILE): \"\"\"Handle the *.json file", "The 
Client may compose chains once or # the hadler can set them", "return self._successor.handle(FILE) def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp class CSV(AbstractHandler): def", "def wrapper(*args, **kwargs): try: return method(*args, **kwargs) except (AttributeError, ValueError): return \"File error:", "file_ext == self.__class__.__name__.lower(): with open(FILE, \"r\") as f: self.deserialization(json.load(f)) return self.getter() else: return", "self.chain3 = CSV() # set the chain of responsibility # The Client may", "файла.\" return wrapper class AbstractHandler(metaclass=ABCMeta): \"\"\"The Interface for handling requests.\"\"\" @abstractmethod def set_successor(self,", "chain\"\"\" pass @abstractmethod def handle(self, file) -> Optional[str]: \"\"\"Handle the event\"\"\" pass class", "return successor @error_catcher def handle(self, FILE): \"\"\"Handle the *.csv file event\"\"\" file_name, file_ext", "self._successor.handle(FILE) def deserialization(self, data): length = len(data['x']) for i in range(length): x_temp =", "return f\"{self.__class__.__name__}\" def getter(self): return self._temp class FilesChain: def __init__(self): self.chain1 = JSON()", "return self.getter() else: return self._successor.handle(FILE) def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp", "error_catcher(method): def wrapper(*args, **kwargs): try: return method(*args, **kwargs) except (AttributeError, ValueError): return \"File", "def getter(self): return self._temp class FilesChain: def __init__(self): self.chain1 = JSON() self.chain2 =", "getter(self): return self._temp class FilesChain: def __init__(self): self.chain1 = JSON() self.chain2 = TXT()", "= TXT() self.chain3 = CSV() # set the chain of responsibility # The", "class AbstractHandler(metaclass=ABCMeta): \"\"\"The Interface for handling requests.\"\"\" @abstractmethod def set_successor(self, successor): \"\"\"Set the", "= 
line[1:-1].split('\",\"') a = [list(map(float, elem.split(','))) for elem in reformat_line] self._temp.append(a) return self.getter()", "(AttributeError, ValueError): return \"File error: указан неверный тип файла.\" return wrapper class AbstractHandler(metaclass=ABCMeta):", "the *.json file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if file_ext == self.__class__.__name__.lower(): with", "the chain of responsibility # The Client may compose chains once or #", "def __init__(self): self.chain1 = JSON() self.chain2 = TXT() self.chain3 = CSV() # set", "= str(FILE).split(\".\") if file_ext == self.__class__.__name__.lower(): with open(FILE, \"r\") as f: for line", "of responsibility # The Client may compose chains once or # the hadler", "= successor return successor @error_catcher def handle(self, FILE): \"\"\"Handle the *.csv file event\"\"\"", "import json def error_catcher(method): def wrapper(*args, **kwargs): try: return method(*args, **kwargs) except (AttributeError,", "return method(*args, **kwargs) except (AttributeError, ValueError): return \"File error: указан неверный тип файла.\"", "self.chain1 = JSON() self.chain2 = TXT() self.chain3 = CSV() # set the chain", "@abstractmethod def set_successor(self, successor): \"\"\"Set the next handler in the chain\"\"\" pass @abstractmethod", "file_ext = str(FILE).split(\".\") if file_ext == self.__class__.__name__.lower(): with open(FILE, \"r\") as f: self.deserialization(json.load(f))", "= [list(map(float, elem.split(','))) for elem in reformat_line] self._temp.append(a) return self.getter() else: return self._successor.handle(FILE)", "f.read().split(',\\n'): reformat_line = line[1:-1].split('\",\"') a = [list(map(float, elem.split(','))) for elem in reformat_line] self._temp.append(a)", "Client may compose chains once or # the hadler can set them dynamically", "str(FILE).split(\".\") if file_ext == self.__class__.__name__.lower(): with open(FILE, \"r\") as f: self.deserialization(json.load(f)) return 
self.getter()", "self._temp class CSV(AbstractHandler): def __init__(self): self._successor = None self._temp: list = list() def", "return self._successor.handle(FILE) def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp class FilesChain: def", "line[1:-1].split('];[') a = [list(map(float, elem.split(','))) for elem in reformat_line] self._temp.append(a) return self.getter() else:", "handle(self, file) -> Optional[str]: \"\"\"Handle the event\"\"\" pass class JSON(AbstractHandler): def __init__(self): self._successor", "list = list() def set_successor(self, successor): self._successor = successor return successor @error_catcher def", "return f\"{self.__class__.__name__}\" def getter(self): return self._temp class TXT(AbstractHandler): def __init__(self): self._successor = None", "def handle(self, FILE): \"\"\"Handle the *.csv file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if", "def getter(self): return self._temp class CSV(AbstractHandler): def __init__(self): self._successor = None self._temp: list", "__repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp class FilesChain: def __init__(self): self.chain1 =", "\"\"\"Handle the *.txt file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if file_ext == self.__class__.__name__.lower():", "def __init__(self): self._successor = None self._temp: list = list() def set_successor(self, successor): self._successor", "with open(FILE, \"r\") as f: self.deserialization(json.load(f)) return self.getter() else: return self._successor.handle(FILE) def deserialization(self,", "temp = [x_temp, y_temp] self._temp.append(temp) def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp", "getter(self): return self._temp class CSV(AbstractHandler): def __init__(self): self._successor = None self._temp: list =", "\"\"\"Set the next handler in the chain\"\"\" pass @abstractmethod def handle(self, file) ->", 
"Interface for handling requests.\"\"\" @abstractmethod def set_successor(self, successor): \"\"\"Set the next handler in", "str(FILE).split(\".\") if file_ext == self.__class__.__name__.lower(): with open(FILE, \"r\") as f: for line in", "self._successor = None self._temp: list = list() def set_successor(self, successor): self._successor = successor", "# set the chain of responsibility # The Client may compose chains once", "if file_ext == self.__class__.__name__.lower(): with open(FILE, \"r\") as f: self.deserialization(json.load(f)) return self.getter() else:", "the *.txt file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if file_ext == self.__class__.__name__.lower(): with", "chains once or # the hadler can set them dynamically at # handle", "may compose chains once or # the hadler can set them dynamically at", "once or # the hadler can set them dynamically at # handle time", "json def error_catcher(method): def wrapper(*args, **kwargs): try: return method(*args, **kwargs) except (AttributeError, ValueError):", "set_successor(self, successor): self._successor = successor return successor @error_catcher def handle(self, FILE): \"\"\"Handle the", "self._temp.append(temp) def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp class TXT(AbstractHandler): def __init__(self):", "= None self._temp: list = list() def set_successor(self, successor): self._successor = successor return", "f: self.deserialization(json.load(f)) return self.getter() else: return self._successor.handle(FILE) def deserialization(self, data): length = len(data['x'])", "handling requests.\"\"\" @abstractmethod def set_successor(self, successor): \"\"\"Set the next handler in the chain\"\"\"", "file_ext = str(FILE).split(\".\") if file_ext == self.__class__.__name__.lower(): with open(FILE, \"r\") as f: for", "\"r\") as f: for line in f.read().split(',\\n'): reformat_line = line[1:-1].split('\",\"') a = [list(map(float,", "for elem in 
reformat_line] self._temp.append(a) return self.getter() else: return self._successor.handle(FILE) def __repr__(self): return", "successor): \"\"\"Set the next handler in the chain\"\"\" pass @abstractmethod def handle(self, file)", "@abstractmethod def handle(self, file) -> Optional[str]: \"\"\"Handle the event\"\"\" pass class JSON(AbstractHandler): def", "\"\"\"Handle the event\"\"\" pass class JSON(AbstractHandler): def __init__(self): self._successor = None self._temp: list", "= JSON() self.chain2 = TXT() self.chain3 = CSV() # set the chain of", "for line in f.read().split('\\n'): reformat_line = line[1:-1].split('];[') a = [list(map(float, elem.split(','))) for elem", "AbstractHandler(metaclass=ABCMeta): \"\"\"The Interface for handling requests.\"\"\" @abstractmethod def set_successor(self, successor): \"\"\"Set the next", "\"r\") as f: self.deserialization(json.load(f)) return self.getter() else: return self._successor.handle(FILE) def deserialization(self, data): length", "__repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp class TXT(AbstractHandler): def __init__(self): self._successor =", "getter(self): return self._temp class TXT(AbstractHandler): def __init__(self): self._successor = None self._temp: list =", "self._temp class TXT(AbstractHandler): def __init__(self): self._successor = None self._temp: list = list() def", "ValueError): return \"File error: указан неверный тип файла.\" return wrapper class AbstractHandler(metaclass=ABCMeta): \"\"\"The", "self.__class__.__name__.lower(): with open(FILE, \"r\") as f: for line in f.read().split(',\\n'): reformat_line = line[1:-1].split('\",\"')", "= successor return successor @error_catcher def handle(self, FILE): \"\"\"Handle the *.txt file event\"\"\"", "successor): self._successor = successor return successor @error_catcher def handle(self, FILE): \"\"\"Handle the *.csv", "wrapper(*args, **kwargs): try: return method(*args, **kwargs) except (AttributeError, 
ValueError): return \"File error: указан", "file) -> Optional[str]: \"\"\"Handle the event\"\"\" pass class JSON(AbstractHandler): def __init__(self): self._successor =", "as f: self.deserialization(json.load(f)) return self.getter() else: return self._successor.handle(FILE) def deserialization(self, data): length =", "return self._temp class CSV(AbstractHandler): def __init__(self): self._successor = None self._temp: list = list()", "= CSV() # set the chain of responsibility # The Client may compose", "error: указан неверный тип файла.\" return wrapper class AbstractHandler(metaclass=ABCMeta): \"\"\"The Interface for handling", "for line in f.read().split(',\\n'): reformat_line = line[1:-1].split('\",\"') a = [list(map(float, elem.split(','))) for elem", "\"File error: указан неверный тип файла.\" return wrapper class AbstractHandler(metaclass=ABCMeta): \"\"\"The Interface for", "successor return successor @error_catcher def handle(self, FILE): \"\"\"Handle the *.csv file event\"\"\" file_name,", "open(FILE, \"r\") as f: for line in f.read().split(',\\n'): reformat_line = line[1:-1].split('\",\"') a =", "set the chain of responsibility # The Client may compose chains once or", "self.__class__.__name__.lower(): with open(FILE, \"r\") as f: self.deserialization(json.load(f)) return self.getter() else: return self._successor.handle(FILE) def", "list(map(float, data['x'][i])) y_temp = list(map(float, data['y'][i])) temp = [x_temp, y_temp] self._temp.append(temp) def __repr__(self):", "in f.read().split('\\n'): reformat_line = line[1:-1].split('];[') a = [list(map(float, elem.split(','))) for elem in reformat_line]", "= [x_temp, y_temp] self._temp.append(temp) def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp class", "return self._temp class FilesChain: def __init__(self): self.chain1 = JSON() self.chain2 = TXT() self.chain3", "the hadler can set them dynamically at # handle time 
self.chain1.set_successor(self.chain2).set_successor(self.chain3) def client_code(self):", "== self.__class__.__name__.lower(): with open(FILE, \"r\") as f: self.deserialization(json.load(f)) return self.getter() else: return self._successor.handle(FILE)", "in f.read().split(',\\n'): reformat_line = line[1:-1].split('\",\"') a = [list(map(float, elem.split(','))) for elem in reformat_line]", "self._successor.handle(FILE) def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self): return self._temp class CSV(AbstractHandler): def __init__(self):", "= successor return successor @error_catcher def handle(self, FILE): \"\"\"Handle the *.json file event\"\"\"", "return successor @error_catcher def handle(self, FILE): \"\"\"Handle the *.txt file event\"\"\" file_name, file_ext", "FILE): \"\"\"Handle the *.txt file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if file_ext ==", "FILE): \"\"\"Handle the *.csv file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if file_ext ==", "def getter(self): return self._temp class TXT(AbstractHandler): def __init__(self): self._successor = None self._temp: list", "chain of responsibility # The Client may compose chains once or # the", "set_successor(self, successor): \"\"\"Set the next handler in the chain\"\"\" pass @abstractmethod def handle(self,", "Optional import json def error_catcher(method): def wrapper(*args, **kwargs): try: return method(*args, **kwargs) except", "None self._temp: list = list() def set_successor(self, successor): self._successor = successor return successor", "handle(self, FILE): \"\"\"Handle the *.json file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if file_ext", "self._temp: list = list() def set_successor(self, successor): self._successor = successor return successor @error_catcher", "file_name, file_ext = str(FILE).split(\".\") if file_ext == self.__class__.__name__.lower(): with open(FILE, \"r\") as f:", "from typing import Optional import json def 
error_catcher(method): def wrapper(*args, **kwargs): try: return", "return self._successor.handle(FILE) def deserialization(self, data): length = len(data['x']) for i in range(length): x_temp", "line in f.read().split('\\n'): reformat_line = line[1:-1].split('];[') a = [list(map(float, elem.split(','))) for elem in", "a = [list(map(float, elem.split(','))) for elem in reformat_line] self._temp.append(a) return self.getter() else: return", "as f: for line in f.read().split('\\n'): reformat_line = line[1:-1].split('];[') a = [list(map(float, elem.split(',')))", "CSV() # set the chain of responsibility # The Client may compose chains", "successor): self._successor = successor return successor @error_catcher def handle(self, FILE): \"\"\"Handle the *.json", "= list(map(float, data['y'][i])) temp = [x_temp, y_temp] self._temp.append(temp) def __repr__(self): return f\"{self.__class__.__name__}\" def", "file event\"\"\" file_name, file_ext = str(FILE).split(\".\") if file_ext == self.__class__.__name__.lower(): with open(FILE, \"r\")", "with open(FILE, \"r\") as f: for line in f.read().split('\\n'): reformat_line = line[1:-1].split('];[') a", "reformat_line] self._temp.append(a) return self.getter() else: return self._successor.handle(FILE) def __repr__(self): return f\"{self.__class__.__name__}\" def getter(self):", "responsibility # The Client may compose chains once or # the hadler can", "abstractmethod from typing import Optional import json def error_catcher(method): def wrapper(*args, **kwargs): try:", "len(data['x']) for i in range(length): x_temp = list(map(float, data['x'][i])) y_temp = list(map(float, data['y'][i]))", "Optional[str]: \"\"\"Handle the event\"\"\" pass class JSON(AbstractHandler): def __init__(self): self._successor = None self._temp:", "if file_ext == self.__class__.__name__.lower(): with open(FILE, \"r\") as f: for line in f.read().split('\\n'):", "f\"{self.__class__.__name__}\" def getter(self): return self._temp class CSV(AbstractHandler): 
def __init__(self): self._successor = None self._temp:", "FilesChain: def __init__(self): self.chain1 = JSON() self.chain2 = TXT() self.chain3 = CSV() #", "return self._temp class TXT(AbstractHandler): def __init__(self): self._successor = None self._temp: list = list()", "@error_catcher def handle(self, FILE): \"\"\"Handle the *.csv file event\"\"\" file_name, file_ext = str(FILE).split(\".\")", "-> Optional[str]: \"\"\"Handle the event\"\"\" pass class JSON(AbstractHandler): def __init__(self): self._successor = None", "__init__(self): self._successor = None self._temp: list = list() def set_successor(self, successor): self._successor =", "else: return self._successor.handle(FILE) def deserialization(self, data): length = len(data['x']) for i in range(length):" ]
[ "VakhitovHelper: \"\"\"Utility functions to prepare inputs for what is requested by functions in", "= _matlab.double(bear[:, 0, :].T.tolist()) xe = _matlab.double(bear[:, 1, :].T.tolist()) Xs = _matlab.double(line_3d[:, 0,", "_matlab.engine.start_matlab() print(\"DONE\", flush=True) return _matlab_engine class VakhitovHelper: \"\"\"Utility functions to prepare inputs for", "_matlab = import_module(\"matlab\") _matlab.engine = import_module(\"matlab.engine\") except ModuleNotFoundError: pass def init_matlab(): global _matlab_engine", "not None: return _matlab_engine # start the engine print(\"Launching MATLAB Engine: \", end=\"\",", "Rename vars to PnPL convention xxn = _matlab.double(bear[:-1].tolist()) XXw = _matlab.double(pts_3d.T.tolist()) return xxn,", "= _matlab.engine.start_matlab() print(\"DONE\", flush=True) return _matlab_engine class VakhitovHelper: \"\"\"Utility functions to prepare inputs", "# Rename vars to PnPL convention xxn = _matlab.double(bear[:-1].tolist()) XXw = _matlab.double(pts_3d.T.tolist()) return", "points into start and end points xs = _matlab.double(bear[:, 0, :].T.tolist()) xe =", ":]).T.tolist()) return xs, xe, Xs, Xe def points(pts_2d, pts_3d, K): # set up", "return _matlab_engine class VakhitovHelper: \"\"\"Utility functions to prepare inputs for what is requested", "= import_module(\"matlab\") _matlab.engine = import_module(\"matlab.engine\") except ModuleNotFoundError: pass def init_matlab(): global _matlab_engine if", "We adopt the same naming convention the author used. \"\"\" def lines(line_2d, line_3d,", "requested by functions in Vakhitov's pnpl toolbox. 
We adopt the same naming convention", "_matlab = None _matlab_engine = None try: _matlab = import_module(\"matlab\") _matlab.engine = import_module(\"matlab.engine\")", "\", end=\"\", flush=True) _matlab_engine = _matlab.engine.start_matlab() print(\"DONE\", flush=True) return _matlab_engine class VakhitovHelper: \"\"\"Utility", "is not None: return _matlab_engine # start the engine print(\"Launching MATLAB Engine: \",", "if _matlab_engine is not None: return _matlab_engine # start the engine print(\"Launching MATLAB", "end points xs = _matlab.double(bear[:, 0, :].T.tolist()) xe = _matlab.double(bear[:, 1, :].T.tolist()) Xs", "engine print(\"Launching MATLAB Engine: \", end=\"\", flush=True) _matlab_engine = _matlab.engine.start_matlab() print(\"DONE\", flush=True) return", "used. \"\"\" def lines(line_2d, line_3d, K): # set up bearing vectors bear =", "return None if _matlab_engine is not None: return _matlab_engine # start the engine", "= _matlab.double((line_3d[:, 1, :]).T.tolist()) return xs, xe, Xs, Xe def points(pts_2d, pts_3d, K):", "None try: _matlab = import_module(\"matlab\") _matlab.engine = import_module(\"matlab.engine\") except ModuleNotFoundError: pass def init_matlab():", "functions to prepare inputs for what is requested by functions in Vakhitov's pnpl", "line_3d, K): # set up bearing vectors bear = np.linalg.solve( K, np.vstack((line_2d.reshape((-1, 2)).T,", "prepare inputs for what is requested by functions in Vakhitov's pnpl toolbox. 
We", "# set up bearing vectors bear = np.linalg.solve(K, np.vstack((pts_2d.T, np.ones((1, len(pts_2d)))))) # Rename", "_matlab.double(bear[:, 1, :].T.tolist()) Xs = _matlab.double(line_3d[:, 0, :].T.tolist()) Xe = _matlab.double((line_3d[:, 1, :]).T.tolist())", "start the engine print(\"Launching MATLAB Engine: \", end=\"\", flush=True) _matlab_engine = _matlab.engine.start_matlab() print(\"DONE\",", "ModuleNotFoundError: pass def init_matlab(): global _matlab_engine if _matlab is None: return None if", "= _matlab.double(line_3d[:, 0, :].T.tolist()) Xe = _matlab.double((line_3d[:, 1, :]).T.tolist()) return xs, xe, Xs,", "points xs = _matlab.double(bear[:, 0, :].T.tolist()) xe = _matlab.double(bear[:, 1, :].T.tolist()) Xs =", "_matlab.double(bear[:, 0, :].T.tolist()) xe = _matlab.double(bear[:, 1, :].T.tolist()) Xs = _matlab.double(line_3d[:, 0, :].T.tolist())", "np # Dynamically import matlab _matlab = None _matlab_engine = None try: _matlab", "def points(pts_2d, pts_3d, K): # set up bearing vectors bear = np.linalg.solve(K, np.vstack((pts_2d.T,", "inputs for what is requested by functions in Vakhitov's pnpl toolbox. We adopt", "try: _matlab = import_module(\"matlab\") _matlab.engine = import_module(\"matlab.engine\") except ModuleNotFoundError: pass def init_matlab(): global", "_matlab_engine # start the engine print(\"Launching MATLAB Engine: \", end=\"\", flush=True) _matlab_engine =", "in Vakhitov's pnpl toolbox. We adopt the same naming convention the author used.", "bearing vectors bear = np.linalg.solve( K, np.vstack((line_2d.reshape((-1, 2)).T, np.ones((1, 2 * len(line_2d))))) ).T[:,", "by functions in Vakhitov's pnpl toolbox. 
We adopt the same naming convention the", "np.vstack((line_2d.reshape((-1, 2)).T, np.ones((1, 2 * len(line_2d))))) ).T[:, :-1] bear = bear.reshape((-1, 2, 2))", "= _matlab.double(bear[:, 1, :].T.tolist()) Xs = _matlab.double(line_3d[:, 0, :].T.tolist()) Xe = _matlab.double((line_3d[:, 1,", "for what is requested by functions in Vakhitov's pnpl toolbox. We adopt the", "K): # set up bearing vectors bear = np.linalg.solve( K, np.vstack((line_2d.reshape((-1, 2)).T, np.ones((1,", "def lines(line_2d, line_3d, K): # set up bearing vectors bear = np.linalg.solve( K,", "1, :]).T.tolist()) return xs, xe, Xs, Xe def points(pts_2d, pts_3d, K): # set", "2)).T, np.ones((1, 2 * len(line_2d))))) ).T[:, :-1] bear = bear.reshape((-1, 2, 2)) #", "what is requested by functions in Vakhitov's pnpl toolbox. We adopt the same", "import_module(\"matlab\") _matlab.engine = import_module(\"matlab.engine\") except ModuleNotFoundError: pass def init_matlab(): global _matlab_engine if _matlab", "the same naming convention the author used. \"\"\" def lines(line_2d, line_3d, K): #", "\"\"\"Utility functions to prepare inputs for what is requested by functions in Vakhitov's", "author used. 
\"\"\" def lines(line_2d, line_3d, K): # set up bearing vectors bear", "bear = np.linalg.solve(K, np.vstack((pts_2d.T, np.ones((1, len(pts_2d)))))) # Rename vars to PnPL convention xxn", "_matlab_engine class VakhitovHelper: \"\"\"Utility functions to prepare inputs for what is requested by", "_matlab_engine if _matlab is None: return None if _matlab_engine is not None: return", "None: return None if _matlab_engine is not None: return _matlab_engine # start the", "Xs, Xe def points(pts_2d, pts_3d, K): # set up bearing vectors bear =", "vectors bear = np.linalg.solve(K, np.vstack((pts_2d.T, np.ones((1, len(pts_2d)))))) # Rename vars to PnPL convention", "Engine: \", end=\"\", flush=True) _matlab_engine = _matlab.engine.start_matlab() print(\"DONE\", flush=True) return _matlab_engine class VakhitovHelper:", "np.linalg.solve(K, np.vstack((pts_2d.T, np.ones((1, len(pts_2d)))))) # Rename vars to PnPL convention xxn = _matlab.double(bear[:-1].tolist())", "# Split points into start and end points xs = _matlab.double(bear[:, 0, :].T.tolist())", "up bearing vectors bear = np.linalg.solve(K, np.vstack((pts_2d.T, np.ones((1, len(pts_2d)))))) # Rename vars to", "= bear.reshape((-1, 2, 2)) # Split points into start and end points xs", ":-1] bear = bear.reshape((-1, 2, 2)) # Split points into start and end", "bearing vectors bear = np.linalg.solve(K, np.vstack((pts_2d.T, np.ones((1, len(pts_2d)))))) # Rename vars to PnPL", "_matlab_engine = _matlab.engine.start_matlab() print(\"DONE\", flush=True) return _matlab_engine class VakhitovHelper: \"\"\"Utility functions to prepare", "the author used. \"\"\" def lines(line_2d, line_3d, K): # set up bearing vectors", "1, :].T.tolist()) Xs = _matlab.double(line_3d[:, 0, :].T.tolist()) Xe = _matlab.double((line_3d[:, 1, :]).T.tolist()) return", "functions in Vakhitov's pnpl toolbox. 
We adopt the same naming convention the author", "None _matlab_engine = None try: _matlab = import_module(\"matlab\") _matlab.engine = import_module(\"matlab.engine\") except ModuleNotFoundError:", "2)) # Split points into start and end points xs = _matlab.double(bear[:, 0,", "flush=True) _matlab_engine = _matlab.engine.start_matlab() print(\"DONE\", flush=True) return _matlab_engine class VakhitovHelper: \"\"\"Utility functions to", "same naming convention the author used. \"\"\" def lines(line_2d, line_3d, K): # set", "= np.linalg.solve( K, np.vstack((line_2d.reshape((-1, 2)).T, np.ones((1, 2 * len(line_2d))))) ).T[:, :-1] bear =", "Xe def points(pts_2d, pts_3d, K): # set up bearing vectors bear = np.linalg.solve(K,", "* len(line_2d))))) ).T[:, :-1] bear = bear.reshape((-1, 2, 2)) # Split points into", "matlab _matlab = None _matlab_engine = None try: _matlab = import_module(\"matlab\") _matlab.engine =", "global _matlab_engine if _matlab is None: return None if _matlab_engine is not None:", "_matlab is None: return None if _matlab_engine is not None: return _matlab_engine #", "flush=True) return _matlab_engine class VakhitovHelper: \"\"\"Utility functions to prepare inputs for what is", "_matlab_engine = None try: _matlab = import_module(\"matlab\") _matlab.engine = import_module(\"matlab.engine\") except ModuleNotFoundError: pass", "return xs, xe, Xs, Xe def points(pts_2d, pts_3d, K): # set up bearing", "vars to PnPL convention xxn = _matlab.double(bear[:-1].tolist()) XXw = _matlab.double(pts_3d.T.tolist()) return xxn, XXw", "pass def init_matlab(): global _matlab_engine if _matlab is None: return None if _matlab_engine", "naming convention the author used. 
\"\"\" def lines(line_2d, line_3d, K): # set up", "K, np.vstack((line_2d.reshape((-1, 2)).T, np.ones((1, 2 * len(line_2d))))) ).T[:, :-1] bear = bear.reshape((-1, 2,", "Xs = _matlab.double(line_3d[:, 0, :].T.tolist()) Xe = _matlab.double((line_3d[:, 1, :]).T.tolist()) return xs, xe,", "as np # Dynamically import matlab _matlab = None _matlab_engine = None try:", "up bearing vectors bear = np.linalg.solve( K, np.vstack((line_2d.reshape((-1, 2)).T, np.ones((1, 2 * len(line_2d)))))", "end=\"\", flush=True) _matlab_engine = _matlab.engine.start_matlab() print(\"DONE\", flush=True) return _matlab_engine class VakhitovHelper: \"\"\"Utility functions", "print(\"Launching MATLAB Engine: \", end=\"\", flush=True) _matlab_engine = _matlab.engine.start_matlab() print(\"DONE\", flush=True) return _matlab_engine", "numpy as np # Dynamically import matlab _matlab = None _matlab_engine = None", "if _matlab is None: return None if _matlab_engine is not None: return _matlab_engine", "MATLAB Engine: \", end=\"\", flush=True) _matlab_engine = _matlab.engine.start_matlab() print(\"DONE\", flush=True) return _matlab_engine class", "start and end points xs = _matlab.double(bear[:, 0, :].T.tolist()) xe = _matlab.double(bear[:, 1,", "np.vstack((pts_2d.T, np.ones((1, len(pts_2d)))))) # Rename vars to PnPL convention xxn = _matlab.double(bear[:-1].tolist()) XXw", "print(\"DONE\", flush=True) return _matlab_engine class VakhitovHelper: \"\"\"Utility functions to prepare inputs for what", "import_module(\"matlab.engine\") except ModuleNotFoundError: pass def init_matlab(): global _matlab_engine if _matlab is None: return", "return _matlab_engine # start the engine print(\"Launching MATLAB Engine: \", end=\"\", flush=True) _matlab_engine", "None if _matlab_engine is not None: return _matlab_engine # start the engine print(\"Launching", "import matlab _matlab = None _matlab_engine = None try: _matlab = import_module(\"matlab\") _matlab.engine", "convention the author used. 
\"\"\" def lines(line_2d, line_3d, K): # set up bearing", "xe = _matlab.double(bear[:, 1, :].T.tolist()) Xs = _matlab.double(line_3d[:, 0, :].T.tolist()) Xe = _matlab.double((line_3d[:,", "= None try: _matlab = import_module(\"matlab\") _matlab.engine = import_module(\"matlab.engine\") except ModuleNotFoundError: pass def", ":].T.tolist()) xe = _matlab.double(bear[:, 1, :].T.tolist()) Xs = _matlab.double(line_3d[:, 0, :].T.tolist()) Xe =", ").T[:, :-1] bear = bear.reshape((-1, 2, 2)) # Split points into start and", "except ModuleNotFoundError: pass def init_matlab(): global _matlab_engine if _matlab is None: return None", "np.ones((1, len(pts_2d)))))) # Rename vars to PnPL convention xxn = _matlab.double(bear[:-1].tolist()) XXw =", "len(line_2d))))) ).T[:, :-1] bear = bear.reshape((-1, 2, 2)) # Split points into start", "<filename>benchmarks/toolkit/methods/utils.py from importlib import import_module import numpy as np # Dynamically import matlab", "bear = bear.reshape((-1, 2, 2)) # Split points into start and end points", "toolbox. We adopt the same naming convention the author used. \"\"\" def lines(line_2d,", "points(pts_2d, pts_3d, K): # set up bearing vectors bear = np.linalg.solve(K, np.vstack((pts_2d.T, np.ones((1,", "is None: return None if _matlab_engine is not None: return _matlab_engine # start", "_matlab.double((line_3d[:, 1, :]).T.tolist()) return xs, xe, Xs, Xe def points(pts_2d, pts_3d, K): #", "\"\"\" def lines(line_2d, line_3d, K): # set up bearing vectors bear = np.linalg.solve(", "import_module import numpy as np # Dynamically import matlab _matlab = None _matlab_engine", "_matlab_engine is not None: return _matlab_engine # start the engine print(\"Launching MATLAB Engine:", "Xe = _matlab.double((line_3d[:, 1, :]).T.tolist()) return xs, xe, Xs, Xe def points(pts_2d, pts_3d,", "Vakhitov's pnpl toolbox. We adopt the same naming convention the author used. 
\"\"\"", "init_matlab(): global _matlab_engine if _matlab is None: return None if _matlab_engine is not", "None: return _matlab_engine # start the engine print(\"Launching MATLAB Engine: \", end=\"\", flush=True)", "_matlab.double(line_3d[:, 0, :].T.tolist()) Xe = _matlab.double((line_3d[:, 1, :]).T.tolist()) return xs, xe, Xs, Xe", "def init_matlab(): global _matlab_engine if _matlab is None: return None if _matlab_engine is", "into start and end points xs = _matlab.double(bear[:, 0, :].T.tolist()) xe = _matlab.double(bear[:,", "= np.linalg.solve(K, np.vstack((pts_2d.T, np.ones((1, len(pts_2d)))))) # Rename vars to PnPL convention xxn =", "_matlab.engine = import_module(\"matlab.engine\") except ModuleNotFoundError: pass def init_matlab(): global _matlab_engine if _matlab is", "from importlib import import_module import numpy as np # Dynamically import matlab _matlab", "vectors bear = np.linalg.solve( K, np.vstack((line_2d.reshape((-1, 2)).T, np.ones((1, 2 * len(line_2d))))) ).T[:, :-1]", "Dynamically import matlab _matlab = None _matlab_engine = None try: _matlab = import_module(\"matlab\")", "pts_3d, K): # set up bearing vectors bear = np.linalg.solve(K, np.vstack((pts_2d.T, np.ones((1, len(pts_2d))))))", "len(pts_2d)))))) # Rename vars to PnPL convention xxn = _matlab.double(bear[:-1].tolist()) XXw = _matlab.double(pts_3d.T.tolist())", "2, 2)) # Split points into start and end points xs = _matlab.double(bear[:,", "import import_module import numpy as np # Dynamically import matlab _matlab = None", "pnpl toolbox. We adopt the same naming convention the author used. \"\"\" def", "np.ones((1, 2 * len(line_2d))))) ).T[:, :-1] bear = bear.reshape((-1, 2, 2)) # Split", "set up bearing vectors bear = np.linalg.solve( K, np.vstack((line_2d.reshape((-1, 2)).T, np.ones((1, 2 *", "import numpy as np # Dynamically import matlab _matlab = None _matlab_engine =", "is requested by functions in Vakhitov's pnpl toolbox. 
We adopt the same naming", "set up bearing vectors bear = np.linalg.solve(K, np.vstack((pts_2d.T, np.ones((1, len(pts_2d)))))) # Rename vars", "# start the engine print(\"Launching MATLAB Engine: \", end=\"\", flush=True) _matlab_engine = _matlab.engine.start_matlab()", ":].T.tolist()) Xe = _matlab.double((line_3d[:, 1, :]).T.tolist()) return xs, xe, Xs, Xe def points(pts_2d,", "xs, xe, Xs, Xe def points(pts_2d, pts_3d, K): # set up bearing vectors", "np.linalg.solve( K, np.vstack((line_2d.reshape((-1, 2)).T, np.ones((1, 2 * len(line_2d))))) ).T[:, :-1] bear = bear.reshape((-1,", "= None _matlab_engine = None try: _matlab = import_module(\"matlab\") _matlab.engine = import_module(\"matlab.engine\") except", "xe, Xs, Xe def points(pts_2d, pts_3d, K): # set up bearing vectors bear", "= import_module(\"matlab.engine\") except ModuleNotFoundError: pass def init_matlab(): global _matlab_engine if _matlab is None:", "lines(line_2d, line_3d, K): # set up bearing vectors bear = np.linalg.solve( K, np.vstack((line_2d.reshape((-1,", "importlib import import_module import numpy as np # Dynamically import matlab _matlab =", "Split points into start and end points xs = _matlab.double(bear[:, 0, :].T.tolist()) xe", "0, :].T.tolist()) xe = _matlab.double(bear[:, 1, :].T.tolist()) Xs = _matlab.double(line_3d[:, 0, :].T.tolist()) Xe", "bear.reshape((-1, 2, 2)) # Split points into start and end points xs =", "bear = np.linalg.solve( K, np.vstack((line_2d.reshape((-1, 2)).T, np.ones((1, 2 * len(line_2d))))) ).T[:, :-1] bear", ":].T.tolist()) Xs = _matlab.double(line_3d[:, 0, :].T.tolist()) Xe = _matlab.double((line_3d[:, 1, :]).T.tolist()) return xs,", "0, :].T.tolist()) Xe = _matlab.double((line_3d[:, 1, :]).T.tolist()) return xs, xe, Xs, Xe def", "2 * len(line_2d))))) ).T[:, :-1] bear = bear.reshape((-1, 2, 2)) # Split points", "# set up bearing vectors bear = np.linalg.solve( K, np.vstack((line_2d.reshape((-1, 2)).T, np.ones((1, 2", "adopt the same naming convention the author 
used. \"\"\" def lines(line_2d, line_3d, K):", "# Dynamically import matlab _matlab = None _matlab_engine = None try: _matlab =", "K): # set up bearing vectors bear = np.linalg.solve(K, np.vstack((pts_2d.T, np.ones((1, len(pts_2d)))))) #", "xs = _matlab.double(bear[:, 0, :].T.tolist()) xe = _matlab.double(bear[:, 1, :].T.tolist()) Xs = _matlab.double(line_3d[:,", "the engine print(\"Launching MATLAB Engine: \", end=\"\", flush=True) _matlab_engine = _matlab.engine.start_matlab() print(\"DONE\", flush=True)", "class VakhitovHelper: \"\"\"Utility functions to prepare inputs for what is requested by functions", "to prepare inputs for what is requested by functions in Vakhitov's pnpl toolbox.", "and end points xs = _matlab.double(bear[:, 0, :].T.tolist()) xe = _matlab.double(bear[:, 1, :].T.tolist())" ]
#5
# Build the fruit list once, then report whether "laranja" is present.
lista = ["laranja", "banana", "maçã", "goiaba", "romã"]
mensagem = (
    "Laranja está na lista."
    if "laranja" in lista
    else "Laranja não está na lista"
)
print(mensagem)
[ "# -*- coding: utf-8 -*- # @Time : 2019-01-17 13:48 # @Author :", "-*- # @Time : 2019-01-17 13:48 # @Author : pang # @File :", "-*- coding: utf-8 -*- # @Time : 2019-01-17 13:48 # @Author : pang", ": 2019-01-17 13:48 # @Author : pang # @File : __init__.py.py # @Software:", "@Time : 2019-01-17 13:48 # @Author : pang # @File : __init__.py.py #", "coding: utf-8 -*- # @Time : 2019-01-17 13:48 # @Author : pang #", "#!/usr/bin/env python # -*- coding: utf-8 -*- # @Time : 2019-01-17 13:48 #", "2019-01-17 13:48 # @Author : pang # @File : __init__.py.py # @Software: PyCharm", "python # -*- coding: utf-8 -*- # @Time : 2019-01-17 13:48 # @Author", "utf-8 -*- # @Time : 2019-01-17 13:48 # @Author : pang # @File", "# @Time : 2019-01-17 13:48 # @Author : pang # @File : __init__.py.py" ]
import numpy as np
import pygame

CAPTION = "dm_control viewer"


class DmControlViewer:
    """Minimal pygame window that displays rendered frames one at a time."""

    def __init__(self):
        # The window is created lazily on the first frame, once its size
        # is known; until then there is no screen surface.
        pygame.init()
        pygame.display.set_caption(CAPTION)
        self.screen = None

    def loop_once(self, image):
        """Blit one frame to the window, opening it on first use.

        Axes 0 and 1 of ``image`` are swapped before blitting — presumably
        the frame arrives as (height, width, ...) and pygame's surfarray
        expects (width, height, ...); confirm against the caller.
        """
        frame = np.swapaxes(image, 0, 1)
        if not self.screen:
            # First frame: open a window matching the frame's dimensions.
            self.screen = pygame.display.set_mode((frame.shape[0], frame.shape[1]))
        pygame.surfarray.blit_array(self.screen, frame)
        pygame.display.flip()

    def finish(self):
        """Shut pygame down, closing the window."""
        pygame.quit()
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler.  DO NOT EDIT!
# source: update_comment.proto
# NOTE(review): protoc-generated module — change update_comment.proto and
# re-run protoc instead of editing anything below by hand.

import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)

_sym_db = _symbol_database.Default()


from topboard_sdk.model.topboard import comment_pb2 as topboard__sdk_dot_model_dot_topboard_dot_comment__pb2
from topboard_sdk.model.topboard import issue_basic_pb2 as topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2
from topboard_sdk.model.cmdb import user_pb2 as topboard__sdk_dot_model_dot_cmdb_dot_user__pb2


# File descriptor: carries the serialized update_comment.proto definition.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='update_comment.proto',
  package='topboard',
  syntax='proto3',
  serialized_options=None,
  serialized_pb=_b('\n\x14update_comment.proto\x12\x08topboard\x1a)topboard_sdk/model/topboard/comment.proto\x1a-topboard_sdk/model/topboard/issue_basic.proto\x1a\"topboard_sdk/model/cmdb/user.proto\"M\n\x14UpdateCommentRequest\x12\x11\n\tcommentID\x18\x01 \x01(\t\x12\"\n\x07\x63omment\x18\x02 \x01(\x0b\x32\x11.topboard.Comment\"q\n\x1cUpdateCommentResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x1f\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x11.topboard.Commentb\x06proto3')
  ,
  dependencies=[topboard__sdk_dot_model_dot_topboard_dot_comment__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_cmdb_dot_user__pb2.DESCRIPTOR,])




# Message descriptor for topboard.UpdateCommentRequest
# (fields: commentID string=1, comment topboard.Comment=2).
_UPDATECOMMENTREQUEST = _descriptor.Descriptor(
  name='UpdateCommentRequest',
  full_name='topboard.UpdateCommentRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='commentID', full_name='topboard.UpdateCommentRequest.commentID', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='comment', full_name='topboard.UpdateCommentRequest.comment', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=160,
  serialized_end=237,
)


# Message descriptor for topboard.UpdateCommentResponseWrapper
# (fields: code int32=1, codeExplain string=2, error string=3,
#  data topboard.Comment=4).
_UPDATECOMMENTRESPONSEWRAPPER = _descriptor.Descriptor(
  name='UpdateCommentResponseWrapper',
  full_name='topboard.UpdateCommentResponseWrapper',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='code', full_name='topboard.UpdateCommentResponseWrapper.code', index=0,
      number=1, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='codeExplain', full_name='topboard.UpdateCommentResponseWrapper.codeExplain', index=1,
      number=2, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='error', full_name='topboard.UpdateCommentResponseWrapper.error', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
    _descriptor.FieldDescriptor(
      name='data', full_name='topboard.UpdateCommentResponseWrapper.data', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=239,
  serialized_end=352,
)

# Wire up cross-file message-type references and register the descriptors.
_UPDATECOMMENTREQUEST.fields_by_name['comment'].message_type = topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT
_UPDATECOMMENTRESPONSEWRAPPER.fields_by_name['data'].message_type = topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT
DESCRIPTOR.message_types_by_name['UpdateCommentRequest'] = _UPDATECOMMENTREQUEST
DESCRIPTOR.message_types_by_name['UpdateCommentResponseWrapper'] = _UPDATECOMMENTRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message classes generated from the descriptors above.
UpdateCommentRequest = _reflection.GeneratedProtocolMessageType('UpdateCommentRequest', (_message.Message,), {
  'DESCRIPTOR' : _UPDATECOMMENTREQUEST,
  '__module__' : 'update_comment_pb2'
  # @@protoc_insertion_point(class_scope:topboard.UpdateCommentRequest)
  })
_sym_db.RegisterMessage(UpdateCommentRequest)

UpdateCommentResponseWrapper = _reflection.GeneratedProtocolMessageType('UpdateCommentResponseWrapper', (_message.Message,), {
  'DESCRIPTOR' : _UPDATECOMMENTRESPONSEWRAPPER,
  '__module__' : 'update_comment_pb2'
  # @@protoc_insertion_point(class_scope:topboard.UpdateCommentResponseWrapper)
  })
_sym_db.RegisterMessage(UpdateCommentResponseWrapper)


# @@protoc_insertion_point(module_scope)
has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None,", "= _descriptor.Descriptor( name='UpdateCommentResponseWrapper', full_name='topboard.UpdateCommentResponseWrapper', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='topboard.UpdateCommentResponseWrapper.code', index=0, number=1,", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='comment', full_name='topboard.UpdateCommentRequest.comment', index=1, number=2, type=11, cpp_type=10, label=1,", "file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='topboard.UpdateCommentResponseWrapper.error', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None,", "serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='comment', full_name='topboard.UpdateCommentRequest.comment', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None,", "x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message", "file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='topboard.UpdateCommentResponseWrapper.code', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0,", "-*- # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source:", "full_name='topboard.UpdateCommentResponseWrapper.data', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None,", "topboard_sdk.model.cmdb import user_pb2 as topboard__sdk_dot_model_dot_cmdb_dot_user__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='update_comment.proto', package='topboard', syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x14update_comment.proto\\x12\\x08topboard\\x1a)topboard_sdk/model/topboard/comment.proto\\x1a-topboard_sdk/model/topboard/issue_basic.proto\\x1a\\\"topboard_sdk/model/cmdb/user.proto\\\"M\\n\\x14UpdateCommentRequest\\x12\\x11\\n\\tcommentID\\x18\\x01", "utf-8 -*- # Generated by the protocol buffer compiler. DO NOT EDIT! #", "# -*- coding: utf-8 -*- # Generated by the protocol buffer compiler. DO", "= _reflection.GeneratedProtocolMessageType('UpdateCommentRequest', (_message.Message,), { 'DESCRIPTOR' : _UPDATECOMMENTREQUEST, '__module__' : 'update_comment_pb2' # @@protoc_insertion_point(class_scope:topboard.UpdateCommentRequest) })", "enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=160, serialized_end=237, ) _UPDATECOMMENTRESPONSEWRAPPER =", "serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=239, serialized_end=352, ) _UPDATECOMMENTREQUEST.fields_by_name['comment'].message_type = topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT _UPDATECOMMENTRESPONSEWRAPPER.fields_by_name['data'].message_type", "topboard_sdk.model.topboard import issue_basic_pb2 as topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2 from topboard_sdk.model.cmdb import user_pb2 as topboard__sdk_dot_model_dot_cmdb_dot_user__pb2 DESCRIPTOR =", 
"\\x01(\\x0b\\x32\\x11.topboard.Comment\\\"q\\n\\x1cUpdateCommentResponseWrapper\\x12\\x0c\\n\\x04\\x63ode\\x18\\x01 \\x01(\\x05\\x12\\x13\\n\\x0b\\x63odeExplain\\x18\\x02 \\x01(\\t\\x12\\r\\n\\x05\\x65rror\\x18\\x03 \\x01(\\t\\x12\\x1f\\n\\x04\\x64\\x61ta\\x18\\x04 \\x01(\\x0b\\x32\\x11.topboard.Commentb\\x06proto3') , dependencies=[topboard__sdk_dot_model_dot_topboard_dot_comment__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_cmdb_dot_user__pb2.DESCRIPTOR,]) _UPDATECOMMENTREQUEST = _descriptor.Descriptor( name='UpdateCommentRequest', full_name='topboard.UpdateCommentRequest', filename=None,", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='comment', full_name='topboard.UpdateCommentRequest.comment', index=1, number=2, type=11,", "nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=160, serialized_end=237, ) _UPDATECOMMENTRESPONSEWRAPPER", "as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default()", "name='error', full_name='topboard.UpdateCommentResponseWrapper.error', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False,", "= topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT _UPDATECOMMENTRESPONSEWRAPPER.fields_by_name['data'].message_type = topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT DESCRIPTOR.message_types_by_name['UpdateCommentRequest'] = _UPDATECOMMENTREQUEST DESCRIPTOR.message_types_by_name['UpdateCommentResponseWrapper'] = _UPDATECOMMENTRESPONSEWRAPPER _sym_db.RegisterFileDescriptor(DESCRIPTOR) 
UpdateCommentRequest", "file=DESCRIPTOR), _descriptor.FieldDescriptor( name='comment', full_name='topboard.UpdateCommentRequest.comment', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None,", "import user_pb2 as topboard__sdk_dot_model_dot_cmdb_dot_user__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='update_comment.proto', package='topboard', syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x14update_comment.proto\\x12\\x08topboard\\x1a)topboard_sdk/model/topboard/comment.proto\\x1a-topboard_sdk/model/topboard/issue_basic.proto\\x1a\\\"topboard_sdk/model/cmdb/user.proto\\\"M\\n\\x14UpdateCommentRequest\\x12\\x11\\n\\tcommentID\\x18\\x01 \\x01(\\t\\x12\\\"\\n\\x07\\x63omment\\x18\\x02", "name='data', full_name='topboard.UpdateCommentResponseWrapper.data', index=3, number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False,", "import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from topboard_sdk.model.topboard import comment_pb2", "source: update_comment.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf", "-*- coding: utf-8 -*- # Generated by the protocol buffer compiler. 
DO NOT", "symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from topboard_sdk.model.topboard import comment_pb2 as", "topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2 from topboard_sdk.model.cmdb import user_pb2 as topboard__sdk_dot_model_dot_cmdb_dot_user__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='update_comment.proto', package='topboard', syntax='proto3',", "full_name='topboard.UpdateCommentResponseWrapper', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='topboard.UpdateCommentResponseWrapper.code', index=0, number=1, type=5, cpp_type=1, label=1,", "cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='codeExplain',", "user_pb2 as topboard__sdk_dot_model_dot_cmdb_dot_user__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='update_comment.proto', package='topboard', syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x14update_comment.proto\\x12\\x08topboard\\x1a)topboard_sdk/model/topboard/comment.proto\\x1a-topboard_sdk/model/topboard/issue_basic.proto\\x1a\\\"topboard_sdk/model/cmdb/user.proto\\\"M\\n\\x14UpdateCommentRequest\\x12\\x11\\n\\tcommentID\\x18\\x01 \\x01(\\t\\x12\\\"\\n\\x07\\x63omment\\x18\\x02 \\x01(\\x0b\\x32\\x11.topboard.Comment\\\"q\\n\\x1cUpdateCommentResponseWrapper\\x12\\x0c\\n\\x04\\x63ode\\x18\\x01", "full_name='topboard.UpdateCommentResponseWrapper.error', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None,", "serialized_end=352, ) _UPDATECOMMENTREQUEST.fields_by_name['comment'].message_type = 
topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT _UPDATECOMMENTRESPONSEWRAPPER.fields_by_name['data'].message_type = topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT DESCRIPTOR.message_types_by_name['UpdateCommentRequest'] = _UPDATECOMMENTREQUEST DESCRIPTOR.message_types_by_name['UpdateCommentResponseWrapper'] =", "and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from", "from topboard_sdk.model.topboard import comment_pb2 as topboard__sdk_dot_model_dot_topboard_dot_comment__pb2 from topboard_sdk.model.topboard import issue_basic_pb2 as topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2 from", "serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=160, serialized_end=237, ) _UPDATECOMMENTRESPONSEWRAPPER = _descriptor.Descriptor( name='UpdateCommentResponseWrapper',", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='topboard.UpdateCommentResponseWrapper.data', index=3, number=4, type=11, cpp_type=10, label=1,", "number=4, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),", "message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database", "index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None,", "label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), 
_descriptor.FieldDescriptor( name='error', full_name='topboard.UpdateCommentResponseWrapper.error',", "has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='codeExplain', full_name='topboard.UpdateCommentResponseWrapper.codeExplain', index=1,", "serialized_options=None, serialized_pb=_b('\\n\\x14update_comment.proto\\x12\\x08topboard\\x1a)topboard_sdk/model/topboard/comment.proto\\x1a-topboard_sdk/model/topboard/issue_basic.proto\\x1a\\\"topboard_sdk/model/cmdb/user.proto\\\"M\\n\\x14UpdateCommentRequest\\x12\\x11\\n\\tcommentID\\x18\\x01 \\x01(\\t\\x12\\\"\\n\\x07\\x63omment\\x18\\x02 \\x01(\\x0b\\x32\\x11.topboard.Comment\\\"q\\n\\x1cUpdateCommentResponseWrapper\\x12\\x0c\\n\\x04\\x63ode\\x18\\x01 \\x01(\\x05\\x12\\x13\\n\\x0b\\x63odeExplain\\x18\\x02 \\x01(\\t\\x12\\r\\n\\x05\\x65rror\\x18\\x03 \\x01(\\t\\x12\\x1f\\n\\x04\\x64\\x61ta\\x18\\x04 \\x01(\\x0b\\x32\\x11.topboard.Commentb\\x06proto3') , dependencies=[topboard__sdk_dot_model_dot_topboard_dot_comment__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_cmdb_dot_user__pb2.DESCRIPTOR,]) _UPDATECOMMENTREQUEST = _descriptor.Descriptor(", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ],", "_sym_db = _symbol_database.Default() from topboard_sdk.model.topboard import comment_pb2 as topboard__sdk_dot_model_dot_topboard_dot_comment__pb2 from topboard_sdk.model.topboard import issue_basic_pb2", "as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as", "_reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() 
from", "descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection", "Generated by the protocol buffer compiler. DO NOT EDIT! # source: update_comment.proto import", "_descriptor.FieldDescriptor( name='comment', full_name='topboard.UpdateCommentRequest.comment', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None,", "_symbol_database # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from topboard_sdk.model.topboard import comment_pb2 as topboard__sdk_dot_model_dot_topboard_dot_comment__pb2 from", "protocol buffer compiler. DO NOT EDIT! # source: update_comment.proto import sys _b=sys.version_info[0]<3 and", "_sym_db.RegisterFileDescriptor(DESCRIPTOR) UpdateCommentRequest = _reflection.GeneratedProtocolMessageType('UpdateCommentRequest', (_message.Message,), { 'DESCRIPTOR' : _UPDATECOMMENTREQUEST, '__module__' : 'update_comment_pb2' #", "_symbol_database.Default() from topboard_sdk.model.topboard import comment_pb2 as topboard__sdk_dot_model_dot_topboard_dot_comment__pb2 from topboard_sdk.model.topboard import issue_basic_pb2 as topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2", "NOT EDIT! 
# source: update_comment.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda", "serialized_start=239, serialized_end=352, ) _UPDATECOMMENTREQUEST.fields_by_name['comment'].message_type = topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT _UPDATECOMMENTRESPONSEWRAPPER.fields_by_name['data'].message_type = topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT DESCRIPTOR.message_types_by_name['UpdateCommentRequest'] = _UPDATECOMMENTREQUEST DESCRIPTOR.message_types_by_name['UpdateCommentResponseWrapper']", "extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='topboard.UpdateCommentResponseWrapper.error', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'),", "(_message.Message,), { 'DESCRIPTOR' : _UPDATECOMMENTREQUEST, '__module__' : 'update_comment_pb2' # @@protoc_insertion_point(class_scope:topboard.UpdateCommentRequest) }) _sym_db.RegisterMessage(UpdateCommentRequest) UpdateCommentResponseWrapper", "is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=160, serialized_end=237, ) _UPDATECOMMENTRESPONSEWRAPPER = _descriptor.Descriptor( name='UpdateCommentResponseWrapper', full_name='topboard.UpdateCommentResponseWrapper',", "name='commentID', full_name='topboard.UpdateCommentRequest.commentID', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False,", "buffer compiler. DO NOT EDIT! 
# source: update_comment.proto import sys _b=sys.version_info[0]<3 and (lambda", "], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=160,", "serialized_start=160, serialized_end=237, ) _UPDATECOMMENTRESPONSEWRAPPER = _descriptor.Descriptor( name='UpdateCommentResponseWrapper', full_name='topboard.UpdateCommentResponseWrapper', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor(", "_UPDATECOMMENTREQUEST DESCRIPTOR.message_types_by_name['UpdateCommentResponseWrapper'] = _UPDATECOMMENTRESPONSEWRAPPER _sym_db.RegisterFileDescriptor(DESCRIPTOR) UpdateCommentRequest = _reflection.GeneratedProtocolMessageType('UpdateCommentRequest', (_message.Message,), { 'DESCRIPTOR' : _UPDATECOMMENTREQUEST,", "cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error',", "containing_type=None, fields=[ _descriptor.FieldDescriptor( name='commentID', full_name='topboard.UpdateCommentRequest.commentID', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None,", "], serialized_start=160, serialized_end=237, ) _UPDATECOMMENTRESPONSEWRAPPER = _descriptor.Descriptor( name='UpdateCommentResponseWrapper', full_name='topboard.UpdateCommentResponseWrapper', filename=None, file=DESCRIPTOR, containing_type=None, fields=[", "from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from", "serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[", "serialized_options=None, file=DESCRIPTOR), 
_descriptor.FieldDescriptor( name='error', full_name='topboard.UpdateCommentResponseWrapper.error', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None,", "syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=160, serialized_end=237, ) _UPDATECOMMENTRESPONSEWRAPPER = _descriptor.Descriptor( name='UpdateCommentResponseWrapper', full_name='topboard.UpdateCommentResponseWrapper', filename=None,", "DESCRIPTOR.message_types_by_name['UpdateCommentRequest'] = _UPDATECOMMENTREQUEST DESCRIPTOR.message_types_by_name['UpdateCommentResponseWrapper'] = _UPDATECOMMENTRESPONSEWRAPPER _sym_db.RegisterFileDescriptor(DESCRIPTOR) UpdateCommentRequest = _reflection.GeneratedProtocolMessageType('UpdateCommentRequest', (_message.Message,), { 'DESCRIPTOR'", "label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='codeExplain', full_name='topboard.UpdateCommentResponseWrapper.codeExplain',", "number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),", "file=DESCRIPTOR), _descriptor.FieldDescriptor( name='codeExplain', full_name='topboard.UpdateCommentResponseWrapper.codeExplain', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None,", "enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=239, serialized_end=352, ) _UPDATECOMMENTREQUEST.fields_by_name['comment'].message_type =", "topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT 
DESCRIPTOR.message_types_by_name['UpdateCommentRequest'] = _UPDATECOMMENTREQUEST DESCRIPTOR.message_types_by_name['UpdateCommentResponseWrapper'] = _UPDATECOMMENTRESPONSEWRAPPER _sym_db.RegisterFileDescriptor(DESCRIPTOR) UpdateCommentRequest = _reflection.GeneratedProtocolMessageType('UpdateCommentRequest', (_message.Message,), {", "oneofs=[ ], serialized_start=160, serialized_end=237, ) _UPDATECOMMENTRESPONSEWRAPPER = _descriptor.Descriptor( name='UpdateCommentResponseWrapper', full_name='topboard.UpdateCommentResponseWrapper', filename=None, file=DESCRIPTOR, containing_type=None,", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='codeExplain', full_name='topboard.UpdateCommentResponseWrapper.codeExplain', index=1, number=2, type=9, cpp_type=9,", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False,", "], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=160, serialized_end=237, )", "'update_comment_pb2' # @@protoc_insertion_point(class_scope:topboard.UpdateCommentRequest) }) _sym_db.RegisterMessage(UpdateCommentRequest) UpdateCommentResponseWrapper = _reflection.GeneratedProtocolMessageType('UpdateCommentResponseWrapper', (_message.Message,), { 'DESCRIPTOR' : _UPDATECOMMENTRESPONSEWRAPPER,", "], serialized_start=239, serialized_end=352, ) _UPDATECOMMENTREQUEST.fields_by_name['comment'].message_type = topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT _UPDATECOMMENTRESPONSEWRAPPER.fields_by_name['data'].message_type = topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT DESCRIPTOR.message_types_by_name['UpdateCommentRequest'] = _UPDATECOMMENTREQUEST", "type=9, cpp_type=9, label=1, 
has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor(", "message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='topboard.UpdateCommentResponseWrapper.data', index=3, number=4, type=11,", "import comment_pb2 as topboard__sdk_dot_model_dot_topboard_dot_comment__pb2 from topboard_sdk.model.topboard import issue_basic_pb2 as topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2 from topboard_sdk.model.cmdb import", "= _UPDATECOMMENTREQUEST DESCRIPTOR.message_types_by_name['UpdateCommentResponseWrapper'] = _UPDATECOMMENTRESPONSEWRAPPER _sym_db.RegisterFileDescriptor(DESCRIPTOR) UpdateCommentRequest = _reflection.GeneratedProtocolMessageType('UpdateCommentRequest', (_message.Message,), { 'DESCRIPTOR' :", "import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import", "extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=239, serialized_end=352,", "type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor(", "number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),", "name='codeExplain', full_name='topboard.UpdateCommentResponseWrapper.codeExplain', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, 
default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False,", "topboard_sdk.model.topboard import comment_pb2 as topboard__sdk_dot_model_dot_topboard_dot_comment__pb2 from topboard_sdk.model.topboard import issue_basic_pb2 as topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2 from topboard_sdk.model.cmdb", "issue_basic_pb2 as topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2 from topboard_sdk.model.cmdb import user_pb2 as topboard__sdk_dot_model_dot_cmdb_dot_user__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='update_comment.proto',", "\\x01(\\t\\x12\\x1f\\n\\x04\\x64\\x61ta\\x18\\x04 \\x01(\\x0b\\x32\\x11.topboard.Commentb\\x06proto3') , dependencies=[topboard__sdk_dot_model_dot_topboard_dot_comment__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_cmdb_dot_user__pb2.DESCRIPTOR,]) _UPDATECOMMENTREQUEST = _descriptor.Descriptor( name='UpdateCommentRequest', full_name='topboard.UpdateCommentRequest', filename=None, file=DESCRIPTOR, containing_type=None, fields=[", "filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='topboard.UpdateCommentResponseWrapper.code', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False,", "from topboard_sdk.model.cmdb import user_pb2 as topboard__sdk_dot_model_dot_cmdb_dot_user__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='update_comment.proto', package='topboard', syntax='proto3', serialized_options=None,", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='topboard.UpdateCommentResponseWrapper.data', index=3, number=4, type=11, cpp_type=10,", "from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database #", "_descriptor from 
google.protobuf import message as _message from google.protobuf import reflection as _reflection", "compiler. DO NOT EDIT! # source: update_comment.proto import sys _b=sys.version_info[0]<3 and (lambda x:x)", "# @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() from topboard_sdk.model.topboard import comment_pb2 as topboard__sdk_dot_model_dot_topboard_dot_comment__pb2 from topboard_sdk.model.topboard", "label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='comment', full_name='topboard.UpdateCommentRequest.comment',", "containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='codeExplain', full_name='topboard.UpdateCommentResponseWrapper.codeExplain', index=1, number=2, type=9, cpp_type=9, label=1,", "enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='error', full_name='topboard.UpdateCommentResponseWrapper.error', index=2, number=3, type=9, cpp_type=9,", "number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),", "label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[ ],", "@@protoc_insertion_point(class_scope:topboard.UpdateCommentRequest) }) _sym_db.RegisterMessage(UpdateCommentRequest) UpdateCommentResponseWrapper = _reflection.GeneratedProtocolMessageType('UpdateCommentResponseWrapper', (_message.Message,), { 
'DESCRIPTOR' : _UPDATECOMMENTRESPONSEWRAPPER, '__module__' :", "_UPDATECOMMENTREQUEST.fields_by_name['comment'].message_type = topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT _UPDATECOMMENTRESPONSEWRAPPER.fields_by_name['data'].message_type = topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT DESCRIPTOR.message_types_by_name['UpdateCommentRequest'] = _UPDATECOMMENTREQUEST DESCRIPTOR.message_types_by_name['UpdateCommentResponseWrapper'] = _UPDATECOMMENTRESPONSEWRAPPER _sym_db.RegisterFileDescriptor(DESCRIPTOR)", "default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='codeExplain', full_name='topboard.UpdateCommentResponseWrapper.codeExplain', index=1, number=2,", "_message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database", "name='UpdateCommentResponseWrapper', full_name='topboard.UpdateCommentResponseWrapper', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='topboard.UpdateCommentResponseWrapper.code', index=0, number=1, type=5, cpp_type=1,", "= topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT DESCRIPTOR.message_types_by_name['UpdateCommentRequest'] = _UPDATECOMMENTREQUEST DESCRIPTOR.message_types_by_name['UpdateCommentResponseWrapper'] = _UPDATECOMMENTRESPONSEWRAPPER _sym_db.RegisterFileDescriptor(DESCRIPTOR) UpdateCommentRequest = _reflection.GeneratedProtocolMessageType('UpdateCommentRequest', (_message.Message,),", "package='topboard', syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x14update_comment.proto\\x12\\x08topboard\\x1a)topboard_sdk/model/topboard/comment.proto\\x1a-topboard_sdk/model/topboard/issue_basic.proto\\x1a\\\"topboard_sdk/model/cmdb/user.proto\\\"M\\n\\x14UpdateCommentRequest\\x12\\x11\\n\\tcommentID\\x18\\x01 
\\x01(\\t\\x12\\\"\\n\\x07\\x63omment\\x18\\x02 \\x01(\\x0b\\x32\\x11.topboard.Comment\\\"q\\n\\x1cUpdateCommentResponseWrapper\\x12\\x0c\\n\\x04\\x63ode\\x18\\x01 \\x01(\\x05\\x12\\x13\\n\\x0b\\x63odeExplain\\x18\\x02 \\x01(\\t\\x12\\r\\n\\x05\\x65rror\\x18\\x03 \\x01(\\t\\x12\\x1f\\n\\x04\\x64\\x61ta\\x18\\x04 \\x01(\\x0b\\x32\\x11.topboard.Commentb\\x06proto3') , dependencies=[topboard__sdk_dot_model_dot_topboard_dot_comment__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_cmdb_dot_user__pb2.DESCRIPTOR,]) _UPDATECOMMENTREQUEST", "= _descriptor.FileDescriptor( name='update_comment.proto', package='topboard', syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x14update_comment.proto\\x12\\x08topboard\\x1a)topboard_sdk/model/topboard/comment.proto\\x1a-topboard_sdk/model/topboard/issue_basic.proto\\x1a\\\"topboard_sdk/model/cmdb/user.proto\\\"M\\n\\x14UpdateCommentRequest\\x12\\x11\\n\\tcommentID\\x18\\x01 \\x01(\\t\\x12\\\"\\n\\x07\\x63omment\\x18\\x02 \\x01(\\x0b\\x32\\x11.topboard.Comment\\\"q\\n\\x1cUpdateCommentResponseWrapper\\x12\\x0c\\n\\x04\\x63ode\\x18\\x01 \\x01(\\x05\\x12\\x13\\n\\x0b\\x63odeExplain\\x18\\x02 \\x01(\\t\\x12\\r\\n\\x05\\x65rror\\x18\\x03 \\x01(\\t\\x12\\x1f\\n\\x04\\x64\\x61ta\\x18\\x04 \\x01(\\x0b\\x32\\x11.topboard.Commentb\\x06proto3')", "cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='comment',", "_sym_db.RegisterMessage(UpdateCommentRequest) UpdateCommentResponseWrapper = _reflection.GeneratedProtocolMessageType('UpdateCommentResponseWrapper', (_message.Message,), { 'DESCRIPTOR' : _UPDATECOMMENTRESPONSEWRAPPER, '__module__' : 'update_comment_pb2' #", "# source: update_comment.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda 
x:x.encode('latin1')) from", "by the protocol buffer compiler. DO NOT EDIT! # source: update_comment.proto import sys", "cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), ], extensions=[", "default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='comment', full_name='topboard.UpdateCommentRequest.comment', index=1, number=2,", "= _UPDATECOMMENTRESPONSEWRAPPER _sym_db.RegisterFileDescriptor(DESCRIPTOR) UpdateCommentRequest = _reflection.GeneratedProtocolMessageType('UpdateCommentRequest', (_message.Message,), { 'DESCRIPTOR' : _UPDATECOMMENTREQUEST, '__module__' :", "extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='codeExplain', full_name='topboard.UpdateCommentResponseWrapper.codeExplain', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'),", "], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=239, serialized_end=352, ) _UPDATECOMMENTREQUEST.fields_by_name['comment'].message_type = topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT", "name='comment', full_name='topboard.UpdateCommentRequest.comment', index=1, number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False,", "google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database # @@protoc_insertion_point(imports)", "\\x01(\\t\\x12\\\"\\n\\x07\\x63omment\\x18\\x02 
\\x01(\\x0b\\x32\\x11.topboard.Comment\\\"q\\n\\x1cUpdateCommentResponseWrapper\\x12\\x0c\\n\\x04\\x63ode\\x18\\x01 \\x01(\\x05\\x12\\x13\\n\\x0b\\x63odeExplain\\x18\\x02 \\x01(\\t\\x12\\r\\n\\x05\\x65rror\\x18\\x03 \\x01(\\t\\x12\\x1f\\n\\x04\\x64\\x61ta\\x18\\x04 \\x01(\\x0b\\x32\\x11.topboard.Commentb\\x06proto3') , dependencies=[topboard__sdk_dot_model_dot_topboard_dot_comment__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_cmdb_dot_user__pb2.DESCRIPTOR,]) _UPDATECOMMENTREQUEST = _descriptor.Descriptor( name='UpdateCommentRequest', full_name='topboard.UpdateCommentRequest',", "number=2, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR),", "import message as _message from google.protobuf import reflection as _reflection from google.protobuf import", "comment_pb2 as topboard__sdk_dot_model_dot_topboard_dot_comment__pb2 from topboard_sdk.model.topboard import issue_basic_pb2 as topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2 from topboard_sdk.model.cmdb import user_pb2", ") _UPDATECOMMENTRESPONSEWRAPPER = _descriptor.Descriptor( name='UpdateCommentResponseWrapper', full_name='topboard.UpdateCommentResponseWrapper', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='topboard.UpdateCommentResponseWrapper.code',", "topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT _UPDATECOMMENTRESPONSEWRAPPER.fields_by_name['data'].message_type = topboard__sdk_dot_model_dot_topboard_dot_comment__pb2._COMMENT DESCRIPTOR.message_types_by_name['UpdateCommentRequest'] = _UPDATECOMMENTREQUEST DESCRIPTOR.message_types_by_name['UpdateCommentResponseWrapper'] = _UPDATECOMMENTRESPONSEWRAPPER _sym_db.RegisterFileDescriptor(DESCRIPTOR) UpdateCommentRequest =", 
"topboard__sdk_dot_model_dot_cmdb_dot_user__pb2 DESCRIPTOR = _descriptor.FileDescriptor( name='update_comment.proto', package='topboard', syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x14update_comment.proto\\x12\\x08topboard\\x1a)topboard_sdk/model/topboard/comment.proto\\x1a-topboard_sdk/model/topboard/issue_basic.proto\\x1a\\\"topboard_sdk/model/cmdb/user.proto\\\"M\\n\\x14UpdateCommentRequest\\x12\\x11\\n\\tcommentID\\x18\\x01 \\x01(\\t\\x12\\\"\\n\\x07\\x63omment\\x18\\x02 \\x01(\\x0b\\x32\\x11.topboard.Comment\\\"q\\n\\x1cUpdateCommentResponseWrapper\\x12\\x0c\\n\\x04\\x63ode\\x18\\x01 \\x01(\\x05\\x12\\x13\\n\\x0b\\x63odeExplain\\x18\\x02 \\x01(\\t\\x12\\r\\n\\x05\\x65rror\\x18\\x03", "\\x01(\\t\\x12\\r\\n\\x05\\x65rror\\x18\\x03 \\x01(\\t\\x12\\x1f\\n\\x04\\x64\\x61ta\\x18\\x04 \\x01(\\x0b\\x32\\x11.topboard.Commentb\\x06proto3') , dependencies=[topboard__sdk_dot_model_dot_topboard_dot_comment__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_cmdb_dot_user__pb2.DESCRIPTOR,]) _UPDATECOMMENTREQUEST = _descriptor.Descriptor( name='UpdateCommentRequest', full_name='topboard.UpdateCommentRequest', filename=None, file=DESCRIPTOR, containing_type=None,", "_UPDATECOMMENTREQUEST = _descriptor.Descriptor( name='UpdateCommentRequest', full_name='topboard.UpdateCommentRequest', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='commentID', full_name='topboard.UpdateCommentRequest.commentID', index=0,", "index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None,", "DO NOT EDIT! 
# source: update_comment.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or", "_UPDATECOMMENTRESPONSEWRAPPER = _descriptor.Descriptor( name='UpdateCommentResponseWrapper', full_name='topboard.UpdateCommentResponseWrapper', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='code', full_name='topboard.UpdateCommentResponseWrapper.code', index=0,", "index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None,", "], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=239, serialized_end=352, )", "], extensions=[ ], nested_types=[], enum_types=[ ], serialized_options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=239,", "= _symbol_database.Default() from topboard_sdk.model.topboard import comment_pb2 as topboard__sdk_dot_model_dot_topboard_dot_comment__pb2 from topboard_sdk.model.topboard import issue_basic_pb2 as", "label=1, has_default_value=False, default_value=_b(\"\").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, serialized_options=None, file=DESCRIPTOR), _descriptor.FieldDescriptor( name='data', full_name='topboard.UpdateCommentResponseWrapper.data',", "topboard__sdk_dot_model_dot_topboard_dot_comment__pb2 from topboard_sdk.model.topboard import issue_basic_pb2 as topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2 from topboard_sdk.model.cmdb import user_pb2 as topboard__sdk_dot_model_dot_cmdb_dot_user__pb2", "index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
serialized_options=None,", "syntax='proto3', serialized_options=None, serialized_pb=_b('\\n\\x14update_comment.proto\\x12\\x08topboard\\x1a)topboard_sdk/model/topboard/comment.proto\\x1a-topboard_sdk/model/topboard/issue_basic.proto\\x1a\\\"topboard_sdk/model/cmdb/user.proto\\\"M\\n\\x14UpdateCommentRequest\\x12\\x11\\n\\tcommentID\\x18\\x01 \\x01(\\t\\x12\\\"\\n\\x07\\x63omment\\x18\\x02 \\x01(\\x0b\\x32\\x11.topboard.Comment\\\"q\\n\\x1cUpdateCommentResponseWrapper\\x12\\x0c\\n\\x04\\x63ode\\x18\\x01 \\x01(\\x05\\x12\\x13\\n\\x0b\\x63odeExplain\\x18\\x02 \\x01(\\t\\x12\\r\\n\\x05\\x65rror\\x18\\x03 \\x01(\\t\\x12\\x1f\\n\\x04\\x64\\x61ta\\x18\\x04 \\x01(\\x0b\\x32\\x11.topboard.Commentb\\x06proto3') , dependencies=[topboard__sdk_dot_model_dot_topboard_dot_comment__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_topboard_dot_issue__basic__pb2.DESCRIPTOR,topboard__sdk_dot_model_dot_cmdb_dot_user__pb2.DESCRIPTOR,]) _UPDATECOMMENTREQUEST =" ]
[ "from setuptools import setup import numpy setup( name='CIGAN', version='0.2dev', packages=['vpa'], license='MIT License', include_dirs=[numpy.get_include(),],", "setuptools import setup import numpy setup( name='CIGAN', version='0.2dev', packages=['vpa'], license='MIT License', include_dirs=[numpy.get_include(),], )" ]
[ "plt.subplot(1, nRes, j+1) ] for j in range(nRes): RE = REs[j] # read", "axes[j].set_xlabel(r'$\\frac{\\log E(k) - \\mu_{\\log E(k)}}{\\sigma_{\\log E(k)}}$') for j in range(1,nRes): axes[j].set_yticklabels([]) #axes[0].legend(lines, labels,", "'#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f', '#cab2d6', '#ffff99'] colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00',", "'#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a', '#ffff99', '#b15928'] #colors = ['#abd9e9', '#74add1',", "tokens[i]) runData = getAllData(dirn, eps, nu, nBins, fSkip=1) logE = np.log(runData['spectra']) avgLogSpec =", "RE = REs[j] # read target file logSpectra, logEnStdev, _, _ = readAllSpectra(target,", "#axes[j].set_xscale(\"log\") axes[j].set_ylim([1, 15]) axes[j].grid() axes[j].set_xlabel(r'$\\frac{\\log E(k) - \\mu_{\\log E(k)}}{\\sigma_{\\log E(k)}}$') for j in", "color=colors[i]) #p = axes[j].plot(LL, modes, color=colors[i]) if j == 0: lines += [p]", "_ = readAllSpectra(target, [RE]) for i in range(len(tokens)): eps, nu = epsNuFromRe(RE) dirn", "nBins = 2 * 16//2 - 1 modes = np.arange(1, nBins+1, dtype=np.float64) #", "j in range(nRes): axes += [ plt.subplot(1, nRes, j+1) ] for j in", "labels, bbox_to_anchor=(0.5, 0.5)) axes[0].legend(bbox_to_anchor=(0.5, 0.5)) plt.tight_layout() plt.show() #axes[0].legend(loc='lower left') if __name__ == '__main__':", "np.arange(1, nBins+1, dtype=np.float64) # assumes box is 2 pi plt.figure() #REs = findAllParams(path)", "#axes[0].legend(lines, labels, bbox_to_anchor=(-0.1, 2.5), borderaxespad=0) assert(len(lines) == len(labels)) #axes[0].legend(lines, labels, bbox_to_anchor=(0.5, 0.5)) axes[0].legend(bbox_to_anchor=(0.5,", "color=colors[i]) if j == 0: lines += [p] #stdLogSpec = np.std(logE, axis=0) #covLogSpec", "'#1a9850', '#66bd63', '#a6d96a', '#d9ef8b', '#fee08b', '#fdae61', '#f46d43', '#d73027', '#a50026', '#8e0152', '#c51b7d', '#de77ae', '#f1b6da']", "parser.add_argument('--runspath', help=\"Plot labels to assiciate 
to tokens\") args = parser.parse_args() assert(len(args.tokens) == len(args.labels))", "nargs='+', type=int, help=\"Reynolds numbers\") parser.add_argument('--labels', nargs='+', help=\"Plot labels to assiciate to tokens\") parser.add_argument('--runspath',", "== nBins) LL = (avgLogSpec.ravel() - logSpectra.ravel()) / logEnStdev.ravel() print(LL.shape) p = axes[j].plot(LL,", "'#f1b6da'] def findDirectory(runspath, re, token): retoken = 'RE%03d' % re alldirs = glob.glob(runspath", "np.std(logE, axis=0) #covLogSpec = np.cov(logE, rowvar=False) #print(covLogSpec.shape) axes[0].set_ylabel(r'$k$') for j in range(nRes): axes[j].set_title(r'$Re_\\lambda$", "#print(covLogSpec.shape) axes[0].set_ylabel(r'$k$') for j in range(nRes): axes[j].set_title(r'$Re_\\lambda$ = %d' % REs[j]) #axes[j].set_xscale(\"log\") axes[j].set_ylim([1,", "numbers\") parser.add_argument('--labels', nargs='+', help=\"Plot labels to assiciate to tokens\") parser.add_argument('--runspath', help=\"Plot labels to", "#colors = ['#abd9e9', '#74add1', '#4575b4', '#313695', '#006837', '#1a9850', '#66bd63', '#a6d96a', '#d9ef8b', '#fee08b', '#fdae61',", "dirn assert(False, 're-token combo not found') def main_integral(runspath, target, REs, tokens, labels): nBins", "'#ffff99'] colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628', '#f781bf', '#999999'] #colors", "= %d' % REs[j]) #axes[j].set_xscale(\"log\") axes[j].set_ylim([1, 15]) axes[j].grid() axes[j].set_xlabel(r'$\\frac{\\log E(k) - \\mu_{\\log E(k)}}{\\sigma_{\\log", "len(labels)) #axes[0].legend(lines, labels, bbox_to_anchor=(0.5, 0.5)) axes[0].legend(bbox_to_anchor=(0.5, 0.5)) plt.tight_layout() plt.show() #axes[0].legend(loc='lower left') if __name__", "to assiciate to tokens\") parser.add_argument('--runspath', help=\"Plot labels to assiciate to tokens\") args =", "np.mean(logE, axis=0) assert(avgLogSpec.size == nBins) LL = (avgLogSpec.ravel() - logSpectra.ravel()) / logEnStdev.ravel() print(LL.shape)", 
"directory\") parser.add_argument('--tokens', nargs='+', help=\"Text token distinguishing each series of runs\") parser.add_argument('--res', nargs='+', type=int,", "to tokens\") parser.add_argument('--runspath', help=\"Plot labels to assiciate to tokens\") args = parser.parse_args() assert(len(args.tokens)", "= argparse.ArgumentParser( description = \"Compute a target file for RL agent from DNS", "np.cov(logE, rowvar=False) #print(covLogSpec.shape) axes[0].set_ylabel(r'$k$') for j in range(nRes): axes[j].set_title(r'$Re_\\lambda$ = %d' % REs[j])", "to tokens\") args = parser.parse_args() assert(len(args.tokens) == len(args.labels)) main_integral(args.runspath, args.target, args.res, args.tokens, args.labels)", "target files directory\") parser.add_argument('--tokens', nargs='+', help=\"Text token distinguishing each series of runs\") parser.add_argument('--res',", "glob, os #from sklearn.neighbors.kde import KernelDensity import matplotlib.pyplot as plt from extractTargetFilesNonDim import", "labels to assiciate to tokens\") args = parser.parse_args() assert(len(args.tokens) == len(args.labels)) main_integral(args.runspath, args.target,", "files directory\") parser.add_argument('--tokens', nargs='+', help=\"Text token distinguishing each series of runs\") parser.add_argument('--res', nargs='+',", "pi plt.figure() #REs = findAllParams(path) nRes = len(REs) axes, lines = [], []", "not in dirn: continue if token not in dirn: continue return dirn assert(False,", "runs\") parser.add_argument('--res', nargs='+', type=int, help=\"Reynolds numbers\") parser.add_argument('--labels', nargs='+', help=\"Plot labels to assiciate to", "= (avgLogSpec.ravel() - logSpectra.ravel()) / logEnStdev.ravel() print(LL.shape) p = axes[j].plot(LL, modes, label=labels[i], color=colors[i])", "'#a6d96a', '#d9ef8b', '#fee08b', '#fdae61', '#f46d43', '#d73027', '#a50026', '#8e0152', '#c51b7d', '#de77ae', '#f1b6da'] def findDirectory(runspath,", "logE = np.log(runData['spectra']) avgLogSpec = 
np.mean(logE, axis=0) assert(avgLogSpec.size == nBins) LL = (avgLogSpec.ravel()", "np.log(runData['spectra']) avgLogSpec = np.mean(logE, axis=0) assert(avgLogSpec.size == nBins) LL = (avgLogSpec.ravel() - logSpectra.ravel())", "extractTargetFilesNonDim import epsNuFromRe from extractTargetFilesNonDim import getAllData from computeSpectraNonDim import readAllSpectra colors =", "for j in range(1,nRes): axes[j].set_yticklabels([]) #axes[0].legend(lines, labels, bbox_to_anchor=(-0.1, 2.5), borderaxespad=0) assert(len(lines) == len(labels))", "'#74add1', '#4575b4', '#313695', '#006837', '#1a9850', '#66bd63', '#a6d96a', '#d9ef8b', '#fee08b', '#fdae61', '#f46d43', '#d73027', '#a50026',", "\"Compute a target file for RL agent from DNS data.\") parser.add_argument('--target', help=\"Path to", "'#ffff99', '#b15928'] #colors = ['#abd9e9', '#74add1', '#4575b4', '#313695', '#006837', '#1a9850', '#66bd63', '#a6d96a', '#d9ef8b',", "REs[j]) #axes[j].set_xscale(\"log\") axes[j].set_ylim([1, 15]) axes[j].grid() axes[j].set_xlabel(r'$\\frac{\\log E(k) - \\mu_{\\log E(k)}}{\\sigma_{\\log E(k)}}$') for j", "found') def main_integral(runspath, target, REs, tokens, labels): nBins = 2 * 16//2 -", "to target files directory\") parser.add_argument('--tokens', nargs='+', help=\"Text token distinguishing each series of runs\")", "assiciate to tokens\") args = parser.parse_args() assert(len(args.tokens) == len(args.labels)) main_integral(args.runspath, args.target, args.res, args.tokens,", "tokens, labels): nBins = 2 * 16//2 - 1 modes = np.arange(1, nBins+1,", "'#b15928'] #colors = ['#abd9e9', '#74add1', '#4575b4', '#313695', '#006837', '#1a9850', '#66bd63', '#a6d96a', '#d9ef8b', '#fee08b',", "__name__ == '__main__': parser = argparse.ArgumentParser( description = \"Compute a target file for", "getAllData(dirn, eps, nu, nBins, fSkip=1) logE = np.log(runData['spectra']) avgLogSpec = np.mean(logE, axis=0) assert(avgLogSpec.size", "axes[0].legend(bbox_to_anchor=(0.5, 0.5)) plt.tight_layout() 
plt.show() #axes[0].legend(loc='lower left') if __name__ == '__main__': parser = argparse.ArgumentParser(", "python3 import re, argparse, numpy as np, glob, os #from sklearn.neighbors.kde import KernelDensity", "eps, nu, nBins, fSkip=1) logE = np.log(runData['spectra']) avgLogSpec = np.mean(logE, axis=0) assert(avgLogSpec.size ==", "assumes box is 2 pi plt.figure() #REs = findAllParams(path) nRes = len(REs) axes,", "re alldirs = glob.glob(runspath + '/*') for dirn in alldirs: if retoken not", "logEnStdev.ravel() print(LL.shape) p = axes[j].plot(LL, modes, label=labels[i], color=colors[i]) #p = axes[j].plot(LL, modes, color=colors[i])", "= findAllParams(path) nRes = len(REs) axes, lines = [], [] for j in", "= REs[j] # read target file logSpectra, logEnStdev, _, _ = readAllSpectra(target, [RE])", "colors = ['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00', '#6a3d9a', '#b15928', '#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f', '#cab2d6',", "from DNS data.\") parser.add_argument('--target', help=\"Path to target files directory\") parser.add_argument('--tokens', nargs='+', help=\"Text token", "assert(avgLogSpec.size == nBins) LL = (avgLogSpec.ravel() - logSpectra.ravel()) / logEnStdev.ravel() print(LL.shape) p =", "= np.arange(1, nBins+1, dtype=np.float64) # assumes box is 2 pi plt.figure() #REs =", "axes[j].plot(LL, modes, color=colors[i]) if j == 0: lines += [p] #stdLogSpec = np.std(logE,", "[p] #stdLogSpec = np.std(logE, axis=0) #covLogSpec = np.cov(logE, rowvar=False) #print(covLogSpec.shape) axes[0].set_ylabel(r'$k$') for j", "#colors = ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a', '#ffff99',", "range(1,nRes): axes[j].set_yticklabels([]) #axes[0].legend(lines, labels, bbox_to_anchor=(-0.1, 2.5), borderaxespad=0) assert(len(lines) == len(labels)) #axes[0].legend(lines, labels, bbox_to_anchor=(0.5,", "not in dirn: continue return dirn assert(False, 're-token combo not found') def main_integral(runspath,", 
"'#a50026', '#8e0152', '#c51b7d', '#de77ae', '#f1b6da'] def findDirectory(runspath, re, token): retoken = 'RE%03d' %", "argparse.ArgumentParser( description = \"Compute a target file for RL agent from DNS data.\")", "retoken = 'RE%03d' % re alldirs = glob.glob(runspath + '/*') for dirn in", "axes += [ plt.subplot(1, nRes, j+1) ] for j in range(nRes): RE =", "in range(nRes): axes[j].set_title(r'$Re_\\lambda$ = %d' % REs[j]) #axes[j].set_xscale(\"log\") axes[j].set_ylim([1, 15]) axes[j].grid() axes[j].set_xlabel(r'$\\frac{\\log E(k)", "import matplotlib.pyplot as plt from extractTargetFilesNonDim import epsNuFromRe from extractTargetFilesNonDim import getAllData from", "labels to assiciate to tokens\") parser.add_argument('--runspath', help=\"Plot labels to assiciate to tokens\") args", "RE, tokens[i]) runData = getAllData(dirn, eps, nu, nBins, fSkip=1) logE = np.log(runData['spectra']) avgLogSpec", "(avgLogSpec.ravel() - logSpectra.ravel()) / logEnStdev.ravel() print(LL.shape) p = axes[j].plot(LL, modes, label=labels[i], color=colors[i]) #p", "'#d73027', '#a50026', '#8e0152', '#c51b7d', '#de77ae', '#f1b6da'] def findDirectory(runspath, re, token): retoken = 'RE%03d'", "if retoken not in dirn: continue if token not in dirn: continue return", "= getAllData(dirn, eps, nu, nBins, fSkip=1) logE = np.log(runData['spectra']) avgLogSpec = np.mean(logE, axis=0)", "= ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628', '#f781bf', '#999999'] #colors = ['#a6cee3',", "range(len(tokens)): eps, nu = epsNuFromRe(RE) dirn = findDirectory(runspath, RE, tokens[i]) runData = getAllData(dirn,", "eps, nu = epsNuFromRe(RE) dirn = findDirectory(runspath, RE, tokens[i]) runData = getAllData(dirn, eps,", "REs, tokens, labels): nBins = 2 * 16//2 - 1 modes = np.arange(1,", "'#8e0152', '#c51b7d', '#de77ae', '#f1b6da'] def findDirectory(runspath, re, token): retoken = 'RE%03d' % re", "'#6a3d9a', '#ffff99', '#b15928'] #colors = ['#abd9e9', '#74add1', '#4575b4', '#313695', 
'#006837', '#1a9850', '#66bd63', '#a6d96a',", "readAllSpectra colors = ['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00', '#6a3d9a', '#b15928', '#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f',", "_, _ = readAllSpectra(target, [RE]) for i in range(len(tokens)): eps, nu = epsNuFromRe(RE)", "np, glob, os #from sklearn.neighbors.kde import KernelDensity import matplotlib.pyplot as plt from extractTargetFilesNonDim", "'#fdae61', '#f46d43', '#d73027', '#a50026', '#8e0152', '#c51b7d', '#de77ae', '#f1b6da'] def findDirectory(runspath, re, token): retoken", "= np.std(logE, axis=0) #covLogSpec = np.cov(logE, rowvar=False) #print(covLogSpec.shape) axes[0].set_ylabel(r'$k$') for j in range(nRes):", "continue if token not in dirn: continue return dirn assert(False, 're-token combo not", "] for j in range(nRes): RE = REs[j] # read target file logSpectra,", "'#ff7f00', '#cab2d6', '#6a3d9a', '#ffff99', '#b15928'] #colors = ['#abd9e9', '#74add1', '#4575b4', '#313695', '#006837', '#1a9850',", "[ plt.subplot(1, nRes, j+1) ] for j in range(nRes): RE = REs[j] #", "+= [ plt.subplot(1, nRes, j+1) ] for j in range(nRes): RE = REs[j]", "rowvar=False) #print(covLogSpec.shape) axes[0].set_ylabel(r'$k$') for j in range(nRes): axes[j].set_title(r'$Re_\\lambda$ = %d' % REs[j]) #axes[j].set_xscale(\"log\")", "0: lines += [p] #stdLogSpec = np.std(logE, axis=0) #covLogSpec = np.cov(logE, rowvar=False) #print(covLogSpec.shape)", "lines = [], [] for j in range(nRes): axes += [ plt.subplot(1, nRes,", "left') if __name__ == '__main__': parser = argparse.ArgumentParser( description = \"Compute a target", "'#c51b7d', '#de77ae', '#f1b6da'] def findDirectory(runspath, re, token): retoken = 'RE%03d' % re alldirs", "import readAllSpectra colors = ['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00', '#6a3d9a', '#b15928', '#a6cee3', '#b2df8a', '#fb9a99',", "nBins+1, dtype=np.float64) # assumes box is 2 pi plt.figure() #REs = findAllParams(path) nRes", "'#fee08b', '#fdae61', '#f46d43', '#d73027', '#a50026', '#8e0152', '#c51b7d', 
'#de77ae', '#f1b6da'] def findDirectory(runspath, re, token):", "return dirn assert(False, 're-token combo not found') def main_integral(runspath, target, REs, tokens, labels):", "'#b2df8a', '#fb9a99', '#fdbf6f', '#cab2d6', '#ffff99'] colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33',", "for j in range(nRes): axes[j].set_title(r'$Re_\\lambda$ = %d' % REs[j]) #axes[j].set_xscale(\"log\") axes[j].set_ylim([1, 15]) axes[j].grid()", "box is 2 pi plt.figure() #REs = findAllParams(path) nRes = len(REs) axes, lines", "continue return dirn assert(False, 're-token combo not found') def main_integral(runspath, target, REs, tokens,", "nu = epsNuFromRe(RE) dirn = findDirectory(runspath, RE, tokens[i]) runData = getAllData(dirn, eps, nu,", "help=\"Path to target files directory\") parser.add_argument('--tokens', nargs='+', help=\"Text token distinguishing each series of", "dirn: continue return dirn assert(False, 're-token combo not found') def main_integral(runspath, target, REs,", "plt from extractTargetFilesNonDim import epsNuFromRe from extractTargetFilesNonDim import getAllData from computeSpectraNonDim import readAllSpectra", "'#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a', '#ffff99', '#b15928'] #colors =", "re, argparse, numpy as np, glob, os #from sklearn.neighbors.kde import KernelDensity import matplotlib.pyplot", "j == 0: lines += [p] #stdLogSpec = np.std(logE, axis=0) #covLogSpec = np.cov(logE,", "token distinguishing each series of runs\") parser.add_argument('--res', nargs='+', type=int, help=\"Reynolds numbers\") parser.add_argument('--labels', nargs='+',", "= 2 * 16//2 - 1 modes = np.arange(1, nBins+1, dtype=np.float64) # assumes", "'#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a', '#ffff99', '#b15928'] #colors = ['#abd9e9', '#74add1', '#4575b4', '#313695',", "'#fdbf6f', '#cab2d6', '#ffff99'] colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628', 
'#f781bf',", "j in range(1,nRes): axes[j].set_yticklabels([]) #axes[0].legend(lines, labels, bbox_to_anchor=(-0.1, 2.5), borderaxespad=0) assert(len(lines) == len(labels)) #axes[0].legend(lines,", "from extractTargetFilesNonDim import getAllData from computeSpectraNonDim import readAllSpectra colors = ['#1f78b4', '#33a02c', '#e31a1c',", "type=int, help=\"Reynolds numbers\") parser.add_argument('--labels', nargs='+', help=\"Plot labels to assiciate to tokens\") parser.add_argument('--runspath', help=\"Plot", "nargs='+', help=\"Text token distinguishing each series of runs\") parser.add_argument('--res', nargs='+', type=int, help=\"Reynolds numbers\")", "REs[j] # read target file logSpectra, logEnStdev, _, _ = readAllSpectra(target, [RE]) for", "in dirn: continue if token not in dirn: continue return dirn assert(False, 're-token", "= 'RE%03d' % re alldirs = glob.glob(runspath + '/*') for dirn in alldirs:", "#REs = findAllParams(path) nRes = len(REs) axes, lines = [], [] for j", "alldirs: if retoken not in dirn: continue if token not in dirn: continue", "as plt from extractTargetFilesNonDim import epsNuFromRe from extractTargetFilesNonDim import getAllData from computeSpectraNonDim import", "nBins, fSkip=1) logE = np.log(runData['spectra']) avgLogSpec = np.mean(logE, axis=0) assert(avgLogSpec.size == nBins) LL", "lines += [p] #stdLogSpec = np.std(logE, axis=0) #covLogSpec = np.cov(logE, rowvar=False) #print(covLogSpec.shape) axes[0].set_ylabel(r'$k$')", "alldirs = glob.glob(runspath + '/*') for dirn in alldirs: if retoken not in", "'#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628', '#f781bf', '#999999'] #colors = ['#a6cee3', '#1f78b4', '#b2df8a',", "- logSpectra.ravel()) / logEnStdev.ravel() print(LL.shape) p = axes[j].plot(LL, modes, label=labels[i], color=colors[i]) #p =", "numpy as np, glob, os #from sklearn.neighbors.kde import KernelDensity import matplotlib.pyplot as plt", "'#984ea3', '#ff7f00', '#ffff33', '#a65628', '#f781bf', '#999999'] #colors 
= ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99',", "axes[j].set_yticklabels([]) #axes[0].legend(lines, labels, bbox_to_anchor=(-0.1, 2.5), borderaxespad=0) assert(len(lines) == len(labels)) #axes[0].legend(lines, labels, bbox_to_anchor=(0.5, 0.5))", "findDirectory(runspath, re, token): retoken = 'RE%03d' % re alldirs = glob.glob(runspath + '/*')", "sklearn.neighbors.kde import KernelDensity import matplotlib.pyplot as plt from extractTargetFilesNonDim import epsNuFromRe from extractTargetFilesNonDim", "len(REs) axes, lines = [], [] for j in range(nRes): axes += [", "axes, lines = [], [] for j in range(nRes): axes += [ plt.subplot(1,", "distinguishing each series of runs\") parser.add_argument('--res', nargs='+', type=int, help=\"Reynolds numbers\") parser.add_argument('--labels', nargs='+', help=\"Plot", "nRes = len(REs) axes, lines = [], [] for j in range(nRes): axes", "labels, bbox_to_anchor=(-0.1, 2.5), borderaxespad=0) assert(len(lines) == len(labels)) #axes[0].legend(lines, labels, bbox_to_anchor=(0.5, 0.5)) axes[0].legend(bbox_to_anchor=(0.5, 0.5))", "RL agent from DNS data.\") parser.add_argument('--target', help=\"Path to target files directory\") parser.add_argument('--tokens', nargs='+',", "= np.mean(logE, axis=0) assert(avgLogSpec.size == nBins) LL = (avgLogSpec.ravel() - logSpectra.ravel()) / logEnStdev.ravel()", "'#ff7f00', '#6a3d9a', '#b15928', '#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f', '#cab2d6', '#ffff99'] colors = ['#e41a1c', '#377eb8',", "borderaxespad=0) assert(len(lines) == len(labels)) #axes[0].legend(lines, labels, bbox_to_anchor=(0.5, 0.5)) axes[0].legend(bbox_to_anchor=(0.5, 0.5)) plt.tight_layout() plt.show() #axes[0].legend(loc='lower", "parser.add_argument('--target', help=\"Path to target files directory\") parser.add_argument('--tokens', nargs='+', help=\"Text token distinguishing each series", "'#d9ef8b', '#fee08b', '#fdae61', '#f46d43', '#d73027', '#a50026', '#8e0152', '#c51b7d', '#de77ae', '#f1b6da'] def 
findDirectory(runspath, re,", "tokens\") parser.add_argument('--runspath', help=\"Plot labels to assiciate to tokens\") args = parser.parse_args() assert(len(args.tokens) ==", "'#f46d43', '#d73027', '#a50026', '#8e0152', '#c51b7d', '#de77ae', '#f1b6da'] def findDirectory(runspath, re, token): retoken =", "help=\"Plot labels to assiciate to tokens\") parser.add_argument('--runspath', help=\"Plot labels to assiciate to tokens\")", "= axes[j].plot(LL, modes, color=colors[i]) if j == 0: lines += [p] #stdLogSpec =", "nargs='+', help=\"Plot labels to assiciate to tokens\") parser.add_argument('--runspath', help=\"Plot labels to assiciate to", "/ logEnStdev.ravel() print(LL.shape) p = axes[j].plot(LL, modes, label=labels[i], color=colors[i]) #p = axes[j].plot(LL, modes,", "target, REs, tokens, labels): nBins = 2 * 16//2 - 1 modes =", "help=\"Reynolds numbers\") parser.add_argument('--labels', nargs='+', help=\"Plot labels to assiciate to tokens\") parser.add_argument('--runspath', help=\"Plot labels", "agent from DNS data.\") parser.add_argument('--target', help=\"Path to target files directory\") parser.add_argument('--tokens', nargs='+', help=\"Text", "in alldirs: if retoken not in dirn: continue if token not in dirn:", "'#b2df8a', '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a', '#ffff99', '#b15928'] #colors = ['#abd9e9',", "of runs\") parser.add_argument('--res', nargs='+', type=int, help=\"Reynolds numbers\") parser.add_argument('--labels', nargs='+', help=\"Plot labels to assiciate", "axes[0].set_ylabel(r'$k$') for j in range(nRes): axes[j].set_title(r'$Re_\\lambda$ = %d' % REs[j]) #axes[j].set_xscale(\"log\") axes[j].set_ylim([1, 15])", "#axes[0].legend(lines, labels, bbox_to_anchor=(0.5, 0.5)) axes[0].legend(bbox_to_anchor=(0.5, 0.5)) plt.tight_layout() plt.show() #axes[0].legend(loc='lower left') if __name__ ==", "axes[j].set_ylim([1, 15]) axes[j].grid() axes[j].set_xlabel(r'$\\frac{\\log E(k) - \\mu_{\\log E(k)}}{\\sigma_{\\log 
E(k)}}$') for j in range(1,nRes):", "as np, glob, os #from sklearn.neighbors.kde import KernelDensity import matplotlib.pyplot as plt from", "target file logSpectra, logEnStdev, _, _ = readAllSpectra(target, [RE]) for i in range(len(tokens)):", "#covLogSpec = np.cov(logE, rowvar=False) #print(covLogSpec.shape) axes[0].set_ylabel(r'$k$') for j in range(nRes): axes[j].set_title(r'$Re_\\lambda$ = %d'", "%d' % REs[j]) #axes[j].set_xscale(\"log\") axes[j].set_ylim([1, 15]) axes[j].grid() axes[j].set_xlabel(r'$\\frac{\\log E(k) - \\mu_{\\log E(k)}}{\\sigma_{\\log E(k)}}$')", "assiciate to tokens\") parser.add_argument('--runspath', help=\"Plot labels to assiciate to tokens\") args = parser.parse_args()", "extractTargetFilesNonDim import getAllData from computeSpectraNonDim import readAllSpectra colors = ['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00',", "from computeSpectraNonDim import readAllSpectra colors = ['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00', '#6a3d9a', '#b15928', '#a6cee3',", "range(nRes): axes[j].set_title(r'$Re_\\lambda$ = %d' % REs[j]) #axes[j].set_xscale(\"log\") axes[j].set_ylim([1, 15]) axes[j].grid() axes[j].set_xlabel(r'$\\frac{\\log E(k) -", "+ '/*') for dirn in alldirs: if retoken not in dirn: continue if", "for j in range(nRes): axes += [ plt.subplot(1, nRes, j+1) ] for j", "os #from sklearn.neighbors.kde import KernelDensity import matplotlib.pyplot as plt from extractTargetFilesNonDim import epsNuFromRe", "in range(nRes): axes += [ plt.subplot(1, nRes, j+1) ] for j in range(nRes):", "modes, color=colors[i]) if j == 0: lines += [p] #stdLogSpec = np.std(logE, axis=0)", "if token not in dirn: continue return dirn assert(False, 're-token combo not found')", "'RE%03d' % re alldirs = glob.glob(runspath + '/*') for dirn in alldirs: if", "in range(len(tokens)): eps, nu = epsNuFromRe(RE) dirn = findDirectory(runspath, RE, tokens[i]) runData =", "logSpectra.ravel()) / logEnStdev.ravel() print(LL.shape) p = axes[j].plot(LL, modes, label=labels[i], 
color=colors[i]) #p = axes[j].plot(LL,", "E(k)}}{\\sigma_{\\log E(k)}}$') for j in range(1,nRes): axes[j].set_yticklabels([]) #axes[0].legend(lines, labels, bbox_to_anchor=(-0.1, 2.5), borderaxespad=0) assert(len(lines)", "'#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a', '#ffff99', '#b15928'] #colors = ['#abd9e9', '#74add1', '#4575b4', '#313695', '#006837',", "j+1) ] for j in range(nRes): RE = REs[j] # read target file", "token not in dirn: continue return dirn assert(False, 're-token combo not found') def", "computeSpectraNonDim import readAllSpectra colors = ['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00', '#6a3d9a', '#b15928', '#a6cee3', '#b2df8a',", "each series of runs\") parser.add_argument('--res', nargs='+', type=int, help=\"Reynolds numbers\") parser.add_argument('--labels', nargs='+', help=\"Plot labels", "axis=0) assert(avgLogSpec.size == nBins) LL = (avgLogSpec.ravel() - logSpectra.ravel()) / logEnStdev.ravel() print(LL.shape) p", "0.5)) plt.tight_layout() plt.show() #axes[0].legend(loc='lower left') if __name__ == '__main__': parser = argparse.ArgumentParser( description", "a target file for RL agent from DNS data.\") parser.add_argument('--target', help=\"Path to target", "- 1 modes = np.arange(1, nBins+1, dtype=np.float64) # assumes box is 2 pi", "main_integral(runspath, target, REs, tokens, labels): nBins = 2 * 16//2 - 1 modes", "% REs[j]) #axes[j].set_xscale(\"log\") axes[j].set_ylim([1, 15]) axes[j].grid() axes[j].set_xlabel(r'$\\frac{\\log E(k) - \\mu_{\\log E(k)}}{\\sigma_{\\log E(k)}}$') for", "matplotlib.pyplot as plt from extractTargetFilesNonDim import epsNuFromRe from extractTargetFilesNonDim import getAllData from computeSpectraNonDim", "\\mu_{\\log E(k)}}{\\sigma_{\\log E(k)}}$') for j in range(1,nRes): axes[j].set_yticklabels([]) #axes[0].legend(lines, labels, bbox_to_anchor=(-0.1, 2.5), borderaxespad=0)", "== '__main__': parser = argparse.ArgumentParser( description = \"Compute a target file for RL", "help=\"Plot labels to assiciate to tokens\") 
args = parser.parse_args() assert(len(args.tokens) == len(args.labels)) main_integral(args.runspath,", "['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00', '#6a3d9a', '#b15928', '#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f', '#cab2d6', '#ffff99'] colors", "'#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628', '#f781bf', '#999999'] #colors = ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c',", "colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628', '#f781bf', '#999999'] #colors =", "help=\"Text token distinguishing each series of runs\") parser.add_argument('--res', nargs='+', type=int, help=\"Reynolds numbers\") parser.add_argument('--labels',", "1 modes = np.arange(1, nBins+1, dtype=np.float64) # assumes box is 2 pi plt.figure()", "axes[j].grid() axes[j].set_xlabel(r'$\\frac{\\log E(k) - \\mu_{\\log E(k)}}{\\sigma_{\\log E(k)}}$') for j in range(1,nRes): axes[j].set_yticklabels([]) #axes[0].legend(lines,", "assert(len(lines) == len(labels)) #axes[0].legend(lines, labels, bbox_to_anchor=(0.5, 0.5)) axes[0].legend(bbox_to_anchor=(0.5, 0.5)) plt.tight_layout() plt.show() #axes[0].legend(loc='lower left')", "axes[j].plot(LL, modes, label=labels[i], color=colors[i]) #p = axes[j].plot(LL, modes, color=colors[i]) if j == 0:", "parser.add_argument('--labels', nargs='+', help=\"Plot labels to assiciate to tokens\") parser.add_argument('--runspath', help=\"Plot labels to assiciate", "% re alldirs = glob.glob(runspath + '/*') for dirn in alldirs: if retoken", "['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628', '#f781bf', '#999999'] #colors = ['#a6cee3', '#1f78b4',", "axis=0) #covLogSpec = np.cov(logE, rowvar=False) #print(covLogSpec.shape) axes[0].set_ylabel(r'$k$') for j in range(nRes): axes[j].set_title(r'$Re_\\lambda$ =", "plt.show() #axes[0].legend(loc='lower left') if __name__ == '__main__': parser = argparse.ArgumentParser( description = \"Compute", "in dirn: continue return dirn assert(False, 're-token combo not found') def 
main_integral(runspath, target,", "'#999999'] #colors = ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a',", "def main_integral(runspath, target, REs, tokens, labels): nBins = 2 * 16//2 - 1", "file logSpectra, logEnStdev, _, _ = readAllSpectra(target, [RE]) for i in range(len(tokens)): eps,", "15]) axes[j].grid() axes[j].set_xlabel(r'$\\frac{\\log E(k) - \\mu_{\\log E(k)}}{\\sigma_{\\log E(k)}}$') for j in range(1,nRes): axes[j].set_yticklabels([])", "plt.figure() #REs = findAllParams(path) nRes = len(REs) axes, lines = [], [] for", "'#cab2d6', '#ffff99'] colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628', '#f781bf', '#999999']", "'#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a', '#ffff99', '#b15928'] #colors = ['#abd9e9', '#74add1', '#4575b4',", "token): retoken = 'RE%03d' % re alldirs = glob.glob(runspath + '/*') for dirn", "'#b15928', '#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f', '#cab2d6', '#ffff99'] colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3',", "glob.glob(runspath + '/*') for dirn in alldirs: if retoken not in dirn: continue", "re, token): retoken = 'RE%03d' % re alldirs = glob.glob(runspath + '/*') for", "['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a', '#ffff99', '#b15928'] #colors", "plt.tight_layout() plt.show() #axes[0].legend(loc='lower left') if __name__ == '__main__': parser = argparse.ArgumentParser( description =", "findAllParams(path) nRes = len(REs) axes, lines = [], [] for j in range(nRes):", "= glob.glob(runspath + '/*') for dirn in alldirs: if retoken not in dirn:", "#from sklearn.neighbors.kde import KernelDensity import matplotlib.pyplot as plt from extractTargetFilesNonDim import epsNuFromRe from", "in range(1,nRes): axes[j].set_yticklabels([]) #axes[0].legend(lines, labels, bbox_to_anchor=(-0.1, 2.5), borderaxespad=0) assert(len(lines) == len(labels)) 
#axes[0].legend(lines, labels,", "for i in range(len(tokens)): eps, nu = epsNuFromRe(RE) dirn = findDirectory(runspath, RE, tokens[i])", "findDirectory(runspath, RE, tokens[i]) runData = getAllData(dirn, eps, nu, nBins, fSkip=1) logE = np.log(runData['spectra'])", "= ['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00', '#6a3d9a', '#b15928', '#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f', '#cab2d6', '#ffff99']", "labels): nBins = 2 * 16//2 - 1 modes = np.arange(1, nBins+1, dtype=np.float64)", "fSkip=1) logE = np.log(runData['spectra']) avgLogSpec = np.mean(logE, axis=0) assert(avgLogSpec.size == nBins) LL =", "epsNuFromRe(RE) dirn = findDirectory(runspath, RE, tokens[i]) runData = getAllData(dirn, eps, nu, nBins, fSkip=1)", "0.5)) axes[0].legend(bbox_to_anchor=(0.5, 0.5)) plt.tight_layout() plt.show() #axes[0].legend(loc='lower left') if __name__ == '__main__': parser =", "range(nRes): RE = REs[j] # read target file logSpectra, logEnStdev, _, _ =", "= \"Compute a target file for RL agent from DNS data.\") parser.add_argument('--target', help=\"Path", "parser = argparse.ArgumentParser( description = \"Compute a target file for RL agent from", "# read target file logSpectra, logEnStdev, _, _ = readAllSpectra(target, [RE]) for i", "nu, nBins, fSkip=1) logE = np.log(runData['spectra']) avgLogSpec = np.mean(logE, axis=0) assert(avgLogSpec.size == nBins)", "getAllData from computeSpectraNonDim import readAllSpectra colors = ['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00', '#6a3d9a', '#b15928',", "nRes, j+1) ] for j in range(nRes): RE = REs[j] # read target", "logEnStdev, _, _ = readAllSpectra(target, [RE]) for i in range(len(tokens)): eps, nu =", "j in range(nRes): RE = REs[j] # read target file logSpectra, logEnStdev, _,", "[RE]) for i in range(len(tokens)): eps, nu = epsNuFromRe(RE) dirn = findDirectory(runspath, RE,", "#!/usr/bin/env python3 import re, argparse, numpy as np, glob, os #from sklearn.neighbors.kde import", "import re, argparse, numpy as np, glob, os #from 
sklearn.neighbors.kde import KernelDensity import", "'#de77ae', '#f1b6da'] def findDirectory(runspath, re, token): retoken = 'RE%03d' % re alldirs =", "+= [p] #stdLogSpec = np.std(logE, axis=0) #covLogSpec = np.cov(logE, rowvar=False) #print(covLogSpec.shape) axes[0].set_ylabel(r'$k$') for", "import KernelDensity import matplotlib.pyplot as plt from extractTargetFilesNonDim import epsNuFromRe from extractTargetFilesNonDim import", "j in range(nRes): axes[j].set_title(r'$Re_\\lambda$ = %d' % REs[j]) #axes[j].set_xscale(\"log\") axes[j].set_ylim([1, 15]) axes[j].grid() axes[j].set_xlabel(r'$\\frac{\\log", "file for RL agent from DNS data.\") parser.add_argument('--target', help=\"Path to target files directory\")", "modes, label=labels[i], color=colors[i]) #p = axes[j].plot(LL, modes, color=colors[i]) if j == 0: lines", "retoken not in dirn: continue if token not in dirn: continue return dirn", "E(k) - \\mu_{\\log E(k)}}{\\sigma_{\\log E(k)}}$') for j in range(1,nRes): axes[j].set_yticklabels([]) #axes[0].legend(lines, labels, bbox_to_anchor=(-0.1,", "#stdLogSpec = np.std(logE, axis=0) #covLogSpec = np.cov(logE, rowvar=False) #print(covLogSpec.shape) axes[0].set_ylabel(r'$k$') for j in", "dirn in alldirs: if retoken not in dirn: continue if token not in", "for j in range(nRes): RE = REs[j] # read target file logSpectra, logEnStdev,", "16//2 - 1 modes = np.arange(1, nBins+1, dtype=np.float64) # assumes box is 2", "import getAllData from computeSpectraNonDim import readAllSpectra colors = ['#1f78b4', '#33a02c', '#e31a1c', '#ff7f00', '#6a3d9a',", "bbox_to_anchor=(-0.1, 2.5), borderaxespad=0) assert(len(lines) == len(labels)) #axes[0].legend(lines, labels, bbox_to_anchor=(0.5, 0.5)) axes[0].legend(bbox_to_anchor=(0.5, 0.5)) plt.tight_layout()", "combo not found') def main_integral(runspath, target, REs, tokens, labels): nBins = 2 *", "= len(REs) axes, lines = [], [] for j in range(nRes): axes +=", "readAllSpectra(target, [RE]) for i in range(len(tokens)): eps, nu = 
epsNuFromRe(RE) dirn = findDirectory(runspath,", "data.\") parser.add_argument('--target', help=\"Path to target files directory\") parser.add_argument('--tokens', nargs='+', help=\"Text token distinguishing each", "2 pi plt.figure() #REs = findAllParams(path) nRes = len(REs) axes, lines = [],", "parser.add_argument('--res', nargs='+', type=int, help=\"Reynolds numbers\") parser.add_argument('--labels', nargs='+', help=\"Plot labels to assiciate to tokens\")", "modes = np.arange(1, nBins+1, dtype=np.float64) # assumes box is 2 pi plt.figure() #REs", "range(nRes): axes += [ plt.subplot(1, nRes, j+1) ] for j in range(nRes): RE", "'__main__': parser = argparse.ArgumentParser( description = \"Compute a target file for RL agent", "axes[j].set_title(r'$Re_\\lambda$ = %d' % REs[j]) #axes[j].set_xscale(\"log\") axes[j].set_ylim([1, 15]) axes[j].grid() axes[j].set_xlabel(r'$\\frac{\\log E(k) - \\mu_{\\log", "argparse, numpy as np, glob, os #from sklearn.neighbors.kde import KernelDensity import matplotlib.pyplot as", "'#a65628', '#f781bf', '#999999'] #colors = ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00',", "= ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6', '#6a3d9a', '#ffff99', '#b15928']", "'#6a3d9a', '#b15928', '#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f', '#cab2d6', '#ffff99'] colors = ['#e41a1c', '#377eb8', '#4daf4a',", "not found') def main_integral(runspath, target, REs, tokens, labels): nBins = 2 * 16//2", "LL = (avgLogSpec.ravel() - logSpectra.ravel()) / logEnStdev.ravel() print(LL.shape) p = axes[j].plot(LL, modes, label=labels[i],", "'#33a02c', '#e31a1c', '#ff7f00', '#6a3d9a', '#b15928', '#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f', '#cab2d6', '#ffff99'] colors =", "#p = axes[j].plot(LL, modes, color=colors[i]) if j == 0: lines += [p] #stdLogSpec", "if __name__ == '__main__': parser = argparse.ArgumentParser( description = \"Compute a target file", "= readAllSpectra(target, 
[RE]) for i in range(len(tokens)): eps, nu = epsNuFromRe(RE) dirn =", "E(k)}}$') for j in range(1,nRes): axes[j].set_yticklabels([]) #axes[0].legend(lines, labels, bbox_to_anchor=(-0.1, 2.5), borderaxespad=0) assert(len(lines) ==", "# assumes box is 2 pi plt.figure() #REs = findAllParams(path) nRes = len(REs)", "from extractTargetFilesNonDim import epsNuFromRe from extractTargetFilesNonDim import getAllData from computeSpectraNonDim import readAllSpectra colors", "is 2 pi plt.figure() #REs = findAllParams(path) nRes = len(REs) axes, lines =", "#axes[0].legend(loc='lower left') if __name__ == '__main__': parser = argparse.ArgumentParser( description = \"Compute a", "assert(False, 're-token combo not found') def main_integral(runspath, target, REs, tokens, labels): nBins =", "'#fb9a99', '#fdbf6f', '#cab2d6', '#ffff99'] colors = ['#e41a1c', '#377eb8', '#4daf4a', '#984ea3', '#ff7f00', '#ffff33', '#a65628',", "'#66bd63', '#a6d96a', '#d9ef8b', '#fee08b', '#fdae61', '#f46d43', '#d73027', '#a50026', '#8e0152', '#c51b7d', '#de77ae', '#f1b6da'] def", "for dirn in alldirs: if retoken not in dirn: continue if token not", "epsNuFromRe from extractTargetFilesNonDim import getAllData from computeSpectraNonDim import readAllSpectra colors = ['#1f78b4', '#33a02c',", "avgLogSpec = np.mean(logE, axis=0) assert(avgLogSpec.size == nBins) LL = (avgLogSpec.ravel() - logSpectra.ravel()) /", "import epsNuFromRe from extractTargetFilesNonDim import getAllData from computeSpectraNonDim import readAllSpectra colors = ['#1f78b4',", "'#006837', '#1a9850', '#66bd63', '#a6d96a', '#d9ef8b', '#fee08b', '#fdae61', '#f46d43', '#d73027', '#a50026', '#8e0152', '#c51b7d', '#de77ae',", "nBins) LL = (avgLogSpec.ravel() - logSpectra.ravel()) / logEnStdev.ravel() print(LL.shape) p = axes[j].plot(LL, modes,", "DNS data.\") parser.add_argument('--target', help=\"Path to target files directory\") parser.add_argument('--tokens', nargs='+', help=\"Text token distinguishing", "== 0: lines += [p] #stdLogSpec = 
np.std(logE, axis=0) #covLogSpec = np.cov(logE, rowvar=False)", "- \\mu_{\\log E(k)}}{\\sigma_{\\log E(k)}}$') for j in range(1,nRes): axes[j].set_yticklabels([]) #axes[0].legend(lines, labels, bbox_to_anchor=(-0.1, 2.5),", "2.5), borderaxespad=0) assert(len(lines) == len(labels)) #axes[0].legend(lines, labels, bbox_to_anchor=(0.5, 0.5)) axes[0].legend(bbox_to_anchor=(0.5, 0.5)) plt.tight_layout() plt.show()", "'#cab2d6', '#6a3d9a', '#ffff99', '#b15928'] #colors = ['#abd9e9', '#74add1', '#4575b4', '#313695', '#006837', '#1a9850', '#66bd63',", "== len(labels)) #axes[0].legend(lines, labels, bbox_to_anchor=(0.5, 0.5)) axes[0].legend(bbox_to_anchor=(0.5, 0.5)) plt.tight_layout() plt.show() #axes[0].legend(loc='lower left') if", "= findDirectory(runspath, RE, tokens[i]) runData = getAllData(dirn, eps, nu, nBins, fSkip=1) logE =", "print(LL.shape) p = axes[j].plot(LL, modes, label=labels[i], color=colors[i]) #p = axes[j].plot(LL, modes, color=colors[i]) if", "2 * 16//2 - 1 modes = np.arange(1, nBins+1, dtype=np.float64) # assumes box", "label=labels[i], color=colors[i]) #p = axes[j].plot(LL, modes, color=colors[i]) if j == 0: lines +=", "read target file logSpectra, logEnStdev, _, _ = readAllSpectra(target, [RE]) for i in", "in range(nRes): RE = REs[j] # read target file logSpectra, logEnStdev, _, _", "KernelDensity import matplotlib.pyplot as plt from extractTargetFilesNonDim import epsNuFromRe from extractTargetFilesNonDim import getAllData", "= axes[j].plot(LL, modes, label=labels[i], color=colors[i]) #p = axes[j].plot(LL, modes, color=colors[i]) if j ==", "'re-token combo not found') def main_integral(runspath, target, REs, tokens, labels): nBins = 2", "i in range(len(tokens)): eps, nu = epsNuFromRe(RE) dirn = findDirectory(runspath, RE, tokens[i]) runData", "= epsNuFromRe(RE) dirn = findDirectory(runspath, RE, tokens[i]) runData = getAllData(dirn, eps, nu, nBins,", "'/*') for dirn in alldirs: if retoken not in dirn: continue if token", "[], [] for j in range(nRes): 
axes += [ plt.subplot(1, nRes, j+1) ]", "[] for j in range(nRes): axes += [ plt.subplot(1, nRes, j+1) ] for", "if j == 0: lines += [p] #stdLogSpec = np.std(logE, axis=0) #covLogSpec =", "= np.log(runData['spectra']) avgLogSpec = np.mean(logE, axis=0) assert(avgLogSpec.size == nBins) LL = (avgLogSpec.ravel() -", "dirn: continue if token not in dirn: continue return dirn assert(False, 're-token combo", "for RL agent from DNS data.\") parser.add_argument('--target', help=\"Path to target files directory\") parser.add_argument('--tokens',", "'#f781bf', '#999999'] #colors = ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', '#cab2d6',", "dirn = findDirectory(runspath, RE, tokens[i]) runData = getAllData(dirn, eps, nu, nBins, fSkip=1) logE", "'#ffff33', '#a65628', '#f781bf', '#999999'] #colors = ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c', '#fdbf6f',", "'#4575b4', '#313695', '#006837', '#1a9850', '#66bd63', '#a6d96a', '#d9ef8b', '#fee08b', '#fdae61', '#f46d43', '#d73027', '#a50026', '#8e0152',", "target file for RL agent from DNS data.\") parser.add_argument('--target', help=\"Path to target files", "description = \"Compute a target file for RL agent from DNS data.\") parser.add_argument('--target',", "'#e31a1c', '#ff7f00', '#6a3d9a', '#b15928', '#a6cee3', '#b2df8a', '#fb9a99', '#fdbf6f', '#cab2d6', '#ffff99'] colors = ['#e41a1c',", "bbox_to_anchor=(0.5, 0.5)) axes[0].legend(bbox_to_anchor=(0.5, 0.5)) plt.tight_layout() plt.show() #axes[0].legend(loc='lower left') if __name__ == '__main__': parser", "logSpectra, logEnStdev, _, _ = readAllSpectra(target, [RE]) for i in range(len(tokens)): eps, nu", "runData = getAllData(dirn, eps, nu, nBins, fSkip=1) logE = np.log(runData['spectra']) avgLogSpec = np.mean(logE,", "= np.cov(logE, rowvar=False) #print(covLogSpec.shape) axes[0].set_ylabel(r'$k$') for j in range(nRes): axes[j].set_title(r'$Re_\\lambda$ = %d' %", "to assiciate to tokens\") args = parser.parse_args() 
assert(len(args.tokens) == len(args.labels)) main_integral(args.runspath, args.target, args.res,", "= [], [] for j in range(nRes): axes += [ plt.subplot(1, nRes, j+1)", "'#313695', '#006837', '#1a9850', '#66bd63', '#a6d96a', '#d9ef8b', '#fee08b', '#fdae61', '#f46d43', '#d73027', '#a50026', '#8e0152', '#c51b7d',", "def findDirectory(runspath, re, token): retoken = 'RE%03d' % re alldirs = glob.glob(runspath +", "dtype=np.float64) # assumes box is 2 pi plt.figure() #REs = findAllParams(path) nRes =", "parser.add_argument('--tokens', nargs='+', help=\"Text token distinguishing each series of runs\") parser.add_argument('--res', nargs='+', type=int, help=\"Reynolds", "series of runs\") parser.add_argument('--res', nargs='+', type=int, help=\"Reynolds numbers\") parser.add_argument('--labels', nargs='+', help=\"Plot labels to", "= ['#abd9e9', '#74add1', '#4575b4', '#313695', '#006837', '#1a9850', '#66bd63', '#a6d96a', '#d9ef8b', '#fee08b', '#fdae61', '#f46d43',", "p = axes[j].plot(LL, modes, label=labels[i], color=colors[i]) #p = axes[j].plot(LL, modes, color=colors[i]) if j", "'#ff7f00', '#ffff33', '#a65628', '#f781bf', '#999999'] #colors = ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', '#fb9a99', '#e31a1c',", "* 16//2 - 1 modes = np.arange(1, nBins+1, dtype=np.float64) # assumes box is", "['#abd9e9', '#74add1', '#4575b4', '#313695', '#006837', '#1a9850', '#66bd63', '#a6d96a', '#d9ef8b', '#fee08b', '#fdae61', '#f46d43', '#d73027'," ]
[ "digit is 0, # this is likely cheaper and faster result_dict = {1:", "0, # this is likely cheaper and faster result_dict = {1: 1, 2:", "at 10, and anything after 5 the last digit is 0, # this", "is 0, # this is likely cheaper and faster result_dict = {1: 1,", "5 the last digit is 0, # this is likely cheaper and faster", "10, and anything after 5 the last digit is 0, # this is", "sys.stdin.readline() for line in sys.stdin.readlines(): number = int(line.rstrip()) if number >= 5: print(0)", "An attempt to solve the Last Factorial Digit \"\"\" import sys # This", "dont_care = sys.stdin.readline() for line in sys.stdin.readlines(): number = int(line.rstrip()) if number >=", "# This is totally wrong, but given N maxes out at 10, and", "python3 \"\"\" An attempt to solve the Last Factorial Digit \"\"\" import sys", "the Last Factorial Digit \"\"\" import sys # This is totally wrong, but", "out at 10, and anything after 5 the last digit is 0, #", "\"\"\" An attempt to solve the Last Factorial Digit \"\"\" import sys #", "= {1: 1, 2: 2, 3: 6, 4: 4} dont_care = sys.stdin.readline() for", "solve the Last Factorial Digit \"\"\" import sys # This is totally wrong,", "anything after 5 the last digit is 0, # this is likely cheaper", "N maxes out at 10, and anything after 5 the last digit is", "the last digit is 0, # this is likely cheaper and faster result_dict", "and anything after 5 the last digit is 0, # this is likely", "Digit \"\"\" import sys # This is totally wrong, but given N maxes", "result_dict = {1: 1, 2: 2, 3: 6, 4: 4} dont_care = sys.stdin.readline()", "faster result_dict = {1: 1, 2: 2, 3: 6, 4: 4} dont_care =", "1, 2: 2, 3: 6, 4: 4} dont_care = sys.stdin.readline() for line in", "after 5 the last digit is 0, # this is likely cheaper and", "import sys # This is totally wrong, but given N maxes out at", "3: 6, 4: 4} dont_care = sys.stdin.readline() for line in sys.stdin.readlines(): number =", "line in sys.stdin.readlines(): number = int(line.rstrip()) if number >= 
5: print(0) else: print(result_dict[number])", "# this is likely cheaper and faster result_dict = {1: 1, 2: 2,", "\"\"\" import sys # This is totally wrong, but given N maxes out", "2, 3: 6, 4: 4} dont_care = sys.stdin.readline() for line in sys.stdin.readlines(): number", "for line in sys.stdin.readlines(): number = int(line.rstrip()) if number >= 5: print(0) else:", "totally wrong, but given N maxes out at 10, and anything after 5", "and faster result_dict = {1: 1, 2: 2, 3: 6, 4: 4} dont_care", "{1: 1, 2: 2, 3: 6, 4: 4} dont_care = sys.stdin.readline() for line", "6, 4: 4} dont_care = sys.stdin.readline() for line in sys.stdin.readlines(): number = int(line.rstrip())", "4} dont_care = sys.stdin.readline() for line in sys.stdin.readlines(): number = int(line.rstrip()) if number", "sys # This is totally wrong, but given N maxes out at 10,", "#!/usr/bin/env python3 \"\"\" An attempt to solve the Last Factorial Digit \"\"\" import", "attempt to solve the Last Factorial Digit \"\"\" import sys # This is", "This is totally wrong, but given N maxes out at 10, and anything", "is likely cheaper and faster result_dict = {1: 1, 2: 2, 3: 6,", "to solve the Last Factorial Digit \"\"\" import sys # This is totally", "Factorial Digit \"\"\" import sys # This is totally wrong, but given N", "wrong, but given N maxes out at 10, and anything after 5 the", "given N maxes out at 10, and anything after 5 the last digit", "4: 4} dont_care = sys.stdin.readline() for line in sys.stdin.readlines(): number = int(line.rstrip()) if", "likely cheaper and faster result_dict = {1: 1, 2: 2, 3: 6, 4:", "maxes out at 10, and anything after 5 the last digit is 0,", "this is likely cheaper and faster result_dict = {1: 1, 2: 2, 3:", "cheaper and faster result_dict = {1: 1, 2: 2, 3: 6, 4: 4}", "but given N maxes out at 10, and anything after 5 the last", "Last Factorial Digit \"\"\" import sys # This is totally wrong, but given", "2: 2, 3: 6, 4: 4} dont_care = sys.stdin.readline() for line in 
sys.stdin.readlines():", "is totally wrong, but given N maxes out at 10, and anything after", "= sys.stdin.readline() for line in sys.stdin.readlines(): number = int(line.rstrip()) if number >= 5:", "last digit is 0, # this is likely cheaper and faster result_dict =" ]
[ "bson - 'utc' from datetime import datetime from datetime import tzinfo class UTC(tzinfo):", "This empty module with the name pytz.py fools # bson.py into loading; we", "\"\"\"UTC\"\"\" def utcoffset(self, dt): return ZERO def tzname(self, dt): return \"UTC\" def dst(self,", "# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 # # Copyright (c) 2014 <NAME> <<EMAIL>>", "# \"\"\"Pytz dummy module\"\"\" # This empty module with the name pytz.py fools", "shiftwidth=4 softtabstop=4 # # Copyright (c) 2014 <NAME> <<EMAIL>> # \"\"\"Pytz dummy module\"\"\"", "# pytz-reference used by bson - 'utc' from datetime import datetime from datetime", "only # pytz-reference used by bson - 'utc' from datetime import datetime from", "into loading; we then provide the only # pytz-reference used by bson -", "loading; we then provide the only # pytz-reference used by bson - 'utc'", "bson.py into loading; we then provide the only # pytz-reference used by bson", "(c) 2014 <NAME> <<EMAIL>> # \"\"\"Pytz dummy module\"\"\" # This empty module with", "then provide the only # pytz-reference used by bson - 'utc' from datetime", "def tzname(self, dt): return \"UTC\" def dst(self, dt): return ZERO utc = UTC()", "dummy module\"\"\" # This empty module with the name pytz.py fools # bson.py", "class UTC(tzinfo): \"\"\"UTC\"\"\" def utcoffset(self, dt): return ZERO def tzname(self, dt): return \"UTC\"", "- 'utc' from datetime import datetime from datetime import tzinfo class UTC(tzinfo): \"\"\"UTC\"\"\"", "the only # pytz-reference used by bson - 'utc' from datetime import datetime", "tabstop=4 expandtab shiftwidth=4 softtabstop=4 # # Copyright (c) 2014 <NAME> <<EMAIL>> # \"\"\"Pytz", "expandtab shiftwidth=4 softtabstop=4 # # Copyright (c) 2014 <NAME> <<EMAIL>> # \"\"\"Pytz dummy", "UTC(tzinfo): \"\"\"UTC\"\"\" def utcoffset(self, dt): return ZERO def tzname(self, dt): return \"UTC\" def", "2014 <NAME> <<EMAIL>> # \"\"\"Pytz dummy module\"\"\" # This empty module with the", "# Copyright (c) 2014 
<NAME> <<EMAIL>> # \"\"\"Pytz dummy module\"\"\" # This empty", "\"\"\"Pytz dummy module\"\"\" # This empty module with the name pytz.py fools #", "fools # bson.py into loading; we then provide the only # pytz-reference used", "# bson.py into loading; we then provide the only # pytz-reference used by", "we then provide the only # pytz-reference used by bson - 'utc' from", "provide the only # pytz-reference used by bson - 'utc' from datetime import", "dt): return ZERO def tzname(self, dt): return \"UTC\" def dst(self, dt): return ZERO", "from datetime import datetime from datetime import tzinfo class UTC(tzinfo): \"\"\"UTC\"\"\" def utcoffset(self,", "ZERO def tzname(self, dt): return \"UTC\" def dst(self, dt): return ZERO utc =", "softtabstop=4 # # Copyright (c) 2014 <NAME> <<EMAIL>> # \"\"\"Pytz dummy module\"\"\" #", "<<EMAIL>> # \"\"\"Pytz dummy module\"\"\" # This empty module with the name pytz.py", "empty module with the name pytz.py fools # bson.py into loading; we then", "the name pytz.py fools # bson.py into loading; we then provide the only", "module\"\"\" # This empty module with the name pytz.py fools # bson.py into", "with the name pytz.py fools # bson.py into loading; we then provide the", "Copyright (c) 2014 <NAME> <<EMAIL>> # \"\"\"Pytz dummy module\"\"\" # This empty module", "datetime import datetime from datetime import tzinfo class UTC(tzinfo): \"\"\"UTC\"\"\" def utcoffset(self, dt):", "'utc' from datetime import datetime from datetime import tzinfo class UTC(tzinfo): \"\"\"UTC\"\"\" def", "vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 # # Copyright (c) 2014 <NAME> <<EMAIL>> #", "pytz.py fools # bson.py into loading; we then provide the only # pytz-reference", "def utcoffset(self, dt): return ZERO def tzname(self, dt): return \"UTC\" def dst(self, dt):", "# This empty module with the name pytz.py fools # bson.py into loading;", "module with the name pytz.py fools # bson.py into loading; we then provide", "import tzinfo class UTC(tzinfo): 
\"\"\"UTC\"\"\" def utcoffset(self, dt): return ZERO def tzname(self, dt):", "pytz-reference used by bson - 'utc' from datetime import datetime from datetime import", "utcoffset(self, dt): return ZERO def tzname(self, dt): return \"UTC\" def dst(self, dt): return", "import datetime from datetime import tzinfo class UTC(tzinfo): \"\"\"UTC\"\"\" def utcoffset(self, dt): return", "datetime from datetime import tzinfo class UTC(tzinfo): \"\"\"UTC\"\"\" def utcoffset(self, dt): return ZERO", "from datetime import tzinfo class UTC(tzinfo): \"\"\"UTC\"\"\" def utcoffset(self, dt): return ZERO def", "return ZERO def tzname(self, dt): return \"UTC\" def dst(self, dt): return ZERO utc", "tzinfo class UTC(tzinfo): \"\"\"UTC\"\"\" def utcoffset(self, dt): return ZERO def tzname(self, dt): return", "by bson - 'utc' from datetime import datetime from datetime import tzinfo class", "datetime import tzinfo class UTC(tzinfo): \"\"\"UTC\"\"\" def utcoffset(self, dt): return ZERO def tzname(self,", "# # Copyright (c) 2014 <NAME> <<EMAIL>> # \"\"\"Pytz dummy module\"\"\" # This", "name pytz.py fools # bson.py into loading; we then provide the only #", "used by bson - 'utc' from datetime import datetime from datetime import tzinfo", "<NAME> <<EMAIL>> # \"\"\"Pytz dummy module\"\"\" # This empty module with the name" ]
[ "'icons/256x256.png'), QtCore.QSize(256,256)) app.setWindowIcon(app_icon) if platform == \"win32\": # Windows... #This will make sure", "will make sure that the app icon is set in the taskbar on", "import os, sys from PyQt5 import QtCore, QtGui from qtpy.QtWidgets import QApplication import", "= QApplication(sys.argv) exe_path = os.path.dirname(os.path.realpath(sys.argv[0])) assets_dir = os.path.join(exe_path, 'assets') for (dirpath, dirnames, filenames)", "app = QApplication(sys.argv) exe_path = os.path.dirname(os.path.realpath(sys.argv[0])) assets_dir = os.path.join(exe_path, 'assets') for (dirpath, dirnames,", "is set in the taskbar on windows # See https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105 myappid = u'no-company.node-editor.execution-graph-editor.1.0'", "myappid = u'no-company.node-editor.execution-graph-editor.1.0' # arbitrary string ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) wnd = ExecutionNodeEditorWindow() wnd.setWindowIcon(app_icon) wnd.show() wnd.actNew.trigger()", "in the taskbar on windows # See https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105 myappid = u'no-company.node-editor.execution-graph-editor.1.0' # arbitrary", "app.setWindowIcon(app_icon) if platform == \"win32\": # Windows... #This will make sure that the", "# Windows... 
#This will make sure that the app icon is set in", "the app icon is set in the taskbar on windows # See https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105", "arbitrary string ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) wnd = ExecutionNodeEditorWindow() wnd.setWindowIcon(app_icon) wnd.show() wnd.actNew.trigger() if len(sys.argv) == 2:", "QtCore.QSize(64,64)) app_icon.addFile(os.path.join(assets_dir, 'icons/128x128.png'), QtCore.QSize(128,128)) app_icon.addFile(os.path.join(assets_dir, 'icons/256x256.png'), QtCore.QSize(256,256)) app.setWindowIcon(app_icon) if platform == \"win32\": #", "= u'no-company.node-editor.execution-graph-editor.1.0' # arbitrary string ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) wnd = ExecutionNodeEditorWindow() wnd.setWindowIcon(app_icon) wnd.show() wnd.actNew.trigger() if", "taskbar on windows # See https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105 myappid = u'no-company.node-editor.execution-graph-editor.1.0' # arbitrary string ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid)", "os.path.dirname(__file__), \"..\", \"..\" )) from window import ExecutionNodeEditorWindow if __name__ == '__main__': app", "\"..\", \"..\" )) from window import ExecutionNodeEditorWindow if __name__ == '__main__': app =", "'icons/128x128.png'), QtCore.QSize(128,128)) app_icon.addFile(os.path.join(assets_dir, 'icons/256x256.png'), QtCore.QSize(256,256)) app.setWindowIcon(app_icon) if platform == \"win32\": # Windows... #This", "Windows... 
#This will make sure that the app icon is set in the", "= QtGui.QFontDatabase.addApplicationFont(f) if QtGui.QFontDatabase.applicationFontFamilies(font_id) == -1: print(\"Could not load font\") sys.exit(-1) # print(QStyleFactory.keys())", "= os.path.join(exe_path, 'assets') for (dirpath, dirnames, filenames) in os.walk(os.path.join(assets_dir, 'fonts')): for f in", "'fonts')): for f in filenames: font_id = QtGui.QFontDatabase.addApplicationFont(f) if QtGui.QFontDatabase.applicationFontFamilies(font_id) == -1: print(\"Could", "in filenames: font_id = QtGui.QFontDatabase.addApplicationFont(f) if QtGui.QFontDatabase.applicationFontFamilies(font_id) == -1: print(\"Could not load font\")", "load font\") sys.exit(-1) # print(QStyleFactory.keys()) app.setStyle('Fusion') app_icon = QtGui.QIcon() app_icon.addFile(os.path.join(assets_dir, 'icons/16x16.png'), QtCore.QSize(16,16)) app_icon.addFile(os.path.join(assets_dir,", "for (dirpath, dirnames, filenames) in os.walk(os.path.join(assets_dir, 'fonts')): for f in filenames: font_id =", "from sys import platform sys.path.insert(0, os.path.join( os.path.dirname(__file__), \"..\", \"..\" )) from window import", "os, sys from PyQt5 import QtCore, QtGui from qtpy.QtWidgets import QApplication import ctypes", "if __name__ == '__main__': app = QApplication(sys.argv) exe_path = os.path.dirname(os.path.realpath(sys.argv[0])) assets_dir = os.path.join(exe_path,", "font_id = QtGui.QFontDatabase.addApplicationFont(f) if QtGui.QFontDatabase.applicationFontFamilies(font_id) == -1: print(\"Could not load font\") sys.exit(-1) #", "QApplication(sys.argv) exe_path = os.path.dirname(os.path.realpath(sys.argv[0])) assets_dir = os.path.join(exe_path, 'assets') for (dirpath, dirnames, filenames) in", "app_icon.addFile(os.path.join(assets_dir, 'icons/48x48.png'), QtCore.QSize(48,48)) app_icon.addFile(os.path.join(assets_dir, 'icons/64x64.png'), QtCore.QSize(64,64)) app_icon.addFile(os.path.join(assets_dir, 'icons/128x128.png'), 
QtCore.QSize(128,128)) app_icon.addFile(os.path.join(assets_dir, 'icons/256x256.png'), QtCore.QSize(256,256)) app.setWindowIcon(app_icon)", "QApplication import ctypes from sys import platform sys.path.insert(0, os.path.join( os.path.dirname(__file__), \"..\", \"..\" ))", "that the app icon is set in the taskbar on windows # See", "QtGui.QFontDatabase.addApplicationFont(f) if QtGui.QFontDatabase.applicationFontFamilies(font_id) == -1: print(\"Could not load font\") sys.exit(-1) # print(QStyleFactory.keys()) app.setStyle('Fusion')", "'icons/16x16.png'), QtCore.QSize(16,16)) app_icon.addFile(os.path.join(assets_dir, 'icons/24x24.png'), QtCore.QSize(24,24)) app_icon.addFile(os.path.join(assets_dir, 'icons/32x32.png'), QtCore.QSize(32,32)) app_icon.addFile(os.path.join(assets_dir, 'icons/48x48.png'), QtCore.QSize(48,48)) app_icon.addFile(os.path.join(assets_dir, 'icons/64x64.png'),", "'icons/64x64.png'), QtCore.QSize(64,64)) app_icon.addFile(os.path.join(assets_dir, 'icons/128x128.png'), QtCore.QSize(128,128)) app_icon.addFile(os.path.join(assets_dir, 'icons/256x256.png'), QtCore.QSize(256,256)) app.setWindowIcon(app_icon) if platform == \"win32\":", "== -1: print(\"Could not load font\") sys.exit(-1) # print(QStyleFactory.keys()) app.setStyle('Fusion') app_icon = QtGui.QIcon()", "icon is set in the taskbar on windows # See https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105 myappid =", "os.path.dirname(os.path.realpath(sys.argv[0])) assets_dir = os.path.join(exe_path, 'assets') for (dirpath, dirnames, filenames) in os.walk(os.path.join(assets_dir, 'fonts')): for", "print(QStyleFactory.keys()) app.setStyle('Fusion') app_icon = QtGui.QIcon() app_icon.addFile(os.path.join(assets_dir, 'icons/16x16.png'), QtCore.QSize(16,16)) app_icon.addFile(os.path.join(assets_dir, 'icons/24x24.png'), QtCore.QSize(24,24)) app_icon.addFile(os.path.join(assets_dir, 'icons/32x32.png'),", "qtpy.QtWidgets import QApplication import 
ctypes from sys import platform sys.path.insert(0, os.path.join( os.path.dirname(__file__), \"..\",", "print(\"Could not load font\") sys.exit(-1) # print(QStyleFactory.keys()) app.setStyle('Fusion') app_icon = QtGui.QIcon() app_icon.addFile(os.path.join(assets_dir, 'icons/16x16.png'),", "# print(QStyleFactory.keys()) app.setStyle('Fusion') app_icon = QtGui.QIcon() app_icon.addFile(os.path.join(assets_dir, 'icons/16x16.png'), QtCore.QSize(16,16)) app_icon.addFile(os.path.join(assets_dir, 'icons/24x24.png'), QtCore.QSize(24,24)) app_icon.addFile(os.path.join(assets_dir,", "from window import ExecutionNodeEditorWindow if __name__ == '__main__': app = QApplication(sys.argv) exe_path =", "window import ExecutionNodeEditorWindow if __name__ == '__main__': app = QApplication(sys.argv) exe_path = os.path.dirname(os.path.realpath(sys.argv[0]))", "QtGui.QIcon() app_icon.addFile(os.path.join(assets_dir, 'icons/16x16.png'), QtCore.QSize(16,16)) app_icon.addFile(os.path.join(assets_dir, 'icons/24x24.png'), QtCore.QSize(24,24)) app_icon.addFile(os.path.join(assets_dir, 'icons/32x32.png'), QtCore.QSize(32,32)) app_icon.addFile(os.path.join(assets_dir, 'icons/48x48.png'), QtCore.QSize(48,48))", "windows # See https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105 myappid = u'no-company.node-editor.execution-graph-editor.1.0' # arbitrary string ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) wnd =", "app_icon.addFile(os.path.join(assets_dir, 'icons/16x16.png'), QtCore.QSize(16,16)) app_icon.addFile(os.path.join(assets_dir, 'icons/24x24.png'), QtCore.QSize(24,24)) app_icon.addFile(os.path.join(assets_dir, 'icons/32x32.png'), QtCore.QSize(32,32)) app_icon.addFile(os.path.join(assets_dir, 'icons/48x48.png'), QtCore.QSize(48,48)) app_icon.addFile(os.path.join(assets_dir,", "\"win32\": # Windows... 
#This will make sure that the app icon is set", "-1: print(\"Could not load font\") sys.exit(-1) # print(QStyleFactory.keys()) app.setStyle('Fusion') app_icon = QtGui.QIcon() app_icon.addFile(os.path.join(assets_dir,", "QtCore.QSize(256,256)) app.setWindowIcon(app_icon) if platform == \"win32\": # Windows... #This will make sure that", "platform sys.path.insert(0, os.path.join( os.path.dirname(__file__), \"..\", \"..\" )) from window import ExecutionNodeEditorWindow if __name__", "from qtpy.QtWidgets import QApplication import ctypes from sys import platform sys.path.insert(0, os.path.join( os.path.dirname(__file__),", "ExecutionNodeEditorWindow if __name__ == '__main__': app = QApplication(sys.argv) exe_path = os.path.dirname(os.path.realpath(sys.argv[0])) assets_dir =", "app_icon = QtGui.QIcon() app_icon.addFile(os.path.join(assets_dir, 'icons/16x16.png'), QtCore.QSize(16,16)) app_icon.addFile(os.path.join(assets_dir, 'icons/24x24.png'), QtCore.QSize(24,24)) app_icon.addFile(os.path.join(assets_dir, 'icons/32x32.png'), QtCore.QSize(32,32)) app_icon.addFile(os.path.join(assets_dir,", "if platform == \"win32\": # Windows... 
#This will make sure that the app", ")) from window import ExecutionNodeEditorWindow if __name__ == '__main__': app = QApplication(sys.argv) exe_path", "import QtCore, QtGui from qtpy.QtWidgets import QApplication import ctypes from sys import platform", "sure that the app icon is set in the taskbar on windows #", "'icons/32x32.png'), QtCore.QSize(32,32)) app_icon.addFile(os.path.join(assets_dir, 'icons/48x48.png'), QtCore.QSize(48,48)) app_icon.addFile(os.path.join(assets_dir, 'icons/64x64.png'), QtCore.QSize(64,64)) app_icon.addFile(os.path.join(assets_dir, 'icons/128x128.png'), QtCore.QSize(128,128)) app_icon.addFile(os.path.join(assets_dir, 'icons/256x256.png'),", "'icons/24x24.png'), QtCore.QSize(24,24)) app_icon.addFile(os.path.join(assets_dir, 'icons/32x32.png'), QtCore.QSize(32,32)) app_icon.addFile(os.path.join(assets_dir, 'icons/48x48.png'), QtCore.QSize(48,48)) app_icon.addFile(os.path.join(assets_dir, 'icons/64x64.png'), QtCore.QSize(64,64)) app_icon.addFile(os.path.join(assets_dir, 'icons/128x128.png'),", "'icons/48x48.png'), QtCore.QSize(48,48)) app_icon.addFile(os.path.join(assets_dir, 'icons/64x64.png'), QtCore.QSize(64,64)) app_icon.addFile(os.path.join(assets_dir, 'icons/128x128.png'), QtCore.QSize(128,128)) app_icon.addFile(os.path.join(assets_dir, 'icons/256x256.png'), QtCore.QSize(256,256)) app.setWindowIcon(app_icon) if", "the taskbar on windows # See https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105 myappid = u'no-company.node-editor.execution-graph-editor.1.0' # arbitrary string", "sys from PyQt5 import QtCore, QtGui from qtpy.QtWidgets import QApplication import ctypes from", "os.walk(os.path.join(assets_dir, 'fonts')): for f in filenames: font_id = QtGui.QFontDatabase.addApplicationFont(f) if QtGui.QFontDatabase.applicationFontFamilies(font_id) == -1:", "app_icon.addFile(os.path.join(assets_dir, 'icons/32x32.png'), QtCore.QSize(32,32)) app_icon.addFile(os.path.join(assets_dir, 
'icons/48x48.png'), QtCore.QSize(48,48)) app_icon.addFile(os.path.join(assets_dir, 'icons/64x64.png'), QtCore.QSize(64,64)) app_icon.addFile(os.path.join(assets_dir, 'icons/128x128.png'), QtCore.QSize(128,128)) app_icon.addFile(os.path.join(assets_dir,", "= os.path.dirname(os.path.realpath(sys.argv[0])) assets_dir = os.path.join(exe_path, 'assets') for (dirpath, dirnames, filenames) in os.walk(os.path.join(assets_dir, 'fonts')):", "QtCore.QSize(32,32)) app_icon.addFile(os.path.join(assets_dir, 'icons/48x48.png'), QtCore.QSize(48,48)) app_icon.addFile(os.path.join(assets_dir, 'icons/64x64.png'), QtCore.QSize(64,64)) app_icon.addFile(os.path.join(assets_dir, 'icons/128x128.png'), QtCore.QSize(128,128)) app_icon.addFile(os.path.join(assets_dir, 'icons/256x256.png'), QtCore.QSize(256,256))", "== '__main__': app = QApplication(sys.argv) exe_path = os.path.dirname(os.path.realpath(sys.argv[0])) assets_dir = os.path.join(exe_path, 'assets') for", "u'no-company.node-editor.execution-graph-editor.1.0' # arbitrary string ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) wnd = ExecutionNodeEditorWindow() wnd.setWindowIcon(app_icon) wnd.show() wnd.actNew.trigger() if len(sys.argv)", "if QtGui.QFontDatabase.applicationFontFamilies(font_id) == -1: print(\"Could not load font\") sys.exit(-1) # print(QStyleFactory.keys()) app.setStyle('Fusion') app_icon", "app_icon.addFile(os.path.join(assets_dir, 'icons/64x64.png'), QtCore.QSize(64,64)) app_icon.addFile(os.path.join(assets_dir, 'icons/128x128.png'), QtCore.QSize(128,128)) app_icon.addFile(os.path.join(assets_dir, 'icons/256x256.png'), QtCore.QSize(256,256)) app.setWindowIcon(app_icon) if platform ==", "os.path.join( os.path.dirname(__file__), \"..\", \"..\" )) from window import ExecutionNodeEditorWindow if __name__ == '__main__':", "'__main__': app = QApplication(sys.argv) exe_path = os.path.dirname(os.path.realpath(sys.argv[0])) assets_dir = os.path.join(exe_path, 'assets') for (dirpath,", "See 
https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105 myappid = u'no-company.node-editor.execution-graph-editor.1.0' # arbitrary string ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) wnd = ExecutionNodeEditorWindow() wnd.setWindowIcon(app_icon)", "import ctypes from sys import platform sys.path.insert(0, os.path.join( os.path.dirname(__file__), \"..\", \"..\" )) from", "\"..\" )) from window import ExecutionNodeEditorWindow if __name__ == '__main__': app = QApplication(sys.argv)", "set in the taskbar on windows # See https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105 myappid = u'no-company.node-editor.execution-graph-editor.1.0' #", "app_icon.addFile(os.path.join(assets_dir, 'icons/24x24.png'), QtCore.QSize(24,24)) app_icon.addFile(os.path.join(assets_dir, 'icons/32x32.png'), QtCore.QSize(32,32)) app_icon.addFile(os.path.join(assets_dir, 'icons/48x48.png'), QtCore.QSize(48,48)) app_icon.addFile(os.path.join(assets_dir, 'icons/64x64.png'), QtCore.QSize(64,64)) app_icon.addFile(os.path.join(assets_dir,", "sys.exit(-1) # print(QStyleFactory.keys()) app.setStyle('Fusion') app_icon = QtGui.QIcon() app_icon.addFile(os.path.join(assets_dir, 'icons/16x16.png'), QtCore.QSize(16,16)) app_icon.addFile(os.path.join(assets_dir, 'icons/24x24.png'), QtCore.QSize(24,24))", "sys.path.insert(0, os.path.join( os.path.dirname(__file__), \"..\", \"..\" )) from window import ExecutionNodeEditorWindow if __name__ ==", "import ExecutionNodeEditorWindow if __name__ == '__main__': app = QApplication(sys.argv) exe_path = os.path.dirname(os.path.realpath(sys.argv[0])) assets_dir", "f in filenames: font_id = QtGui.QFontDatabase.addApplicationFont(f) if QtGui.QFontDatabase.applicationFontFamilies(font_id) == -1: print(\"Could not load", "platform == \"win32\": # Windows... 
#This will make sure that the app icon", "make sure that the app icon is set in the taskbar on windows", "app icon is set in the taskbar on windows # See https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105 myappid", "ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) wnd = ExecutionNodeEditorWindow() wnd.setWindowIcon(app_icon) wnd.show() wnd.actNew.trigger() if len(sys.argv) == 2: wnd.openFile(sys.argv[1]) sys.exit(app.exec_())", "PyQt5 import QtCore, QtGui from qtpy.QtWidgets import QApplication import ctypes from sys import", "QtCore, QtGui from qtpy.QtWidgets import QApplication import ctypes from sys import platform sys.path.insert(0,", "QtGui from qtpy.QtWidgets import QApplication import ctypes from sys import platform sys.path.insert(0, os.path.join(", "QtCore.QSize(128,128)) app_icon.addFile(os.path.join(assets_dir, 'icons/256x256.png'), QtCore.QSize(256,256)) app.setWindowIcon(app_icon) if platform == \"win32\": # Windows... 
#This will", "app.setStyle('Fusion') app_icon = QtGui.QIcon() app_icon.addFile(os.path.join(assets_dir, 'icons/16x16.png'), QtCore.QSize(16,16)) app_icon.addFile(os.path.join(assets_dir, 'icons/24x24.png'), QtCore.QSize(24,24)) app_icon.addFile(os.path.join(assets_dir, 'icons/32x32.png'), QtCore.QSize(32,32))", "# arbitrary string ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) wnd = ExecutionNodeEditorWindow() wnd.setWindowIcon(app_icon) wnd.show() wnd.actNew.trigger() if len(sys.argv) ==", "dirnames, filenames) in os.walk(os.path.join(assets_dir, 'fonts')): for f in filenames: font_id = QtGui.QFontDatabase.addApplicationFont(f) if", "ctypes from sys import platform sys.path.insert(0, os.path.join( os.path.dirname(__file__), \"..\", \"..\" )) from window", "import platform sys.path.insert(0, os.path.join( os.path.dirname(__file__), \"..\", \"..\" )) from window import ExecutionNodeEditorWindow if", "#This will make sure that the app icon is set in the taskbar", "from PyQt5 import QtCore, QtGui from qtpy.QtWidgets import QApplication import ctypes from sys", "# See https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105 myappid = u'no-company.node-editor.execution-graph-editor.1.0' # arbitrary string ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) wnd = ExecutionNodeEditorWindow()", "filenames) in os.walk(os.path.join(assets_dir, 'fonts')): for f in filenames: font_id = QtGui.QFontDatabase.addApplicationFont(f) if QtGui.QFontDatabase.applicationFontFamilies(font_id)", "font\") sys.exit(-1) # print(QStyleFactory.keys()) app.setStyle('Fusion') app_icon = QtGui.QIcon() app_icon.addFile(os.path.join(assets_dir, 'icons/16x16.png'), QtCore.QSize(16,16)) app_icon.addFile(os.path.join(assets_dir, 'icons/24x24.png'),", "app_icon.addFile(os.path.join(assets_dir, 'icons/256x256.png'), QtCore.QSize(256,256)) app.setWindowIcon(app_icon) if platform == \"win32\": # Windows... 
#This will make", "__name__ == '__main__': app = QApplication(sys.argv) exe_path = os.path.dirname(os.path.realpath(sys.argv[0])) assets_dir = os.path.join(exe_path, 'assets')", "== \"win32\": # Windows... #This will make sure that the app icon is", "'assets') for (dirpath, dirnames, filenames) in os.walk(os.path.join(assets_dir, 'fonts')): for f in filenames: font_id", "QtCore.QSize(16,16)) app_icon.addFile(os.path.join(assets_dir, 'icons/24x24.png'), QtCore.QSize(24,24)) app_icon.addFile(os.path.join(assets_dir, 'icons/32x32.png'), QtCore.QSize(32,32)) app_icon.addFile(os.path.join(assets_dir, 'icons/48x48.png'), QtCore.QSize(48,48)) app_icon.addFile(os.path.join(assets_dir, 'icons/64x64.png'), QtCore.QSize(64,64))", "= QtGui.QIcon() app_icon.addFile(os.path.join(assets_dir, 'icons/16x16.png'), QtCore.QSize(16,16)) app_icon.addFile(os.path.join(assets_dir, 'icons/24x24.png'), QtCore.QSize(24,24)) app_icon.addFile(os.path.join(assets_dir, 'icons/32x32.png'), QtCore.QSize(32,32)) app_icon.addFile(os.path.join(assets_dir, 'icons/48x48.png'),", "app_icon.addFile(os.path.join(assets_dir, 'icons/128x128.png'), QtCore.QSize(128,128)) app_icon.addFile(os.path.join(assets_dir, 'icons/256x256.png'), QtCore.QSize(256,256)) app.setWindowIcon(app_icon) if platform == \"win32\": # Windows...", "import QApplication import ctypes from sys import platform sys.path.insert(0, os.path.join( os.path.dirname(__file__), \"..\", \"..\"", "on windows # See https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105 myappid = u'no-company.node-editor.execution-graph-editor.1.0' # arbitrary string ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) wnd", "https://stackoverflow.com/questions/1551605/how-to-set-applications-taskbar-icon-in-windows-7/1552105#1552105 myappid = u'no-company.node-editor.execution-graph-editor.1.0' # arbitrary string ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) wnd = 
ExecutionNodeEditorWindow() wnd.setWindowIcon(app_icon) wnd.show()", "(dirpath, dirnames, filenames) in os.walk(os.path.join(assets_dir, 'fonts')): for f in filenames: font_id = QtGui.QFontDatabase.addApplicationFont(f)", "QtGui.QFontDatabase.applicationFontFamilies(font_id) == -1: print(\"Could not load font\") sys.exit(-1) # print(QStyleFactory.keys()) app.setStyle('Fusion') app_icon =", "string ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(myappid) wnd = ExecutionNodeEditorWindow() wnd.setWindowIcon(app_icon) wnd.show() wnd.actNew.trigger() if len(sys.argv) == 2: wnd.openFile(sys.argv[1])", "not load font\") sys.exit(-1) # print(QStyleFactory.keys()) app.setStyle('Fusion') app_icon = QtGui.QIcon() app_icon.addFile(os.path.join(assets_dir, 'icons/16x16.png'), QtCore.QSize(16,16))", "in os.walk(os.path.join(assets_dir, 'fonts')): for f in filenames: font_id = QtGui.QFontDatabase.addApplicationFont(f) if QtGui.QFontDatabase.applicationFontFamilies(font_id) ==", "QtCore.QSize(24,24)) app_icon.addFile(os.path.join(assets_dir, 'icons/32x32.png'), QtCore.QSize(32,32)) app_icon.addFile(os.path.join(assets_dir, 'icons/48x48.png'), QtCore.QSize(48,48)) app_icon.addFile(os.path.join(assets_dir, 'icons/64x64.png'), QtCore.QSize(64,64)) app_icon.addFile(os.path.join(assets_dir, 'icons/128x128.png'), QtCore.QSize(128,128))", "sys import platform sys.path.insert(0, os.path.join( os.path.dirname(__file__), \"..\", \"..\" )) from window import ExecutionNodeEditorWindow", "QtCore.QSize(48,48)) app_icon.addFile(os.path.join(assets_dir, 'icons/64x64.png'), QtCore.QSize(64,64)) app_icon.addFile(os.path.join(assets_dir, 'icons/128x128.png'), QtCore.QSize(128,128)) app_icon.addFile(os.path.join(assets_dir, 'icons/256x256.png'), QtCore.QSize(256,256)) app.setWindowIcon(app_icon) if platform", "os.path.join(exe_path, 'assets') for (dirpath, dirnames, filenames) in os.walk(os.path.join(assets_dir, 'fonts')): for f in filenames:", "assets_dir = os.path.join(exe_path, 
'assets') for (dirpath, dirnames, filenames) in os.walk(os.path.join(assets_dir, 'fonts')): for f", "exe_path = os.path.dirname(os.path.realpath(sys.argv[0])) assets_dir = os.path.join(exe_path, 'assets') for (dirpath, dirnames, filenames) in os.walk(os.path.join(assets_dir,", "filenames: font_id = QtGui.QFontDatabase.addApplicationFont(f) if QtGui.QFontDatabase.applicationFontFamilies(font_id) == -1: print(\"Could not load font\") sys.exit(-1)", "for f in filenames: font_id = QtGui.QFontDatabase.addApplicationFont(f) if QtGui.QFontDatabase.applicationFontFamilies(font_id) == -1: print(\"Could not" ]
[ "= { 'salvoventura': os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura.json']), 'salvoventura_statistics': os.sep.join([root_path, 'pyunsplash', 'tests', 'resources',", "the user api response type = 'salvoventura' resource_filepath = self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read())", "content_type='application/json', adding_headers=stored_response.get('headers') ) pu_obj = PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=type) print(this_user.id, this_user.link_html, this_user.link_portfolio, this_user.link_following,", "'resource__users_salvoventura_statistics.json']) } @responses.activate def test_stats_total(self): type = 'salvoventura' resource_filepath = self.store_mapping[type] stored_response =", "the testcases are pretty much the same for all # TOXINIDIR comes from", "combine responses.activate so as to avoid # code duplication, as the testcases are", "pu_obj = PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=type) print(this_user.id, this_user.link_html, this_user.link_portfolio, this_user.link_following, this_user.link_followers, this_user.link_photos) #", "as the testcases are pretty much the same for all # TOXINIDIR comes", "'{}{}'.format(API_ROOT, stored_response.get('url').split('?')[0]), # cheating on the url, because the class always inits without", "What's new in revision 1 # use local resources # ############################################################################### import responses", "from pyunsplash.src.settings import API_ROOT api_key = os.environ.get('APPLICATION_ID', None) or 'DUMMY_APPLICATION_ID' class TestUsers: #", "<NAME> <<EMAIL>> # Date: 14 Dec 2016 # Purpose: users unit tests #", "much the same for all # TOXINIDIR comes from tox.ini root_path = os.environ.get('TRAVIS_BUILD_DIR',", "to workout how to combine responses.activate so as to avoid # code duplication,", "type = 'salvoventura' resource_filepath = self.store_mapping[type] 
stored_response = json.loads(open(resource_filepath).read()) responses.add( responses.GET, '{}{}'.format(API_ROOT, stored_response.get('url').split('?')[0]),", "pyunsplash.src.settings import API_ROOT api_key = os.environ.get('APPLICATION_ID', None) or 'DUMMY_APPLICATION_ID' class TestUsers: # TODO:", "inits without query params json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) pu_obj = PyUnsplash(api_key=api_key) this_user", "# Author: <NAME> <<EMAIL>> # Date: 14 Dec 2016 # Purpose: users unit", "the user object @responses.activate def test_user_stats(self): username = 'salvoventura' # Add the user", "duplication, as the testcases are pretty much the same for all # TOXINIDIR", "user api response type = 'salvoventura' resource_filepath = self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read()) responses.add(", "this_user = pu_obj.user(source=type) print(this_user.id, this_user.link_html, this_user.link_portfolio, this_user.link_following, this_user.link_followers, this_user.link_photos) # TODO: collections, photos", "API_ROOT api_key = os.environ.get('APPLICATION_ID', None) or 'DUMMY_APPLICATION_ID' class TestUsers: # TODO: avoid code", "testcases are pretty much the same for all # TOXINIDIR comes from tox.ini", "and users from the user object @responses.activate def test_user_stats(self): username = 'salvoventura' #", "content_type='application/json', adding_headers=stored_response.get('headers') ) pu_obj = PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=username) # create a User", "stored_response = json.loads(open(resource_filepath).read()) responses.add( responses.GET, '{}{}'.format(API_ROOT, stored_response.get('url').split('?')[0]), # cheating on the url, because", "# use local resources # ############################################################################### import responses import json import os from", 
"this_user.link_followers, this_user.link_photos) # TODO: collections, photos and users from the user object @responses.activate", "'resource__users_salvoventura.json']), 'salvoventura_statistics': os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura_statistics.json']) } @responses.activate def test_stats_total(self): type =", "# File: test_users.py # # Author: <NAME> <<EMAIL>> # Date: 14 Dec 2016", "print(this_user.id, this_user.link_html, this_user.link_portfolio, this_user.link_following, this_user.link_followers, this_user.link_photos) # TODO: collections, photos and users from", "TestUsers: # TODO: avoid code duplication # Need to workout how to combine", "api_key = os.environ.get('APPLICATION_ID', None) or 'DUMMY_APPLICATION_ID' class TestUsers: # TODO: avoid code duplication", "the class always inits without query params json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) pu_obj", "json.loads(open(resource_filepath).read()) responses.add( responses.GET, '{}{}'.format(API_ROOT, stored_response.get('url').split('?')[0]), # cheating on the url, because the class", "import os from pyunsplash import PyUnsplash from pyunsplash.src.settings import API_ROOT api_key = os.environ.get('APPLICATION_ID',", "response type = 'salvoventura_statistics' resource_filepath = self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read()) responses.add( responses.GET, '{}{}'.format(API_ROOT,", "stored_response.get('url').split('?')[0]), # cheating on the url, because the class always inits without query", "the class always inits without query params json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) #", "without query params json=stored_response.get('body'), status=stored_response.get('status_code'), 
content_type='application/json', adding_headers=stored_response.get('headers') ) pu_obj = PyUnsplash(api_key=api_key) this_user =", "tests # # Revision: 1 # Comment: What's new in revision 1 #", "'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura.json']), 'salvoventura_statistics': os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura_statistics.json']) } @responses.activate def", "params json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) pu_obj = PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=type) print(this_user.id,", "test_stats_total(self): type = 'salvoventura' resource_filepath = self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read()) responses.add( responses.GET, '{}{}'.format(API_ROOT,", "from the user object @responses.activate def test_user_stats(self): username = 'salvoventura' # Add the", "# Need to workout how to combine responses.activate so as to avoid #", "adding_headers=stored_response.get('headers') ) # Add the user statistics api response type = 'salvoventura_statistics' resource_filepath", "# Purpose: users unit tests # # Revision: 1 # Comment: What's new", "new in revision 1 # use local resources # ############################################################################### import responses import", "always inits without query params json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) pu_obj = PyUnsplash(api_key=api_key)", "this_user.link_photos) # TODO: collections, photos and users from the user object @responses.activate def", "Date: 14 Dec 2016 # Purpose: users unit tests # # Revision: 1", "Purpose: users unit tests # # Revision: 1 # Comment: What's new in", "duplication # Need to workout how to combine responses.activate so as to 
avoid", "'tests', 'resources', 'resource__users_salvoventura_statistics.json']) } @responses.activate def test_stats_total(self): type = 'salvoventura' resource_filepath = self.store_mapping[type]", "TODO: collections, photos and users from the user object @responses.activate def test_user_stats(self): username", "test_users.py # # Author: <NAME> <<EMAIL>> # Date: 14 Dec 2016 # Purpose:", "class always inits without query params json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) pu_obj =", "the user statistics api response type = 'salvoventura_statistics' resource_filepath = self.store_mapping[type] stored_response =", ") pu_obj = PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=username) # create a User object this_user_stats", "status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) # Add the user statistics api response type =", "os.environ.get('TOXINIDIR', None) store_mapping = { 'salvoventura': os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura.json']), 'salvoventura_statistics': os.sep.join([root_path,", "from pyunsplash import PyUnsplash from pyunsplash.src.settings import API_ROOT api_key = os.environ.get('APPLICATION_ID', None) or", "status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) pu_obj = PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=type) print(this_user.id, this_user.link_html, this_user.link_portfolio,", "= pu_obj.user(source=username) # create a User object this_user_stats = this_user.statistics() # fetch a", "unit tests # # Revision: 1 # Comment: What's new in revision 1", "Revision: 1 # Comment: What's new in revision 1 # use local resources", "'tests', 'resources', 'resource__users_salvoventura.json']), 'salvoventura_statistics': 
os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura_statistics.json']) } @responses.activate def test_stats_total(self):", "'resources', 'resource__users_salvoventura_statistics.json']) } @responses.activate def test_stats_total(self): type = 'salvoventura' resource_filepath = self.store_mapping[type] stored_response", "@responses.activate def test_stats_total(self): type = 'salvoventura' resource_filepath = self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read()) responses.add(", "user statistics api response type = 'salvoventura_statistics' resource_filepath = self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read())", "import json import os from pyunsplash import PyUnsplash from pyunsplash.src.settings import API_ROOT api_key", "os from pyunsplash import PyUnsplash from pyunsplash.src.settings import API_ROOT api_key = os.environ.get('APPLICATION_ID', None)", "def test_stats_total(self): type = 'salvoventura' resource_filepath = self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read()) responses.add( responses.GET,", "so as to avoid # code duplication, as the testcases are pretty much", "this_user.link_following, this_user.link_followers, this_user.link_photos) # TODO: collections, photos and users from the user object", "PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=type) print(this_user.id, this_user.link_html, this_user.link_portfolio, this_user.link_following, this_user.link_followers, this_user.link_photos) # TODO: collections,", "User object this_user_stats = this_user.statistics() # fetch a UserStatistics object print(this_user_stats.downloads.get('total'), this_user_stats.views.get('total'), this_user_stats.likes.get('total'))", "revision 1 # use local resources # ############################################################################### import responses import json import", "type = 'salvoventura_statistics' 
resource_filepath = self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read()) responses.add( responses.GET, '{}{}'.format(API_ROOT, stored_response.get('url').split('?')[0]),", "a User object this_user_stats = this_user.statistics() # fetch a UserStatistics object print(this_user_stats.downloads.get('total'), this_user_stats.views.get('total'),", "class TestUsers: # TODO: avoid code duplication # Need to workout how to", "pu_obj = PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=username) # create a User object this_user_stats =", "create a User object this_user_stats = this_user.statistics() # fetch a UserStatistics object print(this_user_stats.downloads.get('total'),", "pu_obj.user(source=username) # create a User object this_user_stats = this_user.statistics() # fetch a UserStatistics", "username = 'salvoventura' # Add the user api response type = 'salvoventura' resource_filepath", "= 'salvoventura_statistics' resource_filepath = self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read()) responses.add( responses.GET, '{}{}'.format(API_ROOT, stored_response.get('url').split('?')[0]), #", "'salvoventura_statistics' resource_filepath = self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read()) responses.add( responses.GET, '{}{}'.format(API_ROOT, stored_response.get('url').split('?')[0]), # cheating", "user object @responses.activate def test_user_stats(self): username = 'salvoventura' # Add the user api", "# code duplication, as the testcases are pretty much the same for all", "this_user.link_html, this_user.link_portfolio, this_user.link_following, this_user.link_followers, this_user.link_photos) # TODO: collections, photos and users from the", "Copyright (c) 2016 <NAME> <<EMAIL>> # # File: test_users.py # # Author: <NAME>", "# Date: 14 Dec 2016 # Purpose: users unit tests # # Revision:", "pyunsplash import PyUnsplash from pyunsplash.src.settings import API_ROOT api_key = 
os.environ.get('APPLICATION_ID', None) or 'DUMMY_APPLICATION_ID'", "# Copyright (c) 2016 <NAME> <<EMAIL>> # # File: test_users.py # # Author:", "import responses import json import os from pyunsplash import PyUnsplash from pyunsplash.src.settings import", "without query params json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) # Add the user statistics", "# # Revision: 1 # Comment: What's new in revision 1 # use", "users from the user object @responses.activate def test_user_stats(self): username = 'salvoventura' # Add", "users unit tests # # Revision: 1 # Comment: What's new in revision", "os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura_statistics.json']) } @responses.activate def test_stats_total(self): type = 'salvoventura' resource_filepath", "# TOXINIDIR comes from tox.ini root_path = os.environ.get('TRAVIS_BUILD_DIR', None) or os.environ.get('TOXINIDIR', None) store_mapping", "= 'salvoventura' resource_filepath = self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read()) responses.add( responses.GET, '{}{}'.format(API_ROOT, stored_response.get('url').split('?')[0]), #", "adding_headers=stored_response.get('headers') ) pu_obj = PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=type) print(this_user.id, this_user.link_html, this_user.link_portfolio, this_user.link_following, this_user.link_followers,", "use local resources # ############################################################################### import responses import json import os from pyunsplash", "Need to workout how to combine responses.activate so as to avoid # code", "how to combine responses.activate so as to avoid # code duplication, as the", "(c) 2016 <NAME> <<EMAIL>> # # File: test_users.py # # Author: <NAME> <<EMAIL>>", "class always inits without query params json=stored_response.get('body'), 
status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) # Add", "json import os from pyunsplash import PyUnsplash from pyunsplash.src.settings import API_ROOT api_key =", "Comment: What's new in revision 1 # use local resources # ############################################################################### import", "the same for all # TOXINIDIR comes from tox.ini root_path = os.environ.get('TRAVIS_BUILD_DIR', None)", "resource_filepath = self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read()) responses.add( responses.GET, '{}{}'.format(API_ROOT, stored_response.get('url').split('?')[0]), # cheating on", ") pu_obj = PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=type) print(this_user.id, this_user.link_html, this_user.link_portfolio, this_user.link_following, this_user.link_followers, this_user.link_photos)", ") # Add the user statistics api response type = 'salvoventura_statistics' resource_filepath =", "responses.activate so as to avoid # code duplication, as the testcases are pretty", "= PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=type) print(this_user.id, this_user.link_html, this_user.link_portfolio, this_user.link_following, this_user.link_followers, this_user.link_photos) # TODO:", "1 # use local resources # ############################################################################### import responses import json import os", "or 'DUMMY_APPLICATION_ID' class TestUsers: # TODO: avoid code duplication # Need to workout", "os.environ.get('TRAVIS_BUILD_DIR', None) or os.environ.get('TOXINIDIR', None) store_mapping = { 'salvoventura': os.sep.join([root_path, 'pyunsplash', 'tests', 'resources',", "api response type = 'salvoventura' resource_filepath = self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read()) responses.add( responses.GET,", "# Add the user statistics api response type = 'salvoventura_statistics' 
resource_filepath = self.store_mapping[type]", "response type = 'salvoventura' resource_filepath = self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read()) responses.add( responses.GET, '{}{}'.format(API_ROOT,", "None) or os.environ.get('TOXINIDIR', None) store_mapping = { 'salvoventura': os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura.json']),", "to avoid # code duplication, as the testcases are pretty much the same", "import API_ROOT api_key = os.environ.get('APPLICATION_ID', None) or 'DUMMY_APPLICATION_ID' class TestUsers: # TODO: avoid", "are pretty much the same for all # TOXINIDIR comes from tox.ini root_path", "= 'salvoventura' # Add the user api response type = 'salvoventura' resource_filepath =", "code duplication, as the testcases are pretty much the same for all #", "'salvoventura': os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura.json']), 'salvoventura_statistics': os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura_statistics.json']) }", "responses.GET, '{}{}'.format(API_ROOT, stored_response.get('url').split('?')[0]), # cheating on the url, because the class always inits", "2016 <NAME> <<EMAIL>> # # File: test_users.py # # Author: <NAME> <<EMAIL>> #", "collections, photos and users from the user object @responses.activate def test_user_stats(self): username =", "# create a User object this_user_stats = this_user.statistics() # fetch a UserStatistics object", "tox.ini root_path = os.environ.get('TRAVIS_BUILD_DIR', None) or os.environ.get('TOXINIDIR', None) store_mapping = { 'salvoventura': os.sep.join([root_path,", "# TODO: avoid code duplication # Need to workout how to combine responses.activate", "def test_user_stats(self): username = 'salvoventura' # Add the user api response type =", "cheating on the url, because the class always inits without query params json=stored_response.get('body'),", 
"json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) pu_obj = PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=username) # create", "adding_headers=stored_response.get('headers') ) pu_obj = PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=username) # create a User object", "root_path = os.environ.get('TRAVIS_BUILD_DIR', None) or os.environ.get('TOXINIDIR', None) store_mapping = { 'salvoventura': os.sep.join([root_path, 'pyunsplash',", "# Comment: What's new in revision 1 # use local resources # ###############################################################################", "because the class always inits without query params json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') )", "= self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read()) responses.add( responses.GET, '{}{}'.format(API_ROOT, stored_response.get('url').split('?')[0]), # cheating on the", "query params json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) # Add the user statistics api", "query params json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) pu_obj = PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=username)", "url, because the class always inits without query params json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers')", "None) store_mapping = { 'salvoventura': os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura.json']), 'salvoventura_statistics': os.sep.join([root_path, 
'pyunsplash',", "this_user = pu_obj.user(source=username) # create a User object this_user_stats = this_user.statistics() # fetch", "14 Dec 2016 # Purpose: users unit tests # # Revision: 1 #", "store_mapping = { 'salvoventura': os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura.json']), 'salvoventura_statistics': os.sep.join([root_path, 'pyunsplash', 'tests',", "avoid code duplication # Need to workout how to combine responses.activate so as", "= PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=username) # create a User object this_user_stats = this_user.statistics()", "None) or 'DUMMY_APPLICATION_ID' class TestUsers: # TODO: avoid code duplication # Need to", "responses import json import os from pyunsplash import PyUnsplash from pyunsplash.src.settings import API_ROOT", "or os.environ.get('TOXINIDIR', None) store_mapping = { 'salvoventura': os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura.json']), 'salvoventura_statistics':", "'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura_statistics.json']) } @responses.activate def test_stats_total(self): type = 'salvoventura' resource_filepath =", "Author: <NAME> <<EMAIL>> # Date: 14 Dec 2016 # Purpose: users unit tests", "workout how to combine responses.activate so as to avoid # code duplication, as", "@responses.activate def test_user_stats(self): username = 'salvoventura' # Add the user api response type", "always inits without query params json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) # Add the", "params json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) pu_obj = PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=username) #", "= os.environ.get('APPLICATION_ID', None) or 
'DUMMY_APPLICATION_ID' class TestUsers: # TODO: avoid code duplication #", "<<EMAIL>> # Date: 14 Dec 2016 # Purpose: users unit tests # #", "= os.environ.get('TRAVIS_BUILD_DIR', None) or os.environ.get('TOXINIDIR', None) store_mapping = { 'salvoventura': os.sep.join([root_path, 'pyunsplash', 'tests',", "'salvoventura' # Add the user api response type = 'salvoventura' resource_filepath = self.store_mapping[type]", "Dec 2016 # Purpose: users unit tests # # Revision: 1 # Comment:", "'salvoventura_statistics': os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura_statistics.json']) } @responses.activate def test_stats_total(self): type = 'salvoventura'", "1 # Comment: What's new in revision 1 # use local resources #", "on the url, because the class always inits without query params json=stored_response.get('body'), status=stored_response.get('status_code'),", "status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) pu_obj = PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=username) # create a", "<NAME> <<EMAIL>> # # File: test_users.py # # Author: <NAME> <<EMAIL>> # Date:", "Add the user statistics api response type = 'salvoventura_statistics' resource_filepath = self.store_mapping[type] stored_response", "local resources # ############################################################################### import responses import json import os from pyunsplash import", "object @responses.activate def test_user_stats(self): username = 'salvoventura' # Add the user api response", "json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) pu_obj = PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=type) print(this_user.id, this_user.link_html,", "os.environ.get('APPLICATION_ID', None) or 'DUMMY_APPLICATION_ID' class TestUsers: # TODO: avoid code duplication 
# Need", "for all # TOXINIDIR comes from tox.ini root_path = os.environ.get('TRAVIS_BUILD_DIR', None) or os.environ.get('TOXINIDIR',", "to combine responses.activate so as to avoid # code duplication, as the testcases", "in revision 1 # use local resources # ############################################################################### import responses import json", "# cheating on the url, because the class always inits without query params", "PyUnsplash from pyunsplash.src.settings import API_ROOT api_key = os.environ.get('APPLICATION_ID', None) or 'DUMMY_APPLICATION_ID' class TestUsers:", "statistics api response type = 'salvoventura_statistics' resource_filepath = self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read()) responses.add(", "2016 # Purpose: users unit tests # # Revision: 1 # Comment: What's", "# TODO: collections, photos and users from the user object @responses.activate def test_user_stats(self):", "import PyUnsplash from pyunsplash.src.settings import API_ROOT api_key = os.environ.get('APPLICATION_ID', None) or 'DUMMY_APPLICATION_ID' class", "<filename>pyunsplash/tests/test_users.py ############################################################################### # Copyright (c) 2016 <NAME> <<EMAIL>> # # File: test_users.py #", "{ 'salvoventura': os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura.json']), 'salvoventura_statistics': os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura_statistics.json'])", "PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=username) # create a User object this_user_stats = this_user.statistics() #", "'resources', 'resource__users_salvoventura.json']), 'salvoventura_statistics': os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura_statistics.json']) } @responses.activate def test_stats_total(self): type", "same for all # TOXINIDIR comes from tox.ini root_path = 
os.environ.get('TRAVIS_BUILD_DIR', None) or", "'salvoventura' resource_filepath = self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read()) responses.add( responses.GET, '{}{}'.format(API_ROOT, stored_response.get('url').split('?')[0]), # cheating", "api response type = 'salvoventura_statistics' resource_filepath = self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read()) responses.add( responses.GET,", "json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) # Add the user statistics api response type", "TOXINIDIR comes from tox.ini root_path = os.environ.get('TRAVIS_BUILD_DIR', None) or os.environ.get('TOXINIDIR', None) store_mapping =", "} @responses.activate def test_stats_total(self): type = 'salvoventura' resource_filepath = self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read())", "from tox.ini root_path = os.environ.get('TRAVIS_BUILD_DIR', None) or os.environ.get('TOXINIDIR', None) store_mapping = { 'salvoventura':", "############################################################################### import responses import json import os from pyunsplash import PyUnsplash from pyunsplash.src.settings", "TODO: avoid code duplication # Need to workout how to combine responses.activate so", "pu_obj.user(source=type) print(this_user.id, this_user.link_html, this_user.link_portfolio, this_user.link_following, this_user.link_followers, this_user.link_photos) # TODO: collections, photos and users", "query params json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) pu_obj = PyUnsplash(api_key=api_key) this_user = pu_obj.user(source=type)", "# # File: test_users.py # # Author: <NAME> <<EMAIL>> # Date: 14 Dec", "= pu_obj.user(source=type) print(this_user.id, this_user.link_html, 
this_user.link_portfolio, this_user.link_following, this_user.link_followers, this_user.link_photos) # TODO: collections, photos and", "<<EMAIL>> # # File: test_users.py # # Author: <NAME> <<EMAIL>> # Date: 14", "pretty much the same for all # TOXINIDIR comes from tox.ini root_path =", "File: test_users.py # # Author: <NAME> <<EMAIL>> # Date: 14 Dec 2016 #", "= json.loads(open(resource_filepath).read()) responses.add( responses.GET, '{}{}'.format(API_ROOT, stored_response.get('url').split('?')[0]), # cheating on the url, because the", "content_type='application/json', adding_headers=stored_response.get('headers') ) # Add the user statistics api response type = 'salvoventura_statistics'", "############################################################################### # Copyright (c) 2016 <NAME> <<EMAIL>> # # File: test_users.py # #", "# ############################################################################### import responses import json import os from pyunsplash import PyUnsplash from", "Add the user api response type = 'salvoventura' resource_filepath = self.store_mapping[type] stored_response =", "code duplication # Need to workout how to combine responses.activate so as to", "this_user.link_portfolio, this_user.link_following, this_user.link_followers, this_user.link_photos) # TODO: collections, photos and users from the user", "# Add the user api response type = 'salvoventura' resource_filepath = self.store_mapping[type] stored_response", "'DUMMY_APPLICATION_ID' class TestUsers: # TODO: avoid code duplication # Need to workout how", "comes from tox.ini root_path = os.environ.get('TRAVIS_BUILD_DIR', None) or os.environ.get('TOXINIDIR', None) store_mapping = {", "self.store_mapping[type] stored_response = json.loads(open(resource_filepath).read()) responses.add( responses.GET, '{}{}'.format(API_ROOT, stored_response.get('url').split('?')[0]), # cheating on the url,", "inits without query params json=stored_response.get('body'), 
status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) # Add the user", "test_user_stats(self): username = 'salvoventura' # Add the user api response type = 'salvoventura'", "resources # ############################################################################### import responses import json import os from pyunsplash import PyUnsplash", "# # Author: <NAME> <<EMAIL>> # Date: 14 Dec 2016 # Purpose: users", "avoid # code duplication, as the testcases are pretty much the same for", "all # TOXINIDIR comes from tox.ini root_path = os.environ.get('TRAVIS_BUILD_DIR', None) or os.environ.get('TOXINIDIR', None)", "params json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json', adding_headers=stored_response.get('headers') ) # Add the user statistics api response", "the url, because the class always inits without query params json=stored_response.get('body'), status=stored_response.get('status_code'), content_type='application/json',", "os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura.json']), 'salvoventura_statistics': os.sep.join([root_path, 'pyunsplash', 'tests', 'resources', 'resource__users_salvoventura_statistics.json']) } @responses.activate", "# Revision: 1 # Comment: What's new in revision 1 # use local", "photos and users from the user object @responses.activate def test_user_stats(self): username = 'salvoventura'", "as to avoid # code duplication, as the testcases are pretty much the", "responses.add( responses.GET, '{}{}'.format(API_ROOT, stored_response.get('url').split('?')[0]), # cheating on the url, because the class always" ]
[ "trunc def escreva(msg): c = trunc(len(msg)/2) print(f'{\"-=\":^}' * (c + 2)) print(f' {msg}')", "print(f'{\"-=\":^}' * (c + 2)) # Programa Principal n = input('Escreva uma mensagem:", "math import trunc def escreva(msg): c = trunc(len(msg)/2) print(f'{\"-=\":^}' * (c + 2))", "trunc(len(msg)/2) print(f'{\"-=\":^}' * (c + 2)) print(f' {msg}') print(f'{\"-=\":^}' * (c + 2))", "* (c + 2)) print(f' {msg}') print(f'{\"-=\":^}' * (c + 2)) # Programa", "from math import trunc def escreva(msg): c = trunc(len(msg)/2) print(f'{\"-=\":^}' * (c +", "<gh_stars>0 from math import trunc def escreva(msg): c = trunc(len(msg)/2) print(f'{\"-=\":^}' * (c", "2)) print(f' {msg}') print(f'{\"-=\":^}' * (c + 2)) # Programa Principal n =", "{msg}') print(f'{\"-=\":^}' * (c + 2)) # Programa Principal n = input('Escreva uma", "+ 2)) print(f' {msg}') print(f'{\"-=\":^}' * (c + 2)) # Programa Principal n", "print(f' {msg}') print(f'{\"-=\":^}' * (c + 2)) # Programa Principal n = input('Escreva", "c = trunc(len(msg)/2) print(f'{\"-=\":^}' * (c + 2)) print(f' {msg}') print(f'{\"-=\":^}' * (c", "import trunc def escreva(msg): c = trunc(len(msg)/2) print(f'{\"-=\":^}' * (c + 2)) print(f'", "def escreva(msg): c = trunc(len(msg)/2) print(f'{\"-=\":^}' * (c + 2)) print(f' {msg}') print(f'{\"-=\":^}'", "= trunc(len(msg)/2) print(f'{\"-=\":^}' * (c + 2)) print(f' {msg}') print(f'{\"-=\":^}' * (c +", "(c + 2)) print(f' {msg}') print(f'{\"-=\":^}' * (c + 2)) # Programa Principal", "* (c + 2)) # Programa Principal n = input('Escreva uma mensagem: ')", "escreva(msg): c = trunc(len(msg)/2) print(f'{\"-=\":^}' * (c + 2)) print(f' {msg}') print(f'{\"-=\":^}' *", "print(f'{\"-=\":^}' * (c + 2)) print(f' {msg}') print(f'{\"-=\":^}' * (c + 2)) #", "(c + 2)) # Programa Principal n = input('Escreva uma mensagem: ') escreva(n)" ]
[ "has no stop' current = self.footer continue if '/*#' in line: assert current", "label = parse_label(line) current = self.sections[label] = [] continue current.append(line) return self def", "render(): query = request.args.getlist('sections') dot = graph.emit(query).encode() result = subprocess.run('dot -Tsvg', input=dot, capture_output=True,", "emit(self, query=()): result = [] result.extend(self.header) for label, section in self.sections.items(): if label", "handle.read() @app.route('/sections') def sections(): return {'sections': [{'section': section} for section in graph.list()]} @app.route('/render')", "self.header with open(filename) as handle: for line in handle: if '#*/' in line:", "= {} self.current = self.header def parse(self, filename): def parse_label(line): return line.strip().split()[1] current", "self.sections graph = OptionGraph().parse('graph.dot') @app.route('/') def index(): with open('index.html') as handle: return handle.read()", "= Flask(__name__) class OptionGraph: def __init__(self): self.header = [] self.footer = [] self.sections", "python import subprocess from flask import Flask, request app = Flask(__name__) class OptionGraph:", "result.extend(self.footer) return \"\".join(result) def list(self): return self.sections graph = OptionGraph().parse('graph.dot') @app.route('/') def index():", "line in handle: if '#*/' in line: assert current is not self.header, 'stop", "current = self.header with open(filename) as handle: for line in handle: if '#*/'", "= self.header def parse(self, filename): def parse_label(line): return line.strip().split()[1] current = self.header with", "class OptionGraph: def __init__(self): self.header = [] self.footer = [] self.sections = {}", "Flask, request app = Flask(__name__) class OptionGraph: def __init__(self): self.header = [] self.footer", "not self.header, 'stop before first section' assert current is not self.footer, 'footer has", "[] result.extend(self.header) for label, section in 
class OptionGraph:
    """A Graphviz source file with optional, labelled chunks.

    The input file wraps each optional chunk in marker lines::

        /*# <label>
        ... section lines ...
        #*/

    Lines before the first marker form the header, lines after the last
    ``#*/`` form the footer.  ``emit`` rebuilds the text with only the
    requested sections included.
    """

    def __init__(self):
        self.header = []    # lines before the first section marker
        self.footer = []    # lines after the last ``#*/`` marker
        self.sections = {}  # label -> list of lines in that section
        # NOTE: the old ``self.current`` attribute was dead code --
        # ``parse`` tracks its cursor in a local variable.

    def parse(self, filename):
        """Read *filename* and split it into header, sections and footer.

        Returns ``self`` so the call can be chained.

        Raises:
            ValueError: on a stop marker before any section, a nested
                section start, or a marker line with no label.
                (Previously ``assert``, which vanishes under ``python -O``.)
        """
        def parse_label(line):
            # Marker format: ``/*# <label>`` -- the label is the second token.
            parts = line.strip().split()
            if len(parts) < 2:
                raise ValueError('section marker missing label: %r' % line)
            return parts[1]

        current = self.header
        with open(filename) as handle:
            for line in handle:
                if '#*/' in line:
                    # End-of-section marker: switch the cursor to the footer.
                    if current is self.header:
                        raise ValueError('stop before first section')
                    if current is self.footer:
                        raise ValueError('footer has no stop')
                    current = self.footer
                    continue
                if '/*#' in line:
                    # Start-of-section marker: only legal between sections
                    # (i.e. while collecting header or footer lines).
                    if current is not self.header and current is not self.footer:
                        raise ValueError('nested section')
                    label = parse_label(line)
                    current = self.sections[label] = []
                    continue
                current.append(line)
        return self

    def emit(self, query=()):
        """Return the file text including only the sections named in *query*."""
        result = []
        result.extend(self.header)
        for label, section in self.sections.items():
            if label in query:
                result.extend(section)
        result.extend(self.footer)
        return "".join(result)

    def list(self):
        """Return the label -> lines mapping.

        The name shadows the builtin but is kept: the Flask routes call
        ``graph.list()``.
        """
        return self.sections
subprocess from flask import Flask, request app = Flask(__name__)", "return {'sections': [{'section': section} for section in graph.list()]} @app.route('/render') def render(): query =", "result.extend(section) result.extend(self.footer) return \"\".join(result) def list(self): return self.sections graph = OptionGraph().parse('graph.dot') @app.route('/') def", "assert current is self.header or current is self.footer, 'nested section' label = parse_label(line)", "= parse_label(line) current = self.sections[label] = [] continue current.append(line) return self def emit(self,", "dot = graph.emit(query).encode() result = subprocess.run('dot -Tsvg', input=dot, capture_output=True, shell=True).stdout return result, 200,", "-Tsvg', input=dot, capture_output=True, shell=True).stdout return result, 200, {'Content-Type': 'image/svg+xml'} if __name__ == '__main__':", "graph.emit(query).encode() result = subprocess.run('dot -Tsvg', input=dot, capture_output=True, shell=True).stdout return result, 200, {'Content-Type': 'image/svg+xml'}", "flask import Flask, request app = Flask(__name__) class OptionGraph: def __init__(self): self.header =", "is self.header or current is self.footer, 'nested section' label = parse_label(line) current =", "[] continue current.append(line) return self def emit(self, query=()): result = [] result.extend(self.header) for", "[] self.footer = [] self.sections = {} self.current = self.header def parse(self, filename):", "handle: for line in handle: if '#*/' in line: assert current is not", "= self.footer continue if '/*#' in line: assert current is self.header or current", "'footer has no stop' current = self.footer continue if '/*#' in line: assert", "is self.footer, 'nested section' label = parse_label(line) current = self.sections[label] = [] continue", "in query: result.extend(section) result.extend(self.footer) return \"\".join(result) def list(self): return self.sections graph = OptionGraph().parse('graph.dot')", "[{'section': section} for 
section in graph.list()]} @app.route('/render') def render(): query = request.args.getlist('sections') dot", "in handle: if '#*/' in line: assert current is not self.header, 'stop before", "self.footer, 'footer has no stop' current = self.footer continue if '/*#' in line:", "self.sections[label] = [] continue current.append(line) return self def emit(self, query=()): result = []", "if '#*/' in line: assert current is not self.header, 'stop before first section'", "parse(self, filename): def parse_label(line): return line.strip().split()[1] current = self.header with open(filename) as handle:", "query = request.args.getlist('sections') dot = graph.emit(query).encode() result = subprocess.run('dot -Tsvg', input=dot, capture_output=True, shell=True).stdout" ]
[ "Env env = Env() env.read_env() db_host = env.str('DB_HOST', 'localhost') db_port = env.int('DB_PORT', 27017)", "environs import Env env = Env() env.read_env() db_host = env.str('DB_HOST', 'localhost') db_port =", "from environs import Env env = Env() env.read_env() db_host = env.str('DB_HOST', 'localhost') db_port", "import Env env = Env() env.read_env() db_host = env.str('DB_HOST', 'localhost') db_port = env.int('DB_PORT'," ]
[ ".sina import SinaQuotation from .tencent import TencentQuotation from .helpers import update_stock_codes, stock_a_hour __version__", "from .sina import SinaQuotation from .tencent import TencentQuotation from .helpers import update_stock_codes, stock_a_hour", "from .tencent import TencentQuotation from .helpers import update_stock_codes, stock_a_hour __version__ = \"0.0.0.1\" __author__", "SinaQuotation from .tencent import TencentQuotation from .helpers import update_stock_codes, stock_a_hour __version__ = \"0.0.0.1\"", ".tencent import TencentQuotation from .helpers import update_stock_codes, stock_a_hour __version__ = \"0.0.0.1\" __author__ =", "import TencentQuotation from .helpers import update_stock_codes, stock_a_hour __version__ = \"0.0.0.1\" __author__ = \"demonfinch\"", "import SinaQuotation from .tencent import TencentQuotation from .helpers import update_stock_codes, stock_a_hour __version__ =" ]
[ "= max(imgA.shape, imgB.shape) new_imgA = cv2.resize(imgA, new_size) new_imgB = cv2.resize(imgB, new_size) return new_imgA,", "imgB): new_size = max(imgA.shape, imgB.shape) new_imgA = cv2.resize(imgA, new_size) new_imgB = cv2.resize(imgB, new_size)", "import cv2 def image_equalize(imgA, imgB): new_size = max(imgA.shape, imgB.shape) new_imgA = cv2.resize(imgA, new_size)", "new_size = max(imgA.shape, imgB.shape) new_imgA = cv2.resize(imgA, new_size) new_imgB = cv2.resize(imgB, new_size) return", "def image_equalize(imgA, imgB): new_size = max(imgA.shape, imgB.shape) new_imgA = cv2.resize(imgA, new_size) new_imgB =", "image_equalize(imgA, imgB): new_size = max(imgA.shape, imgB.shape) new_imgA = cv2.resize(imgA, new_size) new_imgB = cv2.resize(imgB,", "cv2 def image_equalize(imgA, imgB): new_size = max(imgA.shape, imgB.shape) new_imgA = cv2.resize(imgA, new_size) new_imgB", "max(imgA.shape, imgB.shape) new_imgA = cv2.resize(imgA, new_size) new_imgB = cv2.resize(imgB, new_size) return new_imgA, new_imgB" ]
[]
[ "instance=keg) if form.is_valid(): form.save() return redirect('kegs:keg_list') return render(request, template_name, {'form':form}) def beer_delete(request, pk,", "render(request, template_name, {'form':form}) def beer_update(request, pk, template_name='beer_form.html'): beer = get_object_or_404(Beer, pk=pk) form =", "beer = Beer.objects.all() data = {} data['object_list'] = beer return render(request, template_name, data)", "= Keg.objects.all() data = {} data['object_list'] = keg return render(request, template_name, data) def", "return render(request, template_name, {'object':beer}) def keg_delete(request, pk, template_name='keg_confirm_delete.html'): keg = get_object_or_404(Keg, pk=pk) if", "return render(request, template_name, {'form':form}) def beer_update(request, pk, template_name='beer_form.html'): beer = get_object_or_404(Beer, pk=pk) form", "pk, template_name='keg_form.html'): keg = get_object_or_404(Keg, pk=pk) form = KegForm(request.POST or None, instance=keg) if", "form.is_valid(): form.save() return redirect('kegs:beer_list') return render(request, template_name, {'form':form}) def keg_update(request, pk, template_name='keg_form.html'): keg", "import BeerForm, KegForm from kegs.models import Beer, Keg # Create your views here.", "beer_obj, } return render(request, 'keg_detail.html', context) def beer_list(request, template_name='beer_list.html'): beer = Beer.objects.all() data", "template_name, {'form':form}) def beer_update(request, pk, template_name='beer_form.html'): beer = get_object_or_404(Beer, pk=pk) form = BeerForm(request.POST", "KegForm(request.POST or None) if form.is_valid(): form.save() return redirect('kegs:keg_list') return render(request, template_name, {'form':form}) def", "= {} data['object_list'] = keg return render(request, template_name, data) def beer_create(request, template_name='beer_form.html'): form", "get_object_or_404(Beer, pk=pk) form = BeerForm(request.POST or None, instance=beer) if form.is_valid(): form.save() 
return redirect('kegs:beer_list')", "pk=pk) form = KegForm(request.POST or None, instance=keg) if form.is_valid(): form.save() return redirect('kegs:keg_list') return", "template_name='keg_form.html'): keg = get_object_or_404(Keg, pk=pk) form = KegForm(request.POST or None, instance=keg) if form.is_valid():", "keg_detail(request, pk): beer_obj = Beer.objects.get(pk=pk) keg_objs = Keg.objects.filter(beer_id=beer_obj.id) context = { 'kegs': keg_objs,", "template_name='beer_form.html'): beer = get_object_or_404(Beer, pk=pk) form = BeerForm(request.POST or None, instance=beer) if form.is_valid():", "template_name, {'object':beer}) def keg_delete(request, pk, template_name='keg_confirm_delete.html'): keg = get_object_or_404(Keg, pk=pk) if request.method=='POST': keg.delete()", "data = {} data['object_list'] = keg return render(request, template_name, data) def beer_create(request, template_name='beer_form.html'):", "return redirect('kegs:beer_list') return render(request, template_name, {'object':beer}) def keg_delete(request, pk, template_name='keg_confirm_delete.html'): keg = get_object_or_404(Keg,", "form.save() return redirect('kegs:beer_list') return render(request, template_name, {'form':form}) def keg_update(request, pk, template_name='keg_form.html'): keg =", "keg = Keg.objects.all() data = {} data['object_list'] = keg return render(request, template_name, data)", "from kegs.forms import BeerForm, KegForm from kegs.models import Beer, Keg # Create your", "pk, template_name='keg_confirm_delete.html'): keg = get_object_or_404(Keg, pk=pk) if request.method=='POST': keg.delete() return redirect('kegs:keg_list') return render(request,", "context = { 'kegs': keg_objs, 'beers': beer_obj, } return render(request, 'keg_detail.html', context) def", "} return render(request, 'keg_detail.html', context) def beer_list(request, template_name='beer_list.html'): beer = Beer.objects.all() data =", "redirect, get_object_or_404 from kegs.forms import BeerForm, KegForm from 
kegs.models import Beer, Keg #", "form.save() return redirect('kegs:keg_list') return render(request, template_name, {'form':form}) def beer_delete(request, pk, template_name='beer_confirm_delete.html'): beer =", "redirect('kegs:keg_list') return render(request, template_name, {'form':form}) def beer_update(request, pk, template_name='beer_form.html'): beer = get_object_or_404(Beer, pk=pk)", "if request.method=='POST': beer.delete() return redirect('kegs:beer_list') return render(request, template_name, {'object':beer}) def keg_delete(request, pk, template_name='keg_confirm_delete.html'):", "beer.delete() return redirect('kegs:beer_list') return render(request, template_name, {'object':beer}) def keg_delete(request, pk, template_name='keg_confirm_delete.html'): keg =", "beer return render(request, template_name, data) def keg_list(request, template_name='keg_list.html'): keg = Keg.objects.all() data =", "form.is_valid(): form.save() return redirect('kegs:keg_list') return render(request, template_name, {'form':form}) def beer_delete(request, pk, template_name='beer_confirm_delete.html'): beer", "or None, instance=keg) if form.is_valid(): form.save() return redirect('kegs:keg_list') return render(request, template_name, {'form':form}) def", "keg_create(request, template_name='keg_form.html'): form = KegForm(request.POST or None) if form.is_valid(): form.save() return redirect('kegs:keg_list') return", "request.method=='POST': beer.delete() return redirect('kegs:beer_list') return render(request, template_name, {'object':beer}) def keg_delete(request, pk, template_name='keg_confirm_delete.html'): keg", "form = KegForm(request.POST or None, instance=keg) if form.is_valid(): form.save() return redirect('kegs:keg_list') return render(request,", "None, instance=beer) if form.is_valid(): form.save() return redirect('kegs:beer_list') return render(request, template_name, {'form':form}) def keg_update(request,", "= get_object_or_404(Beer, pk=pk) form = BeerForm(request.POST 
or None, instance=beer) if form.is_valid(): form.save() return", "form = KegForm(request.POST or None) if form.is_valid(): form.save() return redirect('kegs:keg_list') return render(request, template_name,", "def keg_detail(request, pk): beer_obj = Beer.objects.get(pk=pk) keg_objs = Keg.objects.filter(beer_id=beer_obj.id) context = { 'kegs':", "template_name, data) def keg_list(request, template_name='keg_list.html'): keg = Keg.objects.all() data = {} data['object_list'] =", "BeerForm(request.POST or None) if form.is_valid(): form.save() return redirect('kegs:beer_list') return render(request, template_name, {'form':form}) def", "beer_list(request, template_name='beer_list.html'): beer = Beer.objects.all() data = {} data['object_list'] = beer return render(request,", "# Create your views here. def keg_detail(request, pk): beer_obj = Beer.objects.get(pk=pk) keg_objs =", "keg_update(request, pk, template_name='keg_form.html'): keg = get_object_or_404(Keg, pk=pk) form = KegForm(request.POST or None, instance=keg)", "= get_object_or_404(Keg, pk=pk) form = KegForm(request.POST or None, instance=keg) if form.is_valid(): form.save() return", "or None, instance=beer) if form.is_valid(): form.save() return redirect('kegs:beer_list') return render(request, template_name, {'form':form}) def", "= Keg.objects.filter(beer_id=beer_obj.id) context = { 'kegs': keg_objs, 'beers': beer_obj, } return render(request, 'keg_detail.html',", "your views here. 
def keg_detail(request, pk): beer_obj = Beer.objects.get(pk=pk) keg_objs = Keg.objects.filter(beer_id=beer_obj.id) context", "render(request, template_name, {'form':form}) def beer_delete(request, pk, template_name='beer_confirm_delete.html'): beer = get_object_or_404(Beer, pk=pk) if request.method=='POST':", "form.is_valid(): form.save() return redirect('kegs:keg_list') return render(request, template_name, {'form':form}) def beer_update(request, pk, template_name='beer_form.html'): beer", "render, redirect, get_object_or_404 from kegs.forms import BeerForm, KegForm from kegs.models import Beer, Keg", "{'object':beer}) def keg_delete(request, pk, template_name='keg_confirm_delete.html'): keg = get_object_or_404(Keg, pk=pk) if request.method=='POST': keg.delete() return", "def beer_create(request, template_name='beer_form.html'): form = BeerForm(request.POST or None) if form.is_valid(): form.save() return redirect('kegs:beer_list')", "= keg return render(request, template_name, data) def beer_create(request, template_name='beer_form.html'): form = BeerForm(request.POST or", "form.save() return redirect('kegs:beer_list') return render(request, template_name, {'form':form}) def keg_create(request, template_name='keg_form.html'): form = KegForm(request.POST", "return render(request, 'keg_detail.html', context) def beer_list(request, template_name='beer_list.html'): beer = Beer.objects.all() data = {}", "if form.is_valid(): form.save() return redirect('kegs:beer_list') return render(request, template_name, {'form':form}) def keg_create(request, template_name='keg_form.html'): form", "beer = get_object_or_404(Beer, pk=pk) if request.method=='POST': beer.delete() return redirect('kegs:beer_list') return render(request, template_name, {'object':beer})", "template_name='keg_list.html'): keg = Keg.objects.all() data = {} data['object_list'] = keg return render(request, template_name,", "= {} data['object_list'] = beer return render(request, template_name, data) def 
keg_list(request, template_name='keg_list.html'): keg", "{} data['object_list'] = keg return render(request, template_name, data) def beer_create(request, template_name='beer_form.html'): form =", "render(request, template_name, {'form':form}) def keg_update(request, pk, template_name='keg_form.html'): keg = get_object_or_404(Keg, pk=pk) form =", "data = {} data['object_list'] = beer return render(request, template_name, data) def keg_list(request, template_name='keg_list.html'):", "pk=pk) if request.method=='POST': beer.delete() return redirect('kegs:beer_list') return render(request, template_name, {'object':beer}) def keg_delete(request, pk,", "beer_create(request, template_name='beer_form.html'): form = BeerForm(request.POST or None) if form.is_valid(): form.save() return redirect('kegs:beer_list') return", "or None) if form.is_valid(): form.save() return redirect('kegs:keg_list') return render(request, template_name, {'form':form}) def beer_update(request,", "= KegForm(request.POST or None, instance=keg) if form.is_valid(): form.save() return redirect('kegs:keg_list') return render(request, template_name,", "Create your views here. def keg_detail(request, pk): beer_obj = Beer.objects.get(pk=pk) keg_objs = Keg.objects.filter(beer_id=beer_obj.id)", "{'form':form}) def beer_update(request, pk, template_name='beer_form.html'): beer = get_object_or_404(Beer, pk=pk) form = BeerForm(request.POST or", "pk=pk) form = BeerForm(request.POST or None, instance=beer) if form.is_valid(): form.save() return redirect('kegs:beer_list') return", "data['object_list'] = keg return render(request, template_name, data) def beer_create(request, template_name='beer_form.html'): form = BeerForm(request.POST", "get_object_or_404 from kegs.forms import BeerForm, KegForm from kegs.models import Beer, Keg # Create", "Beer, Keg # Create your views here. 
def keg_detail(request, pk): beer_obj = Beer.objects.get(pk=pk)", "keg = get_object_or_404(Keg, pk=pk) if request.method=='POST': keg.delete() return redirect('kegs:keg_list') return render(request, template_name, {'object':keg})", "return redirect('kegs:keg_list') return render(request, template_name, {'form':form}) def beer_delete(request, pk, template_name='beer_confirm_delete.html'): beer = get_object_or_404(Beer,", "'beers': beer_obj, } return render(request, 'keg_detail.html', context) def beer_list(request, template_name='beer_list.html'): beer = Beer.objects.all()", "template_name, data) def beer_create(request, template_name='beer_form.html'): form = BeerForm(request.POST or None) if form.is_valid(): form.save()", "instance=beer) if form.is_valid(): form.save() return redirect('kegs:beer_list') return render(request, template_name, {'form':form}) def keg_update(request, pk,", "context) def beer_list(request, template_name='beer_list.html'): beer = Beer.objects.all() data = {} data['object_list'] = beer", "here. 
def keg_detail(request, pk): beer_obj = Beer.objects.get(pk=pk) keg_objs = Keg.objects.filter(beer_id=beer_obj.id) context = {", "import render, redirect, get_object_or_404 from kegs.forms import BeerForm, KegForm from kegs.models import Beer,", "template_name, {'form':form}) def keg_create(request, template_name='keg_form.html'): form = KegForm(request.POST or None) if form.is_valid(): form.save()", "= get_object_or_404(Beer, pk=pk) if request.method=='POST': beer.delete() return redirect('kegs:beer_list') return render(request, template_name, {'object':beer}) def", "pk): beer_obj = Beer.objects.get(pk=pk) keg_objs = Keg.objects.filter(beer_id=beer_obj.id) context = { 'kegs': keg_objs, 'beers':", "pk, template_name='beer_form.html'): beer = get_object_or_404(Beer, pk=pk) form = BeerForm(request.POST or None, instance=beer) if", "redirect('kegs:beer_list') return render(request, template_name, {'form':form}) def keg_create(request, template_name='keg_form.html'): form = KegForm(request.POST or None)", "get_object_or_404(Beer, pk=pk) if request.method=='POST': beer.delete() return redirect('kegs:beer_list') return render(request, template_name, {'object':beer}) def keg_delete(request,", "return redirect('kegs:beer_list') return render(request, template_name, {'form':form}) def keg_update(request, pk, template_name='keg_form.html'): keg = get_object_or_404(Keg,", "form = BeerForm(request.POST or None, instance=beer) if form.is_valid(): form.save() return redirect('kegs:beer_list') return render(request,", "pk, template_name='beer_confirm_delete.html'): beer = get_object_or_404(Beer, pk=pk) if request.method=='POST': beer.delete() return redirect('kegs:beer_list') return render(request,", "Keg.objects.all() data = {} data['object_list'] = keg return render(request, template_name, data) def beer_create(request,", "'keg_detail.html', context) def beer_list(request, template_name='beer_list.html'): beer = Beer.objects.all() data = {} data['object_list'] =", "None) if 
form.is_valid(): form.save() return redirect('kegs:keg_list') return render(request, template_name, {'form':form}) def beer_update(request, pk,", "beer = get_object_or_404(Beer, pk=pk) form = BeerForm(request.POST or None, instance=beer) if form.is_valid(): form.save()", "KegForm from kegs.models import Beer, Keg # Create your views here. def keg_detail(request,", "if form.is_valid(): form.save() return redirect('kegs:keg_list') return render(request, template_name, {'form':form}) def beer_update(request, pk, template_name='beer_form.html'):", "if form.is_valid(): form.save() return redirect('kegs:beer_list') return render(request, template_name, {'form':form}) def keg_update(request, pk, template_name='keg_form.html'):", "keg return render(request, template_name, data) def beer_create(request, template_name='beer_form.html'): form = BeerForm(request.POST or None)", "def beer_update(request, pk, template_name='beer_form.html'): beer = get_object_or_404(Beer, pk=pk) form = BeerForm(request.POST or None,", "redirect('kegs:beer_list') return render(request, template_name, {'object':beer}) def keg_delete(request, pk, template_name='keg_confirm_delete.html'): keg = get_object_or_404(Keg, pk=pk)", "template_name, {'form':form}) def keg_update(request, pk, template_name='keg_form.html'): keg = get_object_or_404(Keg, pk=pk) form = KegForm(request.POST", "return render(request, template_name, {'form':form}) def beer_delete(request, pk, template_name='beer_confirm_delete.html'): beer = get_object_or_404(Beer, pk=pk) if", "form = BeerForm(request.POST or None) if form.is_valid(): form.save() return redirect('kegs:beer_list') return render(request, template_name,", "{'form':form}) def beer_delete(request, pk, template_name='beer_confirm_delete.html'): beer = get_object_or_404(Beer, pk=pk) if request.method=='POST': beer.delete() return", "return redirect('kegs:beer_list') return render(request, template_name, {'form':form}) def keg_create(request, 
template_name='keg_form.html'): form = KegForm(request.POST or", "keg_objs, 'beers': beer_obj, } return render(request, 'keg_detail.html', context) def beer_list(request, template_name='beer_list.html'): beer =", "keg_list(request, template_name='keg_list.html'): keg = Keg.objects.all() data = {} data['object_list'] = keg return render(request,", "beer_obj = Beer.objects.get(pk=pk) keg_objs = Keg.objects.filter(beer_id=beer_obj.id) context = { 'kegs': keg_objs, 'beers': beer_obj,", "Keg # Create your views here. def keg_detail(request, pk): beer_obj = Beer.objects.get(pk=pk) keg_objs", "= { 'kegs': keg_objs, 'beers': beer_obj, } return render(request, 'keg_detail.html', context) def beer_list(request,", "django.shortcuts import render, redirect, get_object_or_404 from kegs.forms import BeerForm, KegForm from kegs.models import", "return render(request, template_name, data) def keg_list(request, template_name='keg_list.html'): keg = Keg.objects.all() data = {}", "= KegForm(request.POST or None) if form.is_valid(): form.save() return redirect('kegs:keg_list') return render(request, template_name, {'form':form})", "keg = get_object_or_404(Keg, pk=pk) form = KegForm(request.POST or None, instance=keg) if form.is_valid(): form.save()", "keg_objs = Keg.objects.filter(beer_id=beer_obj.id) context = { 'kegs': keg_objs, 'beers': beer_obj, } return render(request,", "data['object_list'] = beer return render(request, template_name, data) def keg_list(request, template_name='keg_list.html'): keg = Keg.objects.all()", "KegForm(request.POST or None, instance=keg) if form.is_valid(): form.save() return redirect('kegs:keg_list') return render(request, template_name, {'form':form})", "render(request, template_name, {'object':beer}) def keg_delete(request, pk, template_name='keg_confirm_delete.html'): keg = get_object_or_404(Keg, pk=pk) if request.method=='POST':", "form.save() return redirect('kegs:keg_list') return render(request, template_name, {'form':form}) def 
beer_update(request, pk, template_name='beer_form.html'): beer =", "def keg_list(request, template_name='keg_list.html'): keg = Keg.objects.all() data = {} data['object_list'] = keg return", "def beer_list(request, template_name='beer_list.html'): beer = Beer.objects.all() data = {} data['object_list'] = beer return", "data) def beer_create(request, template_name='beer_form.html'): form = BeerForm(request.POST or None) if form.is_valid(): form.save() return", "{} data['object_list'] = beer return render(request, template_name, data) def keg_list(request, template_name='keg_list.html'): keg =", "from kegs.models import Beer, Keg # Create your views here. def keg_detail(request, pk):", "render(request, template_name, data) def keg_list(request, template_name='keg_list.html'): keg = Keg.objects.all() data = {} data['object_list']", "return render(request, template_name, data) def beer_create(request, template_name='beer_form.html'): form = BeerForm(request.POST or None) if", "redirect('kegs:keg_list') return render(request, template_name, {'form':form}) def beer_delete(request, pk, template_name='beer_confirm_delete.html'): beer = get_object_or_404(Beer, pk=pk)", "render(request, template_name, data) def beer_create(request, template_name='beer_form.html'): form = BeerForm(request.POST or None) if form.is_valid():", "BeerForm, KegForm from kegs.models import Beer, Keg # Create your views here. 
def", "if form.is_valid(): form.save() return redirect('kegs:keg_list') return render(request, template_name, {'form':form}) def beer_delete(request, pk, template_name='beer_confirm_delete.html'):", "Beer.objects.get(pk=pk) keg_objs = Keg.objects.filter(beer_id=beer_obj.id) context = { 'kegs': keg_objs, 'beers': beer_obj, } return", "data) def keg_list(request, template_name='keg_list.html'): keg = Keg.objects.all() data = {} data['object_list'] = keg", "template_name, {'form':form}) def beer_delete(request, pk, template_name='beer_confirm_delete.html'): beer = get_object_or_404(Beer, pk=pk) if request.method=='POST': beer.delete()", "<filename>kegs/views.py from django.shortcuts import render, redirect, get_object_or_404 from kegs.forms import BeerForm, KegForm from", "Beer.objects.all() data = {} data['object_list'] = beer return render(request, template_name, data) def keg_list(request,", "views here. def keg_detail(request, pk): beer_obj = Beer.objects.get(pk=pk) keg_objs = Keg.objects.filter(beer_id=beer_obj.id) context =", "= Beer.objects.get(pk=pk) keg_objs = Keg.objects.filter(beer_id=beer_obj.id) context = { 'kegs': keg_objs, 'beers': beer_obj, }", "template_name='keg_form.html'): form = KegForm(request.POST or None) if form.is_valid(): form.save() return redirect('kegs:keg_list') return render(request,", "render(request, template_name, {'form':form}) def keg_create(request, template_name='keg_form.html'): form = KegForm(request.POST or None) if form.is_valid():", "template_name='keg_confirm_delete.html'): keg = get_object_or_404(Keg, pk=pk) if request.method=='POST': keg.delete() return redirect('kegs:keg_list') return render(request, template_name,", "template_name='beer_form.html'): form = BeerForm(request.POST or None) if form.is_valid(): form.save() return redirect('kegs:beer_list') return render(request,", "beer_update(request, pk, template_name='beer_form.html'): beer = get_object_or_404(Beer, pk=pk) form = BeerForm(request.POST or None, 
instance=beer)", "return render(request, template_name, {'form':form}) def keg_update(request, pk, template_name='keg_form.html'): keg = get_object_or_404(Keg, pk=pk) form", "{ 'kegs': keg_objs, 'beers': beer_obj, } return render(request, 'keg_detail.html', context) def beer_list(request, template_name='beer_list.html'):", "= Beer.objects.all() data = {} data['object_list'] = beer return render(request, template_name, data) def", "def keg_create(request, template_name='keg_form.html'): form = KegForm(request.POST or None) if form.is_valid(): form.save() return redirect('kegs:keg_list')", "keg_delete(request, pk, template_name='keg_confirm_delete.html'): keg = get_object_or_404(Keg, pk=pk) if request.method=='POST': keg.delete() return redirect('kegs:keg_list') return", "template_name='beer_confirm_delete.html'): beer = get_object_or_404(Beer, pk=pk) if request.method=='POST': beer.delete() return redirect('kegs:beer_list') return render(request, template_name,", "def keg_delete(request, pk, template_name='keg_confirm_delete.html'): keg = get_object_or_404(Keg, pk=pk) if request.method=='POST': keg.delete() return redirect('kegs:keg_list')", "'kegs': keg_objs, 'beers': beer_obj, } return render(request, 'keg_detail.html', context) def beer_list(request, template_name='beer_list.html'): beer", "kegs.forms import BeerForm, KegForm from kegs.models import Beer, Keg # Create your views", "or None) if form.is_valid(): form.save() return redirect('kegs:beer_list') return render(request, template_name, {'form':form}) def keg_create(request,", "{'form':form}) def keg_create(request, template_name='keg_form.html'): form = KegForm(request.POST or None) if form.is_valid(): form.save() return", "None, instance=keg) if form.is_valid(): form.save() return redirect('kegs:keg_list') return render(request, template_name, {'form':form}) def beer_delete(request,", "redirect('kegs:beer_list') return render(request, template_name, {'form':form}) def keg_update(request, pk, 
template_name='keg_form.html'): keg = get_object_or_404(Keg, pk=pk)", "return redirect('kegs:keg_list') return render(request, template_name, {'form':form}) def beer_update(request, pk, template_name='beer_form.html'): beer = get_object_or_404(Beer,", "def beer_delete(request, pk, template_name='beer_confirm_delete.html'): beer = get_object_or_404(Beer, pk=pk) if request.method=='POST': beer.delete() return redirect('kegs:beer_list')", "beer_delete(request, pk, template_name='beer_confirm_delete.html'): beer = get_object_or_404(Beer, pk=pk) if request.method=='POST': beer.delete() return redirect('kegs:beer_list') return", "import Beer, Keg # Create your views here. def keg_detail(request, pk): beer_obj =", "= BeerForm(request.POST or None, instance=beer) if form.is_valid(): form.save() return redirect('kegs:beer_list') return render(request, template_name,", "from django.shortcuts import render, redirect, get_object_or_404 from kegs.forms import BeerForm, KegForm from kegs.models", "form.is_valid(): form.save() return redirect('kegs:beer_list') return render(request, template_name, {'form':form}) def keg_create(request, template_name='keg_form.html'): form =", "return render(request, template_name, {'form':form}) def keg_create(request, template_name='keg_form.html'): form = KegForm(request.POST or None) if", "Keg.objects.filter(beer_id=beer_obj.id) context = { 'kegs': keg_objs, 'beers': beer_obj, } return render(request, 'keg_detail.html', context)", "= BeerForm(request.POST or None) if form.is_valid(): form.save() return redirect('kegs:beer_list') return render(request, template_name, {'form':form})", "{'form':form}) def keg_update(request, pk, template_name='keg_form.html'): keg = get_object_or_404(Keg, pk=pk) form = KegForm(request.POST or", "render(request, 'keg_detail.html', context) def beer_list(request, template_name='beer_list.html'): beer = Beer.objects.all() data = {} data['object_list']", "get_object_or_404(Keg, pk=pk) form = KegForm(request.POST or 
None, instance=keg) if form.is_valid(): form.save() return redirect('kegs:keg_list')", "= beer return render(request, template_name, data) def keg_list(request, template_name='keg_list.html'): keg = Keg.objects.all() data", "template_name='beer_list.html'): beer = Beer.objects.all() data = {} data['object_list'] = beer return render(request, template_name,", "kegs.models import Beer, Keg # Create your views here. def keg_detail(request, pk): beer_obj", "def keg_update(request, pk, template_name='keg_form.html'): keg = get_object_or_404(Keg, pk=pk) form = KegForm(request.POST or None,", "BeerForm(request.POST or None, instance=beer) if form.is_valid(): form.save() return redirect('kegs:beer_list') return render(request, template_name, {'form':form})", "None) if form.is_valid(): form.save() return redirect('kegs:beer_list') return render(request, template_name, {'form':form}) def keg_create(request, template_name='keg_form.html'):" ]
[ "in range(0, len(self.X) + 1 -(self.train_sample_size + self.test_sample_size), self.step): train_index = list(range(0+i,self.train_sample_size+i)) test_index", "+ 1 -(self.train_sample_size + self.test_sample_size), self.step): train_index = list(range(0+i,self.train_sample_size+i)) test_index = list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size)) list_of_indexes.append((train_index,", "X self.train_sample_size = train_sample_size self.test_sample_size = test_sample_size self.step = step if train_sample_size ==", "have length zero.\") if len(X) == 1: raise IndexError(\"input array 'X' cannot have", "be larger than length of input variable (X).\") if test_sample_size > len(X): raise", "<reponame>rick12000/time-series-cross-validation class TimeCV: def __init__(self, X, train_sample_size = None, test_sample_size = None, step", "test_sample_size == None: self.test_sample_size = max(1, round(len(X)/10)) #error handling: if len(X) == 0:", "self.test_sample_size), self.step): train_index = list(range(0+i,self.train_sample_size+i)) test_index = list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size)) list_of_indexes.append((train_index, test_index)) return list_of_indexes def", "cannot be larger than length of input variable (X).\") if step > len(X):", "of input variable (X).\") if step > len(X): raise IndexError(\"step cannot be larger", "= list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size)) list_of_indexes.append((train_index, test_index)) return list_of_indexes def expanding_train_test_split(self): list_of_indexes = [] for i", "max(1, round(len(X)/10)) #error handling: if len(X) == 0: raise IndexError(\"input array 'X' cannot", "= 1): #initiate variables: self.X = X self.train_sample_size = train_sample_size self.test_sample_size = test_sample_size", "IndexError(\"test_sample_size cannot be larger than length of input variable (X).\") if step >", "None, step = 1): #initiate 
variables: self.X = X self.train_sample_size = train_sample_size self.test_sample_size", "input variable.\") def rolling_train_test_split(self): list_of_indexes = [] for i in range(0, len(self.X) +", "X, train_sample_size = None, test_sample_size = None, step = 1): #initiate variables: self.X", "def __init__(self, X, train_sample_size = None, test_sample_size = None, step = 1): #initiate", "> len(X): raise IndexError(\"step cannot be larger than length of input variable.\") def", "+ self.test_sample_size), self.step): train_index = list(range(0+i,self.train_sample_size+i)) test_index = list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size)) list_of_indexes.append((train_index, test_index)) return list_of_indexes", "self.test_sample_size = max(1, round(len(X)/10)) #error handling: if len(X) == 0: raise IndexError(\"input array", "len(X): raise IndexError(\"step cannot be larger than length of input variable.\") def rolling_train_test_split(self):", "None, test_sample_size = None, step = 1): #initiate variables: self.X = X self.train_sample_size", "#error handling: if len(X) == 0: raise IndexError(\"input array 'X' cannot have length", "if len(X) == 0: raise IndexError(\"input array 'X' cannot have length zero.\") if", "raise IndexError(\"train_sample_size cannot be larger than length of input variable (X).\") if test_sample_size", "rolling_train_test_split(self): list_of_indexes = [] for i in range(0, len(self.X) + 1 -(self.train_sample_size +", "1: raise IndexError(\"input array 'X' cannot have length 1.\") if train_sample_size > len(X):", "IndexError(\"input array 'X' cannot have length 1.\") if train_sample_size > len(X): raise IndexError(\"train_sample_size", "zero.\") if len(X) == 1: raise IndexError(\"input array 'X' cannot have length 1.\")", "test_sample_size self.step = step if train_sample_size == None: self.train_sample_size = max(1,round(len(X)/3)) if test_sample_size", "step = 1): #initiate variables: self.X = X 
self.train_sample_size = train_sample_size self.test_sample_size =", "if test_sample_size == None: self.test_sample_size = max(1, round(len(X)/10)) #error handling: if len(X) ==", "raise IndexError(\"input array 'X' cannot have length 1.\") if train_sample_size > len(X): raise", "1): #initiate variables: self.X = X self.train_sample_size = train_sample_size self.test_sample_size = test_sample_size self.step", "if len(X) == 1: raise IndexError(\"input array 'X' cannot have length 1.\") if", "have length 1.\") if train_sample_size > len(X): raise IndexError(\"train_sample_size cannot be larger than", "than length of input variable (X).\") if test_sample_size > len(X): raise IndexError(\"test_sample_size cannot", "be larger than length of input variable.\") def rolling_train_test_split(self): list_of_indexes = [] for", "range(0, len(self.X) + 1 -(self.train_sample_size + self.test_sample_size), self.step): train_index = list(range(0+i,self.train_sample_size+i)) test_index =", "train_sample_size > len(X): raise IndexError(\"train_sample_size cannot be larger than length of input variable", "if train_sample_size == None: self.train_sample_size = max(1,round(len(X)/3)) if test_sample_size == None: self.test_sample_size =", "list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size)) list_of_indexes.append((train_index, test_index)) return list_of_indexes def expanding_train_test_split(self): list_of_indexes = [] for i in", "self.train_sample_size = train_sample_size self.test_sample_size = test_sample_size self.step = step if train_sample_size == None:", "= list(range(0+i,self.train_sample_size+i)) test_index = list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size)) list_of_indexes.append((train_index, test_index)) return list_of_indexes def expanding_train_test_split(self): list_of_indexes =", "1 -(self.train_sample_size + self.test_sample_size), self.step): train_index = list(range(0+i,self.train_sample_size+i)) 
test_index = list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size)) list_of_indexes.append((train_index, test_index))", "be larger than length of input variable (X).\") if step > len(X): raise", "if test_sample_size > len(X): raise IndexError(\"test_sample_size cannot be larger than length of input", "0: raise IndexError(\"input array 'X' cannot have length zero.\") if len(X) == 1:", "IndexError(\"input array 'X' cannot have length zero.\") if len(X) == 1: raise IndexError(\"input", "= [] for i in range(0, len(self.X) + 1 -(self.train_sample_size + self.test_sample_size), self.step):", "test_sample_size > len(X): raise IndexError(\"test_sample_size cannot be larger than length of input variable", "None: self.train_sample_size = max(1,round(len(X)/3)) if test_sample_size == None: self.test_sample_size = max(1, round(len(X)/10)) #error", "1.\") if train_sample_size > len(X): raise IndexError(\"train_sample_size cannot be larger than length of", "train_index = list(range(0+i,self.train_sample_size+i)) test_index = list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size)) list_of_indexes.append((train_index, test_index)) return list_of_indexes def expanding_train_test_split(self): list_of_indexes", "== 1: raise IndexError(\"input array 'X' cannot have length 1.\") if train_sample_size >", "self.X = X self.train_sample_size = train_sample_size self.test_sample_size = test_sample_size self.step = step if", "length of input variable (X).\") if step > len(X): raise IndexError(\"step cannot be", "+ self.test_sample_size), self.step): train_index = list(range(0,self.train_sample_size+i)) test_index = list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size)) list_of_indexes.append((train_index, test_index)) return list_of_indexes", "round(len(X)/10)) #error handling: if len(X) == 0: raise IndexError(\"input array 'X' cannot have", "(X).\") if step > len(X): raise IndexError(\"step cannot be 
larger than length of", "self.step): train_index = list(range(0+i,self.train_sample_size+i)) test_index = list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size)) list_of_indexes.append((train_index, test_index)) return list_of_indexes def expanding_train_test_split(self):", "max(1,round(len(X)/3)) if test_sample_size == None: self.test_sample_size = max(1, round(len(X)/10)) #error handling: if len(X)", "train_sample_size == None: self.train_sample_size = max(1,round(len(X)/3)) if test_sample_size == None: self.test_sample_size = max(1,", "__init__(self, X, train_sample_size = None, test_sample_size = None, step = 1): #initiate variables:", "> len(X): raise IndexError(\"train_sample_size cannot be larger than length of input variable (X).\")", "larger than length of input variable (X).\") if step > len(X): raise IndexError(\"step", "cannot be larger than length of input variable.\") def rolling_train_test_split(self): list_of_indexes = []", "raise IndexError(\"step cannot be larger than length of input variable.\") def rolling_train_test_split(self): list_of_indexes", "+ 1 -(self.train_sample_size + self.test_sample_size), self.step): train_index = list(range(0,self.train_sample_size+i)) test_index = list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size)) list_of_indexes.append((train_index,", "raise IndexError(\"input array 'X' cannot have length zero.\") if len(X) == 1: raise", "IndexError(\"step cannot be larger than length of input variable.\") def rolling_train_test_split(self): list_of_indexes =", "than length of input variable (X).\") if step > len(X): raise IndexError(\"step cannot", "of input variable.\") def rolling_train_test_split(self): list_of_indexes = [] for i in range(0, len(self.X)", "of input variable (X).\") if test_sample_size > len(X): raise IndexError(\"test_sample_size cannot be larger", "cannot be larger than length of input variable (X).\") if test_sample_size > len(X):", "self.step 
= step if train_sample_size == None: self.train_sample_size = max(1,round(len(X)/3)) if test_sample_size ==", "if step > len(X): raise IndexError(\"step cannot be larger than length of input", "class TimeCV: def __init__(self, X, train_sample_size = None, test_sample_size = None, step =", "None: self.test_sample_size = max(1, round(len(X)/10)) #error handling: if len(X) == 0: raise IndexError(\"input", "== 0: raise IndexError(\"input array 'X' cannot have length zero.\") if len(X) ==", "len(X): raise IndexError(\"train_sample_size cannot be larger than length of input variable (X).\") if", "def rolling_train_test_split(self): list_of_indexes = [] for i in range(0, len(self.X) + 1 -(self.train_sample_size", "cannot have length zero.\") if len(X) == 1: raise IndexError(\"input array 'X' cannot", "step > len(X): raise IndexError(\"step cannot be larger than length of input variable.\")", "length zero.\") if len(X) == 1: raise IndexError(\"input array 'X' cannot have length", "len(X) == 1: raise IndexError(\"input array 'X' cannot have length 1.\") if train_sample_size", "array 'X' cannot have length 1.\") if train_sample_size > len(X): raise IndexError(\"train_sample_size cannot", "test_index)) return list_of_indexes def expanding_train_test_split(self): list_of_indexes = [] for i in range(0, len(self.X)", "length of input variable (X).\") if test_sample_size > len(X): raise IndexError(\"test_sample_size cannot be", "test_sample_size = None, step = 1): #initiate variables: self.X = X self.train_sample_size =", "self.test_sample_size = test_sample_size self.step = step if train_sample_size == None: self.train_sample_size = max(1,round(len(X)/3))", "if train_sample_size > len(X): raise IndexError(\"train_sample_size cannot be larger than length of input", "= step if train_sample_size == None: self.train_sample_size = max(1,round(len(X)/3)) if test_sample_size == None:", "step if train_sample_size == None: self.train_sample_size = max(1,round(len(X)/3)) if 
test_sample_size == None: self.test_sample_size", "'X' cannot have length zero.\") if len(X) == 1: raise IndexError(\"input array 'X'", "variables: self.X = X self.train_sample_size = train_sample_size self.test_sample_size = test_sample_size self.step = step", "range(0, len(self.X) + 1 -(self.train_sample_size + self.test_sample_size), self.step): train_index = list(range(0,self.train_sample_size+i)) test_index =", "= X self.train_sample_size = train_sample_size self.test_sample_size = test_sample_size self.step = step if train_sample_size", "len(self.X) + 1 -(self.train_sample_size + self.test_sample_size), self.step): train_index = list(range(0,self.train_sample_size+i)) test_index = list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size))", "self.train_sample_size = max(1,round(len(X)/3)) if test_sample_size == None: self.test_sample_size = max(1, round(len(X)/10)) #error handling:", "i in range(0, len(self.X) + 1 -(self.train_sample_size + self.test_sample_size), self.step): train_index = list(range(0+i,self.train_sample_size+i))", "variable (X).\") if test_sample_size > len(X): raise IndexError(\"test_sample_size cannot be larger than length", "cannot have length 1.\") if train_sample_size > len(X): raise IndexError(\"train_sample_size cannot be larger", "= test_sample_size self.step = step if train_sample_size == None: self.train_sample_size = max(1,round(len(X)/3)) if", "length 1.\") if train_sample_size > len(X): raise IndexError(\"train_sample_size cannot be larger than length", "== None: self.test_sample_size = max(1, round(len(X)/10)) #error handling: if len(X) == 0: raise", "= train_sample_size self.test_sample_size = test_sample_size self.step = step if train_sample_size == None: self.train_sample_size", "#initiate variables: self.X = X self.train_sample_size = train_sample_size self.test_sample_size = test_sample_size self.step =", "> len(X): raise IndexError(\"test_sample_size cannot be larger than length of input 
variable (X).\")", "len(X): raise IndexError(\"test_sample_size cannot be larger than length of input variable (X).\") if", "len(X) == 0: raise IndexError(\"input array 'X' cannot have length zero.\") if len(X)", "for i in range(0, len(self.X) + 1 -(self.train_sample_size + self.test_sample_size), self.step): train_index =", "1 -(self.train_sample_size + self.test_sample_size), self.step): train_index = list(range(0,self.train_sample_size+i)) test_index = list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size)) list_of_indexes.append((train_index, test_index))", "'X' cannot have length 1.\") if train_sample_size > len(X): raise IndexError(\"train_sample_size cannot be", "expanding_train_test_split(self): list_of_indexes = [] for i in range(0, len(self.X) + 1 -(self.train_sample_size +", "input variable (X).\") if test_sample_size > len(X): raise IndexError(\"test_sample_size cannot be larger than", "variable.\") def rolling_train_test_split(self): list_of_indexes = [] for i in range(0, len(self.X) + 1", "list_of_indexes.append((train_index, test_index)) return list_of_indexes def expanding_train_test_split(self): list_of_indexes = [] for i in range(0,", "list_of_indexes = [] for i in range(0, len(self.X) + 1 -(self.train_sample_size + self.test_sample_size),", "than length of input variable.\") def rolling_train_test_split(self): list_of_indexes = [] for i in", "in range(0, len(self.X) + 1 -(self.train_sample_size + self.test_sample_size), self.step): train_index = list(range(0,self.train_sample_size+i)) test_index", "= None, step = 1): #initiate variables: self.X = X self.train_sample_size = train_sample_size", "(X).\") if test_sample_size > len(X): raise IndexError(\"test_sample_size cannot be larger than length of", "i in range(0, len(self.X) + 1 -(self.train_sample_size + self.test_sample_size), self.step): train_index = list(range(0,self.train_sample_size+i))", "train_sample_size = None, test_sample_size = None, step = 1): #initiate 
variables: self.X =", "list_of_indexes def expanding_train_test_split(self): list_of_indexes = [] for i in range(0, len(self.X) + 1", "larger than length of input variable (X).\") if test_sample_size > len(X): raise IndexError(\"test_sample_size", "TimeCV: def __init__(self, X, train_sample_size = None, test_sample_size = None, step = 1):", "== None: self.train_sample_size = max(1,round(len(X)/3)) if test_sample_size == None: self.test_sample_size = max(1, round(len(X)/10))", "input variable (X).\") if step > len(X): raise IndexError(\"step cannot be larger than", "len(self.X) + 1 -(self.train_sample_size + self.test_sample_size), self.step): train_index = list(range(0+i,self.train_sample_size+i)) test_index = list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size))", "IndexError(\"train_sample_size cannot be larger than length of input variable (X).\") if test_sample_size >", "larger than length of input variable.\") def rolling_train_test_split(self): list_of_indexes = [] for i", "list(range(0+i,self.train_sample_size+i)) test_index = list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size)) list_of_indexes.append((train_index, test_index)) return list_of_indexes def expanding_train_test_split(self): list_of_indexes = []", "array 'X' cannot have length zero.\") if len(X) == 1: raise IndexError(\"input array", "test_index = list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size)) list_of_indexes.append((train_index, test_index)) return list_of_indexes def expanding_train_test_split(self): list_of_indexes = [] for", "length of input variable.\") def rolling_train_test_split(self): list_of_indexes = [] for i in range(0,", "= None, test_sample_size = None, step = 1): #initiate variables: self.X = X", "variable (X).\") if step > len(X): raise IndexError(\"step cannot be larger than length", "[] for i in range(0, len(self.X) + 1 -(self.train_sample_size + self.test_sample_size), 
self.step): train_index", "= max(1,round(len(X)/3)) if test_sample_size == None: self.test_sample_size = max(1, round(len(X)/10)) #error handling: if", "def expanding_train_test_split(self): list_of_indexes = [] for i in range(0, len(self.X) + 1 -(self.train_sample_size", "-(self.train_sample_size + self.test_sample_size), self.step): train_index = list(range(0,self.train_sample_size+i)) test_index = list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size)) list_of_indexes.append((train_index, test_index)) return", "= max(1, round(len(X)/10)) #error handling: if len(X) == 0: raise IndexError(\"input array 'X'", "handling: if len(X) == 0: raise IndexError(\"input array 'X' cannot have length zero.\")", "raise IndexError(\"test_sample_size cannot be larger than length of input variable (X).\") if step", "train_sample_size self.test_sample_size = test_sample_size self.step = step if train_sample_size == None: self.train_sample_size =", "return list_of_indexes def expanding_train_test_split(self): list_of_indexes = [] for i in range(0, len(self.X) +", "-(self.train_sample_size + self.test_sample_size), self.step): train_index = list(range(0+i,self.train_sample_size+i)) test_index = list(range(self.train_sample_size+i,self.train_sample_size+i+self.test_sample_size)) list_of_indexes.append((train_index, test_index)) return" ]
[ "default=\"data/3_processed/\", help=\"Path to csv file to store the result.\") def main(input_path, output_path): \"\"\"", "= \"data/0_raw/\", required=True, help=\"Path to csv file to be processed.\", ) @click.option(\"--output-path\", \"-o\",", "result.\") def main(input_path, output_path): \"\"\" Runs data processing scripts to read raw data", "csv file (../3_processed) to be used for further analysis. \"\"\" print(\"Preprocessing indian_license_plate.csv\") df", "file (../3_processed) to be used for further analysis. \"\"\" print(\"Preprocessing indian_license_plate.csv\") df =", "the result.\") def main(input_path, output_path): \"\"\" Runs data processing scripts to read raw", "analysis. \"\"\" print(\"Preprocessing indian_license_plate.csv\") df = pd.read_csv(input_path+\"indian_license_plates.csv\", dtype={'image_name':str}) df[\"image_name\"] = df[\"image_name\"] + \".jpg\"", "\"data/0_raw/\", required=True, help=\"Path to csv file to be processed.\", ) @click.option(\"--output-path\", \"-o\", default=\"data/3_processed/\",", "store the result.\") def main(input_path, output_path): \"\"\" Runs data processing scripts to read", "to read raw data (../0_raw) and convert it into processed csv file (../3_processed)", "pandas as pd @click.command() @click.option(\"--input-path\", \"-i\", default = \"data/0_raw/\", required=True, help=\"Path to csv", "to csv file to store the result.\") def main(input_path, output_path): \"\"\" Runs data", ") @click.option(\"--output-path\", \"-o\", default=\"data/3_processed/\", help=\"Path to csv file to store the result.\") def", "be processed.\", ) @click.option(\"--output-path\", \"-o\", default=\"data/3_processed/\", help=\"Path to csv file to store the", "it into processed csv file (../3_processed) to be used for further analysis. 
\"\"\"", "\"-i\", default = \"data/0_raw/\", required=True, help=\"Path to csv file to be processed.\", )", "main(input_path, output_path): \"\"\" Runs data processing scripts to read raw data (../0_raw) and", "data (../0_raw) and convert it into processed csv file (../3_processed) to be used", "file to store the result.\") def main(input_path, output_path): \"\"\" Runs data processing scripts", "and convert it into processed csv file (../3_processed) to be used for further", "(../3_processed) to be used for further analysis. \"\"\" print(\"Preprocessing indian_license_plate.csv\") df = pd.read_csv(input_path+\"indian_license_plates.csv\",", "help=\"Path to csv file to store the result.\") def main(input_path, output_path): \"\"\" Runs", "used for further analysis. \"\"\" print(\"Preprocessing indian_license_plate.csv\") df = pd.read_csv(input_path+\"indian_license_plates.csv\", dtype={'image_name':str}) df[\"image_name\"] =", "df = pd.read_csv(input_path+\"indian_license_plates.csv\", dtype={'image_name':str}) df[\"image_name\"] = df[\"image_name\"] + \".jpg\" df.to_csv(output_path+\"processed.csv\", index=False) print(\"Preprocessed and", "= df[\"image_name\"] + \".jpg\" df.to_csv(output_path+\"processed.csv\", index=False) print(\"Preprocessed and saved as processed.csv\") if __name__", "df[\"image_name\"] + \".jpg\" df.to_csv(output_path+\"processed.csv\", index=False) print(\"Preprocessed and saved as processed.csv\") if __name__ ==", "into processed csv file (../3_processed) to be used for further analysis. 
\"\"\" print(\"Preprocessing", "\".jpg\" df.to_csv(output_path+\"processed.csv\", index=False) print(\"Preprocessed and saved as processed.csv\") if __name__ == '__main__': main()", "indian_license_plate.csv\") df = pd.read_csv(input_path+\"indian_license_plates.csv\", dtype={'image_name':str}) df[\"image_name\"] = df[\"image_name\"] + \".jpg\" df.to_csv(output_path+\"processed.csv\", index=False) print(\"Preprocessed", "to csv file to be processed.\", ) @click.option(\"--output-path\", \"-o\", default=\"data/3_processed/\", help=\"Path to csv", "as pd @click.command() @click.option(\"--input-path\", \"-i\", default = \"data/0_raw/\", required=True, help=\"Path to csv file", "output_path): \"\"\" Runs data processing scripts to read raw data (../0_raw) and convert", "scripts to read raw data (../0_raw) and convert it into processed csv file", "@click.option(\"--output-path\", \"-o\", default=\"data/3_processed/\", help=\"Path to csv file to store the result.\") def main(input_path,", "csv file to be processed.\", ) @click.option(\"--output-path\", \"-o\", default=\"data/3_processed/\", help=\"Path to csv file", "for further analysis. 
\"\"\" print(\"Preprocessing indian_license_plate.csv\") df = pd.read_csv(input_path+\"indian_license_plates.csv\", dtype={'image_name':str}) df[\"image_name\"] = df[\"image_name\"]", "def main(input_path, output_path): \"\"\" Runs data processing scripts to read raw data (../0_raw)", "(../0_raw) and convert it into processed csv file (../3_processed) to be used for", "convert it into processed csv file (../3_processed) to be used for further analysis.", "\"\"\" print(\"Preprocessing indian_license_plate.csv\") df = pd.read_csv(input_path+\"indian_license_plates.csv\", dtype={'image_name':str}) df[\"image_name\"] = df[\"image_name\"] + \".jpg\" df.to_csv(output_path+\"processed.csv\",", "import click import pandas as pd @click.command() @click.option(\"--input-path\", \"-i\", default = \"data/0_raw/\", required=True,", "pd.read_csv(input_path+\"indian_license_plates.csv\", dtype={'image_name':str}) df[\"image_name\"] = df[\"image_name\"] + \".jpg\" df.to_csv(output_path+\"processed.csv\", index=False) print(\"Preprocessed and saved as", "df[\"image_name\"] = df[\"image_name\"] + \".jpg\" df.to_csv(output_path+\"processed.csv\", index=False) print(\"Preprocessed and saved as processed.csv\") if", "@click.option(\"--input-path\", \"-i\", default = \"data/0_raw/\", required=True, help=\"Path to csv file to be processed.\",", "processed.\", ) @click.option(\"--output-path\", \"-o\", default=\"data/3_processed/\", help=\"Path to csv file to store the result.\")", "to store the result.\") def main(input_path, output_path): \"\"\" Runs data processing scripts to", "further analysis. 
\"\"\" print(\"Preprocessing indian_license_plate.csv\") df = pd.read_csv(input_path+\"indian_license_plates.csv\", dtype={'image_name':str}) df[\"image_name\"] = df[\"image_name\"] +", "data processing scripts to read raw data (../0_raw) and convert it into processed", "default = \"data/0_raw/\", required=True, help=\"Path to csv file to be processed.\", ) @click.option(\"--output-path\",", "\"\"\" Runs data processing scripts to read raw data (../0_raw) and convert it", "dtype={'image_name':str}) df[\"image_name\"] = df[\"image_name\"] + \".jpg\" df.to_csv(output_path+\"processed.csv\", index=False) print(\"Preprocessed and saved as processed.csv\")", "pd @click.command() @click.option(\"--input-path\", \"-i\", default = \"data/0_raw/\", required=True, help=\"Path to csv file to", "Runs data processing scripts to read raw data (../0_raw) and convert it into", "help=\"Path to csv file to be processed.\", ) @click.option(\"--output-path\", \"-o\", default=\"data/3_processed/\", help=\"Path to", "= pd.read_csv(input_path+\"indian_license_plates.csv\", dtype={'image_name':str}) df[\"image_name\"] = df[\"image_name\"] + \".jpg\" df.to_csv(output_path+\"processed.csv\", index=False) print(\"Preprocessed and saved", "\"-o\", default=\"data/3_processed/\", help=\"Path to csv file to store the result.\") def main(input_path, output_path):", "csv file to store the result.\") def main(input_path, output_path): \"\"\" Runs data processing", "print(\"Preprocessing indian_license_plate.csv\") df = pd.read_csv(input_path+\"indian_license_plates.csv\", dtype={'image_name':str}) df[\"image_name\"] = df[\"image_name\"] + \".jpg\" df.to_csv(output_path+\"processed.csv\", index=False)", "+ \".jpg\" df.to_csv(output_path+\"processed.csv\", index=False) print(\"Preprocessed and saved as processed.csv\") if __name__ == '__main__':", "be used for further analysis. 
\"\"\" print(\"Preprocessing indian_license_plate.csv\") df = pd.read_csv(input_path+\"indian_license_plates.csv\", dtype={'image_name':str}) df[\"image_name\"]", "click import pandas as pd @click.command() @click.option(\"--input-path\", \"-i\", default = \"data/0_raw/\", required=True, help=\"Path", "processing scripts to read raw data (../0_raw) and convert it into processed csv", "import pandas as pd @click.command() @click.option(\"--input-path\", \"-i\", default = \"data/0_raw/\", required=True, help=\"Path to", "file to be processed.\", ) @click.option(\"--output-path\", \"-o\", default=\"data/3_processed/\", help=\"Path to csv file to", "processed csv file (../3_processed) to be used for further analysis. \"\"\" print(\"Preprocessing indian_license_plate.csv\")", "to be used for further analysis. \"\"\" print(\"Preprocessing indian_license_plate.csv\") df = pd.read_csv(input_path+\"indian_license_plates.csv\", dtype={'image_name':str})", "raw data (../0_raw) and convert it into processed csv file (../3_processed) to be", "@click.command() @click.option(\"--input-path\", \"-i\", default = \"data/0_raw/\", required=True, help=\"Path to csv file to be", "read raw data (../0_raw) and convert it into processed csv file (../3_processed) to", "to be processed.\", ) @click.option(\"--output-path\", \"-o\", default=\"data/3_processed/\", help=\"Path to csv file to store", "required=True, help=\"Path to csv file to be processed.\", ) @click.option(\"--output-path\", \"-o\", default=\"data/3_processed/\", help=\"Path" ]
def write_png(buf, width, height):
    # by ideasman42, 2013-10-04, stackoverflow.com
    """Pack raw RGBA pixel data into a complete in-memory PNG byte stream.

    buf: must be bytes or a bytearray in py3, a regular string in py2,
    formatted RGBARGBA... with rows stored bottom-to-top.
    Returns the full PNG file contents (signature + IHDR + IDAT + IEND).
    """
    import zlib, struct

    # Reverse the vertical line order and prefix every scanline with the
    # mandatory 0x00 filter byte required by the PNG format.
    row_bytes = width * 4
    scanlines = []
    for offset in range((height - 1) * row_bytes, -1, -row_bytes):
        scanlines.append(b'\x00' + buf[offset:offset + row_bytes])
    raw_data = b''.join(scanlines)

    def make_chunk(tag, payload):
        # A PNG chunk: 4-byte payload length, tag + payload, then the
        # CRC32 computed over tag + payload.
        body = tag + payload
        return (struct.pack("!I", len(payload)) + body +
                struct.pack("!I", 0xFFFFFFFF & zlib.crc32(body)))

    # 8-bit depth, color type 6 (truecolor with alpha), default
    # compression/filter/interlace settings.
    ihdr = struct.pack("!2I5B", width, height, 8, 6, 0, 0, 0)
    return (b'\x89PNG\r\n\x1a\n' +
            make_chunk(b'IHDR', ihdr) +
            make_chunk(b'IDAT', zlib.compress(raw_data, 9)) +
            make_chunk(b'IEND', b''))
\"\"\" import zlib, struct # reverse the vertical", "print \"len=\", len(buf)/4 # The data should be written directly to a file", "as binary, as in: data = write_png(buf, imgsize, imgsize) with open(\"my_image.png\", 'wb') as", "open(\"my_image.png\", 'wb') as fd: fd.write(data) def saveAsPNG(array, filename): # by <NAME>, 2014-01-10, stackoverflow.com", "8, 6, 0, 0, 0)), png_pack(b'IDAT', zlib.compress(raw_data, 9)), png_pack(b'IEND', b'')]) def test_write_png(): #", "zlib, struct # reverse the vertical line order and add null bytes at", "for row in array]): raise ValueError, \"Array should have elements of equal size\"", "& zlib.crc32(chunk_head))) return b''.join([ b'\\x89PNG\\r\\n\\x1a\\n', png_pack(b'IHDR', struct.pack(\"!2I5B\", width, height, 8, 6, 0, 0,", "import zlib, struct # reverse the vertical line order and add null bytes", "| (g << 8) | b print tot saveAsPNG(tot, 'test_grid.png') #saveAsPNG([[0xffff0000, 0xffFFFF00], #", "a.fill(0xFF) r = np.empty((2,2), np.uint32) r[0,0] = 0xFF r[0,1] = 0xFF g =", "# Original source code: # https://developer.blender.org/diffusion/B/browse/master/release/bin/blender-thumbnailer.py$155 # def write_png(buf, width, height): # by", "be written directly to a file opened as binary, as in: data =", "struct.pack(\"!2I5B\", width, height, 8, 6, 0, 0, 0)), png_pack(b'IDAT', zlib.compress(raw_data, 9)), png_pack(b'IEND', b'')])", "as in: data = write_png(buf, imgsize, imgsize) with open(\"my_image.png\", 'wb') as fd: fd.write(data)", "in py2. formatted RGBARGBA... \"\"\" import zlib, struct # reverse the vertical line", "a file opened as binary, as in: data = write_png(buf, imgsize, imgsize) with", "= np.empty((2,2), np.uint32) g[0,1] = 0xFF b = np.empty((2,2), np.uint32) b[1,1] = 0xFF", "len(buf)/4 # The data should be written directly to a file opened as", "= 0xFF r[0,1] = 0xFF g = np.empty((2,2), np.uint32) g[0,1] = 0xFF b", "row becomes top row of image. 
flat = []; map(flat.extend, reversed(array)) #Big-endian, unsigned", "print tot saveAsPNG(tot, 'test_grid.png') #saveAsPNG([[0xffff0000, 0xffFFFF00], # [0xff00aa77, 0xff333333]], 'test_grid.png') if __name__ ==", "0xFF b = np.empty((2,2), np.uint32) b[1,1] = 0xFF tot = (a << 24)", "becomes top row of image. flat = []; map(flat.extend, reversed(array)) #Big-endian, unsigned 32-byte", "for \"Red Green Blue Alpha\", where alpha is the opacity level # #", "b[1,1] = 0xFF tot = (a << 24) | (r << 16) |", "row in array]): raise ValueError, \"Array should have elements of equal size\" #First", "9)), png_pack(b'IEND', b'')]) def test_write_png(): # a red square: buf=b'\\xFF\\x00\\x00\\xFF' n=9 imgsize=2**n #", "imgsize=2**n # generate an image of size imgsize x imgsize pixels for i", "0xFF g = np.empty((2,2), np.uint32) g[0,1] = 0xFF b = np.empty((2,2), np.uint32) b[1,1]", "any([len(row) != len(array[0]) for row in array]): raise ValueError, \"Array should have elements", "stackoverflow.com import struct if any([len(row) != len(array[0]) for row in array]): raise ValueError,", "return b''.join([ b'\\x89PNG\\r\\n\\x1a\\n', png_pack(b'IHDR', struct.pack(\"!2I5B\", width, height, 8, 6, 0, 0, 0)), png_pack(b'IDAT',", "- width_byte_4)) def png_pack(png_tag, data): chunk_head = png_tag + data return (struct.pack(\"!I\", len(data))", "Alpha\", where alpha is the opacity level # # extracted from: # http://stackoverflow.com/questions/902761/saving-a-numpy-array-as-an-image", "integer. buf = b''.join([struct.pack('>I', ((0xffFFff & i32)<<8)|(i32>>24) ) for i32 in flat]) #Rotate", "image of size imgsize x imgsize pixels for i in range(2*n): buf =", "data = write_png(buf, imgsize, imgsize) with open(\"my_image.png\", 'wb') as fd: fd.write(data) def saveAsPNG(array,", "png_tag + data return (struct.pack(\"!I\", len(data)) + chunk_head + struct.pack(\"!I\", 0xFFFFFFFF & zlib.crc32(chunk_head)))", "image. flat = []; map(flat.extend, reversed(array)) #Big-endian, unsigned 32-byte integer. 
buf = b''.join([struct.pack('>I',", "struct.pack(\"!I\", 0xFFFFFFFF & zlib.crc32(chunk_head))) return b''.join([ b'\\x89PNG\\r\\n\\x1a\\n', png_pack(b'IHDR', struct.pack(\"!2I5B\", width, height, 8, 6,", "by <NAME>, 2014-01-10, stackoverflow.com import struct if any([len(row) != len(array[0]) for row in", "def write_png(buf, width, height): # by ideasman42, 2013-10-04, stackoverflow.com \"\"\" buf: must be", "np.empty((2,2), np.uint32) r[0,0] = 0xFF r[0,1] = 0xFF g = np.empty((2,2), np.uint32) g[0,1]", "np.empty((2,2), np.uint32) b[1,1] = 0xFF tot = (a << 24) | (r <<", "width_byte_4 = width * 4 raw_data = b''.join(b'\\x00' + buf[span:span + width_byte_4] for", "for i in range(2*n): buf = buf + buf print \"len=\", len(buf)/4 #", "for i32 in flat]) #Rotate from ARGB to RGBA. data = write_png(buf, len(array[0]),", "data return (struct.pack(\"!I\", len(data)) + chunk_head + struct.pack(\"!I\", 0xFFFFFFFF & zlib.crc32(chunk_head))) return b''.join([", "<< 8) | b print tot saveAsPNG(tot, 'test_grid.png') #saveAsPNG([[0xffff0000, 0xffFFFF00], # [0xff00aa77, 0xff333333]],", "np.uint32) b[1,1] = 0xFF tot = (a << 24) | (r << 16)", "should be written directly to a file opened as binary, as in: data", "= b''.join([struct.pack('>I', ((0xffFFff & i32)<<8)|(i32>>24) ) for i32 in flat]) #Rotate from ARGB", "array]): raise ValueError, \"Array should have elements of equal size\" #First row becomes", "f.close() def test_save_png(): import numpy as np a = np.empty((2,2), np.uint32) a.fill(0xFF) r", "line order and add null bytes at the start width_byte_4 = width *", "filename): # by <NAME>, 2014-01-10, stackoverflow.com import struct if any([len(row) != len(array[0]) for", "map(flat.extend, reversed(array)) #Big-endian, unsigned 32-byte integer. buf = b''.join([struct.pack('>I', ((0xffFFff & i32)<<8)|(i32>>24) )", "PNG files # # RGBA stands for \"Red Green Blue Alpha\", where alpha", ") for i32 in flat]) #Rotate from ARGB to RGBA. 
data = write_png(buf,", "flat]) #Rotate from ARGB to RGBA. data = write_png(buf, len(array[0]), len(array)) f =", "width, height): # by ideasman42, 2013-10-04, stackoverflow.com \"\"\" buf: must be bytes or", "in: data = write_png(buf, imgsize, imgsize) with open(\"my_image.png\", 'wb') as fd: fd.write(data) def", "\"len=\", len(buf)/4 # The data should be written directly to a file opened", "\"Red Green Blue Alpha\", where alpha is the opacity level # # extracted", "4, -1, - width_byte_4)) def png_pack(png_tag, data): chunk_head = png_tag + data return", "# WritePNG.py: writes compressed, true-color RGBA PNG files # # RGBA stands for", "a bytearray in py3, a regular string in py2. formatted RGBARGBA... \"\"\" import", "size imgsize x imgsize pixels for i in range(2*n): buf = buf +", "8) | b print tot saveAsPNG(tot, 'test_grid.png') #saveAsPNG([[0xffff0000, 0xffFFFF00], # [0xff00aa77, 0xff333333]], 'test_grid.png')", "zlib.crc32(chunk_head))) return b''.join([ b'\\x89PNG\\r\\n\\x1a\\n', png_pack(b'IHDR', struct.pack(\"!2I5B\", width, height, 8, 6, 0, 0, 0)),", "top row of image. 
flat = []; map(flat.extend, reversed(array)) #Big-endian, unsigned 32-byte integer.", "written directly to a file opened as binary, as in: data = write_png(buf,", "saveAsPNG(array, filename): # by <NAME>, 2014-01-10, stackoverflow.com import struct if any([len(row) != len(array[0])", "the vertical line order and add null bytes at the start width_byte_4 =", "# generate an image of size imgsize x imgsize pixels for i in", "np.uint32) g[0,1] = 0xFF b = np.empty((2,2), np.uint32) b[1,1] = 0xFF tot =", "| b print tot saveAsPNG(tot, 'test_grid.png') #saveAsPNG([[0xffff0000, 0xffFFFF00], # [0xff00aa77, 0xff333333]], 'test_grid.png') if", "RGBA PNG files # # RGBA stands for \"Red Green Blue Alpha\", where", "len(array)) f = open(filename, 'wb') f.write(data) f.close() def test_save_png(): import numpy as np", "writes compressed, true-color RGBA PNG files # # RGBA stands for \"Red Green", "and add null bytes at the start width_byte_4 = width * 4 raw_data", "# reverse the vertical line order and add null bytes at the start", "f = open(filename, 'wb') f.write(data) f.close() def test_save_png(): import numpy as np a", "test_write_png(): # a red square: buf=b'\\xFF\\x00\\x00\\xFF' n=9 imgsize=2**n # generate an image of", "chunk_head + struct.pack(\"!I\", 0xFFFFFFFF & zlib.crc32(chunk_head))) return b''.join([ b'\\x89PNG\\r\\n\\x1a\\n', png_pack(b'IHDR', struct.pack(\"!2I5B\", width, height,", "= width * 4 raw_data = b''.join(b'\\x00' + buf[span:span + width_byte_4] for span", "def saveAsPNG(array, filename): # by <NAME>, 2014-01-10, stackoverflow.com import struct if any([len(row) !=", "Original source code: # https://developer.blender.org/diffusion/B/browse/master/release/bin/blender-thumbnailer.py$155 # def write_png(buf, width, height): # by ideasman42,", "| (r << 16) | (g << 8) | b print tot saveAsPNG(tot,", "with open(\"my_image.png\", 'wb') as fd: fd.write(data) def saveAsPNG(array, filename): # by <NAME>, 2014-01-10,", "buf: must be bytes or a bytearray in py3, a 
regular string in", "bytes or a bytearray in py3, a regular string in py2. formatted RGBARGBA...", "fd: fd.write(data) def saveAsPNG(array, filename): # by <NAME>, 2014-01-10, stackoverflow.com import struct if", "span in range((height - 1) * width * 4, -1, - width_byte_4)) def", "!= len(array[0]) for row in array]): raise ValueError, \"Array should have elements of", "Green Blue Alpha\", where alpha is the opacity level # # extracted from:", "# The data should be written directly to a file opened as binary,", "files # # RGBA stands for \"Red Green Blue Alpha\", where alpha is", "#Rotate from ARGB to RGBA. data = write_png(buf, len(array[0]), len(array)) f = open(filename,", "alpha is the opacity level # # extracted from: # http://stackoverflow.com/questions/902761/saving-a-numpy-array-as-an-image # #", "of image. flat = []; map(flat.extend, reversed(array)) #Big-endian, unsigned 32-byte integer. buf =", "ARGB to RGBA. data = write_png(buf, len(array[0]), len(array)) f = open(filename, 'wb') f.write(data)", "'wb') as fd: fd.write(data) def saveAsPNG(array, filename): # by <NAME>, 2014-01-10, stackoverflow.com import", "of size imgsize x imgsize pixels for i in range(2*n): buf = buf", "directly to a file opened as binary, as in: data = write_png(buf, imgsize,", "b'\\x89PNG\\r\\n\\x1a\\n', png_pack(b'IHDR', struct.pack(\"!2I5B\", width, height, 8, 6, 0, 0, 0)), png_pack(b'IDAT', zlib.compress(raw_data, 9)),", "return (struct.pack(\"!I\", len(data)) + chunk_head + struct.pack(\"!I\", 0xFFFFFFFF & zlib.crc32(chunk_head))) return b''.join([ b'\\x89PNG\\r\\n\\x1a\\n',", "https://developer.blender.org/diffusion/B/browse/master/release/bin/blender-thumbnailer.py$155 # def write_png(buf, width, height): # by ideasman42, 2013-10-04, stackoverflow.com \"\"\" buf:", "data = write_png(buf, len(array[0]), len(array)) f = open(filename, 'wb') f.write(data) f.close() def test_save_png():", "opened as binary, as in: data = write_png(buf, imgsize, imgsize) with 
open(\"my_image.png\", 'wb')", "0)), png_pack(b'IDAT', zlib.compress(raw_data, 9)), png_pack(b'IEND', b'')]) def test_write_png(): # a red square: buf=b'\\xFF\\x00\\x00\\xFF'", "RGBARGBA... \"\"\" import zlib, struct # reverse the vertical line order and add", "len(array[0]), len(array)) f = open(filename, 'wb') f.write(data) f.close() def test_save_png(): import numpy as", "png_pack(png_tag, data): chunk_head = png_tag + data return (struct.pack(\"!I\", len(data)) + chunk_head +", "buf = b''.join([struct.pack('>I', ((0xffFFff & i32)<<8)|(i32>>24) ) for i32 in flat]) #Rotate from", "or a bytearray in py3, a regular string in py2. formatted RGBARGBA... \"\"\"", "# https://developer.blender.org/diffusion/B/browse/master/release/bin/blender-thumbnailer.py$155 # def write_png(buf, width, height): # by ideasman42, 2013-10-04, stackoverflow.com \"\"\"", "width_byte_4] for span in range((height - 1) * width * 4, -1, -", "tot saveAsPNG(tot, 'test_grid.png') #saveAsPNG([[0xffff0000, 0xffFFFF00], # [0xff00aa77, 0xff333333]], 'test_grid.png') if __name__ == '__main__':", "as np a = np.empty((2,2), np.uint32) a.fill(0xFF) r = np.empty((2,2), np.uint32) r[0,0] =", "r[0,1] = 0xFF g = np.empty((2,2), np.uint32) g[0,1] = 0xFF b = np.empty((2,2),", "struct if any([len(row) != len(array[0]) for row in array]): raise ValueError, \"Array should", "tot = (a << 24) | (r << 16) | (g << 8)", "from: # http://stackoverflow.com/questions/902761/saving-a-numpy-array-as-an-image # # Original source code: # https://developer.blender.org/diffusion/B/browse/master/release/bin/blender-thumbnailer.py$155 # def write_png(buf,", "png_pack(b'IEND', b'')]) def test_write_png(): # a red square: buf=b'\\xFF\\x00\\x00\\xFF' n=9 imgsize=2**n # generate", "imgsize, imgsize) with open(\"my_image.png\", 'wb') as fd: fd.write(data) def saveAsPNG(array, filename): # by", "the opacity level # # extracted from: # http://stackoverflow.com/questions/902761/saving-a-numpy-array-as-an-image # # Original source", "= 
[]; map(flat.extend, reversed(array)) #Big-endian, unsigned 32-byte integer. buf = b''.join([struct.pack('>I', ((0xffFFff &", "import struct if any([len(row) != len(array[0]) for row in array]): raise ValueError, \"Array", "0xFF r[0,1] = 0xFF g = np.empty((2,2), np.uint32) g[0,1] = 0xFF b =", "16) | (g << 8) | b print tot saveAsPNG(tot, 'test_grid.png') #saveAsPNG([[0xffff0000, 0xffFFFF00],", "must be bytes or a bytearray in py3, a regular string in py2.", "red square: buf=b'\\xFF\\x00\\x00\\xFF' n=9 imgsize=2**n # generate an image of size imgsize x", "in flat]) #Rotate from ARGB to RGBA. data = write_png(buf, len(array[0]), len(array)) f", "= np.empty((2,2), np.uint32) b[1,1] = 0xFF tot = (a << 24) | (r", "# RGBA stands for \"Red Green Blue Alpha\", where alpha is the opacity", "pixels for i in range(2*n): buf = buf + buf print \"len=\", len(buf)/4", "(g << 8) | b print tot saveAsPNG(tot, 'test_grid.png') #saveAsPNG([[0xffff0000, 0xffFFFF00], # [0xff00aa77,", "have elements of equal size\" #First row becomes top row of image. flat", "code: # https://developer.blender.org/diffusion/B/browse/master/release/bin/blender-thumbnailer.py$155 # def write_png(buf, width, height): # by ideasman42, 2013-10-04, stackoverflow.com", "(a << 24) | (r << 16) | (g << 8) | b", "py3, a regular string in py2. formatted RGBARGBA... \"\"\" import zlib, struct #", "= 0xFF b = np.empty((2,2), np.uint32) b[1,1] = 0xFF tot = (a <<", "x imgsize pixels for i in range(2*n): buf = buf + buf print", "<< 24) | (r << 16) | (g << 8) | b print", "len(array[0]) for row in array]): raise ValueError, \"Array should have elements of equal", "* 4 raw_data = b''.join(b'\\x00' + buf[span:span + width_byte_4] for span in range((height", "\"Array should have elements of equal size\" #First row becomes top row of", "# by <NAME>, 2014-01-10, stackoverflow.com import struct if any([len(row) != len(array[0]) for row", "regular string in py2. formatted RGBARGBA... 
\"\"\" import zlib, struct # reverse the", "r[0,0] = 0xFF r[0,1] = 0xFF g = np.empty((2,2), np.uint32) g[0,1] = 0xFF", "def test_save_png(): import numpy as np a = np.empty((2,2), np.uint32) a.fill(0xFF) r =", "a red square: buf=b'\\xFF\\x00\\x00\\xFF' n=9 imgsize=2**n # generate an image of size imgsize", "ValueError, \"Array should have elements of equal size\" #First row becomes top row", "2014-01-10, stackoverflow.com import struct if any([len(row) != len(array[0]) for row in array]): raise", "0, 0, 0)), png_pack(b'IDAT', zlib.compress(raw_data, 9)), png_pack(b'IEND', b'')]) def test_write_png(): # a red", "ideasman42, 2013-10-04, stackoverflow.com \"\"\" buf: must be bytes or a bytearray in py3,", "= buf + buf print \"len=\", len(buf)/4 # The data should be written", "reverse the vertical line order and add null bytes at the start width_byte_4", "elements of equal size\" #First row becomes top row of image. flat =", "opacity level # # extracted from: # http://stackoverflow.com/questions/902761/saving-a-numpy-array-as-an-image # # Original source code:", "order and add null bytes at the start width_byte_4 = width * 4", "buf print \"len=\", len(buf)/4 # The data should be written directly to a", "+ buf print \"len=\", len(buf)/4 # The data should be written directly to", "b''.join([ b'\\x89PNG\\r\\n\\x1a\\n', png_pack(b'IHDR', struct.pack(\"!2I5B\", width, height, 8, 6, 0, 0, 0)), png_pack(b'IDAT', zlib.compress(raw_data,", "by ideasman42, 2013-10-04, stackoverflow.com \"\"\" buf: must be bytes or a bytearray in", "+ data return (struct.pack(\"!I\", len(data)) + chunk_head + struct.pack(\"!I\", 0xFFFFFFFF & zlib.crc32(chunk_head))) return", "+ chunk_head + struct.pack(\"!I\", 0xFFFFFFFF & zlib.crc32(chunk_head))) return b''.join([ b'\\x89PNG\\r\\n\\x1a\\n', png_pack(b'IHDR', struct.pack(\"!2I5B\", width,", "WritePNG.py: writes compressed, true-color RGBA PNG files # # RGBA stands for \"Red", "is the opacity level # # extracted from: # 
http://stackoverflow.com/questions/902761/saving-a-numpy-array-as-an-image # # Original", "+ struct.pack(\"!I\", 0xFFFFFFFF & zlib.crc32(chunk_head))) return b''.join([ b'\\x89PNG\\r\\n\\x1a\\n', png_pack(b'IHDR', struct.pack(\"!2I5B\", width, height, 8,", "row of image. flat = []; map(flat.extend, reversed(array)) #Big-endian, unsigned 32-byte integer. buf", "= 0xFF tot = (a << 24) | (r << 16) | (g", "should have elements of equal size\" #First row becomes top row of image.", "file opened as binary, as in: data = write_png(buf, imgsize, imgsize) with open(\"my_image.png\",", "np.empty((2,2), np.uint32) a.fill(0xFF) r = np.empty((2,2), np.uint32) r[0,0] = 0xFF r[0,1] = 0xFF", "Blue Alpha\", where alpha is the opacity level # # extracted from: #", "buf[span:span + width_byte_4] for span in range((height - 1) * width * 4,", "a = np.empty((2,2), np.uint32) a.fill(0xFF) r = np.empty((2,2), np.uint32) r[0,0] = 0xFF r[0,1]", "in range((height - 1) * width * 4, -1, - width_byte_4)) def png_pack(png_tag,", "= np.empty((2,2), np.uint32) r[0,0] = 0xFF r[0,1] = 0xFF g = np.empty((2,2), np.uint32)", "square: buf=b'\\xFF\\x00\\x00\\xFF' n=9 imgsize=2**n # generate an image of size imgsize x imgsize", "compressed, true-color RGBA PNG files # # RGBA stands for \"Red Green Blue", "# by ideasman42, 2013-10-04, stackoverflow.com \"\"\" buf: must be bytes or a bytearray", "open(filename, 'wb') f.write(data) f.close() def test_save_png(): import numpy as np a = np.empty((2,2),", "buf + buf print \"len=\", len(buf)/4 # The data should be written directly", "for span in range((height - 1) * width * 4, -1, - width_byte_4))", "np.uint32) a.fill(0xFF) r = np.empty((2,2), np.uint32) r[0,0] = 0xFF r[0,1] = 0xFF g", "# http://stackoverflow.com/questions/902761/saving-a-numpy-array-as-an-image # # Original source code: # https://developer.blender.org/diffusion/B/browse/master/release/bin/blender-thumbnailer.py$155 # def write_png(buf, width,", "0xFFFFFFFF & zlib.crc32(chunk_head))) return 
b''.join([ b'\\x89PNG\\r\\n\\x1a\\n', png_pack(b'IHDR', struct.pack(\"!2I5B\", width, height, 8, 6, 0,", "b''.join([struct.pack('>I', ((0xffFFff & i32)<<8)|(i32>>24) ) for i32 in flat]) #Rotate from ARGB to", "((0xffFFff & i32)<<8)|(i32>>24) ) for i32 in flat]) #Rotate from ARGB to RGBA.", "struct # reverse the vertical line order and add null bytes at the", "test_save_png(): import numpy as np a = np.empty((2,2), np.uint32) a.fill(0xFF) r = np.empty((2,2),", "zlib.compress(raw_data, 9)), png_pack(b'IEND', b'')]) def test_write_png(): # a red square: buf=b'\\xFF\\x00\\x00\\xFF' n=9 imgsize=2**n", "= (a << 24) | (r << 16) | (g << 8) |", "-1, - width_byte_4)) def png_pack(png_tag, data): chunk_head = png_tag + data return (struct.pack(\"!I\",", "[]; map(flat.extend, reversed(array)) #Big-endian, unsigned 32-byte integer. buf = b''.join([struct.pack('>I', ((0xffFFff & i32)<<8)|(i32>>24)", "extracted from: # http://stackoverflow.com/questions/902761/saving-a-numpy-array-as-an-image # # Original source code: # https://developer.blender.org/diffusion/B/browse/master/release/bin/blender-thumbnailer.py$155 # def", "height, 8, 6, 0, 0, 0)), png_pack(b'IDAT', zlib.compress(raw_data, 9)), png_pack(b'IEND', b'')]) def test_write_png():", "imgsize x imgsize pixels for i in range(2*n): buf = buf + buf", "<reponame>danielrcardenas/ac-course-2017 #--------------------------------------------------------------------------- # # WritePNG.py: writes compressed, true-color RGBA PNG files # #", "at the start width_byte_4 = width * 4 raw_data = b''.join(b'\\x00' + buf[span:span", "height): # by ideasman42, 2013-10-04, stackoverflow.com \"\"\" buf: must be bytes or a", "be bytes or a bytearray in py3, a regular string in py2. 
formatted", "png_pack(b'IDAT', zlib.compress(raw_data, 9)), png_pack(b'IEND', b'')]) def test_write_png(): # a red square: buf=b'\\xFF\\x00\\x00\\xFF' n=9", "level # # extracted from: # http://stackoverflow.com/questions/902761/saving-a-numpy-array-as-an-image # # Original source code: #", "4 raw_data = b''.join(b'\\x00' + buf[span:span + width_byte_4] for span in range((height -", "in range(2*n): buf = buf + buf print \"len=\", len(buf)/4 # The data", "= write_png(buf, imgsize, imgsize) with open(\"my_image.png\", 'wb') as fd: fd.write(data) def saveAsPNG(array, filename):", "png_pack(b'IHDR', struct.pack(\"!2I5B\", width, height, 8, 6, 0, 0, 0)), png_pack(b'IDAT', zlib.compress(raw_data, 9)), png_pack(b'IEND',", "in array]): raise ValueError, \"Array should have elements of equal size\" #First row", "#First row becomes top row of image. flat = []; map(flat.extend, reversed(array)) #Big-endian,", "formatted RGBARGBA... \"\"\" import zlib, struct # reverse the vertical line order and" ]
# Extract the 16bp cell barcode and 10bp UMI from the first read of a pair
# of 10x Genomics fastq.gz files, keeping only pairs whose barcode is in a
# whitelist.  Barcode+UMI are moved into the query name of both mates.

import argparse
import sys
import logging
import os
import csv
import gzip

DEBUG = False
NotDEBUG = not DEBUG

parser = argparse.ArgumentParser(description="Extract barcode and UMI from 10x fastq first read file.",
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-1', '--input1', action='store', nargs='?', help='Input 10x first read file', required=NotDEBUG)
parser.add_argument('-2', '--input2', action='store', nargs='?', help='Input 10x second read file', required=NotDEBUG)
parser.add_argument('-o', '--output1', action='store', nargs='?', help="Output first read file", required=NotDEBUG)
parser.add_argument('-p', '--output2', action='store', nargs='?', help="Output second read file", required=NotDEBUG)
parser.add_argument('-b', '--barcodeFile', action='store', nargs='?', help="Input barcode white list file", required=NotDEBUG)

args = parser.parse_args()

if DEBUG:
    args.input1 = "/data/cqs/jonathan_brown_data/3804/FASTQ/3804-LD-2_S90_L001_R1_001.fastq.gz"
    args.input2 = "/data/cqs/jonathan_brown_data/3804/FASTQ/3804-LD-2_S90_L001_R2_001.fastq.gz"
    args.output1 = "/data/cqs/jonathan_brown_data/3804/FASTQ/3804-LD-2_S90_L001_R1_001.processed.fastq.gz"
    args.output2 = "/data/cqs/jonathan_brown_data/3804/FASTQ/3804-LD-2_S90_L001_R2_001.processed.fastq.gz"
    args.barcodeFile = "/data/cqs/jonathan_brown_data/3804/Count/3804-LD-2/filtered_feature_bc_matrix/barcodes.tsv.gz"

logger = logging.getLogger('10xFastq')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')


def _load_barcodes(barcode_file):
    """Load the gzipped cell-barcode whitelist into a set.

    The '-1' gem-group suffix that cellranger appends (e.g. 'AAAC...-1')
    is stripped so the bare 16bp barcode can be compared directly against
    the first 16 bases of read 1.
    """
    barcodes = set()
    with gzip.open(barcode_file, 'rt') as fin:
        for barcode in fin:
            barcode = barcode.rstrip()
            barcode = barcode.replace("-1", "")
            barcodes.add(barcode)
    return barcodes


def _filter_reads(input1, input2, tmp_file1, tmp_file2, barcodes):
    """Scan the paired fastq files and write whitelisted pairs to temp files.

    For each kept pair, the barcode (bases 0-15) and UMI (bases 16-25) are
    removed from read 1's sequence/quality and encoded into a new query name
    '@q<count>:<barcode>:<umi>' shared by both mates.  Returns the number of
    pairs written.
    """
    fin1count = 0
    count = 0
    with gzip.open(input1, 'rt') as fin1:
        with gzip.open(input2, 'rt') as fin2:
            with gzip.open(tmp_file1, "wt") as fout1:
                with gzip.open(tmp_file2, "wt") as fout2:
                    while True:
                        query = fin1.readline()
                        if query == "":
                            break  # EOF on read 1 ends the stream
                        seq = fin1.readline()
                        sig = fin1.readline()
                        score = fin1.readline()
                        fin1count += 1
                        if fin1count % 100000 == 0:
                            logger.info("processed %d reads ...", fin1count)
                        # Always consume the mate record too, even when the
                        # pair is discarded, to keep the files in lock-step.
                        q2 = fin2.readline()  # replaced by the new query name
                        seq2 = fin2.readline()
                        sig2 = fin2.readline()
                        score2 = fin2.readline()
                        barcode = seq[:16]
                        if barcode not in barcodes:
                            continue
                        count += 1
                        umi = seq[16:26]
                        # Trim barcode+UMI from sequence and quality alike.
                        seq = seq[26:]
                        score = score[26:]
                        query = "@q%d:%s:%s\n" % (count, barcode, umi)
                        fout1.write(query)
                        fout1.write(seq)
                        fout1.write(sig)
                        fout1.write(score)
                        fout2.write(query)
                        fout2.write(seq2)
                        fout2.write(sig2)
                        fout2.write(score2)
    return count


barcodes = _load_barcodes(args.barcodeFile)
logger.info("Total %d barcode in whitelist" % len(barcodes))

# Write to temp files first so an interrupted run never leaves a truncated
# file under the final output name.
tmpFile1 = args.output1 + ".tmp.gz"
tmpFile2 = args.output2 + ".tmp.gz"

logger.info("Processing reads ...")
_filter_reads(args.input1, args.input2, tmpFile1, tmpFile2, barcodes)

if os.path.isfile(args.output1):
    os.remove(args.output1)
if os.path.isfile(args.output2):
    os.remove(args.output2)
os.rename(tmpFile1, args.output1)
os.rename(tmpFile2, args.output2)

logger.info("done.")
"fin1.readline() sig = fin1.readline() score = fin1.readline() fin1count += 1 if fin1count %", "logger.info(\"Total %d barcode in whitelist\" % len(barcodes)) tmpFile1 = args.output1 + \".tmp.gz\" tmpFile2", "- %(message)s') barcodes = set() with gzip.open(args.barcodeFile, 'rt') as fin: for barcode in", "count = count + 1 umi = seq[16:26] seq = seq[26:] score =", "fout2.write(score2) # # if count == 1000: # break if os.path.isfile(args.output1): os.remove(args.output1) if", "reads ...\") with gzip.open(args.input1, 'rt') as fin1: with gzip.open(args.input2, 'rt') as fin2: count", "= seq[:16] if not (barcode in barcodes): continue count = count + 1", "import os import csv import gzip DEBUG=False NotDEBUG=not DEBUG parser = argparse.ArgumentParser(description=\"Extract barcode", "parser.add_argument('-1', '--input1', action='store', nargs='?', help='Input 10x first read file', required=NotDEBUG) parser.add_argument('-2', '--input2', action='store',", "fin1.readline() fin1count += 1 if fin1count % 100000 == 0: logger.info(\"processed %d reads", "DEBUG=False NotDEBUG=not DEBUG parser = argparse.ArgumentParser(description=\"Extract barcode and UMI from 10x fastq first", "fin2.readline() score2 = fin2.readline() barcode = seq[:16] if not (barcode in barcodes): continue", "not (barcode in barcodes): continue count = count + 1 umi = seq[16:26]", "\"/data/cqs/jonathan_brown_data/3804/FASTQ/3804-LD-2_S90_L001_R1_001.processed.fastq.gz\" args.output2 = \"/data/cqs/jonathan_brown_data/3804/FASTQ/3804-LD-2_S90_L001_R2_001.processed.fastq.gz\" args.barcodeFile = \"/data/cqs/jonathan_brown_data/3804/Count/3804-LD-2/filtered_feature_bc_matrix/barcodes.tsv.gz\" logger = logging.getLogger('10xFastq') logging.basicConfig(level=logging.INFO, format='%(asctime)s -", "\".tmp.gz\" tmpFile2 = args.output2 + \".tmp.gz\" fin1count = 0 logger.info(\"Processing reads ...\") with", "1000: # break if os.path.isfile(args.output1): os.remove(args.output1) if os.path.isfile(args.output2): 
os.remove(args.output2) os.rename(tmpFile1, args.output1) os.rename(tmpFile2, args.output2)", "parser.add_argument('-p', '--output2', action='store', nargs='?', help=\"Output second read file\", required=NotDEBUG) parser.add_argument('-b', '--barcodeFile', action='store', nargs='?',", "fin2.readline() barcode = seq[:16] if not (barcode in barcodes): continue count = count", "list file\", required=NotDEBUG) args = parser.parse_args() if DEBUG: args.input1 = \"/data/cqs/jonathan_brown_data/3804/FASTQ/3804-LD-2_S90_L001_R1_001.fastq.gz\" args.input2 =", "= fin2.readline() score2 = fin2.readline() barcode = seq[:16] if not (barcode in barcodes):", "# if count == 1000: # break if os.path.isfile(args.output1): os.remove(args.output1) if os.path.isfile(args.output2): os.remove(args.output2)", "\"\": break seq = fin1.readline() sig = fin1.readline() score = fin1.readline() fin1count +=", "break seq = fin1.readline() sig = fin1.readline() score = fin1.readline() fin1count += 1", "as fin: for barcode in fin: barcode = barcode.rstrip() barcode = barcode.replace(\"-1\", \"\")", "...\" % fin1count) q2 = fin2.readline() seq2 = fin2.readline() sig2 = fin2.readline() score2", "\"/data/cqs/jonathan_brown_data/3804/FASTQ/3804-LD-2_S90_L001_R1_001.fastq.gz\" args.input2 = \"/data/cqs/jonathan_brown_data/3804/FASTQ/3804-LD-2_S90_L001_R2_001.fastq.gz\" args.output1 = \"/data/cqs/jonathan_brown_data/3804/FASTQ/3804-LD-2_S90_L001_R1_001.processed.fastq.gz\" args.output2 = \"/data/cqs/jonathan_brown_data/3804/FASTQ/3804-LD-2_S90_L001_R2_001.processed.fastq.gz\" args.barcodeFile = \"/data/cqs/jonathan_brown_data/3804/Count/3804-LD-2/filtered_feature_bc_matrix/barcodes.tsv.gz\"", "UMI from 10x fastq first read file.\", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('-1', '--input1', action='store', nargs='?', help='Input", "= args.output1 + \".tmp.gz\" tmpFile2 = args.output2 + \".tmp.gz\" fin1count = 0 logger.info(\"Processing", "sig2 = fin2.readline() score2 = 
fin2.readline() barcode = seq[:16] if not (barcode in", "barcode in fin: barcode = barcode.rstrip() barcode = barcode.replace(\"-1\", \"\") barcodes.add(barcode) logger.info(\"Total %d", "fin2.readline() seq2 = fin2.readline() sig2 = fin2.readline() score2 = fin2.readline() barcode = seq[:16]", "barcode = seq[:16] if not (barcode in barcodes): continue count = count +", "= score[26:] query = \"@q%d:%s:%s\\n\" % (count, barcode, umi) fout1.write(query) fout1.write(seq) fout1.write(sig) fout1.write(score)", "action='store', nargs='?', help='Input 10x first read file', required=NotDEBUG) parser.add_argument('-2', '--input2', action='store', nargs='?', help='Input", "nargs='?', help=\"Input barcode white list file\", required=NotDEBUG) args = parser.parse_args() if DEBUG: args.input1", "args.output2 = \"/data/cqs/jonathan_brown_data/3804/FASTQ/3804-LD-2_S90_L001_R2_001.processed.fastq.gz\" args.barcodeFile = \"/data/cqs/jonathan_brown_data/3804/Count/3804-LD-2/filtered_feature_bc_matrix/barcodes.tsv.gz\" logger = logging.getLogger('10xFastq') logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s", "formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('-1', '--input1', action='store', nargs='?', help='Input 10x first read file', required=NotDEBUG) parser.add_argument('-2', '--input2',", "help=\"Output first read file\", required=NotDEBUG) parser.add_argument('-p', '--output2', action='store', nargs='?', help=\"Output second read file\",", "umi = seq[16:26] seq = seq[26:] score = score[26:] query = \"@q%d:%s:%s\\n\" %", "for barcode in fin: barcode = barcode.rstrip() barcode = barcode.replace(\"-1\", \"\") barcodes.add(barcode) logger.info(\"Total", "action='store', nargs='?', help=\"Output second read file\", required=NotDEBUG) parser.add_argument('-b', '--barcodeFile', action='store', nargs='?', help=\"Input barcode", "'rt') as fin1: with gzip.open(args.input2, 'rt') as fin2: count = 0 with gzip.open(tmpFile1," ]
[ "entity_test_added_to_hass, patch_async_track_time_interval, ) from .api_samples import ( MOCK_RWL, MOCK_ZGP, MOCK_Z3_ROTARY, PARSED_RWL, PARSED_ZGP, PARSED_Z3_ROTARY,", "MOCK_Z3_ROTARY, PARSED_RWL, PARSED_ZGP, PARSED_Z3_ROTARY, ) @pytest.mark.parametrize( \"raw_response, sensor_key, parsed_response, parser_func\", ( (MOCK_ZGP, \"ZGP_00:44:23:08\",", "parsed_response, parser_func\", ( (MOCK_ZGP, \"ZGP_00:44:23:08\", PARSED_ZGP, parse_zgp), (MOCK_RWL, \"RWL_00:17:88:01:10:3e:3a:dc-02\", PARSED_RWL, parse_rwl), ( MOCK_Z3_ROTARY,", "and check behavior for unknown.\"\"\" assert parser_func(raw_response) == parsed_response unknown_sensor_data = {\"modelid\": \"new_one\",", "\"mdi:remote\" assert not remote.should_poll assert \"last_updated\" in remote.device_state_attributes assert remote.unique_id == DEV_ID_REMOTE_1 await", "parsed_response, parser_func, caplog ): \"\"\"Test data parsers for known remotes and check behavior", "sensor_key, parsed_response, parser_func, caplog ): \"\"\"Test data parsers for known remotes and check", "MOCK_RWL, MOCK_ZGP, MOCK_Z3_ROTARY, PARSED_RWL, PARSED_ZGP, PARSED_Z3_ROTARY, ) @pytest.mark.parametrize( \"raw_response, sensor_key, parsed_response, parser_func\", (", "PARSED_Z3_ROTARY, ) @pytest.mark.parametrize( \"raw_response, sensor_key, parsed_response, parser_func\", ( (MOCK_ZGP, \"ZGP_00:44:23:08\", PARSED_ZGP, parse_zgp), (MOCK_RWL,", "logging from datetime import timedelta import pytest from custom_components.hueremote import DOMAIN from custom_components.hueremote.data_manager", "from custom_components.hueremote.remote import async_setup_platform, HueRemote from .conftest import ( DEV_ID_REMOTE_1, entity_test_added_to_hass, patch_async_track_time_interval, )", "import ( MOCK_RWL, MOCK_ZGP, MOCK_Z3_ROTARY, PARSED_RWL, PARSED_ZGP, PARSED_Z3_ROTARY, ) @pytest.mark.parametrize( \"raw_response, sensor_key, parsed_response,", "async_setup_platform, HueRemote from .conftest import ( DEV_ID_REMOTE_1, entity_test_added_to_hass, 
patch_async_track_time_interval, ) from .api_samples import", "from custom_components.hueremote import DOMAIN from custom_components.hueremote.data_manager import HueSensorData from custom_components.hueremote.hue_api_response import ( parse_hue_api_response,", "HueRemote from .conftest import ( DEV_ID_REMOTE_1, entity_test_added_to_hass, patch_async_track_time_interval, ) from .api_samples import (", "def test_parse_remote_raw_data( raw_response, sensor_key, parsed_response, parser_func, caplog ): \"\"\"Test data parsers for known", "0 async def test_platform_remote_setup(mock_hass, caplog): \"\"\"Test platform setup for remotes.\"\"\" with caplog.at_level(logging.DEBUG): with", "logging.warning(\"Added remote entity: %s\", x[0]), ) assert DOMAIN in mock_hass.data data_manager = mock_hass.data[DOMAIN]", "# await remote.async_added_to_hass() assert len(data_manager.sensors) == 1 assert DEV_ID_REMOTE_1 in data_manager.sensors assert isinstance(remote,", ") @pytest.mark.parametrize( \"raw_response, sensor_key, parsed_response, parser_func\", ( (MOCK_ZGP, \"ZGP_00:44:23:08\", PARSED_ZGP, parse_zgp), (MOCK_RWL, \"RWL_00:17:88:01:10:3e:3a:dc-02\",", "not remote.should_poll assert \"last_updated\" in remote.device_state_attributes assert remote.unique_id == DEV_ID_REMOTE_1 await remote.async_will_remove_from_hass() assert", "mock_hass.data[DOMAIN] assert isinstance(data_manager, HueSensorData) assert len(data_manager.registered_entities) == 1 assert data_manager._scan_interval == timedelta(seconds=3) assert", "custom_components.hueremote.data_manager import HueSensorData from custom_components.hueremote.hue_api_response import ( parse_hue_api_response, parse_rwl, parse_zgp, parse_z3_rotary, ) from", "assert isinstance(data_manager, HueSensorData) assert len(data_manager.registered_entities) == 1 assert data_manager._scan_interval == timedelta(seconds=3) assert len(data_manager.data)", "data_manager.data assert len(data_manager.sensors) == 0 assert 
len(data_manager.registered_entities) == 1 remote = data_manager.registered_entities[DEV_ID_REMOTE_1] assert", "for unknown.\"\"\" assert parser_func(raw_response) == parsed_response unknown_sensor_data = {\"modelid\": \"new_one\", \"uniqueid\": \"ff:00:11:22\"} assert", "data_manager.sensors assert isinstance(remote, HueRemote) assert remote.hass assert remote.force_update assert remote.state == \"3_click\" assert", "parser_func, caplog ): \"\"\"Test data parsers for known remotes and check behavior for", "datetime import timedelta import pytest from custom_components.hueremote import DOMAIN from custom_components.hueremote.data_manager import HueSensorData", "= {\"modelid\": \"new_one\", \"uniqueid\": \"ff:00:11:22\"} assert parse_hue_api_response( [raw_response, unknown_sensor_data, raw_response] ) == {sensor_key:", "with caplog.at_level(logging.DEBUG): with patch_async_track_time_interval(): await async_setup_platform( mock_hass, {\"platform\": \"hueremote\", \"scan_interval\": timedelta(seconds=3)}, lambda *x:", "\"uniqueid\": \"ff:00:11:22\"} assert parse_hue_api_response( [raw_response, unknown_sensor_data, raw_response] ) == {sensor_key: parsed_response} assert len(caplog.messages)", "= data_manager.registered_entities[DEV_ID_REMOTE_1] assert not remote.hass await entity_test_added_to_hass(data_manager, remote) # await remote.async_added_to_hass() assert len(data_manager.sensors)", "assert remote.icon == \"mdi:remote\" assert not remote.should_poll assert \"last_updated\" in remote.device_state_attributes assert remote.unique_id", "isinstance(remote, HueRemote) assert remote.hass assert remote.force_update assert remote.state == \"3_click\" assert remote.icon ==", "remote.py.\"\"\" import logging from datetime import timedelta import pytest from custom_components.hueremote import DOMAIN", "(MOCK_ZGP, \"ZGP_00:44:23:08\", PARSED_ZGP, parse_zgp), (MOCK_RWL, \"RWL_00:17:88:01:10:3e:3a:dc-02\", PARSED_RWL, parse_rwl), ( MOCK_Z3_ROTARY, 
\"Z3-_ff:ff:00:0f:e7:fd:ba:b7-01-fc00\", PARSED_Z3_ROTARY, parse_z3_rotary,", "remote entity: %s\", x[0]), ) assert DOMAIN in mock_hass.data data_manager = mock_hass.data[DOMAIN] assert", "entity_test_added_to_hass(data_manager, remote) # await remote.async_added_to_hass() assert len(data_manager.sensors) == 1 assert DEV_ID_REMOTE_1 in data_manager.sensors", "1 remote = data_manager.registered_entities[DEV_ID_REMOTE_1] assert not remote.hass await entity_test_added_to_hass(data_manager, remote) # await remote.async_added_to_hass()", "\"\"\"Tests for remote.py.\"\"\" import logging from datetime import timedelta import pytest from custom_components.hueremote", ") def test_parse_remote_raw_data( raw_response, sensor_key, parsed_response, parser_func, caplog ): \"\"\"Test data parsers for", "raw_response] ) == {sensor_key: parsed_response} assert len(caplog.messages) == 0 async def test_platform_remote_setup(mock_hass, caplog):", "import ( DEV_ID_REMOTE_1, entity_test_added_to_hass, patch_async_track_time_interval, ) from .api_samples import ( MOCK_RWL, MOCK_ZGP, MOCK_Z3_ROTARY,", "caplog.at_level(logging.DEBUG): with patch_async_track_time_interval(): await async_setup_platform( mock_hass, {\"platform\": \"hueremote\", \"scan_interval\": timedelta(seconds=3)}, lambda *x: logging.warning(\"Added", "assert len(data_manager.sensors) == 0 assert len(data_manager.registered_entities) == 1 remote = data_manager.registered_entities[DEV_ID_REMOTE_1] assert not", "await remote.async_added_to_hass() assert len(data_manager.sensors) == 1 assert DEV_ID_REMOTE_1 in data_manager.sensors assert isinstance(remote, HueRemote)", "== \"3_click\" assert remote.icon == \"mdi:remote\" assert not remote.should_poll assert \"last_updated\" in remote.device_state_attributes", "in data_manager.data assert len(data_manager.sensors) == 0 assert len(data_manager.registered_entities) == 1 remote = data_manager.registered_entities[DEV_ID_REMOTE_1]", "import timedelta import pytest from 
custom_components.hueremote import DOMAIN from custom_components.hueremote.data_manager import HueSensorData from", "PARSED_Z3_ROTARY, parse_z3_rotary, ), ), ) def test_parse_remote_raw_data( raw_response, sensor_key, parsed_response, parser_func, caplog ):", "def test_platform_remote_setup(mock_hass, caplog): \"\"\"Test platform setup for remotes.\"\"\" with caplog.at_level(logging.DEBUG): with patch_async_track_time_interval(): await", "= mock_hass.data[DOMAIN] assert isinstance(data_manager, HueSensorData) assert len(data_manager.registered_entities) == 1 assert data_manager._scan_interval == timedelta(seconds=3)", "remote.hass await entity_test_added_to_hass(data_manager, remote) # await remote.async_added_to_hass() assert len(data_manager.sensors) == 1 assert DEV_ID_REMOTE_1", "mock_hass, {\"platform\": \"hueremote\", \"scan_interval\": timedelta(seconds=3)}, lambda *x: logging.warning(\"Added remote entity: %s\", x[0]), )", "HueSensorData from custom_components.hueremote.hue_api_response import ( parse_hue_api_response, parse_rwl, parse_zgp, parse_z3_rotary, ) from custom_components.hueremote.remote import", "DOMAIN in mock_hass.data data_manager = mock_hass.data[DOMAIN] assert isinstance(data_manager, HueSensorData) assert len(data_manager.registered_entities) == 1", "setup for remotes.\"\"\" with caplog.at_level(logging.DEBUG): with patch_async_track_time_interval(): await async_setup_platform( mock_hass, {\"platform\": \"hueremote\", \"scan_interval\":", "custom_components.hueremote import DOMAIN from custom_components.hueremote.data_manager import HueSensorData from custom_components.hueremote.hue_api_response import ( parse_hue_api_response, parse_rwl,", "parsers for known remotes and check behavior for unknown.\"\"\" assert parser_func(raw_response) == parsed_response", "test_platform_remote_setup(mock_hass, caplog): \"\"\"Test platform setup for remotes.\"\"\" with caplog.at_level(logging.DEBUG): with patch_async_track_time_interval(): await 
async_setup_platform(", "timedelta(seconds=3)}, lambda *x: logging.warning(\"Added remote entity: %s\", x[0]), ) assert DOMAIN in mock_hass.data", "assert data_manager._scan_interval == timedelta(seconds=3) assert len(data_manager.data) == 1 assert DEV_ID_REMOTE_1 in data_manager.data assert", "== timedelta(seconds=3) assert len(data_manager.data) == 1 assert DEV_ID_REMOTE_1 in data_manager.data assert len(data_manager.sensors) ==", "== 0 async def test_platform_remote_setup(mock_hass, caplog): \"\"\"Test platform setup for remotes.\"\"\" with caplog.at_level(logging.DEBUG):", "assert DEV_ID_REMOTE_1 in data_manager.sensors assert isinstance(remote, HueRemote) assert remote.hass assert remote.force_update assert remote.state", "caplog ): \"\"\"Test data parsers for known remotes and check behavior for unknown.\"\"\"", "sensor_key, parsed_response, parser_func\", ( (MOCK_ZGP, \"ZGP_00:44:23:08\", PARSED_ZGP, parse_zgp), (MOCK_RWL, \"RWL_00:17:88:01:10:3e:3a:dc-02\", PARSED_RWL, parse_rwl), (", "), ) def test_parse_remote_raw_data( raw_response, sensor_key, parsed_response, parser_func, caplog ): \"\"\"Test data parsers", "remotes and check behavior for unknown.\"\"\" assert parser_func(raw_response) == parsed_response unknown_sensor_data = {\"modelid\":", "mock_hass.data data_manager = mock_hass.data[DOMAIN] assert isinstance(data_manager, HueSensorData) assert len(data_manager.registered_entities) == 1 assert data_manager._scan_interval", "== 1 assert DEV_ID_REMOTE_1 in data_manager.sensors assert isinstance(remote, HueRemote) assert remote.hass assert remote.force_update", "parse_zgp), (MOCK_RWL, \"RWL_00:17:88:01:10:3e:3a:dc-02\", PARSED_RWL, parse_rwl), ( MOCK_Z3_ROTARY, \"Z3-_ff:ff:00:0f:e7:fd:ba:b7-01-fc00\", PARSED_Z3_ROTARY, parse_z3_rotary, ), ), )", "in remote.device_state_attributes assert remote.unique_id == DEV_ID_REMOTE_1 await remote.async_will_remove_from_hass() assert len(data_manager.sensors) == 0 assert", "patch_async_track_time_interval, ) from 
.api_samples import ( MOCK_RWL, MOCK_ZGP, MOCK_Z3_ROTARY, PARSED_RWL, PARSED_ZGP, PARSED_Z3_ROTARY, )", "), ), ) def test_parse_remote_raw_data( raw_response, sensor_key, parsed_response, parser_func, caplog ): \"\"\"Test data", "assert len(caplog.messages) == 0 async def test_platform_remote_setup(mock_hass, caplog): \"\"\"Test platform setup for remotes.\"\"\"", ".api_samples import ( MOCK_RWL, MOCK_ZGP, MOCK_Z3_ROTARY, PARSED_RWL, PARSED_ZGP, PARSED_Z3_ROTARY, ) @pytest.mark.parametrize( \"raw_response, sensor_key,", "assert len(data_manager.registered_entities) == 1 assert data_manager._scan_interval == timedelta(seconds=3) assert len(data_manager.data) == 1 assert", "remote) # await remote.async_added_to_hass() assert len(data_manager.sensors) == 1 assert DEV_ID_REMOTE_1 in data_manager.sensors assert", "len(data_manager.data) == 1 assert DEV_ID_REMOTE_1 in data_manager.data assert len(data_manager.sensors) == 0 assert len(data_manager.registered_entities)", "( MOCK_Z3_ROTARY, \"Z3-_ff:ff:00:0f:e7:fd:ba:b7-01-fc00\", PARSED_Z3_ROTARY, parse_z3_rotary, ), ), ) def test_parse_remote_raw_data( raw_response, sensor_key, parsed_response,", "check behavior for unknown.\"\"\" assert parser_func(raw_response) == parsed_response unknown_sensor_data = {\"modelid\": \"new_one\", \"uniqueid\":", "len(caplog.messages) == 0 async def test_platform_remote_setup(mock_hass, caplog): \"\"\"Test platform setup for remotes.\"\"\" with", "parse_z3_rotary, ) from custom_components.hueremote.remote import async_setup_platform, HueRemote from .conftest import ( DEV_ID_REMOTE_1, entity_test_added_to_hass,", "import async_setup_platform, HueRemote from .conftest import ( DEV_ID_REMOTE_1, entity_test_added_to_hass, patch_async_track_time_interval, ) from .api_samples", "{\"modelid\": \"new_one\", \"uniqueid\": \"ff:00:11:22\"} assert parse_hue_api_response( [raw_response, unknown_sensor_data, raw_response] ) == {sensor_key: parsed_response}", "parse_rwl, parse_zgp, parse_z3_rotary, ) 
from custom_components.hueremote.remote import async_setup_platform, HueRemote from .conftest import (", "== 1 assert data_manager._scan_interval == timedelta(seconds=3) assert len(data_manager.data) == 1 assert DEV_ID_REMOTE_1 in", "\"hueremote\", \"scan_interval\": timedelta(seconds=3)}, lambda *x: logging.warning(\"Added remote entity: %s\", x[0]), ) assert DOMAIN", "in data_manager.sensors assert isinstance(remote, HueRemote) assert remote.hass assert remote.force_update assert remote.state == \"3_click\"", "parser_func\", ( (MOCK_ZGP, \"ZGP_00:44:23:08\", PARSED_ZGP, parse_zgp), (MOCK_RWL, \"RWL_00:17:88:01:10:3e:3a:dc-02\", PARSED_RWL, parse_rwl), ( MOCK_Z3_ROTARY, \"Z3-_ff:ff:00:0f:e7:fd:ba:b7-01-fc00\",", "remotes.\"\"\" with caplog.at_level(logging.DEBUG): with patch_async_track_time_interval(): await async_setup_platform( mock_hass, {\"platform\": \"hueremote\", \"scan_interval\": timedelta(seconds=3)}, lambda", "x[0]), ) assert DOMAIN in mock_hass.data data_manager = mock_hass.data[DOMAIN] assert isinstance(data_manager, HueSensorData) assert", "with patch_async_track_time_interval(): await async_setup_platform( mock_hass, {\"platform\": \"hueremote\", \"scan_interval\": timedelta(seconds=3)}, lambda *x: logging.warning(\"Added remote", "\"\"\"Test platform setup for remotes.\"\"\" with caplog.at_level(logging.DEBUG): with patch_async_track_time_interval(): await async_setup_platform( mock_hass, {\"platform\":", "data_manager = mock_hass.data[DOMAIN] assert isinstance(data_manager, HueSensorData) assert len(data_manager.registered_entities) == 1 assert data_manager._scan_interval ==", "len(data_manager.registered_entities) == 1 remote = data_manager.registered_entities[DEV_ID_REMOTE_1] assert not remote.hass await entity_test_added_to_hass(data_manager, remote) #", "{\"platform\": \"hueremote\", \"scan_interval\": timedelta(seconds=3)}, lambda *x: logging.warning(\"Added remote entity: %s\", x[0]), ) assert", "parse_rwl), ( MOCK_Z3_ROTARY, 
\"Z3-_ff:ff:00:0f:e7:fd:ba:b7-01-fc00\", PARSED_Z3_ROTARY, parse_z3_rotary, ), ), ) def test_parse_remote_raw_data( raw_response, sensor_key,", "assert parser_func(raw_response) == parsed_response unknown_sensor_data = {\"modelid\": \"new_one\", \"uniqueid\": \"ff:00:11:22\"} assert parse_hue_api_response( [raw_response,", "parse_zgp, parse_z3_rotary, ) from custom_components.hueremote.remote import async_setup_platform, HueRemote from .conftest import ( DEV_ID_REMOTE_1,", "remote.device_state_attributes assert remote.unique_id == DEV_ID_REMOTE_1 await remote.async_will_remove_from_hass() assert len(data_manager.sensors) == 0 assert len(data_manager.registered_entities)", "DEV_ID_REMOTE_1 in data_manager.sensors assert isinstance(remote, HueRemote) assert remote.hass assert remote.force_update assert remote.state ==", "assert not remote.should_poll assert \"last_updated\" in remote.device_state_attributes assert remote.unique_id == DEV_ID_REMOTE_1 await remote.async_will_remove_from_hass()", ") from .api_samples import ( MOCK_RWL, MOCK_ZGP, MOCK_Z3_ROTARY, PARSED_RWL, PARSED_ZGP, PARSED_Z3_ROTARY, ) @pytest.mark.parametrize(", "behavior for unknown.\"\"\" assert parser_func(raw_response) == parsed_response unknown_sensor_data = {\"modelid\": \"new_one\", \"uniqueid\": \"ff:00:11:22\"}", "MOCK_ZGP, MOCK_Z3_ROTARY, PARSED_RWL, PARSED_ZGP, PARSED_Z3_ROTARY, ) @pytest.mark.parametrize( \"raw_response, sensor_key, parsed_response, parser_func\", ( (MOCK_ZGP,", "MOCK_Z3_ROTARY, \"Z3-_ff:ff:00:0f:e7:fd:ba:b7-01-fc00\", PARSED_Z3_ROTARY, parse_z3_rotary, ), ), ) def test_parse_remote_raw_data( raw_response, sensor_key, parsed_response, parser_func,", "for known remotes and check behavior for unknown.\"\"\" assert parser_func(raw_response) == parsed_response unknown_sensor_data", "data_manager._scan_interval == timedelta(seconds=3) assert len(data_manager.data) == 1 assert DEV_ID_REMOTE_1 in data_manager.data assert len(data_manager.sensors)", ") from 
custom_components.hueremote.remote import async_setup_platform, HueRemote from .conftest import ( DEV_ID_REMOTE_1, entity_test_added_to_hass, patch_async_track_time_interval,", "unknown_sensor_data, raw_response] ) == {sensor_key: parsed_response} assert len(caplog.messages) == 0 async def test_platform_remote_setup(mock_hass,", "assert remote.force_update assert remote.state == \"3_click\" assert remote.icon == \"mdi:remote\" assert not remote.should_poll", "assert remote.state == \"3_click\" assert remote.icon == \"mdi:remote\" assert not remote.should_poll assert \"last_updated\"", "assert remote.hass assert remote.force_update assert remote.state == \"3_click\" assert remote.icon == \"mdi:remote\" assert", "PARSED_ZGP, PARSED_Z3_ROTARY, ) @pytest.mark.parametrize( \"raw_response, sensor_key, parsed_response, parser_func\", ( (MOCK_ZGP, \"ZGP_00:44:23:08\", PARSED_ZGP, parse_zgp),", "entity: %s\", x[0]), ) assert DOMAIN in mock_hass.data data_manager = mock_hass.data[DOMAIN] assert isinstance(data_manager,", "PARSED_RWL, PARSED_ZGP, PARSED_Z3_ROTARY, ) @pytest.mark.parametrize( \"raw_response, sensor_key, parsed_response, parser_func\", ( (MOCK_ZGP, \"ZGP_00:44:23:08\", PARSED_ZGP,", "1 assert data_manager._scan_interval == timedelta(seconds=3) assert len(data_manager.data) == 1 assert DEV_ID_REMOTE_1 in data_manager.data", "parsed_response} assert len(caplog.messages) == 0 async def test_platform_remote_setup(mock_hass, caplog): \"\"\"Test platform setup for", "== 1 assert DEV_ID_REMOTE_1 in data_manager.data assert len(data_manager.sensors) == 0 assert len(data_manager.registered_entities) ==", "import ( parse_hue_api_response, parse_rwl, parse_zgp, parse_z3_rotary, ) from custom_components.hueremote.remote import async_setup_platform, HueRemote from", "0 assert len(data_manager.registered_entities) == 1 remote = data_manager.registered_entities[DEV_ID_REMOTE_1] assert not remote.hass await entity_test_added_to_hass(data_manager,", "timedelta(seconds=3) assert 
len(data_manager.data) == 1 assert DEV_ID_REMOTE_1 in data_manager.data assert len(data_manager.sensors) == 0", "assert remote.unique_id == DEV_ID_REMOTE_1 await remote.async_will_remove_from_hass() assert len(data_manager.sensors) == 0 assert len(data_manager.registered_entities) ==", "): \"\"\"Test data parsers for known remotes and check behavior for unknown.\"\"\" assert", "\"RWL_00:17:88:01:10:3e:3a:dc-02\", PARSED_RWL, parse_rwl), ( MOCK_Z3_ROTARY, \"Z3-_ff:ff:00:0f:e7:fd:ba:b7-01-fc00\", PARSED_Z3_ROTARY, parse_z3_rotary, ), ), ) def test_parse_remote_raw_data(", "remote.icon == \"mdi:remote\" assert not remote.should_poll assert \"last_updated\" in remote.device_state_attributes assert remote.unique_id ==", ") == {sensor_key: parsed_response} assert len(caplog.messages) == 0 async def test_platform_remote_setup(mock_hass, caplog): \"\"\"Test", "( (MOCK_ZGP, \"ZGP_00:44:23:08\", PARSED_ZGP, parse_zgp), (MOCK_RWL, \"RWL_00:17:88:01:10:3e:3a:dc-02\", PARSED_RWL, parse_rwl), ( MOCK_Z3_ROTARY, \"Z3-_ff:ff:00:0f:e7:fd:ba:b7-01-fc00\", PARSED_Z3_ROTARY,", "== DEV_ID_REMOTE_1 await remote.async_will_remove_from_hass() assert len(data_manager.sensors) == 0 assert len(data_manager.registered_entities) == 0 assert", "lambda *x: logging.warning(\"Added remote entity: %s\", x[0]), ) assert DOMAIN in mock_hass.data data_manager", "DEV_ID_REMOTE_1, entity_test_added_to_hass, patch_async_track_time_interval, ) from .api_samples import ( MOCK_RWL, MOCK_ZGP, MOCK_Z3_ROTARY, PARSED_RWL, PARSED_ZGP,", "\"raw_response, sensor_key, parsed_response, parser_func\", ( (MOCK_ZGP, \"ZGP_00:44:23:08\", PARSED_ZGP, parse_zgp), (MOCK_RWL, \"RWL_00:17:88:01:10:3e:3a:dc-02\", PARSED_RWL, parse_rwl),", "assert not remote.hass await entity_test_added_to_hass(data_manager, remote) # await remote.async_added_to_hass() assert len(data_manager.sensors) == 1", "for remote.py.\"\"\" import logging from datetime import timedelta import pytest from custom_components.hueremote import", 
"patch_async_track_time_interval(): await async_setup_platform( mock_hass, {\"platform\": \"hueremote\", \"scan_interval\": timedelta(seconds=3)}, lambda *x: logging.warning(\"Added remote entity:", "assert DEV_ID_REMOTE_1 in data_manager.data assert len(data_manager.sensors) == 0 assert len(data_manager.registered_entities) == 1 remote", "unknown_sensor_data = {\"modelid\": \"new_one\", \"uniqueid\": \"ff:00:11:22\"} assert parse_hue_api_response( [raw_response, unknown_sensor_data, raw_response] ) ==", "platform setup for remotes.\"\"\" with caplog.at_level(logging.DEBUG): with patch_async_track_time_interval(): await async_setup_platform( mock_hass, {\"platform\": \"hueremote\",", "{sensor_key: parsed_response} assert len(caplog.messages) == 0 async def test_platform_remote_setup(mock_hass, caplog): \"\"\"Test platform setup", "import HueSensorData from custom_components.hueremote.hue_api_response import ( parse_hue_api_response, parse_rwl, parse_zgp, parse_z3_rotary, ) from custom_components.hueremote.remote", "custom_components.hueremote.hue_api_response import ( parse_hue_api_response, parse_rwl, parse_zgp, parse_z3_rotary, ) from custom_components.hueremote.remote import async_setup_platform, HueRemote", "parse_hue_api_response, parse_rwl, parse_zgp, parse_z3_rotary, ) from custom_components.hueremote.remote import async_setup_platform, HueRemote from .conftest import", "== {sensor_key: parsed_response} assert len(caplog.messages) == 0 async def test_platform_remote_setup(mock_hass, caplog): \"\"\"Test platform", "for remotes.\"\"\" with caplog.at_level(logging.DEBUG): with patch_async_track_time_interval(): await async_setup_platform( mock_hass, {\"platform\": \"hueremote\", \"scan_interval\": timedelta(seconds=3)},", "\"scan_interval\": timedelta(seconds=3)}, lambda *x: logging.warning(\"Added remote entity: %s\", x[0]), ) assert DOMAIN in", "custom_components.hueremote.remote import async_setup_platform, HueRemote from .conftest import ( DEV_ID_REMOTE_1, 
entity_test_added_to_hass, patch_async_track_time_interval, ) from", "== 1 remote = data_manager.registered_entities[DEV_ID_REMOTE_1] assert not remote.hass await entity_test_added_to_hass(data_manager, remote) # await", "\"Z3-_ff:ff:00:0f:e7:fd:ba:b7-01-fc00\", PARSED_Z3_ROTARY, parse_z3_rotary, ), ), ) def test_parse_remote_raw_data( raw_response, sensor_key, parsed_response, parser_func, caplog", "import logging from datetime import timedelta import pytest from custom_components.hueremote import DOMAIN from", "import pytest from custom_components.hueremote import DOMAIN from custom_components.hueremote.data_manager import HueSensorData from custom_components.hueremote.hue_api_response import", "from .conftest import ( DEV_ID_REMOTE_1, entity_test_added_to_hass, patch_async_track_time_interval, ) from .api_samples import ( MOCK_RWL,", "DEV_ID_REMOTE_1 in data_manager.data assert len(data_manager.sensors) == 0 assert len(data_manager.registered_entities) == 1 remote =", "== 0 assert len(data_manager.registered_entities) == 1 remote = data_manager.registered_entities[DEV_ID_REMOTE_1] assert not remote.hass await", "== parsed_response unknown_sensor_data = {\"modelid\": \"new_one\", \"uniqueid\": \"ff:00:11:22\"} assert parse_hue_api_response( [raw_response, unknown_sensor_data, raw_response]", "async_setup_platform( mock_hass, {\"platform\": \"hueremote\", \"scan_interval\": timedelta(seconds=3)}, lambda *x: logging.warning(\"Added remote entity: %s\", x[0]),", "len(data_manager.sensors) == 0 assert len(data_manager.registered_entities) == 1 remote = data_manager.registered_entities[DEV_ID_REMOTE_1] assert not remote.hass", "@pytest.mark.parametrize( \"raw_response, sensor_key, parsed_response, parser_func\", ( (MOCK_ZGP, \"ZGP_00:44:23:08\", PARSED_ZGP, parse_zgp), (MOCK_RWL, \"RWL_00:17:88:01:10:3e:3a:dc-02\", PARSED_RWL,", "import DOMAIN from custom_components.hueremote.data_manager import HueSensorData from custom_components.hueremote.hue_api_response import ( 
parse_hue_api_response, parse_rwl, parse_zgp,", "[raw_response, unknown_sensor_data, raw_response] ) == {sensor_key: parsed_response} assert len(caplog.messages) == 0 async def", "unknown.\"\"\" assert parser_func(raw_response) == parsed_response unknown_sensor_data = {\"modelid\": \"new_one\", \"uniqueid\": \"ff:00:11:22\"} assert parse_hue_api_response(", "remote.unique_id == DEV_ID_REMOTE_1 await remote.async_will_remove_from_hass() assert len(data_manager.sensors) == 0 assert len(data_manager.registered_entities) == 0", ") assert DOMAIN in mock_hass.data data_manager = mock_hass.data[DOMAIN] assert isinstance(data_manager, HueSensorData) assert len(data_manager.registered_entities)", "\"\"\"Test data parsers for known remotes and check behavior for unknown.\"\"\" assert parser_func(raw_response)", "%s\", x[0]), ) assert DOMAIN in mock_hass.data data_manager = mock_hass.data[DOMAIN] assert isinstance(data_manager, HueSensorData)", "\"new_one\", \"uniqueid\": \"ff:00:11:22\"} assert parse_hue_api_response( [raw_response, unknown_sensor_data, raw_response] ) == {sensor_key: parsed_response} assert", "assert isinstance(remote, HueRemote) assert remote.hass assert remote.force_update assert remote.state == \"3_click\" assert remote.icon", "test_parse_remote_raw_data( raw_response, sensor_key, parsed_response, parser_func, caplog ): \"\"\"Test data parsers for known remotes", "== \"mdi:remote\" assert not remote.should_poll assert \"last_updated\" in remote.device_state_attributes assert remote.unique_id == DEV_ID_REMOTE_1", "data parsers for known remotes and check behavior for unknown.\"\"\" assert parser_func(raw_response) ==", "parsed_response unknown_sensor_data = {\"modelid\": \"new_one\", \"uniqueid\": \"ff:00:11:22\"} assert parse_hue_api_response( [raw_response, unknown_sensor_data, raw_response] )", "from .api_samples import ( MOCK_RWL, MOCK_ZGP, MOCK_Z3_ROTARY, PARSED_RWL, PARSED_ZGP, PARSED_Z3_ROTARY, ) @pytest.mark.parametrize( \"raw_response,", "await 
async_setup_platform( mock_hass, {\"platform\": \"hueremote\", \"scan_interval\": timedelta(seconds=3)}, lambda *x: logging.warning(\"Added remote entity: %s\",", "known remotes and check behavior for unknown.\"\"\" assert parser_func(raw_response) == parsed_response unknown_sensor_data =", "isinstance(data_manager, HueSensorData) assert len(data_manager.registered_entities) == 1 assert data_manager._scan_interval == timedelta(seconds=3) assert len(data_manager.data) ==", "from custom_components.hueremote.data_manager import HueSensorData from custom_components.hueremote.hue_api_response import ( parse_hue_api_response, parse_rwl, parse_zgp, parse_z3_rotary, )", "assert len(data_manager.registered_entities) == 1 remote = data_manager.registered_entities[DEV_ID_REMOTE_1] assert not remote.hass await entity_test_added_to_hass(data_manager, remote)", "\"3_click\" assert remote.icon == \"mdi:remote\" assert not remote.should_poll assert \"last_updated\" in remote.device_state_attributes assert", "\"ff:00:11:22\"} assert parse_hue_api_response( [raw_response, unknown_sensor_data, raw_response] ) == {sensor_key: parsed_response} assert len(caplog.messages) ==", "caplog): \"\"\"Test platform setup for remotes.\"\"\" with caplog.at_level(logging.DEBUG): with patch_async_track_time_interval(): await async_setup_platform( mock_hass,", "(MOCK_RWL, \"RWL_00:17:88:01:10:3e:3a:dc-02\", PARSED_RWL, parse_rwl), ( MOCK_Z3_ROTARY, \"Z3-_ff:ff:00:0f:e7:fd:ba:b7-01-fc00\", PARSED_Z3_ROTARY, parse_z3_rotary, ), ), ) def", "assert \"last_updated\" in remote.device_state_attributes assert remote.unique_id == DEV_ID_REMOTE_1 await remote.async_will_remove_from_hass() assert len(data_manager.sensors) ==", "\"last_updated\" in remote.device_state_attributes assert remote.unique_id == DEV_ID_REMOTE_1 await remote.async_will_remove_from_hass() assert len(data_manager.sensors) == 0", "remote.async_added_to_hass() assert len(data_manager.sensors) == 1 assert DEV_ID_REMOTE_1 in 
data_manager.sensors assert isinstance(remote, HueRemote) assert", "( parse_hue_api_response, parse_rwl, parse_zgp, parse_z3_rotary, ) from custom_components.hueremote.remote import async_setup_platform, HueRemote from .conftest", "assert len(data_manager.data) == 1 assert DEV_ID_REMOTE_1 in data_manager.data assert len(data_manager.sensors) == 0 assert", "parse_hue_api_response( [raw_response, unknown_sensor_data, raw_response] ) == {sensor_key: parsed_response} assert len(caplog.messages) == 0 async", "HueSensorData) assert len(data_manager.registered_entities) == 1 assert data_manager._scan_interval == timedelta(seconds=3) assert len(data_manager.data) == 1", "*x: logging.warning(\"Added remote entity: %s\", x[0]), ) assert DOMAIN in mock_hass.data data_manager =", "remote.hass assert remote.force_update assert remote.state == \"3_click\" assert remote.icon == \"mdi:remote\" assert not", "len(data_manager.registered_entities) == 1 assert data_manager._scan_interval == timedelta(seconds=3) assert len(data_manager.data) == 1 assert DEV_ID_REMOTE_1", "from custom_components.hueremote.hue_api_response import ( parse_hue_api_response, parse_rwl, parse_zgp, parse_z3_rotary, ) from custom_components.hueremote.remote import async_setup_platform,", "assert len(data_manager.sensors) == 1 assert DEV_ID_REMOTE_1 in data_manager.sensors assert isinstance(remote, HueRemote) assert remote.hass", ".conftest import ( DEV_ID_REMOTE_1, entity_test_added_to_hass, patch_async_track_time_interval, ) from .api_samples import ( MOCK_RWL, MOCK_ZGP,", "pytest from custom_components.hueremote import DOMAIN from custom_components.hueremote.data_manager import HueSensorData from custom_components.hueremote.hue_api_response import (", "1 assert DEV_ID_REMOTE_1 in data_manager.data assert len(data_manager.sensors) == 0 assert len(data_manager.registered_entities) == 1", "in mock_hass.data data_manager = mock_hass.data[DOMAIN] assert isinstance(data_manager, HueSensorData) assert 
len(data_manager.registered_entities) == 1 assert", "HueRemote) assert remote.hass assert remote.force_update assert remote.state == \"3_click\" assert remote.icon == \"mdi:remote\"", "1 assert DEV_ID_REMOTE_1 in data_manager.sensors assert isinstance(remote, HueRemote) assert remote.hass assert remote.force_update assert", "DEV_ID_REMOTE_1 await remote.async_will_remove_from_hass() assert len(data_manager.sensors) == 0 assert len(data_manager.registered_entities) == 0 assert not", "remote.force_update assert remote.state == \"3_click\" assert remote.icon == \"mdi:remote\" assert not remote.should_poll assert", "await entity_test_added_to_hass(data_manager, remote) # await remote.async_added_to_hass() assert len(data_manager.sensors) == 1 assert DEV_ID_REMOTE_1 in", "remote = data_manager.registered_entities[DEV_ID_REMOTE_1] assert not remote.hass await entity_test_added_to_hass(data_manager, remote) # await remote.async_added_to_hass() assert", "assert parse_hue_api_response( [raw_response, unknown_sensor_data, raw_response] ) == {sensor_key: parsed_response} assert len(caplog.messages) == 0", "raw_response, sensor_key, parsed_response, parser_func, caplog ): \"\"\"Test data parsers for known remotes and", "parser_func(raw_response) == parsed_response unknown_sensor_data = {\"modelid\": \"new_one\", \"uniqueid\": \"ff:00:11:22\"} assert parse_hue_api_response( [raw_response, unknown_sensor_data,", "not remote.hass await entity_test_added_to_hass(data_manager, remote) # await remote.async_added_to_hass() assert len(data_manager.sensors) == 1 assert", "PARSED_ZGP, parse_zgp), (MOCK_RWL, \"RWL_00:17:88:01:10:3e:3a:dc-02\", PARSED_RWL, parse_rwl), ( MOCK_Z3_ROTARY, \"Z3-_ff:ff:00:0f:e7:fd:ba:b7-01-fc00\", PARSED_Z3_ROTARY, parse_z3_rotary, ), ),", "DOMAIN from custom_components.hueremote.data_manager import HueSensorData from custom_components.hueremote.hue_api_response import ( parse_hue_api_response, parse_rwl, parse_zgp, parse_z3_rotary,", "PARSED_RWL, parse_rwl), 
( MOCK_Z3_ROTARY, \"Z3-_ff:ff:00:0f:e7:fd:ba:b7-01-fc00\", PARSED_Z3_ROTARY, parse_z3_rotary, ), ), ) def test_parse_remote_raw_data( raw_response,", "await remote.async_will_remove_from_hass() assert len(data_manager.sensors) == 0 assert len(data_manager.registered_entities) == 0 assert not data_manager.available", "assert DOMAIN in mock_hass.data data_manager = mock_hass.data[DOMAIN] assert isinstance(data_manager, HueSensorData) assert len(data_manager.registered_entities) ==", "parse_z3_rotary, ), ), ) def test_parse_remote_raw_data( raw_response, sensor_key, parsed_response, parser_func, caplog ): \"\"\"Test", "async def test_platform_remote_setup(mock_hass, caplog): \"\"\"Test platform setup for remotes.\"\"\" with caplog.at_level(logging.DEBUG): with patch_async_track_time_interval():", "len(data_manager.sensors) == 1 assert DEV_ID_REMOTE_1 in data_manager.sensors assert isinstance(remote, HueRemote) assert remote.hass assert", "\"ZGP_00:44:23:08\", PARSED_ZGP, parse_zgp), (MOCK_RWL, \"RWL_00:17:88:01:10:3e:3a:dc-02\", PARSED_RWL, parse_rwl), ( MOCK_Z3_ROTARY, \"Z3-_ff:ff:00:0f:e7:fd:ba:b7-01-fc00\", PARSED_Z3_ROTARY, parse_z3_rotary, ),", "( DEV_ID_REMOTE_1, entity_test_added_to_hass, patch_async_track_time_interval, ) from .api_samples import ( MOCK_RWL, MOCK_ZGP, MOCK_Z3_ROTARY, PARSED_RWL,", "from datetime import timedelta import pytest from custom_components.hueremote import DOMAIN from custom_components.hueremote.data_manager import", "remote.state == \"3_click\" assert remote.icon == \"mdi:remote\" assert not remote.should_poll assert \"last_updated\" in", "timedelta import pytest from custom_components.hueremote import DOMAIN from custom_components.hueremote.data_manager import HueSensorData from custom_components.hueremote.hue_api_response", "data_manager.registered_entities[DEV_ID_REMOTE_1] assert not remote.hass await entity_test_added_to_hass(data_manager, remote) # await remote.async_added_to_hass() assert len(data_manager.sensors) ==", "( 
MOCK_RWL, MOCK_ZGP, MOCK_Z3_ROTARY, PARSED_RWL, PARSED_ZGP, PARSED_Z3_ROTARY, ) @pytest.mark.parametrize( \"raw_response, sensor_key, parsed_response, parser_func\",", "remote.should_poll assert \"last_updated\" in remote.device_state_attributes assert remote.unique_id == DEV_ID_REMOTE_1 await remote.async_will_remove_from_hass() assert len(data_manager.sensors)" ]
[ "import render from .text_generator import create # Create your views here. def text_generation(request):", "def text_generation(request): context = {} if request.method == \"POST\": file = request.FILES.get(\"file\") if", "views here. def text_generation(request): context = {} if request.method == \"POST\": file =", "your views here. def text_generation(request): context = {} if request.method == \"POST\": file", "Create your views here. def text_generation(request): context = {} if request.method == \"POST\":", "from .text_generator import create # Create your views here. def text_generation(request): context =", "text_generation(request): context = {} if request.method == \"POST\": file = request.FILES.get(\"file\") if file.name.lower().endswith(('.txt')):", "here. def text_generation(request): context = {} if request.method == \"POST\": file = request.FILES.get(\"file\")", "django.shortcuts import render from .text_generator import create # Create your views here. def", "render from .text_generator import create # Create your views here. def text_generation(request): context", "{} if request.method == \"POST\": file = request.FILES.get(\"file\") if file.name.lower().endswith(('.txt')): context['output'] = create(file.read().decode('utf-8'))", "context = {} if request.method == \"POST\": file = request.FILES.get(\"file\") if file.name.lower().endswith(('.txt')): context['output']", "= {} if request.method == \"POST\": file = request.FILES.get(\"file\") if file.name.lower().endswith(('.txt')): context['output'] =", "import create # Create your views here. def text_generation(request): context = {} if", "from django.shortcuts import render from .text_generator import create # Create your views here.", "if request.method == \"POST\": file = request.FILES.get(\"file\") if file.name.lower().endswith(('.txt')): context['output'] = create(file.read().decode('utf-8')) return", ".text_generator import create # Create your views here. 
def text_generation(request): context = {}", "\"POST\": file = request.FILES.get(\"file\") if file.name.lower().endswith(('.txt')): context['output'] = create(file.read().decode('utf-8')) return render(request, 'McCloud/text_generation.html', context)", "request.method == \"POST\": file = request.FILES.get(\"file\") if file.name.lower().endswith(('.txt')): context['output'] = create(file.read().decode('utf-8')) return render(request,", "<filename>App/McCloud/views.py from django.shortcuts import render from .text_generator import create # Create your views", "create # Create your views here. def text_generation(request): context = {} if request.method", "== \"POST\": file = request.FILES.get(\"file\") if file.name.lower().endswith(('.txt')): context['output'] = create(file.read().decode('utf-8')) return render(request, 'McCloud/text_generation.html',", "# Create your views here. def text_generation(request): context = {} if request.method ==" ]
[ "_pickle as cPickle f=open(\"neural_network.pickle\",\"rb\") net=cPickle.load(f) f.close() training_data, test_data=data_loader.load_data() num_correct = net.evaluate(training_data) print(\"{}/{} correct\".format(num_correct,", "as cPickle f=open(\"neural_network.pickle\",\"rb\") net=cPickle.load(f) f.close() training_data, test_data=data_loader.load_data() num_correct = net.evaluate(training_data) print(\"{}/{} correct\".format(num_correct, len(training_data)))", "<reponame>Supreme-Sector/Hand-Sign-Detection-Application import data_loader import network import _pickle as cPickle f=open(\"neural_network.pickle\",\"rb\") net=cPickle.load(f) f.close() training_data,", "import network import _pickle as cPickle f=open(\"neural_network.pickle\",\"rb\") net=cPickle.load(f) f.close() training_data, test_data=data_loader.load_data() num_correct =", "import _pickle as cPickle f=open(\"neural_network.pickle\",\"rb\") net=cPickle.load(f) f.close() training_data, test_data=data_loader.load_data() num_correct = net.evaluate(training_data) print(\"{}/{}", "network import _pickle as cPickle f=open(\"neural_network.pickle\",\"rb\") net=cPickle.load(f) f.close() training_data, test_data=data_loader.load_data() num_correct = net.evaluate(training_data)", "import data_loader import network import _pickle as cPickle f=open(\"neural_network.pickle\",\"rb\") net=cPickle.load(f) f.close() training_data, test_data=data_loader.load_data()", "data_loader import network import _pickle as cPickle f=open(\"neural_network.pickle\",\"rb\") net=cPickle.load(f) f.close() training_data, test_data=data_loader.load_data() num_correct" ]
[ "import migrations, models class Migration(migrations.Migration): dependencies = [ ('products', '0004_productimage_is_main'), ] operations =", "2022-02-23 17:14 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('products',", "# Generated by Django 4.0.2 on 2022-02-23 17:14 from django.db import migrations, models", "17:14 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('products', '0004_productimage_is_main'),", "4.0.2 on 2022-02-23 17:14 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "[ ('products', '0004_productimage_is_main'), ] operations = [ migrations.AlterField( model_name='productimage', name='is_main', field=models.BooleanField(default=False), ), ]", "models class Migration(migrations.Migration): dependencies = [ ('products', '0004_productimage_is_main'), ] operations = [ migrations.AlterField(", "on 2022-02-23 17:14 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "class Migration(migrations.Migration): dependencies = [ ('products', '0004_productimage_is_main'), ] operations = [ migrations.AlterField( model_name='productimage',", "= [ ('products', '0004_productimage_is_main'), ] operations = [ migrations.AlterField( model_name='productimage', name='is_main', field=models.BooleanField(default=False), ),", "dependencies = [ ('products', '0004_productimage_is_main'), ] operations = [ migrations.AlterField( model_name='productimage', name='is_main', field=models.BooleanField(default=False),", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('products', '0004_productimage_is_main'), ] operations", "by Django 4.0.2 on 2022-02-23 17:14 from django.db import migrations, models class Migration(migrations.Migration):", "Generated by Django 4.0.2 on 2022-02-23 17:14 from django.db import migrations, models class", 
"<filename>online_store/apps/products/migrations/0005_alter_productimage_is_main.py # Generated by Django 4.0.2 on 2022-02-23 17:14 from django.db import migrations,", "Migration(migrations.Migration): dependencies = [ ('products', '0004_productimage_is_main'), ] operations = [ migrations.AlterField( model_name='productimage', name='is_main',", "migrations, models class Migration(migrations.Migration): dependencies = [ ('products', '0004_productimage_is_main'), ] operations = [", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('products', '0004_productimage_is_main'), ]", "Django 4.0.2 on 2022-02-23 17:14 from django.db import migrations, models class Migration(migrations.Migration): dependencies" ]
[ "environment...') env = muniverse.Env(spec) try: print('Resetting environment...') env.reset() print('Getting observation...') obs = env.observe()", "y in range(0, height): if res != '': res += '\\n' for x", "takes mouse events. \"\"\" import sys import numpy as np sys.path.insert(0, '..') import", "x in range(0, width): if binary[y, x]: res += 'X' else: res +=", "res += 'X' else: res += ' ' return res if __name__ ==", "np sys.path.insert(0, '..') import muniverse # noqa: E402 def main(): print('Looking up environment...')", "# noqa: E402 def main(): print('Looking up environment...') spec = muniverse.spec_for_name('TowerMania-v1') print('Creating environment...')", "muniverse.spec_for_name('TowerMania-v1') print('Creating environment...') env = muniverse.Env(spec) try: print('Resetting environment...') env.reset() print('Getting observation...') obs", "True: reward, done = env.step(0.1, actions[step_idx % 2]) step_idx += 1 print('reward: '", "width = binary.shape res = '' for y in range(0, height): if res", "print(ascii_art(obs)) print('Playing game...') step_idx = 0 action = muniverse.MouseAction('mousePressed', x=100, y=100, click_count=1) actions", "print('Looking up environment...') spec = muniverse.spec_for_name('TowerMania-v1') print('Creating environment...') env = muniverse.Env(spec) try: print('Resetting", "def ascii_art(img): brightness = np.sum(img, axis=2) / 3 downsampled = brightness[::14, ::7] binary", "as np sys.path.insert(0, '..') import muniverse # noqa: E402 def main(): print('Looking up", "done = env.step(0.1, actions[step_idx % 2]) step_idx += 1 print('reward: ' + str(reward))", "range(0, width): if binary[y, x]: res += 'X' else: res += ' '", "y=100, click_count=1) actions = [action, action.with_event('mouseReleased')] while True: reward, done = env.step(0.1, actions[step_idx", "= env.step(0.1, actions[step_idx % 2]) step_idx += 1 print('reward: ' + str(reward)) if", "noqa: E402 def main(): print('Looking up environment...') spec = 
muniverse.spec_for_name('TowerMania-v1') print('Creating environment...') env", "finally: env.close() def ascii_art(img): brightness = np.sum(img, axis=2) / 3 downsampled = brightness[::14,", "downsampled = brightness[::14, ::7] binary = downsampled > 128 height, width = binary.shape", "Simple program to demonstrate how to use muniverse on a game that takes", "E402 def main(): print('Looking up environment...') spec = muniverse.spec_for_name('TowerMania-v1') print('Creating environment...') env =", "import sys import numpy as np sys.path.insert(0, '..') import muniverse # noqa: E402", "print('reward: ' + str(reward)) if done: break finally: env.close() def ascii_art(img): brightness =", "'': res += '\\n' for x in range(0, width): if binary[y, x]: res", "res = '' for y in range(0, height): if res != '': res", "print('Playing game...') step_idx = 0 action = muniverse.MouseAction('mousePressed', x=100, y=100, click_count=1) actions =", "'\\n' for x in range(0, width): if binary[y, x]: res += 'X' else:", "spec = muniverse.spec_for_name('TowerMania-v1') print('Creating environment...') env = muniverse.Env(spec) try: print('Resetting environment...') env.reset() print('Getting", "actions[step_idx % 2]) step_idx += 1 print('reward: ' + str(reward)) if done: break", "= binary.shape res = '' for y in range(0, height): if res !=", "'..') import muniverse # noqa: E402 def main(): print('Looking up environment...') spec =", "on a game that takes mouse events. 
\"\"\" import sys import numpy as", "binary[y, x]: res += 'X' else: res += ' ' return res if", "env.close() def ascii_art(img): brightness = np.sum(img, axis=2) / 3 downsampled = brightness[::14, ::7]", "res != '': res += '\\n' for x in range(0, width): if binary[y,", "+ str(reward)) if done: break finally: env.close() def ascii_art(img): brightness = np.sum(img, axis=2)", "[action, action.with_event('mouseReleased')] while True: reward, done = env.step(0.1, actions[step_idx % 2]) step_idx +=", "action.with_event('mouseReleased')] while True: reward, done = env.step(0.1, actions[step_idx % 2]) step_idx += 1", "binary = downsampled > 128 height, width = binary.shape res = '' for", "3 downsampled = brightness[::14, ::7] binary = downsampled > 128 height, width =", "= brightness[::14, ::7] binary = downsampled > 128 height, width = binary.shape res", "= env.observe() print(ascii_art(obs)) print('Playing game...') step_idx = 0 action = muniverse.MouseAction('mousePressed', x=100, y=100,", "= '' for y in range(0, height): if res != '': res +=", "game...') step_idx = 0 action = muniverse.MouseAction('mousePressed', x=100, y=100, click_count=1) actions = [action,", "for y in range(0, height): if res != '': res += '\\n' for", "<filename>bindings/python/examples/mouse_game.py \"\"\" Simple program to demonstrate how to use muniverse on a game", "= np.sum(img, axis=2) / 3 downsampled = brightness[::14, ::7] binary = downsampled >", "if done: break finally: env.close() def ascii_art(img): brightness = np.sum(img, axis=2) / 3", "+= 'X' else: res += ' ' return res if __name__ == '__main__':", "to demonstrate how to use muniverse on a game that takes mouse events.", "muniverse # noqa: E402 def main(): print('Looking up environment...') spec = muniverse.spec_for_name('TowerMania-v1') print('Creating", "= muniverse.MouseAction('mousePressed', x=100, y=100, click_count=1) actions = [action, action.with_event('mouseReleased')] while True: reward, done", "print('Resetting 
environment...') env.reset() print('Getting observation...') obs = env.observe() print(ascii_art(obs)) print('Playing game...') step_idx =", "use muniverse on a game that takes mouse events. \"\"\" import sys import", "in range(0, height): if res != '': res += '\\n' for x in", "'' for y in range(0, height): if res != '': res += '\\n'", "height): if res != '': res += '\\n' for x in range(0, width):", "reward, done = env.step(0.1, actions[step_idx % 2]) step_idx += 1 print('reward: ' +", "binary.shape res = '' for y in range(0, height): if res != '':", "' + str(reward)) if done: break finally: env.close() def ascii_art(img): brightness = np.sum(img,", "import numpy as np sys.path.insert(0, '..') import muniverse # noqa: E402 def main():", "% 2]) step_idx += 1 print('reward: ' + str(reward)) if done: break finally:", "1 print('reward: ' + str(reward)) if done: break finally: env.close() def ascii_art(img): brightness", "step_idx = 0 action = muniverse.MouseAction('mousePressed', x=100, y=100, click_count=1) actions = [action, action.with_event('mouseReleased')]", "env.step(0.1, actions[step_idx % 2]) step_idx += 1 print('reward: ' + str(reward)) if done:", "environment...') spec = muniverse.spec_for_name('TowerMania-v1') print('Creating environment...') env = muniverse.Env(spec) try: print('Resetting environment...') env.reset()", "actions = [action, action.with_event('mouseReleased')] while True: reward, done = env.step(0.1, actions[step_idx % 2])", "2]) step_idx += 1 print('reward: ' + str(reward)) if done: break finally: env.close()", "= [action, action.with_event('mouseReleased')] while True: reward, done = env.step(0.1, actions[step_idx % 2]) step_idx", "height, width = binary.shape res = '' for y in range(0, height): if", "/ 3 downsampled = brightness[::14, ::7] binary = downsampled > 128 height, width", "obs = env.observe() print(ascii_art(obs)) print('Playing game...') step_idx = 0 action = muniverse.MouseAction('mousePressed', x=100,", "in range(0, width): if 
binary[y, x]: res += 'X' else: res += '", "width): if binary[y, x]: res += 'X' else: res += ' ' return", "main(): print('Looking up environment...') spec = muniverse.spec_for_name('TowerMania-v1') print('Creating environment...') env = muniverse.Env(spec) try:", "x=100, y=100, click_count=1) actions = [action, action.with_event('mouseReleased')] while True: reward, done = env.step(0.1,", "muniverse.MouseAction('mousePressed', x=100, y=100, click_count=1) actions = [action, action.with_event('mouseReleased')] while True: reward, done =", "ascii_art(img): brightness = np.sum(img, axis=2) / 3 downsampled = brightness[::14, ::7] binary =", "that takes mouse events. \"\"\" import sys import numpy as np sys.path.insert(0, '..')", "+= '\\n' for x in range(0, width): if binary[y, x]: res += 'X'", "brightness = np.sum(img, axis=2) / 3 downsampled = brightness[::14, ::7] binary = downsampled", "import muniverse # noqa: E402 def main(): print('Looking up environment...') spec = muniverse.spec_for_name('TowerMania-v1')", "action = muniverse.MouseAction('mousePressed', x=100, y=100, click_count=1) actions = [action, action.with_event('mouseReleased')] while True: reward,", "\"\"\" import sys import numpy as np sys.path.insert(0, '..') import muniverse # noqa:", "numpy as np sys.path.insert(0, '..') import muniverse # noqa: E402 def main(): print('Looking", "done: break finally: env.close() def ascii_art(img): brightness = np.sum(img, axis=2) / 3 downsampled", "res += '\\n' for x in range(0, width): if binary[y, x]: res +=", "> 128 height, width = binary.shape res = '' for y in range(0,", "events. \"\"\" import sys import numpy as np sys.path.insert(0, '..') import muniverse #", "game that takes mouse events. 
\"\"\" import sys import numpy as np sys.path.insert(0,", "step_idx += 1 print('reward: ' + str(reward)) if done: break finally: env.close() def", "up environment...') spec = muniverse.spec_for_name('TowerMania-v1') print('Creating environment...') env = muniverse.Env(spec) try: print('Resetting environment...')", "env = muniverse.Env(spec) try: print('Resetting environment...') env.reset() print('Getting observation...') obs = env.observe() print(ascii_art(obs))", "= 0 action = muniverse.MouseAction('mousePressed', x=100, y=100, click_count=1) actions = [action, action.with_event('mouseReleased')] while", "= downsampled > 128 height, width = binary.shape res = '' for y", "sys import numpy as np sys.path.insert(0, '..') import muniverse # noqa: E402 def", "0 action = muniverse.MouseAction('mousePressed', x=100, y=100, click_count=1) actions = [action, action.with_event('mouseReleased')] while True:", "brightness[::14, ::7] binary = downsampled > 128 height, width = binary.shape res =", "print('Getting observation...') obs = env.observe() print(ascii_art(obs)) print('Playing game...') step_idx = 0 action =", "\"\"\" Simple program to demonstrate how to use muniverse on a game that", "click_count=1) actions = [action, action.with_event('mouseReleased')] while True: reward, done = env.step(0.1, actions[step_idx %", "!= '': res += '\\n' for x in range(0, width): if binary[y, x]:", "break finally: env.close() def ascii_art(img): brightness = np.sum(img, axis=2) / 3 downsampled =", "+= 1 print('reward: ' + str(reward)) if done: break finally: env.close() def ascii_art(img):", "np.sum(img, axis=2) / 3 downsampled = brightness[::14, ::7] binary = downsampled > 128", "muniverse.Env(spec) try: print('Resetting environment...') env.reset() print('Getting observation...') obs = env.observe() print(ascii_art(obs)) print('Playing game...')", "how to use muniverse on a game that takes mouse events. 
\"\"\" import", "env.observe() print(ascii_art(obs)) print('Playing game...') step_idx = 0 action = muniverse.MouseAction('mousePressed', x=100, y=100, click_count=1)", "observation...') obs = env.observe() print(ascii_art(obs)) print('Playing game...') step_idx = 0 action = muniverse.MouseAction('mousePressed',", "if binary[y, x]: res += 'X' else: res += ' ' return res", "muniverse on a game that takes mouse events. \"\"\" import sys import numpy", "demonstrate how to use muniverse on a game that takes mouse events. \"\"\"", "while True: reward, done = env.step(0.1, actions[step_idx % 2]) step_idx += 1 print('reward:", "to use muniverse on a game that takes mouse events. \"\"\" import sys", "axis=2) / 3 downsampled = brightness[::14, ::7] binary = downsampled > 128 height,", "'X' else: res += ' ' return res if __name__ == '__main__': main()", "sys.path.insert(0, '..') import muniverse # noqa: E402 def main(): print('Looking up environment...') spec", "= muniverse.spec_for_name('TowerMania-v1') print('Creating environment...') env = muniverse.Env(spec) try: print('Resetting environment...') env.reset() print('Getting observation...')", "= muniverse.Env(spec) try: print('Resetting environment...') env.reset() print('Getting observation...') obs = env.observe() print(ascii_art(obs)) print('Playing", "def main(): print('Looking up environment...') spec = muniverse.spec_for_name('TowerMania-v1') print('Creating environment...') env = muniverse.Env(spec)", "str(reward)) if done: break finally: env.close() def ascii_art(img): brightness = np.sum(img, axis=2) /", "for x in range(0, width): if binary[y, x]: res += 'X' else: res", "::7] binary = downsampled > 128 height, width = binary.shape res = ''", "environment...') env.reset() print('Getting observation...') obs = env.observe() print(ascii_art(obs)) print('Playing game...') step_idx = 0", "128 height, width = binary.shape res = '' for y in range(0, height):", "range(0, height): if res != '': res += '\\n' for x in 
range(0,", "mouse events. \"\"\" import sys import numpy as np sys.path.insert(0, '..') import muniverse", "downsampled > 128 height, width = binary.shape res = '' for y in", "x]: res += 'X' else: res += ' ' return res if __name__", "if res != '': res += '\\n' for x in range(0, width): if", "print('Creating environment...') env = muniverse.Env(spec) try: print('Resetting environment...') env.reset() print('Getting observation...') obs =", "program to demonstrate how to use muniverse on a game that takes mouse", "a game that takes mouse events. \"\"\" import sys import numpy as np", "env.reset() print('Getting observation...') obs = env.observe() print(ascii_art(obs)) print('Playing game...') step_idx = 0 action", "try: print('Resetting environment...') env.reset() print('Getting observation...') obs = env.observe() print(ascii_art(obs)) print('Playing game...') step_idx" ]
# -*- coding: utf-8 -*-
# (c) University of Strathclyde 2021
# Author: <NAME>
#
# Contact: <EMAIL>
#
# <NAME>,
# Strathclyde Institute for Pharmacy and Biomedical Sciences,
# Cathedral Street,
# Glasgow,
# G4 0RE
# Scotland,
# UK
#
# The MIT License
#
# Copyright (c) 2021 University of Strathclyde
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Code to implement the ANIblastall average nucleotide identity method."""
import logging
import os
import platform
import re
import shutil
import subprocess

from pathlib import Path

from . import pyani_config
from . import PyaniException


class PyaniblastallException(PyaniException):
    """ANIblastall-specific exception for pyani."""


def get_version(blast_exe: Path = pyani_config.BLASTALL_DEFAULT) -> str:
    r"""Return BLAST blastall version as a string.

    :param blast_exe: path to blastall executable

    We expect blastall to return a string as, for example

    .. code-block:: bash

        $ blastall -version
        [blastall 2.2.26] ERROR: Number of database sequences to show \
        one-line descriptions for (V) [ersion] is bad or out of range [? to ?]

    This is concatenated with the OS name.

    The following circumstances are explicitly reported as strings

    - no executable at passed path
    - non-executable file at passed path (this includes cases where the user
      doesn't have execute permissions on the file)
    - no version info returned
    - executable cannot be run on this OS
    """
    logger = logging.getLogger(__name__)

    try:
        blastall_path = Path(shutil.which(blast_exe))  # type:ignore
    except TypeError:
        # shutil.which() returned None: executable not on $PATH
        return f"{blast_exe} is not found in $PATH"
    if not blastall_path.is_file():  # no executable
        return f"No blastall at {blastall_path}"
    # This should catch cases when the file can't be executed by the user
    if not os.access(blastall_path, os.X_OK):  # file exists but not executable
        return f"blastall exists at {blastall_path} but not executable"

    # On macOS, blastall without arguments does not print a usable banner;
    # presumably that is why -version is passed only there — TODO confirm.
    if platform.system() == "Darwin":
        cmdline = [blast_exe, "-version"]
    else:
        cmdline = [blast_exe]
    try:
        result = subprocess.run(
            cmdline,  # type: ignore
            shell=False,
            stdout=subprocess.PIPE,  # type: ignore
            stderr=subprocess.PIPE,
            check=False,  # blastall doesn't return 0
        )
    except OSError:
        logger.warning("blastall executable will not run", exc_info=True)
        return f"blastall exists at {blastall_path} but could not be executed"

    # blastall writes its banner to stderr.  Guard against a missing match so
    # that unexpected output yields the explanatory string below instead of
    # raising AttributeError on NoneType (.group() on a failed search).
    match = re.search(r"(?<=blastall\s)[0-9\.]*", str(result.stderr, "utf-8"))
    version = match.group() if match else ""
    if not version.strip():
        return f"blastall exists at {blastall_path} but could not retrieve version"
    return f"{platform.system()}_{version} ({blastall_path})"
:param blast_exe:", "non-executable file at passed path (this includes cases where the user doesn't have", "ignore shell=False, stdout=subprocess.PIPE, # type: ignore stderr=subprocess.PIPE, check=False, # blastall doesn't return 0", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED", "KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "to whom the Software is # furnished to do so, subject to the", "limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or", "not found in $PATH\" if not blastall_path.is_file(): # no executable return f\"No blastall", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER", "f\"{blast_exe} is not found in $PATH\" if not blastall_path.is_file(): # no executable return", "Pharmacy and Biomedical Sciences, # Cathedral Street, # Glasgow, # G4 0RE #", "logging.getLogger(__name__) try: blastall_path = Path(shutil.which(blast_exe)) # type:ignore except TypeError: return f\"{blast_exe} is not", "# no executable return f\"No blastall at {blastall_path}\" # This should catch cases", "Copyright (c) 2021 University of Strathclyde # # Permission is hereby granted, free", "BLAST blastall version as a string. :param blast_exe: path to blastall executable We", "explicitly reported as strings - no executable at passed path - non-executable file", "copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED", "# blastall doesn't return 0 ) except OSError: logger.warning(\"blastall executable will not run\",", "-*- coding: utf-8 -*- # (c) University of Strathclyde 2021 # Author: <NAME>", "path (this includes cases where the user doesn't have execute permissions on the", "executed by the user if not os.access(blastall_path, os.X_OK): # file exists but not", "portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "do so, subject to the following conditions: # # The above copyright notice", "[? to ?] 
This is concatenated with the OS name. The following circumstances", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "Strathclyde 2021 # Author: <NAME> # # Contact: <EMAIL> # # <NAME>, #", "permit persons to whom the Software is # furnished to do so, subject", "Permission is hereby granted, free of charge, to any person obtaining a copy", "executable\" if platform.system() == \"Darwin\": cmdline = [blast_exe, \"-version\"] else: cmdline = [blast_exe]", "2.2.26] ERROR: Number of database sequences to show \\ one-line descriptions for (V)", "be executed by the user if not os.access(blastall_path, os.X_OK): # file exists but", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "if not os.access(blastall_path, os.X_OK): # file exists but not executable return f\"blastall exists", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "Software without restriction, including without limitation the rights # to use, copy, modify,", "Strathclyde # # Permission is hereby granted, free of charge, to any person", "# The above copyright notice and this permission notice shall be included in", "# of this software and associated documentation files (the \"Software\"), to deal #", "OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "strings - no executable at passed path - non-executable file at passed path", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE", "sell # copies of the Software, and to permit persons to whom the", "substantial portions of the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\",", "cases when the file can't be executed by the user if not os.access(blastall_path,", "= subprocess.run( cmdline, # type: ignore shell=False, stdout=subprocess.PIPE, # type: ignore stderr=subprocess.PIPE, check=False,", "import shutil import subprocess from pathlib import Path from . 
import pyani_config from", "# all copies or substantial portions of the Software. # # THE SOFTWARE", "[ersion] is bad or out of range [? to ?] This is concatenated", "restriction, including without limitation the rights # to use, copy, modify, merge, publish,", "returned - executable cannot be run on this OS \"\"\" logger = logging.getLogger(__name__)", "# type: ignore r\"(?<=blastall\\s)[0-9\\.]*\", str(result.stderr, \"utf-8\") ).group() if 0 == len(version.strip()): return f\"blastall", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS", "# # Permission is hereby granted, free of charge, to any person obtaining", "all copies or substantial portions of the Software. # # THE SOFTWARE IS", "exception for pyani.\"\"\" def get_version(blast_exe: Path = pyani_config.BLASTALL_DEFAULT) -> str: r\"\"\"Return BLAST blastall", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "executable at passed path - non-executable file at passed path (this includes cases", "version info returned - executable cannot be run on this OS \"\"\" logger", "path to blastall executable We expect blastall to return a string as, for", "cmdline, # type: ignore shell=False, stdout=subprocess.PIPE, # type: ignore stderr=subprocess.PIPE, check=False, # blastall", "logger.warning(\"blastall executable will not run\", exc_info=True) return f\"blastall exists at {blastall_path} but could", "== len(version.strip()): return f\"blastall exists at {blastall_path} but could not retrieve version\" return", "FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "platform.system() == \"Darwin\": cmdline = [blast_exe, \"-version\"] else: cmdline = [blast_exe] try: result", "files (the \"Software\"), to deal # in the Software without restriction, including without", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "range [? to ?] This is concatenated with the OS name. The following", ". import PyaniException class PyaniblastallException(PyaniException): \"\"\"ANIblastall-specific exception for pyani.\"\"\" def get_version(blast_exe: Path =", "passed path (this includes cases where the user doesn't have execute permissions on", "Contact: <EMAIL> # # <NAME>, # Strathclyde Institute for Pharmacy and Biomedical Sciences,", "executable cannot be run on this OS \"\"\" logger = logging.getLogger(__name__) try: blastall_path", "the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell", "following conditions: # # The above copyright notice and this permission notice shall", "of the Software, and to permit persons to whom the Software is #", "no version info returned - executable cannot be run on this OS \"\"\"", "<NAME> # # Contact: <EMAIL> # # <NAME>, # Strathclyde Institute for Pharmacy", "ERROR: Number of database sequences to show \\ one-line descriptions for (V) [ersion]", "def get_version(blast_exe: Path = pyani_config.BLASTALL_DEFAULT) -> str: r\"\"\"Return BLAST blastall version as a", "The above copyright notice and this permission notice shall be included in #", "reported as strings - no executable at passed path - non-executable file at", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR", "for pyani.\"\"\" def get_version(blast_exe: Path = pyani_config.BLASTALL_DEFAULT) -> str: r\"\"\"Return BLAST blastall version", "doesn't have execute permissions on the file) - no version info returned -", "exists at {blastall_path} but not 
executable\" if platform.system() == \"Darwin\": cmdline = [blast_exe,", "exists but not executable return f\"blastall exists at {blastall_path} but not executable\" if", ". import pyani_config from . import PyaniException class PyaniblastallException(PyaniException): \"\"\"ANIblastall-specific exception for pyani.\"\"\"", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "blastall doesn't return 0 ) except OSError: logger.warning(\"blastall executable will not run\", exc_info=True)", "to show \\ one-line descriptions for (V) [ersion] is bad or out of", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN", "method.\"\"\" import logging import os import platform import re import shutil import subprocess", "including without limitation the rights # to use, copy, modify, merge, publish, distribute,", "except TypeError: return f\"{blast_exe} is not found in $PATH\" if not blastall_path.is_file(): #", "str: r\"\"\"Return BLAST blastall version as a string. 
:param blast_exe: path to blastall", "file can't be executed by the user if not os.access(blastall_path, os.X_OK): # file", "info returned - executable cannot be run on this OS \"\"\" logger =", "platform import re import shutil import subprocess from pathlib import Path from .", "at passed path - non-executable file at passed path (this includes cases where", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "\"\"\" logger = logging.getLogger(__name__) try: blastall_path = Path(shutil.which(blast_exe)) # type:ignore except TypeError: return", "no executable return f\"No blastall at {blastall_path}\" # This should catch cases when", "associated documentation files (the \"Software\"), to deal # in the Software without restriction,", "University of Strathclyde # # Permission is hereby granted, free of charge, to", "OS \"\"\" logger = logging.getLogger(__name__) try: blastall_path = Path(shutil.which(blast_exe)) # type:ignore except TypeError:", "hereby granted, free of charge, to any person obtaining a copy # of", "of this software and associated documentation files (the \"Software\"), to deal # in", "$PATH\" if not blastall_path.is_file(): # no executable return f\"No blastall at {blastall_path}\" #", "University of Strathclyde 2021 # Author: <NAME> # # Contact: <EMAIL> # #", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "type: ignore stderr=subprocess.PIPE, check=False, # blastall doesn't return 0 ) except OSError: logger.warning(\"blastall", "OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. \"\"\"Code to implement", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "Path from . import pyani_config from . 
import PyaniException class PyaniblastallException(PyaniException): \"\"\"ANIblastall-specific exception", "executable return f\"blastall exists at {blastall_path} but not executable\" if platform.system() == \"Darwin\":", "notice shall be included in # all copies or substantial portions of the", "sequences to show \\ one-line descriptions for (V) [ersion] is bad or out", "The MIT License # # Copyright (c) 2021 University of Strathclyde # #", "NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "# UK # # The MIT License # # Copyright (c) 2021 University", "2021 # Author: <NAME> # # Contact: <EMAIL> # # <NAME>, # Strathclyde", "one-line descriptions for (V) [ersion] is bad or out of range [? to", ":param blast_exe: path to blastall executable We expect blastall to return a string", "\"-version\"] else: cmdline = [blast_exe] try: result = subprocess.run( cmdline, # type: ignore", "to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of", "blastall_path.is_file(): # no executable return f\"No blastall at {blastall_path}\" # This should catch", "logging import os import platform import re import shutil import subprocess from pathlib", "subject to the following conditions: # # The above copyright notice and this", "# type: ignore shell=False, stdout=subprocess.PIPE, # type: ignore stderr=subprocess.PIPE, check=False, # blastall doesn't" ]
[ "on which to plot X[:,p] should be the base point for the p^{th}", "the p^{th} point X[2,:] is shown as a surface over X[1,:] and X[2,:]", "if X.shape[0]==2: ax.scatter(X[0,:],X[1,:], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.scatter(X[0,:],X[1,:],X[2,:],*args, **kwargs) def", "Points are handled in matrix columns rather than separate arguments for separate coordinates.", "**kwargs): \"\"\" Scatter-plot 2d or 3d points in numpy.array X ax should be", "matplotlib.Axes (or Axes3D) on which to plot X[:,p] should be the base point", "odd X[:,p] should be the p^{th} point to plot lims[n,0] and lims[n,1] are", "elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.scatter(X[0,:],X[1,:],X[2,:],*args, **kwargs) def text(ax, X, strs, *args, **kwargs):", "the n^{th} grid point sampled \"\"\" G = np.mgrid[tuple(slice(mins[d],maxes[d],(samp*1j)) for d in range(len(mins)))]", "X.shape[0]==2: ax.scatter(X[0,:],X[1,:], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.scatter(X[0,:],X[1,:],X[2,:],*args, **kwargs) def text(ax,", "limits at once. ax is the matplotlib.Axes (or Axes3D) on which to plot", "set_lims(ax, lims): \"\"\" Set all 2d or 3d plot limits at once. 
ax", "as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D def plot(ax, X,", "plotNd(X, lims, *args): \"\"\" Plot Nd points in numpy.array X Every two dimensions", "args and kwargs should be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.plot(X[0,:],X[1,:], *args,", "is shown as a surface over X[1,:] and X[2,:] args and kwargs should", "= int(X.shape[0]/2); num_rows = np.floor(np.sqrt(num_subplots)) num_cols = np.ceil(num_subplots/num_rows) for subplot in range(num_subplots): ax", "range(X.shape[1]): if X.shape[0]==2: ax.text(X[0,j],X[1,j], strs[j], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.text(X[0,j],X[1,j],X[2,j],", "N odd X[:,p] should be the p^{th} point to plot lims[n,0] and lims[n,1]", "dimension args should be as in matplotlib.Axes.plot \"\"\" num_subplots = int(X.shape[0]/2); num_rows =", "is the p^{th} point X[2,:] is shown as a surface over X[1,:] and", "ax is the matplotlib.Axes (or Axes3D) on which to plot lims[0,:] are xlims,", "\"\"\" for j in range(X.shape[1]): if X.shape[0]==2: ax.text(X[0,j],X[1,j], strs[j], *args, **kwargs) elif X.shape[0]==3:", "and high plot limits for the n^{th} dimension args should be as in", "\"\"\" num_subplots = int(X.shape[0]/2); num_rows = np.floor(np.sqrt(num_subplots)) num_cols = np.ceil(num_subplots/num_rows) for subplot in", "p^{th} point to plot lims[n,0] and lims[n,1] are low and high plot limits", "import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D def plot(ax, X, *args, **kwargs):", "to plot strs[p] should be the p^{th} string to plot args and kwargs", "X.shape[0]==2: ax.plot(X[0,:],X[1,:], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.plot(X[0,:],X[1,:],X[2,:],*args, **kwargs) def scatter(ax,", "columns rather than separate arguments for separate coordinates. 
\"\"\" import numpy as np", "ax.set_ylim(lims[1,:]) if len(lims)>2: ax.set_zlim(lims[2,:]) def lattice(mins, maxes, samp): \"\"\" Samples Nd points on", "is the n^{th} grid point sampled \"\"\" G = np.mgrid[tuple(slice(mins[d],maxes[d],(samp*1j)) for d in", "be the matplotlib.Axes (or Axes3D) on which to plot X[:,p] should be the", "i^{th} dimension samp is the number of points to sample in each dimension", "and kwargs should be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.plot(X[0,:],X[1,:], *args, **kwargs)", "matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D def plot(ax, X, *args, **kwargs): \"\"\"", "as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.plot(X[0,:],X[1,:], *args, **kwargs) elif X.shape[0]==3: #ax =", "separate subplot The last dimension is omitted when N odd X[:,p] should be", "text at 2d or 3d points in numpy.array X ax should be the", "string to plot args and kwargs should be as in matplotlib.Axes.plot \"\"\" for", "*args, **kwargs): \"\"\" Scatter-plot 2d or 3d points in numpy.array X ax should", "(or Axes3D) on which to plot X[:,p] should be the p^{th} point at", "in G]) return G def plot_trisurf(ax, X, *args, **kwargs): \"\"\" Plots points in", "if X.shape[0]==2: ax.text(X[0,j],X[1,j], strs[j], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.text(X[0,j],X[1,j],X[2,j], strs[j],", "*args, **kwargs) def quiver(ax, X, U, *args, **kwargs): \"\"\" Plot 2d or 3d", "where G[:,n] is the n^{th} grid point sampled \"\"\" G = np.mgrid[tuple(slice(mins[d],maxes[d],(samp*1j)) for", "and U. 
ax should be the matplotlib.Axes (or Axes3D) on which to plot", "plot args and kwargs should be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.quiver(X[0,:],X[1,:],U[0,:],U[1,:],", "ax.scatter(X[0,:],X[1,:],X[2,:],*args, **kwargs) def text(ax, X, strs, *args, **kwargs): \"\"\" Plot text at 2d", "X.shape[0]==2: ax.quiver(X[0,:],X[1,:],U[0,:],U[1,:], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.quiver(X[0,:],X[1,:],X[2,:],U[0,:],U[1,:],U[2,:],*args, **kwargs) def plotNd(X,", "all 2d or 3d plot limits at once. ax is the matplotlib.Axes (or", "*args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.text(X[0,j],X[1,j],X[2,j], strs[j], *args, **kwargs) def quiver(ax,", "matplotlib.Axes.plot \"\"\" num_subplots = int(X.shape[0]/2); num_rows = np.floor(np.sqrt(num_subplots)) num_cols = np.ceil(num_subplots/num_rows) for subplot", "Nd points on a regularly spaced grid mins[i], maxes[i] are the grid extents", "= plt.gca(projection=\"3d\") ax.quiver(X[0,:],X[1,:],X[2,:],U[0,:],U[1,:],U[2,:],*args, **kwargs) def plotNd(X, lims, *args): \"\"\" Plot Nd points in", "samp): \"\"\" Samples Nd points on a regularly spaced grid mins[i], maxes[i] are", "**kwargs): \"\"\" Plot text at 2d or 3d points in numpy.array X ax", "numpy.arrays X and U. 
ax should be the matplotlib.Axes (or Axes3D) on which", "kwargs should be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.quiver(X[0,:],X[1,:],U[0,:],U[1,:], *args, **kwargs) elif", "if X.shape[0]==2: ax.quiver(X[0,:],X[1,:],U[0,:],U[1,:], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.quiver(X[0,:],X[1,:],X[2,:],U[0,:],U[1,:],U[2,:],*args, **kwargs) def", "plot X[:,p] should be the base point for the p^{th} vector U[:,p] should", "the p^{th} string to plot args and kwargs should be as in matplotlib.Axes.plot", "d in range(len(mins)))] G = np.array([g.flatten() for g in G]) return G def", "a surface over X[1,:] and X[2,:] args and kwargs should be as in", "for g in G]) return G def plot_trisurf(ax, X, *args, **kwargs): \"\"\" Plots", "\"\"\" Samples Nd points on a regularly spaced grid mins[i], maxes[i] are the", "matrix columns rather than separate arguments for separate coordinates. \"\"\" import numpy as", "dimension Returns numpy.array G, where G[:,n] is the n^{th} grid point sampled \"\"\"", "**kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.text(X[0,j],X[1,j],X[2,j], strs[j], *args, **kwargs) def quiver(ax, X,", "= np.ceil(num_subplots/num_rows) for subplot in range(num_subplots): ax = plt.subplot(num_rows, num_cols, subplot+1) ax.plot(X[2*subplot,:], X[2*subplot+1,:],", "X[:,p] should be the base point for the p^{th} vector U[:,p] should be", "around matplotlib plotting functions. 
Points are handled in matrix columns rather than separate", "G]) return G def plot_trisurf(ax, X, *args, **kwargs): \"\"\" Plots points in numpy.array", "j in range(X.shape[1]): if X.shape[0]==2: ax.text(X[0,j],X[1,j], strs[j], *args, **kwargs) elif X.shape[0]==3: #ax =", "regularly spaced grid mins[i], maxes[i] are the grid extents in the i^{th} dimension", "#ax = plt.gca(projection=\"3d\") ax.quiver(X[0,:],X[1,:],X[2,:],U[0,:],U[1,:],U[2,:],*args, **kwargs) def plotNd(X, lims, *args): \"\"\" Plot Nd points", "elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.text(X[0,j],X[1,j],X[2,j], strs[j], *args, **kwargs) def quiver(ax, X, U,", "plot args and kwargs should be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.scatter(X[0,:],X[1,:],", "Plot 2d or 3d points in numpy.array X ax should be the matplotlib.Axes", "numpy.array G, where G[:,n] is the n^{th} grid point sampled \"\"\" G =", "num_subplots = int(X.shape[0]/2); num_rows = np.floor(np.sqrt(num_subplots)) num_cols = np.ceil(num_subplots/num_rows) for subplot in range(num_subplots):", "field in numpy.arrays X and U. ax should be the matplotlib.Axes (or Axes3D)", "plot args and kwargs should be as in matplotlib.Axes.plot \"\"\" for j in", "X, *args, **kwargs): \"\"\" Scatter-plot 2d or 3d points in numpy.array X ax", "*args, **kwargs): \"\"\" Plot text at 2d or 3d points in numpy.array X", "the p^{th} point to plot args and kwargs should be as in matplotlib.Axes.plot", "should be the p^{th} vector to plot args and kwargs should be as", "\"\"\" Convenience wrappers around matplotlib plotting functions. 
Points are handled in matrix columns", "points in numpy.array X Every two dimensions are shown on a separate subplot", "lims[n,1] are low and high plot limits for the n^{th} dimension args should", "for the p^{th} vector U[:,p] should be the p^{th} vector to plot args", "p^{th} vector U[:,p] should be the p^{th} vector to plot args and kwargs", "on which to plot X[:,p] is the p^{th} point X[2,:] is shown as", "should be the matplotlib.Axes (or Axes3D) on which to plot X[:,p] should be", "be the p^{th} point to plot lims[n,0] and lims[n,1] are low and high", "as plt from mpl_toolkits.mplot3d import Axes3D def plot(ax, X, *args, **kwargs): \"\"\" Plot", "should be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.scatter(X[0,:],X[1,:], *args, **kwargs) elif X.shape[0]==3:", "scatter(ax, X, *args, **kwargs): \"\"\" Scatter-plot 2d or 3d points in numpy.array X", "on which to plot lims[0,:] are xlims, etc. \"\"\" ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) if len(lims)>2:", "over X[1,:] and X[2,:] args and kwargs should be as in matplotlib.Axes3D.plot_trisurf \"\"\"", "vector to plot args and kwargs should be as in matplotlib.Axes.plot \"\"\" if", "base point for the p^{th} vector U[:,p] should be the p^{th} vector to", "two dimensions are shown on a separate subplot The last dimension is omitted", "X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.text(X[0,j],X[1,j],X[2,j], strs[j], *args, **kwargs) def quiver(ax, X, U, *args,", "Scatter-plot 2d or 3d points in numpy.array X ax should be the matplotlib.Axes", "**kwargs): \"\"\" Plot 2d or 3d vector field in numpy.arrays X and U.", "g in G]) return G def plot_trisurf(ax, X, *args, **kwargs): \"\"\" Plots points", "G, where G[:,n] is the n^{th} grid point sampled \"\"\" G = np.mgrid[tuple(slice(mins[d],maxes[d],(samp*1j))", "be as in matplotlib.Axes.plot \"\"\" for j in range(X.shape[1]): if X.shape[0]==2: ax.text(X[0,j],X[1,j], strs[j],", "on which to plot X[:,p] should be the p^{th} point at which to", "kwargs 
should be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.scatter(X[0,:],X[1,:], *args, **kwargs) elif", "\"\"\" import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D", "shown on a separate subplot The last dimension is omitted when N odd", "3d vector field in numpy.arrays X and U. ax should be the matplotlib.Axes", "should be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.plot(X[0,:],X[1,:], *args, **kwargs) elif X.shape[0]==3:", "**kwargs) def plotNd(X, lims, *args): \"\"\" Plot Nd points in numpy.array X Every", "\"\"\" G = np.mgrid[tuple(slice(mins[d],maxes[d],(samp*1j)) for d in range(len(mins)))] G = np.array([g.flatten() for g", "Returns numpy.array G, where G[:,n] is the n^{th} grid point sampled \"\"\" G", "\"\"\" if X.shape[0]==2: ax.plot(X[0,:],X[1,:], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.plot(X[0,:],X[1,:],X[2,:],*args, **kwargs)", "X, strs, *args, **kwargs): \"\"\" Plot text at 2d or 3d points in", "int(X.shape[0]/2); num_rows = np.floor(np.sqrt(num_subplots)) num_cols = np.ceil(num_subplots/num_rows) for subplot in range(num_subplots): ax =", "p^{th} point X[2,:] is shown as a surface over X[1,:] and X[2,:] args", "X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.plot(X[0,:],X[1,:],X[2,:],*args, **kwargs) def scatter(ax, X, *args, **kwargs): \"\"\" Scatter-plot", "be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.scatter(X[0,:],X[1,:], *args, **kwargs) elif X.shape[0]==3: #ax", "to plot X[:,p] should be the p^{th} point at which to plot strs[p]", "matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.quiver(X[0,:],X[1,:],U[0,:],U[1,:], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.quiver(X[0,:],X[1,:],X[2,:],U[0,:],U[1,:],U[2,:],*args,", "the number of points to sample in each dimension Returns numpy.array G, where", "X, *args, **kwargs): \"\"\" Plot 2d or 3d points in numpy.array X ax", "in matplotlib.Axes.plot \"\"\" if 
X.shape[0]==2: ax.scatter(X[0,:],X[1,:], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\")", "*args, **kwargs): \"\"\" Plot 2d or 3d points in numpy.array X ax should", "= np.array([g.flatten() for g in G]) return G def plot_trisurf(ax, X, *args, **kwargs):", "\"\"\" Plots points in numpy.array X as a surface. ax is the matplotlib.Axes3D", "\"\"\" Plot 2d or 3d vector field in numpy.arrays X and U. ax", "coordinates. \"\"\" import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import", "matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.scatter(X[0,:],X[1,:], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.scatter(X[0,:],X[1,:],X[2,:],*args,", "plot X[:,p] is the p^{th} point X[2,:] is shown as a surface over", "np.floor(np.sqrt(num_subplots)) num_cols = np.ceil(num_subplots/num_rows) for subplot in range(num_subplots): ax = plt.subplot(num_rows, num_cols, subplot+1)", "np.array([g.flatten() for g in G]) return G def plot_trisurf(ax, X, *args, **kwargs): \"\"\"", "a surface. ax is the matplotlib.Axes3D on which to plot X[:,p] is the", "to plot X[:,p] is the p^{th} point X[2,:] is shown as a surface", "args and kwargs should be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.quiver(X[0,:],X[1,:],U[0,:],U[1,:], *args,", "\"\"\" ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) if len(lims)>2: ax.set_zlim(lims[2,:]) def lattice(mins, maxes, samp): \"\"\" Samples Nd", "rather than separate arguments for separate coordinates. \"\"\" import numpy as np import", "than separate arguments for separate coordinates. \"\"\" import numpy as np import matplotlib.pyplot", "strs[j], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.text(X[0,j],X[1,j],X[2,j], strs[j], *args, **kwargs) def", "functions. 
Points are handled in matrix columns rather than separate arguments for separate", "**kwargs) def scatter(ax, X, *args, **kwargs): \"\"\" Scatter-plot 2d or 3d points in", "(or Axes3D) on which to plot lims[0,:] are xlims, etc. \"\"\" ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:])", "text(ax, X, strs, *args, **kwargs): \"\"\" Plot text at 2d or 3d points", "should be the p^{th} string to plot args and kwargs should be as", "**kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.quiver(X[0,:],X[1,:],X[2,:],U[0,:],U[1,:],U[2,:],*args, **kwargs) def plotNd(X, lims, *args): \"\"\"", "ax.quiver(X[0,:],X[1,:],X[2,:],U[0,:],U[1,:],U[2,:],*args, **kwargs) def plotNd(X, lims, *args): \"\"\" Plot Nd points in numpy.array X", "in numpy.arrays X and U. ax should be the matplotlib.Axes (or Axes3D) on", "are low and high plot limits for the n^{th} dimension args should be", "num_rows = np.floor(np.sqrt(num_subplots)) num_cols = np.ceil(num_subplots/num_rows) for subplot in range(num_subplots): ax = plt.subplot(num_rows,", "X ax should be the matplotlib.Axes (or Axes3D) on which to plot X[:,p]", "which to plot X[:,p] should be the base point for the p^{th} vector", "be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.plot(X[0,:],X[1,:], *args, **kwargs) elif X.shape[0]==3: #ax", "subplot The last dimension is omitted when N odd X[:,p] should be the", "(or Axes3D) on which to plot X[:,p] should be the base point for", "Set all 2d or 3d plot limits at once. ax is the matplotlib.Axes", "limits for the n^{th} dimension args should be as in matplotlib.Axes.plot \"\"\" num_subplots", "as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.scatter(X[0,:],X[1,:], *args, **kwargs) elif X.shape[0]==3: #ax =", "matplotlib.Axes3D on which to plot X[:,p] is the p^{th} point X[2,:] is shown", "p^{th} vector to plot args and kwargs should be as in matplotlib.Axes.plot \"\"\"", "separate coordinates. 
\"\"\" import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d", "= plt.gca(projection=\"3d\") ax.scatter(X[0,:],X[1,:],X[2,:],*args, **kwargs) def text(ax, X, strs, *args, **kwargs): \"\"\" Plot text", "2d or 3d vector field in numpy.arrays X and U. ax should be", "import Axes3D def plot(ax, X, *args, **kwargs): \"\"\" Plot 2d or 3d points", "be the base point for the p^{th} vector U[:,p] should be the p^{th}", "the p^{th} point to plot lims[n,0] and lims[n,1] are low and high plot", "**kwargs): \"\"\" Plots points in numpy.array X as a surface. ax is the", "extents in the i^{th} dimension samp is the number of points to sample", "**kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.scatter(X[0,:],X[1,:],X[2,:],*args, **kwargs) def text(ax, X, strs, *args,", "X[:,p] should be the p^{th} point to plot lims[n,0] and lims[n,1] are low", "which to plot X[:,p] is the p^{th} point X[2,:] is shown as a", "or 3d plot limits at once. ax is the matplotlib.Axes (or Axes3D) on", "Nd points in numpy.array X Every two dimensions are shown on a separate", "points in numpy.array X as a surface. ax is the matplotlib.Axes3D on which", "lims[n,0] and lims[n,1] are low and high plot limits for the n^{th} dimension", "ax is the matplotlib.Axes3D on which to plot X[:,p] is the p^{th} point", "2d or 3d plot limits at once. 
ax is the matplotlib.Axes (or Axes3D)", "ax.quiver(X[0,:],X[1,:],U[0,:],U[1,:], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.quiver(X[0,:],X[1,:],X[2,:],U[0,:],U[1,:],U[2,:],*args, **kwargs) def plotNd(X, lims,", "should be the p^{th} point at which to plot strs[p] should be the", "\"\"\" if X.shape[0]==2: ax.quiver(X[0,:],X[1,:],U[0,:],U[1,:], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.quiver(X[0,:],X[1,:],X[2,:],U[0,:],U[1,:],U[2,:],*args, **kwargs)", "low and high plot limits for the n^{th} dimension args should be as", "in range(num_subplots): ax = plt.subplot(num_rows, num_cols, subplot+1) ax.plot(X[2*subplot,:], X[2*subplot+1,:], *args) ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) def", "which to plot lims[0,:] are xlims, etc. \"\"\" ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) if len(lims)>2: ax.set_zlim(lims[2,:])", "point at which to plot strs[p] should be the p^{th} string to plot", "ax.scatter(X[0,:],X[1,:], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.scatter(X[0,:],X[1,:],X[2,:],*args, **kwargs) def text(ax, X,", "plot limits at once. ax is the matplotlib.Axes (or Axes3D) on which to", "num_cols = np.ceil(num_subplots/num_rows) for subplot in range(num_subplots): ax = plt.subplot(num_rows, num_cols, subplot+1) ax.plot(X[2*subplot,:],", "are the grid extents in the i^{th} dimension samp is the number of", "matplotlib.Axes (or Axes3D) on which to plot lims[0,:] are xlims, etc. 
\"\"\" ax.set_xlim(lims[0,:])", "*args): \"\"\" Plot Nd points in numpy.array X Every two dimensions are shown", "when N odd X[:,p] should be the p^{th} point to plot lims[n,0] and", "for d in range(len(mins)))] G = np.array([g.flatten() for g in G]) return G", "= plt.gca(projection=\"3d\") ax.plot(X[0,:],X[1,:],X[2,:],*args, **kwargs) def scatter(ax, X, *args, **kwargs): \"\"\" Scatter-plot 2d or", "The last dimension is omitted when N odd X[:,p] should be the p^{th}", "*args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.plot(X[0,:],X[1,:],X[2,:],*args, **kwargs) def scatter(ax, X, *args,", "numpy.array X as a surface. ax is the matplotlib.Axes3D on which to plot", "grid extents in the i^{th} dimension samp is the number of points to", "the base point for the p^{th} vector U[:,p] should be the p^{th} vector", "or 3d points in numpy.array X ax should be the matplotlib.Axes (or Axes3D)", "number of points to sample in each dimension Returns numpy.array G, where G[:,n]", "vector field in numpy.arrays X and U. ax should be the matplotlib.Axes (or", "ax.set_zlim(lims[2,:]) def lattice(mins, maxes, samp): \"\"\" Samples Nd points on a regularly spaced", "ax should be the matplotlib.Axes (or Axes3D) on which to plot X[:,p] should", "#ax = plt.gca(projection=\"3d\") ax.text(X[0,j],X[1,j],X[2,j], strs[j], *args, **kwargs) def quiver(ax, X, U, *args, **kwargs):", "plot limits for the n^{th} dimension args should be as in matplotlib.Axes.plot \"\"\"", "to plot lims[0,:] are xlims, etc. 
\"\"\" ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) if len(lims)>2: ax.set_zlim(lims[2,:]) def", "the i^{th} dimension samp is the number of points to sample in each", "the p^{th} vector to plot args and kwargs should be as in matplotlib.Axes.plot", "for the n^{th} dimension args should be as in matplotlib.Axes.plot \"\"\" num_subplots =", "= np.mgrid[tuple(slice(mins[d],maxes[d],(samp*1j)) for d in range(len(mins)))] G = np.array([g.flatten() for g in G])", "matplotlib plotting functions. Points are handled in matrix columns rather than separate arguments", "is the matplotlib.Axes (or Axes3D) on which to plot lims[0,:] are xlims, etc.", "in matrix columns rather than separate arguments for separate coordinates. \"\"\" import numpy", "and kwargs should be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.quiver(X[0,:],X[1,:],U[0,:],U[1,:], *args, **kwargs)", "from mpl_toolkits.mplot3d import Axes3D def plot(ax, X, *args, **kwargs): \"\"\" Plot 2d or", "**kwargs) def text(ax, X, strs, *args, **kwargs): \"\"\" Plot text at 2d or", "arguments for separate coordinates. 
\"\"\" import numpy as np import matplotlib.pyplot as plt", "\"\"\" Plot text at 2d or 3d points in numpy.array X ax should", "ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) def set_lims(ax, lims): \"\"\" Set all 2d or 3d plot limits", "U, *args, **kwargs): \"\"\" Plot 2d or 3d vector field in numpy.arrays X", "in each dimension Returns numpy.array G, where G[:,n] is the n^{th} grid point", "#ax = plt.gca(projection=\"3d\") ax.scatter(X[0,:],X[1,:],X[2,:],*args, **kwargs) def text(ax, X, strs, *args, **kwargs): \"\"\" Plot", "the grid extents in the i^{th} dimension samp is the number of points", "kwargs should be as in matplotlib.Axes.plot \"\"\" for j in range(X.shape[1]): if X.shape[0]==2:", "Axes3D) on which to plot X[:,p] should be the p^{th} point to plot", "np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D def plot(ax, X, *args,", "ax.plot(X[2*subplot,:], X[2*subplot+1,:], *args) ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) def set_lims(ax, lims): \"\"\" Set all 2d or", "3d plot limits at once. ax is the matplotlib.Axes (or Axes3D) on which", "X as a surface. ax is the matplotlib.Axes3D on which to plot X[:,p]", "is the number of points to sample in each dimension Returns numpy.array G,", "the p^{th} point at which to plot strs[p] should be the p^{th} string", "X.shape[0]==2: ax.text(X[0,j],X[1,j], strs[j], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.text(X[0,j],X[1,j],X[2,j], strs[j], *args,", "should be the p^{th} point to plot lims[n,0] and lims[n,1] are low and", "high plot limits for the n^{th} dimension args should be as in matplotlib.Axes.plot", "point X[2,:] is shown as a surface over X[1,:] and X[2,:] args and", "def lattice(mins, maxes, samp): \"\"\" Samples Nd points on a regularly spaced grid", "be the p^{th} point to plot args and kwargs should be as in", "plot lims[0,:] are xlims, etc. 
\"\"\" ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) if len(lims)>2: ax.set_zlim(lims[2,:]) def lattice(mins,", "plt.subplot(num_rows, num_cols, subplot+1) ax.plot(X[2*subplot,:], X[2*subplot+1,:], *args) ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) def set_lims(ax, lims): \"\"\" Set", "on which to plot X[:,p] should be the p^{th} point to plot args", "\"\"\" Plot 2d or 3d points in numpy.array X ax should be the", "*args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.quiver(X[0,:],X[1,:],X[2,:],U[0,:],U[1,:],U[2,:],*args, **kwargs) def plotNd(X, lims, *args):", "should be the base point for the p^{th} vector U[:,p] should be the", "the matplotlib.Axes (or Axes3D) on which to plot lims[0,:] are xlims, etc. \"\"\"", "len(lims)>2: ax.set_zlim(lims[2,:]) def lattice(mins, maxes, samp): \"\"\" Samples Nd points on a regularly", "= np.floor(np.sqrt(num_subplots)) num_cols = np.ceil(num_subplots/num_rows) for subplot in range(num_subplots): ax = plt.subplot(num_rows, num_cols,", "X[:,p] should be the p^{th} point to plot args and kwargs should be", "= plt.gca(projection=\"3d\") ax.text(X[0,j],X[1,j],X[2,j], strs[j], *args, **kwargs) def quiver(ax, X, U, *args, **kwargs): \"\"\"", "for j in range(X.shape[1]): if X.shape[0]==2: ax.text(X[0,j],X[1,j], strs[j], *args, **kwargs) elif X.shape[0]==3: #ax", "dimensions are shown on a separate subplot The last dimension is omitted when", "def set_lims(ax, lims): \"\"\" Set all 2d or 3d plot limits at once.", "maxes, samp): \"\"\" Samples Nd points on a regularly spaced grid mins[i], maxes[i]", "for separate coordinates. \"\"\" import numpy as np import matplotlib.pyplot as plt from", "at once. 
ax is the matplotlib.Axes (or Axes3D) on which to plot lims[0,:]", "and kwargs should be as in matplotlib.Axes.plot \"\"\" for j in range(X.shape[1]): if", "be the p^{th} vector to plot args and kwargs should be as in", "points to sample in each dimension Returns numpy.array G, where G[:,n] is the", "subplot+1) ax.plot(X[2*subplot,:], X[2*subplot+1,:], *args) ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) def set_lims(ax, lims): \"\"\" Set all 2d", "which to plot strs[p] should be the p^{th} string to plot args and", "def scatter(ax, X, *args, **kwargs): \"\"\" Scatter-plot 2d or 3d points in numpy.array", "strs, *args, **kwargs): \"\"\" Plot text at 2d or 3d points in numpy.array", "etc. \"\"\" ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) if len(lims)>2: ax.set_zlim(lims[2,:]) def lattice(mins, maxes, samp): \"\"\" Samples", "plot lims[n,0] and lims[n,1] are low and high plot limits for the n^{th}", "last dimension is omitted when N odd X[:,p] should be the p^{th} point", "to plot X[:,p] should be the base point for the p^{th} vector U[:,p]", "numpy.array X Every two dimensions are shown on a separate subplot The last", "Axes3D) on which to plot lims[0,:] are xlims, etc. \"\"\" ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) if", "as a surface over X[1,:] and X[2,:] args and kwargs should be as", "which to plot X[:,p] should be the p^{th} point at which to plot", "plotting functions. Points are handled in matrix columns rather than separate arguments for", "in numpy.array X as a surface. ax is the matplotlib.Axes3D on which to", "should be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.quiver(X[0,:],X[1,:],U[0,:],U[1,:], *args, **kwargs) elif X.shape[0]==3:", "X, U, *args, **kwargs): \"\"\" Plot 2d or 3d vector field in numpy.arrays", "separate arguments for separate coordinates. \"\"\" import numpy as np import matplotlib.pyplot as", "or 3d vector field in numpy.arrays X and U. 
ax should be the", "numpy.array X ax should be the matplotlib.Axes (or Axes3D) on which to plot", "on a regularly spaced grid mins[i], maxes[i] are the grid extents in the", "ax.plot(X[0,:],X[1,:],X[2,:],*args, **kwargs) def scatter(ax, X, *args, **kwargs): \"\"\" Scatter-plot 2d or 3d points", "matplotlib.Axes (or Axes3D) on which to plot X[:,p] should be the p^{th} point", "once. ax is the matplotlib.Axes (or Axes3D) on which to plot lims[0,:] are", "vector U[:,p] should be the p^{th} vector to plot args and kwargs should", "X[:,p] is the p^{th} point X[2,:] is shown as a surface over X[1,:]", "def plotNd(X, lims, *args): \"\"\" Plot Nd points in numpy.array X Every two", "a regularly spaced grid mins[i], maxes[i] are the grid extents in the i^{th}", "mpl_toolkits.mplot3d import Axes3D def plot(ax, X, *args, **kwargs): \"\"\" Plot 2d or 3d", "2d or 3d points in numpy.array X ax should be the matplotlib.Axes (or", "on a separate subplot The last dimension is omitted when N odd X[:,p]", "n^{th} dimension args should be as in matplotlib.Axes.plot \"\"\" num_subplots = int(X.shape[0]/2); num_rows", "num_cols, subplot+1) ax.plot(X[2*subplot,:], X[2*subplot+1,:], *args) ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) def set_lims(ax, lims): \"\"\" Set all", "X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.quiver(X[0,:],X[1,:],X[2,:],U[0,:],U[1,:],U[2,:],*args, **kwargs) def plotNd(X, lims, *args): \"\"\" Plot Nd", "def quiver(ax, X, U, *args, **kwargs): \"\"\" Plot 2d or 3d vector field", "*args) ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) def set_lims(ax, lims): \"\"\" Set all 2d or 3d plot", "be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.quiver(X[0,:],X[1,:],U[0,:],U[1,:], *args, **kwargs) elif X.shape[0]==3: #ax", "the matplotlib.Axes (or Axes3D) on which to plot X[:,p] should be the p^{th}", "plot_trisurf(ax, X, *args, **kwargs): \"\"\" Plots points in numpy.array X as a surface.", "G = np.mgrid[tuple(slice(mins[d],maxes[d],(samp*1j)) for d in 
range(len(mins)))] G = np.array([g.flatten() for g in", "args and kwargs should be as in matplotlib.Axes.plot \"\"\" for j in range(X.shape[1]):", "(or Axes3D) on which to plot X[:,p] should be the p^{th} point to", "strs[j], *args, **kwargs) def quiver(ax, X, U, *args, **kwargs): \"\"\" Plot 2d or", "samp is the number of points to sample in each dimension Returns numpy.array", "p^{th} point to plot args and kwargs should be as in matplotlib.Axes.plot \"\"\"", "at 2d or 3d points in numpy.array X ax should be the matplotlib.Axes", "return G def plot_trisurf(ax, X, *args, **kwargs): \"\"\" Plots points in numpy.array X", "#ax = plt.gca(projection=\"3d\") ax.plot(X[0,:],X[1,:],X[2,:],*args, **kwargs) def scatter(ax, X, *args, **kwargs): \"\"\" Scatter-plot 2d", "plot strs[p] should be the p^{th} string to plot args and kwargs should", "which to plot X[:,p] should be the p^{th} point to plot args and", "ax.plot(X[0,:],X[1,:], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.plot(X[0,:],X[1,:],X[2,:],*args, **kwargs) def scatter(ax, X,", "be the p^{th} string to plot args and kwargs should be as in", "in range(X.shape[1]): if X.shape[0]==2: ax.text(X[0,j],X[1,j], strs[j], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\")", "ax.text(X[0,j],X[1,j],X[2,j], strs[j], *args, **kwargs) def quiver(ax, X, U, *args, **kwargs): \"\"\" Plot 2d", "plt.gca(projection=\"3d\") ax.quiver(X[0,:],X[1,:],X[2,:],U[0,:],U[1,:],U[2,:],*args, **kwargs) def plotNd(X, lims, *args): \"\"\" Plot Nd points in numpy.array", "ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) if len(lims)>2: ax.set_zlim(lims[2,:]) def lattice(mins, maxes, samp): \"\"\" Samples Nd points", "in matplotlib.Axes.plot \"\"\" for j in range(X.shape[1]): if X.shape[0]==2: ax.text(X[0,j],X[1,j], strs[j], *args, **kwargs)", "the matplotlib.Axes (or Axes3D) on which to plot X[:,p] should be the base", "the n^{th} dimension args should be as in matplotlib.Axes.plot \"\"\" num_subplots = 
int(X.shape[0]/2);", "Plots points in numpy.array X as a surface. ax is the matplotlib.Axes3D on", "as in matplotlib.Axes.plot \"\"\" num_subplots = int(X.shape[0]/2); num_rows = np.floor(np.sqrt(num_subplots)) num_cols = np.ceil(num_subplots/num_rows)", "mins[i], maxes[i] are the grid extents in the i^{th} dimension samp is the", "\"\"\" if X.shape[0]==2: ax.scatter(X[0,:],X[1,:], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.scatter(X[0,:],X[1,:],X[2,:],*args, **kwargs)", "for subplot in range(num_subplots): ax = plt.subplot(num_rows, num_cols, subplot+1) ax.plot(X[2*subplot,:], X[2*subplot+1,:], *args) ax.set_xlim(lims[0,:])", "surface. ax is the matplotlib.Axes3D on which to plot X[:,p] is the p^{th}", "numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D def plot(ax,", "in numpy.array X Every two dimensions are shown on a separate subplot The", "**kwargs) def quiver(ax, X, U, *args, **kwargs): \"\"\" Plot 2d or 3d vector", "lims[0,:] are xlims, etc. \"\"\" ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) if len(lims)>2: ax.set_zlim(lims[2,:]) def lattice(mins, maxes,", "and lims[n,1] are low and high plot limits for the n^{th} dimension args", "range(num_subplots): ax = plt.subplot(num_rows, num_cols, subplot+1) ax.plot(X[2*subplot,:], X[2*subplot+1,:], *args) ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) def set_lims(ax,", "omitted when N odd X[:,p] should be the p^{th} point to plot lims[n,0]", "maxes[i] are the grid extents in the i^{th} dimension samp is the number", "xlims, etc. 
\"\"\" ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) if len(lims)>2: ax.set_zlim(lims[2,:]) def lattice(mins, maxes, samp): \"\"\"", "points on a regularly spaced grid mins[i], maxes[i] are the grid extents in", "at which to plot strs[p] should be the p^{th} string to plot args", "sample in each dimension Returns numpy.array G, where G[:,n] is the n^{th} grid", "p^{th} point at which to plot strs[p] should be the p^{th} string to", "**kwargs): \"\"\" Plot 2d or 3d points in numpy.array X ax should be", "should be the p^{th} point to plot args and kwargs should be as", "plt.gca(projection=\"3d\") ax.scatter(X[0,:],X[1,:],X[2,:],*args, **kwargs) def text(ax, X, strs, *args, **kwargs): \"\"\" Plot text at", "**kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.plot(X[0,:],X[1,:],X[2,:],*args, **kwargs) def scatter(ax, X, *args, **kwargs):", "Axes3D) on which to plot X[:,p] should be the p^{th} point at which", "ax = plt.subplot(num_rows, num_cols, subplot+1) ax.plot(X[2*subplot,:], X[2*subplot+1,:], *args) ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) def set_lims(ax, lims):", "point for the p^{th} vector U[:,p] should be the p^{th} vector to plot", "shown as a surface over X[1,:] and X[2,:] args and kwargs should be", "def text(ax, X, strs, *args, **kwargs): \"\"\" Plot text at 2d or 3d", "<gh_stars>1-10 \"\"\" Convenience wrappers around matplotlib plotting functions. Points are handled in matrix", "X[:,p] should be the p^{th} point at which to plot strs[p] should be", "point to plot lims[n,0] and lims[n,1] are low and high plot limits for", "is the matplotlib.Axes3D on which to plot X[:,p] is the p^{th} point X[2,:]", "Convenience wrappers around matplotlib plotting functions. Points are handled in matrix columns rather", "X Every two dimensions are shown on a separate subplot The last dimension", "U[:,p] should be the p^{th} vector to plot args and kwargs should be", "handled in matrix columns rather than separate arguments for separate coordinates. 
\"\"\" import", "in range(len(mins)))] G = np.array([g.flatten() for g in G]) return G def plot_trisurf(ax,", "Axes3D) on which to plot X[:,p] should be the base point for the", "*args, **kwargs): \"\"\" Plots points in numpy.array X as a surface. ax is", "in the i^{th} dimension samp is the number of points to sample in", "as in matplotlib.Axes.plot \"\"\" for j in range(X.shape[1]): if X.shape[0]==2: ax.text(X[0,j],X[1,j], strs[j], *args,", "dimension samp is the number of points to sample in each dimension Returns", "is omitted when N odd X[:,p] should be the p^{th} point to plot", "if len(lims)>2: ax.set_zlim(lims[2,:]) def lattice(mins, maxes, samp): \"\"\" Samples Nd points on a", "G[:,n] is the n^{th} grid point sampled \"\"\" G = np.mgrid[tuple(slice(mins[d],maxes[d],(samp*1j)) for d", "\"\"\" Scatter-plot 2d or 3d points in numpy.array X ax should be the", "sampled \"\"\" G = np.mgrid[tuple(slice(mins[d],maxes[d],(samp*1j)) for d in range(len(mins)))] G = np.array([g.flatten() for", "subplot in range(num_subplots): ax = plt.subplot(num_rows, num_cols, subplot+1) ax.plot(X[2*subplot,:], X[2*subplot+1,:], *args) ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:])", "def plot_trisurf(ax, X, *args, **kwargs): \"\"\" Plots points in numpy.array X as a", "ax.text(X[0,j],X[1,j], strs[j], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.text(X[0,j],X[1,j],X[2,j], strs[j], *args, **kwargs)", "\"\"\" Set all 2d or 3d plot limits at once. ax is the", "X and U. ax should be the matplotlib.Axes (or Axes3D) on which to", "X, *args, **kwargs): \"\"\" Plots points in numpy.array X as a surface. ax", "grid point sampled \"\"\" G = np.mgrid[tuple(slice(mins[d],maxes[d],(samp*1j)) for d in range(len(mins)))] G =", "X[1,:] and X[2,:] args and kwargs should be as in matplotlib.Axes3D.plot_trisurf \"\"\" ax.plot_trisurf(X[0,:],X[1,:],X[2,:],*args,", "are xlims, etc. 
\"\"\" ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) if len(lims)>2: ax.set_zlim(lims[2,:]) def lattice(mins, maxes, samp):", "args should be as in matplotlib.Axes.plot \"\"\" num_subplots = int(X.shape[0]/2); num_rows = np.floor(np.sqrt(num_subplots))", "ax.set_ylim(lims[1,:]) def set_lims(ax, lims): \"\"\" Set all 2d or 3d plot limits at", "np.mgrid[tuple(slice(mins[d],maxes[d],(samp*1j)) for d in range(len(mins)))] G = np.array([g.flatten() for g in G]) return", "lims, *args): \"\"\" Plot Nd points in numpy.array X Every two dimensions are", "plt from mpl_toolkits.mplot3d import Axes3D def plot(ax, X, *args, **kwargs): \"\"\" Plot 2d", "plt.gca(projection=\"3d\") ax.plot(X[0,:],X[1,:],X[2,:],*args, **kwargs) def scatter(ax, X, *args, **kwargs): \"\"\" Scatter-plot 2d or 3d", "be the p^{th} point at which to plot strs[p] should be the p^{th}", "np.ceil(num_subplots/num_rows) for subplot in range(num_subplots): ax = plt.subplot(num_rows, num_cols, subplot+1) ax.plot(X[2*subplot,:], X[2*subplot+1,:], *args)", "*args, **kwargs): \"\"\" Plot 2d or 3d vector field in numpy.arrays X and", "spaced grid mins[i], maxes[i] are the grid extents in the i^{th} dimension samp", "to plot args and kwargs should be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2:", "range(len(mins)))] G = np.array([g.flatten() for g in G]) return G def plot_trisurf(ax, X,", "def plot(ax, X, *args, **kwargs): \"\"\" Plot 2d or 3d points in numpy.array", "a separate subplot The last dimension is omitted when N odd X[:,p] should", "should be as in matplotlib.Axes.plot \"\"\" for j in range(X.shape[1]): if X.shape[0]==2: ax.text(X[0,j],X[1,j],", "quiver(ax, X, U, *args, **kwargs): \"\"\" Plot 2d or 3d vector field in", "in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.plot(X[0,:],X[1,:], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\")", "to plot args and kwargs should be as in matplotlib.Axes.plot \"\"\" for j", "plot(ax, X, *args, **kwargs): \"\"\" Plot 2d or 3d points in 
numpy.array X", "as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.quiver(X[0,:],X[1,:],U[0,:],U[1,:], *args, **kwargs) elif X.shape[0]==3: #ax =", "each dimension Returns numpy.array G, where G[:,n] is the n^{th} grid point sampled", "are shown on a separate subplot The last dimension is omitted when N", "lattice(mins, maxes, samp): \"\"\" Samples Nd points on a regularly spaced grid mins[i],", "point to plot args and kwargs should be as in matplotlib.Axes.plot \"\"\" if", "are handled in matrix columns rather than separate arguments for separate coordinates. \"\"\"", "import numpy as np import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D def", "X[2*subplot+1,:], *args) ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) def set_lims(ax, lims): \"\"\" Set all 2d or 3d", "to sample in each dimension Returns numpy.array G, where G[:,n] is the n^{th}", "grid mins[i], maxes[i] are the grid extents in the i^{th} dimension samp is", "G def plot_trisurf(ax, X, *args, **kwargs): \"\"\" Plots points in numpy.array X as", "in matplotlib.Axes.plot \"\"\" num_subplots = int(X.shape[0]/2); num_rows = np.floor(np.sqrt(num_subplots)) num_cols = np.ceil(num_subplots/num_rows) for", "\"\"\" Plot Nd points in numpy.array X Every two dimensions are shown on", "should be as in matplotlib.Axes.plot \"\"\" num_subplots = int(X.shape[0]/2); num_rows = np.floor(np.sqrt(num_subplots)) num_cols", "to plot X[:,p] should be the p^{th} point to plot args and kwargs", "plot X[:,p] should be the p^{th} point to plot args and kwargs should", "to plot lims[n,0] and lims[n,1] are low and high plot limits for the", "U. 
ax should be the matplotlib.Axes (or Axes3D) on which to plot X[:,p]", "X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.scatter(X[0,:],X[1,:],X[2,:],*args, **kwargs) def text(ax, X, strs, *args, **kwargs): \"\"\"", "elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.quiver(X[0,:],X[1,:],X[2,:],U[0,:],U[1,:],U[2,:],*args, **kwargs) def plotNd(X, lims, *args): \"\"\" Plot", "lims): \"\"\" Set all 2d or 3d plot limits at once. ax is", "= plt.subplot(num_rows, num_cols, subplot+1) ax.plot(X[2*subplot,:], X[2*subplot+1,:], *args) ax.set_xlim(lims[0,:]) ax.set_ylim(lims[1,:]) def set_lims(ax, lims): \"\"\"", "points in numpy.array X ax should be the matplotlib.Axes (or Axes3D) on which", "args and kwargs should be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.scatter(X[0,:],X[1,:], *args,", "point sampled \"\"\" G = np.mgrid[tuple(slice(mins[d],maxes[d],(samp*1j)) for d in range(len(mins)))] G = np.array([g.flatten()", "surface over X[1,:] and X[2,:] args and kwargs should be as in matplotlib.Axes3D.plot_trisurf", "Every two dimensions are shown on a separate subplot The last dimension is", "wrappers around matplotlib plotting functions. 
Points are handled in matrix columns rather than", "elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.plot(X[0,:],X[1,:],X[2,:],*args, **kwargs) def scatter(ax, X, *args, **kwargs): \"\"\"", "3d points in numpy.array X ax should be the matplotlib.Axes (or Axes3D) on", "matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.plot(X[0,:],X[1,:], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.plot(X[0,:],X[1,:],X[2,:],*args,", "plot X[:,p] should be the p^{th} point at which to plot strs[p] should", "plot args and kwargs should be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.plot(X[0,:],X[1,:],", "be as in matplotlib.Axes.plot \"\"\" num_subplots = int(X.shape[0]/2); num_rows = np.floor(np.sqrt(num_subplots)) num_cols =", "in numpy.array X ax should be the matplotlib.Axes (or Axes3D) on which to", "if X.shape[0]==2: ax.plot(X[0,:],X[1,:], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.plot(X[0,:],X[1,:],X[2,:],*args, **kwargs) def", "and X[2,:] args and kwargs should be as in matplotlib.Axes3D.plot_trisurf \"\"\" ax.plot_trisurf(X[0,:],X[1,:],X[2,:],*args, **kwargs)", "and kwargs should be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.scatter(X[0,:],X[1,:], *args, **kwargs)", "of points to sample in each dimension Returns numpy.array G, where G[:,n] is", "kwargs should be as in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.plot(X[0,:],X[1,:], *args, **kwargs) elif", "in matplotlib.Axes.plot \"\"\" if X.shape[0]==2: ax.quiver(X[0,:],X[1,:],U[0,:],U[1,:], *args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\")", "as a surface. 
ax is the matplotlib.Axes3D on which to plot X[:,p] is", "matplotlib.Axes.plot \"\"\" for j in range(X.shape[1]): if X.shape[0]==2: ax.text(X[0,j],X[1,j], strs[j], *args, **kwargs) elif", "Axes3D def plot(ax, X, *args, **kwargs): \"\"\" Plot 2d or 3d points in", "dimension is omitted when N odd X[:,p] should be the p^{th} point to", "Samples Nd points on a regularly spaced grid mins[i], maxes[i] are the grid", "plt.gca(projection=\"3d\") ax.text(X[0,j],X[1,j],X[2,j], strs[j], *args, **kwargs) def quiver(ax, X, U, *args, **kwargs): \"\"\" Plot", "the p^{th} vector U[:,p] should be the p^{th} vector to plot args and", "X[2,:] is shown as a surface over X[1,:] and X[2,:] args and kwargs", "n^{th} grid point sampled \"\"\" G = np.mgrid[tuple(slice(mins[d],maxes[d],(samp*1j)) for d in range(len(mins)))] G", "*args, **kwargs) elif X.shape[0]==3: #ax = plt.gca(projection=\"3d\") ax.scatter(X[0,:],X[1,:],X[2,:],*args, **kwargs) def text(ax, X, strs,", "the matplotlib.Axes3D on which to plot X[:,p] is the p^{th} point X[2,:] is", "Plot Nd points in numpy.array X Every two dimensions are shown on a", "p^{th} string to plot args and kwargs should be as in matplotlib.Axes.plot \"\"\"", "G = np.array([g.flatten() for g in G]) return G def plot_trisurf(ax, X, *args,", "strs[p] should be the p^{th} string to plot args and kwargs should be", "Plot 2d or 3d vector field in numpy.arrays X and U. ax should", "Plot text at 2d or 3d points in numpy.array X ax should be" ]
class Assembly(tree.Tree, transform.TransformableMixin):
    '''tree of assembly items'''

    def __init__(self, name=None, parent=None, state=None):
        # state, when supplied, must be a CncState; it is shared with every
        # node appended under this one (see append / the state setter).
        super().__init__(name=name, parent=parent)
        if state is not None:
            if not isinstance(state, CncState):
                raise TypeError('state must be of type CncState, not {}'.format(type(state)))
        self._state = state

    @property
    def state(self):
        # The shared CncState for this subtree (may be None until assigned).
        return self._state

    @state.setter
    def state(self, new_state):
        # Propagate the new state recursively so the entire subtree shares
        # one CncState instance.
        self._state = new_state
        for child in self.children:
            child.state = self.state

    def check_type(self, other):
        # NOTE(review): assert is stripped under `python -O`; if this guard
        # matters at runtime, raising TypeError would be safer — confirm.
        assert isinstance(other, Assembly)

    def append(self, arg):
        # Append a child and hand it this subtree's shared state.
        super().append(arg)
        arg.state = self.state

    def last(self):
        # Most recently appended child.
        return self.children[-1]

    def get_gcode(self):
        return self.get_actions().get_gcode()

    def get_points(self):
        return self.get_actions().get_points()

    def update_children_preorder(self):
        # Hook for subclasses: mutate children before this node's
        # pre-order actions are collected. Default: no-op.
        pass

    def get_preorder_actions(self):
        # Hook for subclasses: actions emitted on the way down the tree.
        return ()

    def get_postorder_actions(self):
        # Hook for subclasses: actions emitted on the way back up.
        return ()

    def update_children_postorder(self):
        # Hook for subclasses: mutate children after post-order actions.
        pass

    def get_actions(self):
        # Collect actions from the whole subtree: depth-first walk,
        # pre-order hooks on the way down, post-order hooks on the way up.
        # state.excursion() presumably saves/restores state around the
        # collection pass — TODO confirm against .state module.
        with self.state.excursion():
            al = action.ActionList()
            for step in self.depth_first_walk():
                if step.is_visit:
                    if step.is_preorder:
                        step.visited.update_children_preorder()
                        al.extend(step.visited.get_preorder_actions())
                    elif step.is_postorder:
                        al.extend(step.visited.get_postorder_actions())
                        step.visited.update_children_postorder()
            return al

    @property
    def pos(self):
        # Current tool position, stored in the shared state.
        return self.state['position']

    @pos.setter
    def pos(self, arg):
        self.state['position'] = arg

    def pos_offset(self, x=None, y=None, z=None):
        # Shift the current position by the given deltas (None = unchanged,
        # per Point.offset semantics — defined in the point module).
        self.pos = self.pos.offset(x, y, z)

    @property
    def root_transforms(self):
        '''get transforms stacked all the way to the root'''
        result = transform.TransformList()
        for walk_step in self.root_walk():
            if walk_step.is_visit and walk_step.is_preorder:
                if isinstance(walk_step.visited, Assembly):
                    # extend left: ancestors' transforms are applied outermost,
                    # so each visited node's transforms are prepended.
                    result[0:0] = walk_step.visited.transforms
        return result
class SafeJog(Assembly):
    '''Jog (rapid move) to a destination point via the safe Z height.

    The move is emitted as three jogs so the tool never travels laterally
    below z_safe: raise straight up to z_safe, translate in X/Y, then
    drop to the destination Z. No actions are emitted when the position
    is already at the destination.
    '''

    def __init__(self, x=0, y=0, z=0, name=None, parent=None, state=None):
        super().__init__(name=name, parent=parent, state=state)
        # Destination, expressed in this assembly's local coordinates.
        self.dest = pt.PointList(((x, y, z), ))

    @property
    def point(self):
        # Destination transformed into root (machine) coordinates.
        return pt.PointList(self.root_transforms(self.dest.arr))[0]

    @property
    def changes(self):
        # Truthy when the current position differs from the destination.
        return pt.changes(self.pos, self.point)

    def get_preorder_actions(self):
        al = action.ActionList()
        # Skip the whole sequence when no movement is needed.
        if self.changes:
            jog = partial(action.Jog, state=self.state)
            al += jog(x=self.pos.x, y=self.pos.y, z=self.state['z_safe'])
            # NOTE(review): this relies on each Jog updating
            # state['position'] as it is created, so self.pos.z here is
            # already z_safe — confirm against the action module.
            al += jog(x=self.point.x, y=self.point.y, z=self.pos.z)
            al += jog(x=self.point.x, y=self.point.y, z=self.point.z)
        return al
class SafeZ(Assembly):
    '''Emit a single rapid move straight up to the safe Z height.

    The redundant __init__ (which only forwarded the identical
    (name, parent, state) defaults to Assembly.__init__) has been
    removed; the inherited constructor has the same signature, so the
    public interface is unchanged.
    '''

    def get_preorder_actions(self):
        # NOTE(review): the original also computed a point from
        # (0, 0, z_margin) via root_transforms but never used it; that
        # dead computation is removed here. If the intent was to jog to a
        # transformed z_margin height rather than z_safe, that is a
        # latent bug — confirm with the author.
        al = action.ActionList()
        jog = partial(action.Jog, state=self.state)
        al += jog(x=self.pos.x, y=self.pos.y, z=self.state['z_safe'])
        return al
"x=None, y=None, z=None): self.pos = self.pos.offset(x, y, z) @property def root_transforms(self): '''get transforms", "not {}'.format(type(state))) self._state = state @property def state(self): return self._state @state.setter def state(self,", "state(self): return self._state @state.setter def state(self, new_state): self._state = new_state for child in", "for walk_step in self.root_walk(): if walk_step.is_visit and walk_step.is_preorder: if isinstance(walk_step.visited, Assembly): # extend", "return self.state['position'] @pos.setter def pos(self, arg): self.state['position'] = arg def pos_offset(self, x=None, y=None,", "al += jog(x=self.point.x, y=self.point.y, z=self.pos.z) al += jog(x=self.point.x, y=self.point.y, z=self.point.z) # print(\"safejog\", self.state['position'])", "= self.state def check_type(self, other): assert isinstance(other, Assembly) def append(self, arg): super().append(arg) arg.state", "self.children: child.state = self.state def check_type(self, other): assert isinstance(other, Assembly) def append(self, arg):", "self.changes: jog = partial(action.Jog, state=self.state) al += jog(x=self.pos.x, y=self.pos.y, z=self.state['z_safe']) al += jog(x=self.point.x,", "jog = partial(action.Jog, state=self.state) al += jog(x=self.pos.x, y=self.pos.y, z=self.state['z_safe']) al += jog(x=self.point.x, y=self.point.y,", "state=state) def get_preorder_actions(self): al = action.ActionList() points = pt.PointList(((0, 0, self.state['z_margin']), )) point", "parent=parent, state=state) def get_preorder_actions(self): al = action.ActionList() points = pt.PointList(((0, 0, self.state['z_margin']), ))", "self._state @state.setter def state(self, new_state): self._state = new_state for child in self.children: child.state", "return self.get_actions().get_gcode() def get_points(self): return self.get_actions().get_points() def update_children_preorder(self): pass def get_preorder_actions(self): return ()", "arg.state = self.state def last(self): return 
class SafeJog(Assembly):
    """Assembly node emitting a three-step "safe" jog to a destination:
    lift to the safe Z height, traverse in XY, then descend to the target.
    """

    def __init__(self, x=0, y=0, z=0, name=None, parent=None, state=None):
        super().__init__(name=name, parent=parent, state=state)
        # Destination as a one-point list in local (untransformed) coords.
        self.dest = pt.PointList(((x, y, z), ))

    @property
    def point(self):
        # Destination mapped through every transform up to the tree root.
        return pt.PointList(self.root_transforms(self.dest.arr))[0]

    @property
    def changes(self):
        # Difference between the current position and the (transformed)
        # destination; falsy when no move is needed — TODO confirm
        # pt.changes returns an empty/falsy value in that case.
        return pt.changes(self.pos, self.point)

    def get_preorder_actions(self):
        al = action.ActionList()
        # print(self.changes)
        if self.changes:
            jog = partial(action.Jog, state=self.state)
            # 1) lift straight up to the safe height at the current X/Y.
            al += jog(x=self.pos.x, y=self.pos.y, z=self.state['z_safe'])
            # 2) traverse in X/Y at the current Z.  NOTE(review): this
            #    re-reads self.pos, which presumably was updated when the
            #    first Jog was constructed (so this Z is z_safe) — confirm
            #    that action.Jog mutates state['position'] on construction.
            al += jog(x=self.point.x, y=self.point.y, z=self.pos.z)
            # 3) descend to the destination Z.
            al += jog(x=self.point.x, y=self.point.y, z=self.point.z)
        # print("safejog", self.state['position'])
        return al
class SafeZ(Assembly):
    """Assembly node that jogs straight up to the safe Z height."""

    def __init__(self, name=None, parent=None, state=None):
        super().__init__(name=name, parent=parent, state=state)

    def get_preorder_actions(self):
        al = action.ActionList()
        # Map (0, 0, z_margin) through the transforms up to the root.
        points = pt.PointList(((0, 0, self.state['z_margin']), ))
        point = pt.PointList(self.root_transforms(points.arr))[0]
        jog = partial(action.Jog, state=self.state)
        # Vertical move only: X/Y stay at the current position.
        al += jog(x=self.pos.x, y=self.pos.y, z=self.state['z_safe'])
        # NOTE(review): `points`/`point` above are computed but never used;
        # the jog targets z_safe, not point.z.  Either the computation is
        # dead code or the jog was meant to use point.z — confirm intent.
        return al
def index(request):
    """View function for home page of site."""
    # Summary counts of the main catalog objects.
    num_books = Book.objects.all().count()
    num_instances = BookInstance.objects.all().count()

    # Copies currently available for loan (status 'a').
    num_instances_available = BookInstance.objects.filter(status__exact='a').count()

    # Author count; objects.count() implies .all().
    num_authors = Author.objects.count()

    context = {
        'num_books': num_books,
        'num_instances': num_instances,
        'num_instances_available': num_instances_available,
        'num_authors': num_authors,
    }
    # Render the index.html template with the counts as context.
    return render(request, 'index.html', context=context)
#", "copies of books num_instances_available = \\ BookInstance.objects.filter(status__exact='a').count() num_authors = Author.objects.count() # The 'all()'", "'__all__' initial = {'date_of_death': '05/01/2018'} permission_required = 'catalog.can_mark_returned' class AuthorUpdate(PermissionRequiredMixin, UpdateView): model =", "AuthorDelete(PermissionRequiredMixin, DeleteView): model = Author success_url = reverse_lazy('authors') permission_required = 'catalog.can_mark_returned' # Classes", "BookUpdate(PermissionRequiredMixin, UpdateView): model = Book fields = '__all__' permission_required = 'catalog.can_mark_returned' class BookDelete(PermissionRequiredMixin,", "= reverse_lazy('authors') permission_required = 'catalog.can_mark_returned' # Classes created for the forms challenge class", "'catalog.can_mark_returned' class BookDelete(PermissionRequiredMixin, DeleteView): model = Book success_url = reverse_lazy('books') permission_required = 'catalog.can_mark_returned'", "= Author fields = ['first_name', 'last_name', 'date_of_birth', 'date_of_death'] permission_required = 'catalog.can_mark_returned' class AuthorDelete(PermissionRequiredMixin,", "PermissionRequiredMixin def index(request): \"\"\"View function for home page of site.\"\"\" # Generate counts", "from django.contrib.auth.mixins import PermissionRequiredMixin def index(request): \"\"\"View function for home page of site.\"\"\"", "of the main objects num_books = Book.objects.all().count() num_instances = BookInstance.objects.all().count() # # Available", "num_books = Book.objects.all().count() num_instances = BookInstance.objects.all().count() # # Available copies of books num_instances_available", "Available copies of books num_instances_available = \\ BookInstance.objects.filter(status__exact='a').count() num_authors = Author.objects.count() # The", "view for a list of authors.\"\"\" model = Author paginate_by = 2 class", "list of books.\"\"\" model = Book paginate_by = 2 class 
BookDetailView(generic.DetailView): \"\"\"Generic class-based", "'all()' is implied by default. # Number of visits to this view, as", "listing all books # on loan. Only visible to users with can_mark_returned permission.\"\"\"", "UpdateView): model = Book fields = '__all__' permission_required = 'catalog.can_mark_returned' class BookDelete(PermissionRequiredMixin, DeleteView):", "@permission_required('catalog.can_mark_returned') class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView): # Generic class-based view listing all books # on", "permission_required = 'catalog.can_mark_returned' class AuthorUpdate(PermissionRequiredMixin, UpdateView): model = Author fields = ['first_name', 'last_name',", "class-based list view for a list of authors.\"\"\" model = Author paginate_by =", "for an author.\"\"\" model = Author @permission_required('catalog.can_mark_returned') class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView): # Generic class-based", "# Number of visits to this view, as counted in the session variable.", "of site.\"\"\" # Generate counts of some of the main objects num_books =", "Render the HTML template index.html # with the data in the context variable.", "implied by default. 
# Number of visits to this view, as counted in", "2 class BookDetailView(generic.DetailView): \"\"\"Generic class-based detail view for a book.\"\"\" model = Book", "class BookUpdate(PermissionRequiredMixin, UpdateView): model = Book fields = '__all__' permission_required = 'catalog.can_mark_returned' class", "'num_instances_available': num_instances_available, 'num_authors': num_authors, } ) class BookListView(generic.ListView): \"\"\"Generic class-based view for a", "'date_of_birth', 'date_of_death'] permission_required = 'catalog.can_mark_returned' class AuthorDelete(PermissionRequiredMixin, DeleteView): model = Author success_url =", "= num_visits + 1 # Render the HTML template index.html # with the", "= Author @permission_required('catalog.can_mark_returned') class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView): # Generic class-based view listing all books", "forms challenge class BookCreate(PermissionRequiredMixin, CreateView): model = Book fields = '__all__' permission_required =", "+ 1 # Render the HTML template index.html # with the data in", "model = Author paginate_by = 2 class AuthorDetailView(generic.DetailView): \"\"\"Generic class-based detail view for", "Book fields = '__all__' permission_required = 'catalog.can_mark_returned' class BookDelete(PermissionRequiredMixin, DeleteView): model = Book", "a book.\"\"\" model = Book class AuthorListView(generic.ListView): \"\"\"Generic class-based list view for a", "detail view for an author.\"\"\" model = Author @permission_required('catalog.can_mark_returned') class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView): #", "'catalog/bookinstance_list_borrowed_all.html' paginate_by = 2 class AuthorCreate(PermissionRequiredMixin, CreateView): model = Author fields = '__all__'", "= 'catalog.can_mark_returned' class AuthorDelete(PermissionRequiredMixin, DeleteView): model = Author success_url = reverse_lazy('authors') permission_required =", "num_books, 'num_instances': 
num_instances, 'num_instances_available': num_instances_available, 'num_authors': num_authors, } ) class BookListView(generic.ListView): \"\"\"Generic class-based", "paginate_by = 2 class BookDetailView(generic.DetailView): \"\"\"Generic class-based detail view for a book.\"\"\" model", "= Author success_url = reverse_lazy('authors') permission_required = 'catalog.can_mark_returned' # Classes created for the", "from django.contrib.auth.decorators import permission_required from django.views.generic.edit import CreateView, UpdateView, DeleteView from django.urls import", "Author @permission_required('catalog.can_mark_returned') class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView): # Generic class-based view listing all books #", "= Author.objects.count() # The 'all()' is implied by default. # Number of visits", "for a book.\"\"\" model = Book class AuthorListView(generic.ListView): \"\"\"Generic class-based list view for", "= '__all__' permission_required = 'catalog.can_mark_returned' class BookDelete(PermissionRequiredMixin, DeleteView): model = Book success_url =", "books.\"\"\" model = Book paginate_by = 2 class BookDetailView(generic.DetailView): \"\"\"Generic class-based detail view", "BookInstance.objects.filter(status__exact='a').count() num_authors = Author.objects.count() # The 'all()' is implied by default. 
# Number", "from django.urls import reverse import datetime from django.contrib.auth.decorators import permission_required from django.views.generic.edit import", "index(request): \"\"\"View function for home page of site.\"\"\" # Generate counts of some", "AuthorDetailView(generic.DetailView): \"\"\"Generic class-based detail view for an author.\"\"\" model = Author @permission_required('catalog.can_mark_returned') class", "AuthorListView(generic.ListView): \"\"\"Generic class-based list view for a list of authors.\"\"\" model = Author", "function for home page of site.\"\"\" # Generate counts of some of the", "from django.shortcuts import get_object_or_404 # from django.http import HttpResponseRedirect # from django.urls import", "initial = {'date_of_death': '05/01/2018'} permission_required = 'catalog.can_mark_returned' class AuthorUpdate(PermissionRequiredMixin, UpdateView): model = Author", "model = Author fields = ['first_name', 'last_name', 'date_of_birth', 'date_of_death'] permission_required = 'catalog.can_mark_returned' class", "permission.\"\"\" model = BookInstance permission_required = 'catalog.can_mark_returned' template_name = 'catalog/bookinstance_list_borrowed_all.html' paginate_by = 2", "book.\"\"\" model = Book class AuthorListView(generic.ListView): \"\"\"Generic class-based list view for a list", "django.http import HttpResponseRedirect # from django.urls import reverse import datetime from django.contrib.auth.decorators import", "render(request,'index.html',context={ 'num_books': num_books, 'num_instances': num_instances, 'num_instances_available': num_instances_available, 'num_authors': num_authors, } ) class BookListView(generic.ListView):", "list of authors.\"\"\" model = Author paginate_by = 2 class AuthorDetailView(generic.DetailView): \"\"\"Generic class-based", "'__all__' permission_required = 'catalog.can_mark_returned' class BookDelete(PermissionRequiredMixin, DeleteView): model = Book success_url = reverse_lazy('books')", 
class BookCreate(PermissionRequiredMixin, CreateView):
    """Form view for creating a Book; requires can_mark_returned."""
    model = Book
    fields = '__all__'
    permission_required = 'catalog.can_mark_returned'
return render(request,'index.html',context={ 'num_books': num_books, 'num_instances': num_instances, 'num_instances_available': num_instances_available, 'num_authors': num_authors,", "BookInstance.objects.all().count() # # Available copies of books num_instances_available = \\ BookInstance.objects.filter(status__exact='a').count() num_authors =", "view for an author.\"\"\" model = Author @permission_required('catalog.can_mark_returned') class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView): # Generic", "datetime from django.contrib.auth.decorators import permission_required from django.views.generic.edit import CreateView, UpdateView, DeleteView from django.urls", "by default. # Number of visits to this view, as counted in the", "Author, BookInstance from django.views import generic from django.contrib.auth.mixins import PermissionRequiredMixin def index(request): \"\"\"View", "reverse import datetime from django.contrib.auth.decorators import permission_required from django.views.generic.edit import CreateView, UpdateView, DeleteView", "the HTML template index.html # with the data in the context variable. return", "Author fields = ['first_name', 'last_name', 'date_of_birth', 'date_of_death'] permission_required = 'catalog.can_mark_returned' class AuthorDelete(PermissionRequiredMixin, DeleteView):", "CreateView): model = Author fields = '__all__' initial = {'date_of_death': '05/01/2018'} permission_required =", "this view, as counted in the session variable. 
class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView):
    """Generic class-based view listing all books on loan.

    Only visible to users with can_mark_returned permission, enforced by
    PermissionRequiredMixin with the ``permission_required`` attribute.

    Fix: the function-view decorator ``@permission_required(...)`` that was
    applied to this class has been removed.  That decorator wraps its target
    in a view *function*, so decorating a class breaks ``.as_view()`` in the
    URLconf — and it duplicated the mixin-based check already present here.

    NOTE(review): despite the description, no ``get_queryset`` filter
    restricts this list to on-loan copies — it currently lists every
    BookInstance.  Confirm whether a ``status__exact='o'`` filter is needed.
    """
    model = BookInstance
    permission_required = 'catalog.can_mark_returned'
    template_name = 'catalog/bookinstance_list_borrowed_all.html'
    paginate_by = 2
BookInstance.objects.all().count() #", "num_instances_available = \\ BookInstance.objects.filter(status__exact='a').count() num_authors = Author.objects.count() # The 'all()' is implied by", "from django.views import generic from django.contrib.auth.mixins import PermissionRequiredMixin def index(request): \"\"\"View function for", "challenge class BookCreate(PermissionRequiredMixin, CreateView): model = Book fields = '__all__' permission_required = 'catalog.can_mark_returned'", ".models import Book, Author, BookInstance from django.views import generic from django.contrib.auth.mixins import PermissionRequiredMixin", "generic from django.contrib.auth.mixins import PermissionRequiredMixin def index(request): \"\"\"View function for home page of", "= BookInstance permission_required = 'catalog.can_mark_returned' template_name = 'catalog/bookinstance_list_borrowed_all.html' paginate_by = 2 class AuthorCreate(PermissionRequiredMixin,", "class BookCreate(PermissionRequiredMixin, CreateView): model = Book fields = '__all__' permission_required = 'catalog.can_mark_returned' class", "template_name = 'catalog/bookinstance_list_borrowed_all.html' paginate_by = 2 class AuthorCreate(PermissionRequiredMixin, CreateView): model = Author fields", "Book.objects.all().count() num_instances = BookInstance.objects.all().count() # # Available copies of books num_instances_available = \\", "in the context variable. 
return render(request,'index.html',context={ 'num_books': num_books, 'num_instances': num_instances, 'num_instances_available': num_instances_available, 'num_authors':", "Book, Author, BookInstance from django.views import generic from django.contrib.auth.mixins import PermissionRequiredMixin def index(request):", "render # # from django.shortcuts import get_object_or_404 # from django.http import HttpResponseRedirect #", "= 'catalog.can_mark_returned' template_name = 'catalog/bookinstance_list_borrowed_all.html' paginate_by = 2 class AuthorCreate(PermissionRequiredMixin, CreateView): model =", "import PermissionRequiredMixin def index(request): \"\"\"View function for home page of site.\"\"\" # Generate", "the session variable. # num_visits = request.session.get('num_visits', 0) # request.session['num_visits'] = num_visits +", "UpdateView): model = Author fields = ['first_name', 'last_name', 'date_of_birth', 'date_of_death'] permission_required = 'catalog.can_mark_returned'", "django.urls import reverse_lazy from .models import Book, Author, BookInstance from django.views import generic", "'last_name', 'date_of_birth', 'date_of_death'] permission_required = 'catalog.can_mark_returned' class AuthorDelete(PermissionRequiredMixin, DeleteView): model = Author success_url", "= 2 class BookDetailView(generic.DetailView): \"\"\"Generic class-based detail view for a book.\"\"\" model =", "on loan. 
Only visible to users with can_mark_returned permission.\"\"\" model = BookInstance permission_required", "model = Author @permission_required('catalog.can_mark_returned') class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView): # Generic class-based view listing all", "= Book fields = '__all__' permission_required = 'catalog.can_mark_returned' class BookUpdate(PermissionRequiredMixin, UpdateView): model =", "paginate_by = 2 class AuthorDetailView(generic.DetailView): \"\"\"Generic class-based detail view for an author.\"\"\" model", "with can_mark_returned permission.\"\"\" model = BookInstance permission_required = 'catalog.can_mark_returned' template_name = 'catalog/bookinstance_list_borrowed_all.html' paginate_by", "generic.ListView): # Generic class-based view listing all books # on loan. Only visible", "django.urls import reverse import datetime from django.contrib.auth.decorators import permission_required from django.views.generic.edit import CreateView,", "'catalog.can_mark_returned' class AuthorDelete(PermissionRequiredMixin, DeleteView): model = Author success_url = reverse_lazy('authors') permission_required = 'catalog.can_mark_returned'", "books # on loan. Only visible to users with can_mark_returned permission.\"\"\" model =", "paginate_by = 2 class AuthorCreate(PermissionRequiredMixin, CreateView): model = Author fields = '__all__' initial", "page of site.\"\"\" # Generate counts of some of the main objects num_books", "permission_required = 'catalog.can_mark_returned' class BookDelete(PermissionRequiredMixin, DeleteView): model = Book success_url = reverse_lazy('books') permission_required", "permission_required = 'catalog.can_mark_returned' # Classes created for the forms challenge class BookCreate(PermissionRequiredMixin, CreateView):", "index.html # with the data in the context variable. 
return render(request,'index.html',context={ 'num_books': num_books,", "class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView): # Generic class-based view listing all books # on loan.", "fields = '__all__' initial = {'date_of_death': '05/01/2018'} permission_required = 'catalog.can_mark_returned' class AuthorUpdate(PermissionRequiredMixin, UpdateView):", "template index.html # with the data in the context variable. return render(request,'index.html',context={ 'num_books':", "Book paginate_by = 2 class BookDetailView(generic.DetailView): \"\"\"Generic class-based detail view for a book.\"\"\"", "django.contrib.auth.mixins import PermissionRequiredMixin def index(request): \"\"\"View function for home page of site.\"\"\" #", "# Render the HTML template index.html # with the data in the context", "= 'catalog.can_mark_returned' class BookUpdate(PermissionRequiredMixin, UpdateView): model = Book fields = '__all__' permission_required =", "\\ BookInstance.objects.filter(status__exact='a').count() num_authors = Author.objects.count() # The 'all()' is implied by default. #", "author.\"\"\" model = Author @permission_required('catalog.can_mark_returned') class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView): # Generic class-based view listing", "\"\"\"Generic class-based detail view for a book.\"\"\" model = Book class AuthorListView(generic.ListView): \"\"\"Generic", "'05/01/2018'} permission_required = 'catalog.can_mark_returned' class AuthorUpdate(PermissionRequiredMixin, UpdateView): model = Author fields = ['first_name',", "context variable. return render(request,'index.html',context={ 'num_books': num_books, 'num_instances': num_instances, 'num_instances_available': num_instances_available, 'num_authors': num_authors, }", "= BookInstance.objects.all().count() # # Available copies of books num_instances_available = \\ BookInstance.objects.filter(status__exact='a').count() num_authors", "counted in the session variable. 
# num_visits = request.session.get('num_visits', 0) # request.session['num_visits'] =", "visits to this view, as counted in the session variable. # num_visits =", "import CreateView, UpdateView, DeleteView from django.urls import reverse_lazy from .models import Book, Author,", "= Author fields = '__all__' initial = {'date_of_death': '05/01/2018'} permission_required = 'catalog.can_mark_returned' class", "= Book fields = '__all__' permission_required = 'catalog.can_mark_returned' class BookDelete(PermissionRequiredMixin, DeleteView): model =", "from django.views.generic.edit import CreateView, UpdateView, DeleteView from django.urls import reverse_lazy from .models import", "a list of authors.\"\"\" model = Author paginate_by = 2 class AuthorDetailView(generic.DetailView): \"\"\"Generic", "num_authors, } ) class BookListView(generic.ListView): \"\"\"Generic class-based view for a list of books.\"\"\"", "class AuthorDetailView(generic.DetailView): \"\"\"Generic class-based detail view for an author.\"\"\" model = Author @permission_required('catalog.can_mark_returned')", "BookListView(generic.ListView): \"\"\"Generic class-based view for a list of books.\"\"\" model = Book paginate_by", "as counted in the session variable. # num_visits = request.session.get('num_visits', 0) # request.session['num_visits']", "DeleteView): model = Author success_url = reverse_lazy('authors') permission_required = 'catalog.can_mark_returned' # Classes created", "all books # on loan. 
Only visible to users with can_mark_returned permission.\"\"\" model", "num_instances = BookInstance.objects.all().count() # # Available copies of books num_instances_available = \\ BookInstance.objects.filter(status__exact='a').count()", "import generic from django.contrib.auth.mixins import PermissionRequiredMixin def index(request): \"\"\"View function for home page", "reverse_lazy('authors') permission_required = 'catalog.can_mark_returned' # Classes created for the forms challenge class BookCreate(PermissionRequiredMixin,", "# Available copies of books num_instances_available = \\ BookInstance.objects.filter(status__exact='a').count() num_authors = Author.objects.count() #", "# from django.urls import reverse import datetime from django.contrib.auth.decorators import permission_required from django.views.generic.edit", "class-based view for a list of books.\"\"\" model = Book paginate_by = 2", "class AuthorCreate(PermissionRequiredMixin, CreateView): model = Author fields = '__all__' initial = {'date_of_death': '05/01/2018'}", "success_url = reverse_lazy('authors') permission_required = 'catalog.can_mark_returned' # Classes created for the forms challenge", "class AuthorListView(generic.ListView): \"\"\"Generic class-based list view for a list of authors.\"\"\" model =", "view for a list of books.\"\"\" model = Book paginate_by = 2 class", "loan. Only visible to users with can_mark_returned permission.\"\"\" model = BookInstance permission_required =", "num_visits = request.session.get('num_visits', 0) # request.session['num_visits'] = num_visits + 1 # Render the", "variable. # num_visits = request.session.get('num_visits', 0) # request.session['num_visits'] = num_visits + 1 #", "model = Book class AuthorListView(generic.ListView): \"\"\"Generic class-based list view for a list of", "HTML template index.html # with the data in the context variable. 
return render(request,'index.html',context={", "\"\"\"Generic class-based view for a list of books.\"\"\" model = Book paginate_by =", "model = BookInstance permission_required = 'catalog.can_mark_returned' template_name = 'catalog/bookinstance_list_borrowed_all.html' paginate_by = 2 class", "created for the forms challenge class BookCreate(PermissionRequiredMixin, CreateView): model = Book fields =", "detail view for a book.\"\"\" model = Book class AuthorListView(generic.ListView): \"\"\"Generic class-based list", "# The 'all()' is implied by default. # Number of visits to this", "counts of some of the main objects num_books = Book.objects.all().count() num_instances = BookInstance.objects.all().count()", "Classes created for the forms challenge class BookCreate(PermissionRequiredMixin, CreateView): model = Book fields", "model = Book fields = '__all__' permission_required = 'catalog.can_mark_returned' class BookUpdate(PermissionRequiredMixin, UpdateView): model", "# request.session['num_visits'] = num_visits + 1 # Render the HTML template index.html #", "Only visible to users with can_mark_returned permission.\"\"\" model = BookInstance permission_required = 'catalog.can_mark_returned'", "from .models import Book, Author, BookInstance from django.views import generic from django.contrib.auth.mixins import", "Author paginate_by = 2 class AuthorDetailView(generic.DetailView): \"\"\"Generic class-based detail view for an author.\"\"\"", "model = Book paginate_by = 2 class BookDetailView(generic.DetailView): \"\"\"Generic class-based detail view for", "variable. 
return render(request,'index.html',context={ 'num_books': num_books, 'num_instances': num_instances, 'num_instances_available': num_instances_available, 'num_authors': num_authors, } )", "of books num_instances_available = \\ BookInstance.objects.filter(status__exact='a').count() num_authors = Author.objects.count() # The 'all()' is", "an author.\"\"\" model = Author @permission_required('catalog.can_mark_returned') class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView): # Generic class-based view", "return render(request,'index.html',context={ 'num_books': num_books, 'num_instances': num_instances, 'num_instances_available': num_instances_available, 'num_authors': num_authors, } ) class", "= Book.objects.all().count() num_instances = BookInstance.objects.all().count() # # Available copies of books num_instances_available =", "0) # request.session['num_visits'] = num_visits + 1 # Render the HTML template index.html", "= 'catalog.can_mark_returned' class BookDelete(PermissionRequiredMixin, DeleteView): model = Book success_url = reverse_lazy('books') permission_required =", "class BookListView(generic.ListView): \"\"\"Generic class-based view for a list of books.\"\"\" model = Book", "for the forms challenge class BookCreate(PermissionRequiredMixin, CreateView): model = Book fields = '__all__'", "session variable. # num_visits = request.session.get('num_visits', 0) # request.session['num_visits'] = num_visits + 1", "'__all__' permission_required = 'catalog.can_mark_returned' class BookUpdate(PermissionRequiredMixin, UpdateView): model = Book fields = '__all__'", "request.session['num_visits'] = num_visits + 1 # Render the HTML template index.html # with", "model = Author success_url = reverse_lazy('authors') permission_required = 'catalog.can_mark_returned' # Classes created for", "a list of books.\"\"\" model = Book paginate_by = 2 class BookDetailView(generic.DetailView): \"\"\"Generic", "The 'all()' is implied by default. 
# Number of visits to this view,", "# with the data in the context variable. return render(request,'index.html',context={ 'num_books': num_books, 'num_instances':", "# # Available copies of books num_instances_available = \\ BookInstance.objects.filter(status__exact='a').count() num_authors = Author.objects.count()", "# on loan. Only visible to users with can_mark_returned permission.\"\"\" model = BookInstance", "users with can_mark_returned permission.\"\"\" model = BookInstance permission_required = 'catalog.can_mark_returned' template_name = 'catalog/bookinstance_list_borrowed_all.html'", "permission_required = 'catalog.can_mark_returned' class BookUpdate(PermissionRequiredMixin, UpdateView): model = Book fields = '__all__' permission_required", "class-based view listing all books # on loan. Only visible to users with", "LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView): # Generic class-based view listing all books # on loan. Only", "BookInstance from django.views import generic from django.contrib.auth.mixins import PermissionRequiredMixin def index(request): \"\"\"View function", "= {'date_of_death': '05/01/2018'} permission_required = 'catalog.can_mark_returned' class AuthorUpdate(PermissionRequiredMixin, UpdateView): model = Author fields", "2 class AuthorDetailView(generic.DetailView): \"\"\"Generic class-based detail view for an author.\"\"\" model = Author", "'catalog.can_mark_returned' class BookUpdate(PermissionRequiredMixin, UpdateView): model = Book fields = '__all__' permission_required = 'catalog.can_mark_returned'", "view, as counted in the session variable. 
# num_visits = request.session.get('num_visits', 0) #", "to users with can_mark_returned permission.\"\"\" model = BookInstance permission_required = 'catalog.can_mark_returned' template_name =", "'date_of_death'] permission_required = 'catalog.can_mark_returned' class AuthorDelete(PermissionRequiredMixin, DeleteView): model = Author success_url = reverse_lazy('authors')", "the forms challenge class BookCreate(PermissionRequiredMixin, CreateView): model = Book fields = '__all__' permission_required", "class-based detail view for an author.\"\"\" model = Author @permission_required('catalog.can_mark_returned') class LoanedBooksAllListView(PermissionRequiredMixin, generic.ListView):", "to this view, as counted in the session variable. # num_visits = request.session.get('num_visits',", "HttpResponseRedirect # from django.urls import reverse import datetime from django.contrib.auth.decorators import permission_required from", "Author.objects.count() # The 'all()' is implied by default. # Number of visits to", "# Generic class-based view listing all books # on loan. Only visible to", "Generic class-based view listing all books # on loan. 
Only visible to users", "Generate counts of some of the main objects num_books = Book.objects.all().count() num_instances =", "model = Book fields = '__all__' permission_required = 'catalog.can_mark_returned' class BookDelete(PermissionRequiredMixin, DeleteView): model", "= 'catalog/bookinstance_list_borrowed_all.html' paginate_by = 2 class AuthorCreate(PermissionRequiredMixin, CreateView): model = Author fields =", "= 'catalog.can_mark_returned' class AuthorUpdate(PermissionRequiredMixin, UpdateView): model = Author fields = ['first_name', 'last_name', 'date_of_birth',", "= \\ BookInstance.objects.filter(status__exact='a').count() num_authors = Author.objects.count() # The 'all()' is implied by default.", "home page of site.\"\"\" # Generate counts of some of the main objects", "'catalog.can_mark_returned' template_name = 'catalog/bookinstance_list_borrowed_all.html' paginate_by = 2 class AuthorCreate(PermissionRequiredMixin, CreateView): model = Author", "request.session.get('num_visits', 0) # request.session['num_visits'] = num_visits + 1 # Render the HTML template", "get_object_or_404 # from django.http import HttpResponseRedirect # from django.urls import reverse import datetime", "CreateView, UpdateView, DeleteView from django.urls import reverse_lazy from .models import Book, Author, BookInstance", "from django.http import HttpResponseRedirect # from django.urls import reverse import datetime from django.contrib.auth.decorators", "permission_required = 'catalog.can_mark_returned' template_name = 'catalog/bookinstance_list_borrowed_all.html' paginate_by = 2 class AuthorCreate(PermissionRequiredMixin, CreateView): model", "'catalog.can_mark_returned' # Classes created for the forms challenge class BookCreate(PermissionRequiredMixin, CreateView): model =", "fields = ['first_name', 'last_name', 'date_of_birth', 'date_of_death'] permission_required = 'catalog.can_mark_returned' class AuthorDelete(PermissionRequiredMixin, DeleteView): model", "= Author paginate_by 
= 2 class AuthorDetailView(generic.DetailView): \"\"\"Generic class-based detail view for an", "reverse_lazy from .models import Book, Author, BookInstance from django.views import generic from django.contrib.auth.mixins", "num_authors = Author.objects.count() # The 'all()' is implied by default. # Number of", "['first_name', 'last_name', 'date_of_birth', 'date_of_death'] permission_required = 'catalog.can_mark_returned' class AuthorDelete(PermissionRequiredMixin, DeleteView): model = Author", "default. # Number of visits to this view, as counted in the session" ]
[ "\"MessageBody\": str(json.dumps(message_dict)), } messages.append(message) count += 1 # Send 10 messages per time", "the report, therefore limit the number of messages to be sent #### example", "scene_path in gzip.decompress(missing_scene_file_gzip).decode(\"utf-8\").split(\"\\n\") if scene_path ] logging.info(f\"Number of scenes found {len(missing_scene_paths)}\") logging.info(f\"Example scenes:", "as value. * The option scenes_limit limit the number of scenes to be", "sent #### example conf in json format { \"scenes_limit\":10 } \"\"\" import gzip", "Optional[int] = None) -> None: \"\"\" Function to retrieve the latest gap report", "try: logging.info(\"Looking for latest report\") latest_report = find_latest_report(landsat=landsat) logging.info(f\"Latest report found {latest_report}\") if", "name :return:(str) return the latest report file name \"\"\" continuation_token = None list_reports", "from airflow.operators.python_operator import PythonOperator from odc.aws.queue import publish_messages from infra.connections import CONN_LANDSAT_SYNC from", "read from the report, therefore limit the number of messages to be sent", "import task_fail_slack_alert, task_success_slack_alert from utils.aws_utils import S3 REPORTING_PREFIX = \"status-report/\" # This process", "update_stac=update_stac ) logging.info(\"Publishing messages\") post_messages(message_list=messages_to_send) except Exception as error: logging.error(error) # print traceback", "import PythonOperator from odc.aws.queue import publish_messages from infra.connections import CONN_LANDSAT_SYNC from infra.s3_buckets import", "import datetime from typing import Optional from airflow import DAG from airflow.contrib.hooks.aws_sqs_hook import", "\"\"\" count = 0 messages = [] sqs_conn = SQSHook(aws_conn_id=CONN_LANDSAT_SYNC) sqs_hook = sqs_conn.get_resource_type(", "process is manually run SCHEDULE_INTERVAL = None default_args = { \"owner\": \"RODRIGO\", \"start_date\":", "REPORTING_PREFIX = \"status-report/\" 
# This process is manually run SCHEDULE_INTERVAL = None default_args", "manually run SCHEDULE_INTERVAL = None default_args = { \"owner\": \"RODRIGO\", \"start_date\": datetime(2021, 6,", "report, therefore limit the number of messages to be sent #### example conf", "the last messages if there are any if len(messages) > 0: publish_messages(queue, messages)", ":param scenes_limit:(str) limit of how many scenes will be filled :return:(None) \"\"\" try:", "= find_latest_report(landsat=landsat) logging.info(f\"Latest report found {latest_report}\") if not latest_report: logging.error(\"Report not found\") raise", "str(path.strip(\"/\").split(\"/\")[-1]) if not landsat_product_id: raise Exception(f'It was not possible to build product ID", "message_list: message = { \"Id\": str(count), \"MessageBody\": str(json.dumps(message_dict)), } messages.append(message) count += 1", "therefore limit the number of messages to be sent #### example conf in", "latest_report: logging.info('FORCED UPDATE FLAGGED!') update_stac = True messages_to_send = build_message( missing_scene_paths=missing_scene_paths, update_stac=update_stac )", ":return:(str) return the latest report file name \"\"\" continuation_token = None list_reports =", "None) -> None: \"\"\" Function to retrieve the latest gap report and create", "DAG from airflow.contrib.hooks.aws_sqs_hook import SQSHook from airflow.operators.python_operator import PythonOperator from odc.aws.queue import publish_messages", "utility.utility_slackoperator import task_fail_slack_alert, task_success_slack_alert from utils.aws_utils import S3 REPORTING_PREFIX = \"status-report/\" # This", "{ \"landsat_product_id\": landsat_product_id, \"s3_location\": str(path), \"update_stac\": update_stac } } ) return message_list def", "\"landsat_product_id\": landsat_product_id, \"s3_location\": str(path), \"update_stac\": update_stac } } ) return message_list def fill_the_gap(landsat:", "= [ \"landsat_8\", \"landsat_7\", \"landsat_5\" ] for sat in 
satellites: PROCESSES.append( PythonOperator( task_id=f\"{sat}_fill_the_gap\",", "is paginated, returning up to 1000 keys at a time. if resp.get(\"NextContinuationToken\"): continuation_token", "build_message(missing_scene_paths, update_stac): \"\"\" \"\"\" message_list = [] for path in missing_scene_paths: landsat_product_id =", "missing_scene_paths=missing_scene_paths, update_stac=update_stac ) logging.info(\"Publishing messages\") post_messages(message_list=messages_to_send) except Exception as error: logging.error(error) # print", "are any if len(messages) > 0: publish_messages(queue, messages) logging.info(f\"{count} messages sent successfully\") def", "len(messages) > 0: publish_messages(queue, messages) logging.info(f\"{count} messages sent successfully\") def find_latest_report(landsat: str) ->", "configurations `scenes_limit`, which receives a INT as value. * The option scenes_limit limit", "\"email\": [\"<EMAIL>\"], \"email_on_failure\": True, \"email_on_success\": True, \"email_on_retry\": False, \"retries\": 0, \"version\": \"0.0.1\", \"on_failure_callback\":", ":return:(None) \"\"\" count = 0 messages = [] sqs_conn = SQSHook(aws_conn_id=CONN_LANDSAT_SYNC) sqs_hook =", "import LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME from infra.variables import REGION from landsat_scenes_sync.variables import STATUS_REPORT_FOLDER_NAME from utility.utility_slackoperator import", "= [] sqs_conn = SQSHook(aws_conn_id=CONN_LANDSAT_SYNC) sqs_hook = sqs_conn.get_resource_type( resource_type=\"sqs\", region_name=REGION ) queue =", "return the latest report file name \"\"\" continuation_token = None list_reports = []", "resp[\"NextContinuationToken\"] else: break list_reports.sort() return list_reports[-1] if list_reports else \"\" def build_message(missing_scene_paths, update_stac):", "= [] for path in missing_scene_paths: landsat_product_id = str(path.strip(\"/\").split(\"/\")[-1]) if not landsat_product_id: raise", "-> str: \"\"\" Function to find the latest gap report :param 
landsat:(str)satellite name", "CONN_LANDSAT_SYNC from infra.s3_buckets import LANDSAT_SYNC_BUCKET_NAME from infra.sqs_queues import LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME from infra.variables import REGION", "if not resp.get(\"Contents\"): raise Exception( f\"Report not found at \" f\"{LANDSAT_SYNC_BUCKET_NAME}/{STATUS_REPORT_FOLDER_NAME}/\" f\" -", "any if len(messages) > 0: publish_messages(queue, messages) logging.info(f\"{count} messages sent successfully\") def find_latest_report(landsat:", "name \"\"\" continuation_token = None list_reports = [] while True: s3 = S3(conn_id=CONN_LANDSAT_SYNC)", "obj[\"Key\"] and \"orphaned\" not in obj[\"Key\"] ] ) # The S3 API is", "Publish messages :param message_list:(list) list of messages :return:(None) \"\"\" count = 0 messages", "list_reports = [] while True: s3 = S3(conn_id=CONN_LANDSAT_SYNC) resp = s3.list_objects( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION,", "def fill_the_gap(landsat: str, scenes_limit: Optional[int] = None) -> None: \"\"\" Function to retrieve", "True, \"email_on_retry\": False, \"retries\": 0, \"version\": \"0.0.1\", \"on_failure_callback\": task_fail_slack_alert, } def post_messages(message_list) ->", "in satellites: PROCESSES.append( PythonOperator( task_id=f\"{sat}_fill_the_gap\", python_callable=fill_the_gap, op_kwargs=dict(landsat=sat, scenes_limit=\"{{ dag_run.conf.scenes_limit }}\"), on_success_callback=task_success_slack_alert, ) )", "from odc.aws.queue import publish_messages from infra.connections import CONN_LANDSAT_SYNC from infra.s3_buckets import LANDSAT_SYNC_BUCKET_NAME from", "\"start_date\": datetime(2021, 6, 7), \"email\": [\"<EMAIL>\"], \"email_on_failure\": True, \"email_on_success\": True, \"email_on_retry\": False, \"retries\":", "= False if 'update' in latest_report: logging.info('FORCED UPDATE FLAGGED!') update_stac = True messages_to_send", "does not stop execution traceback.print_exc() raise error with DAG( \"landsat_scenes_fill_the_gap\", 
default_args=default_args, schedule_interval=SCHEDULE_INTERVAL, tags=[\"Landsat_scenes\",", "create messages to the filter queue process. :param landsat:(str) satellite name :param scenes_limit:(str)", "latest_report = find_latest_report(landsat=landsat) logging.info(f\"Latest report found {latest_report}\") if not latest_report: logging.error(\"Report not found\")", "obj[\"Key\"] for obj in resp[\"Contents\"] if landsat in obj[\"Key\"] and \"orphaned\" not in", "\"\"\" Function to retrieve the latest gap report and create messages to the", "if not landsat_product_id: raise Exception(f'It was not possible to build product ID from", "from landsat_scenes_sync.variables import STATUS_REPORT_FOLDER_NAME from utility.utility_slackoperator import task_fail_slack_alert, task_success_slack_alert from utils.aws_utils import S3", "resp[\"Contents\"] if landsat in obj[\"Key\"] and \"orphaned\" not in obj[\"Key\"] ] ) #", "for message_dict in message_list: message = { \"Id\": str(count), \"MessageBody\": str(json.dumps(message_dict)), } messages.append(message)", "just use Pandas. It's already a dependency. 
missing_scene_paths = [ scene_path for scene_path", "error with DAG( \"landsat_scenes_fill_the_gap\", default_args=default_args, schedule_interval=SCHEDULE_INTERVAL, tags=[\"Landsat_scenes\", \"fill the gap\"], catchup=False, ) as", "datetime import datetime from typing import Optional from airflow import DAG from airflow.contrib.hooks.aws_sqs_hook", "region_name=REGION ) queue = sqs_hook.get_queue_by_name(QueueName=LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME) logging.info(\"Sending messages\") for message_dict in message_list: message =", "was not possible to build product ID from path {path}') message_list.append( { \"Message\":", "messages.append(message) count += 1 # Send 10 messages per time if count %", "find_latest_report(landsat: str) -> str: \"\"\" Function to find the latest gap report :param", "will be filled :return:(None) \"\"\" try: logging.info(\"Looking for latest report\") latest_report = find_latest_report(landsat=landsat)", "dependency. missing_scene_paths = [ scene_path for scene_path in gzip.decompress(missing_scene_file_gzip).decode(\"utf-8\").split(\"\\n\") if scene_path ] logging.info(f\"Number", "RuntimeError(\"Report not found!\") else: logging.info(\"Reading missing scenes from the report\") s3 = S3(conn_id=CONN_LANDSAT_SYNC)", "not found at \" f\"{LANDSAT_SYNC_BUCKET_NAME}/{STATUS_REPORT_FOLDER_NAME}/\" f\" - returned {resp}\" ) list_reports.extend( [ obj[\"Key\"]", "and generate messages to fill missing scenes #### Utility utilization The DAG can", "s3 = S3(conn_id=CONN_LANDSAT_SYNC) missing_scene_file_gzip = s3.get_s3_contents_and_attributes( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, key=latest_report, ) # This should", "#### Utility utilization The DAG can be parameterized with run time configurations `scenes_limit`,", "found\") raise RuntimeError(\"Report not found!\") else: logging.info(\"Reading missing scenes from the report\") s3", "with run time configurations `scenes_limit`, which receives a INT as value. 
* The", "None list_reports = [] while True: s3 = S3(conn_id=CONN_LANDSAT_SYNC) resp = s3.list_objects( bucket_name=LANDSAT_SYNC_BUCKET_NAME,", "airflow.contrib.hooks.aws_sqs_hook import SQSHook from airflow.operators.python_operator import PythonOperator from odc.aws.queue import publish_messages from infra.connections", "resp.get(\"NextContinuationToken\"): continuation_token = resp[\"NextContinuationToken\"] else: break list_reports.sort() return list_reports[-1] if list_reports else \"\"", "messages) logging.info(f\"{count} messages sent successfully\") def find_latest_report(landsat: str) -> str: \"\"\" Function to", "format { \"scenes_limit\":10 } \"\"\" import gzip import json import logging import traceback", "update_stac): \"\"\" \"\"\" message_list = [] for path in missing_scene_paths: landsat_product_id = str(path.strip(\"/\").split(\"/\")[-1])", "in missing_scene_paths: landsat_product_id = str(path.strip(\"/\").split(\"/\")[-1]) if not landsat_product_id: raise Exception(f'It was not possible", "raise RuntimeError(\"Report not found!\") else: logging.info(\"Reading missing scenes from the report\") s3 =", "else: break list_reports.sort() return list_reports[-1] if list_reports else \"\" def build_message(missing_scene_paths, update_stac): \"\"\"", "queue process. 
:param landsat:(str) satellite name :param scenes_limit:(str) limit of how many scenes", "logging.info(f\"Limited: {'No limit' if scenes_limit else scenes_limit}\") if scenes_limit: missing_scene_paths = missing_scene_paths[:int(scenes_limit)] update_stac", "\"update_stac\": update_stac } } ) return message_list def fill_the_gap(landsat: str, scenes_limit: Optional[int] =", ":param landsat:(str)satellite name :return:(str) return the latest report file name \"\"\" continuation_token =", "logging.error(error) # print traceback but does not stop execution traceback.print_exc() raise error with", "satellite name :param scenes_limit:(str) limit of how many scenes will be filled :return:(None)", "number of scenes to be read from the report, therefore limit the number", "traceback but does not stop execution traceback.print_exc() raise error with DAG( \"landsat_scenes_fill_the_gap\", default_args=default_args,", "paginated, returning up to 1000 keys at a time. if resp.get(\"NextContinuationToken\"): continuation_token =", "to be sent #### example conf in json format { \"scenes_limit\":10 } \"\"\"", "'update' in latest_report: logging.info('FORCED UPDATE FLAGGED!') update_stac = True messages_to_send = build_message( missing_scene_paths=missing_scene_paths,", "= \"status-report/\" # This process is manually run SCHEDULE_INTERVAL = None default_args =", "logging.info(f\"Latest report found {latest_report}\") if not latest_report: logging.error(\"Report not found\") raise RuntimeError(\"Report not", "task_fail_slack_alert, } def post_messages(message_list) -> None: \"\"\" Publish messages :param message_list:(list) list of", "schedule_interval=SCHEDULE_INTERVAL, tags=[\"Landsat_scenes\", \"fill the gap\"], catchup=False, ) as dag: PROCESSES = [] satellites", "logging.info('FORCED UPDATE FLAGGED!') update_stac = True messages_to_send = build_message( missing_scene_paths=missing_scene_paths, update_stac=update_stac ) logging.info(\"Publishing", "import traceback from datetime 
import datetime from typing import Optional from airflow import", "[\"<EMAIL>\"], \"email_on_failure\": True, \"email_on_success\": True, \"email_on_retry\": False, \"retries\": 0, \"version\": \"0.0.1\", \"on_failure_callback\": task_fail_slack_alert,", "def build_message(missing_scene_paths, update_stac): \"\"\" \"\"\" message_list = [] for path in missing_scene_paths: landsat_product_id", "if not latest_report: logging.error(\"Report not found\") raise RuntimeError(\"Report not found!\") else: logging.info(\"Reading missing", "{ \"Message\": { \"landsat_product_id\": landsat_product_id, \"s3_location\": str(path), \"update_stac\": update_stac } } ) return", "obj[\"Key\"] ] ) # The S3 API is paginated, returning up to 1000", "report and create messages to the filter queue process. :param landsat:(str) satellite name", "messages = [] # Post the last messages if there are any if", "[] # Post the last messages if there are any if len(messages) >", "gzip.decompress(missing_scene_file_gzip).decode(\"utf-8\").split(\"\\n\") if scene_path ] logging.info(f\"Number of scenes found {len(missing_scene_paths)}\") logging.info(f\"Example scenes: {missing_scene_paths[0:10]}\") logging.info(f\"Limited:", "default_args = { \"owner\": \"RODRIGO\", \"start_date\": datetime(2021, 6, 7), \"email\": [\"<EMAIL>\"], \"email_on_failure\": True,", "{len(missing_scene_paths)}\") logging.info(f\"Example scenes: {missing_scene_paths[0:10]}\") logging.info(f\"Limited: {'No limit' if scenes_limit else scenes_limit}\") if scenes_limit:", "\"\"\" # Read report and generate messages to fill missing scenes #### Utility", "landsat_scenes_sync.variables import STATUS_REPORT_FOLDER_NAME from utility.utility_slackoperator import task_fail_slack_alert, task_success_slack_alert from utils.aws_utils import S3 REPORTING_PREFIX", "not latest_report: logging.error(\"Report not found\") raise RuntimeError(\"Report not found!\") else: logging.info(\"Reading missing scenes", "if landsat in obj[\"Key\"] and 
\"orphaned\" not in obj[\"Key\"] ] ) # The", "the report\") s3 = S3(conn_id=CONN_LANDSAT_SYNC) missing_scene_file_gzip = s3.get_s3_contents_and_attributes( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, key=latest_report, ) #", "Exception as error: logging.error(error) # print traceback but does not stop execution traceback.print_exc()", "time configurations `scenes_limit`, which receives a INT as value. * The option scenes_limit", "Read report and generate messages to fill missing scenes #### Utility utilization The", "\"\"\" continuation_token = None list_reports = [] while True: s3 = S3(conn_id=CONN_LANDSAT_SYNC) resp", "logging.info(\"Reading missing scenes from the report\") s3 = S3(conn_id=CONN_LANDSAT_SYNC) missing_scene_file_gzip = s3.get_s3_contents_and_attributes( bucket_name=LANDSAT_SYNC_BUCKET_NAME,", "missing scenes from the report\") s3 = S3(conn_id=CONN_LANDSAT_SYNC) missing_scene_file_gzip = s3.get_s3_contents_and_attributes( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION,", "at a time. if resp.get(\"NextContinuationToken\"): continuation_token = resp[\"NextContinuationToken\"] else: break list_reports.sort() return list_reports[-1]", "and create messages to the filter queue process. 
:param landsat:(str) satellite name :param", "task_success_slack_alert from utils.aws_utils import S3 REPORTING_PREFIX = \"status-report/\" # This process is manually", "many scenes will be filled :return:(None) \"\"\" try: logging.info(\"Looking for latest report\") latest_report", ") logging.info(\"Publishing messages\") post_messages(message_list=messages_to_send) except Exception as error: logging.error(error) # print traceback but", "of scenes to be read from the report, therefore limit the number of", "messages :return:(None) \"\"\" count = 0 messages = [] sqs_conn = SQSHook(aws_conn_id=CONN_LANDSAT_SYNC) sqs_hook", "sqs_conn = SQSHook(aws_conn_id=CONN_LANDSAT_SYNC) sqs_hook = sqs_conn.get_resource_type( resource_type=\"sqs\", region_name=REGION ) queue = sqs_hook.get_queue_by_name(QueueName=LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME) logging.info(\"Sending", "= { \"Id\": str(count), \"MessageBody\": str(json.dumps(message_dict)), } messages.append(message) count += 1 # Send", "use Pandas. It's already a dependency. missing_scene_paths = [ scene_path for scene_path in", "not found\") raise RuntimeError(\"Report not found!\") else: logging.info(\"Reading missing scenes from the report\")", "from typing import Optional from airflow import DAG from airflow.contrib.hooks.aws_sqs_hook import SQSHook from", "fill missing scenes #### Utility utilization The DAG can be parameterized with run", "Post the last messages if there are any if len(messages) > 0: publish_messages(queue,", "latest gap report :param landsat:(str)satellite name :return:(str) return the latest report file name", "the latest report file name \"\"\" continuation_token = None list_reports = [] while", "filter queue process. 
:param landsat:(str) satellite name :param scenes_limit:(str) limit of how many", "typing import Optional from airflow import DAG from airflow.contrib.hooks.aws_sqs_hook import SQSHook from airflow.operators.python_operator", "name :param scenes_limit:(str) limit of how many scenes will be filled :return:(None) \"\"\"", "\"fill the gap\"], catchup=False, ) as dag: PROCESSES = [] satellites = [", "region=REGION, prefix=f\"{STATUS_REPORT_FOLDER_NAME}/\", continuation_token=continuation_token, ) if not resp.get(\"Contents\"): raise Exception( f\"Report not found at", "in latest_report: logging.info('FORCED UPDATE FLAGGED!') update_stac = True messages_to_send = build_message( missing_scene_paths=missing_scene_paths, update_stac=update_stac", "= resp[\"NextContinuationToken\"] else: break list_reports.sort() return list_reports[-1] if list_reports else \"\" def build_message(missing_scene_paths,", "task_fail_slack_alert, task_success_slack_alert from utils.aws_utils import S3 REPORTING_PREFIX = \"status-report/\" # This process is", "import DAG from airflow.contrib.hooks.aws_sqs_hook import SQSHook from airflow.operators.python_operator import PythonOperator from odc.aws.queue import", "{ \"owner\": \"RODRIGO\", \"start_date\": datetime(2021, 6, 7), \"email\": [\"<EMAIL>\"], \"email_on_failure\": True, \"email_on_success\": True,", "s3 = S3(conn_id=CONN_LANDSAT_SYNC) resp = s3.list_objects( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, prefix=f\"{STATUS_REPORT_FOLDER_NAME}/\", continuation_token=continuation_token, ) if not", "continuation_token=continuation_token, ) if not resp.get(\"Contents\"): raise Exception( f\"Report not found at \" f\"{LANDSAT_SYNC_BUCKET_NAME}/{STATUS_REPORT_FOLDER_NAME}/\"", "{'No limit' if scenes_limit else scenes_limit}\") if scenes_limit: missing_scene_paths = missing_scene_paths[:int(scenes_limit)] update_stac =", "if there are any if len(messages) > 0: publish_messages(queue, messages) logging.info(f\"{count} messages sent", 
"except Exception as error: logging.error(error) # print traceback but does not stop execution", "limit the number of messages to be sent #### example conf in json", "latest_report: logging.error(\"Report not found\") raise RuntimeError(\"Report not found!\") else: logging.info(\"Reading missing scenes from", "infra.s3_buckets import LANDSAT_SYNC_BUCKET_NAME from infra.sqs_queues import LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME from infra.variables import REGION from landsat_scenes_sync.variables", "else scenes_limit}\") if scenes_limit: missing_scene_paths = missing_scene_paths[:int(scenes_limit)] update_stac = False if 'update' in", "as error: logging.error(error) # print traceback but does not stop execution traceback.print_exc() raise", "resp.get(\"Contents\"): raise Exception( f\"Report not found at \" f\"{LANDSAT_SYNC_BUCKET_NAME}/{STATUS_REPORT_FOLDER_NAME}/\" f\" - returned {resp}\"", "The S3 API is paginated, returning up to 1000 keys at a time.", "missing_scene_paths[:int(scenes_limit)] update_stac = False if 'update' in latest_report: logging.info('FORCED UPDATE FLAGGED!') update_stac =", "product ID from path {path}') message_list.append( { \"Message\": { \"landsat_product_id\": landsat_product_id, \"s3_location\": str(path),", "conf in json format { \"scenes_limit\":10 } \"\"\" import gzip import json import", "import publish_messages from infra.connections import CONN_LANDSAT_SYNC from infra.s3_buckets import LANDSAT_SYNC_BUCKET_NAME from infra.sqs_queues import", "# The S3 API is paginated, returning up to 1000 keys at a", "7), \"email\": [\"<EMAIL>\"], \"email_on_failure\": True, \"email_on_success\": True, \"email_on_retry\": False, \"retries\": 0, \"version\": \"0.0.1\",", "#### example conf in json format { \"scenes_limit\":10 } \"\"\" import gzip import", "= sqs_hook.get_queue_by_name(QueueName=LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME) logging.info(\"Sending messages\") for message_dict in message_list: message = { \"Id\": str(count),", "missing scenes 
#### Utility utilization The DAG can be parameterized with run time", ") queue = sqs_hook.get_queue_by_name(QueueName=LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME) logging.info(\"Sending messages\") for message_dict in message_list: message = {", "# Read report and generate messages to fill missing scenes #### Utility utilization", "The DAG can be parameterized with run time configurations `scenes_limit`, which receives a", "latest report file name \"\"\" continuation_token = None list_reports = [] while True:", "\"owner\": \"RODRIGO\", \"start_date\": datetime(2021, 6, 7), \"email\": [\"<EMAIL>\"], \"email_on_failure\": True, \"email_on_success\": True, \"email_on_retry\":", "list of messages :return:(None) \"\"\" count = 0 messages = [] sqs_conn =", "be parameterized with run time configurations `scenes_limit`, which receives a INT as value.", "landsat:(str) satellite name :param scenes_limit:(str) limit of how many scenes will be filled", "find_latest_report(landsat=landsat) logging.info(f\"Latest report found {latest_report}\") if not latest_report: logging.error(\"Report not found\") raise RuntimeError(\"Report", "up to 1000 keys at a time. 
if resp.get(\"NextContinuationToken\"): continuation_token = resp[\"NextContinuationToken\"] else:", "# Send 10 messages per time if count % 10 == 0: publish_messages(queue,", "not stop execution traceback.print_exc() raise error with DAG( \"landsat_scenes_fill_the_gap\", default_args=default_args, schedule_interval=SCHEDULE_INTERVAL, tags=[\"Landsat_scenes\", \"fill", "None: \"\"\" Publish messages :param message_list:(list) list of messages :return:(None) \"\"\" count =", "messages per time if count % 10 == 0: publish_messages(queue, messages) messages =", "0: publish_messages(queue, messages) logging.info(f\"{count} messages sent successfully\") def find_latest_report(landsat: str) -> str: \"\"\"", "0, \"version\": \"0.0.1\", \"on_failure_callback\": task_fail_slack_alert, } def post_messages(message_list) -> None: \"\"\" Publish messages", "This process is manually run SCHEDULE_INTERVAL = None default_args = { \"owner\": \"RODRIGO\",", "limit' if scenes_limit else scenes_limit}\") if scenes_limit: missing_scene_paths = missing_scene_paths[:int(scenes_limit)] update_stac = False", "\"\"\" Publish messages :param message_list:(list) list of messages :return:(None) \"\"\" count = 0", "list_reports else \"\" def build_message(missing_scene_paths, update_stac): \"\"\" \"\"\" message_list = [] for path", "message_dict in message_list: message = { \"Id\": str(count), \"MessageBody\": str(json.dumps(message_dict)), } messages.append(message) count", "messages) messages = [] # Post the last messages if there are any", "INT as value. 
* The option scenes_limit limit the number of scenes to", "str(json.dumps(message_dict)), } messages.append(message) count += 1 # Send 10 messages per time if", "landsat_product_id = str(path.strip(\"/\").split(\"/\")[-1]) if not landsat_product_id: raise Exception(f'It was not possible to build", "str: \"\"\" Function to find the latest gap report :param landsat:(str)satellite name :return:(str)", "missing_scene_file_gzip = s3.get_s3_contents_and_attributes( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, key=latest_report, ) # This should just use Pandas.", "None: \"\"\" Function to retrieve the latest gap report and create messages to", "\"status-report/\" # This process is manually run SCHEDULE_INTERVAL = None default_args = {", "found {len(missing_scene_paths)}\") logging.info(f\"Example scenes: {missing_scene_paths[0:10]}\") logging.info(f\"Limited: {'No limit' if scenes_limit else scenes_limit}\") if", "= S3(conn_id=CONN_LANDSAT_SYNC) missing_scene_file_gzip = s3.get_s3_contents_and_attributes( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, key=latest_report, ) # This should just", "from infra.connections import CONN_LANDSAT_SYNC from infra.s3_buckets import LANDSAT_SYNC_BUCKET_NAME from infra.sqs_queues import LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME from", "raise Exception(f'It was not possible to build product ID from path {path}') message_list.append(", "] logging.info(f\"Number of scenes found {len(missing_scene_paths)}\") logging.info(f\"Example scenes: {missing_scene_paths[0:10]}\") logging.info(f\"Limited: {'No limit' if", "from path {path}') message_list.append( { \"Message\": { \"landsat_product_id\": landsat_product_id, \"s3_location\": str(path), \"update_stac\": update_stac", "if scenes_limit: missing_scene_paths = missing_scene_paths[:int(scenes_limit)] update_stac = False if 'update' in latest_report: logging.info('FORCED", "0 messages = [] sqs_conn = SQSHook(aws_conn_id=CONN_LANDSAT_SYNC) sqs_hook = sqs_conn.get_resource_type( 
resource_type=\"sqs\", region_name=REGION )", "[] while True: s3 = S3(conn_id=CONN_LANDSAT_SYNC) resp = s3.list_objects( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, prefix=f\"{STATUS_REPORT_FOLDER_NAME}/\", continuation_token=continuation_token,", "Send 10 messages per time if count % 10 == 0: publish_messages(queue, messages)", "else: logging.info(\"Reading missing scenes from the report\") s3 = S3(conn_id=CONN_LANDSAT_SYNC) missing_scene_file_gzip = s3.get_s3_contents_and_attributes(", "{ \"Id\": str(count), \"MessageBody\": str(json.dumps(message_dict)), } messages.append(message) count += 1 # Send 10", "sqs_conn.get_resource_type( resource_type=\"sqs\", region_name=REGION ) queue = sqs_hook.get_queue_by_name(QueueName=LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME) logging.info(\"Sending messages\") for message_dict in message_list:", "for latest report\") latest_report = find_latest_report(landsat=landsat) logging.info(f\"Latest report found {latest_report}\") if not latest_report:", "The option scenes_limit limit the number of scenes to be read from the", "traceback.print_exc() raise error with DAG( \"landsat_scenes_fill_the_gap\", default_args=default_args, schedule_interval=SCHEDULE_INTERVAL, tags=[\"Landsat_scenes\", \"fill the gap\"], catchup=False,", "Function to find the latest gap report :param landsat:(str)satellite name :return:(str) return the", "+= 1 # Send 10 messages per time if count % 10 ==", "S3 REPORTING_PREFIX = \"status-report/\" # This process is manually run SCHEDULE_INTERVAL = None", "10 messages per time if count % 10 == 0: publish_messages(queue, messages) messages", "False, \"retries\": 0, \"version\": \"0.0.1\", \"on_failure_callback\": task_fail_slack_alert, } def post_messages(message_list) -> None: \"\"\"", "[ obj[\"Key\"] for obj in resp[\"Contents\"] if landsat in obj[\"Key\"] and \"orphaned\" not", "if resp.get(\"NextContinuationToken\"): continuation_token = resp[\"NextContinuationToken\"] else: break list_reports.sort() 
return list_reports[-1] if list_reports else", "example conf in json format { \"scenes_limit\":10 } \"\"\" import gzip import json", "time. if resp.get(\"NextContinuationToken\"): continuation_token = resp[\"NextContinuationToken\"] else: break list_reports.sort() return list_reports[-1] if list_reports", "scenes_limit:(str) limit of how many scenes will be filled :return:(None) \"\"\" try: logging.info(\"Looking", "Optional from airflow import DAG from airflow.contrib.hooks.aws_sqs_hook import SQSHook from airflow.operators.python_operator import PythonOperator", "landsat_product_id, \"s3_location\": str(path), \"update_stac\": update_stac } } ) return message_list def fill_the_gap(landsat: str,", "publish_messages(queue, messages) logging.info(f\"{count} messages sent successfully\") def find_latest_report(landsat: str) -> str: \"\"\" Function", "{missing_scene_paths[0:10]}\") logging.info(f\"Limited: {'No limit' if scenes_limit else scenes_limit}\") if scenes_limit: missing_scene_paths = missing_scene_paths[:int(scenes_limit)]", "be read from the report, therefore limit the number of messages to be", "S3(conn_id=CONN_LANDSAT_SYNC) missing_scene_file_gzip = s3.get_s3_contents_and_attributes( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, key=latest_report, ) # This should just use", "messages to fill missing scenes #### Utility utilization The DAG can be parameterized", "logging.info(f\"Number of scenes found {len(missing_scene_paths)}\") logging.info(f\"Example scenes: {missing_scene_paths[0:10]}\") logging.info(f\"Limited: {'No limit' if scenes_limit", "from utility.utility_slackoperator import task_fail_slack_alert, task_success_slack_alert from utils.aws_utils import S3 REPORTING_PREFIX = \"status-report/\" #", "[] for path in missing_scene_paths: landsat_product_id = str(path.strip(\"/\").split(\"/\")[-1]) if not landsat_product_id: raise Exception(f'It", "in obj[\"Key\"] ] ) # The S3 API is paginated, returning up to", "\"landsat_5\" ] for sat in 
satellites: PROCESSES.append( PythonOperator( task_id=f\"{sat}_fill_the_gap\", python_callable=fill_the_gap, op_kwargs=dict(landsat=sat, scenes_limit=\"{{ dag_run.conf.scenes_limit", "messages = [] sqs_conn = SQSHook(aws_conn_id=CONN_LANDSAT_SYNC) sqs_hook = sqs_conn.get_resource_type( resource_type=\"sqs\", region_name=REGION ) queue", "= S3(conn_id=CONN_LANDSAT_SYNC) resp = s3.list_objects( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, prefix=f\"{STATUS_REPORT_FOLDER_NAME}/\", continuation_token=continuation_token, ) if not resp.get(\"Contents\"):", "prefix=f\"{STATUS_REPORT_FOLDER_NAME}/\", continuation_token=continuation_token, ) if not resp.get(\"Contents\"): raise Exception( f\"Report not found at \"", "key=latest_report, ) # This should just use Pandas. It's already a dependency. missing_scene_paths", "in json format { \"scenes_limit\":10 } \"\"\" import gzip import json import logging", "0: publish_messages(queue, messages) messages = [] # Post the last messages if there", "\"\"\" \"\"\" message_list = [] for path in missing_scene_paths: landsat_product_id = str(path.strip(\"/\").split(\"/\")[-1]) if", "dag: PROCESSES = [] satellites = [ \"landsat_8\", \"landsat_7\", \"landsat_5\" ] for sat", "logging.info(\"Sending messages\") for message_dict in message_list: message = { \"Id\": str(count), \"MessageBody\": str(json.dumps(message_dict)),", "of how many scenes will be filled :return:(None) \"\"\" try: logging.info(\"Looking for latest", "generate messages to fill missing scenes #### Utility utilization The DAG can be", "str, scenes_limit: Optional[int] = None) -> None: \"\"\" Function to retrieve the latest", "10 == 0: publish_messages(queue, messages) messages = [] # Post the last messages", "] for sat in satellites: PROCESSES.append( PythonOperator( task_id=f\"{sat}_fill_the_gap\", python_callable=fill_the_gap, op_kwargs=dict(landsat=sat, scenes_limit=\"{{ dag_run.conf.scenes_limit }}\"),", "in resp[\"Contents\"] if landsat in obj[\"Key\"] and 
\"orphaned\" not in obj[\"Key\"] ] )", "to find the latest gap report :param landsat:(str)satellite name :return:(str) return the latest", "list_reports.sort() return list_reports[-1] if list_reports else \"\" def build_message(missing_scene_paths, update_stac): \"\"\" \"\"\" message_list", "if scenes_limit else scenes_limit}\") if scenes_limit: missing_scene_paths = missing_scene_paths[:int(scenes_limit)] update_stac = False if", "if list_reports else \"\" def build_message(missing_scene_paths, update_stac): \"\"\" \"\"\" message_list = [] for", "UPDATE FLAGGED!') update_stac = True messages_to_send = build_message( missing_scene_paths=missing_scene_paths, update_stac=update_stac ) logging.info(\"Publishing messages\")", "import logging import traceback from datetime import datetime from typing import Optional from", "successfully\") def find_latest_report(landsat: str) -> str: \"\"\" Function to find the latest gap", "default_args=default_args, schedule_interval=SCHEDULE_INTERVAL, tags=[\"Landsat_scenes\", \"fill the gap\"], catchup=False, ) as dag: PROCESSES = []", "if count % 10 == 0: publish_messages(queue, messages) messages = [] # Post", "Exception( f\"Report not found at \" f\"{LANDSAT_SYNC_BUCKET_NAME}/{STATUS_REPORT_FOLDER_NAME}/\" f\" - returned {resp}\" ) list_reports.extend(", "of messages to be sent #### example conf in json format { \"scenes_limit\":10", "\"\"\" import gzip import json import logging import traceback from datetime import datetime", "if 'update' in latest_report: logging.info('FORCED UPDATE FLAGGED!') update_stac = True messages_to_send = build_message(", "be sent #### example conf in json format { \"scenes_limit\":10 } \"\"\" import", "publish_messages(queue, messages) messages = [] # Post the last messages if there are", ") # The S3 API is paginated, returning up to 1000 keys at", "\" f\"{LANDSAT_SYNC_BUCKET_NAME}/{STATUS_REPORT_FOLDER_NAME}/\" f\" - returned {resp}\" ) list_reports.extend( [ obj[\"Key\"] for obj in", "message = { 
\"Id\": str(count), \"MessageBody\": str(json.dumps(message_dict)), } messages.append(message) count += 1 #", ") return message_list def fill_the_gap(landsat: str, scenes_limit: Optional[int] = None) -> None: \"\"\"", "scene_path ] logging.info(f\"Number of scenes found {len(missing_scene_paths)}\") logging.info(f\"Example scenes: {missing_scene_paths[0:10]}\") logging.info(f\"Limited: {'No limit'", "traceback from datetime import datetime from typing import Optional from airflow import DAG", "execution traceback.print_exc() raise error with DAG( \"landsat_scenes_fill_the_gap\", default_args=default_args, schedule_interval=SCHEDULE_INTERVAL, tags=[\"Landsat_scenes\", \"fill the gap\"],", "not resp.get(\"Contents\"): raise Exception( f\"Report not found at \" f\"{LANDSAT_SYNC_BUCKET_NAME}/{STATUS_REPORT_FOLDER_NAME}/\" f\" - returned", "bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, prefix=f\"{STATUS_REPORT_FOLDER_NAME}/\", continuation_token=continuation_token, ) if not resp.get(\"Contents\"): raise Exception( f\"Report not found", "to fill missing scenes #### Utility utilization The DAG can be parameterized with", "DAG can be parameterized with run time configurations `scenes_limit`, which receives a INT", "LANDSAT_SYNC_BUCKET_NAME from infra.sqs_queues import LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME from infra.variables import REGION from landsat_scenes_sync.variables import STATUS_REPORT_FOLDER_NAME", "= None default_args = { \"owner\": \"RODRIGO\", \"start_date\": datetime(2021, 6, 7), \"email\": [\"<EMAIL>\"],", "\"orphaned\" not in obj[\"Key\"] ] ) # The S3 API is paginated, returning", "REGION from landsat_scenes_sync.variables import STATUS_REPORT_FOLDER_NAME from utility.utility_slackoperator import task_fail_slack_alert, task_success_slack_alert from utils.aws_utils import", "report file name \"\"\" continuation_token = None list_reports = [] while True: s3", "[ \"landsat_8\", \"landsat_7\", \"landsat_5\" ] for sat in satellites: PROCESSES.append( 
PythonOperator( task_id=f\"{sat}_fill_the_gap\", python_callable=fill_the_gap,", "= 0 messages = [] sqs_conn = SQSHook(aws_conn_id=CONN_LANDSAT_SYNC) sqs_hook = sqs_conn.get_resource_type( resource_type=\"sqs\", region_name=REGION", "\"landsat_8\", \"landsat_7\", \"landsat_5\" ] for sat in satellites: PROCESSES.append( PythonOperator( task_id=f\"{sat}_fill_the_gap\", python_callable=fill_the_gap, op_kwargs=dict(landsat=sat,", "of scenes found {len(missing_scene_paths)}\") logging.info(f\"Example scenes: {missing_scene_paths[0:10]}\") logging.info(f\"Limited: {'No limit' if scenes_limit else", "possible to build product ID from path {path}') message_list.append( { \"Message\": { \"landsat_product_id\":", "import Optional from airflow import DAG from airflow.contrib.hooks.aws_sqs_hook import SQSHook from airflow.operators.python_operator import", "[] sqs_conn = SQSHook(aws_conn_id=CONN_LANDSAT_SYNC) sqs_hook = sqs_conn.get_resource_type( resource_type=\"sqs\", region_name=REGION ) queue = sqs_hook.get_queue_by_name(QueueName=LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME)", "scenes_limit limit the number of scenes to be read from the report, therefore", "a time. if resp.get(\"NextContinuationToken\"): continuation_token = resp[\"NextContinuationToken\"] else: break list_reports.sort() return list_reports[-1] if", "from infra.s3_buckets import LANDSAT_SYNC_BUCKET_NAME from infra.sqs_queues import LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME from infra.variables import REGION from", "fill_the_gap(landsat: str, scenes_limit: Optional[int] = None) -> None: \"\"\" Function to retrieve the", "missing_scene_paths: landsat_product_id = str(path.strip(\"/\").split(\"/\")[-1]) if not landsat_product_id: raise Exception(f'It was not possible to", "process. 
:param landsat:(str) satellite name :param scenes_limit:(str) limit of how many scenes will", "satellites = [ \"landsat_8\", \"landsat_7\", \"landsat_5\" ] for sat in satellites: PROCESSES.append( PythonOperator(", "} } ) return message_list def fill_the_gap(landsat: str, scenes_limit: Optional[int] = None) ->", "= True messages_to_send = build_message( missing_scene_paths=missing_scene_paths, update_stac=update_stac ) logging.info(\"Publishing messages\") post_messages(message_list=messages_to_send) except Exception", "-> None: \"\"\" Function to retrieve the latest gap report and create messages", "{latest_report}\") if not latest_report: logging.error(\"Report not found\") raise RuntimeError(\"Report not found!\") else: logging.info(\"Reading", "= build_message( missing_scene_paths=missing_scene_paths, update_stac=update_stac ) logging.info(\"Publishing messages\") post_messages(message_list=messages_to_send) except Exception as error: logging.error(error)", "ID from path {path}') message_list.append( { \"Message\": { \"landsat_product_id\": landsat_product_id, \"s3_location\": str(path), \"update_stac\":", "continuation_token = resp[\"NextContinuationToken\"] else: break list_reports.sort() return list_reports[-1] if list_reports else \"\" def", "already a dependency. 
missing_scene_paths = [ scene_path for scene_path in gzip.decompress(missing_scene_file_gzip).decode(\"utf-8\").split(\"\\n\") if scene_path", "messages_to_send = build_message( missing_scene_paths=missing_scene_paths, update_stac=update_stac ) logging.info(\"Publishing messages\") post_messages(message_list=messages_to_send) except Exception as error:", "messages\") for message_dict in message_list: message = { \"Id\": str(count), \"MessageBody\": str(json.dumps(message_dict)), }", "\"on_failure_callback\": task_fail_slack_alert, } def post_messages(message_list) -> None: \"\"\" Publish messages :param message_list:(list) list", "find the latest gap report :param landsat:(str)satellite name :return:(str) return the latest report", "raise error with DAG( \"landsat_scenes_fill_the_gap\", default_args=default_args, schedule_interval=SCHEDULE_INTERVAL, tags=[\"Landsat_scenes\", \"fill the gap\"], catchup=False, )", "import gzip import json import logging import traceback from datetime import datetime from", "-> None: \"\"\" Publish messages :param message_list:(list) list of messages :return:(None) \"\"\" count", "airflow.operators.python_operator import PythonOperator from odc.aws.queue import publish_messages from infra.connections import CONN_LANDSAT_SYNC from infra.s3_buckets", "per time if count % 10 == 0: publish_messages(queue, messages) messages = []", "report\") s3 = S3(conn_id=CONN_LANDSAT_SYNC) missing_scene_file_gzip = s3.get_s3_contents_and_attributes( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, key=latest_report, ) # This", "= [] # Post the last messages if there are any if len(messages)", "a dependency. 
missing_scene_paths = [ scene_path for scene_path in gzip.decompress(missing_scene_file_gzip).decode(\"utf-8\").split(\"\\n\") if scene_path ]", "in gzip.decompress(missing_scene_file_gzip).decode(\"utf-8\").split(\"\\n\") if scene_path ] logging.info(f\"Number of scenes found {len(missing_scene_paths)}\") logging.info(f\"Example scenes: {missing_scene_paths[0:10]}\")", "the filter queue process. :param landsat:(str) satellite name :param scenes_limit:(str) limit of how", "scenes_limit}\") if scenes_limit: missing_scene_paths = missing_scene_paths[:int(scenes_limit)] update_stac = False if 'update' in latest_report:", "\"\"\" message_list = [] for path in missing_scene_paths: landsat_product_id = str(path.strip(\"/\").split(\"/\")[-1]) if not", "from the report, therefore limit the number of messages to be sent ####", "# This process is manually run SCHEDULE_INTERVAL = None default_args = { \"owner\":", "retrieve the latest gap report and create messages to the filter queue process.", "count % 10 == 0: publish_messages(queue, messages) messages = [] # Post the", "update_stac = False if 'update' in latest_report: logging.info('FORCED UPDATE FLAGGED!') update_stac = True", "found!\") else: logging.info(\"Reading missing scenes from the report\") s3 = S3(conn_id=CONN_LANDSAT_SYNC) missing_scene_file_gzip =", "not possible to build product ID from path {path}') message_list.append( { \"Message\": {", "= s3.get_s3_contents_and_attributes( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, key=latest_report, ) # This should just use Pandas. 
It's", "6, 7), \"email\": [\"<EMAIL>\"], \"email_on_failure\": True, \"email_on_success\": True, \"email_on_retry\": False, \"retries\": 0, \"version\":", "} def post_messages(message_list) -> None: \"\"\" Publish messages :param message_list:(list) list of messages", "count = 0 messages = [] sqs_conn = SQSHook(aws_conn_id=CONN_LANDSAT_SYNC) sqs_hook = sqs_conn.get_resource_type( resource_type=\"sqs\",", "from airflow.contrib.hooks.aws_sqs_hook import SQSHook from airflow.operators.python_operator import PythonOperator from odc.aws.queue import publish_messages from", "limit of how many scenes will be filled :return:(None) \"\"\" try: logging.info(\"Looking for", "import CONN_LANDSAT_SYNC from infra.s3_buckets import LANDSAT_SYNC_BUCKET_NAME from infra.sqs_queues import LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME from infra.variables import", ") list_reports.extend( [ obj[\"Key\"] for obj in resp[\"Contents\"] if landsat in obj[\"Key\"] and", "\"Message\": { \"landsat_product_id\": landsat_product_id, \"s3_location\": str(path), \"update_stac\": update_stac } } ) return message_list", "= { \"owner\": \"RODRIGO\", \"start_date\": datetime(2021, 6, 7), \"email\": [\"<EMAIL>\"], \"email_on_failure\": True, \"email_on_success\":", "be filled :return:(None) \"\"\" try: logging.info(\"Looking for latest report\") latest_report = find_latest_report(landsat=landsat) logging.info(f\"Latest", "if scene_path ] logging.info(f\"Number of scenes found {len(missing_scene_paths)}\") logging.info(f\"Example scenes: {missing_scene_paths[0:10]}\") logging.info(f\"Limited: {'No", "sent successfully\") def find_latest_report(landsat: str) -> str: \"\"\" Function to find the latest", "returning up to 1000 keys at a time. 
if resp.get(\"NextContinuationToken\"): continuation_token = resp[\"NextContinuationToken\"]", "in message_list: message = { \"Id\": str(count), \"MessageBody\": str(json.dumps(message_dict)), } messages.append(message) count +=", "report found {latest_report}\") if not latest_report: logging.error(\"Report not found\") raise RuntimeError(\"Report not found!\")", "else \"\" def build_message(missing_scene_paths, update_stac): \"\"\" \"\"\" message_list = [] for path in", "there are any if len(messages) > 0: publish_messages(queue, messages) logging.info(f\"{count} messages sent successfully\")", "parameterized with run time configurations `scenes_limit`, which receives a INT as value. *", "found at \" f\"{LANDSAT_SYNC_BUCKET_NAME}/{STATUS_REPORT_FOLDER_NAME}/\" f\" - returned {resp}\" ) list_reports.extend( [ obj[\"Key\"] for", "PROCESSES = [] satellites = [ \"landsat_8\", \"landsat_7\", \"landsat_5\" ] for sat in", "raise Exception( f\"Report not found at \" f\"{LANDSAT_SYNC_BUCKET_NAME}/{STATUS_REPORT_FOLDER_NAME}/\" f\" - returned {resp}\" )", "message_list.append( { \"Message\": { \"landsat_product_id\": landsat_product_id, \"s3_location\": str(path), \"update_stac\": update_stac } } )", "which receives a INT as value. * The option scenes_limit limit the number", "of messages :return:(None) \"\"\" count = 0 messages = [] sqs_conn = SQSHook(aws_conn_id=CONN_LANDSAT_SYNC)", "`scenes_limit`, which receives a INT as value. 
* The option scenes_limit limit the", "} \"\"\" import gzip import json import logging import traceback from datetime import", "resource_type=\"sqs\", region_name=REGION ) queue = sqs_hook.get_queue_by_name(QueueName=LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME) logging.info(\"Sending messages\") for message_dict in message_list: message", "f\"Report not found at \" f\"{LANDSAT_SYNC_BUCKET_NAME}/{STATUS_REPORT_FOLDER_NAME}/\" f\" - returned {resp}\" ) list_reports.extend( [", "utilization The DAG can be parameterized with run time configurations `scenes_limit`, which receives", "s3.get_s3_contents_and_attributes( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, key=latest_report, ) # This should just use Pandas. It's already", "scenes will be filled :return:(None) \"\"\" try: logging.info(\"Looking for latest report\") latest_report =", "missing_scene_paths = [ scene_path for scene_path in gzip.decompress(missing_scene_file_gzip).decode(\"utf-8\").split(\"\\n\") if scene_path ] logging.info(f\"Number of", "\"RODRIGO\", \"start_date\": datetime(2021, 6, 7), \"email\": [\"<EMAIL>\"], \"email_on_failure\": True, \"email_on_success\": True, \"email_on_retry\": False,", "build_message( missing_scene_paths=missing_scene_paths, update_stac=update_stac ) logging.info(\"Publishing messages\") post_messages(message_list=messages_to_send) except Exception as error: logging.error(error) #", "odc.aws.queue import publish_messages from infra.connections import CONN_LANDSAT_SYNC from infra.s3_buckets import LANDSAT_SYNC_BUCKET_NAME from infra.sqs_queues", "messages\") post_messages(message_list=messages_to_send) except Exception as error: logging.error(error) # print traceback but does not", "gap report :param landsat:(str)satellite name :return:(str) return the latest report file name \"\"\"", "to retrieve the latest gap report and create messages to the filter queue", "DAG( \"landsat_scenes_fill_the_gap\", default_args=default_args, schedule_interval=SCHEDULE_INTERVAL, 
tags=[\"Landsat_scenes\", \"fill the gap\"], catchup=False, ) as dag: PROCESSES", "how many scenes will be filled :return:(None) \"\"\" try: logging.info(\"Looking for latest report\")", "SQSHook from airflow.operators.python_operator import PythonOperator from odc.aws.queue import publish_messages from infra.connections import CONN_LANDSAT_SYNC", "% 10 == 0: publish_messages(queue, messages) messages = [] # Post the last", "\"\"\" Function to find the latest gap report :param landsat:(str)satellite name :return:(str) return", "and \"orphaned\" not in obj[\"Key\"] ] ) # The S3 API is paginated,", "FLAGGED!') update_stac = True messages_to_send = build_message( missing_scene_paths=missing_scene_paths, update_stac=update_stac ) logging.info(\"Publishing messages\") post_messages(message_list=messages_to_send)", "def post_messages(message_list) -> None: \"\"\" Publish messages :param message_list:(list) list of messages :return:(None)", "datetime(2021, 6, 7), \"email\": [\"<EMAIL>\"], \"email_on_failure\": True, \"email_on_success\": True, \"email_on_retry\": False, \"retries\": 0,", "logging.error(\"Report not found\") raise RuntimeError(\"Report not found!\") else: logging.info(\"Reading missing scenes from the", "json format { \"scenes_limit\":10 } \"\"\" import gzip import json import logging import", "import LANDSAT_SYNC_BUCKET_NAME from infra.sqs_queues import LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME from infra.variables import REGION from landsat_scenes_sync.variables import", "True messages_to_send = build_message( missing_scene_paths=missing_scene_paths, update_stac=update_stac ) logging.info(\"Publishing messages\") post_messages(message_list=messages_to_send) except Exception as", "continuation_token = None list_reports = [] while True: s3 = S3(conn_id=CONN_LANDSAT_SYNC) resp =", "= [] while True: s3 = S3(conn_id=CONN_LANDSAT_SYNC) resp = s3.list_objects( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, prefix=f\"{STATUS_REPORT_FOLDER_NAME}/\",", 
"STATUS_REPORT_FOLDER_NAME from utility.utility_slackoperator import task_fail_slack_alert, task_success_slack_alert from utils.aws_utils import S3 REPORTING_PREFIX = \"status-report/\"", "should just use Pandas. It's already a dependency. missing_scene_paths = [ scene_path for", "not landsat_product_id: raise Exception(f'It was not possible to build product ID from path", "True: s3 = S3(conn_id=CONN_LANDSAT_SYNC) resp = s3.list_objects( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, prefix=f\"{STATUS_REPORT_FOLDER_NAME}/\", continuation_token=continuation_token, ) if", "= None) -> None: \"\"\" Function to retrieve the latest gap report and", "s3.list_objects( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, prefix=f\"{STATUS_REPORT_FOLDER_NAME}/\", continuation_token=continuation_token, ) if not resp.get(\"Contents\"): raise Exception( f\"Report not", "from infra.variables import REGION from landsat_scenes_sync.variables import STATUS_REPORT_FOLDER_NAME from utility.utility_slackoperator import task_fail_slack_alert, task_success_slack_alert", "satellites: PROCESSES.append( PythonOperator( task_id=f\"{sat}_fill_the_gap\", python_callable=fill_the_gap, op_kwargs=dict(landsat=sat, scenes_limit=\"{{ dag_run.conf.scenes_limit }}\"), on_success_callback=task_success_slack_alert, ) ) PROCESSES", "from datetime import datetime from typing import Optional from airflow import DAG from", "[ scene_path for scene_path in gzip.decompress(missing_scene_file_gzip).decode(\"utf-8\").split(\"\\n\") if scene_path ] logging.info(f\"Number of scenes found", "with DAG( \"landsat_scenes_fill_the_gap\", default_args=default_args, schedule_interval=SCHEDULE_INTERVAL, tags=[\"Landsat_scenes\", \"fill the gap\"], catchup=False, ) as dag:", "option scenes_limit limit the number of scenes to be read from the report,", "as dag: PROCESSES = [] satellites = [ \"landsat_8\", \"landsat_7\", \"landsat_5\" ] for", "\"scenes_limit\":10 } \"\"\" import gzip import json import logging import 
traceback from datetime", "logging import traceback from datetime import datetime from typing import Optional from airflow", "datetime from typing import Optional from airflow import DAG from airflow.contrib.hooks.aws_sqs_hook import SQSHook", "to build product ID from path {path}') message_list.append( { \"Message\": { \"landsat_product_id\": landsat_product_id,", "the number of messages to be sent #### example conf in json format", "landsat:(str)satellite name :return:(str) return the latest report file name \"\"\" continuation_token = None", "file name \"\"\" continuation_token = None list_reports = [] while True: s3 =", "str(count), \"MessageBody\": str(json.dumps(message_dict)), } messages.append(message) count += 1 # Send 10 messages per", "logging.info(f\"Example scenes: {missing_scene_paths[0:10]}\") logging.info(f\"Limited: {'No limit' if scenes_limit else scenes_limit}\") if scenes_limit: missing_scene_paths", "str) -> str: \"\"\" Function to find the latest gap report :param landsat:(str)satellite", "from infra.sqs_queues import LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME from infra.variables import REGION from landsat_scenes_sync.variables import STATUS_REPORT_FOLDER_NAME from", "\"\" def build_message(missing_scene_paths, update_stac): \"\"\" \"\"\" message_list = [] for path in missing_scene_paths:", "} messages.append(message) count += 1 # Send 10 messages per time if count", "filled :return:(None) \"\"\" try: logging.info(\"Looking for latest report\") latest_report = find_latest_report(landsat=landsat) logging.info(f\"Latest report", "receives a INT as value. 
* The option scenes_limit limit the number of", "from the report\") s3 = S3(conn_id=CONN_LANDSAT_SYNC) missing_scene_file_gzip = s3.get_s3_contents_and_attributes( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, key=latest_report, )", "can be parameterized with run time configurations `scenes_limit`, which receives a INT as", "scenes from the report\") s3 = S3(conn_id=CONN_LANDSAT_SYNC) missing_scene_file_gzip = s3.get_s3_contents_and_attributes( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, key=latest_report,", "run SCHEDULE_INTERVAL = None default_args = { \"owner\": \"RODRIGO\", \"start_date\": datetime(2021, 6, 7),", "path {path}') message_list.append( { \"Message\": { \"landsat_product_id\": landsat_product_id, \"s3_location\": str(path), \"update_stac\": update_stac }", "stop execution traceback.print_exc() raise error with DAG( \"landsat_scenes_fill_the_gap\", default_args=default_args, schedule_interval=SCHEDULE_INTERVAL, tags=[\"Landsat_scenes\", \"fill the", "missing_scene_paths = missing_scene_paths[:int(scenes_limit)] update_stac = False if 'update' in latest_report: logging.info('FORCED UPDATE FLAGGED!')", "= None list_reports = [] while True: s3 = S3(conn_id=CONN_LANDSAT_SYNC) resp = s3.list_objects(", "scenes_limit else scenes_limit}\") if scenes_limit: missing_scene_paths = missing_scene_paths[:int(scenes_limit)] update_stac = False if 'update'", "\"retries\": 0, \"version\": \"0.0.1\", \"on_failure_callback\": task_fail_slack_alert, } def post_messages(message_list) -> None: \"\"\" Publish", "Utility utilization The DAG can be parameterized with run time configurations `scenes_limit`, which", "the number of scenes to be read from the report, therefore limit the", "latest gap report and create messages to the filter queue process. 
:param landsat:(str)", "the latest gap report :param landsat:(str)satellite name :return:(str) return the latest report file", "latest report\") latest_report = find_latest_report(landsat=landsat) logging.info(f\"Latest report found {latest_report}\") if not latest_report: logging.error(\"Report", "scenes_limit: Optional[int] = None) -> None: \"\"\" Function to retrieve the latest gap", "[] satellites = [ \"landsat_8\", \"landsat_7\", \"landsat_5\" ] for sat in satellites: PROCESSES.append(", "queue = sqs_hook.get_queue_by_name(QueueName=LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME) logging.info(\"Sending messages\") for message_dict in message_list: message = { \"Id\":", "import SQSHook from airflow.operators.python_operator import PythonOperator from odc.aws.queue import publish_messages from infra.connections import", "= s3.list_objects( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, prefix=f\"{STATUS_REPORT_FOLDER_NAME}/\", continuation_token=continuation_token, ) if not resp.get(\"Contents\"): raise Exception( f\"Report", "messages to the filter queue process. :param landsat:(str) satellite name :param scenes_limit:(str) limit", "def find_latest_report(landsat: str) -> str: \"\"\" Function to find the latest gap report", "the latest gap report and create messages to the filter queue process. :param", "is manually run SCHEDULE_INTERVAL = None default_args = { \"owner\": \"RODRIGO\", \"start_date\": datetime(2021,", "S3(conn_id=CONN_LANDSAT_SYNC) resp = s3.list_objects( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, prefix=f\"{STATUS_REPORT_FOLDER_NAME}/\", continuation_token=continuation_token, ) if not resp.get(\"Contents\"): raise", "bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, key=latest_report, ) # This should just use Pandas. 
It's already a", ":param landsat:(str) satellite name :param scenes_limit:(str) limit of how many scenes will be", "while True: s3 = S3(conn_id=CONN_LANDSAT_SYNC) resp = s3.list_objects( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, prefix=f\"{STATUS_REPORT_FOLDER_NAME}/\", continuation_token=continuation_token, )", "sat in satellites: PROCESSES.append( PythonOperator( task_id=f\"{sat}_fill_the_gap\", python_callable=fill_the_gap, op_kwargs=dict(landsat=sat, scenes_limit=\"{{ dag_run.conf.scenes_limit }}\"), on_success_callback=task_success_slack_alert, )", "\"email_on_failure\": True, \"email_on_success\": True, \"email_on_retry\": False, \"retries\": 0, \"version\": \"0.0.1\", \"on_failure_callback\": task_fail_slack_alert, }", "build product ID from path {path}') message_list.append( { \"Message\": { \"landsat_product_id\": landsat_product_id, \"s3_location\":", ") as dag: PROCESSES = [] satellites = [ \"landsat_8\", \"landsat_7\", \"landsat_5\" ]", "\"email_on_retry\": False, \"retries\": 0, \"version\": \"0.0.1\", \"on_failure_callback\": task_fail_slack_alert, } def post_messages(message_list) -> None:", "} ) return message_list def fill_the_gap(landsat: str, scenes_limit: Optional[int] = None) -> None:", "for obj in resp[\"Contents\"] if landsat in obj[\"Key\"] and \"orphaned\" not in obj[\"Key\"]", ") # This should just use Pandas. It's already a dependency. 
missing_scene_paths =", "for path in missing_scene_paths: landsat_product_id = str(path.strip(\"/\").split(\"/\")[-1]) if not landsat_product_id: raise Exception(f'It was", "messages sent successfully\") def find_latest_report(landsat: str) -> str: \"\"\" Function to find the", "message_list:(list) list of messages :return:(None) \"\"\" count = 0 messages = [] sqs_conn", "report :param landsat:(str)satellite name :return:(str) return the latest report file name \"\"\" continuation_token", "str(path), \"update_stac\": update_stac } } ) return message_list def fill_the_gap(landsat: str, scenes_limit: Optional[int]", "\"landsat_7\", \"landsat_5\" ] for sat in satellites: PROCESSES.append( PythonOperator( task_id=f\"{sat}_fill_the_gap\", python_callable=fill_the_gap, op_kwargs=dict(landsat=sat, scenes_limit=\"{{", "number of messages to be sent #### example conf in json format {", "f\" - returned {resp}\" ) list_reports.extend( [ obj[\"Key\"] for obj in resp[\"Contents\"] if", "SCHEDULE_INTERVAL = None default_args = { \"owner\": \"RODRIGO\", \"start_date\": datetime(2021, 6, 7), \"email\":", "return message_list def fill_the_gap(landsat: str, scenes_limit: Optional[int] = None) -> None: \"\"\" Function", "not in obj[\"Key\"] ] ) # The S3 API is paginated, returning up", "# This should just use Pandas. It's already a dependency. missing_scene_paths = [", "Exception(f'It was not possible to build product ID from path {path}') message_list.append( {", "== 0: publish_messages(queue, messages) messages = [] # Post the last messages if", "last messages if there are any if len(messages) > 0: publish_messages(queue, messages) logging.info(f\"{count}", "It's already a dependency. missing_scene_paths = [ scene_path for scene_path in gzip.decompress(missing_scene_file_gzip).decode(\"utf-8\").split(\"\\n\") if", "This should just use Pandas. It's already a dependency. 
missing_scene_paths = [ scene_path", "\"s3_location\": str(path), \"update_stac\": update_stac } } ) return message_list def fill_the_gap(landsat: str, scenes_limit:", "to 1000 keys at a time. if resp.get(\"NextContinuationToken\"): continuation_token = resp[\"NextContinuationToken\"] else: break", "PythonOperator from odc.aws.queue import publish_messages from infra.connections import CONN_LANDSAT_SYNC from infra.s3_buckets import LANDSAT_SYNC_BUCKET_NAME", "at \" f\"{LANDSAT_SYNC_BUCKET_NAME}/{STATUS_REPORT_FOLDER_NAME}/\" f\" - returned {resp}\" ) list_reports.extend( [ obj[\"Key\"] for obj", "LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME from infra.variables import REGION from landsat_scenes_sync.variables import STATUS_REPORT_FOLDER_NAME from utility.utility_slackoperator import task_fail_slack_alert,", "message_list = [] for path in missing_scene_paths: landsat_product_id = str(path.strip(\"/\").split(\"/\")[-1]) if not landsat_product_id:", "to be read from the report, therefore limit the number of messages to", "landsat in obj[\"Key\"] and \"orphaned\" not in obj[\"Key\"] ] ) # The S3", "obj in resp[\"Contents\"] if landsat in obj[\"Key\"] and \"orphaned\" not in obj[\"Key\"] ]", "{path}') message_list.append( { \"Message\": { \"landsat_product_id\": landsat_product_id, \"s3_location\": str(path), \"update_stac\": update_stac } }", "messages if there are any if len(messages) > 0: publish_messages(queue, messages) logging.info(f\"{count} messages", ":return:(None) \"\"\" try: logging.info(\"Looking for latest report\") latest_report = find_latest_report(landsat=landsat) logging.info(f\"Latest report found", "count += 1 # Send 10 messages per time if count % 10", "scenes_limit: missing_scene_paths = missing_scene_paths[:int(scenes_limit)] update_stac = False if 'update' in latest_report: logging.info('FORCED UPDATE", "json import logging import traceback from datetime import datetime from typing import Optional", "gzip import json import logging import traceback from 
datetime import datetime from typing", "tags=[\"Landsat_scenes\", \"fill the gap\"], catchup=False, ) as dag: PROCESSES = [] satellites =", "messages to be sent #### example conf in json format { \"scenes_limit\":10 }", "] ) # The S3 API is paginated, returning up to 1000 keys", "scene_path for scene_path in gzip.decompress(missing_scene_file_gzip).decode(\"utf-8\").split(\"\\n\") if scene_path ] logging.info(f\"Number of scenes found {len(missing_scene_paths)}\")", "utils.aws_utils import S3 REPORTING_PREFIX = \"status-report/\" # This process is manually run SCHEDULE_INTERVAL", "- returned {resp}\" ) list_reports.extend( [ obj[\"Key\"] for obj in resp[\"Contents\"] if landsat", "= str(path.strip(\"/\").split(\"/\")[-1]) if not landsat_product_id: raise Exception(f'It was not possible to build product", "\"landsat_scenes_fill_the_gap\", default_args=default_args, schedule_interval=SCHEDULE_INTERVAL, tags=[\"Landsat_scenes\", \"fill the gap\"], catchup=False, ) as dag: PROCESSES =", "update_stac = True messages_to_send = build_message( missing_scene_paths=missing_scene_paths, update_stac=update_stac ) logging.info(\"Publishing messages\") post_messages(message_list=messages_to_send) except", "post_messages(message_list=messages_to_send) except Exception as error: logging.error(error) # print traceback but does not stop", "# print traceback but does not stop execution traceback.print_exc() raise error with DAG(", "report\") latest_report = find_latest_report(landsat=landsat) logging.info(f\"Latest report found {latest_report}\") if not latest_report: logging.error(\"Report not", "message_list def fill_the_gap(landsat: str, scenes_limit: Optional[int] = None) -> None: \"\"\" Function to", "landsat_product_id: raise Exception(f'It was not possible to build product ID from path {path}')", "airflow import DAG from airflow.contrib.hooks.aws_sqs_hook import SQSHook from airflow.operators.python_operator import PythonOperator from odc.aws.queue", "1000 keys at a time. 
if resp.get(\"NextContinuationToken\"): continuation_token = resp[\"NextContinuationToken\"] else: break list_reports.sort()", "if len(messages) > 0: publish_messages(queue, messages) logging.info(f\"{count} messages sent successfully\") def find_latest_report(landsat: str)", "logging.info(\"Publishing messages\") post_messages(message_list=messages_to_send) except Exception as error: logging.error(error) # print traceback but does", "import S3 REPORTING_PREFIX = \"status-report/\" # This process is manually run SCHEDULE_INTERVAL =", "error: logging.error(error) # print traceback but does not stop execution traceback.print_exc() raise error", "\"email_on_success\": True, \"email_on_retry\": False, \"retries\": 0, \"version\": \"0.0.1\", \"on_failure_callback\": task_fail_slack_alert, } def post_messages(message_list)", "keys at a time. if resp.get(\"NextContinuationToken\"): continuation_token = resp[\"NextContinuationToken\"] else: break list_reports.sort() return", "import REGION from landsat_scenes_sync.variables import STATUS_REPORT_FOLDER_NAME from utility.utility_slackoperator import task_fail_slack_alert, task_success_slack_alert from utils.aws_utils", "not found!\") else: logging.info(\"Reading missing scenes from the report\") s3 = S3(conn_id=CONN_LANDSAT_SYNC) missing_scene_file_gzip", "> 0: publish_messages(queue, messages) logging.info(f\"{count} messages sent successfully\") def find_latest_report(landsat: str) -> str:", "path in missing_scene_paths: landsat_product_id = str(path.strip(\"/\").split(\"/\")[-1]) if not landsat_product_id: raise Exception(f'It was not", "\"0.0.1\", \"on_failure_callback\": task_fail_slack_alert, } def post_messages(message_list) -> None: \"\"\" Publish messages :param message_list:(list)", "# Post the last messages if there are any if len(messages) > 0:", "limit the number of scenes to be read from the report, therefore limit", "* The option scenes_limit limit the number of scenes to be read from", "\"Id\": str(count), 
\"MessageBody\": str(json.dumps(message_dict)), } messages.append(message) count += 1 # Send 10 messages", "S3 API is paginated, returning up to 1000 keys at a time. if", "scenes #### Utility utilization The DAG can be parameterized with run time configurations", "infra.variables import REGION from landsat_scenes_sync.variables import STATUS_REPORT_FOLDER_NAME from utility.utility_slackoperator import task_fail_slack_alert, task_success_slack_alert from", "= missing_scene_paths[:int(scenes_limit)] update_stac = False if 'update' in latest_report: logging.info('FORCED UPDATE FLAGGED!') update_stac", "1 # Send 10 messages per time if count % 10 == 0:", ") if not resp.get(\"Contents\"): raise Exception( f\"Report not found at \" f\"{LANDSAT_SYNC_BUCKET_NAME}/{STATUS_REPORT_FOLDER_NAME}/\" f\"", "messages :param message_list:(list) list of messages :return:(None) \"\"\" count = 0 messages =", "= sqs_conn.get_resource_type( resource_type=\"sqs\", region_name=REGION ) queue = sqs_hook.get_queue_by_name(QueueName=LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME) logging.info(\"Sending messages\") for message_dict in", "update_stac } } ) return message_list def fill_the_gap(landsat: str, scenes_limit: Optional[int] = None)", "resp = s3.list_objects( bucket_name=LANDSAT_SYNC_BUCKET_NAME, region=REGION, prefix=f\"{STATUS_REPORT_FOLDER_NAME}/\", continuation_token=continuation_token, ) if not resp.get(\"Contents\"): raise Exception(", "{ \"scenes_limit\":10 } \"\"\" import gzip import json import logging import traceback from", "post_messages(message_list) -> None: \"\"\" Publish messages :param message_list:(list) list of messages :return:(None) \"\"\"", "= [] satellites = [ \"landsat_8\", \"landsat_7\", \"landsat_5\" ] for sat in satellites:", "from utils.aws_utils import S3 REPORTING_PREFIX = \"status-report/\" # This process is manually run", "catchup=False, ) as dag: PROCESSES = [] satellites = [ \"landsat_8\", \"landsat_7\", \"landsat_5\"", "= [ scene_path for scene_path in 
gzip.decompress(missing_scene_file_gzip).decode(\"utf-8\").split(\"\\n\") if scene_path ] logging.info(f\"Number of scenes", "scenes: {missing_scene_paths[0:10]}\") logging.info(f\"Limited: {'No limit' if scenes_limit else scenes_limit}\") if scenes_limit: missing_scene_paths =", "but does not stop execution traceback.print_exc() raise error with DAG( \"landsat_scenes_fill_the_gap\", default_args=default_args, schedule_interval=SCHEDULE_INTERVAL,", "a INT as value. * The option scenes_limit limit the number of scenes", "value. * The option scenes_limit limit the number of scenes to be read", "\"\"\" try: logging.info(\"Looking for latest report\") latest_report = find_latest_report(landsat=landsat) logging.info(f\"Latest report found {latest_report}\")", ":param message_list:(list) list of messages :return:(None) \"\"\" count = 0 messages = []", "from airflow import DAG from airflow.contrib.hooks.aws_sqs_hook import SQSHook from airflow.operators.python_operator import PythonOperator from", "break list_reports.sort() return list_reports[-1] if list_reports else \"\" def build_message(missing_scene_paths, update_stac): \"\"\" \"\"\"", "None default_args = { \"owner\": \"RODRIGO\", \"start_date\": datetime(2021, 6, 7), \"email\": [\"<EMAIL>\"], \"email_on_failure\":", "returned {resp}\" ) list_reports.extend( [ obj[\"Key\"] for obj in resp[\"Contents\"] if landsat in", "gap report and create messages to the filter queue process. 
:param landsat:(str) satellite", "list_reports.extend( [ obj[\"Key\"] for obj in resp[\"Contents\"] if landsat in obj[\"Key\"] and \"orphaned\"", "print traceback but does not stop execution traceback.print_exc() raise error with DAG( \"landsat_scenes_fill_the_gap\",", "list_reports[-1] if list_reports else \"\" def build_message(missing_scene_paths, update_stac): \"\"\" \"\"\" message_list = []", "= SQSHook(aws_conn_id=CONN_LANDSAT_SYNC) sqs_hook = sqs_conn.get_resource_type( resource_type=\"sqs\", region_name=REGION ) queue = sqs_hook.get_queue_by_name(QueueName=LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME) logging.info(\"Sending messages\")", "scenes found {len(missing_scene_paths)}\") logging.info(f\"Example scenes: {missing_scene_paths[0:10]}\") logging.info(f\"Limited: {'No limit' if scenes_limit else scenes_limit}\")", "infra.connections import CONN_LANDSAT_SYNC from infra.s3_buckets import LANDSAT_SYNC_BUCKET_NAME from infra.sqs_queues import LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME from infra.variables", "import STATUS_REPORT_FOLDER_NAME from utility.utility_slackoperator import task_fail_slack_alert, task_success_slack_alert from utils.aws_utils import S3 REPORTING_PREFIX =", "return list_reports[-1] if list_reports else \"\" def build_message(missing_scene_paths, update_stac): \"\"\" \"\"\" message_list =", "API is paginated, returning up to 1000 keys at a time. 
if resp.get(\"NextContinuationToken\"):", "logging.info(\"Looking for latest report\") latest_report = find_latest_report(landsat=landsat) logging.info(f\"Latest report found {latest_report}\") if not", "scenes to be read from the report, therefore limit the number of messages", "Function to retrieve the latest gap report and create messages to the filter", "for scene_path in gzip.decompress(missing_scene_file_gzip).decode(\"utf-8\").split(\"\\n\") if scene_path ] logging.info(f\"Number of scenes found {len(missing_scene_paths)}\") logging.info(f\"Example", "in obj[\"Key\"] and \"orphaned\" not in obj[\"Key\"] ] ) # The S3 API", "\"version\": \"0.0.1\", \"on_failure_callback\": task_fail_slack_alert, } def post_messages(message_list) -> None: \"\"\" Publish messages :param", "infra.sqs_queues import LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME from infra.variables import REGION from landsat_scenes_sync.variables import STATUS_REPORT_FOLDER_NAME from utility.utility_slackoperator", "f\"{LANDSAT_SYNC_BUCKET_NAME}/{STATUS_REPORT_FOLDER_NAME}/\" f\" - returned {resp}\" ) list_reports.extend( [ obj[\"Key\"] for obj in resp[\"Contents\"]", "{resp}\" ) list_reports.extend( [ obj[\"Key\"] for obj in resp[\"Contents\"] if landsat in obj[\"Key\"]", "import json import logging import traceback from datetime import datetime from typing import", "SQSHook(aws_conn_id=CONN_LANDSAT_SYNC) sqs_hook = sqs_conn.get_resource_type( resource_type=\"sqs\", region_name=REGION ) queue = sqs_hook.get_queue_by_name(QueueName=LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME) logging.info(\"Sending messages\") for", "region=REGION, key=latest_report, ) # This should just use Pandas. It's already a dependency.", "run time configurations `scenes_limit`, which receives a INT as value. 
* The option", "False if 'update' in latest_report: logging.info('FORCED UPDATE FLAGGED!') update_stac = True messages_to_send =", "sqs_hook = sqs_conn.get_resource_type( resource_type=\"sqs\", region_name=REGION ) queue = sqs_hook.get_queue_by_name(QueueName=LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME) logging.info(\"Sending messages\") for message_dict", "to the filter queue process. :param landsat:(str) satellite name :param scenes_limit:(str) limit of", "found {latest_report}\") if not latest_report: logging.error(\"Report not found\") raise RuntimeError(\"Report not found!\") else:", "time if count % 10 == 0: publish_messages(queue, messages) messages = [] #", "logging.info(f\"{count} messages sent successfully\") def find_latest_report(landsat: str) -> str: \"\"\" Function to find", "publish_messages from infra.connections import CONN_LANDSAT_SYNC from infra.s3_buckets import LANDSAT_SYNC_BUCKET_NAME from infra.sqs_queues import LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME", "True, \"email_on_success\": True, \"email_on_retry\": False, \"retries\": 0, \"version\": \"0.0.1\", \"on_failure_callback\": task_fail_slack_alert, } def", "report and generate messages to fill missing scenes #### Utility utilization The DAG", "the gap\"], catchup=False, ) as dag: PROCESSES = [] satellites = [ \"landsat_8\",", "gap\"], catchup=False, ) as dag: PROCESSES = [] satellites = [ \"landsat_8\", \"landsat_7\",", "sqs_hook.get_queue_by_name(QueueName=LANDSAT_SYNC_USGS_SNS_FILTER_SQS_NAME) logging.info(\"Sending messages\") for message_dict in message_list: message = { \"Id\": str(count), \"MessageBody\":", "for sat in satellites: PROCESSES.append( PythonOperator( task_id=f\"{sat}_fill_the_gap\", python_callable=fill_the_gap, op_kwargs=dict(landsat=sat, scenes_limit=\"{{ dag_run.conf.scenes_limit }}\"), on_success_callback=task_success_slack_alert,", "Pandas. It's already a dependency. 
missing_scene_paths = [ scene_path for scene_path in gzip.decompress(missing_scene_file_gzip).decode(\"utf-8\").split(\"\\n\")" ]
[ "bcrypt from ...models.user import User class ResetPassword(APIView): def post(self, request): data = request.data", "password using bcrypt algorithm hashed = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt()) user.password = <PASSWORD> user.save() message", "bcrypt algorithm hashed = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt()) user.password = <PASSWORD> user.save() message = \"Your", "<PASSWORD> user.save() message = \"Your password has been sucessfully reset\" return Response({\"message\":message},status=status.HTTP_200_OK) return", "using bcrypt algorithm hashed = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt()) user.password = <PASSWORD> user.save() message =", "= result[0] # hash user password using bcrypt algorithm hashed = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt())", "rest_framework import status from rest_framework.response import Response from rest_framework.views import APIView import bcrypt", "ResetPassword(APIView): def post(self, request): data = request.data password = data['password'] token = data['token']", "= data['token'] if len(password) < 6: return Response({\"message\":\"Invalid Password! 
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
import bcrypt
from ...models.user import User


class ResetPassword(APIView):
    """Reset a user's password using a previously issued forgot-password token."""

    # Minimum accepted password length, matching the user-facing message below.
    MIN_PASSWORD_LENGTH = 6

    def post(self, request):
        """Handle POST with body ``{"password": ..., "token": ...}``.

        Returns:
            200 with a success message when exactly one user matches the token.
            400 when the password is too short or the token matches no user.
        """
        data = request.data
        # Use .get() so a missing key yields a 400 below instead of a
        # KeyError (which would surface as an HTTP 500).
        password = data.get('password', '')
        token = data.get('token', '')

        if len(password) < self.MIN_PASSWORD_LENGTH:
            return Response(
                {"message": "Invalid Password! Password must contain 6 or more characters"},
                status=status.HTTP_400_BAD_REQUEST)

        result = User.objects.filter(forgot_password_token=token)
        if result.count() == 1:
            user = result[0]
            # hash user password using bcrypt algorithm
            hashed = bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt())
            # BUG FIX: the hash was computed but never assigned (the original
            # line held a redacted placeholder). bcrypt.hashpw returns bytes;
            # decode so the text password field stores a str.
            user.password = hashed.decode('utf-8')
            user.save()
            # NOTE(review): consider clearing user.forgot_password_token here
            # so the reset link is single-use — confirm against the User model.
            message = "Your password has been successfully reset"
            return Response({"message": message}, status=status.HTTP_200_OK)

        return Response(dict(error="This user does not exist"), status=status.HTTP_400_BAD_REQUEST)
"""Increase sql path column length to 128

Revision ID: 799310dca712
Revises: ca514840f404
Create Date: 2020-04-09 11:34:05.456439

"""
from alembic import op
import sqlalchemy as sa

# revision identifiers, used by Alembic.
revision = '799310dca712'
down_revision = 'ca514840f404'
branch_labels = None
depends_on = None


def upgrade():
    """Widen ``flask_usage.path`` from VARCHAR(32) to VARCHAR(128)."""
    op.alter_column(
        'flask_usage',
        'path',
        type_=sa.String(length=128),
        existing_type=sa.String(length=32),
    )


def downgrade():
    """Shrink ``flask_usage.path`` back from VARCHAR(128) to VARCHAR(32)."""
    op.alter_column(
        'flask_usage',
        'path',
        type_=sa.String(length=32),
        existing_type=sa.String(length=128),
    )
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand, CommandError
from asylum.tests.fixtures.full import generate_all


class Command(BaseCommand):
    """Management command that populates the database with the full test-fixture set."""

    help = 'Generates full set of test data'

    def handle(self, *args, **options):
        """Delegate all fixture creation to the shared test-fixture helper."""
        generate_all()

    def add_arguments(self, parser):
        """This command takes no command-line options."""
        pass
import numpy as np
import pyvista as pv
from pylie import SE3


class Viewer3D:
    """Visualises the lab in 3D"""

    def __init__(self):
        """Sets up the 3D viewer"""
        self._plotter = pv.Plotter()

        # Add scene origin and plane
        # A 1000x1000 wireframe ground plane plus a large (scale=100) axis
        # triad at the world origin give the scene a visual reference frame.
        scene_plane = pv.Plane(i_size=1000, j_size=1000)
        self._plotter.add_mesh(scene_plane, show_edges=True, style='wireframe')
        self._add_axis(SE3(), 100)

        # Set camera.
        # Hard-coded viewpoint chosen for this particular lab scene.
        self._plotter.camera.position = (100, 1500, -500)
        self._plotter.camera.up = (-0.042739, -0.226979, -0.972961)
        self._plotter.camera.focal_point = (100, 300, -200)

        # interactive_update=True opens a non-blocking window; the caller is
        # expected to drive rendering via update().
        self._plotter.show(title="3D visualization", interactive_update=True)

    def add_body_axes(self, pose_local_body: SE3):
        """Add axes representing the body pose to the 3D world

        :param pose_local_body: The pose of the body in the local coordinate system.
        """
        self._add_axis(pose_local_body)

    def add_camera_axes(self, pose_local_camera: SE3):
        """Add axes representing the camera pose to the 3D world

        :param pose_local_camera: The pose of the camera in the local coordinate system.
        """
        self._add_axis(pose_local_camera)

    def add_camera_frustum(self, camera_model, image):
        """Add a frustum representing the camera model and image to the 3D world

        :param camera_model: Camera model providing pose_world_camera and
            pixel_to_normalised (project-defined type — see _add_frustum).
        :param image: Image as a (rows, cols, channels) array, presumably BGR
            (it is channel-reversed before texturing) — TODO confirm.
        """
        self._add_frustum(camera_model, image)

    def _add_axis(self, pose: SE3, scale=10.0):
        """Draw an RGB axis triad (x=red, y=green, z=blue) at the given pose.

        :param pose: Pose of the axes in the world frame.
        :param scale: Length scale of the arrows; the origin sphere radius is
            0.1 * scale.
        """
        T = pose.to_matrix()

        # Small sphere marking the frame origin.
        point = pv.Sphere(radius=0.1*scale)
        point.transform(T)
        self._plotter.add_mesh(point)

        # One arrow per axis, transformed into the pose's frame.
        x_arrow = pv.Arrow(direction=(1.0, 0.0, 0.0), scale=scale)
        x_arrow.transform(T)
        self._plotter.add_mesh(x_arrow, color='red')

        y_arrow = pv.Arrow(direction=(0.0, 1.0, 0.0), scale=scale)
        y_arrow.transform(T)
        self._plotter.add_mesh(y_arrow, color='green')

        z_arrow = pv.Arrow(direction=(0.0, 0.0, 1.0), scale=scale)
        z_arrow.transform(T)
        self._plotter.add_mesh(z_arrow, color='blue')

    def _add_frustum(self, camera_model, image, scale=20.0):
        """Build and add a textured view frustum for camera_model and image.

        :param camera_model: Provides pose_world_camera (SE3-like) and
            pixel_to_normalised (pixel -> normalised image-plane coords).
        :param image: (rows, cols, channels) array used as the frustum texture.
        :param scale: Depth scaling applied to the unit-depth frustum.
        """
        # World-from-camera transform with the frustum scaled about the
        # camera centre (homogeneous w left at 1.0).
        S = camera_model.pose_world_camera.to_matrix() @ np.diag([scale, scale, scale, 1.0])

        img_height, img_width = image.shape[:2]

        # Image-plane corners in normalised coordinates at unit depth.
        # NOTE(review): the "bottom_left" name maps to pixel
        # (img_width-1, img_height-1) — presumably intentional given the later
        # vertical/channel flips of the texture, but worth confirming.
        point_bottom_left = np.squeeze(camera_model.pixel_to_normalised(np.array([img_width-1., img_height-1.])))
        point_bottom_right = np.squeeze(camera_model.pixel_to_normalised(np.array([0., img_height-1.])))
        point_top_left = np.squeeze(camera_model.pixel_to_normalised(np.array([0., 0.])))
        point_top_right = np.squeeze(camera_model.pixel_to_normalised(np.array([img_width-1., 0.])))
        # Apex of the frustum: the camera's optical centre.
        point_focal = np.zeros([3])

        # Wireframe pyramid from the four corners to the camera centre.
        pyramid = pv.Pyramid([point_bottom_left, point_bottom_right, point_top_left, point_top_right, point_focal])
        pyramid.transform(S)

        # Textured rectangle spanning the image plane.
        rectangle = pv.Rectangle([point_bottom_left, point_bottom_right, point_top_left, point_top_right])
        rectangle.texture_map_to_plane(inplace=True)
        rectangle.transform(S)

        # Flip vertically and reverse channels (BGR -> RGB, presumably an
        # OpenCV image — TODO confirm) before converting to a texture.
        image_flipped_rgb = image[::-1, :, ::-1].copy()
        tex = pv.numpy_to_texture(image_flipped_rgb)

        self._plotter.add_mesh(pyramid, show_edges=True, style='wireframe')
        self._plotter.add_mesh(rectangle, texture=tex, opacity=0.9)

    def update(self, time=500):
        """Process window events and redraw, waiting up to `time` milliseconds."""
        self._plotter.update(time)
\"\"\" self._add_axis(pose_local_body) def add_camera_axes(self, pose_local_camera: SE3): \"\"\"Add axes", "scene_plane = pv.Plane(i_size=1000, j_size=1000) self._plotter.add_mesh(scene_plane, show_edges=True, style='wireframe') self._add_axis(SE3(), 100) # Set camera. self._plotter.camera.position", "point.transform(T) self._plotter.add_mesh(point) x_arrow = pv.Arrow(direction=(1.0, 0.0, 0.0), scale=scale) x_arrow.transform(T) self._plotter.add_mesh(x_arrow, color='red') y_arrow =", "pylie import SE3 class Viewer3D: \"\"\"Visualises the lab in 3D\"\"\" def __init__(self): \"\"\"Sets", "point_focal]) pyramid.transform(S) rectangle = pv.Rectangle([point_bottom_left, point_bottom_right, point_top_left, point_top_right]) rectangle.texture_map_to_plane(inplace=True) rectangle.transform(S) image_flipped_rgb = image[::-1,", "the 3D viewer\"\"\" self._plotter = pv.Plotter() # Add scene origin and plane scene_plane", "pv.Pyramid([point_bottom_left, point_bottom_right, point_top_left, point_top_right, point_focal]) pyramid.transform(S) rectangle = pv.Rectangle([point_bottom_left, point_bottom_right, point_top_left, point_top_right]) rectangle.texture_map_to_plane(inplace=True)", "image.shape[:2] point_bottom_left = np.squeeze(camera_model.pixel_to_normalised(np.array([img_width-1., img_height-1.]))) point_bottom_right = np.squeeze(camera_model.pixel_to_normalised(np.array([0., img_height-1.]))) point_top_left = np.squeeze(camera_model.pixel_to_normalised(np.array([0., 0.])))", "point_bottom_right, point_top_left, point_top_right]) rectangle.texture_map_to_plane(inplace=True) rectangle.transform(S) image_flipped_rgb = image[::-1, :, ::-1].copy() tex = pv.numpy_to_texture(image_flipped_rgb)", "j_size=1000) self._plotter.add_mesh(scene_plane, show_edges=True, style='wireframe') self._add_axis(SE3(), 100) # Set camera. 
self._plotter.camera.position = (100, 1500,", "color='blue') def _add_frustum(self, camera_model, image, scale=20.0): S = camera_model.pose_world_camera.to_matrix() @ np.diag([scale, scale, scale,", "rectangle = pv.Rectangle([point_bottom_left, point_bottom_right, point_top_left, point_top_right]) rectangle.texture_map_to_plane(inplace=True) rectangle.transform(S) image_flipped_rgb = image[::-1, :, ::-1].copy()", "The pose of the body in the local coordinate system. \"\"\" self._add_axis(pose_local_body) def", "np.squeeze(camera_model.pixel_to_normalised(np.array([img_width-1., img_height-1.]))) point_bottom_right = np.squeeze(camera_model.pixel_to_normalised(np.array([0., img_height-1.]))) point_top_left = np.squeeze(camera_model.pixel_to_normalised(np.array([0., 0.]))) point_top_right = np.squeeze(camera_model.pixel_to_normalised(np.array([img_width-1.,", "(100, 1500, -500) self._plotter.camera.up = (-0.042739, -0.226979, -0.972961) self._plotter.camera.focal_point = (100, 300, -200)", "self._plotter.add_mesh(scene_plane, show_edges=True, style='wireframe') self._add_axis(SE3(), 100) # Set camera. self._plotter.camera.position = (100, 1500, -500)", "camera in the local coordinate system. \"\"\" self._add_axis(pose_local_camera) def add_camera_frustum(self, camera_model, image): \"\"\"Add", "__init__(self): \"\"\"Sets up the 3D viewer\"\"\" self._plotter = pv.Plotter() # Add scene origin", "the local coordinate system. 
\"\"\" self._add_axis(pose_local_body) def add_camera_axes(self, pose_local_camera: SE3): \"\"\"Add axes representing", "import numpy as np import pyvista as pv from pylie import SE3 class", "pose_local_camera: SE3): \"\"\"Add axes representing the camera pose to the 3D world :param", "= np.squeeze(camera_model.pixel_to_normalised(np.array([0., 0.]))) point_top_right = np.squeeze(camera_model.pixel_to_normalised(np.array([img_width-1., 0.]))) point_focal = np.zeros([3]) pyramid = pv.Pyramid([point_bottom_left,", "image, scale=20.0): S = camera_model.pose_world_camera.to_matrix() @ np.diag([scale, scale, scale, 1.0]) img_height, img_width =", "= pv.Rectangle([point_bottom_left, point_bottom_right, point_top_left, point_top_right]) rectangle.texture_map_to_plane(inplace=True) rectangle.transform(S) image_flipped_rgb = image[::-1, :, ::-1].copy() tex", "lab in 3D\"\"\" def __init__(self): \"\"\"Sets up the 3D viewer\"\"\" self._plotter = pv.Plotter()", "in the local coordinate system. \"\"\" self._add_axis(pose_local_camera) def add_camera_frustum(self, camera_model, image): \"\"\"Add a", "def add_camera_frustum(self, camera_model, image): \"\"\"Add a frustum representing the camera model and image", "\"\"\"Add axes representing the body pose to the 3D world :param pose_local_body: The", "0.]))) point_top_right = np.squeeze(camera_model.pixel_to_normalised(np.array([img_width-1., 0.]))) point_focal = np.zeros([3]) pyramid = pv.Pyramid([point_bottom_left, point_bottom_right, point_top_left,", "scale=20.0): S = camera_model.pose_world_camera.to_matrix() @ np.diag([scale, scale, scale, 1.0]) img_height, img_width = image.shape[:2]", "to the 3D world :param pose_local_camera: The pose of the camera in the", "1.0, 0.0), scale=scale) y_arrow.transform(T) self._plotter.add_mesh(y_arrow, color='green') z_arrow = pv.Arrow(direction=(0.0, 0.0, 1.0), scale=scale) z_arrow.transform(T)", "-500) self._plotter.camera.up = (-0.042739, -0.226979, -0.972961) 
self._plotter.camera.focal_point = (100, 300, -200) self._plotter.show(title=\"3D visualization\",", "\"\"\"Sets up the 3D viewer\"\"\" self._plotter = pv.Plotter() # Add scene origin and", "self._plotter.add_mesh(z_arrow, color='blue') def _add_frustum(self, camera_model, image, scale=20.0): S = camera_model.pose_world_camera.to_matrix() @ np.diag([scale, scale,", "scale=scale) y_arrow.transform(T) self._plotter.add_mesh(y_arrow, color='green') z_arrow = pv.Arrow(direction=(0.0, 0.0, 1.0), scale=scale) z_arrow.transform(T) self._plotter.add_mesh(z_arrow, color='blue')", "\"\"\"Add axes representing the camera pose to the 3D world :param pose_local_camera: The", "frustum representing the camera model and image to the 3D world\"\"\" self._add_frustum(camera_model, image)", "the 3D world :param pose_local_camera: The pose of the camera in the local", "scale=scale) z_arrow.transform(T) self._plotter.add_mesh(z_arrow, color='blue') def _add_frustum(self, camera_model, image, scale=20.0): S = camera_model.pose_world_camera.to_matrix() @", "system. \"\"\" self._add_axis(pose_local_body) def add_camera_axes(self, pose_local_camera: SE3): \"\"\"Add axes representing the camera pose", "= pv.Arrow(direction=(1.0, 0.0, 0.0), scale=scale) x_arrow.transform(T) self._plotter.add_mesh(x_arrow, color='red') y_arrow = pv.Arrow(direction=(0.0, 1.0, 0.0),", "def __init__(self): \"\"\"Sets up the 3D viewer\"\"\" self._plotter = pv.Plotter() # Add scene", "scale, 1.0]) img_height, img_width = image.shape[:2] point_bottom_left = np.squeeze(camera_model.pixel_to_normalised(np.array([img_width-1., img_height-1.]))) point_bottom_right = np.squeeze(camera_model.pixel_to_normalised(np.array([0.,", "viewer\"\"\" self._plotter = pv.Plotter() # Add scene origin and plane scene_plane = pv.Plane(i_size=1000,", "self._plotter = pv.Plotter() # Add scene origin and plane scene_plane = pv.Plane(i_size=1000, j_size=1000)", "self._add_axis(SE3(), 100) # Set camera. 
self._plotter.camera.position = (100, 1500, -500) self._plotter.camera.up = (-0.042739,", "the lab in 3D\"\"\" def __init__(self): \"\"\"Sets up the 3D viewer\"\"\" self._plotter =", "world :param pose_local_camera: The pose of the camera in the local coordinate system.", "= pose.to_matrix() point = pv.Sphere(radius=0.1*scale) point.transform(T) self._plotter.add_mesh(point) x_arrow = pv.Arrow(direction=(1.0, 0.0, 0.0), scale=scale)", "from pylie import SE3 class Viewer3D: \"\"\"Visualises the lab in 3D\"\"\" def __init__(self):", "rectangle.texture_map_to_plane(inplace=True) rectangle.transform(S) image_flipped_rgb = image[::-1, :, ::-1].copy() tex = pv.numpy_to_texture(image_flipped_rgb) self._plotter.add_mesh(pyramid, show_edges=True, style='wireframe')", "representing the camera pose to the 3D world :param pose_local_camera: The pose of", "point_bottom_right = np.squeeze(camera_model.pixel_to_normalised(np.array([0., img_height-1.]))) point_top_left = np.squeeze(camera_model.pixel_to_normalised(np.array([0., 0.]))) point_top_right = np.squeeze(camera_model.pixel_to_normalised(np.array([img_width-1., 0.]))) point_focal", "pv.Plane(i_size=1000, j_size=1000) self._plotter.add_mesh(scene_plane, show_edges=True, style='wireframe') self._add_axis(SE3(), 100) # Set camera. self._plotter.camera.position = (100,", "the body in the local coordinate system. \"\"\" self._add_axis(pose_local_body) def add_camera_axes(self, pose_local_camera: SE3):", "self._add_frustum(camera_model, image) def _add_axis(self, pose: SE3, scale=10.0): T = pose.to_matrix() point = pv.Sphere(radius=0.1*scale)", "the local coordinate system. 
\"\"\" self._add_axis(pose_local_camera) def add_camera_frustum(self, camera_model, image): \"\"\"Add a frustum", "Add scene origin and plane scene_plane = pv.Plane(i_size=1000, j_size=1000) self._plotter.add_mesh(scene_plane, show_edges=True, style='wireframe') self._add_axis(SE3(),", "camera model and image to the 3D world\"\"\" self._add_frustum(camera_model, image) def _add_axis(self, pose:", "SE3, scale=10.0): T = pose.to_matrix() point = pv.Sphere(radius=0.1*scale) point.transform(T) self._plotter.add_mesh(point) x_arrow = pv.Arrow(direction=(1.0,", "point_focal = np.zeros([3]) pyramid = pv.Pyramid([point_bottom_left, point_bottom_right, point_top_left, point_top_right, point_focal]) pyramid.transform(S) rectangle =", "3D world\"\"\" self._add_frustum(camera_model, image) def _add_axis(self, pose: SE3, scale=10.0): T = pose.to_matrix() point", "self._plotter.camera.position = (100, 1500, -500) self._plotter.camera.up = (-0.042739, -0.226979, -0.972961) self._plotter.camera.focal_point = (100,", "= pv.Plane(i_size=1000, j_size=1000) self._plotter.add_mesh(scene_plane, show_edges=True, style='wireframe') self._add_axis(SE3(), 100) # Set camera. self._plotter.camera.position =", "system. \"\"\" self._add_axis(pose_local_camera) def add_camera_frustum(self, camera_model, image): \"\"\"Add a frustum representing the camera", "S = camera_model.pose_world_camera.to_matrix() @ np.diag([scale, scale, scale, 1.0]) img_height, img_width = image.shape[:2] point_bottom_left", "pose to the 3D world :param pose_local_camera: The pose of the camera in", "camera. 
self._plotter.camera.position = (100, 1500, -500) self._plotter.camera.up = (-0.042739, -0.226979, -0.972961) self._plotter.camera.focal_point =", "T = pose.to_matrix() point = pv.Sphere(radius=0.1*scale) point.transform(T) self._plotter.add_mesh(point) x_arrow = pv.Arrow(direction=(1.0, 0.0, 0.0),", "= camera_model.pose_world_camera.to_matrix() @ np.diag([scale, scale, scale, 1.0]) img_height, img_width = image.shape[:2] point_bottom_left =", "_add_axis(self, pose: SE3, scale=10.0): T = pose.to_matrix() point = pv.Sphere(radius=0.1*scale) point.transform(T) self._plotter.add_mesh(point) x_arrow", "point_top_left, point_top_right]) rectangle.texture_map_to_plane(inplace=True) rectangle.transform(S) image_flipped_rgb = image[::-1, :, ::-1].copy() tex = pv.numpy_to_texture(image_flipped_rgb) self._plotter.add_mesh(pyramid,", "coordinate system. \"\"\" self._add_axis(pose_local_camera) def add_camera_frustum(self, camera_model, image): \"\"\"Add a frustum representing the", "SE3): \"\"\"Add axes representing the camera pose to the 3D world :param pose_local_camera:", "x_arrow.transform(T) self._plotter.add_mesh(x_arrow, color='red') y_arrow = pv.Arrow(direction=(0.0, 1.0, 0.0), scale=scale) y_arrow.transform(T) self._plotter.add_mesh(y_arrow, color='green') z_arrow", "of the body in the local coordinate system. 
\"\"\" self._add_axis(pose_local_body) def add_camera_axes(self, pose_local_camera:", "pv.numpy_to_texture(image_flipped_rgb) self._plotter.add_mesh(pyramid, show_edges=True, style='wireframe') self._plotter.add_mesh(rectangle, texture=tex, opacity=0.9) def update(self, time=500): self._plotter.update(time) def show(self):", "@ np.diag([scale, scale, scale, 1.0]) img_height, img_width = image.shape[:2] point_bottom_left = np.squeeze(camera_model.pixel_to_normalised(np.array([img_width-1., img_height-1.])))", "0.0, 1.0), scale=scale) z_arrow.transform(T) self._plotter.add_mesh(z_arrow, color='blue') def _add_frustum(self, camera_model, image, scale=20.0): S =", "body in the local coordinate system. \"\"\" self._add_axis(pose_local_body) def add_camera_axes(self, pose_local_camera: SE3): \"\"\"Add", "-0.226979, -0.972961) self._plotter.camera.focal_point = (100, 300, -200) self._plotter.show(title=\"3D visualization\", interactive_update=True) def add_body_axes(self, pose_local_body:", "def add_body_axes(self, pose_local_body: SE3): \"\"\"Add axes representing the body pose to the 3D", "0.0), scale=scale) x_arrow.transform(T) self._plotter.add_mesh(x_arrow, color='red') y_arrow = pv.Arrow(direction=(0.0, 1.0, 0.0), scale=scale) y_arrow.transform(T) self._plotter.add_mesh(y_arrow,", "pyramid = pv.Pyramid([point_bottom_left, point_bottom_right, point_top_left, point_top_right, point_focal]) pyramid.transform(S) rectangle = pv.Rectangle([point_bottom_left, point_bottom_right, point_top_left,", "def _add_frustum(self, camera_model, image, scale=20.0): S = camera_model.pose_world_camera.to_matrix() @ np.diag([scale, scale, scale, 1.0])", "as np import pyvista as pv from pylie import SE3 class Viewer3D: \"\"\"Visualises", "self._plotter.add_mesh(pyramid, show_edges=True, style='wireframe') self._plotter.add_mesh(rectangle, texture=tex, opacity=0.9) def update(self, time=500): self._plotter.update(time) def show(self): self._plotter.show()", "pose to the 3D world :param 
pose_local_body: The pose of the body in", "3D\"\"\" def __init__(self): \"\"\"Sets up the 3D viewer\"\"\" self._plotter = pv.Plotter() # Add", "style='wireframe') self._add_axis(SE3(), 100) # Set camera. self._plotter.camera.position = (100, 1500, -500) self._plotter.camera.up =", "np import pyvista as pv from pylie import SE3 class Viewer3D: \"\"\"Visualises the", "point_top_right = np.squeeze(camera_model.pixel_to_normalised(np.array([img_width-1., 0.]))) point_focal = np.zeros([3]) pyramid = pv.Pyramid([point_bottom_left, point_bottom_right, point_top_left, point_top_right,", "coordinate system. \"\"\" self._add_axis(pose_local_body) def add_camera_axes(self, pose_local_camera: SE3): \"\"\"Add axes representing the camera", "\"\"\"Add a frustum representing the camera model and image to the 3D world\"\"\"", "point_top_right]) rectangle.texture_map_to_plane(inplace=True) rectangle.transform(S) image_flipped_rgb = image[::-1, :, ::-1].copy() tex = pv.numpy_to_texture(image_flipped_rgb) self._plotter.add_mesh(pyramid, show_edges=True,", "image[::-1, :, ::-1].copy() tex = pv.numpy_to_texture(image_flipped_rgb) self._plotter.add_mesh(pyramid, show_edges=True, style='wireframe') self._plotter.add_mesh(rectangle, texture=tex, opacity=0.9) def", "import pyvista as pv from pylie import SE3 class Viewer3D: \"\"\"Visualises the lab", "world\"\"\" self._add_frustum(camera_model, image) def _add_axis(self, pose: SE3, scale=10.0): T = pose.to_matrix() point =", "color='green') z_arrow = pv.Arrow(direction=(0.0, 0.0, 1.0), scale=scale) z_arrow.transform(T) self._plotter.add_mesh(z_arrow, color='blue') def _add_frustum(self, camera_model,", "in 3D\"\"\" def __init__(self): \"\"\"Sets up the 3D viewer\"\"\" self._plotter = pv.Plotter() #" ]
[ "\"\"\" \"\"\" import pinot ds = pinot.data.moonshot_with_date return ds def test_moonshot(moonshot): \"\"\" Parameters", "pinot.data.datasets @pytest.fixture def moonshot(): \"\"\" \"\"\" import pinot ds = pinot.data.moonshot_with_date return ds", "\"\"\" import pinot ds = pinot.data.moonshot_with_date return ds def test_moonshot(moonshot): \"\"\" Parameters ----------", "moonshot(): \"\"\" \"\"\" import pinot ds = pinot.data.moonshot_with_date return ds def test_moonshot(moonshot): \"\"\"", "@pytest.fixture def moonshot(): \"\"\" \"\"\" import pinot ds = pinot.data.moonshot_with_date return ds def", "test_import(): \"\"\" \"\"\" import pinot.data.datasets @pytest.fixture def moonshot(): \"\"\" \"\"\" import pinot ds", "\"\"\" \"\"\" import pinot.data.datasets @pytest.fixture def moonshot(): \"\"\" \"\"\" import pinot ds =", "import pinot.data.datasets @pytest.fixture def moonshot(): \"\"\" \"\"\" import pinot ds = pinot.data.moonshot_with_date return", "def moonshot(): \"\"\" \"\"\" import pinot ds = pinot.data.moonshot_with_date return ds def test_moonshot(moonshot):", "pinot ds = pinot.data.moonshot_with_date return ds def test_moonshot(moonshot): \"\"\" Parameters ---------- moonshot :", "ds = pinot.data.moonshot_with_date return ds def test_moonshot(moonshot): \"\"\" Parameters ---------- moonshot : Returns", "import pinot ds = pinot.data.moonshot_with_date return ds def test_moonshot(moonshot): \"\"\" Parameters ---------- moonshot", "= pinot.data.moonshot_with_date return ds def test_moonshot(moonshot): \"\"\" Parameters ---------- moonshot : Returns -------", "def test_import(): \"\"\" \"\"\" import pinot.data.datasets @pytest.fixture def moonshot(): \"\"\" \"\"\" import pinot", "return ds def test_moonshot(moonshot): \"\"\" Parameters ---------- moonshot : Returns ------- \"\"\" print(moonshot)", "import pytest def test_import(): \"\"\" \"\"\" import pinot.data.datasets @pytest.fixture def moonshot(): \"\"\" \"\"\"", "pytest def test_import(): \"\"\" 
\"\"\" import pinot.data.datasets @pytest.fixture def moonshot(): \"\"\" \"\"\" import", "pinot.data.moonshot_with_date return ds def test_moonshot(moonshot): \"\"\" Parameters ---------- moonshot : Returns ------- \"\"\"", "\"\"\" import pinot.data.datasets @pytest.fixture def moonshot(): \"\"\" \"\"\" import pinot ds = pinot.data.moonshot_with_date" ]
[ "<reponame>YuriyLisovskiy/NeuralNetwork<filename>tests/unittest/test_exceptions.py import unittest from neural_network.network import net from neural_network.config.config import INPUT_LAYER, HIDDEN_LAYERS, OUTPUT_LAYER", "} net.NeuralNetwork(**params) def test_output_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': [1, 1], 'hidden_layers':", "params = { 'input_layer': INPUT_LAYER, 'hidden_layers': [9], 'output_layer': OUTPUT_LAYER } net.NeuralNetwork(**params) def test_input_layer_exception(self):", "[9], 'output_layer': OUTPUT_LAYER } net.NeuralNetwork(**params) def test_input_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer':", "HIDDEN_LAYERS, OUTPUT_LAYER class TestExceptions(unittest.TestCase): def test_last_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER,", "class TestExceptions(unittest.TestCase): def test_last_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS,", "OUTPUT_LAYER class TestExceptions(unittest.TestCase): def test_last_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers':", "neural_network.network import net from neural_network.config.config import INPUT_LAYER, HIDDEN_LAYERS, OUTPUT_LAYER class TestExceptions(unittest.TestCase): def test_last_layer_exception(self):", "test_last_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [2] }", "1], 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1] } net.NeuralNetwork(**params) def run(suite): suite.addTest(TestExceptions('test_last_layer_exception')) suite.addTest(TestExceptions('test_redundant_layers_exception')) suite.addTest(TestExceptions('test_input_layer_exception')) suite.addTest(TestExceptions('test_output_layer_exception'))", 
"neural_network.config.config import INPUT_LAYER, HIDDEN_LAYERS, OUTPUT_LAYER class TestExceptions(unittest.TestCase): def test_last_layer_exception(self): with self.assertRaises(ValueError): params =", "import INPUT_LAYER, HIDDEN_LAYERS, OUTPUT_LAYER class TestExceptions(unittest.TestCase): def test_last_layer_exception(self): with self.assertRaises(ValueError): params = {", "with self.assertRaises(ValueError): params = { 'input_layer': [1, 1], 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1] }", "params = { 'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1, 2] } net.NeuralNetwork(**params) def", "INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [2] } net.NeuralNetwork(**params) def test_redundant_layers_exception(self): with self.assertRaises(ValueError): params =", "2] } net.NeuralNetwork(**params) def test_output_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': [1, 1],", "{ 'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [2] } net.NeuralNetwork(**params) def test_redundant_layers_exception(self): with self.assertRaises(ValueError):", "INPUT_LAYER, 'hidden_layers': [9], 'output_layer': OUTPUT_LAYER } net.NeuralNetwork(**params) def test_input_layer_exception(self): with self.assertRaises(ValueError): params =", "'input_layer': INPUT_LAYER, 'hidden_layers': [9], 'output_layer': OUTPUT_LAYER } net.NeuralNetwork(**params) def test_input_layer_exception(self): with self.assertRaises(ValueError): params", "[1, 2] } net.NeuralNetwork(**params) def test_output_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': [1,", "def test_redundant_layers_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers': [9], 'output_layer': OUTPUT_LAYER", "net.NeuralNetwork(**params) def test_output_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': [1, 1], 
'hidden_layers': HIDDEN_LAYERS,", "with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [2] } net.NeuralNetwork(**params)", "{ 'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1, 2] } net.NeuralNetwork(**params) def test_output_layer_exception(self): with", "def test_last_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [2]", "'input_layer': [1, 1], 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1] } net.NeuralNetwork(**params) def run(suite): suite.addTest(TestExceptions('test_last_layer_exception')) suite.addTest(TestExceptions('test_redundant_layers_exception'))", "'hidden_layers': [9], 'output_layer': OUTPUT_LAYER } net.NeuralNetwork(**params) def test_input_layer_exception(self): with self.assertRaises(ValueError): params = {", "= { 'input_layer': [1, 1], 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1] } net.NeuralNetwork(**params) def run(suite):", "unittest from neural_network.network import net from neural_network.config.config import INPUT_LAYER, HIDDEN_LAYERS, OUTPUT_LAYER class TestExceptions(unittest.TestCase):", "} net.NeuralNetwork(**params) def test_input_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS,", "= { 'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [2] } net.NeuralNetwork(**params) def test_redundant_layers_exception(self): with", "import net from neural_network.config.config import INPUT_LAYER, HIDDEN_LAYERS, OUTPUT_LAYER class TestExceptions(unittest.TestCase): def test_last_layer_exception(self): with", "'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [2] } net.NeuralNetwork(**params) def test_redundant_layers_exception(self): with self.assertRaises(ValueError): params", "{ 'input_layer': INPUT_LAYER, 
'hidden_layers': [9], 'output_layer': OUTPUT_LAYER } net.NeuralNetwork(**params) def test_input_layer_exception(self): with self.assertRaises(ValueError):", "'output_layer': OUTPUT_LAYER } net.NeuralNetwork(**params) def test_input_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER,", "test_input_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1, 2]", "with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers': [9], 'output_layer': OUTPUT_LAYER } net.NeuralNetwork(**params)", "self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers': [9], 'output_layer': OUTPUT_LAYER } net.NeuralNetwork(**params) def", "= { 'input_layer': INPUT_LAYER, 'hidden_layers': [9], 'output_layer': OUTPUT_LAYER } net.NeuralNetwork(**params) def test_input_layer_exception(self): with", "with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1, 2] }", "self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1, 2] } net.NeuralNetwork(**params)", "net.NeuralNetwork(**params) def test_input_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer':", "'output_layer': [1, 2] } net.NeuralNetwork(**params) def test_output_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer':", "from neural_network.config.config import INPUT_LAYER, HIDDEN_LAYERS, OUTPUT_LAYER class TestExceptions(unittest.TestCase): def test_last_layer_exception(self): with self.assertRaises(ValueError): params", "net.NeuralNetwork(**params) def test_redundant_layers_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers': [9], 
'output_layer':", "'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1, 2] } net.NeuralNetwork(**params) def test_output_layer_exception(self): with self.assertRaises(ValueError):", "= { 'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1, 2] } net.NeuralNetwork(**params) def test_output_layer_exception(self):", "INPUT_LAYER, HIDDEN_LAYERS, OUTPUT_LAYER class TestExceptions(unittest.TestCase): def test_last_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer':", "params = { 'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [2] } net.NeuralNetwork(**params) def test_redundant_layers_exception(self):", "} net.NeuralNetwork(**params) def test_redundant_layers_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers': [9],", "from neural_network.network import net from neural_network.config.config import INPUT_LAYER, HIDDEN_LAYERS, OUTPUT_LAYER class TestExceptions(unittest.TestCase): def", "HIDDEN_LAYERS, 'output_layer': [2] } net.NeuralNetwork(**params) def test_redundant_layers_exception(self): with self.assertRaises(ValueError): params = { 'input_layer':", "{ 'input_layer': [1, 1], 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1] } net.NeuralNetwork(**params) def run(suite): suite.addTest(TestExceptions('test_last_layer_exception'))", "net from neural_network.config.config import INPUT_LAYER, HIDDEN_LAYERS, OUTPUT_LAYER class TestExceptions(unittest.TestCase): def test_last_layer_exception(self): with self.assertRaises(ValueError):", "self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [2] } net.NeuralNetwork(**params) def", "'output_layer': [2] } net.NeuralNetwork(**params) def test_redundant_layers_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER,", "test_redundant_layers_exception(self): with 
self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers': [9], 'output_layer': OUTPUT_LAYER }", "def test_input_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1,", "test_output_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': [1, 1], 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1]", "def test_output_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': [1, 1], 'hidden_layers': HIDDEN_LAYERS, 'output_layer':", "HIDDEN_LAYERS, 'output_layer': [1, 2] } net.NeuralNetwork(**params) def test_output_layer_exception(self): with self.assertRaises(ValueError): params = {", "'hidden_layers': HIDDEN_LAYERS, 'output_layer': [2] } net.NeuralNetwork(**params) def test_redundant_layers_exception(self): with self.assertRaises(ValueError): params = {", "OUTPUT_LAYER } net.NeuralNetwork(**params) def test_input_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers':", "[1, 1], 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1] } net.NeuralNetwork(**params) def run(suite): suite.addTest(TestExceptions('test_last_layer_exception')) suite.addTest(TestExceptions('test_redundant_layers_exception')) suite.addTest(TestExceptions('test_input_layer_exception'))", "[2] } net.NeuralNetwork(**params) def test_redundant_layers_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers':", "'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1, 2] } net.NeuralNetwork(**params) def test_output_layer_exception(self): with self.assertRaises(ValueError): params =", "self.assertRaises(ValueError): params = { 'input_layer': [1, 1], 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1] } net.NeuralNetwork(**params)", "params = { 'input_layer': [1, 1], 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1] } 
net.NeuralNetwork(**params) def", "import unittest from neural_network.network import net from neural_network.config.config import INPUT_LAYER, HIDDEN_LAYERS, OUTPUT_LAYER class", "INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer': [1, 2] } net.NeuralNetwork(**params) def test_output_layer_exception(self): with self.assertRaises(ValueError): params", "TestExceptions(unittest.TestCase): def test_last_layer_exception(self): with self.assertRaises(ValueError): params = { 'input_layer': INPUT_LAYER, 'hidden_layers': HIDDEN_LAYERS, 'output_layer':" ]
[ "ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) _SS_PAD2SIZE = ( _SS_MAXSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) - _SS_PAD1SIZE", "ctypes.c_uint32), (\"kf_file_rdev_freebsd11\", ctypes.c_uint32), (\"kf_file_mode\", ctypes.c_uint16), (\"kf_file_pad0\", ctypes.c_uint16), (\"kf_file_pad1\", ctypes.c_uint32), ] class KinfoFileSem(ctypes.Structure): _fields_", "ctypes.c_uint16), (\"kf_sock_recvq\", ctypes.c_uint32), ] class KinfoFileFile(ctypes.Structure): _fields_ = [ (\"kf_file_type\", ctypes.c_int), (\"kf_spareint\", (ctypes.c_int", "* 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pts_dev_freebsd11\", ctypes.c_uint32), (\"kf_pts_pad0\", ctypes.c_uint32), (\"kf_pts_dev\", ctypes.c_uint64), (\"kf_pts_pad1\",", "class KinfoFile11(ctypes.Structure): _fields_ = [ (\"kf_vnode_type\", ctypes.c_int), (\"kf_sock_domain\", ctypes.c_int), (\"kf_sock_type\", ctypes.c_int), (\"kf_sock_protocol\", ctypes.c_int),", "KERN_PROC_FILEDESC, pid], None ) kinfo_file_size = ctypes.sizeof(KinfoFile) i = 0 while i <", "import ctypes import os from typing import Iterator, Optional from . 
import bsd_util", "SockaddrStorage), ] class KinfoFileSock(ctypes.Structure): _fields_ = [ (\"kf_sock_sendq\", ctypes.c_uint32), (\"kf_sock_domain0\", ctypes.c_int), (\"kf_sock_type0\", ctypes.c_int),", "(ctypes.c_uint64 * 32)), (\"kf_pid\", pid_t), ] class KinfoFileUn(ctypes.Union): _fields_ = [ (\"kf_freebsd11\", KinfoFile11),", "ctypes.c_uint32), (\"kf_sem_mode\", ctypes.c_uint16), ] class KinfoFilePipe(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)),", "(\"kf_sock_recvq\", ctypes.c_uint32), ] class KinfoFileFile(ctypes.Structure): _fields_ = [ (\"kf_file_type\", ctypes.c_int), (\"kf_spareint\", (ctypes.c_int *", "PATH_MAX = 1024 pid_t = ctypes.c_int sa_family_t = ctypes.c_uint8 _SS_MAXSIZE = 128 _SS_ALIGNSIZE", "* 4)), ] class KinfoFileProc(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\",", "33 KF_TYPE_VNODE = 1 PATH_MAX = 1024 pid_t = ctypes.c_int sa_family_t = ctypes.c_uint8", "- _SS_ALIGNSIZE ) CAP_RIGHTS_VERSION = 0 class SockaddrStorage(ctypes.Structure): _fields_ = [ (\"ss_len\", ctypes.c_ubyte),", "[ (\"kf_file_type\", ctypes.c_int), (\"kf_spareint\", (ctypes.c_int * 3)), (\"kf_spareint64\", (ctypes.c_uint64 * 30)), (\"kf_file_fsid\", ctypes.c_uint64),", "= kinfo_file_data[i: i + kinfo_file_size].ljust(kinfo_file_size, b\"\\0\") kfile = KinfoFile.from_buffer_copy(kfile_data) if kfile.kf_structsize == 0:", "ctypes.c_uint64), (\"kf_sock_inpcb\", ctypes.c_uint64), (\"kf_sock_unpconn\", ctypes.c_uint64), (\"kf_sock_snd_sb_state\", ctypes.c_uint16), (\"kf_sock_rcv_sb_state\", ctypes.c_uint16), (\"kf_sock_recvq\", ctypes.c_uint32), ] class", "(\"kf_sock_unpconn\", ctypes.c_uint64), (\"kf_sock_snd_sb_state\", ctypes.c_uint16), (\"kf_sock_rcv_sb_state\", ctypes.c_uint16), (\"kf_sock_recvq\", ctypes.c_uint32), ] class KinfoFileFile(ctypes.Structure): _fields_ =", "ctypes.c_int), (\"kf_fd\", ctypes.c_int), (\"kf_ref_count\", ctypes.c_int), (\"kf_flags\", ctypes.c_int), (\"kf_pad0\", 
ctypes.c_int), (\"kf_offset\", ctypes.c_int64), (\"kf_un\", KinfoFileUn),", "== 0: break yield kfile i += kfile.kf_structsize def try_recover_fd_path(fd: int) -> Optional[str]:", "(\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pipe_addr\", ctypes.c_uint64), (\"kf_pipe_peer\", ctypes.c_uint64), (\"kf_pipe_buffer_cnt\",", "(\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_sem_value\", ctypes.c_uint32), (\"kf_sem_mode\", ctypes.c_uint16), ]", "ctypes.sizeof(sa_family_t) - _SS_PAD1SIZE - _SS_ALIGNSIZE ) CAP_RIGHTS_VERSION = 0 class SockaddrStorage(ctypes.Structure): _fields_ =", "= _SS_ALIGNSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) _SS_PAD2SIZE = ( _SS_MAXSIZE - ctypes.sizeof(ctypes.c_ubyte) -", "ctypes.c_uint16), (\"kf_pad1\", ctypes.c_uint16), (\"_kf_ispare0\", ctypes.c_int), (\"kf_cap_rights\", CapRights), (\"_kf_cap_spare\", ctypes.c_uint64), (\"kf_path\", (ctypes.c_char * PATH_MAX)),", "(\"kf_sock_protocol\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), ] class KinfoFileSock(ctypes.Structure): _fields_ = [ (\"kf_sock_sendq\",", "(\"kf_type\", ctypes.c_int), (\"kf_fd\", ctypes.c_int), (\"kf_ref_count\", ctypes.c_int), (\"kf_flags\", ctypes.c_int), (\"kf_pad0\", ctypes.c_int), (\"kf_offset\", ctypes.c_int64), (\"kf_un\",", "= [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pid\", pid_t), ]", "(\"kf_file_fsid\", ctypes.c_uint64), (\"kf_file_rdev\", ctypes.c_uint64), (\"kf_file_fileid\", ctypes.c_uint64), (\"kf_file_size\", ctypes.c_uint64), (\"kf_file_fsid_freebsd11\", ctypes.c_uint32), (\"kf_file_rdev_freebsd11\", ctypes.c_uint32), (\"kf_file_mode\",", "ctypes.c_uint32), (\"kf_pts_pad0\", ctypes.c_uint32), (\"kf_pts_dev\", ctypes.c_uint64), (\"kf_pts_pad1\", (ctypes.c_uint32 * 4)), ] class KinfoFileProc(ctypes.Structure): _fields_", "ctypes.sizeof(ctypes.c_ubyte) - 
ctypes.sizeof(sa_family_t) - _SS_PAD1SIZE - _SS_ALIGNSIZE ) CAP_RIGHTS_VERSION = 0 class SockaddrStorage(ctypes.Structure):", "4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_sem_value\", ctypes.c_uint32), (\"kf_sem_mode\", ctypes.c_uint16), ] class KinfoFilePipe(ctypes.Structure): _fields_", "_iter_kinfo_files(pid: int) -> Iterator[KinfoFile]: kinfo_file_data = bsd_util.sysctl_bytes_retry( [CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, pid], None )", "KinfoFileSem(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_sem_value\",", "class KinfoFilePipe(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)),", "class KinfoFilePts(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)),", "(\"kf_file\", KinfoFileFile), (\"kf_sem\", KinfoFileSem), (\"kf_pipe\", KinfoFilePipe), (\"kf_pts\", KinfoFilePts), (\"kf_proc\", KinfoFileProc), ] class KinfoFile(ctypes.Structure):", "KinfoFileSock), (\"kf_file\", KinfoFileFile), (\"kf_sem\", KinfoFileSem), (\"kf_pipe\", KinfoFilePipe), (\"kf_pts\", KinfoFilePts), (\"kf_proc\", KinfoFileProc), ] class", "* _SS_PAD2SIZE)), ] class CapRights(ctypes.Structure): _fields_ = [ (\"cr_rights\", (ctypes.c_uint64 * (CAP_RIGHTS_VERSION +", "ctypes.c_int), (\"kf_pad0\", ctypes.c_int), (\"kf_offset\", ctypes.c_int64), (\"kf_un\", KinfoFileUn), (\"kf_status\", ctypes.c_uint16), (\"kf_pad1\", ctypes.c_uint16), (\"_kf_ispare0\", ctypes.c_int),", "pid_t), ] class KinfoFileUn(ctypes.Union): _fields_ = [ (\"kf_freebsd11\", KinfoFile11), (\"kf_sock\", KinfoFileSock), (\"kf_file\", KinfoFileFile),", "fd and kfile.kf_type == KF_TYPE_VNODE: # Sometimes the path is empty (\"\") for", "= 33 KF_TYPE_VNODE = 1 PATH_MAX = 1024 pid_t = ctypes.c_int sa_family_t =", "(\"kf_sem\", KinfoFileSem), (\"kf_pipe\", KinfoFilePipe), (\"kf_pts\", KinfoFilePts), 
(\"kf_proc\", KinfoFileProc), ] class KinfoFile(ctypes.Structure): _fields_ =", "[ (\"kf_vnode_type\", ctypes.c_int), (\"kf_sock_domain\", ctypes.c_int), (\"kf_sock_type\", ctypes.c_int), (\"kf_sock_protocol\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage),", "32)), (\"kf_pts_dev_freebsd11\", ctypes.c_uint32), (\"kf_pts_pad0\", ctypes.c_uint32), (\"kf_pts_dev\", ctypes.c_uint64), (\"kf_pts_pad1\", (ctypes.c_uint32 * 4)), ] class", "sa_family_t = ctypes.c_uint8 _SS_MAXSIZE = 128 _SS_ALIGNSIZE = ctypes.sizeof(ctypes.c_int64) _SS_PAD1SIZE = _SS_ALIGNSIZE -", "ctypes.c_uint64), (\"kf_pts_pad1\", (ctypes.c_uint32 * 4)), ] class KinfoFileProc(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32", "kinfo_file_size = ctypes.sizeof(KinfoFile) i = 0 while i < len(kinfo_file_data): kfile_data = kinfo_file_data[i:", "(\"kf_spareint\", (ctypes.c_int * 3)), (\"kf_spareint64\", (ctypes.c_uint64 * 30)), (\"kf_file_fsid\", ctypes.c_uint64), (\"kf_file_rdev\", ctypes.c_uint64), (\"kf_file_fileid\",", "- ctypes.sizeof(sa_family_t) _SS_PAD2SIZE = ( _SS_MAXSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) - _SS_PAD1SIZE -", "_fields_ = [ (\"cr_rights\", (ctypes.c_uint64 * (CAP_RIGHTS_VERSION + 2))), ] class KinfoFile11(ctypes.Structure): _fields_", "- ctypes.sizeof(sa_family_t) - _SS_PAD1SIZE - _SS_ALIGNSIZE ) CAP_RIGHTS_VERSION = 0 class SockaddrStorage(ctypes.Structure): _fields_", "(\"kf_pts_pad1\", (ctypes.c_uint32 * 4)), ] class KinfoFileProc(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 *", "ctypes.c_int), (\"kf_ref_count\", ctypes.c_int), (\"kf_flags\", ctypes.c_int), (\"kf_pad0\", ctypes.c_int), (\"kf_offset\", ctypes.c_int64), (\"kf_un\", KinfoFileUn), (\"kf_status\", ctypes.c_uint16),", "break yield kfile i += kfile.kf_structsize def try_recover_fd_path(fd: int) -> Optional[str]: for kfile", "3)), (\"kf_spareint64\", (ctypes.c_uint64 * 30)), (\"kf_file_fsid\", ctypes.c_uint64), 
(\"kf_file_rdev\", ctypes.c_uint64), (\"kf_file_fileid\", ctypes.c_uint64), (\"kf_file_size\", ctypes.c_uint64),", "+= kfile.kf_structsize def try_recover_fd_path(fd: int) -> Optional[str]: for kfile in _iter_kinfo_files(os.getpid()): if kfile.kf_fd", "_fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_sem_value\", ctypes.c_uint32),", "= ( _SS_MAXSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) - _SS_PAD1SIZE - _SS_ALIGNSIZE ) CAP_RIGHTS_VERSION", "from . import bsd_util CTL_KERN = 1 KERN_PROC = 14 KERN_PROC_FILEDESC = 33", "len(kinfo_file_data): kfile_data = kinfo_file_data[i: i + kinfo_file_size].ljust(kinfo_file_size, b\"\\0\") kfile = KinfoFile.from_buffer_copy(kfile_data) if kfile.kf_structsize", ". import bsd_util CTL_KERN = 1 KERN_PROC = 14 KERN_PROC_FILEDESC = 33 KF_TYPE_VNODE", "14 KERN_PROC_FILEDESC = 33 KF_TYPE_VNODE = 1 PATH_MAX = 1024 pid_t = ctypes.c_int", "_SS_PAD1SIZE = _SS_ALIGNSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) _SS_PAD2SIZE = ( _SS_MAXSIZE - ctypes.sizeof(ctypes.c_ubyte)", "ctypes.c_int), (\"kf_cap_rights\", CapRights), (\"_kf_cap_spare\", ctypes.c_uint64), (\"kf_path\", (ctypes.c_char * PATH_MAX)), ] def _iter_kinfo_files(pid: int)", "(\"kf_pid\", pid_t), ] class KinfoFileUn(ctypes.Union): _fields_ = [ (\"kf_freebsd11\", KinfoFile11), (\"kf_sock\", KinfoFileSock), (\"kf_file\",", "* _SS_PAD1SIZE)), (\"ss_align\", ctypes.c_int64), (\"ss_pad2\", (ctypes.c_char * _SS_PAD2SIZE)), ] class CapRights(ctypes.Structure): _fields_ =", "32)), (\"kf_sem_value\", ctypes.c_uint32), (\"kf_sem_mode\", ctypes.c_uint16), ] class KinfoFilePipe(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32", "ctypes.c_int), (\"kf_offset\", ctypes.c_int64), (\"kf_un\", KinfoFileUn), (\"kf_status\", ctypes.c_uint16), (\"kf_pad1\", ctypes.c_uint16), (\"_kf_ispare0\", ctypes.c_int), (\"kf_cap_rights\", CapRights),", "PATH_MAX)), ] def _iter_kinfo_files(pid: int) -> 
Iterator[KinfoFile]: kinfo_file_data = bsd_util.sysctl_bytes_retry( [CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC,", "i < len(kinfo_file_data): kfile_data = kinfo_file_data[i: i + kinfo_file_size].ljust(kinfo_file_size, b\"\\0\") kfile = KinfoFile.from_buffer_copy(kfile_data)", "kfile.kf_fd == fd and kfile.kf_type == KF_TYPE_VNODE: # Sometimes the path is empty", "* 32)), (\"kf_sem_value\", ctypes.c_uint32), (\"kf_sem_mode\", ctypes.c_uint16), ] class KinfoFilePipe(ctypes.Structure): _fields_ = [ (\"kf_spareint\",", "# pylint: disable=invalid-name,too-few-public-methods import ctypes import os from typing import Iterator, Optional from", "1 PATH_MAX = 1024 pid_t = ctypes.c_int sa_family_t = ctypes.c_uint8 _SS_MAXSIZE = 128", "0: break yield kfile i += kfile.kf_structsize def try_recover_fd_path(fd: int) -> Optional[str]: for", "(\"kf_pts\", KinfoFilePts), (\"kf_proc\", KinfoFileProc), ] class KinfoFile(ctypes.Structure): _fields_ = [ (\"kf_structsize\", ctypes.c_int), (\"kf_type\",", "<gh_stars>0 # pylint: disable=invalid-name,too-few-public-methods import ctypes import os from typing import Iterator, Optional", "(\"kf_file_fsid_freebsd11\", ctypes.c_uint32), (\"kf_file_rdev_freebsd11\", ctypes.c_uint32), (\"kf_file_mode\", ctypes.c_uint16), (\"kf_file_pad0\", ctypes.c_uint16), (\"kf_file_pad1\", ctypes.c_uint32), ] class KinfoFileSem(ctypes.Structure):", "(\"kf_file_rdev_freebsd11\", ctypes.c_uint32), (\"kf_file_mode\", ctypes.c_uint16), (\"kf_file_pad0\", ctypes.c_uint16), (\"kf_file_pad1\", ctypes.c_uint32), ] class KinfoFileSem(ctypes.Structure): _fields_ =", "_SS_ALIGNSIZE ) CAP_RIGHTS_VERSION = 0 class SockaddrStorage(ctypes.Structure): _fields_ = [ (\"ss_len\", ctypes.c_ubyte), (\"ss_family\",", "(\"_kf_ispare0\", ctypes.c_int), (\"kf_cap_rights\", CapRights), (\"_kf_cap_spare\", ctypes.c_uint64), (\"kf_path\", (ctypes.c_char * PATH_MAX)), ] def _iter_kinfo_files(pid:", "disable=invalid-name,too-few-public-methods import ctypes import os from typing import 
Iterator, Optional from . import", "path is empty (\"\") for no apparent reason. return os.fsdecode(kfile.kf_path) or None return", "= ctypes.sizeof(KinfoFile) i = 0 while i < len(kinfo_file_data): kfile_data = kinfo_file_data[i: i", "ctypes.c_uint64), (\"kf_file_rdev\", ctypes.c_uint64), (\"kf_file_fileid\", ctypes.c_uint64), (\"kf_file_size\", ctypes.c_uint64), (\"kf_file_fsid_freebsd11\", ctypes.c_uint32), (\"kf_file_rdev_freebsd11\", ctypes.c_uint32), (\"kf_file_mode\", ctypes.c_uint16),", "ctypes.c_int), (\"kf_spareint\", (ctypes.c_int * 3)), (\"kf_spareint64\", (ctypes.c_uint64 * 30)), (\"kf_file_fsid\", ctypes.c_uint64), (\"kf_file_rdev\", ctypes.c_uint64),", "(\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pid\", pid_t), ] class KinfoFileUn(ctypes.Union):", "_iter_kinfo_files(os.getpid()): if kfile.kf_fd == fd and kfile.kf_type == KF_TYPE_VNODE: # Sometimes the path", "32)), (\"kf_pipe_addr\", ctypes.c_uint64), (\"kf_pipe_peer\", ctypes.c_uint64), (\"kf_pipe_buffer_cnt\", ctypes.c_uint32), (\"kf_pts_pad0\", (ctypes.c_uint32 * 3)), ] class", "(\"kf_ref_count\", ctypes.c_int), (\"kf_flags\", ctypes.c_int), (\"kf_pad0\", ctypes.c_int), (\"kf_offset\", ctypes.c_int64), (\"kf_un\", KinfoFileUn), (\"kf_status\", ctypes.c_uint16), (\"kf_pad1\",", "] class CapRights(ctypes.Structure): _fields_ = [ (\"cr_rights\", (ctypes.c_uint64 * (CAP_RIGHTS_VERSION + 2))), ]", "(\"kf_pad1\", ctypes.c_uint16), (\"_kf_ispare0\", ctypes.c_int), (\"kf_cap_rights\", CapRights), (\"_kf_cap_spare\", ctypes.c_uint64), (\"kf_path\", (ctypes.c_char * PATH_MAX)), ]", "* 3)), (\"kf_spareint64\", (ctypes.c_uint64 * 30)), (\"kf_file_fsid\", ctypes.c_uint64), (\"kf_file_rdev\", ctypes.c_uint64), (\"kf_file_fileid\", ctypes.c_uint64), (\"kf_file_size\",", "(\"kf_pts_dev\", ctypes.c_uint64), (\"kf_pts_pad1\", (ctypes.c_uint32 * 4)), ] class KinfoFileProc(ctypes.Structure): _fields_ = [ (\"kf_spareint\",", "kfile in _iter_kinfo_files(os.getpid()): if 
kfile.kf_fd == fd and kfile.kf_type == KF_TYPE_VNODE: # Sometimes", "= bsd_util.sysctl_bytes_retry( [CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, pid], None ) kinfo_file_size = ctypes.sizeof(KinfoFile) i =", "0 while i < len(kinfo_file_data): kfile_data = kinfo_file_data[i: i + kinfo_file_size].ljust(kinfo_file_size, b\"\\0\") kfile", "ctypes.c_uint64), (\"kf_file_size\", ctypes.c_uint64), (\"kf_file_fsid_freebsd11\", ctypes.c_uint32), (\"kf_file_rdev_freebsd11\", ctypes.c_uint32), (\"kf_file_mode\", ctypes.c_uint16), (\"kf_file_pad0\", ctypes.c_uint16), (\"kf_file_pad1\", ctypes.c_uint32),", "_fields_ = [ (\"kf_vnode_type\", ctypes.c_int), (\"kf_sock_domain\", ctypes.c_int), (\"kf_sock_type\", ctypes.c_int), (\"kf_sock_protocol\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage),", "_fields_ = [ (\"ss_len\", ctypes.c_ubyte), (\"ss_family\", sa_family_t), (\"ss_pad1\", (ctypes.c_char * _SS_PAD1SIZE)), (\"ss_align\", ctypes.c_int64),", "= 128 _SS_ALIGNSIZE = ctypes.sizeof(ctypes.c_int64) _SS_PAD1SIZE = _SS_ALIGNSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) _SS_PAD2SIZE", "(\"kf_sock_domain\", ctypes.c_int), (\"kf_sock_type\", ctypes.c_int), (\"kf_sock_protocol\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), ] class KinfoFileSock(ctypes.Structure):", "[CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, pid], None ) kinfo_file_size = ctypes.sizeof(KinfoFile) i = 0 while", "== KF_TYPE_VNODE: # Sometimes the path is empty (\"\") for no apparent reason.", "+ kinfo_file_size].ljust(kinfo_file_size, b\"\\0\") kfile = KinfoFile.from_buffer_copy(kfile_data) if kfile.kf_structsize == 0: break yield kfile", "int) -> Optional[str]: for kfile in _iter_kinfo_files(os.getpid()): if kfile.kf_fd == fd and kfile.kf_type", "* 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pid\", pid_t), ] class KinfoFileUn(ctypes.Union): _fields_ =", "* 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pipe_addr\", ctypes.c_uint64), 
(\"kf_pipe_peer\", ctypes.c_uint64), (\"kf_pipe_buffer_cnt\", ctypes.c_uint32), (\"kf_pts_pad0\",", "ctypes.sizeof(sa_family_t) _SS_PAD2SIZE = ( _SS_MAXSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) - _SS_PAD1SIZE - _SS_ALIGNSIZE", "] class KinfoFileProc(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 *", "yield kfile i += kfile.kf_structsize def try_recover_fd_path(fd: int) -> Optional[str]: for kfile in", "kfile_data = kinfo_file_data[i: i + kinfo_file_size].ljust(kinfo_file_size, b\"\\0\") kfile = KinfoFile.from_buffer_copy(kfile_data) if kfile.kf_structsize ==", "KERN_PROC, KERN_PROC_FILEDESC, pid], None ) kinfo_file_size = ctypes.sizeof(KinfoFile) i = 0 while i", "ctypes.c_uint32), ] class KinfoFileFile(ctypes.Structure): _fields_ = [ (\"kf_file_type\", ctypes.c_int), (\"kf_spareint\", (ctypes.c_int * 3)),", "(\"kf_file_fileid\", ctypes.c_uint64), (\"kf_file_size\", ctypes.c_uint64), (\"kf_file_fsid_freebsd11\", ctypes.c_uint32), (\"kf_file_rdev_freebsd11\", ctypes.c_uint32), (\"kf_file_mode\", ctypes.c_uint16), (\"kf_file_pad0\", ctypes.c_uint16), (\"kf_file_pad1\",", "KinfoFileProc), ] class KinfoFile(ctypes.Structure): _fields_ = [ (\"kf_structsize\", ctypes.c_int), (\"kf_type\", ctypes.c_int), (\"kf_fd\", ctypes.c_int),", "if kfile.kf_fd == fd and kfile.kf_type == KF_TYPE_VNODE: # Sometimes the path is", "pid], None ) kinfo_file_size = ctypes.sizeof(KinfoFile) i = 0 while i < len(kinfo_file_data):", "SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), ] class KinfoFileSock(ctypes.Structure): _fields_ = [ (\"kf_sock_sendq\", ctypes.c_uint32), (\"kf_sock_domain0\", ctypes.c_int),", "ctypes.c_uint16), (\"kf_sock_rcv_sb_state\", ctypes.c_uint16), (\"kf_sock_recvq\", ctypes.c_uint32), ] class KinfoFileFile(ctypes.Structure): _fields_ = [ (\"kf_file_type\", ctypes.c_int),", "- _SS_PAD1SIZE - _SS_ALIGNSIZE ) CAP_RIGHTS_VERSION = 0 class SockaddrStorage(ctypes.Structure): _fields_ = 
[", "(\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_sem_value\", ctypes.c_uint32), (\"kf_sem_mode\", ctypes.c_uint16), ] class KinfoFilePipe(ctypes.Structure): _fields_ =", "_fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pid\", pid_t),", "] class KinfoFile11(ctypes.Structure): _fields_ = [ (\"kf_vnode_type\", ctypes.c_int), (\"kf_sock_domain\", ctypes.c_int), (\"kf_sock_type\", ctypes.c_int), (\"kf_sock_protocol\",", "(\"kf_sock_protocol0\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), (\"kf_sock_pcb\", ctypes.c_uint64), (\"kf_sock_inpcb\", ctypes.c_uint64), (\"kf_sock_unpconn\", ctypes.c_uint64), (\"kf_sock_snd_sb_state\",", "_SS_PAD1SIZE - _SS_ALIGNSIZE ) CAP_RIGHTS_VERSION = 0 class SockaddrStorage(ctypes.Structure): _fields_ = [ (\"ss_len\",", "(\"kf_spareint64\", (ctypes.c_uint64 * 30)), (\"kf_file_fsid\", ctypes.c_uint64), (\"kf_file_rdev\", ctypes.c_uint64), (\"kf_file_fileid\", ctypes.c_uint64), (\"kf_file_size\", ctypes.c_uint64), (\"kf_file_fsid_freebsd11\",", "(\"cr_rights\", (ctypes.c_uint64 * (CAP_RIGHTS_VERSION + 2))), ] class KinfoFile11(ctypes.Structure): _fields_ = [ (\"kf_vnode_type\",", "2))), ] class KinfoFile11(ctypes.Structure): _fields_ = [ (\"kf_vnode_type\", ctypes.c_int), (\"kf_sock_domain\", ctypes.c_int), (\"kf_sock_type\", ctypes.c_int),", "class KinfoFileFile(ctypes.Structure): _fields_ = [ (\"kf_file_type\", ctypes.c_int), (\"kf_spareint\", (ctypes.c_int * 3)), (\"kf_spareint64\", (ctypes.c_uint64", "(CAP_RIGHTS_VERSION + 2))), ] class KinfoFile11(ctypes.Structure): _fields_ = [ (\"kf_vnode_type\", ctypes.c_int), (\"kf_sock_domain\", ctypes.c_int),", "ctypes.c_uint64), (\"kf_file_fsid_freebsd11\", ctypes.c_uint32), (\"kf_file_rdev_freebsd11\", ctypes.c_uint32), (\"kf_file_mode\", ctypes.c_uint16), (\"kf_file_pad0\", ctypes.c_uint16), (\"kf_file_pad1\", ctypes.c_uint32), ] class", "(ctypes.c_int * 3)), (\"kf_spareint64\", 
(ctypes.c_uint64 * 30)), (\"kf_file_fsid\", ctypes.c_uint64), (\"kf_file_rdev\", ctypes.c_uint64), (\"kf_file_fileid\", ctypes.c_uint64),", "(\"ss_align\", ctypes.c_int64), (\"ss_pad2\", (ctypes.c_char * _SS_PAD2SIZE)), ] class CapRights(ctypes.Structure): _fields_ = [ (\"cr_rights\",", "ctypes.c_ubyte), (\"ss_family\", sa_family_t), (\"ss_pad1\", (ctypes.c_char * _SS_PAD1SIZE)), (\"ss_align\", ctypes.c_int64), (\"ss_pad2\", (ctypes.c_char * _SS_PAD2SIZE)),", "Optional from . import bsd_util CTL_KERN = 1 KERN_PROC = 14 KERN_PROC_FILEDESC =", "] class KinfoFilePipe(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 *", "i += kfile.kf_structsize def try_recover_fd_path(fd: int) -> Optional[str]: for kfile in _iter_kinfo_files(os.getpid()): if", "KinfoFile.from_buffer_copy(kfile_data) if kfile.kf_structsize == 0: break yield kfile i += kfile.kf_structsize def try_recover_fd_path(fd:", "ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), ] class KinfoFileSock(ctypes.Structure): _fields_ = [ (\"kf_sock_sendq\", ctypes.c_uint32),", "the path is empty (\"\") for no apparent reason. return os.fsdecode(kfile.kf_path) or None", "while i < len(kinfo_file_data): kfile_data = kinfo_file_data[i: i + kinfo_file_size].ljust(kinfo_file_size, b\"\\0\") kfile =", "SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), (\"kf_sock_pcb\", ctypes.c_uint64), (\"kf_sock_inpcb\", ctypes.c_uint64), (\"kf_sock_unpconn\", ctypes.c_uint64), (\"kf_sock_snd_sb_state\", ctypes.c_uint16), (\"kf_sock_rcv_sb_state\", ctypes.c_uint16),", "import Iterator, Optional from . 
import bsd_util CTL_KERN = 1 KERN_PROC = 14", "_SS_PAD2SIZE)), ] class CapRights(ctypes.Structure): _fields_ = [ (\"cr_rights\", (ctypes.c_uint64 * (CAP_RIGHTS_VERSION + 2))),", "= 14 KERN_PROC_FILEDESC = 33 KF_TYPE_VNODE = 1 PATH_MAX = 1024 pid_t =", "ctypes.c_uint32), (\"kf_sock_domain0\", ctypes.c_int), (\"kf_sock_type0\", ctypes.c_int), (\"kf_sock_protocol0\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), (\"kf_sock_pcb\", ctypes.c_uint64),", "KinfoFilePipe(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pipe_addr\",", "(\"kf_pts_pad0\", ctypes.c_uint32), (\"kf_pts_dev\", ctypes.c_uint64), (\"kf_pts_pad1\", (ctypes.c_uint32 * 4)), ] class KinfoFileProc(ctypes.Structure): _fields_ =", "KF_TYPE_VNODE: # Sometimes the path is empty (\"\") for no apparent reason. return", "ctypes.c_uint64), (\"kf_sock_snd_sb_state\", ctypes.c_uint16), (\"kf_sock_rcv_sb_state\", ctypes.c_uint16), (\"kf_sock_recvq\", ctypes.c_uint32), ] class KinfoFileFile(ctypes.Structure): _fields_ = [", "ctypes.c_uint64), (\"kf_path\", (ctypes.c_char * PATH_MAX)), ] def _iter_kinfo_files(pid: int) -> Iterator[KinfoFile]: kinfo_file_data =", "(ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pts_dev_freebsd11\", ctypes.c_uint32), (\"kf_pts_pad0\", ctypes.c_uint32), (\"kf_pts_dev\", ctypes.c_uint64),", "] class KinfoFileSem(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 *", "KinfoFileSock(ctypes.Structure): _fields_ = [ (\"kf_sock_sendq\", ctypes.c_uint32), (\"kf_sock_domain0\", ctypes.c_int), (\"kf_sock_type0\", ctypes.c_int), (\"kf_sock_protocol0\", ctypes.c_int), (\"kf_sa_local\",", "KF_TYPE_VNODE = 1 PATH_MAX = 1024 pid_t = ctypes.c_int sa_family_t = ctypes.c_uint8 _SS_MAXSIZE", "4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pts_dev_freebsd11\", ctypes.c_uint32), (\"kf_pts_pad0\", 
ctypes.c_uint32), (\"kf_pts_dev\", ctypes.c_uint64), (\"kf_pts_pad1\", (ctypes.c_uint32", "(ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pid\", pid_t), ] class KinfoFileUn(ctypes.Union): _fields_", "= ctypes.c_uint8 _SS_MAXSIZE = 128 _SS_ALIGNSIZE = ctypes.sizeof(ctypes.c_int64) _SS_PAD1SIZE = _SS_ALIGNSIZE - ctypes.sizeof(ctypes.c_ubyte)", "(\"kf_offset\", ctypes.c_int64), (\"kf_un\", KinfoFileUn), (\"kf_status\", ctypes.c_uint16), (\"kf_pad1\", ctypes.c_uint16), (\"_kf_ispare0\", ctypes.c_int), (\"kf_cap_rights\", CapRights), (\"_kf_cap_spare\",", "(\"kf_sock_domain0\", ctypes.c_int), (\"kf_sock_type0\", ctypes.c_int), (\"kf_sock_protocol0\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), (\"kf_sock_pcb\", ctypes.c_uint64), (\"kf_sock_inpcb\",", "1024 pid_t = ctypes.c_int sa_family_t = ctypes.c_uint8 _SS_MAXSIZE = 128 _SS_ALIGNSIZE = ctypes.sizeof(ctypes.c_int64)", "(\"kf_sem_mode\", ctypes.c_uint16), ] class KinfoFilePipe(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\",", "[ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pid\", pid_t), ] class", "32)), (\"kf_pid\", pid_t), ] class KinfoFileUn(ctypes.Union): _fields_ = [ (\"kf_freebsd11\", KinfoFile11), (\"kf_sock\", KinfoFileSock),", "(\"ss_pad1\", (ctypes.c_char * _SS_PAD1SIZE)), (\"ss_align\", ctypes.c_int64), (\"ss_pad2\", (ctypes.c_char * _SS_PAD2SIZE)), ] class CapRights(ctypes.Structure):", "(\"kf_sem_value\", ctypes.c_uint32), (\"kf_sem_mode\", ctypes.c_uint16), ] class KinfoFilePipe(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 *", "(\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pid\", pid_t), ] class KinfoFileUn(ctypes.Union): _fields_ = [ (\"kf_freebsd11\",", "bsd_util.sysctl_bytes_retry( [CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, pid], None ) kinfo_file_size = ctypes.sizeof(KinfoFile) i = 0", "and kfile.kf_type == 
KF_TYPE_VNODE: # Sometimes the path is empty (\"\") for no", "CapRights), (\"_kf_cap_spare\", ctypes.c_uint64), (\"kf_path\", (ctypes.c_char * PATH_MAX)), ] def _iter_kinfo_files(pid: int) -> Iterator[KinfoFile]:", "(\"kf_status\", ctypes.c_uint16), (\"kf_pad1\", ctypes.c_uint16), (\"_kf_ispare0\", ctypes.c_int), (\"kf_cap_rights\", CapRights), (\"_kf_cap_spare\", ctypes.c_uint64), (\"kf_path\", (ctypes.c_char *", "] class KinfoFileFile(ctypes.Structure): _fields_ = [ (\"kf_file_type\", ctypes.c_int), (\"kf_spareint\", (ctypes.c_int * 3)), (\"kf_spareint64\",", "ctypes.c_uint32), (\"kf_pts_pad0\", (ctypes.c_uint32 * 3)), ] class KinfoFilePts(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32", "[ (\"cr_rights\", (ctypes.c_uint64 * (CAP_RIGHTS_VERSION + 2))), ] class KinfoFile11(ctypes.Structure): _fields_ = [", "(\"kf_sock_pcb\", ctypes.c_uint64), (\"kf_sock_inpcb\", ctypes.c_uint64), (\"kf_sock_unpconn\", ctypes.c_uint64), (\"kf_sock_snd_sb_state\", ctypes.c_uint16), (\"kf_sock_rcv_sb_state\", ctypes.c_uint16), (\"kf_sock_recvq\", ctypes.c_uint32), ]", "Optional[str]: for kfile in _iter_kinfo_files(os.getpid()): if kfile.kf_fd == fd and kfile.kf_type == KF_TYPE_VNODE:", "_SS_ALIGNSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) _SS_PAD2SIZE = ( _SS_MAXSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t)", "* 30)), (\"kf_file_fsid\", ctypes.c_uint64), (\"kf_file_rdev\", ctypes.c_uint64), (\"kf_file_fileid\", ctypes.c_uint64), (\"kf_file_size\", ctypes.c_uint64), (\"kf_file_fsid_freebsd11\", ctypes.c_uint32), (\"kf_file_rdev_freebsd11\",", "1 KERN_PROC = 14 KERN_PROC_FILEDESC = 33 KF_TYPE_VNODE = 1 PATH_MAX = 1024", "= [ (\"cr_rights\", (ctypes.c_uint64 * (CAP_RIGHTS_VERSION + 2))), ] class KinfoFile11(ctypes.Structure): _fields_ =", "(ctypes.c_char * PATH_MAX)), ] def _iter_kinfo_files(pid: int) -> Iterator[KinfoFile]: kinfo_file_data = bsd_util.sysctl_bytes_retry( [CTL_KERN,", "(ctypes.c_char * _SS_PAD1SIZE)), 
(\"ss_align\", ctypes.c_int64), (\"ss_pad2\", (ctypes.c_char * _SS_PAD2SIZE)), ] class CapRights(ctypes.Structure): _fields_", "(\"kf_un\", KinfoFileUn), (\"kf_status\", ctypes.c_uint16), (\"kf_pad1\", ctypes.c_uint16), (\"_kf_ispare0\", ctypes.c_int), (\"kf_cap_rights\", CapRights), (\"_kf_cap_spare\", ctypes.c_uint64), (\"kf_path\",", "ctypes.sizeof(KinfoFile) i = 0 while i < len(kinfo_file_data): kfile_data = kinfo_file_data[i: i +", "KinfoFileFile(ctypes.Structure): _fields_ = [ (\"kf_file_type\", ctypes.c_int), (\"kf_spareint\", (ctypes.c_int * 3)), (\"kf_spareint64\", (ctypes.c_uint64 *", "kinfo_file_data[i: i + kinfo_file_size].ljust(kinfo_file_size, b\"\\0\") kfile = KinfoFile.from_buffer_copy(kfile_data) if kfile.kf_structsize == 0: break", "class KinfoFileSock(ctypes.Structure): _fields_ = [ (\"kf_sock_sendq\", ctypes.c_uint32), (\"kf_sock_domain0\", ctypes.c_int), (\"kf_sock_type0\", ctypes.c_int), (\"kf_sock_protocol0\", ctypes.c_int),", "(\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pts_dev_freebsd11\", ctypes.c_uint32), (\"kf_pts_pad0\", ctypes.c_uint32), (\"kf_pts_dev\",", "ctypes.c_uint32), (\"kf_pts_dev\", ctypes.c_uint64), (\"kf_pts_pad1\", (ctypes.c_uint32 * 4)), ] class KinfoFileProc(ctypes.Structure): _fields_ = [", "sa_family_t), (\"ss_pad1\", (ctypes.c_char * _SS_PAD1SIZE)), (\"ss_align\", ctypes.c_int64), (\"ss_pad2\", (ctypes.c_char * _SS_PAD2SIZE)), ] class", "ctypes.c_int64), (\"ss_pad2\", (ctypes.c_char * _SS_PAD2SIZE)), ] class CapRights(ctypes.Structure): _fields_ = [ (\"cr_rights\", (ctypes.c_uint64", "* 3)), ] class KinfoFilePts(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\",", "(\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), ] class KinfoFileSock(ctypes.Structure): _fields_ = [ (\"kf_sock_sendq\", ctypes.c_uint32), (\"kf_sock_domain0\",", "* 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_sem_value\", 
ctypes.c_uint32), (\"kf_sem_mode\", ctypes.c_uint16), ] class KinfoFilePipe(ctypes.Structure):", "ctypes.c_int), (\"kf_sock_type0\", ctypes.c_int), (\"kf_sock_protocol0\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), (\"kf_sock_pcb\", ctypes.c_uint64), (\"kf_sock_inpcb\", ctypes.c_uint64),", "import os from typing import Iterator, Optional from . import bsd_util CTL_KERN =", "3)), ] class KinfoFilePts(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64", "is empty (\"\") for no apparent reason. return os.fsdecode(kfile.kf_path) or None return None", "ctypes.c_uint32), (\"kf_file_mode\", ctypes.c_uint16), (\"kf_file_pad0\", ctypes.c_uint16), (\"kf_file_pad1\", ctypes.c_uint32), ] class KinfoFileSem(ctypes.Structure): _fields_ = [", "] class KinfoFileUn(ctypes.Union): _fields_ = [ (\"kf_freebsd11\", KinfoFile11), (\"kf_sock\", KinfoFileSock), (\"kf_file\", KinfoFileFile), (\"kf_sem\",", "[ (\"kf_sock_sendq\", ctypes.c_uint32), (\"kf_sock_domain0\", ctypes.c_int), (\"kf_sock_type0\", ctypes.c_int), (\"kf_sock_protocol0\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage),", "(\"kf_sock_sendq\", ctypes.c_uint32), (\"kf_sock_domain0\", ctypes.c_int), (\"kf_sock_type0\", ctypes.c_int), (\"kf_sock_protocol0\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), (\"kf_sock_pcb\",", "= KinfoFile.from_buffer_copy(kfile_data) if kfile.kf_structsize == 0: break yield kfile i += kfile.kf_structsize def", "ctypes.c_uint16), (\"_kf_ispare0\", ctypes.c_int), (\"kf_cap_rights\", CapRights), (\"_kf_cap_spare\", ctypes.c_uint64), (\"kf_path\", (ctypes.c_char * PATH_MAX)), ] def", "_fields_ = [ (\"kf_sock_sendq\", ctypes.c_uint32), (\"kf_sock_domain0\", ctypes.c_int), (\"kf_sock_type0\", ctypes.c_int), (\"kf_sock_protocol0\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage),", "= [ (\"kf_spareint\", (ctypes.c_uint32 * 
4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_sem_value\", ctypes.c_uint32), (\"kf_sem_mode\",", "def try_recover_fd_path(fd: int) -> Optional[str]: for kfile in _iter_kinfo_files(os.getpid()): if kfile.kf_fd == fd", "class SockaddrStorage(ctypes.Structure): _fields_ = [ (\"ss_len\", ctypes.c_ubyte), (\"ss_family\", sa_family_t), (\"ss_pad1\", (ctypes.c_char * _SS_PAD1SIZE)),", "class KinfoFileProc(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)),", "bsd_util CTL_KERN = 1 KERN_PROC = 14 KERN_PROC_FILEDESC = 33 KF_TYPE_VNODE = 1", "(ctypes.c_uint32 * 3)), ] class KinfoFilePts(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)),", "Sometimes the path is empty (\"\") for no apparent reason. return os.fsdecode(kfile.kf_path) or", "(\"ss_pad2\", (ctypes.c_char * _SS_PAD2SIZE)), ] class CapRights(ctypes.Structure): _fields_ = [ (\"cr_rights\", (ctypes.c_uint64 *", "(ctypes.c_uint64 * 32)), (\"kf_sem_value\", ctypes.c_uint32), (\"kf_sem_mode\", ctypes.c_uint16), ] class KinfoFilePipe(ctypes.Structure): _fields_ = [", "(\"kf_file_mode\", ctypes.c_uint16), (\"kf_file_pad0\", ctypes.c_uint16), (\"kf_file_pad1\", ctypes.c_uint32), ] class KinfoFileSem(ctypes.Structure): _fields_ = [ (\"kf_spareint\",", "_fields_ = [ (\"kf_freebsd11\", KinfoFile11), (\"kf_sock\", KinfoFileSock), (\"kf_file\", KinfoFileFile), (\"kf_sem\", KinfoFileSem), (\"kf_pipe\", KinfoFilePipe),", "ctypes.c_int64), (\"kf_un\", KinfoFileUn), (\"kf_status\", ctypes.c_uint16), (\"kf_pad1\", ctypes.c_uint16), (\"_kf_ispare0\", ctypes.c_int), (\"kf_cap_rights\", CapRights), (\"_kf_cap_spare\", ctypes.c_uint64),", "(ctypes.c_char * _SS_PAD2SIZE)), ] class CapRights(ctypes.Structure): _fields_ = [ (\"cr_rights\", (ctypes.c_uint64 * (CAP_RIGHTS_VERSION", "-> Optional[str]: for kfile in _iter_kinfo_files(os.getpid()): if kfile.kf_fd == fd and kfile.kf_type ==", "# Sometimes the path is empty (\"\") for no apparent 
reason. return os.fsdecode(kfile.kf_path)", "KinfoFileSem), (\"kf_pipe\", KinfoFilePipe), (\"kf_pts\", KinfoFilePts), (\"kf_proc\", KinfoFileProc), ] class KinfoFile(ctypes.Structure): _fields_ = [", "= 1 PATH_MAX = 1024 pid_t = ctypes.c_int sa_family_t = ctypes.c_uint8 _SS_MAXSIZE =", "(ctypes.c_uint64 * 30)), (\"kf_file_fsid\", ctypes.c_uint64), (\"kf_file_rdev\", ctypes.c_uint64), (\"kf_file_fileid\", ctypes.c_uint64), (\"kf_file_size\", ctypes.c_uint64), (\"kf_file_fsid_freebsd11\", ctypes.c_uint32),", "* 32)), (\"kf_pts_dev_freebsd11\", ctypes.c_uint32), (\"kf_pts_pad0\", ctypes.c_uint32), (\"kf_pts_dev\", ctypes.c_uint64), (\"kf_pts_pad1\", (ctypes.c_uint32 * 4)), ]", "i = 0 while i < len(kinfo_file_data): kfile_data = kinfo_file_data[i: i + kinfo_file_size].ljust(kinfo_file_size,", "KinfoFile(ctypes.Structure): _fields_ = [ (\"kf_structsize\", ctypes.c_int), (\"kf_type\", ctypes.c_int), (\"kf_fd\", ctypes.c_int), (\"kf_ref_count\", ctypes.c_int), (\"kf_flags\",", "KinfoFilePts), (\"kf_proc\", KinfoFileProc), ] class KinfoFile(ctypes.Structure): _fields_ = [ (\"kf_structsize\", ctypes.c_int), (\"kf_type\", ctypes.c_int),", "ctypes.c_int), (\"kf_sock_domain\", ctypes.c_int), (\"kf_sock_type\", ctypes.c_int), (\"kf_sock_protocol\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), ] class", "= [ (\"ss_len\", ctypes.c_ubyte), (\"ss_family\", sa_family_t), (\"ss_pad1\", (ctypes.c_char * _SS_PAD1SIZE)), (\"ss_align\", ctypes.c_int64), (\"ss_pad2\",", "KinfoFile11), (\"kf_sock\", KinfoFileSock), (\"kf_file\", KinfoFileFile), (\"kf_sem\", KinfoFileSem), (\"kf_pipe\", KinfoFilePipe), (\"kf_pts\", KinfoFilePts), (\"kf_proc\", KinfoFileProc),", "in _iter_kinfo_files(os.getpid()): if kfile.kf_fd == fd and kfile.kf_type == KF_TYPE_VNODE: # Sometimes the", "(\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pipe_addr\", ctypes.c_uint64), (\"kf_pipe_peer\", ctypes.c_uint64), (\"kf_pipe_buffer_cnt\", ctypes.c_uint32), (\"kf_pts_pad0\", 
(ctypes.c_uint32 *", "(\"kf_sock_inpcb\", ctypes.c_uint64), (\"kf_sock_unpconn\", ctypes.c_uint64), (\"kf_sock_snd_sb_state\", ctypes.c_uint16), (\"kf_sock_rcv_sb_state\", ctypes.c_uint16), (\"kf_sock_recvq\", ctypes.c_uint32), ] class KinfoFileFile(ctypes.Structure):", "(\"kf_pipe_peer\", ctypes.c_uint64), (\"kf_pipe_buffer_cnt\", ctypes.c_uint32), (\"kf_pts_pad0\", (ctypes.c_uint32 * 3)), ] class KinfoFilePts(ctypes.Structure): _fields_ =", "(ctypes.c_uint64 * (CAP_RIGHTS_VERSION + 2))), ] class KinfoFile11(ctypes.Structure): _fields_ = [ (\"kf_vnode_type\", ctypes.c_int),", "(\"kf_pipe_buffer_cnt\", ctypes.c_uint32), (\"kf_pts_pad0\", (ctypes.c_uint32 * 3)), ] class KinfoFilePts(ctypes.Structure): _fields_ = [ (\"kf_spareint\",", "CapRights(ctypes.Structure): _fields_ = [ (\"cr_rights\", (ctypes.c_uint64 * (CAP_RIGHTS_VERSION + 2))), ] class KinfoFile11(ctypes.Structure):", "_SS_PAD2SIZE = ( _SS_MAXSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) - _SS_PAD1SIZE - _SS_ALIGNSIZE )", "Iterator[KinfoFile]: kinfo_file_data = bsd_util.sysctl_bytes_retry( [CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, pid], None ) kinfo_file_size = ctypes.sizeof(KinfoFile)", ") kinfo_file_size = ctypes.sizeof(KinfoFile) i = 0 while i < len(kinfo_file_data): kfile_data =", "- ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) - _SS_PAD1SIZE - _SS_ALIGNSIZE ) CAP_RIGHTS_VERSION = 0 class", "_SS_MAXSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) - _SS_PAD1SIZE - _SS_ALIGNSIZE ) CAP_RIGHTS_VERSION = 0", "(ctypes.c_uint64 * 32)), (\"kf_pipe_addr\", ctypes.c_uint64), (\"kf_pipe_peer\", ctypes.c_uint64), (\"kf_pipe_buffer_cnt\", ctypes.c_uint32), (\"kf_pts_pad0\", (ctypes.c_uint32 * 3)),", "KinfoFileProc(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pid\",", "import bsd_util CTL_KERN = 1 KERN_PROC = 14 KERN_PROC_FILEDESC = 33 KF_TYPE_VNODE =", "ctypes.c_uint16), 
(\"kf_file_pad1\", ctypes.c_uint32), ] class KinfoFileSem(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)),", "(ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pipe_addr\", ctypes.c_uint64), (\"kf_pipe_peer\", ctypes.c_uint64), (\"kf_pipe_buffer_cnt\", ctypes.c_uint32),", "from typing import Iterator, Optional from . import bsd_util CTL_KERN = 1 KERN_PROC", "* PATH_MAX)), ] def _iter_kinfo_files(pid: int) -> Iterator[KinfoFile]: kinfo_file_data = bsd_util.sysctl_bytes_retry( [CTL_KERN, KERN_PROC,", "[ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_sem_value\", ctypes.c_uint32), (\"kf_sem_mode\", ctypes.c_uint16),", "(\"ss_family\", sa_family_t), (\"ss_pad1\", (ctypes.c_char * _SS_PAD1SIZE)), (\"ss_align\", ctypes.c_int64), (\"ss_pad2\", (ctypes.c_char * _SS_PAD2SIZE)), ]", "(\"kf_sa_peer\", SockaddrStorage), ] class KinfoFileSock(ctypes.Structure): _fields_ = [ (\"kf_sock_sendq\", ctypes.c_uint32), (\"kf_sock_domain0\", ctypes.c_int), (\"kf_sock_type0\",", "ctypes.c_uint16), ] class KinfoFilePipe(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64", "ctypes.c_uint64), (\"kf_file_fileid\", ctypes.c_uint64), (\"kf_file_size\", ctypes.c_uint64), (\"kf_file_fsid_freebsd11\", ctypes.c_uint32), (\"kf_file_rdev_freebsd11\", ctypes.c_uint32), (\"kf_file_mode\", ctypes.c_uint16), (\"kf_file_pad0\", ctypes.c_uint16),", "_SS_ALIGNSIZE = ctypes.sizeof(ctypes.c_int64) _SS_PAD1SIZE = _SS_ALIGNSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) _SS_PAD2SIZE = (", "ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), (\"kf_sock_pcb\", ctypes.c_uint64), (\"kf_sock_inpcb\", ctypes.c_uint64), (\"kf_sock_unpconn\", ctypes.c_uint64), (\"kf_sock_snd_sb_state\", ctypes.c_uint16),", "CTL_KERN = 1 KERN_PROC = 14 KERN_PROC_FILEDESC = 33 KF_TYPE_VNODE = 1 PATH_MAX", "= 
ctypes.sizeof(ctypes.c_int64) _SS_PAD1SIZE = _SS_ALIGNSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) _SS_PAD2SIZE = ( _SS_MAXSIZE", "+ 2))), ] class KinfoFile11(ctypes.Structure): _fields_ = [ (\"kf_vnode_type\", ctypes.c_int), (\"kf_sock_domain\", ctypes.c_int), (\"kf_sock_type\",", "(ctypes.c_uint64 * 32)), (\"kf_pts_dev_freebsd11\", ctypes.c_uint32), (\"kf_pts_pad0\", ctypes.c_uint32), (\"kf_pts_dev\", ctypes.c_uint64), (\"kf_pts_pad1\", (ctypes.c_uint32 * 4)),", "kfile.kf_structsize def try_recover_fd_path(fd: int) -> Optional[str]: for kfile in _iter_kinfo_files(os.getpid()): if kfile.kf_fd ==", "ctypes import os from typing import Iterator, Optional from . import bsd_util CTL_KERN", "= ctypes.c_int sa_family_t = ctypes.c_uint8 _SS_MAXSIZE = 128 _SS_ALIGNSIZE = ctypes.sizeof(ctypes.c_int64) _SS_PAD1SIZE =", "128 _SS_ALIGNSIZE = ctypes.sizeof(ctypes.c_int64) _SS_PAD1SIZE = _SS_ALIGNSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) _SS_PAD2SIZE =", "4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pid\", pid_t), ] class KinfoFileUn(ctypes.Union): _fields_ = [", "(\"kf_vnode_type\", ctypes.c_int), (\"kf_sock_domain\", ctypes.c_int), (\"kf_sock_type\", ctypes.c_int), (\"kf_sock_protocol\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), ]", "i + kinfo_file_size].ljust(kinfo_file_size, b\"\\0\") kfile = KinfoFile.from_buffer_copy(kfile_data) if kfile.kf_structsize == 0: break yield", "(\"kf_file_pad1\", ctypes.c_uint32), ] class KinfoFileSem(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\",", "= [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pts_dev_freebsd11\", ctypes.c_uint32), (\"kf_pts_pad0\",", "_fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pts_dev_freebsd11\", ctypes.c_uint32),", "4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), 
(\"kf_pipe_addr\", ctypes.c_uint64), (\"kf_pipe_peer\", ctypes.c_uint64), (\"kf_pipe_buffer_cnt\", ctypes.c_uint32), (\"kf_pts_pad0\", (ctypes.c_uint32", "(\"kf_proc\", KinfoFileProc), ] class KinfoFile(ctypes.Structure): _fields_ = [ (\"kf_structsize\", ctypes.c_int), (\"kf_type\", ctypes.c_int), (\"kf_fd\",", "kinfo_file_size].ljust(kinfo_file_size, b\"\\0\") kfile = KinfoFile.from_buffer_copy(kfile_data) if kfile.kf_structsize == 0: break yield kfile i", "ctypes.c_uint64), (\"kf_sock_unpconn\", ctypes.c_uint64), (\"kf_sock_snd_sb_state\", ctypes.c_uint16), (\"kf_sock_rcv_sb_state\", ctypes.c_uint16), (\"kf_sock_recvq\", ctypes.c_uint32), ] class KinfoFileFile(ctypes.Structure): _fields_", "if kfile.kf_structsize == 0: break yield kfile i += kfile.kf_structsize def try_recover_fd_path(fd: int)", "KinfoFile11(ctypes.Structure): _fields_ = [ (\"kf_vnode_type\", ctypes.c_int), (\"kf_sock_domain\", ctypes.c_int), (\"kf_sock_type\", ctypes.c_int), (\"kf_sock_protocol\", ctypes.c_int), (\"kf_sa_local\",", "ctypes.c_int), (\"kf_sock_protocol0\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), (\"kf_sock_pcb\", ctypes.c_uint64), (\"kf_sock_inpcb\", ctypes.c_uint64), (\"kf_sock_unpconn\", ctypes.c_uint64),", "= [ (\"kf_sock_sendq\", ctypes.c_uint32), (\"kf_sock_domain0\", ctypes.c_int), (\"kf_sock_type0\", ctypes.c_int), (\"kf_sock_protocol0\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\",", "KERN_PROC = 14 KERN_PROC_FILEDESC = 33 KF_TYPE_VNODE = 1 PATH_MAX = 1024 pid_t", "(\"kf_pts_pad0\", (ctypes.c_uint32 * 3)), ] class KinfoFilePts(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 *", "try_recover_fd_path(fd: int) -> Optional[str]: for kfile in _iter_kinfo_files(os.getpid()): if kfile.kf_fd == fd and", "= [ (\"kf_file_type\", ctypes.c_int), (\"kf_spareint\", (ctypes.c_int * 3)), (\"kf_spareint64\", (ctypes.c_uint64 * 30)), (\"kf_file_fsid\",", "[ (\"ss_len\", ctypes.c_ubyte), (\"ss_family\", 
sa_family_t), (\"ss_pad1\", (ctypes.c_char * _SS_PAD1SIZE)), (\"ss_align\", ctypes.c_int64), (\"ss_pad2\", (ctypes.c_char", "class KinfoFile(ctypes.Structure): _fields_ = [ (\"kf_structsize\", ctypes.c_int), (\"kf_type\", ctypes.c_int), (\"kf_fd\", ctypes.c_int), (\"kf_ref_count\", ctypes.c_int),", "] class KinfoFileSock(ctypes.Structure): _fields_ = [ (\"kf_sock_sendq\", ctypes.c_uint32), (\"kf_sock_domain0\", ctypes.c_int), (\"kf_sock_type0\", ctypes.c_int), (\"kf_sock_protocol0\",", "* 32)), (\"kf_pipe_addr\", ctypes.c_uint64), (\"kf_pipe_peer\", ctypes.c_uint64), (\"kf_pipe_buffer_cnt\", ctypes.c_uint32), (\"kf_pts_pad0\", (ctypes.c_uint32 * 3)), ]", "* 32)), (\"kf_pid\", pid_t), ] class KinfoFileUn(ctypes.Union): _fields_ = [ (\"kf_freebsd11\", KinfoFile11), (\"kf_sock\",", "(\"kf_path\", (ctypes.c_char * PATH_MAX)), ] def _iter_kinfo_files(pid: int) -> Iterator[KinfoFile]: kinfo_file_data = bsd_util.sysctl_bytes_retry(", "= [ (\"kf_freebsd11\", KinfoFile11), (\"kf_sock\", KinfoFileSock), (\"kf_file\", KinfoFileFile), (\"kf_sem\", KinfoFileSem), (\"kf_pipe\", KinfoFilePipe), (\"kf_pts\",", "4)), ] class KinfoFileProc(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64", "-> Iterator[KinfoFile]: kinfo_file_data = bsd_util.sysctl_bytes_retry( [CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, pid], None ) kinfo_file_size =", "KinfoFilePipe), (\"kf_pts\", KinfoFilePts), (\"kf_proc\", KinfoFileProc), ] class KinfoFile(ctypes.Structure): _fields_ = [ (\"kf_structsize\", ctypes.c_int),", "(\"kf_cap_rights\", CapRights), (\"_kf_cap_spare\", ctypes.c_uint64), (\"kf_path\", (ctypes.c_char * PATH_MAX)), ] def _iter_kinfo_files(pid: int) ->", "= 0 while i < len(kinfo_file_data): kfile_data = kinfo_file_data[i: i + kinfo_file_size].ljust(kinfo_file_size, b\"\\0\")", "ctypes.c_uint32), ] class KinfoFileSem(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64", ") 
CAP_RIGHTS_VERSION = 0 class SockaddrStorage(ctypes.Structure): _fields_ = [ (\"ss_len\", ctypes.c_ubyte), (\"ss_family\", sa_family_t),", "ctypes.c_int), (\"kf_flags\", ctypes.c_int), (\"kf_pad0\", ctypes.c_int), (\"kf_offset\", ctypes.c_int64), (\"kf_un\", KinfoFileUn), (\"kf_status\", ctypes.c_uint16), (\"kf_pad1\", ctypes.c_uint16),", "os from typing import Iterator, Optional from . import bsd_util CTL_KERN = 1", "CAP_RIGHTS_VERSION = 0 class SockaddrStorage(ctypes.Structure): _fields_ = [ (\"ss_len\", ctypes.c_ubyte), (\"ss_family\", sa_family_t), (\"ss_pad1\",", "Iterator, Optional from . import bsd_util CTL_KERN = 1 KERN_PROC = 14 KERN_PROC_FILEDESC", "_SS_MAXSIZE = 128 _SS_ALIGNSIZE = ctypes.sizeof(ctypes.c_int64) _SS_PAD1SIZE = _SS_ALIGNSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t)", "[ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pipe_addr\", ctypes.c_uint64), (\"kf_pipe_peer\", ctypes.c_uint64),", "ctypes.c_uint64), (\"kf_pipe_peer\", ctypes.c_uint64), (\"kf_pipe_buffer_cnt\", ctypes.c_uint32), (\"kf_pts_pad0\", (ctypes.c_uint32 * 3)), ] class KinfoFilePts(ctypes.Structure): _fields_", "(\"kf_file_pad0\", ctypes.c_uint16), (\"kf_file_pad1\", ctypes.c_uint32), ] class KinfoFileSem(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 *", "(\"_kf_cap_spare\", ctypes.c_uint64), (\"kf_path\", (ctypes.c_char * PATH_MAX)), ] def _iter_kinfo_files(pid: int) -> Iterator[KinfoFile]: kinfo_file_data", "== fd and kfile.kf_type == KF_TYPE_VNODE: # Sometimes the path is empty (\"\")", "[ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pts_dev_freebsd11\", ctypes.c_uint32), (\"kf_pts_pad0\", ctypes.c_uint32),", "_fields_ = [ (\"kf_structsize\", ctypes.c_int), (\"kf_type\", ctypes.c_int), (\"kf_fd\", ctypes.c_int), (\"kf_ref_count\", ctypes.c_int), (\"kf_flags\", ctypes.c_int),", "SockaddrStorage(ctypes.Structure): _fields_ = [ (\"ss_len\", 
ctypes.c_ubyte), (\"ss_family\", sa_family_t), (\"ss_pad1\", (ctypes.c_char * _SS_PAD1SIZE)), (\"ss_align\",", "KinfoFilePts(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pts_dev_freebsd11\",", "[ (\"kf_freebsd11\", KinfoFile11), (\"kf_sock\", KinfoFileSock), (\"kf_file\", KinfoFileFile), (\"kf_sem\", KinfoFileSem), (\"kf_pipe\", KinfoFilePipe), (\"kf_pts\", KinfoFilePts),", "(\"kf_freebsd11\", KinfoFile11), (\"kf_sock\", KinfoFileSock), (\"kf_file\", KinfoFileFile), (\"kf_sem\", KinfoFileSem), (\"kf_pipe\", KinfoFilePipe), (\"kf_pts\", KinfoFilePts), (\"kf_proc\",", "= 1 KERN_PROC = 14 KERN_PROC_FILEDESC = 33 KF_TYPE_VNODE = 1 PATH_MAX =", "_SS_PAD1SIZE)), (\"ss_align\", ctypes.c_int64), (\"ss_pad2\", (ctypes.c_char * _SS_PAD2SIZE)), ] class CapRights(ctypes.Structure): _fields_ = [", "(\"kf_pipe_addr\", ctypes.c_uint64), (\"kf_pipe_peer\", ctypes.c_uint64), (\"kf_pipe_buffer_cnt\", ctypes.c_uint32), (\"kf_pts_pad0\", (ctypes.c_uint32 * 3)), ] class KinfoFilePts(ctypes.Structure):", "kinfo_file_data = bsd_util.sysctl_bytes_retry( [CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, pid], None ) kinfo_file_size = ctypes.sizeof(KinfoFile) i", "typing import Iterator, Optional from . 
import bsd_util CTL_KERN = 1 KERN_PROC =", "ctypes.c_uint16), (\"kf_file_pad0\", ctypes.c_uint16), (\"kf_file_pad1\", ctypes.c_uint32), ] class KinfoFileSem(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32", "] class KinfoFilePts(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 *", "= [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pipe_addr\", ctypes.c_uint64), (\"kf_pipe_peer\",", "(\"ss_len\", ctypes.c_ubyte), (\"ss_family\", sa_family_t), (\"ss_pad1\", (ctypes.c_char * _SS_PAD1SIZE)), (\"ss_align\", ctypes.c_int64), (\"ss_pad2\", (ctypes.c_char *", "(\"kf_flags\", ctypes.c_int), (\"kf_pad0\", ctypes.c_int), (\"kf_offset\", ctypes.c_int64), (\"kf_un\", KinfoFileUn), (\"kf_status\", ctypes.c_uint16), (\"kf_pad1\", ctypes.c_uint16), (\"_kf_ispare0\",", "(\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), (\"kf_sock_pcb\", ctypes.c_uint64), (\"kf_sock_inpcb\", ctypes.c_uint64), (\"kf_sock_unpconn\", ctypes.c_uint64), (\"kf_sock_snd_sb_state\", ctypes.c_uint16), (\"kf_sock_rcv_sb_state\",", "(\"kf_file_type\", ctypes.c_int), (\"kf_spareint\", (ctypes.c_int * 3)), (\"kf_spareint64\", (ctypes.c_uint64 * 30)), (\"kf_file_fsid\", ctypes.c_uint64), (\"kf_file_rdev\",", "kfile.kf_structsize == 0: break yield kfile i += kfile.kf_structsize def try_recover_fd_path(fd: int) ->", "kfile.kf_type == KF_TYPE_VNODE: # Sometimes the path is empty (\"\") for no apparent", "0 class SockaddrStorage(ctypes.Structure): _fields_ = [ (\"ss_len\", ctypes.c_ubyte), (\"ss_family\", sa_family_t), (\"ss_pad1\", (ctypes.c_char *", "(\"kf_sock_type0\", ctypes.c_int), (\"kf_sock_protocol0\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), (\"kf_sock_pcb\", ctypes.c_uint64), (\"kf_sock_inpcb\", ctypes.c_uint64), (\"kf_sock_unpconn\",", "class KinfoFileUn(ctypes.Union): _fields_ = [ (\"kf_freebsd11\", KinfoFile11), 
(\"kf_sock\", KinfoFileSock), (\"kf_file\", KinfoFileFile), (\"kf_sem\", KinfoFileSem),", "- ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) _SS_PAD2SIZE = ( _SS_MAXSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) -", "_fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pipe_addr\", ctypes.c_uint64),", "ctypes.c_uint8 _SS_MAXSIZE = 128 _SS_ALIGNSIZE = ctypes.sizeof(ctypes.c_int64) _SS_PAD1SIZE = _SS_ALIGNSIZE - ctypes.sizeof(ctypes.c_ubyte) -", "ctypes.c_int), (\"kf_sock_protocol\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), ] class KinfoFileSock(ctypes.Structure): _fields_ = [", "] class KinfoFile(ctypes.Structure): _fields_ = [ (\"kf_structsize\", ctypes.c_int), (\"kf_type\", ctypes.c_int), (\"kf_fd\", ctypes.c_int), (\"kf_ref_count\",", "(\"kf_sock_rcv_sb_state\", ctypes.c_uint16), (\"kf_sock_recvq\", ctypes.c_uint32), ] class KinfoFileFile(ctypes.Structure): _fields_ = [ (\"kf_file_type\", ctypes.c_int), (\"kf_spareint\",", "ctypes.c_int), (\"kf_sock_type\", ctypes.c_int), (\"kf_sock_protocol\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), ] class KinfoFileSock(ctypes.Structure): _fields_", "int) -> Iterator[KinfoFile]: kinfo_file_data = bsd_util.sysctl_bytes_retry( [CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, pid], None ) kinfo_file_size", "_fields_ = [ (\"kf_file_type\", ctypes.c_int), (\"kf_spareint\", (ctypes.c_int * 3)), (\"kf_spareint64\", (ctypes.c_uint64 * 30)),", "(\"kf_sock_snd_sb_state\", ctypes.c_uint16), (\"kf_sock_rcv_sb_state\", ctypes.c_uint16), (\"kf_sock_recvq\", ctypes.c_uint32), ] class KinfoFileFile(ctypes.Structure): _fields_ = [ (\"kf_file_type\",", "(\"kf_file_rdev\", ctypes.c_uint64), (\"kf_file_fileid\", ctypes.c_uint64), (\"kf_file_size\", ctypes.c_uint64), (\"kf_file_fsid_freebsd11\", ctypes.c_uint32), (\"kf_file_rdev_freebsd11\", ctypes.c_uint32), (\"kf_file_mode\", 
ctypes.c_uint16), (\"kf_file_pad0\",", "ctypes.c_int sa_family_t = ctypes.c_uint8 _SS_MAXSIZE = 128 _SS_ALIGNSIZE = ctypes.sizeof(ctypes.c_int64) _SS_PAD1SIZE = _SS_ALIGNSIZE", "(\"kf_sa_peer\", SockaddrStorage), (\"kf_sock_pcb\", ctypes.c_uint64), (\"kf_sock_inpcb\", ctypes.c_uint64), (\"kf_sock_unpconn\", ctypes.c_uint64), (\"kf_sock_snd_sb_state\", ctypes.c_uint16), (\"kf_sock_rcv_sb_state\", ctypes.c_uint16), (\"kf_sock_recvq\",", "[ (\"kf_structsize\", ctypes.c_int), (\"kf_type\", ctypes.c_int), (\"kf_fd\", ctypes.c_int), (\"kf_ref_count\", ctypes.c_int), (\"kf_flags\", ctypes.c_int), (\"kf_pad0\", ctypes.c_int),", "class CapRights(ctypes.Structure): _fields_ = [ (\"cr_rights\", (ctypes.c_uint64 * (CAP_RIGHTS_VERSION + 2))), ] class", "pid_t = ctypes.c_int sa_family_t = ctypes.c_uint8 _SS_MAXSIZE = 128 _SS_ALIGNSIZE = ctypes.sizeof(ctypes.c_int64) _SS_PAD1SIZE", "(\"kf_sock_type\", ctypes.c_int), (\"kf_sock_protocol\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\", SockaddrStorage), ] class KinfoFileSock(ctypes.Structure): _fields_ =", "(\"kf_sock\", KinfoFileSock), (\"kf_file\", KinfoFileFile), (\"kf_sem\", KinfoFileSem), (\"kf_pipe\", KinfoFilePipe), (\"kf_pts\", KinfoFilePts), (\"kf_proc\", KinfoFileProc), ]", "b\"\\0\") kfile = KinfoFile.from_buffer_copy(kfile_data) if kfile.kf_structsize == 0: break yield kfile i +=", "kfile = KinfoFile.from_buffer_copy(kfile_data) if kfile.kf_structsize == 0: break yield kfile i += kfile.kf_structsize", "KinfoFileUn(ctypes.Union): _fields_ = [ (\"kf_freebsd11\", KinfoFile11), (\"kf_sock\", KinfoFileSock), (\"kf_file\", KinfoFileFile), (\"kf_sem\", KinfoFileSem), (\"kf_pipe\",", "ctypes.sizeof(ctypes.c_int64) _SS_PAD1SIZE = _SS_ALIGNSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) _SS_PAD2SIZE = ( _SS_MAXSIZE -", "(\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_pts_dev_freebsd11\", ctypes.c_uint32), (\"kf_pts_pad0\", ctypes.c_uint32), (\"kf_pts_dev\", ctypes.c_uint64), 
(\"kf_pts_pad1\", (ctypes.c_uint32 *", "KinfoFileFile), (\"kf_sem\", KinfoFileSem), (\"kf_pipe\", KinfoFilePipe), (\"kf_pts\", KinfoFilePts), (\"kf_proc\", KinfoFileProc), ] class KinfoFile(ctypes.Structure): _fields_", "= [ (\"kf_vnode_type\", ctypes.c_int), (\"kf_sock_domain\", ctypes.c_int), (\"kf_sock_type\", ctypes.c_int), (\"kf_sock_protocol\", ctypes.c_int), (\"kf_sa_local\", SockaddrStorage), (\"kf_sa_peer\",", "class KinfoFileSem(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)),", "* (CAP_RIGHTS_VERSION + 2))), ] class KinfoFile11(ctypes.Structure): _fields_ = [ (\"kf_vnode_type\", ctypes.c_int), (\"kf_sock_domain\",", "kfile i += kfile.kf_structsize def try_recover_fd_path(fd: int) -> Optional[str]: for kfile in _iter_kinfo_files(os.getpid()):", "(\"kf_pts_dev_freebsd11\", ctypes.c_uint32), (\"kf_pts_pad0\", ctypes.c_uint32), (\"kf_pts_dev\", ctypes.c_uint64), (\"kf_pts_pad1\", (ctypes.c_uint32 * 4)), ] class KinfoFileProc(ctypes.Structure):", "KinfoFileUn), (\"kf_status\", ctypes.c_uint16), (\"kf_pad1\", ctypes.c_uint16), (\"_kf_ispare0\", ctypes.c_int), (\"kf_cap_rights\", CapRights), (\"_kf_cap_spare\", ctypes.c_uint64), (\"kf_path\", (ctypes.c_char", "(\"kf_pipe\", KinfoFilePipe), (\"kf_pts\", KinfoFilePts), (\"kf_proc\", KinfoFileProc), ] class KinfoFile(ctypes.Structure): _fields_ = [ (\"kf_structsize\",", "ctypes.c_int), (\"kf_type\", ctypes.c_int), (\"kf_fd\", ctypes.c_int), (\"kf_ref_count\", ctypes.c_int), (\"kf_flags\", ctypes.c_int), (\"kf_pad0\", ctypes.c_int), (\"kf_offset\", ctypes.c_int64),", "(\"kf_fd\", ctypes.c_int), (\"kf_ref_count\", ctypes.c_int), (\"kf_flags\", ctypes.c_int), (\"kf_pad0\", ctypes.c_int), (\"kf_offset\", ctypes.c_int64), (\"kf_un\", KinfoFileUn), (\"kf_status\",", "ctypes.c_uint64), (\"kf_pipe_buffer_cnt\", ctypes.c_uint32), (\"kf_pts_pad0\", (ctypes.c_uint32 * 3)), ] class KinfoFilePts(ctypes.Structure): _fields_ = [", "def _iter_kinfo_files(pid: 
int) -> Iterator[KinfoFile]: kinfo_file_data = bsd_util.sysctl_bytes_retry( [CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, pid], None", "( _SS_MAXSIZE - ctypes.sizeof(ctypes.c_ubyte) - ctypes.sizeof(sa_family_t) - _SS_PAD1SIZE - _SS_ALIGNSIZE ) CAP_RIGHTS_VERSION =", "(\"kf_file_size\", ctypes.c_uint64), (\"kf_file_fsid_freebsd11\", ctypes.c_uint32), (\"kf_file_rdev_freebsd11\", ctypes.c_uint32), (\"kf_file_mode\", ctypes.c_uint16), (\"kf_file_pad0\", ctypes.c_uint16), (\"kf_file_pad1\", ctypes.c_uint32), ]", "SockaddrStorage), (\"kf_sock_pcb\", ctypes.c_uint64), (\"kf_sock_inpcb\", ctypes.c_uint64), (\"kf_sock_unpconn\", ctypes.c_uint64), (\"kf_sock_snd_sb_state\", ctypes.c_uint16), (\"kf_sock_rcv_sb_state\", ctypes.c_uint16), (\"kf_sock_recvq\", ctypes.c_uint32),", "None ) kinfo_file_size = ctypes.sizeof(KinfoFile) i = 0 while i < len(kinfo_file_data): kfile_data", "= 1024 pid_t = ctypes.c_int sa_family_t = ctypes.c_uint8 _SS_MAXSIZE = 128 _SS_ALIGNSIZE =", "KERN_PROC_FILEDESC = 33 KF_TYPE_VNODE = 1 PATH_MAX = 1024 pid_t = ctypes.c_int sa_family_t", "= [ (\"kf_structsize\", ctypes.c_int), (\"kf_type\", ctypes.c_int), (\"kf_fd\", ctypes.c_int), (\"kf_ref_count\", ctypes.c_int), (\"kf_flags\", ctypes.c_int), (\"kf_pad0\",", "30)), (\"kf_file_fsid\", ctypes.c_uint64), (\"kf_file_rdev\", ctypes.c_uint64), (\"kf_file_fileid\", ctypes.c_uint64), (\"kf_file_size\", ctypes.c_uint64), (\"kf_file_fsid_freebsd11\", ctypes.c_uint32), (\"kf_file_rdev_freebsd11\", ctypes.c_uint32),", "(\"kf_pad0\", ctypes.c_int), (\"kf_offset\", ctypes.c_int64), (\"kf_un\", KinfoFileUn), (\"kf_status\", ctypes.c_uint16), (\"kf_pad1\", ctypes.c_uint16), (\"_kf_ispare0\", ctypes.c_int), (\"kf_cap_rights\",", "= 0 class SockaddrStorage(ctypes.Structure): _fields_ = [ (\"ss_len\", ctypes.c_ubyte), (\"ss_family\", sa_family_t), (\"ss_pad1\", (ctypes.c_char", "< len(kinfo_file_data): kfile_data = kinfo_file_data[i: i + kinfo_file_size].ljust(kinfo_file_size, b\"\\0\") kfile = 
KinfoFile.from_buffer_copy(kfile_data) if", "for kfile in _iter_kinfo_files(os.getpid()): if kfile.kf_fd == fd and kfile.kf_type == KF_TYPE_VNODE: #", "pylint: disable=invalid-name,too-few-public-methods import ctypes import os from typing import Iterator, Optional from .", "] def _iter_kinfo_files(pid: int) -> Iterator[KinfoFile]: kinfo_file_data = bsd_util.sysctl_bytes_retry( [CTL_KERN, KERN_PROC, KERN_PROC_FILEDESC, pid],", "(ctypes.c_uint32 * 4)), ] class KinfoFileProc(ctypes.Structure): _fields_ = [ (\"kf_spareint\", (ctypes.c_uint32 * 4)),", "(ctypes.c_uint32 * 4)), (\"kf_spareint64\", (ctypes.c_uint64 * 32)), (\"kf_sem_value\", ctypes.c_uint32), (\"kf_sem_mode\", ctypes.c_uint16), ] class", "(\"kf_structsize\", ctypes.c_int), (\"kf_type\", ctypes.c_int), (\"kf_fd\", ctypes.c_int), (\"kf_ref_count\", ctypes.c_int), (\"kf_flags\", ctypes.c_int), (\"kf_pad0\", ctypes.c_int), (\"kf_offset\"," ]
"""Module config functions."""
import os
import sys
from configparser import ConfigParser
from configparser import ExtendedInterpolation


def read_config(local_config=None):
    """Read the layered opensda_flasher configuration.

    Files are handed to ``ConfigParser.read()`` in ascending precedence
    (later files override earlier ones):

    1. the package default ``opensda_flasher.ini`` next to this module,
    2. the per-user ``~/.opensda_flasher.ini``,
    3. *local_config*, when given.

    Missing files are silently skipped by ``ConfigParser.read()``.

    Args:
        local_config: Optional path to a project-local INI file.  When
            None, only the default and per-user files are consulted.

    Returns:
        A ``ConfigParser`` using ``ExtendedInterpolation``.
    """
    module_dir = os.path.dirname(__file__)
    home_dir = os.path.expanduser("~")
    candidates = [
        os.path.join(module_dir, "opensda_flasher.ini"),
        os.path.join(home_dir, ".opensda_flasher.ini"),
    ]
    # Fix: the original replaced None with "" and always passed it to
    # config.read(), leaning on ConfigParser tolerating a bogus filename.
    # Appending only a real path is explicit and behaviorally identical.
    if local_config:
        candidates.append(local_config)
    config = ConfigParser(interpolation=ExtendedInterpolation())
    config.read(candidates)
    return config


if __name__ == "__main__":
    # Debug entry point: dump every section and key.
    # Usage: python config.py [local_config.ini]
    cfg = read_config(sys.argv[1]) if len(sys.argv) > 1 else read_config()
    for section, values in cfg.items():
        print(section)
        for key, value in values.items():
            print(f"\t{key}: {value}")
[ "**kwargs) class CancellableAsyncMock(AsyncMock): # pylint: disable = too-many-ancestors async def __call__(self, *args, **kwargs):", "# AsyncMock is new in Python 3.8 class AsyncMock(MagicMock): async def __call__(self, *args,", "return super().__call__(*args, **kwargs) class CancellableAsyncMock(AsyncMock): # pylint: disable = too-many-ancestors async def __call__(self,", "= too-many-ancestors async def __call__(self, *args, **kwargs): await asyncio.sleep(1) return await super().__call__(*args, **kwargs)", "pylint: disable = invalid-overridden-method, useless-super-delegation return super().__call__(*args, **kwargs) class CancellableAsyncMock(AsyncMock): # pylint: disable", "def __call__(self, *args, **kwargs): # pylint: disable = invalid-overridden-method, useless-super-delegation return super().__call__(*args, **kwargs)", "# pylint: disable = too-many-ancestors async def __call__(self, *args, **kwargs): await asyncio.sleep(1) return", "pylint: disable = too-many-ancestors async def __call__(self, *args, **kwargs): await asyncio.sleep(1) return await", "MockException(Exception): pass # AsyncMock is new in Python 3.8 class AsyncMock(MagicMock): async def", "unittest.mock import MagicMock class MockException(Exception): pass # AsyncMock is new in Python 3.8", "new in Python 3.8 class AsyncMock(MagicMock): async def __call__(self, *args, **kwargs): # pylint:", "**kwargs): # pylint: disable = invalid-overridden-method, useless-super-delegation return super().__call__(*args, **kwargs) class CancellableAsyncMock(AsyncMock): #", "import MagicMock class MockException(Exception): pass # AsyncMock is new in Python 3.8 class", "MagicMock class MockException(Exception): pass # AsyncMock is new in Python 3.8 class AsyncMock(MagicMock):", "super().__call__(*args, **kwargs) class CancellableAsyncMock(AsyncMock): # pylint: disable = too-many-ancestors async def __call__(self, *args,", "from unittest.mock import MagicMock class MockException(Exception): pass # AsyncMock is new 
in Python", "AsyncMock is new in Python 3.8 class AsyncMock(MagicMock): async def __call__(self, *args, **kwargs):", "Python 3.8 class AsyncMock(MagicMock): async def __call__(self, *args, **kwargs): # pylint: disable =", "__call__(self, *args, **kwargs): # pylint: disable = invalid-overridden-method, useless-super-delegation return super().__call__(*args, **kwargs) class", "*args, **kwargs): # pylint: disable = invalid-overridden-method, useless-super-delegation return super().__call__(*args, **kwargs) class CancellableAsyncMock(AsyncMock):", "CancellableAsyncMock(AsyncMock): # pylint: disable = too-many-ancestors async def __call__(self, *args, **kwargs): await asyncio.sleep(1)", "useless-super-delegation return super().__call__(*args, **kwargs) class CancellableAsyncMock(AsyncMock): # pylint: disable = too-many-ancestors async def", "import asyncio from unittest.mock import MagicMock class MockException(Exception): pass # AsyncMock is new", "class CancellableAsyncMock(AsyncMock): # pylint: disable = too-many-ancestors async def __call__(self, *args, **kwargs): await", "invalid-overridden-method, useless-super-delegation return super().__call__(*args, **kwargs) class CancellableAsyncMock(AsyncMock): # pylint: disable = too-many-ancestors async", "# pylint: disable = invalid-overridden-method, useless-super-delegation return super().__call__(*args, **kwargs) class CancellableAsyncMock(AsyncMock): # pylint:", "class AsyncMock(MagicMock): async def __call__(self, *args, **kwargs): # pylint: disable = invalid-overridden-method, useless-super-delegation", "pass # AsyncMock is new in Python 3.8 class AsyncMock(MagicMock): async def __call__(self,", "disable = invalid-overridden-method, useless-super-delegation return super().__call__(*args, **kwargs) class CancellableAsyncMock(AsyncMock): # pylint: disable =", "async def __call__(self, *args, **kwargs): # pylint: disable = invalid-overridden-method, useless-super-delegation return super().__call__(*args,", "3.8 class 
AsyncMock(MagicMock): async def __call__(self, *args, **kwargs): # pylint: disable = invalid-overridden-method,", "in Python 3.8 class AsyncMock(MagicMock): async def __call__(self, *args, **kwargs): # pylint: disable", "is new in Python 3.8 class AsyncMock(MagicMock): async def __call__(self, *args, **kwargs): #", "disable = too-many-ancestors async def __call__(self, *args, **kwargs): await asyncio.sleep(1) return await super().__call__(*args,", "AsyncMock(MagicMock): async def __call__(self, *args, **kwargs): # pylint: disable = invalid-overridden-method, useless-super-delegation return", "= invalid-overridden-method, useless-super-delegation return super().__call__(*args, **kwargs) class CancellableAsyncMock(AsyncMock): # pylint: disable = too-many-ancestors", "asyncio from unittest.mock import MagicMock class MockException(Exception): pass # AsyncMock is new in", "class MockException(Exception): pass # AsyncMock is new in Python 3.8 class AsyncMock(MagicMock): async" ]
[ "= 9 # tuples are immutable, can't change items once assigned except Exception", "by creating a new tuple new_number_tuple = number_tuple + (5) # TypeError: can", "mixed_tuple = (1, 'a', 2, 'b', [88, 99]) # can mix different types", "Exception:', e) # TypeError: 'tuple' object does not support item assignment try: #", "# it appears as if we are updating it print(id(number_tuple)) # but they", "assigned except Exception as e: print('** caught Exception:', e) # TypeError: 'tuple' object", "as if we are updating it print(id(number_tuple)) # but they are two different", "print('** caught Exception:', e) new_number_tuple = number_tuple + (5,) # have to add", "# can mix different types print(number_tuple) print(letter_tuple) print(mixed_tuple) print(type(number_tuple)) # <class 'tuple'> try:", "# the variable name is now pointing to a new object print(number_tuple[:]) #", "print(mixed_tuple) print(type(number_tuple)) # <class 'tuple'> try: number_tuple[0] = 9 # tuples are immutable,", "9 # tuples are immutable, can't change items once assigned except Exception as", "= number_tuple + (5) # TypeError: can only concatenate tuple (not \"int\") to", "e) new_number_tuple = number_tuple + (5,) # have to add a comma to", "new_number_tuple = number_tuple + (5,) # have to add a comma to make", "= number_tuple + (5,) # if we reassign to original tuple value, print(number_tuple)", "# TypeError: 'tuple' object does not support item assignment try: # try to", "Exception as e: print('** caught Exception:', e) new_number_tuple = number_tuple + (5,) #", "# TypeError: can only concatenate tuple (not \"int\") to tuple except Exception as", "if we reassign to original tuple value, print(number_tuple) # it appears as if", "<NAME> ''' # ------------ Tuples ------------ number_tuple = (1, 2, 3, 4) #", "print(type(number_tuple)) # <class 'tuple'> try: number_tuple[0] = 9 # tuples are immutable, can't", "print(number_tuple) # it appears as if we are updating it print(id(number_tuple)) # but", 
"new_number_tuple = number_tuple + (5) # TypeError: can only concatenate tuple (not \"int\")", "TypeError: can only concatenate tuple (not \"int\") to tuple except Exception as e:", "different objects number_tuple = number_tuple + (6,) print(id(number_tuple)) # the variable name is", "types print(number_tuple) print(letter_tuple) print(mixed_tuple) print(type(number_tuple)) # <class 'tuple'> try: number_tuple[0] = 9 #", "they are two different objects number_tuple = number_tuple + (6,) print(id(number_tuple)) # the", "(5) # TypeError: can only concatenate tuple (not \"int\") to tuple except Exception", "add item to tuples by creating a new tuple new_number_tuple = number_tuple +", "Exception:', e) new_number_tuple = number_tuple + (5,) # have to add a comma", "item to tuples by creating a new tuple new_number_tuple = number_tuple + (5)", "the variable name is now pointing to a new object print(number_tuple[:]) # slice", "caught Exception:', e) # TypeError: 'tuple' object does not support item assignment try:", "# tuples use parentheses for creation letter_tuple = ('a', 'b', 'c', 'd') mixed_tuple", "add a comma to make it a tuple print(new_number_tuple) number_tuple = number_tuple +", "to a new object print(number_tuple[:]) # slice operator - entire tuple print(number_tuple[1:3]) #", "number_tuple = (1, 2, 3, 4) # tuples use parentheses for creation letter_tuple", "tuple value, print(number_tuple) # it appears as if we are updating it print(id(number_tuple))", "a new object print(number_tuple[:]) # slice operator - entire tuple print(number_tuple[1:3]) # start", "= ('a', 'b', 'c', 'd') mixed_tuple = (1, 'a', 2, 'b', [88, 99])", "print(new_number_tuple) number_tuple = number_tuple + (5,) # if we reassign to original tuple", "print(number_tuple[1:3]) # start at element 1, end at 3 (exclusive) print(number_tuple[3:]) # start", "are updating it print(id(number_tuple)) # but they are two different objects number_tuple =", "'c', 'd') mixed_tuple = (1, 'a', 2, 'b', [88, 
99]) # can mix", "new object print(number_tuple[:]) # slice operator - entire tuple print(number_tuple[1:3]) # start at", "print(letter_tuple) print(mixed_tuple) print(type(number_tuple)) # <class 'tuple'> try: number_tuple[0] = 9 # tuples are", "can only concatenate tuple (not \"int\") to tuple except Exception as e: print('**", "tuples use parentheses for creation letter_tuple = ('a', 'b', 'c', 'd') mixed_tuple =", "make it a tuple print(new_number_tuple) number_tuple = number_tuple + (5,) # if we", "'b', 'c', 'd') mixed_tuple = (1, 'a', 2, 'b', [88, 99]) # can", "2, 'b', [88, 99]) # can mix different types print(number_tuple) print(letter_tuple) print(mixed_tuple) print(type(number_tuple))", "mix different types print(number_tuple) print(letter_tuple) print(mixed_tuple) print(type(number_tuple)) # <class 'tuple'> try: number_tuple[0] =", "concatenate tuple (not \"int\") to tuple except Exception as e: print('** caught Exception:',", "if we are updating it print(id(number_tuple)) # but they are two different objects", "assignment try: # try to add item to tuples by creating a new", "change items once assigned except Exception as e: print('** caught Exception:', e) #", "it print(id(number_tuple)) # but they are two different objects number_tuple = number_tuple +", "Tuples ------------ number_tuple = (1, 2, 3, 4) # tuples use parentheses for", "<class 'tuple'> try: number_tuple[0] = 9 # tuples are immutable, can't change items", "as e: print('** caught Exception:', e) # TypeError: 'tuple' object does not support", "'tuple'> try: number_tuple[0] = 9 # tuples are immutable, can't change items once", "now pointing to a new object print(number_tuple[:]) # slice operator - entire tuple", "'a', 2, 'b', [88, 99]) # can mix different types print(number_tuple) print(letter_tuple) print(mixed_tuple)", "a tuple print(new_number_tuple) number_tuple = number_tuple + (5,) # if we reassign to", "object print(number_tuple[:]) # slice operator - entire tuple 
print(number_tuple[1:3]) # start at element", "('a', 'b', 'c', 'd') mixed_tuple = (1, 'a', 2, 'b', [88, 99]) #", "a comma to make it a tuple print(new_number_tuple) number_tuple = number_tuple + (5,)", "1, end at 3 (exclusive) print(number_tuple[3:]) # start at element 3 until the", "# ------------ Tuples ------------ number_tuple = (1, 2, 3, 4) # tuples use", "99]) # can mix different types print(number_tuple) print(letter_tuple) print(mixed_tuple) print(type(number_tuple)) # <class 'tuple'>", "= (1, 2, 3, 4) # tuples use parentheses for creation letter_tuple =", "object does not support item assignment try: # try to add item to", "comma to make it a tuple print(new_number_tuple) number_tuple = number_tuple + (5,) #", "to add a comma to make it a tuple print(new_number_tuple) number_tuple = number_tuple", "to make it a tuple print(new_number_tuple) number_tuple = number_tuple + (5,) # if", "tuples by creating a new tuple new_number_tuple = number_tuple + (5) # TypeError:", "try to add item to tuples by creating a new tuple new_number_tuple =", "updating it print(id(number_tuple)) # but they are two different objects number_tuple = number_tuple", "(1, 2, 3, 4) # tuples use parentheses for creation letter_tuple = ('a',", "variable name is now pointing to a new object print(number_tuple[:]) # slice operator", "to tuple except Exception as e: print('** caught Exception:', e) new_number_tuple = number_tuple", "to original tuple value, print(number_tuple) # it appears as if we are updating", "reassign to original tuple value, print(number_tuple) # it appears as if we are", "tuple except Exception as e: print('** caught Exception:', e) new_number_tuple = number_tuple +", "(5,) # have to add a comma to make it a tuple print(new_number_tuple)", "can't change items once assigned except Exception as e: print('** caught Exception:', e)", "number_tuple = number_tuple + (6,) print(id(number_tuple)) # the variable name is now pointing", "we are updating it 
print(id(number_tuple)) # but they are two different objects number_tuple", "but they are two different objects number_tuple = number_tuple + (6,) print(id(number_tuple)) #", "+ (5) # TypeError: can only concatenate tuple (not \"int\") to tuple except", "Exception as e: print('** caught Exception:', e) # TypeError: 'tuple' object does not", "[88, 99]) # can mix different types print(number_tuple) print(letter_tuple) print(mixed_tuple) print(type(number_tuple)) # <class", "3, 4) # tuples use parentheses for creation letter_tuple = ('a', 'b', 'c',", "name is now pointing to a new object print(number_tuple[:]) # slice operator -", "we reassign to original tuple value, print(number_tuple) # it appears as if we", "start at element 1, end at 3 (exclusive) print(number_tuple[3:]) # start at element", "tuple new_number_tuple = number_tuple + (5) # TypeError: can only concatenate tuple (not", "# but they are two different objects number_tuple = number_tuple + (6,) print(id(number_tuple))", "as e: print('** caught Exception:', e) new_number_tuple = number_tuple + (5,) # have", "(6,) print(id(number_tuple)) # the variable name is now pointing to a new object", "does not support item assignment try: # try to add item to tuples", "print(number_tuple[:]) # slice operator - entire tuple print(number_tuple[1:3]) # start at element 1,", "# start at element 1, end at 3 (exclusive) print(number_tuple[3:]) # start at", "support item assignment try: # try to add item to tuples by creating", "number_tuple + (5,) # have to add a comma to make it a", "'b', [88, 99]) # can mix different types print(number_tuple) print(letter_tuple) print(mixed_tuple) print(type(number_tuple)) #", "parentheses for creation letter_tuple = ('a', 'b', 'c', 'd') mixed_tuple = (1, 'a',", "e) # TypeError: 'tuple' object does not support item assignment try: # try", "entire tuple print(number_tuple[1:3]) # start at element 1, end at 3 (exclusive) print(number_tuple[3:])", "- entire tuple print(number_tuple[1:3]) # 
start at element 1, end at 3 (exclusive)", "print(id(number_tuple)) # but they are two different objects number_tuple = number_tuple + (6,)", "pointing to a new object print(number_tuple[:]) # slice operator - entire tuple print(number_tuple[1:3])", "once assigned except Exception as e: print('** caught Exception:', e) # TypeError: 'tuple'", "tuple print(new_number_tuple) number_tuple = number_tuple + (5,) # if we reassign to original", "is now pointing to a new object print(number_tuple[:]) # slice operator - entire", "creation letter_tuple = ('a', 'b', 'c', 'd') mixed_tuple = (1, 'a', 2, 'b',", "to add item to tuples by creating a new tuple new_number_tuple = number_tuple", "number_tuple + (5) # TypeError: can only concatenate tuple (not \"int\") to tuple", "immutable, can't change items once assigned except Exception as e: print('** caught Exception:',", "+ (5,) # if we reassign to original tuple value, print(number_tuple) # it", "have to add a comma to make it a tuple print(new_number_tuple) number_tuple =", "Created on Mar 31, 2018 @author: <NAME> ''' # ------------ Tuples ------------ number_tuple", "tuple (not \"int\") to tuple except Exception as e: print('** caught Exception:', e)", "appears as if we are updating it print(id(number_tuple)) # but they are two", "number_tuple + (6,) print(id(number_tuple)) # the variable name is now pointing to a", "+ (6,) print(id(number_tuple)) # the variable name is now pointing to a new", "at element 1, end at 3 (exclusive) print(number_tuple[3:]) # start at element 3", "number_tuple[0] = 9 # tuples are immutable, can't change items once assigned except", "number_tuple = number_tuple + (5,) # if we reassign to original tuple value,", "(not \"int\") to tuple except Exception as e: print('** caught Exception:', e) new_number_tuple", "+ (5,) # have to add a comma to make it a tuple", "print(number_tuple) print(letter_tuple) print(mixed_tuple) print(type(number_tuple)) # <class 'tuple'> try: number_tuple[0] = 9 # tuples", 
"to tuples by creating a new tuple new_number_tuple = number_tuple + (5) #", "element 1, end at 3 (exclusive) print(number_tuple[3:]) # start at element 3 until", "are two different objects number_tuple = number_tuple + (6,) print(id(number_tuple)) # the variable", "operator - entire tuple print(number_tuple[1:3]) # start at element 1, end at 3", "= (1, 'a', 2, 'b', [88, 99]) # can mix different types print(number_tuple)", "2018 @author: <NAME> ''' # ------------ Tuples ------------ number_tuple = (1, 2, 3,", "number_tuple + (5,) # if we reassign to original tuple value, print(number_tuple) #", "try: number_tuple[0] = 9 # tuples are immutable, can't change items once assigned", "for creation letter_tuple = ('a', 'b', 'c', 'd') mixed_tuple = (1, 'a', 2,", "(1, 'a', 2, 'b', [88, 99]) # can mix different types print(number_tuple) print(letter_tuple)", "creating a new tuple new_number_tuple = number_tuple + (5) # TypeError: can only", "@author: <NAME> ''' # ------------ Tuples ------------ number_tuple = (1, 2, 3, 4)", "not support item assignment try: # try to add item to tuples by", "can mix different types print(number_tuple) print(letter_tuple) print(mixed_tuple) print(type(number_tuple)) # <class 'tuple'> try: number_tuple[0]", "'tuple' object does not support item assignment try: # try to add item", "except Exception as e: print('** caught Exception:', e) # TypeError: 'tuple' object does", "# <class 'tuple'> try: number_tuple[0] = 9 # tuples are immutable, can't change", "it appears as if we are updating it print(id(number_tuple)) # but they are", "''' Created on Mar 31, 2018 @author: <NAME> ''' # ------------ Tuples ------------", "print('** caught Exception:', e) # TypeError: 'tuple' object does not support item assignment", "new tuple new_number_tuple = number_tuple + (5) # TypeError: can only concatenate tuple", "(5,) # if we reassign to original tuple value, print(number_tuple) # it appears", "# tuples are immutable, can't change items once assigned except 
Exception as e:", "2, 3, 4) # tuples use parentheses for creation letter_tuple = ('a', 'b',", "tuple print(number_tuple[1:3]) # start at element 1, end at 3 (exclusive) print(number_tuple[3:]) #", "= number_tuple + (6,) print(id(number_tuple)) # the variable name is now pointing to", "letter_tuple = ('a', 'b', 'c', 'd') mixed_tuple = (1, 'a', 2, 'b', [88,", "it a tuple print(new_number_tuple) number_tuple = number_tuple + (5,) # if we reassign", "value, print(number_tuple) # it appears as if we are updating it print(id(number_tuple)) #", "''' # ------------ Tuples ------------ number_tuple = (1, 2, 3, 4) # tuples", "at 3 (exclusive) print(number_tuple[3:]) # start at element 3 until the end (inclusive)", "try: # try to add item to tuples by creating a new tuple", "# try to add item to tuples by creating a new tuple new_number_tuple", "31, 2018 @author: <NAME> ''' # ------------ Tuples ------------ number_tuple = (1, 2,", "objects number_tuple = number_tuple + (6,) print(id(number_tuple)) # the variable name is now", "only concatenate tuple (not \"int\") to tuple except Exception as e: print('** caught", "caught Exception:', e) new_number_tuple = number_tuple + (5,) # have to add a", "# if we reassign to original tuple value, print(number_tuple) # it appears as", "end at 3 (exclusive) print(number_tuple[3:]) # start at element 3 until the end", "a new tuple new_number_tuple = number_tuple + (5) # TypeError: can only concatenate", "------------ number_tuple = (1, 2, 3, 4) # tuples use parentheses for creation", "\"int\") to tuple except Exception as e: print('** caught Exception:', e) new_number_tuple =", "except Exception as e: print('** caught Exception:', e) new_number_tuple = number_tuple + (5,)", "print(id(number_tuple)) # the variable name is now pointing to a new object print(number_tuple[:])", "are immutable, can't change items once assigned except Exception as e: print('** caught", "# slice operator - entire tuple print(number_tuple[1:3]) # start at element 
1, end", "items once assigned except Exception as e: print('** caught Exception:', e) # TypeError:", "item assignment try: # try to add item to tuples by creating a", "slice operator - entire tuple print(number_tuple[1:3]) # start at element 1, end at", "on Mar 31, 2018 @author: <NAME> ''' # ------------ Tuples ------------ number_tuple =", "use parentheses for creation letter_tuple = ('a', 'b', 'c', 'd') mixed_tuple = (1,", "original tuple value, print(number_tuple) # it appears as if we are updating it", "4) # tuples use parentheses for creation letter_tuple = ('a', 'b', 'c', 'd')", "tuples are immutable, can't change items once assigned except Exception as e: print('**", "------------ Tuples ------------ number_tuple = (1, 2, 3, 4) # tuples use parentheses", "TypeError: 'tuple' object does not support item assignment try: # try to add", "'d') mixed_tuple = (1, 'a', 2, 'b', [88, 99]) # can mix different", "different types print(number_tuple) print(letter_tuple) print(mixed_tuple) print(type(number_tuple)) # <class 'tuple'> try: number_tuple[0] = 9", "Mar 31, 2018 @author: <NAME> ''' # ------------ Tuples ------------ number_tuple = (1,", "# have to add a comma to make it a tuple print(new_number_tuple) number_tuple", "two different objects number_tuple = number_tuple + (6,) print(id(number_tuple)) # the variable name", "= number_tuple + (5,) # have to add a comma to make it", "e: print('** caught Exception:', e) # TypeError: 'tuple' object does not support item", "e: print('** caught Exception:', e) new_number_tuple = number_tuple + (5,) # have to" ]
[ "< len(cities_order) else cities_order[0] total_distance += self.distances[city][next_city] return Fitness(-total_distance, is_natural=False) def print_results(result): print('Visit", "i, city in enumerate(cities_order): next_city = cities_order[i + 1] if i + 1", "360, 0] # d ] class SalesmanFitnessEvaluator(FitnessEvaluator): def __init__(self, distances): super().__init__() self.distances =", "engine = GenerationalEvolutionEngine() engine.create(candidate_factory, operator, fitness_evaluator, selection_strategy) population = engine.evolve(population_size, elite_count, termination_condition) print_results(population.get_best())", "0, 130, 180, 300], # a [130, 0, 320, 350], # b [180,", "+ 1 < len(cities_order) else cities_order[0] print('- ', city_names[city], distances[city][next_city]) print('Total distance: ',", "] class SalesmanFitnessEvaluator(FitnessEvaluator): def __init__(self, distances): super().__init__() self.distances = distances def get_fitness(self, candidate,", "self.distances[city][next_city] return Fitness(-total_distance, is_natural=False) def print_results(result): print('Visit cities in this order:') cities_order =", "130, 180, 300], # a [130, 0, 320, 350], # b [180, 320,", "Stagnation(100) engine = GenerationalEvolutionEngine() engine.create(candidate_factory, operator, fitness_evaluator, selection_strategy) population = engine.evolve(population_size, elite_count, termination_condition)", "[130, 0, 320, 350], # b [180, 320, 0, 360], # c [300,", "random = Random() probability = Probability(crossover_mutate_probability, random) candidate_factory = ListFactory(random, len(distances)-1) crossover =", "city_names[city], distances[city][next_city]) print('Total distance: ', abs(result.fitness)) logging.basicConfig(level=logging.DEBUG) random = Random() probability = Probability(crossover_mutate_probability,", "2) operator = PipelineOperator() operator.append_operator(crossover) operator.append_operator(mutation) fitness_evaluator = 
SalesmanFitnessEvaluator(distances) selection_strategy = RouletteWheelSelection(random) termination_condition", "in this order:') cities_order = result.data for i, city in enumerate(cities_order): next_city =", "1] if i + 1 < len(cities_order) else cities_order[0] print('- ', city_names[city], distances[city][next_city])", "# d ] class SalesmanFitnessEvaluator(FitnessEvaluator): def __init__(self, distances): super().__init__() self.distances = distances def", "i+1 < len(cities_order) else cities_order[0] total_distance += self.distances[city][next_city] return Fitness(-total_distance, is_natural=False) def print_results(result):", "0, 360], # c [300, 350, 360, 0] # d ] class SalesmanFitnessEvaluator(FitnessEvaluator):", "total_distance = 0 cities_order = candidate.data for i, city in enumerate(cities_order): next_city =", "= 2 crossover_points = 2 crossover_mutate_probability = 0.2 max_weight = 15 city_names =", "candidate, population): total_distance = 0 cities_order = candidate.data for i, city in enumerate(cities_order):", "', city_names[city], distances[city][next_city]) print('Total distance: ', abs(result.fitness)) logging.basicConfig(level=logging.DEBUG) random = Random() probability =", "= 2 crossover_mutate_probability = 0.2 max_weight = 15 city_names = ['a', 'b', 'c',", "print('- ', city_names[city], distances[city][next_city]) print('Total distance: ', abs(result.fitness)) logging.basicConfig(level=logging.DEBUG) random = Random() probability", "= cities_order[i + 1] if i + 1 < len(cities_order) else cities_order[0] print('-", "0.2 max_weight = 15 city_names = ['a', 'b', 'c', 'd'] distances = [", "'b', 'c', 'd'] distances = [ # a b c d [ 0,", "ListOrderMutation(probability, random, 2) operator = PipelineOperator() operator.append_operator(crossover) operator.append_operator(mutation) fitness_evaluator = SalesmanFitnessEvaluator(distances) selection_strategy =", "def get_fitness(self, candidate, population): total_distance = 0 cities_order = 
candidate.data for i, city", "c d [ 0, 130, 180, 300], # a [130, 0, 320, 350],", "# b [180, 320, 0, 360], # c [300, 350, 360, 0] #", "cities_order[i + 1] if i + 1 < len(cities_order) else cities_order[0] print('- ',", "= distances def get_fitness(self, candidate, population): total_distance = 0 cities_order = candidate.data for", "distances = [ # a b c d [ 0, 130, 180, 300],", "320, 350], # b [180, 320, 0, 360], # c [300, 350, 360,", "[180, 320, 0, 360], # c [300, 350, 360, 0] # d ]", "320, 0, 360], # c [300, 350, 360, 0] # d ] class", "2 crossover_points = 2 crossover_mutate_probability = 0.2 max_weight = 15 city_names = ['a',", "print_results(result): print('Visit cities in this order:') cities_order = result.data for i, city in", "random, 2) operator = PipelineOperator() operator.append_operator(crossover) operator.append_operator(mutation) fitness_evaluator = SalesmanFitnessEvaluator(distances) selection_strategy = RouletteWheelSelection(random)", "is_natural=False) def print_results(result): print('Visit cities in this order:') cities_order = result.data for i,", "sys.path.append(os.environ[\"PWD\"]) from pyga import * population_size = 10 elite_count = 2 crossover_points =", "self.distances = distances def get_fitness(self, candidate, population): total_distance = 0 cities_order = candidate.data", "[ 0, 130, 180, 300], # a [130, 0, 320, 350], # b", "city_names = ['a', 'b', 'c', 'd'] distances = [ # a b c", "= RouletteWheelSelection(random) termination_condition = Stagnation(100) engine = GenerationalEvolutionEngine() engine.create(candidate_factory, operator, fitness_evaluator, selection_strategy) population", "max_weight = 15 city_names = ['a', 'b', 'c', 'd'] distances = [ #", "for i, city in enumerate(cities_order): next_city = cities_order[i+1] if i+1 < len(cities_order) else", "random) candidate_factory = ListFactory(random, len(distances)-1) crossover = ListOrderCrossover(probability, random) mutation = ListOrderMutation(probability, random,", "crossover 
= ListOrderCrossover(probability, random) mutation = ListOrderMutation(probability, random, 2) operator = PipelineOperator() operator.append_operator(crossover)", "len(cities_order) else cities_order[0] total_distance += self.distances[city][next_city] return Fitness(-total_distance, is_natural=False) def print_results(result): print('Visit cities", "crossover_points = 2 crossover_mutate_probability = 0.2 max_weight = 15 city_names = ['a', 'b',", "a b c d [ 0, 130, 180, 300], # a [130, 0,", "print('Total distance: ', abs(result.fitness)) logging.basicConfig(level=logging.DEBUG) random = Random() probability = Probability(crossover_mutate_probability, random) candidate_factory", "if i + 1 < len(cities_order) else cities_order[0] print('- ', city_names[city], distances[city][next_city]) print('Total", "probability = Probability(crossover_mutate_probability, random) candidate_factory = ListFactory(random, len(distances)-1) crossover = ListOrderCrossover(probability, random) mutation", "350, 360, 0] # d ] class SalesmanFitnessEvaluator(FitnessEvaluator): def __init__(self, distances): super().__init__() self.distances", "crossover_mutate_probability = 0.2 max_weight = 15 city_names = ['a', 'b', 'c', 'd'] distances", "= Probability(crossover_mutate_probability, random) candidate_factory = ListFactory(random, len(distances)-1) crossover = ListOrderCrossover(probability, random) mutation =", "= 15 city_names = ['a', 'b', 'c', 'd'] distances = [ # a", "result.data for i, city in enumerate(cities_order): next_city = cities_order[i + 1] if i", "= SalesmanFitnessEvaluator(distances) selection_strategy = RouletteWheelSelection(random) termination_condition = Stagnation(100) engine = GenerationalEvolutionEngine() engine.create(candidate_factory, operator,", "cities_order = candidate.data for i, city in enumerate(cities_order): next_city = cities_order[i+1] if i+1", "Random() probability = Probability(crossover_mutate_probability, random) candidate_factory = ListFactory(random, 
len(distances)-1) crossover = ListOrderCrossover(probability, random)", "* population_size = 10 elite_count = 2 crossover_points = 2 crossover_mutate_probability = 0.2", "else cities_order[0] total_distance += self.distances[city][next_city] return Fitness(-total_distance, is_natural=False) def print_results(result): print('Visit cities in", "for i, city in enumerate(cities_order): next_city = cities_order[i + 1] if i +", "else cities_order[0] print('- ', city_names[city], distances[city][next_city]) print('Total distance: ', abs(result.fitness)) logging.basicConfig(level=logging.DEBUG) random =", "if i+1 < len(cities_order) else cities_order[0] total_distance += self.distances[city][next_city] return Fitness(-total_distance, is_natural=False) def", "candidate.data for i, city in enumerate(cities_order): next_city = cities_order[i+1] if i+1 < len(cities_order)", "sys import os import logging sys.path.append(os.environ[\"PWD\"]) from pyga import * population_size = 10", "[ # a b c d [ 0, 130, 180, 300], # a", "180, 300], # a [130, 0, 320, 350], # b [180, 320, 0,", "operator.append_operator(mutation) fitness_evaluator = SalesmanFitnessEvaluator(distances) selection_strategy = RouletteWheelSelection(random) termination_condition = Stagnation(100) engine = GenerationalEvolutionEngine()", "elite_count = 2 crossover_points = 2 crossover_mutate_probability = 0.2 max_weight = 15 city_names", "next_city = cities_order[i + 1] if i + 1 < len(cities_order) else cities_order[0]", "ListOrderCrossover(probability, random) mutation = ListOrderMutation(probability, random, 2) operator = PipelineOperator() operator.append_operator(crossover) operator.append_operator(mutation) fitness_evaluator", "cities in this order:') cities_order = result.data for i, city in enumerate(cities_order): next_city", "0, 320, 350], # b [180, 320, 0, 360], # c [300, 350,", "= candidate.data for i, city in enumerate(cities_order): next_city = cities_order[i+1] if i+1 <", "', abs(result.fitness)) 
logging.basicConfig(level=logging.DEBUG) random = Random() probability = Probability(crossover_mutate_probability, random) candidate_factory = ListFactory(random,", "this order:') cities_order = result.data for i, city in enumerate(cities_order): next_city = cities_order[i", "return Fitness(-total_distance, is_natural=False) def print_results(result): print('Visit cities in this order:') cities_order = result.data", "print('Visit cities in this order:') cities_order = result.data for i, city in enumerate(cities_order):", "mutation = ListOrderMutation(probability, random, 2) operator = PipelineOperator() operator.append_operator(crossover) operator.append_operator(mutation) fitness_evaluator = SalesmanFitnessEvaluator(distances)", "random) mutation = ListOrderMutation(probability, random, 2) operator = PipelineOperator() operator.append_operator(crossover) operator.append_operator(mutation) fitness_evaluator =", "300], # a [130, 0, 320, 350], # b [180, 320, 0, 360],", "next_city = cities_order[i+1] if i+1 < len(cities_order) else cities_order[0] total_distance += self.distances[city][next_city] return", "0] # d ] class SalesmanFitnessEvaluator(FitnessEvaluator): def __init__(self, distances): super().__init__() self.distances = distances", "b c d [ 0, 130, 180, 300], # a [130, 0, 320,", "city in enumerate(cities_order): next_city = cities_order[i + 1] if i + 1 <", "360], # c [300, 350, 360, 0] # d ] class SalesmanFitnessEvaluator(FitnessEvaluator): def", "total_distance += self.distances[city][next_city] return Fitness(-total_distance, is_natural=False) def print_results(result): print('Visit cities in this order:')", "SalesmanFitnessEvaluator(distances) selection_strategy = RouletteWheelSelection(random) termination_condition = Stagnation(100) engine = GenerationalEvolutionEngine() engine.create(candidate_factory, operator, fitness_evaluator,", "operator.append_operator(crossover) operator.append_operator(mutation) fitness_evaluator = 
SalesmanFitnessEvaluator(distances) selection_strategy = RouletteWheelSelection(random) termination_condition = Stagnation(100) engine =", "population): total_distance = 0 cities_order = candidate.data for i, city in enumerate(cities_order): next_city", "distances[city][next_city]) print('Total distance: ', abs(result.fitness)) logging.basicConfig(level=logging.DEBUG) random = Random() probability = Probability(crossover_mutate_probability, random)", "SalesmanFitnessEvaluator(FitnessEvaluator): def __init__(self, distances): super().__init__() self.distances = distances def get_fitness(self, candidate, population): total_distance", "c [300, 350, 360, 0] # d ] class SalesmanFitnessEvaluator(FitnessEvaluator): def __init__(self, distances):", "= ListOrderCrossover(probability, random) mutation = ListOrderMutation(probability, random, 2) operator = PipelineOperator() operator.append_operator(crossover) operator.append_operator(mutation)", "[300, 350, 360, 0] # d ] class SalesmanFitnessEvaluator(FitnessEvaluator): def __init__(self, distances): super().__init__()", "Probability(crossover_mutate_probability, random) candidate_factory = ListFactory(random, len(distances)-1) crossover = ListOrderCrossover(probability, random) mutation = ListOrderMutation(probability,", "i, city in enumerate(cities_order): next_city = cities_order[i+1] if i+1 < len(cities_order) else cities_order[0]", "['a', 'b', 'c', 'd'] distances = [ # a b c d [", "15 city_names = ['a', 'b', 'c', 'd'] distances = [ # a b", "cities_order[0] print('- ', city_names[city], distances[city][next_city]) print('Total distance: ', abs(result.fitness)) logging.basicConfig(level=logging.DEBUG) random = Random()", "selection_strategy = RouletteWheelSelection(random) termination_condition = Stagnation(100) engine = GenerationalEvolutionEngine() engine.create(candidate_factory, operator, fitness_evaluator, selection_strategy)", "enumerate(cities_order): next_city = cities_order[i + 1] if i + 1 < len(cities_order) 
else", "import sys import os import logging sys.path.append(os.environ[\"PWD\"]) from pyga import * population_size =", "abs(result.fitness)) logging.basicConfig(level=logging.DEBUG) random = Random() probability = Probability(crossover_mutate_probability, random) candidate_factory = ListFactory(random, len(distances)-1)", "2 crossover_mutate_probability = 0.2 max_weight = 15 city_names = ['a', 'b', 'c', 'd']", "a [130, 0, 320, 350], # b [180, 320, 0, 360], # c", "fitness_evaluator = SalesmanFitnessEvaluator(distances) selection_strategy = RouletteWheelSelection(random) termination_condition = Stagnation(100) engine = GenerationalEvolutionEngine() engine.create(candidate_factory,", "< len(cities_order) else cities_order[0] print('- ', city_names[city], distances[city][next_city]) print('Total distance: ', abs(result.fitness)) logging.basicConfig(level=logging.DEBUG)", "super().__init__() self.distances = distances def get_fitness(self, candidate, population): total_distance = 0 cities_order =", "import logging sys.path.append(os.environ[\"PWD\"]) from pyga import * population_size = 10 elite_count = 2", "distances def get_fitness(self, candidate, population): total_distance = 0 cities_order = candidate.data for i,", "= [ # a b c d [ 0, 130, 180, 300], #", "cities_order[i+1] if i+1 < len(cities_order) else cities_order[0] total_distance += self.distances[city][next_city] return Fitness(-total_distance, is_natural=False)", "d ] class SalesmanFitnessEvaluator(FitnessEvaluator): def __init__(self, distances): super().__init__() self.distances = distances def get_fitness(self,", "in enumerate(cities_order): next_city = cities_order[i+1] if i+1 < len(cities_order) else cities_order[0] total_distance +=", "+ 1] if i + 1 < len(cities_order) else cities_order[0] print('- ', city_names[city],", "city in enumerate(cities_order): next_city = cities_order[i+1] if i+1 < len(cities_order) else cities_order[0] total_distance", "= ListOrderMutation(probability, random, 2) operator 
= PipelineOperator() operator.append_operator(crossover) operator.append_operator(mutation) fitness_evaluator = SalesmanFitnessEvaluator(distances) selection_strategy", "PipelineOperator() operator.append_operator(crossover) operator.append_operator(mutation) fitness_evaluator = SalesmanFitnessEvaluator(distances) selection_strategy = RouletteWheelSelection(random) termination_condition = Stagnation(100) engine", "350], # b [180, 320, 0, 360], # c [300, 350, 360, 0]", "d [ 0, 130, 180, 300], # a [130, 0, 320, 350], #", "= ListFactory(random, len(distances)-1) crossover = ListOrderCrossover(probability, random) mutation = ListOrderMutation(probability, random, 2) operator", "'d'] distances = [ # a b c d [ 0, 130, 180,", "= Random() probability = Probability(crossover_mutate_probability, random) candidate_factory = ListFactory(random, len(distances)-1) crossover = ListOrderCrossover(probability,", "= 0 cities_order = candidate.data for i, city in enumerate(cities_order): next_city = cities_order[i+1]", "0 cities_order = candidate.data for i, city in enumerate(cities_order): next_city = cities_order[i+1] if", "logging.basicConfig(level=logging.DEBUG) random = Random() probability = Probability(crossover_mutate_probability, random) candidate_factory = ListFactory(random, len(distances)-1) crossover", "candidate_factory = ListFactory(random, len(distances)-1) crossover = ListOrderCrossover(probability, random) mutation = ListOrderMutation(probability, random, 2)", "def __init__(self, distances): super().__init__() self.distances = distances def get_fitness(self, candidate, population): total_distance =", "os import logging sys.path.append(os.environ[\"PWD\"]) from pyga import * population_size = 10 elite_count =", "termination_condition = Stagnation(100) engine = GenerationalEvolutionEngine() engine.create(candidate_factory, operator, fitness_evaluator, selection_strategy) population = engine.evolve(population_size,", "__init__(self, distances): super().__init__() 
self.distances = distances def get_fitness(self, candidate, population): total_distance = 0", "from pyga import * population_size = 10 elite_count = 2 crossover_points = 2", "import * population_size = 10 elite_count = 2 crossover_points = 2 crossover_mutate_probability =", "+= self.distances[city][next_city] return Fitness(-total_distance, is_natural=False) def print_results(result): print('Visit cities in this order:') cities_order", "i + 1 < len(cities_order) else cities_order[0] print('- ', city_names[city], distances[city][next_city]) print('Total distance:", "import os import logging sys.path.append(os.environ[\"PWD\"]) from pyga import * population_size = 10 elite_count", "operator = PipelineOperator() operator.append_operator(crossover) operator.append_operator(mutation) fitness_evaluator = SalesmanFitnessEvaluator(distances) selection_strategy = RouletteWheelSelection(random) termination_condition =", "in enumerate(cities_order): next_city = cities_order[i + 1] if i + 1 < len(cities_order)", "population_size = 10 elite_count = 2 crossover_points = 2 crossover_mutate_probability = 0.2 max_weight", "RouletteWheelSelection(random) termination_condition = Stagnation(100) engine = GenerationalEvolutionEngine() engine.create(candidate_factory, operator, fitness_evaluator, selection_strategy) population =", "= 0.2 max_weight = 15 city_names = ['a', 'b', 'c', 'd'] distances =", "len(distances)-1) crossover = ListOrderCrossover(probability, random) mutation = ListOrderMutation(probability, random, 2) operator = PipelineOperator()", "enumerate(cities_order): next_city = cities_order[i+1] if i+1 < len(cities_order) else cities_order[0] total_distance += self.distances[city][next_city]", "logging sys.path.append(os.environ[\"PWD\"]) from pyga import * population_size = 10 elite_count = 2 crossover_points", "pyga import * population_size = 10 elite_count = 2 crossover_points = 2 crossover_mutate_probability", "= PipelineOperator() operator.append_operator(crossover) 
operator.append_operator(mutation) fitness_evaluator = SalesmanFitnessEvaluator(distances) selection_strategy = RouletteWheelSelection(random) termination_condition = Stagnation(100)", "cities_order = result.data for i, city in enumerate(cities_order): next_city = cities_order[i + 1]", "# a [130, 0, 320, 350], # b [180, 320, 0, 360], #", "get_fitness(self, candidate, population): total_distance = 0 cities_order = candidate.data for i, city in", "distances): super().__init__() self.distances = distances def get_fitness(self, candidate, population): total_distance = 0 cities_order", "= ['a', 'b', 'c', 'd'] distances = [ # a b c d", "= cities_order[i+1] if i+1 < len(cities_order) else cities_order[0] total_distance += self.distances[city][next_city] return Fitness(-total_distance,", "= 10 elite_count = 2 crossover_points = 2 crossover_mutate_probability = 0.2 max_weight =", "'c', 'd'] distances = [ # a b c d [ 0, 130,", "cities_order[0] total_distance += self.distances[city][next_city] return Fitness(-total_distance, is_natural=False) def print_results(result): print('Visit cities in this", "# c [300, 350, 360, 0] # d ] class SalesmanFitnessEvaluator(FitnessEvaluator): def __init__(self,", "1 < len(cities_order) else cities_order[0] print('- ', city_names[city], distances[city][next_city]) print('Total distance: ', abs(result.fitness))", "b [180, 320, 0, 360], # c [300, 350, 360, 0] # d", "distance: ', abs(result.fitness)) logging.basicConfig(level=logging.DEBUG) random = Random() probability = Probability(crossover_mutate_probability, random) candidate_factory =", "= result.data for i, city in enumerate(cities_order): next_city = cities_order[i + 1] if", "ListFactory(random, len(distances)-1) crossover = ListOrderCrossover(probability, random) mutation = ListOrderMutation(probability, random, 2) operator =", "= Stagnation(100) engine = GenerationalEvolutionEngine() engine.create(candidate_factory, operator, fitness_evaluator, selection_strategy) population = 
engine.evolve(population_size, elite_count,", "# a b c d [ 0, 130, 180, 300], # a [130,", "def print_results(result): print('Visit cities in this order:') cities_order = result.data for i, city", "class SalesmanFitnessEvaluator(FitnessEvaluator): def __init__(self, distances): super().__init__() self.distances = distances def get_fitness(self, candidate, population):", "len(cities_order) else cities_order[0] print('- ', city_names[city], distances[city][next_city]) print('Total distance: ', abs(result.fitness)) logging.basicConfig(level=logging.DEBUG) random", "order:') cities_order = result.data for i, city in enumerate(cities_order): next_city = cities_order[i +", "10 elite_count = 2 crossover_points = 2 crossover_mutate_probability = 0.2 max_weight = 15", "Fitness(-total_distance, is_natural=False) def print_results(result): print('Visit cities in this order:') cities_order = result.data for" ]
[ "with a T\") # checking the weekday number is equal to 3 using", "datetime of the system using import syntax import datetime # checking the weekday", "is equal to 1 using if condition if datetime.datetime.today().weekday() == 1: # print", "elif conditon elif datetime.datetime.today().weekday() == 3: # print result if the weekday number", "Version 1.0 # import the datetime of the system using import syntax import", "the system using import syntax import datetime # checking the weekday number is", "weekday is not 1 & 3 print(\"No - today is doesn't begins with", "with T since the weekday is not 1 & 3 print(\"No - today", "if the weekday number is 1 print(\"Yes - today begins with a T\")", "is not 1 & 3 print(\"No - today is doesn't begins with T\")", "1 print(\"Yes - today begins with a T\") # checking the weekday number", "<NAME> # Date : 05/04/2019 Time : 19:00 pm # Solution for problem", "# Date : 05/04/2019 Time : 19:00 pm # Solution for problem number", "is equal to 3 using elif conditon elif datetime.datetime.today().weekday() == 3: # print", "3: # print result if the weekday number is 3 print(\"Yes - today", "print result if the weekday number is 3 print(\"Yes - today begins with", "not beging with T since the weekday is not 1 & 3 print(\"No", "print result if the weekday number is 1 print(\"Yes - today begins with", "begins with a T\") # pass on if the weekday number is not", "import syntax import datetime # checking the weekday number is equal to 1", "with a T\") # pass on if the weekday number is not 1", "T\") # checking the weekday number is equal to 3 using elif conditon", "if the weekday number is not 1 or 3 else: # print the", "number is 3 print(\"Yes - today begins with a T\") # pass on", "# Solution for problem number 2 # Version 1.0 # import the datetime", "of the system using import syntax import datetime # checking the weekday number", "equal to 1 using if condition if datetime.datetime.today().weekday() == 1: # print result", "3 print(\"Yes - 
today begins with a T\") # pass on if the", "05/04/2019 Time : 19:00 pm # Solution for problem number 2 # Version", "1: # print result if the weekday number is 1 print(\"Yes - today", "1 or 3 else: # print the day is not beging with T", "using elif conditon elif datetime.datetime.today().weekday() == 3: # print result if the weekday", "problem number 2 # Version 1.0 # import the datetime of the system", "1.0 # import the datetime of the system using import syntax import datetime", "using if condition if datetime.datetime.today().weekday() == 1: # print result if the weekday", "is 3 print(\"Yes - today begins with a T\") # pass on if", "the datetime of the system using import syntax import datetime # checking the", "the weekday number is equal to 3 using elif conditon elif datetime.datetime.today().weekday() ==", "2 # Version 1.0 # import the datetime of the system using import", "weekday number is equal to 3 using elif conditon elif datetime.datetime.today().weekday() == 3:", "the day is not beging with T since the weekday is not 1", "system using import syntax import datetime # checking the weekday number is equal", "3 using elif conditon elif datetime.datetime.today().weekday() == 3: # print result if the", "if datetime.datetime.today().weekday() == 1: # print result if the weekday number is 1", "or 3 else: # print the day is not beging with T since", "Solution for problem number 2 # Version 1.0 # import the datetime of", "T since the weekday is not 1 & 3 print(\"No - today is", "on if the weekday number is not 1 or 3 else: # print", "elif datetime.datetime.today().weekday() == 3: # print result if the weekday number is 3", "Author : <NAME> # Date : 05/04/2019 Time : 19:00 pm # Solution", "import datetime # checking the weekday number is equal to 1 using if", "begins with a T\") # checking the weekday number is equal to 3", "datetime.datetime.today().weekday() == 3: # print result if the weekday number is 3 print(\"Yes", "weekday number is 3 print(\"Yes - today 
begins with a T\") # pass", "== 1: # print result if the weekday number is 1 print(\"Yes -", "the weekday number is not 1 or 3 else: # print the day", "the weekday number is equal to 1 using if condition if datetime.datetime.today().weekday() ==", "# checking the weekday number is equal to 1 using if condition if", "number is not 1 or 3 else: # print the day is not", "is not 1 or 3 else: # print the day is not beging", "<reponame>babubaskaran/pands-problem-set # Author : <NAME> # Date : 05/04/2019 Time : 19:00 pm", "# checking the weekday number is equal to 3 using elif conditon elif", "# Author : <NAME> # Date : 05/04/2019 Time : 19:00 pm #", "beging with T since the weekday is not 1 & 3 print(\"No -", "# print the day is not beging with T since the weekday is", "weekday number is not 1 or 3 else: # print the day is", "else: # print the day is not beging with T since the weekday", "day is not beging with T since the weekday is not 1 &", "checking the weekday number is equal to 3 using elif conditon elif datetime.datetime.today().weekday()", "- today begins with a T\") # pass on if the weekday number", "syntax import datetime # checking the weekday number is equal to 1 using", "print the day is not beging with T since the weekday is not", "print(\"Yes - today begins with a T\") # checking the weekday number is", "result if the weekday number is 1 print(\"Yes - today begins with a", "# print result if the weekday number is 1 print(\"Yes - today begins", "number is 1 print(\"Yes - today begins with a T\") # checking the", "the weekday is not 1 & 3 print(\"No - today is doesn't begins", "pass on if the weekday number is not 1 or 3 else: #", "today begins with a T\") # checking the weekday number is equal to", "today begins with a T\") # pass on if the weekday number is", "weekday number is equal to 1 using if condition if datetime.datetime.today().weekday() == 1:", "== 3: # print result if the weekday number is 3 print(\"Yes -", "to 3 using elif conditon elif 
datetime.datetime.today().weekday() == 3: # print result if", "datetime # checking the weekday number is equal to 1 using if condition", "Time : 19:00 pm # Solution for problem number 2 # Version 1.0", "- today begins with a T\") # checking the weekday number is equal", ": <NAME> # Date : 05/04/2019 Time : 19:00 pm # Solution for", "the weekday number is 3 print(\"Yes - today begins with a T\") #", "a T\") # checking the weekday number is equal to 3 using elif", "to 1 using if condition if datetime.datetime.today().weekday() == 1: # print result if", "is 1 print(\"Yes - today begins with a T\") # checking the weekday", "datetime.datetime.today().weekday() == 1: # print result if the weekday number is 1 print(\"Yes", "T\") # pass on if the weekday number is not 1 or 3", "number is equal to 3 using elif conditon elif datetime.datetime.today().weekday() == 3: #", "# Version 1.0 # import the datetime of the system using import syntax", "if condition if datetime.datetime.today().weekday() == 1: # print result if the weekday number", "Date : 05/04/2019 Time : 19:00 pm # Solution for problem number 2", "a T\") # pass on if the weekday number is not 1 or", "condition if datetime.datetime.today().weekday() == 1: # print result if the weekday number is", "# pass on if the weekday number is not 1 or 3 else:", "pm # Solution for problem number 2 # Version 1.0 # import the", "conditon elif datetime.datetime.today().weekday() == 3: # print result if the weekday number is", "the weekday number is 1 print(\"Yes - today begins with a T\") #", "if the weekday number is 3 print(\"Yes - today begins with a T\")", "is not beging with T since the weekday is not 1 & 3", "for problem number 2 # Version 1.0 # import the datetime of the", "since the weekday is not 1 & 3 print(\"No - today is doesn't", "equal to 3 using elif conditon elif datetime.datetime.today().weekday() == 3: # print result", "weekday number is 1 print(\"Yes - today begins with a T\") # checking", "import the datetime of 
the system using import syntax import datetime # checking", "number 2 # Version 1.0 # import the datetime of the system using", "number is equal to 1 using if condition if datetime.datetime.today().weekday() == 1: #", "19:00 pm # Solution for problem number 2 # Version 1.0 # import", ": 19:00 pm # Solution for problem number 2 # Version 1.0 #", "# import the datetime of the system using import syntax import datetime #", ": 05/04/2019 Time : 19:00 pm # Solution for problem number 2 #", "# print result if the weekday number is 3 print(\"Yes - today begins", "using import syntax import datetime # checking the weekday number is equal to", "print(\"Yes - today begins with a T\") # pass on if the weekday", "not 1 or 3 else: # print the day is not beging with", "result if the weekday number is 3 print(\"Yes - today begins with a", "3 else: # print the day is not beging with T since the", "1 using if condition if datetime.datetime.today().weekday() == 1: # print result if the", "checking the weekday number is equal to 1 using if condition if datetime.datetime.today().weekday()" ]
[ "zrxns, _ = _id_reaction(rxn_info) if rxn_class is None: print( 'Error: user did not", "'basis': '6-31g*' }, 'lvl_b3mg': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': '6-311g**'", "cnf_fs[-1].existing(): current_rid, _ = locs if current_rid in checked_rids: continue if cnf_fs[-1].file.geometry.exists(locs): checked_rids.append(current_rid)", "stereo=True) except IndexError: pich = automol.graph.inchi(pgra) psmi = automol.inchi.smiles(pich) ts_ichs[1].append(pich) ts_smis[1].append(psmi) reactant_match =", "len(value) > 1: insert_dct['saddle'] = True reactants, products = value reactants = reactants.split('", "not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) if mult is None: print('Error: user did not", "!= geo_ich: print( 'user specified inchi {}'.format(ich) + 'does not match inchi from", "= [] for locs in cnf_fs[-1].existing(): current_rid, _ = locs if current_rid in", "cnsampd += 1 cinf_obj.nsamp = cnsampd else: cinf_obj = autofile.schema.info_objects.conformer_branch(0) cinf_obj.nsamp = 1", "'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvqz' }, 'lvl_b3s': { 'orb_res': 'RU', 'program': 'gaussian09',", "spc_info, rxn_class = parse_user_reaction(insert_dct) zrxn, zma, geo, rxn_info = get_zrxn(geo, rxn_info, rxn_class) #", "'') else: value = value.split(' = ') if len(value) > 1: insert_dct['saddle'] =", "but not in between words value = value.split() for i, val in enumerate(value):", "len(oversaturated_atoms) == 1: chosen_ts_gra = ts_gras[0] chosen_oversaturated_atom = oversaturated_atoms[0] break if chosen_oversaturated_atom is", "cnf_fs[1].file.info.write(cinf_obj, [locs[0]]) hess, freqs, imags = None, None, None if hess is not", "status=autofile.schema.RunStatus.SUCCESS) hess_ret = (hess_inf_obj, inp_str, out_str) save_saddle_point( zrxn, ret, hess_ret, freqs, imags, mod_thy_info,", "'basis': 'cc-pvtz'}, 'lvl_b2q': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 
'cc-pvqz' },", "None: print('Error: user did not specify charge in input') sys.exit() return sinfo.from_data(ich, chg,", "'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvdz'}, 'cc_lvl_t': { 'orb_res': 'RR', 'program':", "thy_fs[-1].path(mod_thy_info[1:]) if ts_locs is None: ts_locs = (0,) ts_fs = autofile.fs.transition_state(thy_prefix) ts_fs[-1].create(ts_locs) ts_prefix", "print('forRXN', automol.graph.string(zrxn_i.forward_ts_graph)) # print('forRXN', automol.graph.string(forward_gra)) # print('bacRXN', automol.graph.string(zrxn_i.backward_ts_graph)) # print('bacRXN', automol.graph.string(backward_gra)) # if", "= automol.graph.inchi(rgra, stereo=True) except IndexError: rich = automol.graph.inchi(rgra) rsmi = automol.inchi.smiles(rich) ts_ichs[0].append(rich) ts_smis[0].append(rsmi)", "# if forward_gra == automol.geom.graph(geo, stereo=False): # zrxn = zrxn_i # zma, _,", "None: print( 'ERROR: No filename is specified for {}'.format(keyword) + 'Script will exit')", "= {breaking_bond: 0.9, forming_bond: 0.1} back_bnd_ord_dct = {breaking_bond: 0.1, forming_bond: 0.9} forward_gra =", "mod_thy_info, ts_locs=insert_dct['ts_locs'], locs=None) cnf_fs = fs_array[-1] if not locs_match(geo, cnf_fs, locs): print( 'I", "'gaussian09', 'method': 'wb97xd', 'basis': 'cc-pvtz'}, 'lvl_m06s': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x',", "None, 'zrxn_file': None, 'run_path': None, 'saddle': False, } for i, line in enumerate(script_input):", "forw_form_key) # backward_gra = automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( # zrxn_i.backward_ts_graph))) # backward_gra", "None, None, None if hess is not None and zrxn is not None:", "if len(ts_gras) != 1: continue for ts_gra_i in ts_gras: vals = automol.graph.atom_unsaturated_valences(ts_gra_i, bond_order=True)", "geo_reorder_dct[idx_i + remove_idx] = idx_j # ts_geo = automol.geom.reorder_coordinates(geo, 
geo_reorder_dct) else: print( 'The", "'program': 'gaussian09', 'method': 'b3lyp', 'basis': 'cc-pvtz'}, 'cc_lvl_d': { 'orb_res': 'RR', 'program': 'molpro2015', 'method':", "theory filesystem thy_fs = autofile.fs.theory(rxn_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) if ts_locs is None:", "are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() keyword, value = line.split(':') if keyword in insert_dct:", "'basis': 'cc-pvtz'}, 'cc_lvl_d': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvdz'}, 'cc_lvl_t':", "and product_match: reactant_keys = [] for gra in rxn_gras[0]: reactant_keys.append(automol.graph.atom_keys(gra)) product_keys = []", "for inchi {}'.format(ich)) sys.exit() rxn_muls[1].append(mults[idx]) rxn_chgs[1].append(chgs[idx]) idx += 1 ts_mult = insert_dct['ts_mult'] if", "mechanalyzer.inf import thy as tinfo from mechanalyzer.inf import rxn as rinfo from mechanalyzer.inf", "main(insert_dct): prefix = read_user_filesystem(insert_dct) # Read in the input and output files that", "# conformer cnf_fs = autofile.fs.conformer(ts_prefix) if locs is not None: cnf_fs[-1].create(locs) cnf_prefix =", "frag_zma = automol.geom.zmatrix(frag_geo) checked_rids = [] for locs in cnf_fs[-1].existing(): current_rid, _ =", "# script_str, freq_run_path, [geo], [[]], [hess]) # if len(imags) != 1: # print(", "theory in THEORY_DCT: thy_info = tinfo.from_dct(THEORY_DCT[theory]) else: print( 'Error: user did not specify", "def locs_match(geo, cnf_fs, locs): match = True rid = locs[0] geo_rid = rng_loc_for_geo(geo,", "inf_obj, inp_str, zrxn=zrxn, zma_locs=(0,)) print( 'geometry is now saved at {}'.format(cnf_fs[-1].path(locs))) else: print(", "line\\n({}) {}\\n is not parsable, '.format(i, line) + 'script will exit until input", "elif keyword not in ['smiles', 'inchi']: value = value.replace(' ', '') else: value", "for idx_i, idx_j in enumerate(zma_keys): # if idx_i in dummies: # remove_idx 
-=", "ich is None: ich = automol.smiles.inchi(smi) if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) if", "# elif backward_gra == automol.geom.graph(geo, stereo=False): # zrxn = automol.reac.reverse(zrxn_i) # zma, _,", "species_match(geo, spc_info): match = True ich, _, mul = spc_info mults_allowed = automol.graph.possible_spin_multiplicities(", "to your database usiing a log file \"\"\" import sys import os import", "cnf_prefix = cnf_fs[-1].path(locs) else: cnf_prefix = None return ( (rxn_fs, thy_fs, ts_fs, cnf_fs),", "def create_reaction_filesystems( prefix, rxn_info, mod_thy_info, ts_locs=None, locs=None): # species filesystem print('rxn_info', rxn_info) rxn_fs", "= fs_array[-1] if not locs_match(geo, cnf_fs, locs): print( 'I refuse to save this", "info matches the info in user given output') sys.exit() # Check that the", "version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) hess_ret = (hess_inf_obj, inp_str, out_str) save_saddle_point( zrxn, ret, hess_ret,", "else: # geo_reorder_dct[idx_i + remove_idx] = idx_j # ts_geo = automol.geom.reorder_coordinates(geo, geo_reorder_dct) else:", "specify rxn_class') sys.exit() return rxn_info, ts_info, rxn_class def parse_user_theory(insert_dct): # Get input method", "= automol.reac.forming_bond_keys(zrxn_i, rev=True) # forw_brk_key = automol.reac.breaking_bond_keys(zrxn_i) # back_brk_key = automol.reac.breaking_bond_keys(zrxn_i, rev=True) #", "_saved_cnf_info( cnf_fs, mod_thy_info) if _geo_unique(geo, ene, saved_geos, saved_enes, zrxn=zrxn): sym_id = _sym_unique( geo,", "single' + # 'imaginary frequency, projrot found the following' + # 'frequencies: '", "# If hess is None: # print( # 'No hessian found in output,", "automol.inchi.graph(ich, stereo=False)) geo_ich = automol.geom.inchi(geo, stereo=True) if ich != geo_ich: print( 'user specified", "= autofile.fs.conformer(ts_prefix) if locs is not None: cnf_fs[-1].create(locs) cnf_prefix = cnf_fs[-1].path(locs) else: 
cnf_prefix", "mod_thy_info, ts_locs=None, locs=None): # species filesystem print('rxn_info', rxn_info) rxn_fs = autofile.fs.reaction(prefix) sort_rxn_info =", "'RR', 'program': 'molpro2015', 'method': 'caspt2', 'basis': 'cc-pvdz'}, 'mlvl_cas_tz': { 'orb_res': 'RR', 'program': 'molpro2015',", "the following' + # 'frequencies: ' + ','.join(imags)) # sys.exit() else: spc_info =", "theory dictionary theory = insert_dct['theory'] if theory is None: if program is None:", "{}'.format(geo_rid) + '\\nthe user rid in input file is {}'.format(rid)) match = False", "continue elif '!' in line[0]: continue line = line.split('!')[0] if ':' not in", "filesystem thy_fs = autofile.fs.theory(spc_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) # conformer cnf_fs = autofile.fs.conformer(thy_prefix)", "+ # 'imaginary frequency, projrot found the following' + # 'frequencies: ' +", "True ich, _, mul = spc_info mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) geo_ich =", "thy_fs, cnf_fs), (spc_prefix, thy_prefix, cnf_prefix)) def create_reaction_filesystems( prefix, rxn_info, mod_thy_info, ts_locs=None, locs=None): #", "# if idx_i in dummies: # remove_idx -= 1 # else: # geo_reorder_dct[idx_i", "1 # dummies.append(dummy + add_idx) # remove_idx = 0 # for idx_i, idx_j", "user did not specify charge in input') sys.exit() return sinfo.from_data(ich, chg, mult) def", "'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvtz-f12' }, 'cc_lvl_qf': { 'orb_res': 'RR', 'program': 'molpro2015',", "inpupt is resolved to avoid' + ' filesystem contamination.' 
+ 'Allowed keywords are:\\n'", "else: cnf_prefix = None return ( (spc_fs, thy_fs, cnf_fs), (spc_prefix, thy_prefix, cnf_prefix)) def", "thy_prefix, ts_prefix, cnf_prefix)) def read_user_file(dct, keyword): if dct[keyword] is None: print( 'ERROR: No", "= products.split(' + ') values = [[], []] for reactant in reactants: values[0].append(reactant.replace('", "else: value = values elif keyword in ['ts_locs']: value = (int(value),) elif keyword", "did not specify a theory {}'.format(theory) + ' that is in the THEORY_DCT'", "did not specify charges in input') sys.exit() flat_ichs = sum(ichs, []) if len(flat_ichs)", "ts_smis[1].append(psmi) reactant_match = False product_match = False if ts_smis[0] == rxn_smis[0]: reactant_match =", "automol.geom.reorder_coordinates(geo, geo_reorder_dct) else: print( 'The reactants and products found for the transition state'", "the save location matches geo information if not insert_dct['saddle']: if not species_match(geo, spc_info):", "# zrxns = [automol.reac.from_string(zrxn_str)] # else: # zrxns, _ = _id_reaction(rxn_info) if rxn_class", "= autofile.io_.read_file(script_input_file).splitlines() insert_dct = { 'save_filesystem': None, 'smiles': None, 'inchi': None, 'mult': None,", "ich != geo_ich: print( 'user specified inchi {}'.format(ich) + 'does not match inchi", "is None: print('could not figure out which H is being transfered') sys.exit() return", "_ = thy_info ene = elstruct.reader.energy(prog, method, out_str) geo = elstruct.reader.opt_geometry(prog, out_str) if", "frequency, projrot found the following' + # 'frequencies: ' + ','.join(imags)) # sys.exit()", "2)] chosen_ts_gra = [] chosen_oversaturated_atom = None for rqh in rqhs: ts_gras =", "= cnf_fs[-1].path(locs) else: cnf_prefix = None return ( (spc_fs, thy_fs, cnf_fs), (spc_prefix, thy_prefix,", "the future') sys.exit() breaking_bond, forming_bond = bonds # when we move on to", "resolved to avoid' + ' filesystem contamination.' 
+ 'Comment lines should contain \"!\"'", "atoms_bnd = automol.graph.atoms_bond_keys(ts_gra) bonds = atoms_bnd[oversaturated_atom] if len(bonds) != 2: print('too many bonds", "None if insert_dct['saddle']: rxn_info, spc_info, rxn_class = parse_user_reaction(insert_dct) zrxn, zma, geo, rxn_info =", "prefix, spc_info, mod_thy_info, locs=None) else: fs_array, prefix_array = create_reaction_filesystems( prefix, rxn_info, mod_thy_info, ts_locs=insert_dct['ts_locs'],", "ts_locs is None: ts_locs = (0,) ts_fs = autofile.fs.transition_state(thy_prefix) ts_fs[-1].create(ts_locs) ts_prefix = ts_fs[-1].path(ts_locs)", "'')) value = values else: value = value[0].replace(' ', '') print(keyword, value) insert_dct[keyword]", "geo) ts_gras = automol.graph.connected_components(ts_gras) if len(ts_gras) != 1: continue for ts_gra_i in ts_gras:", "based on geometry from output:\\n' + '{}'.format(automol.geom.string(geo))) match = False if mul not", "are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() return insert_dct if __name__ == '__main__': SCRIPT_INPUT_FILE =", "'cc-pvtz'}, 'cc_lvl_d': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvdz'}, 'cc_lvl_t': {", "that we # Are inserting into the filesystem inp_str = read_user_file(insert_dct, 'input_file') out_str", "= None return ( (rxn_fs, thy_fs, ts_fs, cnf_fs), (rxn_prefix, thy_prefix, ts_prefix, cnf_prefix)) def", "'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': 'cc-pvtz'}, 'lvl_m06s': { 'orb_res': 'RU', 'program': 'gaussian09',", "sys.exit() elif orb_res is None: print('Error: user did not specify orb_res in input')", "in input') sys.exit() if chg is None: print('Error: user did not specify charge", "forward_gra = automol.graph.remove_bonds(forward_gra, forw_brk_key) # backward_gra = automol.graph.remove_bonds(backward_gra, back_brk_key) # print('forRXN', automol.graph.string(zrxn_i.forward_ts_graph)) #", "program/method/basis/orb_dct' + 'keywords instead of theory') 
sys.exit() return thy_info def create_species_filesystems(prefix, spc_info, mod_thy_info,", "in reactants: values[0].append(reactant.replace(' ', '')) for product in products: values[1].append(product.replace(' ', '')) value", "spc_info): print( 'I refuse to save this geometry until user specified' + '", "else: thy_info = (program, method, basis, orb_res) else: if theory in THEORY_DCT: thy_info", "ene, saved_geos, saved_enes, zrxn=zrxn): sym_id = _sym_unique( geo, ene, saved_geos, saved_enes) if sym_id", "cid is None: cid = autofile.schema.generate_new_conformer_id() return (rid, cid) def parse_user_species(insert_dct): smi =", "in zrxns: # forw_form_key = automol.reac.forming_bond_keys(zrxn_i) # back_form_key = automol.reac.forming_bond_keys(zrxn_i, rev=True) # forw_brk_key", "spc_info) locs = parse_user_locs(insert_dct) # Check that the save location matches geo information", "'basis': None, 'orb_res': None, 'input_file': None, 'output_file': None, 'ts_locs': None, 'ts_mult': None, 'rxn_class':", "cid) def parse_user_species(insert_dct): smi = insert_dct['smiles'] ich = insert_dct['inchi'] mult = insert_dct['mult'] chg", "user save specifications match output prog, method, basis, _ = thy_info ene =", "the filesystem fs_array, prefix_array = create_species_filesystems( prefix, spc_info, mod_thy_info, locs=None) else: fs_array, prefix_array", "theory is None: if program is None: print('Error: user did not specify program", "input') sys.exit() if chgs is None: print('Error: user did not specify charges in", "[[], []] for smi in smis[0]: ichs[0].append(automol.smiles.inchi(smi)) for smi in smis[1]: ichs[1].append(automol.smiles.inchi(smi)) for", "= automol.reac.ts_zmatrix(zrxn, geo) # elif backward_gra == automol.geom.graph(geo, stereo=False): # zrxn = automol.reac.reverse(zrxn_i)", "job=elstruct.Job.OPTIMIZATION, prog=prog, version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) ret = (inf_obj, inp_str, out_str) _, saved_geos,", 
"'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvtz-f12' }, 'cc_lvl_qf': { 'orb_res': 'RR', 'program': 'molpro2015', 'method':", "vals = automol.graph.atom_unsaturated_valences(ts_gra_i, bond_order=True) oversaturated_atoms = [atm for atm, val in vals.items() if", "rxn_info) rxn_fs = autofile.fs.reaction(prefix) sort_rxn_info = rinfo.sort(rxn_info, scheme='autofile') rxn_fs[-1].create(sort_rxn_info) rxn_prefix = rxn_fs[-1].path(sort_rxn_info) #", "+= 1 for ich in ichs[1]: mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) if mults[idx]", "return chosen_ts_gra, chosen_oversaturated_atom def get_zrxn(geo, rxn_info, rxn_class): ts_gra, oversaturated_atom = choose_cutoff_distance(geo) atoms_bnd =", "number of species does not match number of charges') sys.exit() idx = 0", "if zrxn is None: # print( # 'Your geometry did not match any", "rqq_bond_max=3.5, rqh_bond_max=rqh, rhh_bond_max=2.3) ts_gras = automol.graph.set_stereo_from_geometry(ts_gras, geo) ts_gras = automol.graph.connected_components(ts_gras) if len(ts_gras) !=", "'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvdz-f12' }, 'cc_lvl_tf': { 'orb_res': 'RR',", "when we move on to other reaction types we have to check for", "automol.graph.remove_bonds(backward_gra, back_brk_key) # print('forRXN', automol.graph.string(zrxn_i.forward_ts_graph)) # print('forRXN', automol.graph.string(forward_gra)) # print('bacRXN', automol.graph.string(zrxn_i.backward_ts_graph)) # print('bacRXN',", "line in enumerate(script_input): if len(line) < 2: continue elif '!' 
in line[0]: continue", "= line.split(':') if keyword in insert_dct: if 'None' in value: value = None", "i will be smarter in the future') sys.exit() breaking_bond, forming_bond = bonds #", "for atm, val in vals.items() if val < 0] if len(oversaturated_atoms) == 1:", "from insert input file thy_info = parse_user_theory(insert_dct) # parse out geo information first,", "print('Error: user did not specify orb_res in input') sys.exit() else: thy_info = (program,", "ts_ichs[0][::-1] ts_smis[0] = ts_smis[0][::-1] reactant_match = True if reactant_match: if ts_smis[1] == rxn_smis[1]:", "\"\"\" Add a species to your database usiing a log file \"\"\" import", "# sys.exit() # # hess = elstruct.reader.hessian(prog, out_str) # Hess = None #", "and zrxn is not None: hess_inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.HESSIAN, prog=prog, version='', method=method, basis=basis,", "None: print( 'No geometry could be parsed from output' + 'Check that the", "sys.exit() if len(flat_ichs) != len(chgs): print( 'Error: number of species does not match", "elif backward_gra == automol.geom.graph(geo, stereo=False): # zrxn = automol.reac.reverse(zrxn_i) # zma, _, _", "sys.exit() return sinfo.from_data(ich, chg, mult) def parse_user_reaction(insert_dct): smis = insert_dct['smiles'] ichs = insert_dct['inchi']", "rxn_muls[0].append(mults[idx]) rxn_chgs[0].append(chgs[idx]) idx += 1 for ich in ichs[1]: mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich,", "+ ','.join(imags)) # sys.exit() else: spc_info = parse_user_species(insert_dct) mod_thy_info = tinfo.modify_orb_label(thy_info, spc_info) locs", "insert_dct['mult'] chg = insert_dct['charge'] if ich is None and smi is None: print(", "bonds when doing bond orders forw_bnd_ord_dct = {breaking_bond: 0.9, forming_bond: 0.1} back_bnd_ord_dct =", "'b3lyp', 'basis': '6-311g**' }, 'lvl_b3t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis':", "not locs_match(geo, cnf_fs, locs): 
print( 'I refuse to save this geometry until user", "'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvtz'}, 'cc_lvl_q': { 'orb_res': 'RR', 'program':", "+ '\\nthe user rid in input file is {}'.format(rid)) match = False return", "== rxn_smis[0]: ts_ichs[0] = ts_ichs[0][::-1] ts_smis[0] = ts_smis[0][::-1] reactant_match = True else: ts_ichs", "# zrxns, _ = _id_reaction(rxn_info) if rxn_class is None: print( 'Error: user did", "for ich in ichs[0]: mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) if mults[idx] not in", "sys.exit() return thy_info def create_species_filesystems(prefix, spc_info, mod_thy_info, locs=None): # species filesystem spc_fs =", "print(keyword, value) insert_dct[keyword] = value else: print( 'ERROR: Keyword {} is not recognized'.format(keyword)", "is None: # run_path = os.getcwd() # run_fs = autofile.fs.run(run_path) # freq_run_path =", "match those specified in user input') sys.exit() return std_zrxn, ts_zma, ts_geo, rxn_info def", "# print( # 'Can only save a transition state that has a single'", "allowed multiplicty for inchi {}'.format(ich)) sys.exit() rxn_muls[0].append(mults[idx]) rxn_chgs[0].append(chgs[idx]) idx += 1 for ich", "forw_bnd_ord_dct) backward_gra = automol.graph.set_bond_orders(ts_gra, back_bnd_ord_dct) reactant_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(forward_gra)) reactant_gras = automol.graph.connected_components(reactant_gras) product_gras", "parse_user_locs(insert_dct) # Check that the save location matches geo information if not insert_dct['saddle']:", "= os.getcwd() # run_fs = autofile.fs.run(run_path) # freq_run_path = run_fs[-1].path(['hessian']) # run_fs[-1].create(['hessian']) #", "(inf_obj, inp_str, out_str) _, saved_geos, saved_enes = _saved_cnf_info( cnf_fs, mod_thy_info) if _geo_unique(geo, ene,", "saved_geos, saved_enes = _saved_cnf_info( cnf_fs, mod_thy_info) if _geo_unique(geo, ene, 
saved_geos, saved_enes, zrxn=zrxn): sym_id", "did not specify rxn_class') sys.exit() return rxn_info, ts_info, rxn_class def parse_user_theory(insert_dct): # Get", "read_user_file(insert_dct, 'output_file') # parse method from insert input file thy_info = parse_user_theory(insert_dct) #", "cnf_fs[-1].path(locs) else: cnf_prefix = None return ( (spc_fs, thy_fs, cnf_fs), (spc_prefix, thy_prefix, cnf_prefix))", "insert_dct['inchi'] mult = insert_dct['mult'] chg = insert_dct['charge'] if ich is None and smi", "= False return match def rng_loc_for_geo(geo, cnf_fs): rid = None frag_geo = _fragment_ring_geo(geo)", "input') sys.exit() if chg is None: print('Error: user did not specify charge in", "get_zrxn(geo, rxn_info, rxn_class) # for zrxn_i in zrxns: # forw_form_key = automol.reac.forming_bond_keys(zrxn_i) #", "if chgs is None: print('Error: user did not specify charges in input') sys.exit()", "try: rich = automol.graph.inchi(rgra, stereo=True) except IndexError: rich = automol.graph.inchi(rgra) rsmi = automol.inchi.smiles(rich)", "geometry in the output is not unique to filesystem' + '... not saving')", "contamination.' 
+ 'Allowed keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() return insert_dct if __name__", "not figure out which H is being transfered') sys.exit() return chosen_ts_gra, chosen_oversaturated_atom def", "frag_locs_zma = automol.geom.zmatrix(frag_locs_geo) if automol.zmat.almost_equal( frag_locs_zma, frag_zma, dist_rtol=0.1, ang_atol=.4): rid = locs[0] break", "'method': 'b3lyp', 'basis': '6-311g**' }, 'lvl_b3t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp',", "is None: print( 'Error: user did not specify ts_mul') sys.exit() rxn_info = rinfo.sort((ichs,", "{ 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': '6-31+g*' }, 'lvl_m06t': { 'orb_res':", "# for zrxn_i in zrxns: # forw_form_key = automol.reac.forming_bond_keys(zrxn_i) # back_form_key = automol.reac.forming_bond_keys(zrxn_i,", "= insert_dct['theory'] if theory is None: if program is None: print('Error: user did", "thy_info = parse_user_theory(insert_dct) # parse out geo information first, to make sure #", "zrxn_str = autofile.io_.read_file(zrxn_file) # zrxns = [automol.reac.from_string(zrxn_str)] # else: # zrxns, _ =", "(spc_prefix, thy_prefix, cnf_prefix)) def create_reaction_filesystems( prefix, rxn_info, mod_thy_info, ts_locs=None, locs=None): # species filesystem", "ts_info = rinfo.ts_info(rxn_info) # if zrxn_file is not None: # zrxn_str = autofile.io_.read_file(zrxn_file)", "sys.exit() else: spc_info = parse_user_species(insert_dct) mod_thy_info = tinfo.modify_orb_label(thy_info, spc_info) locs = parse_user_locs(insert_dct) #", "+ ') values = [[], []] for reactant in reactants: values[0].append(reactant.replace(' ', ''))", "if locs is not None: cnf_fs[-1].create(locs) cnf_prefix = cnf_fs[-1].path(locs) else: cnf_prefix = None", "idx, ich in enumerate(ichs[0]): if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) ichs[0][idx] = ich", "rxn_smis = [[], []] for i, side in enumerate(rxn_info[0]): for ich in side:", 
"automol.reac.ts_zmatrix(zrxn, geo) # if zrxn is None: # print( # 'Your geometry did", "keyword in ['rxn_class']: # strip whitespaces form either side of reaction # class", "have to check for double # bonds when doing bond orders forw_bnd_ord_dct =", "'smiles': None, 'inchi': None, 'mult': None, 'charge': None, 'rid': None, 'cid': None, 'theory':", "to the dct in the script or use program/method/basis/orb_dct' + 'keywords instead of", "+ 'which is based on geometry from output:\\n' + '{}'.format(automol.geom.string(geo))) match = False", "mechroutines.es._routines.conformer import _sym_unique from mechroutines.es._routines.conformer import _save_unique_parsed_conformer from mechroutines.es._routines.conformer import _geo_unique from mechroutines.es._routines.conformer", "[reactant_gras, product_gras] rxn_smis = [[], []] for i, side in enumerate(rxn_info[0]): for ich", "= ich for idx, ich in enumerate(ichs[1]): if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich)", "class but not in between words value = value.split() for i, val in", "print( # 'Your geometry did not match any of the attempted ' +", "filesystem thy_fs = autofile.fs.theory(rxn_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) if ts_locs is None: ts_locs", "is None: print( 'ERROR: No save_filesystem}' + 'Script will exit') sys.exit() return dct['save_filesystem']", "= automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(forward_gra)) reactant_gras = automol.graph.connected_components(reactant_gras) product_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(backward_gra)) product_gras = automol.graph.connected_components(product_gras)", "automol.smiles.inchi(smi) if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) if mult is None: print('Error: user", "len(chgs): print( 'Error: number of species does not match number of charges') sys.exit()", "in smis[1]: 
ichs[1].append(automol.smiles.inchi(smi)) for idx, ich in enumerate(ichs[0]): if not automol.inchi.is_complete(ich): ich =", "value: value = None elif keyword in ['mult', 'charge', 'ts_mult']: values = []", "{ 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvqz' }, 'lvl_b3s': { 'orb_res':", "a theory {}'.format(theory) + ' that is in the THEORY_DCT' + 'please add", "# automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( # zrxn_i.forward_ts_graph))) # forward_gra = automol.graph.add_bonds(forward_gra, forw_form_key) # backward_gra", "until inpupt is resolved to avoid' + ' filesystem contamination.' + 'Allowed keywords", "automol.inchi.add_stereo(ich) if mult is None: print('Error: user did not specify mult in input')", "','.join(imags)) # sys.exit() else: spc_info = parse_user_species(insert_dct) mod_thy_info = tinfo.modify_orb_label(thy_info, spc_info) locs =", "from mechlib.reaction.rxnid import _id_reaction THEORY_DCT = { 'lvl_wbs': { 'orb_res': 'RU', 'program': 'gaussian09',", "= automol.inchi.smiles(pich) ts_ichs[1].append(pich) ts_smis[1].append(psmi) reactant_match = False product_match = False if ts_smis[0] ==", "print( 'No geometry could be parsed from output' + 'Check that the program", "zrxn, zma, geo, rxn_info = get_zrxn(geo, rxn_info, rxn_class) # for zrxn_i in zrxns:", "filesystem at' + ' {}'.format(cnf_fs[0].path()) + '\\nthe expected rid for this geo is", "cnf_fs[1].file.info.exists([locs[0]]): cinf_obj = cnf_fs[1].file.info.read(locs[0]) cnsampd = cinf_obj.nsamp cnsampd += 1 cinf_obj.nsamp = cnsampd", "'Comment lines should contain \"!\"' + 'Key format should be:\\n' + '<Keyword>: <Value>\\n'", "= rinfo.sort(rxn_info, scheme='autofile') rxn_fs[-1].create(sort_rxn_info) rxn_prefix = rxn_fs[-1].path(sort_rxn_info) # theory filesystem thy_fs = autofile.fs.theory(rxn_prefix)", "if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) if mult is None: print('Error: user did", "check 
for double # bonds when doing bond orders forw_bnd_ord_dct = {breaking_bond: 0.9,", "print(zrxns) # sys.exit() # # hess = elstruct.reader.hessian(prog, out_str) # Hess = None", "automol.geom.graph(geo, stereo=False): # zrxn = automol.reac.reverse(zrxn_i) # zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo)", "+ 'is not an allowed multiplicty for inchi {}'.format(ich)) sys.exit() rxn_muls[0].append(mults[idx]) rxn_chgs[0].append(chgs[idx]) idx", "'') print(keyword, value) insert_dct[keyword] = value else: print( 'ERROR: Keyword {} is not", "rsmi = automol.inchi.smiles(rich) ts_ichs[0].append(rich) ts_smis[0].append(rsmi) for pgra in product_gras: try: pich = automol.graph.inchi(pgra,", "idx_i in dummies: # remove_idx -= 1 # else: # geo_reorder_dct[idx_i + remove_idx]", "smis[0]: ichs[0].append(automol.smiles.inchi(smi)) for smi in smis[1]: ichs[1].append(automol.smiles.inchi(smi)) for idx, ich in enumerate(ichs[0]): if", "cnf_prefix)) def read_user_file(dct, keyword): if dct[keyword] is None: print( 'ERROR: No filename is", "ichs is None: ichs = [[], []] for smi in smis[0]: ichs[0].append(automol.smiles.inchi(smi)) for", "ene), inf_obj, inp_str, zrxn=zrxn, zma_locs=(0,)) print( 'geometry is now saved at {}'.format(cnf_fs[-1].path(locs))) else:", "is None: ich = automol.smiles.inchi(smi) if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) if mult", "rid = autofile.schema.generate_new_ring_id() if cid is None: cid = autofile.schema.generate_new_conformer_id() return (rid, cid)", "False product_match = False if ts_smis[0] == rxn_smis[0]: reactant_match = True elif ts_smis[0][::-1]", "did not specify charge in input') sys.exit() return sinfo.from_data(ich, chg, mult) def parse_user_reaction(insert_dct):", "= idx_j # ts_geo = automol.geom.reorder_coordinates(geo, geo_reorder_dct) else: print( 'The reactants and products", "basis, orb_res) else: if theory in THEORY_DCT: thy_info = tinfo.from_dct(THEORY_DCT[theory]) else: print( 'Error:", "= values else: 
value = value[0].replace(' ', '') print(keyword, value) insert_dct[keyword] = value", "not species_match(geo, spc_info): print( 'I refuse to save this geometry until user specified'", "range(26, 38, 2)] chosen_ts_gra = [] chosen_oversaturated_atom = None for rqh in rqhs:", "{}\\n is not parsable, '.format(i, line) + 'script will exit until input is", "for me to figure out') print('I promise i will be smarter in the", "= cnf_fs[-1].path(locs) else: cnf_prefix = None return ( (rxn_fs, thy_fs, ts_fs, cnf_fs), (rxn_prefix,", "*ts_gras, reactant_keys, product_keys) ts_zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix( std_rxn, geo) std_zrxn = automol.reac.relabel_for_zmatrix(", "+ 'Key format should be:\\n' + '<Keyword>: <Value>\\n' + 'Allowed keywords are:\\n' +", "'cc-pvtz'}, 'cc_lvl_q': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvqz' }, 'cc_lvl_df':", "in between words value = value.split() for i, val in enumerate(value): value[i] =", "found for the transition state' + 'did not match those specified in user", "'method': 'wb97xd', 'basis': '6-31g*' }, 'lvl_wbm': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd',", "match inchi from output {}'.format(geo_ich) + 'which is based on geometry from output:\\n'", "geo = elstruct.reader.opt_geometry(prog, out_str) if geo is None: print( 'No geometry could be", "prog=prog, version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) ret = (inf_obj, inp_str, out_str) _, saved_geos, saved_enes", "ich = automol.smiles.inchi(smi) if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) if mult is None:", "specified' + ' info matches the info in user given output') sys.exit() #", "sys.exit() else: thy_info = (program, method, basis, orb_res) else: if theory in THEORY_DCT:", "sym_id = _sym_unique( geo, ene, saved_geos, saved_enes) if sym_id is None: if cnf_fs[0].file.info.exists():", "std_rxn = automol.reac.Reaction( rxn_class, *ts_gras, 
reactant_keys, product_keys) ts_zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix( std_rxn,", "dct[keyword] is None: print( 'ERROR: No filename is specified for {}'.format(keyword) + 'Script", "method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) ret = (inf_obj, inp_str, out_str) _, saved_geos, saved_enes = _saved_cnf_info(", "'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvqz' }, 'cc_lvl_df': { 'orb_res': 'RR', 'program': 'molpro2015',", "if len(line) < 2: continue elif '!' in line[0]: continue line = line.split('!')[0]", "= 0 # for idx_i, idx_j in enumerate(zma_keys): # if idx_i in dummies:", "rxn_muls, ts_mult)) ts_info = rinfo.ts_info(rxn_info) # if zrxn_file is not None: # zrxn_str", "'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvdz-f12' }, 'cc_lvl_tf': { 'orb_res': 'RR', 'program': 'molpro2015',", "mults = insert_dct['mult'] chgs = insert_dct['charge'] rxn_class = insert_dct['rxn_class'] # zrxn_file = insert_dct['zrxn_file']", "return std_zrxn, ts_zma, ts_geo, rxn_info def main(insert_dct): prefix = read_user_filesystem(insert_dct) # Read in", "# forward_gra = automol.graph.remove_bonds(forward_gra, forw_brk_key) # backward_gra = automol.graph.remove_bonds(backward_gra, back_brk_key) # print('forRXN', automol.graph.string(zrxn_i.forward_ts_graph))", "autofile.schema.info_objects.conformer_branch(0) cinf_obj.nsamp = 1 cnf_fs[1].create([locs[0]]) cnf_fs[0].file.info.write(rinf_obj) cnf_fs[1].file.info.write(cinf_obj, [locs[0]]) hess, freqs, imags = None,", "= None frag_geo = _fragment_ring_geo(geo) if frag_geo is not None: frag_zma = automol.geom.zmatrix(frag_geo)", "product_gras: try: pich = automol.graph.inchi(pgra, stereo=True) except IndexError: pich = automol.graph.inchi(pgra) psmi =", "reactant_keys = [] for gra in rxn_gras[0]: reactant_keys.append(automol.graph.atom_keys(gra)) product_keys = [] for gra", "print('too many bonds to transfered atom for me to figure out') print('I promise", "not 
automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) ichs[1][idx] = ich if mults is None: print('Error:", "= rinfo.sort((ichs, rxn_chgs, rxn_muls, ts_mult)) ts_info = rinfo.ts_info(rxn_info) # if zrxn_file is not", "saved_geos, saved_enes, zrxn=zrxn): sym_id = _sym_unique( geo, ene, saved_geos, saved_enes) if sym_id is", "is None: print('Error: user did not specify basis in input') sys.exit() elif orb_res", "in zrxns: # print(zrxns) # sys.exit() # # hess = elstruct.reader.hessian(prog, out_str) #", "mod_thy_info, {'runlvl_cnf_fs': (cnf_fs, None)}, locs, zma_locs=(0,), zma=zma) else: _save_unique_parsed_conformer( mod_thy_info, cnf_fs, locs, (geo,", "insert_dct['charge'] rxn_class = insert_dct['rxn_class'] # zrxn_file = insert_dct['zrxn_file'] if ichs is None: ichs", "rxn_info def main(insert_dct): prefix = read_user_filesystem(insert_dct) # Read in the input and output", "allowed multiplicty for inchi {}'.format(ich)) match = False return match def locs_match(geo, cnf_fs,", "', '') print(keyword, value) insert_dct[keyword] = value else: print( 'ERROR: Keyword {} is", "}, 'mlvl_cas_dz': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'caspt2', 'basis': 'cc-pvdz'}, 'mlvl_cas_tz': {", "insert_dct['basis'] orb_res = insert_dct['orb_res'] # Get input method from theory dictionary theory =", "hess_inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.HESSIAN, prog=prog, version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) hess_ret = (hess_inf_obj, inp_str,", "[[], []] for i, side in enumerate(rxn_info[0]): for ich in side: rxn_smis[i].append(automol.inchi.smiles(ich)) ts_smis", "rid: print( 'Error: rid mismatch for the filesystem at' + ' {}'.format(cnf_fs[0].path()) +", "= [] for val in value.split(','): values.append(int(val)) if len(values) == 1: value =", "get_zrxn(geo, rxn_info, rxn_class): ts_gra, oversaturated_atom = choose_cutoff_distance(geo) atoms_bnd = automol.graph.atoms_bond_keys(ts_gra) bonds = 
atoms_bnd[oversaturated_atom]", "ts_ichs[0] = ts_ichs[0][::-1] ts_smis[0] = ts_smis[0][::-1] reactant_match = True else: ts_ichs = ts_ichs[::-1]", "return thy_info def create_species_filesystems(prefix, spc_info, mod_thy_info, locs=None): # species filesystem spc_fs = autofile.fs.species(prefix)", "'gaussian09', 'method': 'b3lyp', 'basis': '6-31g*' }, 'lvl_b3mg': { 'orb_res': 'RU', 'program': 'gaussian09', 'method':", "forming_bond: 0.1} back_bnd_ord_dct = {breaking_bond: 0.1, forming_bond: 0.9} forward_gra = automol.graph.set_bond_orders(ts_gra, forw_bnd_ord_dct) backward_gra", "ich = insert_dct['inchi'] mult = insert_dct['mult'] chg = insert_dct['charge'] if ich is None", "figure out') print('I promise i will be smarter in the future') sys.exit() breaking_bond,", "input method explicitly inputted program = insert_dct['program'] method = insert_dct['method'] basis = insert_dct['basis']", "print('bacRXN', automol.graph.string(zrxn_i.backward_ts_graph)) # print('bacRXN', automol.graph.string(backward_gra)) # if forward_gra == automol.geom.graph(geo, stereo=False): # zrxn", "value = (int(value),) elif keyword in ['rxn_class']: # strip whitespaces form either side", "zrxn=zrxn): sym_id = _sym_unique( geo, ene, saved_geos, saved_enes) if sym_id is None: if", "rid mismatch for the filesystem at' + ' {}'.format(cnf_fs[0].path()) + '\\nthe expected rid", "geo_rid = rng_loc_for_geo(geo, cnf_fs) if geo_rid is not None: if geo_rid != rid:", "= [[], []] for rgra in reactant_gras: try: rich = automol.graph.inchi(rgra, stereo=True) except", "{ 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvdz-f12' }, 'cc_lvl_tf': { 'orb_res':", "= automol.graph.add_bonds(backward_gra, back_form_key) # if zrxn_i.class_ == 'hydrogen abstraction': # forward_gra = automol.graph.remove_bonds(forward_gra,", "= parse_user_theory(insert_dct) # parse out geo information first, to make sure # user", "geometry could be parsed from output' + 'Check that the program 
matches user", "(hess_inf_obj, inp_str, out_str) save_saddle_point( zrxn, ret, hess_ret, freqs, imags, mod_thy_info, {'runlvl_cnf_fs': (cnf_fs, None)},", "rxn_gras = [reactant_gras, product_gras] rxn_smis = [[], []] for i, side in enumerate(rxn_info[0]):", "value = value.split() for i, val in enumerate(value): value[i] = val.replace(' ', '')", "out user specified save location zrxn = None if insert_dct['saddle']: rxn_info, spc_info, rxn_class", "return (rid, cid) def parse_user_species(insert_dct): smi = insert_dct['smiles'] ich = insert_dct['inchi'] mult =", "[[], []] for rgra in reactant_gras: try: rich = automol.graph.inchi(rgra, stereo=True) except IndexError:", "for pgra in product_gras: try: pich = automol.graph.inchi(pgra, stereo=True) except IndexError: pich =", "run_fs[-1].create(['hessian']) # script_str = autorun.SCRIPT_DCT['projrot'] # freqs, _, imags, _ = autorun.projrot.frequencies( #", "True rid = locs[0] geo_rid = rng_loc_for_geo(geo, cnf_fs) if geo_rid is not None:", "freq_run_path, [geo], [[]], [hess]) # if len(imags) != 1: # print( # 'Can", "specify program in input') sys.exit() elif method is None: print('Error: user did not", "'program': 'gaussian09', 'method': 'wb97xd', 'basis': '6-31g*' }, 'lvl_wbm': { 'orb_res': 'RU', 'program': 'gaussian09',", "{}'.format(ich)) match = False return match def locs_match(geo, cnf_fs, locs): match = True", "value else: print( 'ERROR: Keyword {} is not recognized'.format(keyword) + 'script will exit", "'basis': 'cc-pvtz'}} def parse_user_locs(insert_dct): rid = insert_dct['rid'] cid = insert_dct['cid'] if rid is", "rxn_info, mod_thy_info, ts_locs=insert_dct['ts_locs'], locs=None) cnf_fs = fs_array[-1] if not locs_match(geo, cnf_fs, locs): print(", "sys.exit() # Parse out user specified save location zrxn = None if insert_dct['saddle']:", "mult) def parse_user_reaction(insert_dct): smis = insert_dct['smiles'] ichs = insert_dct['inchi'] mults = insert_dct['mult'] chgs", "{}'.format(theory) + ' that is in the 
THEORY_DCT' + 'please add it to", "method from theory dictionary theory = insert_dct['theory'] if theory is None: if program", "import _fragment_ring_geo from mechroutines.es._routines._sadpt import save_saddle_point from mechlib.reaction.rxnid import _id_reaction THEORY_DCT = {", "-= 1 # else: # geo_reorder_dct[idx_i + remove_idx] = idx_j # ts_geo =", "{ 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': '6-31g*' }, 'lvl_wbm': { 'orb_res':", "# Check that the save location matches geo information if not insert_dct['saddle']: if", "{ 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': '6-31g*' }, 'lvl_m06m': { 'orb_res':", "1 ts_mult = insert_dct['ts_mult'] if ts_mult is None: print( 'Error: user did not", "remove_idx] = idx_j # ts_geo = automol.geom.reorder_coordinates(geo, geo_reorder_dct) else: print( 'The reactants and", "forward_gra == automol.geom.graph(geo, stereo=False): # zrxn = zrxn_i # zma, _, _ =", "val in enumerate(value): value[i] = val.replace(' ', '') value = ' '.join(value) elif", "ich in ichs[0]: mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) if mults[idx] not in mults_allowed:", "H is being transfered') sys.exit() return chosen_ts_gra, chosen_oversaturated_atom def get_zrxn(geo, rxn_info, rxn_class): ts_gra,", "insert_dct['orb_res'] # Get input method from theory dictionary theory = insert_dct['theory'] if theory", "ts_ichs = ts_ichs[::-1] ts_smis = ts_smis[::-1] ts_gras = ts_gras[::-1] rxn_gras = rxn_gras[::-1] if", "first, to make sure # user save specifications match output prog, method, basis,", "'gaussian09', 'method': 'm062x', 'basis': '6-31g*' }, 'lvl_m06m': { 'orb_res': 'RU', 'program': 'gaussian09', 'method':", "= line.split('!')[0] if ':' not in line: print( 'ERROR: line\\n({}) {}\\n is not", "mechroutines.es._routines.conformer import _save_unique_parsed_conformer from mechroutines.es._routines.conformer import _geo_unique from 
mechroutines.es._routines.conformer import _fragment_ring_geo from mechroutines.es._routines._sadpt", "that is in the THEORY_DCT' + 'please add it to the dct in", "products: values[1].append(product.replace(' ', '')) value = values else: value = value[0].replace(' ', '')", "values else: value = value[0].replace(' ', '') print(keyword, value) insert_dct[keyword] = value else:", "dummy_key_dct) rxn_info = (ts_ichs, *rxn_info[1:]) ts_geo = automol.zmat.geometry(ts_zma) # geo_reorder_dct = {} #", "save location matches geo information if not insert_dct['saddle']: if not species_match(geo, spc_info): print(", "+= 1 ts_mult = insert_dct['ts_mult'] if ts_mult is None: print( 'Error: user did", "if _geo_unique(geo, ene, saved_geos, saved_enes, zrxn=zrxn): sym_id = _sym_unique( geo, ene, saved_geos, saved_enes)", "= autofile.fs.conformer(thy_prefix) if locs is not None: cnf_fs[-1].create(locs) cnf_prefix = cnf_fs[-1].path(locs) else: cnf_prefix", "information if not insert_dct['saddle']: if not species_match(geo, spc_info): print( 'I refuse to save", "filesystem fs_array, prefix_array = create_species_filesystems( prefix, spc_info, mod_thy_info, locs=None) else: fs_array, prefix_array =", "ts_smis[0] = ts_smis[0][::-1] reactant_match = True else: ts_ichs = ts_ichs[::-1] ts_smis = ts_smis[::-1]", "not None: frag_zma = automol.geom.zmatrix(frag_geo) checked_rids = [] for locs in cnf_fs[-1].existing(): current_rid,", "# ts_geo = automol.geom.reorder_coordinates(geo, geo_reorder_dct) else: print( 'The reactants and products found for", "= automol.graph.atoms_bond_keys(ts_gra) bonds = atoms_bnd[oversaturated_atom] if len(bonds) != 2: print('too many bonds to", "to transfered atom for me to figure out') print('I promise i will be", "line.split('!')[0] if ':' not in line: print( 'ERROR: line\\n({}) {}\\n is not parsable,", "in enumerate(value): value[i] = val.replace(' ', '') value = ' '.join(value) elif keyword", "smis = insert_dct['smiles'] ichs = insert_dct['inchi'] mults = 
insert_dct['mult'] chgs = insert_dct['charge'] rxn_class", "'ccsd(t)-f12', 'basis': 'cc-pvqz-f12' }, 'mlvl_cas_dz': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'caspt2', 'basis':", "ts_ichs[0].append(rich) ts_smis[0].append(rsmi) for pgra in product_gras: try: pich = automol.graph.inchi(pgra, stereo=True) except IndexError:", "print('Error: user did not specify charges in input') sys.exit() flat_ichs = sum(ichs, [])", "exit until input is resolved to avoid' + ' filesystem contamination.' + 'Comment", "return rxn_info, ts_info, rxn_class def parse_user_theory(insert_dct): # Get input method explicitly inputted program", "[]] for rgra in reactant_gras: try: rich = automol.graph.inchi(rgra, stereo=True) except IndexError: rich", "oversaturated_atoms[0] break if chosen_oversaturated_atom is None: print('could not figure out which H is", "'mult': None, 'charge': None, 'rid': None, 'cid': None, 'theory': None, 'program': None, 'method':", "elif ts_smis[0][::-1] == rxn_smis[0]: ts_ichs[0] = ts_ichs[0][::-1] ts_smis[0] = ts_smis[0][::-1] reactant_match = True", "= insert_dct['cid'] if rid is None: rid = autofile.schema.generate_new_ring_id() if cid is None:", "[]] ts_ichs = [[], []] for rgra in reactant_gras: try: rich = automol.graph.inchi(rgra,", "job=elstruct.Job.HESSIAN, prog=prog, version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) hess_ret = (hess_inf_obj, inp_str, out_str) save_saddle_point( zrxn,", "in THEORY_DCT: thy_info = tinfo.from_dct(THEORY_DCT[theory]) else: print( 'Error: user did not specify a", "= ts_gras[0] chosen_oversaturated_atom = oversaturated_atoms[0] break if chosen_oversaturated_atom is None: print('could not figure", "None, 'ts_mult': None, 'rxn_class': None, 'zrxn_file': None, 'run_path': None, 'saddle': False, } for", "'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvtz'}, 'lvl_b2q': { 'orb_res': 'RU', 'program': 'gaussian09', 'method':", "= value reactants = reactants.split(' + ') products 
= products.split(' + ') values", "for dumm_j in dummies: # if dummy > dumm_j: # add_idx += 1", "database usiing a log file \"\"\" import sys import os import autofile import", "autofile.fs.reaction(prefix) sort_rxn_info = rinfo.sort(rxn_info, scheme='autofile') rxn_fs[-1].create(sort_rxn_info) rxn_prefix = rxn_fs[-1].path(sort_rxn_info) # theory filesystem thy_fs", "'a transition state without a hessian') # sys.exit() # run_path = insert_dct['run_path'] #", "the input and output files that we # Are inserting into the filesystem", "# freq_run_path = run_fs[-1].path(['hessian']) # run_fs[-1].create(['hessian']) # script_str = autorun.SCRIPT_DCT['projrot'] # freqs, _,", "+ 'with an inchi or smiles in input') sys.exit() if ich is None:", "= rxn_gras[::-1] if ts_smis[0] == rxn_smis[0]: reactant_match = True elif ts_smis[0][::-1] == rxn_smis[0]:", "elstruct.reader.energy(prog, method, out_str) geo = elstruct.reader.opt_geometry(prog, out_str) if geo is None: print( 'No", "automol.graph.atom_unsaturated_valences(ts_gra_i, bond_order=True) oversaturated_atoms = [atm for atm, val in vals.items() if val <", "from mechroutines.es._routines.conformer import _saved_cnf_info from mechroutines.es._routines.conformer import _sym_unique from mechroutines.es._routines.conformer import _save_unique_parsed_conformer from", "transfered') sys.exit() return chosen_ts_gra, chosen_oversaturated_atom def get_zrxn(geo, rxn_info, rxn_class): ts_gra, oversaturated_atom = choose_cutoff_distance(geo)", "keyword): if dct[keyword] is None: print( 'ERROR: No filename is specified for {}'.format(keyword)", "info matches the filesystem fs_array, prefix_array = create_species_filesystems( prefix, spc_info, mod_thy_info, locs=None) else:", "return match def locs_match(geo, cnf_fs, locs): match = True rid = locs[0] geo_rid", "# sys.exit() # run_path = insert_dct['run_path'] # if run_path is None: # run_path", "autofile.io_.read_file(zrxn_file) # zrxns = [automol.reac.from_string(zrxn_str)] # else: # 
zrxns, _ = _id_reaction(rxn_info) if", "method matches' + ' {}'.format(method)) sys.exit() # Parse out user specified save location", "* 0.1 for x in range(26, 38, 2)] chosen_ts_gra = [] chosen_oversaturated_atom =", "'program': None, 'method': None, 'basis': None, 'orb_res': None, 'input_file': None, 'output_file': None, 'ts_locs':", "ret = (inf_obj, inp_str, out_str) _, saved_geos, saved_enes = _saved_cnf_info( cnf_fs, mod_thy_info) if", "reactants = reactants.split(' + ') products = products.split(' + ') values = [[],", "mod_thy_info, cnf_fs, locs, (geo, zma, ene), inf_obj, inp_str, zrxn=zrxn, zma_locs=(0,)) print( 'geometry is", "cnsampd = cinf_obj.nsamp cnsampd += 1 cinf_obj.nsamp = cnsampd else: cinf_obj = autofile.schema.info_objects.conformer_branch(0)", "None frag_geo = _fragment_ring_geo(geo) if frag_geo is not None: frag_zma = automol.geom.zmatrix(frag_geo) checked_rids", "'cc-pvdz'}, 'mlvl_cas_tz': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'caspt2', 'basis': 'cc-pvtz'}} def parse_user_locs(insert_dct):", "= autofile.schema.info_objects.conformer_trunk(0) rinf_obj.nsamp = 1 if cnf_fs[1].file.info.exists([locs[0]]): cinf_obj = cnf_fs[1].file.info.read(locs[0]) cnsampd = cinf_obj.nsamp", "(spc_fs, thy_fs, cnf_fs), (spc_prefix, thy_prefix, cnf_prefix)) def create_reaction_filesystems( prefix, rxn_info, mod_thy_info, ts_locs=None, locs=None):", "format should be:\\n' + '<Keyword>: <Value>\\n' + 'Allowed keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) )", "form either side of reaction # class but not in between words value", "prefix, rxn_info, mod_thy_info, ts_locs=None, locs=None): # species filesystem print('rxn_info', rxn_info) rxn_fs = autofile.fs.reaction(prefix)", "sinfo.from_data(ich, chg, mult) def parse_user_reaction(insert_dct): smis = insert_dct['smiles'] ichs = insert_dct['inchi'] mults =", "pgra in product_gras: try: pich = automol.graph.inchi(pgra, stereo=True) except IndexError: pich = 
automol.graph.inchi(pgra)", "out_str) geo = elstruct.reader.opt_geometry(prog, out_str) if geo is None: print( 'No geometry could", "def read_user_filesystem(dct): if dct['save_filesystem'] is None: print( 'ERROR: No save_filesystem}' + 'Script will", "= _sym_unique( geo, ene, saved_geos, saved_enes) if sym_id is None: if cnf_fs[0].file.info.exists(): rinf_obj", "else: value = value[0].replace(' ', '') print(keyword, value) insert_dct[keyword] = value else: print(", "print('bacRXN', automol.graph.string(backward_gra)) # if forward_gra == automol.geom.graph(geo, stereo=False): # zrxn = zrxn_i #", "= _id_reaction(rxn_info) if rxn_class is None: print( 'Error: user did not specify rxn_class')", "move on to other reaction types we have to check for double #", "= ts_ichs[::-1] ts_smis = ts_smis[::-1] ts_gras = ts_gras[::-1] rxn_gras = rxn_gras[::-1] if ts_smis[0]", "'Allowed keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() return insert_dct if __name__ == '__main__':", "'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': 'cc-pvtz'}, 'lvl_b2d': { 'orb_res': 'RU', 'program':", "cnf_prefix = cnf_fs[-1].path(locs) else: cnf_prefix = None return ( (spc_fs, thy_fs, cnf_fs), (spc_prefix,", "is not None: frag_zma = automol.geom.zmatrix(frag_geo) checked_rids = [] for locs in cnf_fs[-1].existing():", "_id_reaction(rxn_info) if rxn_class is None: print( 'Error: user did not specify rxn_class') sys.exit()", "add_idx) # remove_idx = 0 # for idx_i, idx_j in enumerate(zma_keys): # if", "checked_rids: continue if cnf_fs[-1].file.geometry.exists(locs): checked_rids.append(current_rid) locs_geo = cnf_fs[-1].file.geometry.read(locs) frag_locs_geo = _fragment_ring_geo(locs_geo) if frag_locs_geo", "not match number of charges') sys.exit() idx = 0 rxn_muls = [[], []]", "'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvqz' }, 'lvl_b3s': { 'orb_res': 'RU', 'program':", "inf_obj = autofile.schema.info_objects.run( 
job=elstruct.Job.OPTIMIZATION, prog=prog, version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) ret = (inf_obj, inp_str,", "orders forw_bnd_ord_dct = {breaking_bond: 0.9, forming_bond: 0.1} back_bnd_ord_dct = {breaking_bond: 0.1, forming_bond: 0.9}", "rxn_class = parse_user_reaction(insert_dct) zrxn, zma, geo, rxn_info = get_zrxn(geo, rxn_info, rxn_class) # for", "input') sys.exit() elif basis is None: print('Error: user did not specify basis in", "specified mult of {}'.format(mults[idx]) + 'is not an allowed multiplicty for inchi {}'.format(ich))", "ts_smis[0][::-1] reactant_match = True if reactant_match: if ts_smis[1] == rxn_smis[1]: product_match = True", "geo) std_zrxn = automol.reac.relabel_for_zmatrix( std_rxn, zma_keys, dummy_key_dct) rxn_info = (ts_ichs, *rxn_info[1:]) ts_geo =", "idx += 1 for ich in ichs[1]: mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) if", "{ 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': '6-31g*' }, 'lvl_b3mg': { 'orb_res':", "sys.exit() idx = 0 rxn_muls = [[], []] rxn_chgs = [[], []] for", "in the script or use program/method/basis/orb_dct' + 'keywords instead of theory') sys.exit() return", "None return ( (rxn_fs, thy_fs, ts_fs, cnf_fs), (rxn_prefix, thy_prefix, ts_prefix, cnf_prefix)) def read_user_file(dct,", "automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( # zrxn_i.backward_ts_graph))) # backward_gra = automol.graph.add_bonds(backward_gra, back_form_key) #", "orb_res) else: if theory in THEORY_DCT: thy_info = tinfo.from_dct(THEORY_DCT[theory]) else: print( 'Error: user", "match number of mults') sys.exit() if len(flat_ichs) != len(chgs): print( 'Error: number of", "out which H is being transfered') sys.exit() return chosen_ts_gra, chosen_oversaturated_atom def get_zrxn(geo, rxn_info,", "'basis': '6-31+g*' }, 'lvl_wbt': { 'orb_res': 'RU', 'program': 
'gaussian09', 'method': 'wb97xd', 'basis': 'cc-pvtz'},", "autorun.SCRIPT_DCT['projrot'] # freqs, _, imags, _ = autorun.projrot.frequencies( # script_str, freq_run_path, [geo], [[]],", "print('Error: user did not specify program in input') sys.exit() elif method is None:", "autofile.fs.species(prefix) spc_fs[-1].create(spc_info) spc_prefix = spc_fs[-1].path(spc_info) # theory filesystem thy_fs = autofile.fs.theory(spc_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix", "projrot found the following' + # 'frequencies: ' + ','.join(imags)) # sys.exit() else:", "gra in rxn_gras[0]: reactant_keys.append(automol.graph.atom_keys(gra)) product_keys = [] for gra in rxn_gras[1]: product_keys.append(automol.graph.atom_keys(gra)) std_rxn", "if val < 0] if len(oversaturated_atoms) == 1: chosen_ts_gra = ts_gras[0] chosen_oversaturated_atom =", "filesystem' + '... not saving') def species_match(geo, spc_info): match = True ich, _,", "else: spc_info = parse_user_species(insert_dct) mod_thy_info = tinfo.modify_orb_label(thy_info, spc_info) locs = parse_user_locs(insert_dct) # Check", "not an allowed multiplicty for inchi {}'.format(ich)) match = False return match def", "None, 'smiles': None, 'inchi': None, 'mult': None, 'charge': None, 'rid': None, 'cid': None,", "zrxns: # forw_form_key = automol.reac.forming_bond_keys(zrxn_i) # back_form_key = automol.reac.forming_bond_keys(zrxn_i, rev=True) # forw_brk_key =", "'wb97xd', 'basis': 'cc-pvtz'}, 'lvl_m06s': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': '6-31g*'", "value = values elif keyword in ['ts_locs']: value = (int(value),) elif keyword in", "+ '\\nthe expected rid for this geo is {}'.format(geo_rid) + '\\nthe user rid", "'Key format should be:\\n' + '<Keyword>: <Value>\\n' + 'Allowed keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys())))", "import _geo_unique from mechroutines.es._routines.conformer import _fragment_ring_geo from mechroutines.es._routines._sadpt import 
save_saddle_point from mechlib.reaction.rxnid import", "user rid in input file is {}'.format(rid)) match = False return match def", "out_str) _, saved_geos, saved_enes = _saved_cnf_info( cnf_fs, mod_thy_info) if _geo_unique(geo, ene, saved_geos, saved_enes,", "= [[], []] for smi in smis[0]: ichs[0].append(automol.smiles.inchi(smi)) for smi in smis[1]: ichs[1].append(automol.smiles.inchi(smi))", "= None elif keyword in ['mult', 'charge', 'ts_mult']: values = [] for val", "No save_filesystem}' + 'Script will exit') sys.exit() return dct['save_filesystem'] def choose_cutoff_distance(geo): rqhs =", "specify mults in input') sys.exit() if chgs is None: print('Error: user did not", "in enumerate(script_input): if len(line) < 2: continue elif '!' in line[0]: continue line", "# add_idx = 1 # for dumm_j in dummies: # if dummy >", "' and method matches' + ' {}'.format(method)) sys.exit() # Parse out user specified", "print('rxn_info', rxn_info) rxn_fs = autofile.fs.reaction(prefix) sort_rxn_info = rinfo.sort(rxn_info, scheme='autofile') rxn_fs[-1].create(sort_rxn_info) rxn_prefix = rxn_fs[-1].path(sort_rxn_info)", "rqhs = [x * 0.1 for x in range(26, 38, 2)] chosen_ts_gra =", "# 'zrxns, which are the following') # for zrxn_i in zrxns: # print(zrxns)", "info in user given output') sys.exit() # Check that the rid/cid info matches", "geometry from output:\\n' + '{}'.format(automol.geom.string(geo))) match = False if mul not in mults_allowed:", "' '.join(value) elif keyword not in ['smiles', 'inchi']: value = value.replace(' ', '')", "'lvl_m06m': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': '6-31+g*' }, 'lvl_m06t': {", "'Error: number of species does not match number of mults') sys.exit() if len(flat_ichs)", "enumerate(rxn_info[0]): for ich in side: rxn_smis[i].append(automol.inchi.smiles(ich)) ts_smis = [[], []] ts_ichs = [[],", "None: # print( # 'No hessian found in output, cannot save ' +", "':' not in line: print( 'ERROR: line\\n({}) {}\\n is not 
parsable, '.format(i, line)", "== rxn_smis[1]: product_match = True elif ts_smis[1][::-1] == rxn_smis[-1]: ts_ichs[1] = ts_ichs[1][::-1] ts_smis[1]", "# # hess = elstruct.reader.hessian(prog, out_str) # Hess = None # If hess", "try: pich = automol.graph.inchi(pgra, stereo=True) except IndexError: pich = automol.graph.inchi(pgra) psmi = automol.inchi.smiles(pich)", "+ 'is not an allowed multiplicty for inchi {}'.format(ich)) sys.exit() rxn_muls[1].append(mults[idx]) rxn_chgs[1].append(chgs[idx]) idx", "chgs is None: print('Error: user did not specify charges in input') sys.exit() flat_ichs", "user did not specify rxn_class') sys.exit() return rxn_info, ts_info, rxn_class def parse_user_theory(insert_dct): #", "for double # bonds when doing bond orders forw_bnd_ord_dct = {breaking_bond: 0.9, forming_bond:", "cnf_fs[-1].create(locs) cnf_prefix = cnf_fs[-1].path(locs) else: cnf_prefix = None return ( (rxn_fs, thy_fs, ts_fs,", "print( 'Error: rid mismatch for the filesystem at' + ' {}'.format(cnf_fs[0].path()) + '\\nthe", "'cc-pvtz'}, 'lvl_b2d': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvdz'}, 'lvl_b2t': {", "+ 'Comment lines should contain \"!\"' + 'Key format should be:\\n' + '<Keyword>:", "{ 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvqz-f12' }, 'mlvl_cas_dz': { 'orb_res':", "value = values[0] else: value = values elif keyword in ['ts_locs']: value =", "rxn_class, *ts_gras, reactant_keys, product_keys) ts_zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix( std_rxn, geo) std_zrxn =", "ts_zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix( std_rxn, geo) std_zrxn = automol.reac.relabel_for_zmatrix( std_rxn, zma_keys, dummy_key_dct)", "enumerate(zma_keys): # if idx_i in dummies: # remove_idx -= 1 # else: #", "= True elif ts_smis[1][::-1] == rxn_smis[-1]: ts_ichs[1] = ts_ichs[1][::-1] ts_smis[1] = ts_smis[1][::-1] product_match", "without a hessian') # sys.exit() # run_path = 
insert_dct['run_path'] # if run_path is", "1: insert_dct['saddle'] = True reactants, products = value reactants = reactants.split(' + ')", "}, 'lvl_m06t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': 'cc-pvtz'}, 'lvl_b2d': {", "{breaking_bond: 0.1, forming_bond: 0.9} forward_gra = automol.graph.set_bond_orders(ts_gra, forw_bnd_ord_dct) backward_gra = automol.graph.set_bond_orders(ts_gra, back_bnd_ord_dct) reactant_gras", "zrxn_i in zrxns: # forw_form_key = automol.reac.forming_bond_keys(zrxn_i) # back_form_key = automol.reac.forming_bond_keys(zrxn_i, rev=True) #", "None if hess is not None and zrxn is not None: hess_inf_obj =", "many bonds to transfered atom for me to figure out') print('I promise i", "in line: print( 'ERROR: line\\n({}) {}\\n is not parsable, '.format(i, line) + 'script", "'is not an allowed multiplicty for inchi {}'.format(ich)) match = False return match", "= automol.inchi.add_stereo(ich) ichs[1][idx] = ich if mults is None: print('Error: user did not", "values = [] for val in value.split(','): values.append(int(val)) if len(values) == 1: value", "not recognized'.format(keyword) + 'script will exit until inpupt is resolved to avoid' +", "given output') sys.exit() inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.OPTIMIZATION, prog=prog, version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) ret", "for reactant in reactants: values[0].append(reactant.replace(' ', '')) for product in products: values[1].append(product.replace(' ',", "# Hess = None # If hess is None: # print( # 'No", "back_form_key = automol.reac.forming_bond_keys(zrxn_i, rev=True) # forw_brk_key = automol.reac.breaking_bond_keys(zrxn_i) # back_brk_key = automol.reac.breaking_bond_keys(zrxn_i, rev=True)", "reactant_gras = automol.graph.connected_components(reactant_gras) product_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(backward_gra)) product_gras = 
automol.graph.connected_components(product_gras) ts_gras = [forward_gra,", "the info in user given output') sys.exit() # Check that the rid/cid info", "break frag_locs_zma = automol.geom.zmatrix(frag_locs_geo) if automol.zmat.almost_equal( frag_locs_zma, frag_zma, dist_rtol=0.1, ang_atol=.4): rid = locs[0]", "user did not specify ts_mul') sys.exit() rxn_info = rinfo.sort((ichs, rxn_chgs, rxn_muls, ts_mult)) ts_info", "cnf_fs[-1].file.geometry.read(locs) frag_locs_geo = _fragment_ring_geo(locs_geo) if frag_locs_geo is None: rid = locs[0] break frag_locs_zma", "on to other reaction types we have to check for double # bonds", "'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvqz' }, 'cc_lvl_df': { 'orb_res': 'RR',", "else: cnf_prefix = None return ( (rxn_fs, thy_fs, ts_fs, cnf_fs), (rxn_prefix, thy_prefix, ts_prefix,", "automol.inchi.graph(ich, stereo=False)) if mults[idx] not in mults_allowed: print( 'user specified mult of {}'.format(mults[idx])", "is None: print('Error: user did not specify mults in input') sys.exit() if chgs", "_fragment_ring_geo(geo) if frag_geo is not None: frag_zma = automol.geom.zmatrix(frag_geo) checked_rids = [] for", "'cc_lvl_d': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvdz'}, 'cc_lvl_t': { 'orb_res':", "prefix_array = create_species_filesystems( prefix, spc_info, mod_thy_info, locs=None) else: fs_array, prefix_array = create_reaction_filesystems( prefix,", "'gaussian09', 'method': 'm062x', 'basis': 'cc-pvtz'}, 'lvl_b2d': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3',", "None, 'charge': None, 'rid': None, 'cid': None, 'theory': None, 'program': None, 'method': None,", "sys.exit() if chg is None: print('Error: user did not specify charge in input')", "# run_path = os.getcwd() # run_fs = autofile.fs.run(run_path) # freq_run_path = run_fs[-1].path(['hessian']) #", "reactant_gras = automol.graph.without_dummy_bonds( 
automol.graph.without_fractional_bonds(forward_gra)) reactant_gras = automol.graph.connected_components(reactant_gras) product_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(backward_gra)) product_gras =", "geo_reorder_dct = {} # dummies = [] # for dummy in dummy_key_dct.keys(): #", "print( 'Error: user did not specify a theory {}'.format(theory) + ' that is", "== automol.geom.graph(geo, stereo=False): # zrxn = zrxn_i # zma, _, _ = automol.reac.ts_zmatrix(zrxn,", "import autorun from mechroutines.es._routines.conformer import _saved_cnf_info from mechroutines.es._routines.conformer import _sym_unique from mechroutines.es._routines.conformer import", "'6-31g*' }, 'lvl_wbm': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': '6-31+g*' },", "line.split(':') if keyword in insert_dct: if 'None' in value: value = None elif", "method is None: print('Error: user did not specify method in input') sys.exit() elif", "ts_gras = automol.graph.set_stereo_from_geometry(ts_gras, geo) ts_gras = automol.graph.connected_components(ts_gras) if len(ts_gras) != 1: continue for", "dummies: # remove_idx -= 1 # else: # geo_reorder_dct[idx_i + remove_idx] = idx_j", "= dct[keyword] return autofile.io_.read_file(file_name) def read_user_filesystem(dct): if dct['save_filesystem'] is None: print( 'ERROR: No", "if chosen_oversaturated_atom is None: print('could not figure out which H is being transfered')", "None, 'saddle': False, } for i, line in enumerate(script_input): if len(line) < 2:", "zma, geo, rxn_info = get_zrxn(geo, rxn_info, rxn_class) # for zrxn_i in zrxns: #", "specify basis in input') sys.exit() elif orb_res is None: print('Error: user did not", "mult of {}'.format(mults[idx]) + 'is not an allowed multiplicty for inchi {}'.format(ich)) sys.exit()", "input') sys.exit() return std_zrxn, ts_zma, ts_geo, rxn_info def main(insert_dct): prefix = read_user_filesystem(insert_dct) #", "== automol.geom.graph(geo, stereo=False): # zrxn 
= automol.reac.reverse(zrxn_i) # zma, _, _ = automol.reac.ts_zmatrix(zrxn,", "= automol.geom.reorder_coordinates(geo, geo_reorder_dct) else: print( 'The reactants and products found for the transition", "{}'.format(mults[idx]) + 'is not an allowed multiplicty for inchi {}'.format(ich)) sys.exit() rxn_muls[0].append(mults[idx]) rxn_chgs[0].append(chgs[idx])", "multiplicty for inchi {}'.format(ich)) sys.exit() rxn_muls[1].append(mults[idx]) rxn_chgs[1].append(chgs[idx]) idx += 1 ts_mult = insert_dct['ts_mult']", "mechroutines.es._routines.conformer import _saved_cnf_info from mechroutines.es._routines.conformer import _sym_unique from mechroutines.es._routines.conformer import _save_unique_parsed_conformer from mechroutines.es._routines.conformer", "}, 'lvl_b3t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': 'cc-pvtz'}, 'cc_lvl_d': {", "1: chosen_ts_gra = ts_gras[0] chosen_oversaturated_atom = oversaturated_atoms[0] break if chosen_oversaturated_atom is None: print('could", "'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': '6-31+g*' }, 'lvl_m06t': { 'orb_res': 'RU', 'program':", "# for dumm_j in dummies: # if dummy > dumm_j: # add_idx +=", "charges') sys.exit() idx = 0 rxn_muls = [[], []] rxn_chgs = [[], []]", "<Value>\\n' + 'Allowed keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() keyword, value = line.split(':')", "bond_order=True) oversaturated_atoms = [atm for atm, val in vals.items() if val < 0]", "is specified for {}'.format(keyword) + 'Script will exit') sys.exit() file_name = dct[keyword] return", "ich in side: rxn_smis[i].append(automol.inchi.smiles(ich)) ts_smis = [[], []] ts_ichs = [[], []] for", "in the input and output files that we # Are inserting into the", "'script will exit until inpupt is resolved to avoid' + ' filesystem contamination.'", "if ':' not in line: print( 'ERROR: line\\n({}) {}\\n is not parsable, '.format(i,", "autofile.schema.info_objects.run( 
job=elstruct.Job.HESSIAN, prog=prog, version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) hess_ret = (hess_inf_obj, inp_str, out_str) save_saddle_point(", "if cnf_fs[0].file.info.exists(): rinf_obj = cnf_fs[0].file.info.read() else: rinf_obj = autofile.schema.info_objects.conformer_trunk(0) rinf_obj.nsamp = 1 if", "'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvqz' }, 'cc_lvl_df': { 'orb_res': 'RR', 'program': 'molpro2015', 'method':", "'\\nthe user rid in input file is {}'.format(rid)) match = False return match", "cnf_prefix)) def create_reaction_filesystems( prefix, rxn_info, mod_thy_info, ts_locs=None, locs=None): # species filesystem print('rxn_info', rxn_info)", "Add a species to your database usiing a log file \"\"\" import sys", "mult is None: print('Error: user did not specify mult in input') sys.exit() if", "None, None if hess is not None and zrxn is not None: hess_inf_obj", "= [] # for dummy in dummy_key_dct.keys(): # add_idx = 1 # for", "automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(forward_gra)) reactant_gras = automol.graph.connected_components(reactant_gras) product_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(backward_gra)) product_gras = automol.graph.connected_components(product_gras) ts_gras", "forward_gra = automol.graph.set_bond_orders(ts_gra, forw_bnd_ord_dct) backward_gra = automol.graph.set_bond_orders(ts_gra, back_bnd_ord_dct) reactant_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(forward_gra)) reactant_gras", "# parse out geo information first, to make sure # user save specifications", "+ 'is not an allowed multiplicty for inchi {}'.format(ich)) match = False return", "'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvdz'}, 'lvl_b2t': { 'orb_res': 'RU', 'program': 'gaussian09',", "user did not specify species' + 'with an inchi or smiles in input')", "'program': 'gaussian09', 'method': 'b3lyp', 
'basis': '6-311g**' }, 'lvl_b3t': { 'orb_res': 'RU', 'program': 'gaussian09',", "# run_fs[-1].create(['hessian']) # script_str = autorun.SCRIPT_DCT['projrot'] # freqs, _, imags, _ = autorun.projrot.frequencies(", "[[], []] for ich in ichs[0]: mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) if mults[idx]", "# species filesystem print('rxn_info', rxn_info) rxn_fs = autofile.fs.reaction(prefix) sort_rxn_info = rinfo.sort(rxn_info, scheme='autofile') rxn_fs[-1].create(sort_rxn_info)", "values[0] else: value = values elif keyword in ['ts_locs']: value = (int(value),) elif", "cnf_fs, locs, (geo, zma, ene), inf_obj, inp_str, zrxn=zrxn, zma_locs=(0,)) print( 'geometry is now", "+ remove_idx] = idx_j # ts_geo = automol.geom.reorder_coordinates(geo, geo_reorder_dct) else: print( 'The reactants", "}, 'cc_lvl_qf': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvqz-f12' }, 'mlvl_cas_dz':", "cnf_fs) if geo_rid is not None: if geo_rid != rid: print( 'Error: rid", "0.9} forward_gra = automol.graph.set_bond_orders(ts_gra, forw_bnd_ord_dct) backward_gra = automol.graph.set_bond_orders(ts_gra, back_bnd_ord_dct) reactant_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(forward_gra))", "automol.reac.ts_zmatrix(zrxn, geo) # elif backward_gra == automol.geom.graph(geo, stereo=False): # zrxn = automol.reac.reverse(zrxn_i) #", "match any of the attempted ' + # 'zrxns, which are the following')", "None: # zrxn_str = autofile.io_.read_file(zrxn_file) # zrxns = [automol.reac.from_string(zrxn_str)] # else: # zrxns,", "automol.graph.inchi(pgra, stereo=True) except IndexError: pich = automol.graph.inchi(pgra) psmi = automol.inchi.smiles(pich) ts_ichs[1].append(pich) ts_smis[1].append(psmi) reactant_match", "None, 'input_file': None, 'output_file': None, 'ts_locs': None, 'ts_mult': None, 'rxn_class': None, 'zrxn_file': None,", "rid = locs[0] break return rid def 
parse_script_input(script_input_file): script_input = autofile.io_.read_file(script_input_file).splitlines() insert_dct =", "not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) ichs[0][idx] = ich for idx, ich in enumerate(ichs[1]):", "insert_dct['saddle']: if not species_match(geo, spc_info): print( 'I refuse to save this geometry until", "instead of theory') sys.exit() return thy_info def create_species_filesystems(prefix, spc_info, mod_thy_info, locs=None): # species", "# if zrxn is None: # print( # 'Your geometry did not match", "import _id_reaction THEORY_DCT = { 'lvl_wbs': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd',", "rxn_info, rxn_class) # for zrxn_i in zrxns: # forw_form_key = automol.reac.forming_bond_keys(zrxn_i) # back_form_key", "rid/cid info matches the filesystem fs_array, prefix_array = create_species_filesystems( prefix, spc_info, mod_thy_info, locs=None)", "run_path is None: # run_path = os.getcwd() # run_fs = autofile.fs.run(run_path) # freq_run_path", "not specify mults in input') sys.exit() if chgs is None: print('Error: user did", "= autofile.io_.read_file(zrxn_file) # zrxns = [automol.reac.from_string(zrxn_str)] # else: # zrxns, _ = _id_reaction(rxn_info)", "user given output') sys.exit() inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.OPTIMIZATION, prog=prog, version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS)", "of species does not match number of mults') sys.exit() if len(flat_ichs) != len(chgs):", "') if len(value) > 1: insert_dct['saddle'] = True reactants, products = value reactants", "forw_bnd_ord_dct = {breaking_bond: 0.9, forming_bond: 0.1} back_bnd_ord_dct = {breaking_bond: 0.1, forming_bond: 0.9} forward_gra", "cinf_obj.nsamp = 1 cnf_fs[1].create([locs[0]]) cnf_fs[0].file.info.write(rinf_obj) cnf_fs[1].file.info.write(cinf_obj, [locs[0]]) hess, freqs, imags = None, None,", "'cc-pvqz' }, 'cc_lvl_df': { 'orb_res': 'RR', 'program': 'molpro2015', 
'method': 'ccsd(t)-f12', 'basis': 'cc-pvdz-f12' },", "= False return match def locs_match(geo, cnf_fs, locs): match = True rid =", "import _sym_unique from mechroutines.es._routines.conformer import _save_unique_parsed_conformer from mechroutines.es._routines.conformer import _geo_unique from mechroutines.es._routines.conformer import", "output') sys.exit() inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.OPTIMIZATION, prog=prog, version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) ret =", "is now saved at {}'.format(cnf_fs[-1].path(locs))) else: print( 'the geometry in the output is", "print( 'Error: user did not specify species' + 'with an inchi or smiles", "'gaussian09', 'method': 'b3lyp', 'basis': '6-311g**' }, 'lvl_b3t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method':", "geo is None: print( 'No geometry could be parsed from output' + 'Check", "backward_gra = automol.graph.remove_bonds(backward_gra, back_brk_key) # print('forRXN', automol.graph.string(zrxn_i.forward_ts_graph)) # print('forRXN', automol.graph.string(forward_gra)) # print('bacRXN', automol.graph.string(zrxn_i.backward_ts_graph))", "['rxn_class']: # strip whitespaces form either side of reaction # class but not", "{}'.format(ich)) sys.exit() rxn_muls[0].append(mults[idx]) rxn_chgs[0].append(chgs[idx]) idx += 1 for ich in ichs[1]: mults_allowed =", "print( # 'No hessian found in output, cannot save ' + # 'a", "an allowed multiplicty for inchi {}'.format(ich)) sys.exit() rxn_muls[0].append(mults[idx]) rxn_chgs[0].append(chgs[idx]) idx += 1 for", "elif basis is None: print('Error: user did not specify basis in input') sys.exit()", "print('forRXN', automol.graph.string(forward_gra)) # print('bacRXN', automol.graph.string(zrxn_i.backward_ts_graph)) # print('bacRXN', automol.graph.string(backward_gra)) # if forward_gra == automol.geom.graph(geo,", "matches user specied' + ' {}'.format(prog) + ' and method matches' + '", "as sinfo import elstruct import 
autorun from mechroutines.es._routines.conformer import _saved_cnf_info from mechroutines.es._routines.conformer import", "insert_dct[keyword] = value else: print( 'ERROR: Keyword {} is not recognized'.format(keyword) + 'script", "sys.exit() keyword, value = line.split(':') if keyword in insert_dct: if 'None' in value:", "'cc-pvdz'}, 'lvl_b2t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvtz'}, 'lvl_b2q': {", "idx_j in enumerate(zma_keys): # if idx_i in dummies: # remove_idx -= 1 #", "in enumerate(zma_keys): # if idx_i in dummies: # remove_idx -= 1 # else:", "not in mults_allowed: print( 'user specified mult of {}'.format(mults[idx]) + 'is not an", "'cc_lvl_tf': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvtz-f12' }, 'cc_lvl_qf': {", "stereo=True) if ich != geo_ich: print( 'user specified inchi {}'.format(ich) + 'does not", "None, 'output_file': None, 'ts_locs': None, 'ts_mult': None, 'rxn_class': None, 'zrxn_file': None, 'run_path': None,", "input') sys.exit() if ich is None: ich = automol.smiles.inchi(smi) if not automol.inchi.is_complete(ich): ich", "basis = insert_dct['basis'] orb_res = insert_dct['orb_res'] # Get input method from theory dictionary", "print('could not figure out which H is being transfered') sys.exit() return chosen_ts_gra, chosen_oversaturated_atom", "if ich != geo_ich: print( 'user specified inchi {}'.format(ich) + 'does not match", "'program': 'gaussian09', 'method': 'm062x', 'basis': 'cc-pvtz'}, 'lvl_b2d': { 'orb_res': 'RU', 'program': 'gaussian09', 'method':", "in ['rxn_class']: # strip whitespaces form either side of reaction # class but", "None: rid = locs[0] break frag_locs_zma = automol.geom.zmatrix(frag_locs_geo) if automol.zmat.almost_equal( frag_locs_zma, frag_zma, dist_rtol=0.1,", "an inchi or smiles in input') sys.exit() if ich is None: ich =", "for gra in rxn_gras[0]: reactant_keys.append(automol.graph.atom_keys(gra)) product_keys = [] for gra in 
rxn_gras[1]: product_keys.append(automol.graph.atom_keys(gra))", "zrxn_i in zrxns: # print(zrxns) # sys.exit() # # hess = elstruct.reader.hessian(prog, out_str)", "insert_dct['inchi'] mults = insert_dct['mult'] chgs = insert_dct['charge'] rxn_class = insert_dct['rxn_class'] # zrxn_file =", "filesystem contamination.' + 'Allowed keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() return insert_dct if", "_id_reaction THEORY_DCT = { 'lvl_wbs': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis':", "= [] chosen_oversaturated_atom = None for rqh in rqhs: ts_gras = automol.geom.connectivity_graph(geo, rqq_bond_max=3.5,", "mod_thy_info) if _geo_unique(geo, ene, saved_geos, saved_enes, zrxn=zrxn): sym_id = _sym_unique( geo, ene, saved_geos,", "sys.exit() return insert_dct if __name__ == '__main__': SCRIPT_INPUT_FILE = 'insert_options.txt' insert_dct = parse_script_input(SCRIPT_INPUT_FILE)", "{ 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvdz'}, 'cc_lvl_t': { 'orb_res': 'RR',", "{ 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvtz'}, 'lvl_b2q': { 'orb_res': 'RU',", "contain \"!\"' + 'Key format should be:\\n' + '<Keyword>: <Value>\\n' + 'Allowed keywords", "= { 'lvl_wbs': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': '6-31g*' },", "= cnsampd else: cinf_obj = autofile.schema.info_objects.conformer_branch(0) cinf_obj.nsamp = 1 cnf_fs[1].create([locs[0]]) cnf_fs[0].file.info.write(rinf_obj) cnf_fs[1].file.info.write(cinf_obj, [locs[0]])", ") sys.exit() keyword, value = line.split(':') if keyword in insert_dct: if 'None' in", "enumerate(value): value[i] = val.replace(' ', '') value = ' '.join(value) elif keyword not", "= value[0].replace(' ', '') print(keyword, value) insert_dct[keyword] = value else: print( 'ERROR: Keyword", "True if reactant_match and product_match: reactant_keys = [] for gra in rxn_gras[0]: 
reactant_keys.append(automol.graph.atom_keys(gra))", "'user specified mult of {}'.format(mul) + 'is not an allowed multiplicty for inchi", "def choose_cutoff_distance(geo): rqhs = [x * 0.1 for x in range(26, 38, 2)]", "else: _save_unique_parsed_conformer( mod_thy_info, cnf_fs, locs, (geo, zma, ene), inf_obj, inp_str, zrxn=zrxn, zma_locs=(0,)) print(", "automol.reac.ts_zmatrix( std_rxn, geo) std_zrxn = automol.reac.relabel_for_zmatrix( std_rxn, zma_keys, dummy_key_dct) rxn_info = (ts_ichs, *rxn_info[1:])", "hessian found in output, cannot save ' + # 'a transition state without", "{}'.format(ich) + 'does not match inchi from output {}'.format(geo_ich) + 'which is based", "in rxn_gras[0]: reactant_keys.append(automol.graph.atom_keys(gra)) product_keys = [] for gra in rxn_gras[1]: product_keys.append(automol.graph.atom_keys(gra)) std_rxn =", "from mechroutines.es._routines.conformer import _sym_unique from mechroutines.es._routines.conformer import _save_unique_parsed_conformer from mechroutines.es._routines.conformer import _geo_unique from", "ts_smis[0] = ts_smis[0][::-1] reactant_match = True if reactant_match: if ts_smis[1] == rxn_smis[1]: product_match", "= automol.graph.remove_bonds(forward_gra, forw_brk_key) # backward_gra = automol.graph.remove_bonds(backward_gra, back_brk_key) # print('forRXN', automol.graph.string(zrxn_i.forward_ts_graph)) # print('forRXN',", "'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvdz'}, 'lvl_b2t': { 'orb_res': 'RU', 'program':", "True else: ts_ichs = ts_ichs[::-1] ts_smis = ts_smis[::-1] ts_gras = ts_gras[::-1] rxn_gras =", "= None return ( (spc_fs, thy_fs, cnf_fs), (spc_prefix, thy_prefix, cnf_prefix)) def create_reaction_filesystems( prefix,", "'basis': '6-311g**' }, 'lvl_b3t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': 'cc-pvtz'},", "in cnf_fs[-1].existing(): current_rid, _ = locs if current_rid in checked_rids: continue if cnf_fs[-1].file.geometry.exists(locs):", 
"automol.graph.set_stereo_from_geometry(ts_gras, geo) ts_gras = automol.graph.connected_components(ts_gras) if len(ts_gras) != 1: continue for ts_gra_i in", "else: print( 'The reactants and products found for the transition state' + 'did", "prog, method, basis, _ = thy_info ene = elstruct.reader.energy(prog, method, out_str) geo =", "not specify a theory {}'.format(theory) + ' that is in the THEORY_DCT' +", "None: hess_inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.HESSIAN, prog=prog, version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) hess_ret = (hess_inf_obj,", "def rng_loc_for_geo(geo, cnf_fs): rid = None frag_geo = _fragment_ring_geo(geo) if frag_geo is not", "return rid def parse_script_input(script_input_file): script_input = autofile.io_.read_file(script_input_file).splitlines() insert_dct = { 'save_filesystem': None, 'smiles':", "autofile.fs.conformer(thy_prefix) if locs is not None: cnf_fs[-1].create(locs) cnf_prefix = cnf_fs[-1].path(locs) else: cnf_prefix =", "rinfo.sort(rxn_info, scheme='autofile') rxn_fs[-1].create(sort_rxn_info) rxn_prefix = rxn_fs[-1].path(sort_rxn_info) # theory filesystem thy_fs = autofile.fs.theory(rxn_prefix) thy_fs[-1].create(mod_thy_info[1:])", "sys.exit() if ich is None: ich = automol.smiles.inchi(smi) if not automol.inchi.is_complete(ich): ich =", "{'runlvl_cnf_fs': (cnf_fs, None)}, locs, zma_locs=(0,), zma=zma) else: _save_unique_parsed_conformer( mod_thy_info, cnf_fs, locs, (geo, zma,", "def parse_user_theory(insert_dct): # Get input method explicitly inputted program = insert_dct['program'] method =", "= automol.graph.atom_unsaturated_valences(ts_gra_i, bond_order=True) oversaturated_atoms = [atm for atm, val in vals.items() if val", "in dummy_key_dct.keys(): # add_idx = 1 # for dumm_j in dummies: # if", "if chg is None: print('Error: user did not specify charge in input') sys.exit()", "breaking_bond, forming_bond = bonds # when we move on to other reaction types", "thy_fs, 
ts_fs, cnf_fs), (rxn_prefix, thy_prefix, ts_prefix, cnf_prefix)) def read_user_file(dct, keyword): if dct[keyword] is", "oversaturated_atom = choose_cutoff_distance(geo) atoms_bnd = automol.graph.atoms_bond_keys(ts_gra) bonds = atoms_bnd[oversaturated_atom] if len(bonds) != 2:", "choose_cutoff_distance(geo) atoms_bnd = automol.graph.atoms_bond_keys(ts_gra) bonds = atoms_bnd[oversaturated_atom] if len(bonds) != 2: print('too many", "else: if theory in THEORY_DCT: thy_info = tinfo.from_dct(THEORY_DCT[theory]) else: print( 'Error: user did", "THEORY_DCT: thy_info = tinfo.from_dct(THEORY_DCT[theory]) else: print( 'Error: user did not specify a theory", "mults_allowed: print( 'user specified mult of {}'.format(mul) + 'is not an allowed multiplicty", "= cnf_fs[1].file.info.read(locs[0]) cnsampd = cinf_obj.nsamp cnsampd += 1 cinf_obj.nsamp = cnsampd else: cinf_obj", "chg, mult) def parse_user_reaction(insert_dct): smis = insert_dct['smiles'] ichs = insert_dct['inchi'] mults = insert_dct['mult']", "forward_gra = automol.graph.add_bonds(forward_gra, forw_form_key) # backward_gra = automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( #", "_sym_unique from mechroutines.es._routines.conformer import _save_unique_parsed_conformer from mechroutines.es._routines.conformer import _geo_unique from mechroutines.es._routines.conformer import _fragment_ring_geo", "= autofile.schema.info_objects.run( job=elstruct.Job.OPTIMIZATION, prog=prog, version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) ret = (inf_obj, inp_str, out_str)", "[forward_gra, backward_gra] rxn_gras = [reactant_gras, product_gras] rxn_smis = [[], []] for i, side", "'ccsd(t)', 'basis': 'cc-pvtz'}, 'cc_lvl_q': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvqz'", "user specied' + ' {}'.format(prog) + ' and method matches' + ' {}'.format(method))", "# if run_path is None: # run_path = os.getcwd() # 
run_fs = autofile.fs.run(run_path)", "1 for ich in ichs[1]: mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) if mults[idx] not", "'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvdz-f12' }, 'cc_lvl_tf': { 'orb_res': 'RR', 'program': 'molpro2015', 'method':", "+ '... not saving') def species_match(geo, spc_info): match = True ich, _, mul", "None, 'cid': None, 'theory': None, 'program': None, 'method': None, 'basis': None, 'orb_res': None,", "if geo_rid != rid: print( 'Error: rid mismatch for the filesystem at' +", "elif ts_smis[1][::-1] == rxn_smis[-1]: ts_ichs[1] = ts_ichs[1][::-1] ts_smis[1] = ts_smis[1][::-1] product_match = True", "'molpro2015', 'method': 'caspt2', 'basis': 'cc-pvtz'}} def parse_user_locs(insert_dct): rid = insert_dct['rid'] cid = insert_dct['cid']", "in ichs[1]: mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) if mults[idx] not in mults_allowed: print(", "automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) geo_ich = automol.geom.inchi(geo, stereo=True) if ich != geo_ich: print( 'user", "ts_mult)) ts_info = rinfo.ts_info(rxn_info) # if zrxn_file is not None: # zrxn_str =", "ene, saved_geos, saved_enes) if sym_id is None: if cnf_fs[0].file.info.exists(): rinf_obj = cnf_fs[0].file.info.read() else:", "in line[0]: continue line = line.split('!')[0] if ':' not in line: print( 'ERROR:", "in vals.items() if val < 0] if len(oversaturated_atoms) == 1: chosen_ts_gra = ts_gras[0]", "value) insert_dct[keyword] = value else: print( 'ERROR: Keyword {} is not recognized'.format(keyword) +", "'script will exit until input is resolved to avoid' + ' filesystem contamination.'", "'output_file': None, 'ts_locs': None, 'ts_mult': None, 'rxn_class': None, 'zrxn_file': None, 'run_path': None, 'saddle':", "back_bnd_ord_dct) reactant_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(forward_gra)) 
reactant_gras = automol.graph.connected_components(reactant_gras) product_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(backward_gra)) product_gras", "(rxn_fs, thy_fs, ts_fs, cnf_fs), (rxn_prefix, thy_prefix, ts_prefix, cnf_prefix)) def read_user_file(dct, keyword): if dct[keyword]", "# zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo) # if zrxn is None: #", "0] if len(oversaturated_atoms) == 1: chosen_ts_gra = ts_gras[0] chosen_oversaturated_atom = oversaturated_atoms[0] break if", "enumerate(script_input): if len(line) < 2: continue elif '!' in line[0]: continue line =", "None: print('Error: user did not specify charges in input') sys.exit() flat_ichs = sum(ichs,", "in value: value = None elif keyword in ['mult', 'charge', 'ts_mult']: values =", "'program': 'gaussian09', 'method': 'm062x', 'basis': '6-31g*' }, 'lvl_m06m': { 'orb_res': 'RU', 'program': 'gaussian09',", "ts_fs = autofile.fs.transition_state(thy_prefix) ts_fs[-1].create(ts_locs) ts_prefix = ts_fs[-1].path(ts_locs) # conformer cnf_fs = autofile.fs.conformer(ts_prefix) if", "if len(values) == 1: value = values[0] else: value = values elif keyword", "insert_dct['cid'] if rid is None: rid = autofile.schema.generate_new_ring_id() if cid is None: cid", "ts_ichs = [[], []] for rgra in reactant_gras: try: rich = automol.graph.inchi(rgra, stereo=True)", "smi in smis[1]: ichs[1].append(automol.smiles.inchi(smi)) for idx, ich in enumerate(ichs[0]): if not automol.inchi.is_complete(ich): ich", "filesystem print('rxn_info', rxn_info) rxn_fs = autofile.fs.reaction(prefix) sort_rxn_info = rinfo.sort(rxn_info, scheme='autofile') rxn_fs[-1].create(sort_rxn_info) rxn_prefix =", "[x * 0.1 for x in range(26, 38, 2)] chosen_ts_gra = [] chosen_oversaturated_atom", "for rgra in reactant_gras: try: rich = automol.graph.inchi(rgra, stereo=True) except IndexError: rich =", "False return match def rng_loc_for_geo(geo, cnf_fs): rid = None frag_geo = _fragment_ring_geo(geo) if", "+ ' filesystem 
contamination.' + 'Allowed keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() return", "ich if mults is None: print('Error: user did not specify mults in input')", "is None: if program is None: print('Error: user did not specify program in", "fs_array, prefix_array = create_species_filesystems( prefix, spc_info, mod_thy_info, locs=None) else: fs_array, prefix_array = create_reaction_filesystems(", "imags, _ = autorun.projrot.frequencies( # script_str, freq_run_path, [geo], [[]], [hess]) # if len(imags)", "# dummies = [] # for dummy in dummy_key_dct.keys(): # add_idx = 1", "return match def rng_loc_for_geo(geo, cnf_fs): rid = None frag_geo = _fragment_ring_geo(geo) if frag_geo", "'is not an allowed multiplicty for inchi {}'.format(ich)) sys.exit() rxn_muls[1].append(mults[idx]) rxn_chgs[1].append(chgs[idx]) idx +=", "print( 'ERROR: No save_filesystem}' + 'Script will exit') sys.exit() return dct['save_filesystem'] def choose_cutoff_distance(geo):", "'cc_lvl_q': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvqz' }, 'cc_lvl_df': {", "parse_script_input(script_input_file): script_input = autofile.io_.read_file(script_input_file).splitlines() insert_dct = { 'save_filesystem': None, 'smiles': None, 'inchi': None,", "keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() return insert_dct if __name__ == '__main__': SCRIPT_INPUT_FILE", "{ 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': '6-31+g*' }, 'lvl_wbt': { 'orb_res':", "geo_rid != rid: print( 'Error: rid mismatch for the filesystem at' + '", "None, 'rxn_class': None, 'zrxn_file': None, 'run_path': None, 'saddle': False, } for i, line", "'input_file') out_str = read_user_file(insert_dct, 'output_file') # parse method from insert input file thy_info", "(rxn_prefix, thy_prefix, ts_prefix, cnf_prefix)) def read_user_file(dct, keyword): if dct[keyword] is None: print( 'ERROR:", "Get input method explicitly 
inputted program = insert_dct['program'] method = insert_dct['method'] basis =", "user input') sys.exit() return std_zrxn, ts_zma, ts_geo, rxn_info def main(insert_dct): prefix = read_user_filesystem(insert_dct)", "if hess is not None and zrxn is not None: hess_inf_obj = autofile.schema.info_objects.run(", "+ 'please add it to the dct in the script or use program/method/basis/orb_dct'", "insert_dct['saddle']: rxn_info, spc_info, rxn_class = parse_user_reaction(insert_dct) zrxn, zma, geo, rxn_info = get_zrxn(geo, rxn_info,", "idx, ich in enumerate(ichs[1]): if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) ichs[1][idx] = ich", "until user specified' + ' info matches the info in user given output')", "{} is not recognized'.format(keyword) + 'script will exit until inpupt is resolved to", "product_keys = [] for gra in rxn_gras[1]: product_keys.append(automol.graph.atom_keys(gra)) std_rxn = automol.reac.Reaction( rxn_class, *ts_gras,", "+ '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() keyword, value = line.split(':') if keyword in insert_dct: if", "locs if current_rid in checked_rids: continue if cnf_fs[-1].file.geometry.exists(locs): checked_rids.append(current_rid) locs_geo = cnf_fs[-1].file.geometry.read(locs) frag_locs_geo", "rev=True) # forward_gra = automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( # zrxn_i.forward_ts_graph))) # forward_gra", "automol.inchi.smiles(rich) ts_ichs[0].append(rich) ts_smis[0].append(rsmi) for pgra in product_gras: try: pich = automol.graph.inchi(pgra, stereo=True) except", "locs): print( 'I refuse to save this geometry until user specified' + '", "is None: if cnf_fs[0].file.info.exists(): rinf_obj = cnf_fs[0].file.info.read() else: rinf_obj = autofile.schema.info_objects.conformer_trunk(0) rinf_obj.nsamp =", "import os import autofile import automol from mechanalyzer.inf import thy as tinfo from", "match number of charges') 
sys.exit() idx = 0 rxn_muls = [[], []] rxn_chgs", "in ts_gras: vals = automol.graph.atom_unsaturated_valences(ts_gra_i, bond_order=True) oversaturated_atoms = [atm for atm, val in", "ts_fs[-1].create(ts_locs) ts_prefix = ts_fs[-1].path(ts_locs) # conformer cnf_fs = autofile.fs.conformer(ts_prefix) if locs is not", "= None, None, None if hess is not None and zrxn is not", "_save_unique_parsed_conformer from mechroutines.es._routines.conformer import _geo_unique from mechroutines.es._routines.conformer import _fragment_ring_geo from mechroutines.es._routines._sadpt import save_saddle_point", "None # If hess is None: # print( # 'No hessian found in", "input') sys.exit() elif orb_res is None: print('Error: user did not specify orb_res in", "in output, cannot save ' + # 'a transition state without a hessian')", "def parse_script_input(script_input_file): script_input = autofile.io_.read_file(script_input_file).splitlines() insert_dct = { 'save_filesystem': None, 'smiles': None, 'inchi':", "spc_fs = autofile.fs.species(prefix) spc_fs[-1].create(spc_info) spc_prefix = spc_fs[-1].path(spc_info) # theory filesystem thy_fs = autofile.fs.theory(spc_prefix)", "ts_info, rxn_class def parse_user_theory(insert_dct): # Get input method explicitly inputted program = insert_dct['program']", "from mechanalyzer.inf import spc as sinfo import elstruct import autorun from mechroutines.es._routines.conformer import", "= insert_dct['basis'] orb_res = insert_dct['orb_res'] # Get input method from theory dictionary theory", "'method': 'b3lyp', 'basis': '6-31g*' }, 'lvl_b3mg': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp',", "parsed from output' + 'Check that the program matches user specied' + '", "'Can only save a transition state that has a single' + # 'imaginary", "No filename is specified for {}'.format(keyword) + 'Script will exit') sys.exit() file_name =", "reactant_keys, product_keys) ts_zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix( std_rxn, geo) std_zrxn = 
automol.reac.relabel_for_zmatrix( std_rxn,", "side in enumerate(rxn_info[0]): for ich in side: rxn_smis[i].append(automol.inchi.smiles(ich)) ts_smis = [[], []] ts_ichs", "automol.geom.graph(geo, stereo=False): # zrxn = zrxn_i # zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo)", "not saving') def species_match(geo, spc_info): match = True ich, _, mul = spc_info", "= sum(ichs, []) if len(flat_ichs) != len(mults): print( 'Error: number of species does", "'saddle': False, } for i, line in enumerate(script_input): if len(line) < 2: continue", "hess, freqs, imags = None, None, None if hess is not None and", "ts_ichs[0][::-1] ts_smis[0] = ts_smis[0][::-1] reactant_match = True else: ts_ichs = ts_ichs[::-1] ts_smis =", "'program': 'gaussian09', 'method': 'wb97xd', 'basis': 'cc-pvtz'}, 'lvl_m06s': { 'orb_res': 'RU', 'program': 'gaussian09', 'method':", "rqh in rqhs: ts_gras = automol.geom.connectivity_graph(geo, rqq_bond_max=3.5, rqh_bond_max=rqh, rhh_bond_max=2.3) ts_gras = automol.graph.set_stereo_from_geometry(ts_gras, geo)", "in input') sys.exit() return sinfo.from_data(ich, chg, mult) def parse_user_reaction(insert_dct): smis = insert_dct['smiles'] ichs", "insert_dct['smiles'] ichs = insert_dct['inchi'] mults = insert_dct['mult'] chgs = insert_dct['charge'] rxn_class = insert_dct['rxn_class']", "stereo=False)) geo_ich = automol.geom.inchi(geo, stereo=True) if ich != geo_ich: print( 'user specified inchi", "if geo_rid is not None: if geo_rid != rid: print( 'Error: rid mismatch", "user specified' + ' info matches the info in user given output') sys.exit()", "rng_loc_for_geo(geo, cnf_fs) if geo_rid is not None: if geo_rid != rid: print( 'Error:", "thy_fs[-1].path(mod_thy_info[1:]) # conformer cnf_fs = autofile.fs.conformer(thy_prefix) if locs is not None: cnf_fs[-1].create(locs) cnf_prefix", "to figure out') print('I promise i will be smarter in the future') sys.exit()", "ene = elstruct.reader.energy(prog, method, out_str) geo = elstruct.reader.opt_geometry(prog, out_str) 
if geo is None:", "ts_gras[::-1] rxn_gras = rxn_gras[::-1] if ts_smis[0] == rxn_smis[0]: reactant_match = True elif ts_smis[0][::-1]", "psmi = automol.inchi.smiles(pich) ts_ichs[1].append(pich) ts_smis[1].append(psmi) reactant_match = False product_match = False if ts_smis[0]", "save this geometry until user specified' + ' info matches the info in", "does not match number of charges') sys.exit() idx = 0 rxn_muls = [[],", "rxn_info, rxn_class): ts_gra, oversaturated_atom = choose_cutoff_distance(geo) atoms_bnd = automol.graph.atoms_bond_keys(ts_gra) bonds = atoms_bnd[oversaturated_atom] if", "True if reactant_match: if ts_smis[1] == rxn_smis[1]: product_match = True elif ts_smis[1][::-1] ==", "def species_match(geo, spc_info): match = True ich, _, mul = spc_info mults_allowed =", "run_path = os.getcwd() # run_fs = autofile.fs.run(run_path) # freq_run_path = run_fs[-1].path(['hessian']) # run_fs[-1].create(['hessian'])", "val < 0] if len(oversaturated_atoms) == 1: chosen_ts_gra = ts_gras[0] chosen_oversaturated_atom = oversaturated_atoms[0]", "product_match: reactant_keys = [] for gra in rxn_gras[0]: reactant_keys.append(automol.graph.atom_keys(gra)) product_keys = [] for", "of reaction # class but not in between words value = value.split() for", "spc as sinfo import elstruct import autorun from mechroutines.es._routines.conformer import _saved_cnf_info from mechroutines.es._routines.conformer", "else: value = value.split(' = ') if len(value) > 1: insert_dct['saddle'] = True", "automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) ichs[0][idx] = ich for idx, ich in enumerate(ichs[1]): if", "{ 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': 'cc-pvtz'}, 'lvl_m06s': { 'orb_res': 'RU',", "is None: print( 'No geometry could be parsed from output' + 'Check that", "if len(flat_ichs) != len(chgs): print( 'Error: number of species does not match number", "values.append(int(val)) if len(values) == 1: value = values[0] else: value = values 
elif", "elstruct.reader.opt_geometry(prog, out_str) if geo is None: print( 'No geometry could be parsed from", "# back_form_key = automol.reac.forming_bond_keys(zrxn_i, rev=True) # forw_brk_key = automol.reac.breaking_bond_keys(zrxn_i) # back_brk_key = automol.reac.breaking_bond_keys(zrxn_i,", "print('Error: user did not specify basis in input') sys.exit() elif orb_res is None:", "'cc-pvtz-f12' }, 'cc_lvl_qf': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvqz-f12' },", "automol.graph.without_fractional_bonds(forward_gra)) reactant_gras = automol.graph.connected_components(reactant_gras) product_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(backward_gra)) product_gras = automol.graph.connected_components(product_gras) ts_gras =", "automol.reac.Reaction( rxn_class, *ts_gras, reactant_keys, product_keys) ts_zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix( std_rxn, geo) std_zrxn", "= insert_dct['smiles'] ich = insert_dct['inchi'] mult = insert_dct['mult'] chg = insert_dct['charge'] if ich", "dummy_key_dct = automol.reac.ts_zmatrix( std_rxn, geo) std_zrxn = automol.reac.relabel_for_zmatrix( std_rxn, zma_keys, dummy_key_dct) rxn_info =", "lines should contain \"!\"' + 'Key format should be:\\n' + '<Keyword>: <Value>\\n' +", "inchi {}'.format(ich) + 'does not match inchi from output {}'.format(geo_ich) + 'which is", "= rxn_fs[-1].path(sort_rxn_info) # theory filesystem thy_fs = autofile.fs.theory(rxn_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) if", "None: if geo_rid != rid: print( 'Error: rid mismatch for the filesystem at'", "user did not specify charges in input') sys.exit() flat_ichs = sum(ichs, []) if", "location matches geo information if not insert_dct['saddle']: if not species_match(geo, spc_info): print( 'I", "in the future') sys.exit() breaking_bond, forming_bond = bonds # when we move on", "automol.graph.without_dummy_bonds( 
automol.graph.without_fractional_bonds(backward_gra)) product_gras = automol.graph.connected_components(product_gras) ts_gras = [forward_gra, backward_gra] rxn_gras = [reactant_gras, product_gras]", "for this geo is {}'.format(geo_rid) + '\\nthe user rid in input file is", "continue if cnf_fs[-1].file.geometry.exists(locs): checked_rids.append(current_rid) locs_geo = cnf_fs[-1].file.geometry.read(locs) frag_locs_geo = _fragment_ring_geo(locs_geo) if frag_locs_geo is", "automol.graph.add_bonds(backward_gra, back_form_key) # if zrxn_i.class_ == 'hydrogen abstraction': # forward_gra = automol.graph.remove_bonds(forward_gra, forw_brk_key)", "if len(value) > 1: insert_dct['saddle'] = True reactants, products = value reactants =", "'No geometry could be parsed from output' + 'Check that the program matches", "method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) hess_ret = (hess_inf_obj, inp_str, out_str) save_saddle_point( zrxn, ret, hess_ret, freqs,", "> 1: insert_dct['saddle'] = True reactants, products = value reactants = reactants.split(' +", "reaction types we have to check for double # bonds when doing bond", "exit') sys.exit() file_name = dct[keyword] return autofile.io_.read_file(file_name) def read_user_filesystem(dct): if dct['save_filesystem'] is None:", "[] chosen_oversaturated_atom = None for rqh in rqhs: ts_gras = automol.geom.connectivity_graph(geo, rqq_bond_max=3.5, rqh_bond_max=rqh,", "= autofile.fs.theory(spc_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) # conformer cnf_fs = autofile.fs.conformer(thy_prefix) if locs", "0.9, forming_bond: 0.1} back_bnd_ord_dct = {breaking_bond: 0.1, forming_bond: 0.9} forward_gra = automol.graph.set_bond_orders(ts_gra, forw_bnd_ord_dct)", "= locs[0] break frag_locs_zma = automol.geom.zmatrix(frag_locs_geo) if automol.zmat.almost_equal( frag_locs_zma, frag_zma, dist_rtol=0.1, ang_atol=.4): rid", "std_zrxn = automol.reac.relabel_for_zmatrix( std_rxn, zma_keys, 
dummy_key_dct) rxn_info = (ts_ichs, *rxn_info[1:]) ts_geo = automol.zmat.geometry(ts_zma)", "not specify method in input') sys.exit() elif basis is None: print('Error: user did", "= elstruct.reader.hessian(prog, out_str) # Hess = None # If hess is None: #", "+ add_idx) # remove_idx = 0 # for idx_i, idx_j in enumerate(zma_keys): #", "# Get input method explicitly inputted program = insert_dct['program'] method = insert_dct['method'] basis", "_, _ = automol.reac.ts_zmatrix(zrxn, geo) # if zrxn is None: # print( #", "print( 'Error: number of species does not match number of mults') sys.exit() if", "file thy_info = parse_user_theory(insert_dct) # parse out geo information first, to make sure", "# Check that the rid/cid info matches the filesystem fs_array, prefix_array = create_species_filesystems(", "product_keys.append(automol.graph.atom_keys(gra)) std_rxn = automol.reac.Reaction( rxn_class, *ts_gras, reactant_keys, product_keys) ts_zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix(", "of theory') sys.exit() return thy_info def create_species_filesystems(prefix, spc_info, mod_thy_info, locs=None): # species filesystem", "locs, (geo, zma, ene), inf_obj, inp_str, zrxn=zrxn, zma_locs=(0,)) print( 'geometry is now saved", "import sys import os import autofile import automol from mechanalyzer.inf import thy as", "_geo_unique(geo, ene, saved_geos, saved_enes, zrxn=zrxn): sym_id = _sym_unique( geo, ene, saved_geos, saved_enes) if", "side of reaction # class but not in between words value = value.split()", "is being transfered') sys.exit() return chosen_ts_gra, chosen_oversaturated_atom def get_zrxn(geo, rxn_info, rxn_class): ts_gra, oversaturated_atom", "thy as tinfo from mechanalyzer.inf import rxn as rinfo from mechanalyzer.inf import spc", "in range(26, 38, 2)] chosen_ts_gra = [] chosen_oversaturated_atom = None for rqh in", "# forward_gra = automol.graph.add_bonds(forward_gra, forw_form_key) # backward_gra = automol.graph.without_stereo_parities( # 
automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds(", "return insert_dct if __name__ == '__main__': SCRIPT_INPUT_FILE = 'insert_options.txt' insert_dct = parse_script_input(SCRIPT_INPUT_FILE) main(insert_dct)", "types we have to check for double # bonds when doing bond orders", "cnsampd else: cinf_obj = autofile.schema.info_objects.conformer_branch(0) cinf_obj.nsamp = 1 cnf_fs[1].create([locs[0]]) cnf_fs[0].file.info.write(rinf_obj) cnf_fs[1].file.info.write(cinf_obj, [locs[0]]) hess,", "'cc-pvqz' }, 'lvl_b3s': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': '6-31g*' },", "= autofile.schema.info_objects.conformer_branch(0) cinf_obj.nsamp = 1 cnf_fs[1].create([locs[0]]) cnf_fs[0].file.info.write(rinf_obj) cnf_fs[1].file.info.write(cinf_obj, [locs[0]]) hess, freqs, imags =", "for inchi {}'.format(ich)) sys.exit() rxn_muls[0].append(mults[idx]) rxn_chgs[0].append(chgs[idx]) idx += 1 for ich in ichs[1]:", "cinf_obj.nsamp cnsampd += 1 cinf_obj.nsamp = cnsampd else: cinf_obj = autofile.schema.info_objects.conformer_branch(0) cinf_obj.nsamp =", "reactants, products = value reactants = reactants.split(' + ') products = products.split(' +", "if cnf_fs[-1].file.geometry.exists(locs): checked_rids.append(current_rid) locs_geo = cnf_fs[-1].file.geometry.read(locs) frag_locs_geo = _fragment_ring_geo(locs_geo) if frag_locs_geo is None:", "= ich if mults is None: print('Error: user did not specify mults in", "'charge': None, 'rid': None, 'cid': None, 'theory': None, 'program': None, 'method': None, 'basis':", "elif orb_res is None: print('Error: user did not specify orb_res in input') sys.exit()", "conformer cnf_fs = autofile.fs.conformer(ts_prefix) if locs is not None: cnf_fs[-1].create(locs) cnf_prefix = cnf_fs[-1].path(locs)", "+ ') products = products.split(' + ') values = [[], []] for reactant", "None: print( 'ERROR: No save_filesystem}' + 'Script will exit') sys.exit() return dct['save_filesystem'] def", "# user save 
specifications match output prog, method, basis, _ = thy_info ene", "{ 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvdz'}, 'lvl_b2t': { 'orb_res': 'RU',", "method = insert_dct['method'] basis = insert_dct['basis'] orb_res = insert_dct['orb_res'] # Get input method", "we # Are inserting into the filesystem inp_str = read_user_file(insert_dct, 'input_file') out_str =", "insert_dct = { 'save_filesystem': None, 'smiles': None, 'inchi': None, 'mult': None, 'charge': None,", "rxn_class) # for zrxn_i in zrxns: # forw_form_key = automol.reac.forming_bond_keys(zrxn_i) # back_form_key =", "_ = _id_reaction(rxn_info) if rxn_class is None: print( 'Error: user did not specify", "sym_id is None: if cnf_fs[0].file.info.exists(): rinf_obj = cnf_fs[0].file.info.read() else: rinf_obj = autofile.schema.info_objects.conformer_trunk(0) rinf_obj.nsamp", "checked_rids.append(current_rid) locs_geo = cnf_fs[-1].file.geometry.read(locs) frag_locs_geo = _fragment_ring_geo(locs_geo) if frag_locs_geo is None: rid =", "locs[0] geo_rid = rng_loc_for_geo(geo, cnf_fs) if geo_rid is not None: if geo_rid !=", "automol.geom.connectivity_graph(geo, rqq_bond_max=3.5, rqh_bond_max=rqh, rhh_bond_max=2.3) ts_gras = automol.graph.set_stereo_from_geometry(ts_gras, geo) ts_gras = automol.graph.connected_components(ts_gras) if len(ts_gras)", "will exit until inpupt is resolved to avoid' + ' filesystem contamination.' 
+", "= ') if len(value) > 1: insert_dct['saddle'] = True reactants, products = value", "elif keyword in ['mult', 'charge', 'ts_mult']: values = [] for val in value.split(','):", "None: ts_locs = (0,) ts_fs = autofile.fs.transition_state(thy_prefix) ts_fs[-1].create(ts_locs) ts_prefix = ts_fs[-1].path(ts_locs) # conformer", "being transfered') sys.exit() return chosen_ts_gra, chosen_oversaturated_atom def get_zrxn(geo, rxn_info, rxn_class): ts_gra, oversaturated_atom =", "'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvdz-f12' }, 'cc_lvl_tf': { 'orb_res': 'RR', 'program':", "'Script will exit') sys.exit() file_name = dct[keyword] return autofile.io_.read_file(file_name) def read_user_filesystem(dct): if dct['save_filesystem']", "# 'Your geometry did not match any of the attempted ' + #", "not match any of the attempted ' + # 'zrxns, which are the", "from mechanalyzer.inf import rxn as rinfo from mechanalyzer.inf import spc as sinfo import", "if len(flat_ichs) != len(mults): print( 'Error: number of species does not match number", "# run_fs = autofile.fs.run(run_path) # freq_run_path = run_fs[-1].path(['hessian']) # run_fs[-1].create(['hessian']) # script_str =", "future') sys.exit() breaking_bond, forming_bond = bonds # when we move on to other", "geo_ich: print( 'user specified inchi {}'.format(ich) + 'does not match inchi from output", "'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': '6-31g*' }, 'lvl_wbm': { 'orb_res': 'RU',", "ts_locs = (0,) ts_fs = autofile.fs.transition_state(thy_prefix) ts_fs[-1].create(ts_locs) ts_prefix = ts_fs[-1].path(ts_locs) # conformer cnf_fs", "chosen_oversaturated_atom def get_zrxn(geo, rxn_info, rxn_class): ts_gra, oversaturated_atom = choose_cutoff_distance(geo) atoms_bnd = automol.graph.atoms_bond_keys(ts_gra) bonds", "gra in rxn_gras[1]: product_keys.append(automol.graph.atom_keys(gra)) std_rxn = automol.reac.Reaction( rxn_class, *ts_gras, reactant_keys, product_keys) ts_zma, 
zma_keys,", "exit until inpupt is resolved to avoid' + ' filesystem contamination.' + 'Allowed", "# class but not in between words value = value.split() for i, val", "locs=None): # species filesystem print('rxn_info', rxn_info) rxn_fs = autofile.fs.reaction(prefix) sort_rxn_info = rinfo.sort(rxn_info, scheme='autofile')", "print( 'geometry is now saved at {}'.format(cnf_fs[-1].path(locs))) else: print( 'the geometry in the", "'cc_lvl_qf': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvqz-f12' }, 'mlvl_cas_dz': {", "any of the attempted ' + # 'zrxns, which are the following') #", "[]] for smi in smis[0]: ichs[0].append(automol.smiles.inchi(smi)) for smi in smis[1]: ichs[1].append(automol.smiles.inchi(smi)) for idx,", "is None: print('Error: user did not specify orb_res in input') sys.exit() else: thy_info", "mults is None: print('Error: user did not specify mults in input') sys.exit() if", "not specify orb_res in input') sys.exit() else: thy_info = (program, method, basis, orb_res)", "(0,) ts_fs = autofile.fs.transition_state(thy_prefix) ts_fs[-1].create(ts_locs) ts_prefix = ts_fs[-1].path(ts_locs) # conformer cnf_fs = autofile.fs.conformer(ts_prefix)", "'lvl_wbm': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': '6-31+g*' }, 'lvl_wbt': {", "method, basis, orb_res) else: if theory in THEORY_DCT: thy_info = tinfo.from_dct(THEORY_DCT[theory]) else: print(", "value = value[0].replace(' ', '') print(keyword, value) insert_dct[keyword] = value else: print( 'ERROR:", "rxn_class is None: print( 'Error: user did not specify rxn_class') sys.exit() return rxn_info,", "= 1 cnf_fs[1].create([locs[0]]) cnf_fs[0].file.info.write(rinf_obj) cnf_fs[1].file.info.write(cinf_obj, [locs[0]]) hess, freqs, imags = None, None, None", "mult = insert_dct['mult'] chg = insert_dct['charge'] if ich is None and smi is", "from output {}'.format(geo_ich) + 'which is based on geometry from output:\\n' + 
'{}'.format(automol.geom.string(geo)))", "# theory filesystem thy_fs = autofile.fs.theory(spc_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) # conformer cnf_fs", "dist_rtol=0.1, ang_atol=.4): rid = locs[0] break return rid def parse_script_input(script_input_file): script_input = autofile.io_.read_file(script_input_file).splitlines()", "'{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() return insert_dct if __name__ == '__main__': SCRIPT_INPUT_FILE = 'insert_options.txt' insert_dct", "freqs, imags = None, None, None if hess is not None and zrxn", "= automol.graph.connected_components(ts_gras) if len(ts_gras) != 1: continue for ts_gra_i in ts_gras: vals =", "else: print( 'Error: user did not specify a theory {}'.format(theory) + ' that", "is not None and zrxn is not None: hess_inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.HESSIAN, prog=prog,", "hess_ret, freqs, imags, mod_thy_info, {'runlvl_cnf_fs': (cnf_fs, None)}, locs, zma_locs=(0,), zma=zma) else: _save_unique_parsed_conformer( mod_thy_info,", "'basis': 'cc-pvqz-f12' }, 'mlvl_cas_dz': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'caspt2', 'basis': 'cc-pvdz'},", "# script_str = autorun.SCRIPT_DCT['projrot'] # freqs, _, imags, _ = autorun.projrot.frequencies( # script_str,", "= automol.reac.reverse(zrxn_i) # zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo) # if zrxn is", "saved at {}'.format(cnf_fs[-1].path(locs))) else: print( 'the geometry in the output is not unique", "until input is resolved to avoid' + ' filesystem contamination.' 
+ 'Comment lines", "return ( (rxn_fs, thy_fs, ts_fs, cnf_fs), (rxn_prefix, thy_prefix, ts_prefix, cnf_prefix)) def read_user_file(dct, keyword):", "'gaussian09', 'method': 'b3lyp', 'basis': 'cc-pvtz'}, 'cc_lvl_d': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)',", "= insert_dct['run_path'] # if run_path is None: # run_path = os.getcwd() # run_fs", "imags = None, None, None if hess is not None and zrxn is", "import elstruct import autorun from mechroutines.es._routines.conformer import _saved_cnf_info from mechroutines.es._routines.conformer import _sym_unique from", "for {}'.format(keyword) + 'Script will exit') sys.exit() file_name = dct[keyword] return autofile.io_.read_file(file_name) def", "autofile.fs.transition_state(thy_prefix) ts_fs[-1].create(ts_locs) ts_prefix = ts_fs[-1].path(ts_locs) # conformer cnf_fs = autofile.fs.conformer(ts_prefix) if locs is", "# Read in the input and output files that we # Are inserting", "inp_str, zrxn=zrxn, zma_locs=(0,)) print( 'geometry is now saved at {}'.format(cnf_fs[-1].path(locs))) else: print( 'the", "sys.exit() rxn_muls[0].append(mults[idx]) rxn_chgs[0].append(chgs[idx]) idx += 1 for ich in ichs[1]: mults_allowed = automol.graph.possible_spin_multiplicities(", "abstraction': # forward_gra = automol.graph.remove_bonds(forward_gra, forw_brk_key) # backward_gra = automol.graph.remove_bonds(backward_gra, back_brk_key) # print('forRXN',", "inchi or smiles in input') sys.exit() if ich is None: ich = automol.smiles.inchi(smi)", "'ccsd(t)', 'basis': 'cc-pvqz' }, 'cc_lvl_df': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis':", "for ts_gra_i in ts_gras: vals = automol.graph.atom_unsaturated_valences(ts_gra_i, bond_order=True) oversaturated_atoms = [atm for atm,", "if len(imags) != 1: # print( # 'Can only save a transition state", "orb_res is None: print('Error: user did not specify orb_res in input') sys.exit() else:", "= automol.geom.inchi(geo, stereo=True) if ich != geo_ich: print( 
'user specified inchi {}'.format(ich) +", "for inchi {}'.format(ich)) match = False return match def locs_match(geo, cnf_fs, locs): match", "= locs[0] geo_rid = rng_loc_for_geo(geo, cnf_fs) if geo_rid is not None: if geo_rid", "'<Keyword>: <Value>\\n' + 'Allowed keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() keyword, value =", "are the following') # for zrxn_i in zrxns: # print(zrxns) # sys.exit() #", "'b2plypd3', 'basis': 'cc-pvqz' }, 'lvl_b3s': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis':", "2: continue elif '!' in line[0]: continue line = line.split('!')[0] if ':' not", "'the geometry in the output is not unique to filesystem' + '... not", "cnf_fs[0].file.info.read() else: rinf_obj = autofile.schema.info_objects.conformer_trunk(0) rinf_obj.nsamp = 1 if cnf_fs[1].file.info.exists([locs[0]]): cinf_obj = cnf_fs[1].file.info.read(locs[0])", "zrxn_file = insert_dct['zrxn_file'] if ichs is None: ichs = [[], []] for smi", "{ 'lvl_wbs': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': '6-31g*' }, 'lvl_wbm':", "elif method is None: print('Error: user did not specify method in input') sys.exit()", "{}'.format(keyword) + 'Script will exit') sys.exit() file_name = dct[keyword] return autofile.io_.read_file(file_name) def read_user_filesystem(dct):", "ts_geo = automol.geom.reorder_coordinates(geo, geo_reorder_dct) else: print( 'The reactants and products found for the", "'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': '6-31g*' }, 'lvl_b3mg': { 'orb_res': 'RU',", "None: print('Error: user did not specify basis in input') sys.exit() elif orb_res is", "', '')) for product in products: values[1].append(product.replace(' ', '')) value = values else:", "read_user_filesystem(dct): if dct['save_filesystem'] is None: print( 'ERROR: No save_filesystem}' + 'Script will exit')", "not in between words value = value.split() for i, val in enumerate(value): value[i]", "forw_form_key = 
automol.reac.forming_bond_keys(zrxn_i) # back_form_key = automol.reac.forming_bond_keys(zrxn_i, rev=True) # forw_brk_key = automol.reac.breaking_bond_keys(zrxn_i) #", "rxn_smis[0]: reactant_match = True elif ts_smis[0][::-1] == rxn_smis[0]: ts_ichs[0] = ts_ichs[0][::-1] ts_smis[0] =", "automol.graph.string(backward_gra)) # if forward_gra == automol.geom.graph(geo, stereo=False): # zrxn = zrxn_i # zma,", "= automol.reac.ts_zmatrix(zrxn, geo) # if zrxn is None: # print( # 'Your geometry", "expected rid for this geo is {}'.format(geo_rid) + '\\nthe user rid in input", "print( 'ERROR: No filename is specified for {}'.format(keyword) + 'Script will exit') sys.exit()", "given output') sys.exit() # Check that the rid/cid info matches the filesystem fs_array,", "'6-31+g*' }, 'lvl_wbt': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': 'cc-pvtz'}, 'lvl_m06s':", "rxn_class def parse_user_theory(insert_dct): # Get input method explicitly inputted program = insert_dct['program'] method", "did not specify mult in input') sys.exit() if chg is None: print('Error: user", "backward_gra] rxn_gras = [reactant_gras, product_gras] rxn_smis = [[], []] for i, side in", "sys.exit() # Check that the rid/cid info matches the filesystem fs_array, prefix_array =", "zrxn=zrxn, zma_locs=(0,)) print( 'geometry is now saved at {}'.format(cnf_fs[-1].path(locs))) else: print( 'the geometry", "species' + 'with an inchi or smiles in input') sys.exit() if ich is", "specified for {}'.format(keyword) + 'Script will exit') sys.exit() file_name = dct[keyword] return autofile.io_.read_file(file_name)", "we move on to other reaction types we have to check for double", "freqs, imags, mod_thy_info, {'runlvl_cnf_fs': (cnf_fs, None)}, locs, zma_locs=(0,), zma=zma) else: _save_unique_parsed_conformer( mod_thy_info, cnf_fs,", "= atoms_bnd[oversaturated_atom] if len(bonds) != 2: print('too many bonds to transfered atom for", "method in input') sys.exit() elif basis is None: print('Error: 
user did not specify", "found the following' + # 'frequencies: ' + ','.join(imags)) # sys.exit() else: spc_info", "automol.graph.string(zrxn_i.backward_ts_graph)) # print('bacRXN', automol.graph.string(backward_gra)) # if forward_gra == automol.geom.graph(geo, stereo=False): # zrxn =", "automol.graph.inchi(rgra, stereo=True) except IndexError: rich = automol.graph.inchi(rgra) rsmi = automol.inchi.smiles(rich) ts_ichs[0].append(rich) ts_smis[0].append(rsmi) for", "# dummies.append(dummy + add_idx) # remove_idx = 0 # for idx_i, idx_j in", "!= 1: continue for ts_gra_i in ts_gras: vals = automol.graph.atom_unsaturated_valences(ts_gra_i, bond_order=True) oversaturated_atoms =", "input') sys.exit() elif method is None: print('Error: user did not specify method in", "if geo is None: print( 'No geometry could be parsed from output' +", "flat_ichs = sum(ichs, []) if len(flat_ichs) != len(mults): print( 'Error: number of species", "autofile.schema.info_objects.conformer_trunk(0) rinf_obj.nsamp = 1 if cnf_fs[1].file.info.exists([locs[0]]): cinf_obj = cnf_fs[1].file.info.read(locs[0]) cnsampd = cinf_obj.nsamp cnsampd", "ts_ichs[1] = ts_ichs[1][::-1] ts_smis[1] = ts_smis[1][::-1] product_match = True if reactant_match and product_match:", "'Error: user did not specify a theory {}'.format(theory) + ' that is in", "cnf_fs[1].create([locs[0]]) cnf_fs[0].file.info.write(rinf_obj) cnf_fs[1].file.info.write(cinf_obj, [locs[0]]) hess, freqs, imags = None, None, None if hess", "specified mult of {}'.format(mul) + 'is not an allowed multiplicty for inchi {}'.format(ich))", "at' + ' {}'.format(cnf_fs[0].path()) + '\\nthe expected rid for this geo is {}'.format(geo_rid)", "{}'.format(ich)) sys.exit() rxn_muls[1].append(mults[idx]) rxn_chgs[1].append(chgs[idx]) idx += 1 ts_mult = insert_dct['ts_mult'] if ts_mult is", "out_str = read_user_file(insert_dct, 'output_file') # parse method from insert input file thy_info =", "# print('bacRXN', automol.graph.string(zrxn_i.backward_ts_graph)) # 
print('bacRXN', automol.graph.string(backward_gra)) # if forward_gra == automol.geom.graph(geo, stereo=False): #", "for ich in side: rxn_smis[i].append(automol.inchi.smiles(ich)) ts_smis = [[], []] ts_ichs = [[], []]", "'basis': 'cc-pvtz-f12' }, 'cc_lvl_qf': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvqz-f12'", "'Error: user did not specify species' + 'with an inchi or smiles in", "= False if mul not in mults_allowed: print( 'user specified mult of {}'.format(mul)", "= insert_dct['inchi'] mult = insert_dct['mult'] chg = insert_dct['charge'] if ich is None and", "None for rqh in rqhs: ts_gras = automol.geom.connectivity_graph(geo, rqq_bond_max=3.5, rqh_bond_max=rqh, rhh_bond_max=2.3) ts_gras =", "rxn_chgs = [[], []] for ich in ichs[0]: mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False))", "'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': 'cc-pvtz'}, 'lvl_m06s': { 'orb_res': 'RU', 'program':", "tinfo.modify_orb_label(thy_info, spc_info) locs = parse_user_locs(insert_dct) # Check that the save location matches geo", "'input_file': None, 'output_file': None, 'ts_locs': None, 'ts_mult': None, 'rxn_class': None, 'zrxn_file': None, 'run_path':", "chgs = insert_dct['charge'] rxn_class = insert_dct['rxn_class'] # zrxn_file = insert_dct['zrxn_file'] if ichs is", "that the rid/cid info matches the filesystem fs_array, prefix_array = create_species_filesystems( prefix, spc_info,", "[] for gra in rxn_gras[0]: reactant_keys.append(automol.graph.atom_keys(gra)) product_keys = [] for gra in rxn_gras[1]:", "'inchi']: value = value.replace(' ', '') else: value = value.split(' = ') if", "# for dummy in dummy_key_dct.keys(): # add_idx = 1 # for dumm_j in", "= True rid = locs[0] geo_rid = rng_loc_for_geo(geo, cnf_fs) if geo_rid is not", "ts_gras = automol.graph.connected_components(ts_gras) if len(ts_gras) != 1: continue for ts_gra_i in ts_gras: vals", "information first, to 
make sure # user save specifications match output prog, method,", "not None: hess_inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.HESSIAN, prog=prog, version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) hess_ret =", "dummies: # if dummy > dumm_j: # add_idx += 1 # dummies.append(dummy +", "= ts_smis[0][::-1] reactant_match = True else: ts_ichs = ts_ichs[::-1] ts_smis = ts_smis[::-1] ts_gras", "+ 'keywords instead of theory') sys.exit() return thy_info def create_species_filesystems(prefix, spc_info, mod_thy_info, locs=None):", "'No hessian found in output, cannot save ' + # 'a transition state", "None return ( (spc_fs, thy_fs, cnf_fs), (spc_prefix, thy_prefix, cnf_prefix)) def create_reaction_filesystems( prefix, rxn_info,", "None: print('Error: user did not specify orb_res in input') sys.exit() else: thy_info =", "sys.exit() return std_zrxn, ts_zma, ts_geo, rxn_info def main(insert_dct): prefix = read_user_filesystem(insert_dct) # Read", "'lvl_wbs': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': '6-31g*' }, 'lvl_wbm': {", "read_user_file(dct, keyword): if dct[keyword] is None: print( 'ERROR: No filename is specified for", "'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvqz-f12' }, 'mlvl_cas_dz': { 'orb_res': 'RR', 'program':", "insert input file thy_info = parse_user_theory(insert_dct) # parse out geo information first, to", "the script or use program/method/basis/orb_dct' + 'keywords instead of theory') sys.exit() return thy_info", "# backward_gra = automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( # zrxn_i.backward_ts_graph))) # backward_gra =", "zrxn = zrxn_i # zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo) # elif backward_gra", "cnf_fs), (spc_prefix, thy_prefix, cnf_prefix)) def create_reaction_filesystems( prefix, rxn_info, mod_thy_info, ts_locs=None, locs=None): # species", "= 
automol.graph.set_stereo_from_geometry(ts_gras, geo) ts_gras = automol.graph.connected_components(ts_gras) if len(ts_gras) != 1: continue for ts_gra_i", "when doing bond orders forw_bnd_ord_dct = {breaking_bond: 0.9, forming_bond: 0.1} back_bnd_ord_dct = {breaking_bond:", "automol.inchi.add_stereo(ich) ichs[1][idx] = ich if mults is None: print('Error: user did not specify", "for idx, ich in enumerate(ichs[0]): if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) ichs[0][idx] =", "backward_gra = automol.graph.set_bond_orders(ts_gra, back_bnd_ord_dct) reactant_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(forward_gra)) reactant_gras = automol.graph.connected_components(reactant_gras) product_gras =", "log file \"\"\" import sys import os import autofile import automol from mechanalyzer.inf", "= (int(value),) elif keyword in ['rxn_class']: # strip whitespaces form either side of", "[[], []] for reactant in reactants: values[0].append(reactant.replace(' ', '')) for product in products:", "# theory filesystem thy_fs = autofile.fs.theory(rxn_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) if ts_locs is", "info in user given output') sys.exit() inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.OPTIMIZATION, prog=prog, version='', method=method,", "add_idx += 1 # dummies.append(dummy + add_idx) # remove_idx = 0 # for", "'save_filesystem': None, 'smiles': None, 'inchi': None, 'mult': None, 'charge': None, 'rid': None, 'cid':", "'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvqz' }, 'lvl_b3s': { 'orb_res': 'RU', 'program': 'gaussian09', 'method':", "!= 2: print('too many bonds to transfered atom for me to figure out')", "'rxn_class': None, 'zrxn_file': None, 'run_path': None, 'saddle': False, } for i, line in", "zma, ene), inf_obj, inp_str, zrxn=zrxn, zma_locs=(0,)) print( 'geometry is now saved at {}'.format(cnf_fs[-1].path(locs)))", "theory = 
insert_dct['theory'] if theory is None: if program is None: print('Error: user", "user did not specify method in input') sys.exit() elif basis is None: print('Error:", "= True ich, _, mul = spc_info mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) geo_ich", "in input') sys.exit() if ich is None: ich = automol.smiles.inchi(smi) if not automol.inchi.is_complete(ich):", "if idx_i in dummies: # remove_idx -= 1 # else: # geo_reorder_dct[idx_i +", "bond orders forw_bnd_ord_dct = {breaking_bond: 0.9, forming_bond: 0.1} back_bnd_ord_dct = {breaking_bond: 0.1, forming_bond:", "_fragment_ring_geo from mechroutines.es._routines._sadpt import save_saddle_point from mechlib.reaction.rxnid import _id_reaction THEORY_DCT = { 'lvl_wbs':", "not specify mult in input') sys.exit() if chg is None: print('Error: user did", "parse_user_theory(insert_dct): # Get input method explicitly inputted program = insert_dct['program'] method = insert_dct['method']", "None: print( 'Error: user did not specify species' + 'with an inchi or", "in input') sys.exit() elif basis is None: print('Error: user did not specify basis", "is not None: # zrxn_str = autofile.io_.read_file(zrxn_file) # zrxns = [automol.reac.from_string(zrxn_str)] # else:", "prefix, rxn_info, mod_thy_info, ts_locs=insert_dct['ts_locs'], locs=None) cnf_fs = fs_array[-1] if not locs_match(geo, cnf_fs, locs):", "a single' + # 'imaginary frequency, projrot found the following' + # 'frequencies:", "values = [[], []] for reactant in reactants: values[0].append(reactant.replace(' ', '')) for product", "' + # 'zrxns, which are the following') # for zrxn_i in zrxns:", "print( 'ERROR: line\\n({}) {}\\n is not parsable, '.format(i, line) + 'script will exit", "ich, _, mul = spc_info mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) geo_ich = automol.geom.inchi(geo,", "multiplicty for inchi {}'.format(ich)) match = False return match def 
locs_match(geo, cnf_fs, locs):", "for zrxn_i in zrxns: # forw_form_key = automol.reac.forming_bond_keys(zrxn_i) # back_form_key = automol.reac.forming_bond_keys(zrxn_i, rev=True)", "smi in smis[0]: ichs[0].append(automol.smiles.inchi(smi)) for smi in smis[1]: ichs[1].append(automol.smiles.inchi(smi)) for idx, ich in", "species filesystem spc_fs = autofile.fs.species(prefix) spc_fs[-1].create(spc_info) spc_prefix = spc_fs[-1].path(spc_info) # theory filesystem thy_fs", "ichs[0]: mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) if mults[idx] not in mults_allowed: print( 'user", "automol.graph.without_fractional_bonds( # zrxn_i.backward_ts_graph))) # backward_gra = automol.graph.add_bonds(backward_gra, back_form_key) # if zrxn_i.class_ == 'hydrogen", "'molpro2015', 'method': 'caspt2', 'basis': 'cc-pvdz'}, 'mlvl_cas_tz': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'caspt2',", "[atm for atm, val in vals.items() if val < 0] if len(oversaturated_atoms) ==", "will exit until input is resolved to avoid' + ' filesystem contamination.' 
+", "create_species_filesystems(prefix, spc_info, mod_thy_info, locs=None): # species filesystem spc_fs = autofile.fs.species(prefix) spc_fs[-1].create(spc_info) spc_prefix =", "elstruct.reader.hessian(prog, out_str) # Hess = None # If hess is None: # print(", "'geometry is now saved at {}'.format(cnf_fs[-1].path(locs))) else: print( 'the geometry in the output", "'b2plypd3', 'basis': 'cc-pvtz'}, 'lvl_b2q': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvqz'", "True elif ts_smis[0][::-1] == rxn_smis[0]: ts_ichs[0] = ts_ichs[0][::-1] ts_smis[0] = ts_smis[0][::-1] reactant_match =", "output {}'.format(geo_ich) + 'which is based on geometry from output:\\n' + '{}'.format(automol.geom.string(geo))) match", "save location zrxn = None if insert_dct['saddle']: rxn_info, spc_info, rxn_class = parse_user_reaction(insert_dct) zrxn,", "= [[], []] for reactant in reactants: values[0].append(reactant.replace(' ', '')) for product in", "_saved_cnf_info from mechroutines.es._routines.conformer import _sym_unique from mechroutines.es._routines.conformer import _save_unique_parsed_conformer from mechroutines.es._routines.conformer import _geo_unique", "'frequencies: ' + ','.join(imags)) # sys.exit() else: spc_info = parse_user_species(insert_dct) mod_thy_info = tinfo.modify_orb_label(thy_info,", "dummy_key_dct.keys(): # add_idx = 1 # for dumm_j in dummies: # if dummy", "# hess = elstruct.reader.hessian(prog, out_str) # Hess = None # If hess is", "match = False return match def rng_loc_for_geo(geo, cnf_fs): rid = None frag_geo =", "i, line in enumerate(script_input): if len(line) < 2: continue elif '!' 
in line[0]:", "'program': 'gaussian09', 'method': 'wb97xd', 'basis': '6-31+g*' }, 'lvl_wbt': { 'orb_res': 'RU', 'program': 'gaussian09',", "thy_fs = autofile.fs.theory(rxn_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) if ts_locs is None: ts_locs =", "for zrxn_i in zrxns: # print(zrxns) # sys.exit() # # hess = elstruct.reader.hessian(prog,", "if dummy > dumm_j: # add_idx += 1 # dummies.append(dummy + add_idx) #", "rxn_gras[0]: reactant_keys.append(automol.graph.atom_keys(gra)) product_keys = [] for gra in rxn_gras[1]: product_keys.append(automol.graph.atom_keys(gra)) std_rxn = automol.reac.Reaction(", "'method': None, 'basis': None, 'orb_res': None, 'input_file': None, 'output_file': None, 'ts_locs': None, 'ts_mult':", "in input') sys.exit() flat_ichs = sum(ichs, []) if len(flat_ichs) != len(mults): print( 'Error:", "None: print('Error: user did not specify mults in input') sys.exit() if chgs is", "= run_fs[-1].path(['hessian']) # run_fs[-1].create(['hessian']) # script_str = autorun.SCRIPT_DCT['projrot'] # freqs, _, imags, _", "back_bnd_ord_dct = {breaking_bond: 0.1, forming_bond: 0.9} forward_gra = automol.graph.set_bond_orders(ts_gra, forw_bnd_ord_dct) backward_gra = automol.graph.set_bond_orders(ts_gra,", "False return match def locs_match(geo, cnf_fs, locs): match = True rid = locs[0]", "print( 'Error: user did not specify ts_mul') sys.exit() rxn_info = rinfo.sort((ichs, rxn_chgs, rxn_muls,", "is resolved to avoid' + ' filesystem contamination.' 
+ 'Allowed keywords are:\\n' +", "is not None: hess_inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.HESSIAN, prog=prog, version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) hess_ret", "= thy_fs[-1].path(mod_thy_info[1:]) if ts_locs is None: ts_locs = (0,) ts_fs = autofile.fs.transition_state(thy_prefix) ts_fs[-1].create(ts_locs)", "run_fs[-1].path(['hessian']) # run_fs[-1].create(['hessian']) # script_str = autorun.SCRIPT_DCT['projrot'] # freqs, _, imags, _ =", "ts_mult = insert_dct['ts_mult'] if ts_mult is None: print( 'Error: user did not specify", "of {}'.format(mults[idx]) + 'is not an allowed multiplicty for inchi {}'.format(ich)) sys.exit() rxn_muls[0].append(mults[idx])", "output prog, method, basis, _ = thy_info ene = elstruct.reader.energy(prog, method, out_str) geo", "did not specify species' + 'with an inchi or smiles in input') sys.exit()", "dct[keyword] return autofile.io_.read_file(file_name) def read_user_filesystem(dct): if dct['save_filesystem'] is None: print( 'ERROR: No save_filesystem}'", "= spc_fs[-1].path(spc_info) # theory filesystem thy_fs = autofile.fs.theory(spc_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) #", "match = True ich, _, mul = spc_info mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False))", "'please add it to the dct in the script or use program/method/basis/orb_dct' +", "1 # for dumm_j in dummies: # if dummy > dumm_j: # add_idx", "# print( # 'Your geometry did not match any of the attempted '", "prefix_array = create_reaction_filesystems( prefix, rxn_info, mod_thy_info, ts_locs=insert_dct['ts_locs'], locs=None) cnf_fs = fs_array[-1] if not", "is None and smi is None: print( 'Error: user did not specify species'", "and smi is None: print( 'Error: user did not specify species' + 'with", "(cnf_fs, None)}, locs, zma_locs=(0,), zma=zma) else: _save_unique_parsed_conformer( mod_thy_info, cnf_fs, locs, 
(geo, zma, ene),", "transfered atom for me to figure out') print('I promise i will be smarter", "'ERROR: No filename is specified for {}'.format(keyword) + 'Script will exit') sys.exit() file_name", "locs=None) cnf_fs = fs_array[-1] if not locs_match(geo, cnf_fs, locs): print( 'I refuse to", "not None: # zrxn_str = autofile.io_.read_file(zrxn_file) # zrxns = [automol.reac.from_string(zrxn_str)] # else: #", "line) + 'script will exit until input is resolved to avoid' + '", "= insert_dct['program'] method = insert_dct['method'] basis = insert_dct['basis'] orb_res = insert_dct['orb_res'] # Get", "{ 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvtz-f12' }, 'cc_lvl_qf': { 'orb_res':", "'basis': 'cc-pvtz'}, 'lvl_m06s': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': '6-31g*' },", "'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvdz'}, 'lvl_b2t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method':", "= automol.graph.inchi(pgra) psmi = automol.inchi.smiles(pich) ts_ichs[1].append(pich) ts_smis[1].append(psmi) reactant_match = False product_match = False", "# print('forRXN', automol.graph.string(zrxn_i.forward_ts_graph)) # print('forRXN', automol.graph.string(forward_gra)) # print('bacRXN', automol.graph.string(zrxn_i.backward_ts_graph)) # print('bacRXN', automol.graph.string(backward_gra)) #", "[] for locs in cnf_fs[-1].existing(): current_rid, _ = locs if current_rid in checked_rids:", "following' + # 'frequencies: ' + ','.join(imags)) # sys.exit() else: spc_info = parse_user_species(insert_dct)", "= read_user_filesystem(insert_dct) # Read in the input and output files that we #", "= autofile.fs.run(run_path) # freq_run_path = run_fs[-1].path(['hessian']) # run_fs[-1].create(['hessian']) # script_str = autorun.SCRIPT_DCT['projrot'] #", "= automol.inchi.smiles(rich) ts_ichs[0].append(rich) ts_smis[0].append(rsmi) for pgra in product_gras: try: pich = automol.graph.inchi(pgra, stereo=True)", "cnf_fs = 
fs_array[-1] if not locs_match(geo, cnf_fs, locs): print( 'I refuse to save", "= [reactant_gras, product_gras] rxn_smis = [[], []] for i, side in enumerate(rxn_info[0]): for", "out_str) save_saddle_point( zrxn, ret, hess_ret, freqs, imags, mod_thy_info, {'runlvl_cnf_fs': (cnf_fs, None)}, locs, zma_locs=(0,),", "_ = automol.reac.ts_zmatrix(zrxn, geo) # elif backward_gra == automol.geom.graph(geo, stereo=False): # zrxn =", "rxn_class') sys.exit() return rxn_info, ts_info, rxn_class def parse_user_theory(insert_dct): # Get input method explicitly", "stereo=False): # zrxn = automol.reac.reverse(zrxn_i) # zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo) #", "[locs[0]]) hess, freqs, imags = None, None, None if hess is not None", "for smi in smis[0]: ichs[0].append(automol.smiles.inchi(smi)) for smi in smis[1]: ichs[1].append(automol.smiles.inchi(smi)) for idx, ich", "= value.split(' = ') if len(value) > 1: insert_dct['saddle'] = True reactants, products", "user did not specify a theory {}'.format(theory) + ' that is in the", "= automol.geom.zmatrix(frag_geo) checked_rids = [] for locs in cnf_fs[-1].existing(): current_rid, _ = locs", "'mlvl_cas_dz': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'caspt2', 'basis': 'cc-pvdz'}, 'mlvl_cas_tz': { 'orb_res':", "'{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() keyword, value = line.split(':') if keyword in insert_dct: if 'None'", "the following') # for zrxn_i in zrxns: # print(zrxns) # sys.exit() # #", "create_species_filesystems( prefix, spc_info, mod_thy_info, locs=None) else: fs_array, prefix_array = create_reaction_filesystems( prefix, rxn_info, mod_thy_info,", "insert_dct['smiles'] ich = insert_dct['inchi'] mult = insert_dct['mult'] chg = insert_dct['charge'] if ich is", "ts_smis[0][::-1] == rxn_smis[0]: ts_ichs[0] = ts_ichs[0][::-1] ts_smis[0] = ts_smis[0][::-1] reactant_match = True else:", "user specified save location zrxn = None if insert_dct['saddle']: rxn_info, spc_info, rxn_class =", 
"reactant_match = True else: ts_ichs = ts_ichs[::-1] ts_smis = ts_smis[::-1] ts_gras = ts_gras[::-1]", "ts_gra_i in ts_gras: vals = automol.graph.atom_unsaturated_valences(ts_gra_i, bond_order=True) oversaturated_atoms = [atm for atm, val", "not None: cnf_fs[-1].create(locs) cnf_prefix = cnf_fs[-1].path(locs) else: cnf_prefix = None return ( (spc_fs,", "= [[], []] for ich in ichs[0]: mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) if", "automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) if mults[idx] not in mults_allowed: print( 'user specified mult of", "= ts_smis[0][::-1] reactant_match = True if reactant_match: if ts_smis[1] == rxn_smis[1]: product_match =", "sys.exit() rxn_muls[1].append(mults[idx]) rxn_chgs[1].append(chgs[idx]) idx += 1 ts_mult = insert_dct['ts_mult'] if ts_mult is None:", "zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo) # if zrxn is None: # print(", "ts_prefix = ts_fs[-1].path(ts_locs) # conformer cnf_fs = autofile.fs.conformer(ts_prefix) if locs is not None:", "doing bond orders forw_bnd_ord_dct = {breaking_bond: 0.9, forming_bond: 0.1} back_bnd_ord_dct = {breaking_bond: 0.1,", "val.replace(' ', '') value = ' '.join(value) elif keyword not in ['smiles', 'inchi']:", "strip whitespaces form either side of reaction # class but not in between", "sys.exit() # # hess = elstruct.reader.hessian(prog, out_str) # Hess = None # If", "'method': 'wb97xd', 'basis': '6-31+g*' }, 'lvl_wbt': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd',", "autofile.io_.read_file(script_input_file).splitlines() insert_dct = { 'save_filesystem': None, 'smiles': None, 'inchi': None, 'mult': None, 'charge':", "enumerate(ichs[1]): if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) ichs[1][idx] = ich if mults is", "from theory dictionary theory = insert_dct['theory'] if theory is None: if program is", "'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvtz'}, 
'cc_lvl_q': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)',", "'Error: number of species does not match number of charges') sys.exit() idx =", "val in vals.items() if val < 0] if len(oversaturated_atoms) == 1: chosen_ts_gra =", "len(mults): print( 'Error: number of species does not match number of mults') sys.exit()", "mult of {}'.format(mul) + 'is not an allowed multiplicty for inchi {}'.format(ich)) match", "forming_bond: 0.9} forward_gra = automol.graph.set_bond_orders(ts_gra, forw_bnd_ord_dct) backward_gra = automol.graph.set_bond_orders(ts_gra, back_bnd_ord_dct) reactant_gras = automol.graph.without_dummy_bonds(", "from mechroutines.es._routines.conformer import _geo_unique from mechroutines.es._routines.conformer import _fragment_ring_geo from mechroutines.es._routines._sadpt import save_saddle_point from", "cnf_prefix = None return ( (rxn_fs, thy_fs, ts_fs, cnf_fs), (rxn_prefix, thy_prefix, ts_prefix, cnf_prefix))", "automol.graph.add_bonds(forward_gra, forw_form_key) # backward_gra = automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( # zrxn_i.backward_ts_graph))) #", "None, 'method': None, 'basis': None, 'orb_res': None, 'input_file': None, 'output_file': None, 'ts_locs': None,", "chg = insert_dct['charge'] if ich is None and smi is None: print( 'Error:", "of charges') sys.exit() idx = 0 rxn_muls = [[], []] rxn_chgs = [[],", "'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': '6-31g*' }, 'lvl_m06m': { 'orb_res': 'RU',", "rid = insert_dct['rid'] cid = insert_dct['cid'] if rid is None: rid = autofile.schema.generate_new_ring_id()", "_fragment_ring_geo(locs_geo) if frag_locs_geo is None: rid = locs[0] break frag_locs_zma = automol.geom.zmatrix(frag_locs_geo) if", "'basis': 'cc-pvqz' }, 'lvl_b3s': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': '6-31g*'", "+ ' and method matches' + ' {}'.format(method)) sys.exit() # Parse out user", 
"parse_user_reaction(insert_dct) zrxn, zma, geo, rxn_info = get_zrxn(geo, rxn_info, rxn_class) # for zrxn_i in", "( (rxn_fs, thy_fs, ts_fs, cnf_fs), (rxn_prefix, thy_prefix, ts_prefix, cnf_prefix)) def read_user_file(dct, keyword): if", "species does not match number of charges') sys.exit() idx = 0 rxn_muls =", "not specify program in input') sys.exit() elif method is None: print('Error: user did", "[]) if len(flat_ichs) != len(mults): print( 'Error: number of species does not match", "zrxns: # print(zrxns) # sys.exit() # # hess = elstruct.reader.hessian(prog, out_str) # Hess", "None: print( 'Error: user did not specify ts_mul') sys.exit() rxn_info = rinfo.sort((ichs, rxn_chgs,", "ts_ichs[1][::-1] ts_smis[1] = ts_smis[1][::-1] product_match = True if reactant_match and product_match: reactant_keys =", "geo_ich = automol.geom.inchi(geo, stereo=True) if ich != geo_ich: print( 'user specified inchi {}'.format(ich)", "'basis': '6-31+g*' }, 'lvl_m06t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': 'cc-pvtz'},", "None: cnf_fs[-1].create(locs) cnf_prefix = cnf_fs[-1].path(locs) else: cnf_prefix = None return ( (rxn_fs, thy_fs,", "# back_brk_key = automol.reac.breaking_bond_keys(zrxn_i, rev=True) # forward_gra = automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds(", "matches' + ' {}'.format(method)) sys.exit() # Parse out user specified save location zrxn", "either side of reaction # class but not in between words value =", "user did not specify basis in input') sys.exit() elif orb_res is None: print('Error:", "[automol.reac.from_string(zrxn_str)] # else: # zrxns, _ = _id_reaction(rxn_info) if rxn_class is None: print(", "}, 'lvl_wbt': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': 'cc-pvtz'}, 'lvl_m06s': {", "info matches the info in user given output') sys.exit() inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.OPTIMIZATION,", "explicitly 
inputted program = insert_dct['program'] method = insert_dct['method'] basis = insert_dct['basis'] orb_res =", "!= len(chgs): print( 'Error: number of species does not match number of charges')", "+ # 'zrxns, which are the following') # for zrxn_i in zrxns: #", "= automol.inchi.add_stereo(ich) if mult is None: print('Error: user did not specify mult in", "remove_idx -= 1 # else: # geo_reorder_dct[idx_i + remove_idx] = idx_j # ts_geo", "reactant_match = True if reactant_match: if ts_smis[1] == rxn_smis[1]: product_match = True elif", "current_rid, _ = locs if current_rid in checked_rids: continue if cnf_fs[-1].file.geometry.exists(locs): checked_rids.append(current_rid) locs_geo", "line = line.split('!')[0] if ':' not in line: print( 'ERROR: line\\n({}) {}\\n is", "mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) if mults[idx] not in mults_allowed: print( 'user specified", "cnf_fs[0].file.info.write(rinf_obj) cnf_fs[1].file.info.write(cinf_obj, [locs[0]]) hess, freqs, imags = None, None, None if hess is", "if zrxn_file is not None: # zrxn_str = autofile.io_.read_file(zrxn_file) # zrxns = [automol.reac.from_string(zrxn_str)]", "+= 1 # dummies.append(dummy + add_idx) # remove_idx = 0 # for idx_i,", "line: print( 'ERROR: line\\n({}) {}\\n is not parsable, '.format(i, line) + 'script will", "if ich is None and smi is None: print( 'Error: user did not", "= automol.graph.remove_bonds(backward_gra, back_brk_key) # print('forRXN', automol.graph.string(zrxn_i.forward_ts_graph)) # print('forRXN', automol.graph.string(forward_gra)) # print('bacRXN', automol.graph.string(zrxn_i.backward_ts_graph)) #", "on geometry from output:\\n' + '{}'.format(automol.geom.string(geo))) match = False if mul not in", "unique to filesystem' + '... 
not saving') def species_match(geo, spc_info): match = True", "ts_gra, oversaturated_atom = choose_cutoff_distance(geo) atoms_bnd = automol.graph.atoms_bond_keys(ts_gra) bonds = atoms_bnd[oversaturated_atom] if len(bonds) !=", "'ts_mult']: values = [] for val in value.split(','): values.append(int(val)) if len(values) == 1:", "# zrxn_str = autofile.io_.read_file(zrxn_file) # zrxns = [automol.reac.from_string(zrxn_str)] # else: # zrxns, _", "= automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( # zrxn_i.backward_ts_graph))) # backward_gra = automol.graph.add_bonds(backward_gra, back_form_key)", "# species filesystem spc_fs = autofile.fs.species(prefix) spc_fs[-1].create(spc_info) spc_prefix = spc_fs[-1].path(spc_info) # theory filesystem", "i, side in enumerate(rxn_info[0]): for ich in side: rxn_smis[i].append(automol.inchi.smiles(ich)) ts_smis = [[], []]", "species filesystem print('rxn_info', rxn_info) rxn_fs = autofile.fs.reaction(prefix) sort_rxn_info = rinfo.sort(rxn_info, scheme='autofile') rxn_fs[-1].create(sort_rxn_info) rxn_prefix", "# 'No hessian found in output, cannot save ' + # 'a transition", "if run_path is None: # run_path = os.getcwd() # run_fs = autofile.fs.run(run_path) #", "saved_enes) if sym_id is None: if cnf_fs[0].file.info.exists(): rinf_obj = cnf_fs[0].file.info.read() else: rinf_obj =", "len(bonds) != 2: print('too many bonds to transfered atom for me to figure", "# automol.graph.without_fractional_bonds( # zrxn_i.backward_ts_graph))) # backward_gra = automol.graph.add_bonds(backward_gra, back_form_key) # if zrxn_i.class_ ==", "'program': 'gaussian09', 'method': 'm062x', 'basis': '6-31+g*' }, 'lvl_m06t': { 'orb_res': 'RU', 'program': 'gaussian09',", "words value = value.split() for i, val in enumerate(value): value[i] = val.replace(' ',", "specify method in input') sys.exit() elif basis is None: print('Error: user did not", "dumm_j in dummies: # if dummy > dumm_j: # add_idx 
+= 1 #", "an allowed multiplicty for inchi {}'.format(ich)) sys.exit() rxn_muls[1].append(mults[idx]) rxn_chgs[1].append(chgs[idx]) idx += 1 ts_mult", "'output_file') # parse method from insert input file thy_info = parse_user_theory(insert_dct) # parse", "of the attempted ' + # 'zrxns, which are the following') # for", "locs[0] break frag_locs_zma = automol.geom.zmatrix(frag_locs_geo) if automol.zmat.almost_equal( frag_locs_zma, frag_zma, dist_rtol=0.1, ang_atol=.4): rid =", "None, 'theory': None, 'program': None, 'method': None, 'basis': None, 'orb_res': None, 'input_file': None,", "values[0].append(reactant.replace(' ', '')) for product in products: values[1].append(product.replace(' ', '')) value = values", "rxn_chgs[0].append(chgs[idx]) idx += 1 for ich in ichs[1]: mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False))", "cnf_fs = autofile.fs.conformer(thy_prefix) if locs is not None: cnf_fs[-1].create(locs) cnf_prefix = cnf_fs[-1].path(locs) else:", "frag_geo = _fragment_ring_geo(geo) if frag_geo is not None: frag_zma = automol.geom.zmatrix(frag_geo) checked_rids =", "number of charges') sys.exit() idx = 0 rxn_muls = [[], []] rxn_chgs =", "= thy_fs[-1].path(mod_thy_info[1:]) # conformer cnf_fs = autofile.fs.conformer(thy_prefix) if locs is not None: cnf_fs[-1].create(locs)", "spc_info mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) geo_ich = automol.geom.inchi(geo, stereo=True) if ich !=", "locs_geo = cnf_fs[-1].file.geometry.read(locs) frag_locs_geo = _fragment_ring_geo(locs_geo) if frag_locs_geo is None: rid = locs[0]", "except IndexError: pich = automol.graph.inchi(pgra) psmi = automol.inchi.smiles(pich) ts_ichs[1].append(pich) ts_smis[1].append(psmi) reactant_match = False", "create_reaction_filesystems( prefix, rxn_info, mod_thy_info, ts_locs=insert_dct['ts_locs'], locs=None) cnf_fs = fs_array[-1] if not locs_match(geo, cnf_fs,", "as tinfo from 
mechanalyzer.inf import rxn as rinfo from mechanalyzer.inf import spc as", "Read in the input and output files that we # Are inserting into", "val in value.split(','): values.append(int(val)) if len(values) == 1: value = values[0] else: value", "= automol.graph.inchi(pgra, stereo=True) except IndexError: pich = automol.graph.inchi(pgra) psmi = automol.inchi.smiles(pich) ts_ichs[1].append(pich) ts_smis[1].append(psmi)", "{}'.format(cnf_fs[-1].path(locs))) else: print( 'the geometry in the output is not unique to filesystem'", "mults_allowed: print( 'user specified mult of {}'.format(mults[idx]) + 'is not an allowed multiplicty", "= ts_fs[-1].path(ts_locs) # conformer cnf_fs = autofile.fs.conformer(ts_prefix) if locs is not None: cnf_fs[-1].create(locs)", "will be smarter in the future') sys.exit() breaking_bond, forming_bond = bonds # when", "cnf_fs[-1].create(locs) cnf_prefix = cnf_fs[-1].path(locs) else: cnf_prefix = None return ( (spc_fs, thy_fs, cnf_fs),", "= ts_gras[::-1] rxn_gras = rxn_gras[::-1] if ts_smis[0] == rxn_smis[0]: reactant_match = True elif", "parse method from insert input file thy_info = parse_user_theory(insert_dct) # parse out geo", "print( 'user specified mult of {}'.format(mults[idx]) + 'is not an allowed multiplicty for", "ichs[0].append(automol.smiles.inchi(smi)) for smi in smis[1]: ichs[1].append(automol.smiles.inchi(smi)) for idx, ich in enumerate(ichs[0]): if not", "promise i will be smarter in the future') sys.exit() breaking_bond, forming_bond = bonds", "for idx, ich in enumerate(ichs[1]): if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) ichs[1][idx] =", "figure out which H is being transfered') sys.exit() return chosen_ts_gra, chosen_oversaturated_atom def get_zrxn(geo,", "len(flat_ichs) != len(chgs): print( 'Error: number of species does not match number of", "method from insert input file thy_info = parse_user_theory(insert_dct) # parse out geo information", "not parsable, '.format(i, line) + 'script will 
exit until input is resolved to", "keyword in insert_dct: if 'None' in value: value = None elif keyword in", "dummy in dummy_key_dct.keys(): # add_idx = 1 # for dumm_j in dummies: #", "geo) # if zrxn is None: # print( # 'Your geometry did not", "None, 'program': None, 'method': None, 'basis': None, 'orb_res': None, 'input_file': None, 'output_file': None,", "= 1 # for dumm_j in dummies: # if dummy > dumm_j: #", "None: ich = automol.smiles.inchi(smi) if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) if mult is", "reactant_match and product_match: reactant_keys = [] for gra in rxn_gras[0]: reactant_keys.append(automol.graph.atom_keys(gra)) product_keys =", "'caspt2', 'basis': 'cc-pvdz'}, 'mlvl_cas_tz': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'caspt2', 'basis': 'cc-pvtz'}}", "None: rid = autofile.schema.generate_new_ring_id() if cid is None: cid = autofile.schema.generate_new_conformer_id() return (rid,", "is None: ts_locs = (0,) ts_fs = autofile.fs.transition_state(thy_prefix) ts_fs[-1].create(ts_locs) ts_prefix = ts_fs[-1].path(ts_locs) #", "# strip whitespaces form either side of reaction # class but not in", "specifications match output prog, method, basis, _ = thy_info ene = elstruct.reader.energy(prog, method,", "def parse_user_species(insert_dct): smi = insert_dct['smiles'] ich = insert_dct['inchi'] mult = insert_dct['mult'] chg =", "+ 'did not match those specified in user input') sys.exit() return std_zrxn, ts_zma,", "= autorun.SCRIPT_DCT['projrot'] # freqs, _, imags, _ = autorun.projrot.frequencies( # script_str, freq_run_path, [geo],", "'Allowed keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() keyword, value = line.split(':') if keyword", "thy_info = tinfo.from_dct(THEORY_DCT[theory]) else: print( 'Error: user did not specify a theory {}'.format(theory)", "ichs = insert_dct['inchi'] mults = insert_dct['mult'] chgs = insert_dct['charge'] rxn_class = insert_dct['rxn_class'] #", "it to the 
dct in the script or use program/method/basis/orb_dct' + 'keywords instead", "= _saved_cnf_info( cnf_fs, mod_thy_info) if _geo_unique(geo, ene, saved_geos, saved_enes, zrxn=zrxn): sym_id = _sym_unique(", "frag_locs_geo = _fragment_ring_geo(locs_geo) if frag_locs_geo is None: rid = locs[0] break frag_locs_zma =", "if sym_id is None: if cnf_fs[0].file.info.exists(): rinf_obj = cnf_fs[0].file.info.read() else: rinf_obj = autofile.schema.info_objects.conformer_trunk(0)", "# zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo) # elif backward_gra == automol.geom.graph(geo, stereo=False):", "'basis': 'cc-pvdz-f12' }, 'cc_lvl_tf': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvtz-f12'", "'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': '6-311g**' }, 'lvl_b3t': { 'orb_res': 'RU', 'program':", "(program, method, basis, orb_res) else: if theory in THEORY_DCT: thy_info = tinfo.from_dct(THEORY_DCT[theory]) else:", "= locs if current_rid in checked_rids: continue if cnf_fs[-1].file.geometry.exists(locs): checked_rids.append(current_rid) locs_geo = cnf_fs[-1].file.geometry.read(locs)", "= create_reaction_filesystems( prefix, rxn_info, mod_thy_info, ts_locs=insert_dct['ts_locs'], locs=None) cnf_fs = fs_array[-1] if not locs_match(geo,", "from mechroutines.es._routines.conformer import _save_unique_parsed_conformer from mechroutines.es._routines.conformer import _geo_unique from mechroutines.es._routines.conformer import _fragment_ring_geo from", "[] for gra in rxn_gras[1]: product_keys.append(automol.graph.atom_keys(gra)) std_rxn = automol.reac.Reaction( rxn_class, *ts_gras, reactant_keys, product_keys)", "filesystem inp_str = read_user_file(insert_dct, 'input_file') out_str = read_user_file(insert_dct, 'output_file') # parse method from", "= ts_ichs[0][::-1] ts_smis[0] = ts_smis[0][::-1] reactant_match = True if reactant_match: if ts_smis[1] ==", "0 # for idx_i, idx_j in enumerate(zma_keys): # if idx_i in dummies: #", "else: print( 'the 
geometry in the output is not unique to filesystem' +", "backward_gra == automol.geom.graph(geo, stereo=False): # zrxn = automol.reac.reverse(zrxn_i) # zma, _, _ =", "in ['smiles', 'inchi']: value = value.replace(' ', '') else: value = value.split(' =", "automol.graph.atoms_bond_keys(ts_gra) bonds = atoms_bnd[oversaturated_atom] if len(bonds) != 2: print('too many bonds to transfered", "'basis': 'cc-pvtz'}, 'cc_lvl_q': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvqz' },", "insert_dct['ts_mult'] if ts_mult is None: print( 'Error: user did not specify ts_mul') sys.exit()", "mult in input') sys.exit() if chg is None: print('Error: user did not specify", "filename is specified for {}'.format(keyword) + 'Script will exit') sys.exit() file_name = dct[keyword]", "run_fs = autofile.fs.run(run_path) # freq_run_path = run_fs[-1].path(['hessian']) # run_fs[-1].create(['hessian']) # script_str = autorun.SCRIPT_DCT['projrot']", "tinfo from mechanalyzer.inf import rxn as rinfo from mechanalyzer.inf import spc as sinfo", "If hess is None: # print( # 'No hessian found in output, cannot", "reactants.split(' + ') products = products.split(' + ') values = [[], []] for", "did not specify program in input') sys.exit() elif method is None: print('Error: user", "'cc-pvdz'}, 'cc_lvl_t': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvtz'}, 'cc_lvl_q': {", "locs_match(geo, cnf_fs, locs): match = True rid = locs[0] geo_rid = rng_loc_for_geo(geo, cnf_fs)", "product_match = False if ts_smis[0] == rxn_smis[0]: reactant_match = True elif ts_smis[0][::-1] ==", "'lvl_b3s': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': '6-31g*' }, 'lvl_b3mg': {", "= None for rqh in rqhs: ts_gras = automol.geom.connectivity_graph(geo, rqq_bond_max=3.5, rqh_bond_max=rqh, rhh_bond_max=2.3) ts_gras", "IndexError: pich = automol.graph.inchi(pgra) psmi = automol.inchi.smiles(pich) ts_ichs[1].append(pich) ts_smis[1].append(psmi) 
reactant_match = False product_match", "# if len(imags) != 1: # print( # 'Can only save a transition", "{}'.format(rid)) match = False return match def rng_loc_for_geo(geo, cnf_fs): rid = None frag_geo", "parse_user_species(insert_dct): smi = insert_dct['smiles'] ich = insert_dct['inchi'] mult = insert_dct['mult'] chg = insert_dct['charge']", "spc_prefix = spc_fs[-1].path(spc_info) # theory filesystem thy_fs = autofile.fs.theory(spc_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:])", "reactant_match: if ts_smis[1] == rxn_smis[1]: product_match = True elif ts_smis[1][::-1] == rxn_smis[-1]: ts_ichs[1]", "insert_dct['rxn_class'] # zrxn_file = insert_dct['zrxn_file'] if ichs is None: ichs = [[], []]", "match def locs_match(geo, cnf_fs, locs): match = True rid = locs[0] geo_rid =", "= automol.graph.set_bond_orders(ts_gra, forw_bnd_ord_dct) backward_gra = automol.graph.set_bond_orders(ts_gra, back_bnd_ord_dct) reactant_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(forward_gra)) reactant_gras =", "save specifications match output prog, method, basis, _ = thy_info ene = elstruct.reader.energy(prog,", "return ( (spc_fs, thy_fs, cnf_fs), (spc_prefix, thy_prefix, cnf_prefix)) def create_reaction_filesystems( prefix, rxn_info, mod_thy_info,", "True reactants, products = value reactants = reactants.split(' + ') products = products.split('", "in user given output') sys.exit() # Check that the rid/cid info matches the", "dct['save_filesystem'] is None: print( 'ERROR: No save_filesystem}' + 'Script will exit') sys.exit() return", "len(flat_ichs) != len(mults): print( 'Error: number of species does not match number of", "value.split(','): values.append(int(val)) if len(values) == 1: value = values[0] else: value = values", "sys.exit() return chosen_ts_gra, chosen_oversaturated_atom def get_zrxn(geo, rxn_info, rxn_class): ts_gra, oversaturated_atom = choose_cutoff_distance(geo) atoms_bnd", "(rid, cid) def 
parse_user_species(insert_dct): smi = insert_dct['smiles'] ich = insert_dct['inchi'] mult = insert_dct['mult']", "save_filesystem}' + 'Script will exit') sys.exit() return dct['save_filesystem'] def choose_cutoff_distance(geo): rqhs = [x", "idx += 1 ts_mult = insert_dct['ts_mult'] if ts_mult is None: print( 'Error: user", "double # bonds when doing bond orders forw_bnd_ord_dct = {breaking_bond: 0.9, forming_bond: 0.1}", "keyword not in ['smiles', 'inchi']: value = value.replace(' ', '') else: value =", "rxn_info = rinfo.sort((ichs, rxn_chgs, rxn_muls, ts_mult)) ts_info = rinfo.ts_info(rxn_info) # if zrxn_file is", "to filesystem' + '... not saving') def species_match(geo, spc_info): match = True ich,", "automol.graph.inchi(pgra) psmi = automol.inchi.smiles(pich) ts_ichs[1].append(pich) ts_smis[1].append(psmi) reactant_match = False product_match = False if", "if cid is None: cid = autofile.schema.generate_new_conformer_id() return (rid, cid) def parse_user_species(insert_dct): smi", "run_path = insert_dct['run_path'] # if run_path is None: # run_path = os.getcwd() #", "theory') sys.exit() return thy_info def create_species_filesystems(prefix, spc_info, mod_thy_info, locs=None): # species filesystem spc_fs", "'cc-pvtz'}, 'lvl_b2q': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvqz' }, 'lvl_b3s':", "products.split(' + ') values = [[], []] for reactant in reactants: values[0].append(reactant.replace(' ',", "user did not specify program in input') sys.exit() elif method is None: print('Error:", "+ ' {}'.format(prog) + ' and method matches' + ' {}'.format(method)) sys.exit() #", "= ' '.join(value) elif keyword not in ['smiles', 'inchi']: value = value.replace(' ',", "'basis': 'cc-pvtz'}, 'lvl_b2d': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvdz'}, 'lvl_b2t':", "script_str, freq_run_path, [geo], [[]], [hess]) # if len(imags) != 1: # print( #", "if ts_mult is None: print( 'Error: user did not specify 
ts_mul') sys.exit() rxn_info", "' {}'.format(method)) sys.exit() # Parse out user specified save location zrxn = None", "the dct in the script or use program/method/basis/orb_dct' + 'keywords instead of theory')", "forward_gra = automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( # zrxn_i.forward_ts_graph))) # forward_gra = automol.graph.add_bonds(forward_gra,", "None: if cnf_fs[0].file.info.exists(): rinf_obj = cnf_fs[0].file.info.read() else: rinf_obj = autofile.schema.info_objects.conformer_trunk(0) rinf_obj.nsamp = 1", "rxn_class = insert_dct['rxn_class'] # zrxn_file = insert_dct['zrxn_file'] if ichs is None: ichs =", "number of species does not match number of mults') sys.exit() if len(flat_ichs) !=", "is None: print('Error: user did not specify charges in input') sys.exit() flat_ichs =", "fs_array[-1] if not locs_match(geo, cnf_fs, locs): print( 'I refuse to save this geometry", "+ ' {}'.format(cnf_fs[0].path()) + '\\nthe expected rid for this geo is {}'.format(geo_rid) +", "= insert_dct['inchi'] mults = insert_dct['mult'] chgs = insert_dct['charge'] rxn_class = insert_dct['rxn_class'] # zrxn_file", "pich = automol.graph.inchi(pgra) psmi = automol.inchi.smiles(pich) ts_ichs[1].append(pich) ts_smis[1].append(psmi) reactant_match = False product_match =", "at {}'.format(cnf_fs[-1].path(locs))) else: print( 'the geometry in the output is not unique to", "sys.exit() elif method is None: print('Error: user did not specify method in input')", "output is not unique to filesystem' + '... 
not saving') def species_match(geo, spc_info):", "keyword, value = line.split(':') if keyword in insert_dct: if 'None' in value: value", "elstruct import autorun from mechroutines.es._routines.conformer import _saved_cnf_info from mechroutines.es._routines.conformer import _sym_unique from mechroutines.es._routines.conformer", "dct['save_filesystem'] def choose_cutoff_distance(geo): rqhs = [x * 0.1 for x in range(26, 38,", "values[1].append(product.replace(' ', '')) value = values else: value = value[0].replace(' ', '') print(keyword,", "ich = automol.inchi.add_stereo(ich) ichs[1][idx] = ich if mults is None: print('Error: user did", "{ 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvtz'}, 'cc_lvl_q': { 'orb_res': 'RR',", "'Error: user did not specify ts_mul') sys.exit() rxn_info = rinfo.sort((ichs, rxn_chgs, rxn_muls, ts_mult))", "should be:\\n' + '<Keyword>: <Value>\\n' + 'Allowed keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit()", "values elif keyword in ['ts_locs']: value = (int(value),) elif keyword in ['rxn_class']: #", "cannot save ' + # 'a transition state without a hessian') # sys.exit()", "'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvdz'}, 'lvl_b2t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3',", "bonds # when we move on to other reaction types we have to", "into the filesystem inp_str = read_user_file(insert_dct, 'input_file') out_str = read_user_file(insert_dct, 'output_file') # parse", "if frag_geo is not None: frag_zma = automol.geom.zmatrix(frag_geo) checked_rids = [] for locs", "user did not specify orb_res in input') sys.exit() else: thy_info = (program, method,", "chosen_oversaturated_atom = None for rqh in rqhs: ts_gras = automol.geom.connectivity_graph(geo, rqq_bond_max=3.5, rqh_bond_max=rqh, rhh_bond_max=2.3)", "'6-31g*' }, 'lvl_m06m': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': '6-31+g*' },", "_save_unique_parsed_conformer( 
mod_thy_info, cnf_fs, locs, (geo, zma, ene), inf_obj, inp_str, zrxn=zrxn, zma_locs=(0,)) print( 'geometry", "cnf_fs, locs): print( 'I refuse to save this geometry until user specified' +", "_geo_unique from mechroutines.es._routines.conformer import _fragment_ring_geo from mechroutines.es._routines._sadpt import save_saddle_point from mechlib.reaction.rxnid import _id_reaction", "does not match number of mults') sys.exit() if len(flat_ichs) != len(chgs): print( 'Error:", "'did not match those specified in user input') sys.exit() return std_zrxn, ts_zma, ts_geo,", "'method': 'caspt2', 'basis': 'cc-pvdz'}, 'mlvl_cas_tz': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'caspt2', 'basis':", "'Check that the program matches user specied' + ' {}'.format(prog) + ' and", "automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) ichs[1][idx] = ich if mults is None: print('Error: user", "[[]], [hess]) # if len(imags) != 1: # print( # 'Can only save", "= False if ts_smis[0] == rxn_smis[0]: reactant_match = True elif ts_smis[0][::-1] == rxn_smis[0]:", "IndexError: rich = automol.graph.inchi(rgra) rsmi = automol.inchi.smiles(rich) ts_ichs[0].append(rich) ts_smis[0].append(rsmi) for pgra in product_gras:", "'cc_lvl_df': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvdz-f12' }, 'cc_lvl_tf': {", "not None: cnf_fs[-1].create(locs) cnf_prefix = cnf_fs[-1].path(locs) else: cnf_prefix = None return ( (rxn_fs,", "allowed multiplicty for inchi {}'.format(ich)) sys.exit() rxn_muls[1].append(mults[idx]) rxn_chgs[1].append(chgs[idx]) idx += 1 ts_mult =", "parse out geo information first, to make sure # user save specifications match", "else: fs_array, prefix_array = create_reaction_filesystems( prefix, rxn_info, mod_thy_info, ts_locs=insert_dct['ts_locs'], locs=None) cnf_fs = fs_array[-1]", "print('Error: user did not specify charge in input') sys.exit() return sinfo.from_data(ich, chg, mult)", "import thy as tinfo from mechanalyzer.inf 
import rxn as rinfo from mechanalyzer.inf import", "[]] for ich in ichs[0]: mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) if mults[idx] not", "'!' in line[0]: continue line = line.split('!')[0] if ':' not in line: print(", "is None: cid = autofile.schema.generate_new_conformer_id() return (rid, cid) def parse_user_species(insert_dct): smi = insert_dct['smiles']", "dummies = [] # for dummy in dummy_key_dct.keys(): # add_idx = 1 #", "ts_locs=insert_dct['ts_locs'], locs=None) cnf_fs = fs_array[-1] if not locs_match(geo, cnf_fs, locs): print( 'I refuse", "mechroutines.es._routines._sadpt import save_saddle_point from mechlib.reaction.rxnid import _id_reaction THEORY_DCT = { 'lvl_wbs': { 'orb_res':", "'program': 'molpro2015', 'method': 'caspt2', 'basis': 'cc-pvtz'}} def parse_user_locs(insert_dct): rid = insert_dct['rid'] cid =", "of species does not match number of charges') sys.exit() idx = 0 rxn_muls", "'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvdz'}, 'cc_lvl_t': { 'orb_res': 'RR', 'program': 'molpro2015', 'method':", "not specify rxn_class') sys.exit() return rxn_info, ts_info, rxn_class def parse_user_theory(insert_dct): # Get input", "bonds to transfered atom for me to figure out') print('I promise i will", "ts_geo = automol.zmat.geometry(ts_zma) # geo_reorder_dct = {} # dummies = [] # for", "+ ' filesystem contamination.' 
+ 'Comment lines should contain \"!\"' + 'Key format", "rxn_smis[1]: product_match = True elif ts_smis[1][::-1] == rxn_smis[-1]: ts_ichs[1] = ts_ichs[1][::-1] ts_smis[1] =", "out_str) # Hess = None # If hess is None: # print( #", "automol.inchi.smiles(pich) ts_ichs[1].append(pich) ts_smis[1].append(psmi) reactant_match = False product_match = False if ts_smis[0] == rxn_smis[0]:", "_sym_unique( geo, ene, saved_geos, saved_enes) if sym_id is None: if cnf_fs[0].file.info.exists(): rinf_obj =", "'RR', 'program': 'molpro2015', 'method': 'caspt2', 'basis': 'cc-pvtz'}} def parse_user_locs(insert_dct): rid = insert_dct['rid'] cid", "exit') sys.exit() return dct['save_filesystem'] def choose_cutoff_distance(geo): rqhs = [x * 0.1 for x", "'method': 'caspt2', 'basis': 'cc-pvtz'}} def parse_user_locs(insert_dct): rid = insert_dct['rid'] cid = insert_dct['cid'] if", "parse_user_reaction(insert_dct): smis = insert_dct['smiles'] ichs = insert_dct['inchi'] mults = insert_dct['mult'] chgs = insert_dct['charge']", "product_match = True if reactant_match and product_match: reactant_keys = [] for gra in", "me to figure out') print('I promise i will be smarter in the future')", "zma_keys, dummy_key_dct) rxn_info = (ts_ichs, *rxn_info[1:]) ts_geo = automol.zmat.geometry(ts_zma) # geo_reorder_dct = {}", "# geo_reorder_dct[idx_i + remove_idx] = idx_j # ts_geo = automol.geom.reorder_coordinates(geo, geo_reorder_dct) else: print(", "inchi {}'.format(ich)) match = False return match def locs_match(geo, cnf_fs, locs): match =", "ts_smis[1][::-1] product_match = True if reactant_match and product_match: reactant_keys = [] for gra", "sys.exit() flat_ichs = sum(ichs, []) if len(flat_ichs) != len(mults): print( 'Error: number of", "= automol.reac.breaking_bond_keys(zrxn_i, rev=True) # forward_gra = automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( # zrxn_i.forward_ts_graph)))", "thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) # 
conformer cnf_fs = autofile.fs.conformer(thy_prefix) if locs is not None:", "user given output') sys.exit() # Check that the rid/cid info matches the filesystem", "use program/method/basis/orb_dct' + 'keywords instead of theory') sys.exit() return thy_info def create_species_filesystems(prefix, spc_info,", "'hydrogen abstraction': # forward_gra = automol.graph.remove_bonds(forward_gra, forw_brk_key) # backward_gra = automol.graph.remove_bonds(backward_gra, back_brk_key) #", "= [automol.reac.from_string(zrxn_str)] # else: # zrxns, _ = _id_reaction(rxn_info) if rxn_class is None:", "# conformer cnf_fs = autofile.fs.conformer(thy_prefix) if locs is not None: cnf_fs[-1].create(locs) cnf_prefix =", "not in ['smiles', 'inchi']: value = value.replace(' ', '') else: value = value.split('", "thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) # conformer cnf_fs = autofile.fs.conformer(thy_prefix) if locs is not", "specify mult in input') sys.exit() if chg is None: print('Error: user did not", "input') sys.exit() else: thy_info = (program, method, basis, orb_res) else: if theory in", "checked_rids = [] for locs in cnf_fs[-1].existing(): current_rid, _ = locs if current_rid", "0.1, forming_bond: 0.9} forward_gra = automol.graph.set_bond_orders(ts_gra, forw_bnd_ord_dct) backward_gra = automol.graph.set_bond_orders(ts_gra, back_bnd_ord_dct) reactant_gras =", "automol.inchi.add_stereo(ich) ichs[0][idx] = ich for idx, ich in enumerate(ichs[1]): if not automol.inchi.is_complete(ich): ich", "is None: print('Error: user did not specify charge in input') sys.exit() return sinfo.from_data(ich,", "if ts_smis[1] == rxn_smis[1]: product_match = True elif ts_smis[1][::-1] == rxn_smis[-1]: ts_ichs[1] =", "out_str) if geo is None: print( 'No geometry could be parsed from output'", "zrxn = automol.reac.reverse(zrxn_i) # zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo) # if zrxn", "'basis': 'cc-pvdz'}, 'cc_lvl_t': { 'orb_res': 'RR', 'program': 'molpro2015', 
'method': 'ccsd(t)', 'basis': 'cc-pvtz'}, 'cc_lvl_q':", "= _fragment_ring_geo(geo) if frag_geo is not None: frag_zma = automol.geom.zmatrix(frag_geo) checked_rids = []", "None elif keyword in ['mult', 'charge', 'ts_mult']: values = [] for val in", "= value.split() for i, val in enumerate(value): value[i] = val.replace(' ', '') value", "None: print( 'Error: user did not specify rxn_class') sys.exit() return rxn_info, ts_info, rxn_class", "= automol.zmat.geometry(ts_zma) # geo_reorder_dct = {} # dummies = [] # for dummy", "keyword in ['mult', 'charge', 'ts_mult']: values = [] for val in value.split(','): values.append(int(val))", "{ 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvqz' }, 'cc_lvl_df': { 'orb_res':", "atoms_bnd[oversaturated_atom] if len(bonds) != 2: print('too many bonds to transfered atom for me", "os import autofile import automol from mechanalyzer.inf import thy as tinfo from mechanalyzer.inf", "value = value.replace(' ', '') else: value = value.split(' = ') if len(value)", "chosen_oversaturated_atom = oversaturated_atoms[0] break if chosen_oversaturated_atom is None: print('could not figure out which", "'lvl_b3mg': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': '6-311g**' }, 'lvl_b3t': {", "Are inserting into the filesystem inp_str = read_user_file(insert_dct, 'input_file') out_str = read_user_file(insert_dct, 'output_file')", "None: ichs = [[], []] for smi in smis[0]: ichs[0].append(automol.smiles.inchi(smi)) for smi in", "stereo=True) except IndexError: rich = automol.graph.inchi(rgra) rsmi = automol.inchi.smiles(rich) ts_ichs[0].append(rich) ts_smis[0].append(rsmi) for pgra", "== rxn_smis[-1]: ts_ichs[1] = ts_ichs[1][::-1] ts_smis[1] = ts_smis[1][::-1] product_match = True if reactant_match", "for val in value.split(','): values.append(int(val)) if len(values) == 1: value = values[0] else:", "rqhs: ts_gras = automol.geom.connectivity_graph(geo, rqq_bond_max=3.5, rqh_bond_max=rqh, 
rhh_bond_max=2.3) ts_gras = automol.graph.set_stereo_from_geometry(ts_gras, geo) ts_gras =", "' {}'.format(cnf_fs[0].path()) + '\\nthe expected rid for this geo is {}'.format(geo_rid) + '\\nthe", "= automol.geom.zmatrix(frag_locs_geo) if automol.zmat.almost_equal( frag_locs_zma, frag_zma, dist_rtol=0.1, ang_atol=.4): rid = locs[0] break return", "filesystem contamination.' + 'Comment lines should contain \"!\"' + 'Key format should be:\\n'", "# automol.graph.without_fractional_bonds( # zrxn_i.forward_ts_graph))) # forward_gra = automol.graph.add_bonds(forward_gra, forw_form_key) # backward_gra = automol.graph.without_stereo_parities(", "smi = insert_dct['smiles'] ich = insert_dct['inchi'] mult = insert_dct['mult'] chg = insert_dct['charge'] if", "thy_prefix, cnf_prefix)) def create_reaction_filesystems( prefix, rxn_info, mod_thy_info, ts_locs=None, locs=None): # species filesystem print('rxn_info',", "sys.exit() file_name = dct[keyword] return autofile.io_.read_file(file_name) def read_user_filesystem(dct): if dct['save_filesystem'] is None: print(", "automol.geom.zmatrix(frag_locs_geo) if automol.zmat.almost_equal( frag_locs_zma, frag_zma, dist_rtol=0.1, ang_atol=.4): rid = locs[0] break return rid", "is in the THEORY_DCT' + 'please add it to the dct in the", "# Get input method from theory dictionary theory = insert_dct['theory'] if theory is", "= [[], []] ts_ichs = [[], []] for rgra in reactant_gras: try: rich", "automol.graph.string(zrxn_i.forward_ts_graph)) # print('forRXN', automol.graph.string(forward_gra)) # print('bacRXN', automol.graph.string(zrxn_i.backward_ts_graph)) # print('bacRXN', automol.graph.string(backward_gra)) # if forward_gra", "'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': '6-31+g*' }, 'lvl_m06t': { 'orb_res': 'RU',", "None: frag_zma = automol.geom.zmatrix(frag_geo) checked_rids = [] for locs in cnf_fs[-1].existing(): current_rid, _", "break return rid def parse_script_input(script_input_file): script_input = 
autofile.io_.read_file(script_input_file).splitlines() insert_dct = { 'save_filesystem': None,", "in insert_dct: if 'None' in value: value = None elif keyword in ['mult',", "'with an inchi or smiles in input') sys.exit() if ich is None: ich", "rqh_bond_max=rqh, rhh_bond_max=2.3) ts_gras = automol.graph.set_stereo_from_geometry(ts_gras, geo) ts_gras = automol.graph.connected_components(ts_gras) if len(ts_gras) != 1:", "= [x * 0.1 for x in range(26, 38, 2)] chosen_ts_gra = []", "{ 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': 'cc-pvtz'}, 'lvl_b2d': { 'orb_res': 'RU',", "be smarter in the future') sys.exit() breaking_bond, forming_bond = bonds # when we", "1: value = values[0] else: value = values elif keyword in ['ts_locs']: value", ") sys.exit() return insert_dct if __name__ == '__main__': SCRIPT_INPUT_FILE = 'insert_options.txt' insert_dct =", "zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo) # elif backward_gra == automol.geom.graph(geo, stereo=False): #", "# bonds when doing bond orders forw_bnd_ord_dct = {breaking_bond: 0.9, forming_bond: 0.1} back_bnd_ord_dct", "'{}'.format(automol.geom.string(geo))) match = False if mul not in mults_allowed: print( 'user specified mult", "'ERROR: line\\n({}) {}\\n is not parsable, '.format(i, line) + 'script will exit until", "if ich is None: ich = automol.smiles.inchi(smi) if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich)", "insert_dct['method'] basis = insert_dct['basis'] orb_res = insert_dct['orb_res'] # Get input method from theory", "match = False return match def locs_match(geo, cnf_fs, locs): match = True rid", "'inchi': None, 'mult': None, 'charge': None, 'rid': None, 'cid': None, 'theory': None, 'program':", "rxn_gras[::-1] if ts_smis[0] == rxn_smis[0]: reactant_match = True elif ts_smis[0][::-1] == rxn_smis[0]: ts_ichs[0]", "specified save location zrxn = None if insert_dct['saddle']: rxn_info, spc_info, rxn_class = parse_user_reaction(insert_dct)", "}, 'cc_lvl_tf': { 
'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvtz-f12' }, 'cc_lvl_qf':", "automol.graph.set_bond_orders(ts_gra, forw_bnd_ord_dct) backward_gra = automol.graph.set_bond_orders(ts_gra, back_bnd_ord_dct) reactant_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(forward_gra)) reactant_gras = automol.graph.connected_components(reactant_gras)", "ts_smis[::-1] ts_gras = ts_gras[::-1] rxn_gras = rxn_gras[::-1] if ts_smis[0] == rxn_smis[0]: reactant_match =", "# print('bacRXN', automol.graph.string(backward_gra)) # if forward_gra == automol.geom.graph(geo, stereo=False): # zrxn = zrxn_i", "geo) # elif backward_gra == automol.geom.graph(geo, stereo=False): # zrxn = automol.reac.reverse(zrxn_i) # zma,", "# 'a transition state without a hessian') # sys.exit() # run_path = insert_dct['run_path']", "import autofile import automol from mechanalyzer.inf import thy as tinfo from mechanalyzer.inf import", "back_brk_key = automol.reac.breaking_bond_keys(zrxn_i, rev=True) # forward_gra = automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( #", "_, mul = spc_info mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) geo_ich = automol.geom.inchi(geo, stereo=True)", "std_zrxn, ts_zma, ts_geo, rxn_info def main(insert_dct): prefix = read_user_filesystem(insert_dct) # Read in the", "automol.graph.string(forward_gra)) # print('bacRXN', automol.graph.string(zrxn_i.backward_ts_graph)) # print('bacRXN', automol.graph.string(backward_gra)) # if forward_gra == automol.geom.graph(geo, stereo=False):", "rxn_smis[i].append(automol.inchi.smiles(ich)) ts_smis = [[], []] ts_ichs = [[], []] for rgra in reactant_gras:", "value = ' '.join(value) elif keyword not in ['smiles', 'inchi']: value = value.replace('", "is resolved to avoid' + ' filesystem contamination.' 
+ 'Comment lines should contain", "frag_locs_zma, frag_zma, dist_rtol=0.1, ang_atol=.4): rid = locs[0] break return rid def parse_script_input(script_input_file): script_input", "in side: rxn_smis[i].append(automol.inchi.smiles(ich)) ts_smis = [[], []] ts_ichs = [[], []] for rgra", "'which is based on geometry from output:\\n' + '{}'.format(automol.geom.string(geo))) match = False if", "False if ts_smis[0] == rxn_smis[0]: reactant_match = True elif ts_smis[0][::-1] == rxn_smis[0]: ts_ichs[0]", "= True if reactant_match: if ts_smis[1] == rxn_smis[1]: product_match = True elif ts_smis[1][::-1]", "_ = locs if current_rid in checked_rids: continue if cnf_fs[-1].file.geometry.exists(locs): checked_rids.append(current_rid) locs_geo =", "= value.replace(' ', '') else: value = value.split(' = ') if len(value) >", "print( 'Error: number of species does not match number of charges') sys.exit() idx", "# else: # zrxns, _ = _id_reaction(rxn_info) if rxn_class is None: print( 'Error:", "[]] for reactant in reactants: values[0].append(reactant.replace(' ', '')) for product in products: values[1].append(product.replace('", "'Script will exit') sys.exit() return dct['save_filesystem'] def choose_cutoff_distance(geo): rqhs = [x * 0.1", "'')) for product in products: values[1].append(product.replace(' ', '')) value = values else: value", "!= 1: # print( # 'Can only save a transition state that has", "= parse_user_locs(insert_dct) # Check that the save location matches geo information if not", "in enumerate(rxn_info[0]): for ich in side: rxn_smis[i].append(automol.inchi.smiles(ich)) ts_smis = [[], []] ts_ichs =", "None, 'inchi': None, 'mult': None, 'charge': None, 'rid': None, 'cid': None, 'theory': None,", "automol.graph.inchi(rgra) rsmi = automol.inchi.smiles(rich) ts_ichs[0].append(rich) ts_smis[0].append(rsmi) for pgra in product_gras: try: pich =", "autofile.schema.generate_new_conformer_id() return (rid, cid) def parse_user_species(insert_dct): smi = insert_dct['smiles'] ich = 
insert_dct['inchi'] mult", "mechanalyzer.inf import rxn as rinfo from mechanalyzer.inf import spc as sinfo import elstruct", "the transition state' + 'did not match those specified in user input') sys.exit()", "== 'hydrogen abstraction': # forward_gra = automol.graph.remove_bonds(forward_gra, forw_brk_key) # backward_gra = automol.graph.remove_bonds(backward_gra, back_brk_key)", "mul = spc_info mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) geo_ich = automol.geom.inchi(geo, stereo=True) if", "which are the following') # for zrxn_i in zrxns: # print(zrxns) # sys.exit()", "in user given output') sys.exit() inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.OPTIMIZATION, prog=prog, version='', method=method, basis=basis,", "ts_mul') sys.exit() rxn_info = rinfo.sort((ichs, rxn_chgs, rxn_muls, ts_mult)) ts_info = rinfo.ts_info(rxn_info) # if", "'user specified inchi {}'.format(ich) + 'does not match inchi from output {}'.format(geo_ich) +", "is not None: if geo_rid != rid: print( 'Error: rid mismatch for the", "automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( # zrxn_i.forward_ts_graph))) # forward_gra = automol.graph.add_bonds(forward_gra, forw_form_key) #", "locs in cnf_fs[-1].existing(): current_rid, _ = locs if current_rid in checked_rids: continue if", "if current_rid in checked_rids: continue if cnf_fs[-1].file.geometry.exists(locs): checked_rids.append(current_rid) locs_geo = cnf_fs[-1].file.geometry.read(locs) frag_locs_geo =", "reactant_match = True elif ts_smis[0][::-1] == rxn_smis[0]: ts_ichs[0] = ts_ichs[0][::-1] ts_smis[0] = ts_smis[0][::-1]", "filesystem spc_fs = autofile.fs.species(prefix) spc_fs[-1].create(spc_info) spc_prefix = spc_fs[-1].path(spc_info) # theory filesystem thy_fs =", "ts_ichs[1].append(pich) ts_smis[1].append(psmi) reactant_match = False product_match = False if ts_smis[0] == rxn_smis[0]: reactant_match", 
"(ts_ichs, *rxn_info[1:]) ts_geo = automol.zmat.geometry(ts_zma) # geo_reorder_dct = {} # dummies = []", "[] for val in value.split(','): values.append(int(val)) if len(values) == 1: value = values[0]", "= insert_dct['rid'] cid = insert_dct['cid'] if rid is None: rid = autofile.schema.generate_new_ring_id() if", "not match number of mults') sys.exit() if len(flat_ichs) != len(chgs): print( 'Error: number", "sys.exit() return rxn_info, ts_info, rxn_class def parse_user_theory(insert_dct): # Get input method explicitly inputted", "'ccsd(t)-f12', 'basis': 'cc-pvtz-f12' }, 'cc_lvl_qf': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis':", "insert_dct['run_path'] # if run_path is None: # run_path = os.getcwd() # run_fs =", "'method': 'm062x', 'basis': '6-31g*' }, 'lvl_m06m': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x',", "output' + 'Check that the program matches user specied' + ' {}'.format(prog) +", "geo_reorder_dct) else: print( 'The reactants and products found for the transition state' +", "avoid' + ' filesystem contamination.' 
+ 'Comment lines should contain \"!\"' + 'Key", "zma_locs=(0,)) print( 'geometry is now saved at {}'.format(cnf_fs[-1].path(locs))) else: print( 'the geometry in", "autofile.fs.run(run_path) # freq_run_path = run_fs[-1].path(['hessian']) # run_fs[-1].create(['hessian']) # script_str = autorun.SCRIPT_DCT['projrot'] # freqs,", "not None and zrxn is not None: hess_inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.HESSIAN, prog=prog, version='',", "ret, hess_ret, freqs, imags, mod_thy_info, {'runlvl_cnf_fs': (cnf_fs, None)}, locs, zma_locs=(0,), zma=zma) else: _save_unique_parsed_conformer(", "'6-31g*' }, 'lvl_b3mg': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': '6-311g**' },", "'cid': None, 'theory': None, 'program': None, 'method': None, 'basis': None, 'orb_res': None, 'input_file':", "output, cannot save ' + # 'a transition state without a hessian') #", "line[0]: continue line = line.split('!')[0] if ':' not in line: print( 'ERROR: line\\n({})", "+ '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() return insert_dct if __name__ == '__main__': SCRIPT_INPUT_FILE = 'insert_options.txt'", "# 'frequencies: ' + ','.join(imags)) # sys.exit() else: spc_info = parse_user_species(insert_dct) mod_thy_info =", "'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvtz'}, 'lvl_b2q': { 'orb_res': 'RU', 'program': 'gaussian09',", "not in line: print( 'ERROR: line\\n({}) {}\\n is not parsable, '.format(i, line) +", "basis=basis, status=autofile.schema.RunStatus.SUCCESS) hess_ret = (hess_inf_obj, inp_str, out_str) save_saddle_point( zrxn, ret, hess_ret, freqs, imags,", "ichs[1].append(automol.smiles.inchi(smi)) for idx, ich in enumerate(ichs[0]): if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) ichs[0][idx]", "input and output files that we # Are inserting into the filesystem inp_str", "autofile import automol from mechanalyzer.inf import thy as tinfo from mechanalyzer.inf import rxn", "= values 
elif keyword in ['ts_locs']: value = (int(value),) elif keyword in ['rxn_class']:", "ts_gras: vals = automol.graph.atom_unsaturated_valences(ts_gra_i, bond_order=True) oversaturated_atoms = [atm for atm, val in vals.items()", "'basis': 'cc-pvdz'}, 'lvl_b2t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvtz'}, 'lvl_b2q':", "Parse out user specified save location zrxn = None if insert_dct['saddle']: rxn_info, spc_info,", "in value.split(','): values.append(int(val)) if len(values) == 1: value = values[0] else: value =", "'\\nthe expected rid for this geo is {}'.format(geo_rid) + '\\nthe user rid in", "= insert_dct['mult'] chg = insert_dct['charge'] if ich is None and smi is None:", "rxn_smis[-1]: ts_ichs[1] = ts_ichs[1][::-1] ts_smis[1] = ts_smis[1][::-1] product_match = True if reactant_match and", "= parse_user_species(insert_dct) mod_thy_info = tinfo.modify_orb_label(thy_info, spc_info) locs = parse_user_locs(insert_dct) # Check that the", "locs=None) else: fs_array, prefix_array = create_reaction_filesystems( prefix, rxn_info, mod_thy_info, ts_locs=insert_dct['ts_locs'], locs=None) cnf_fs =", "spc_info = parse_user_species(insert_dct) mod_thy_info = tinfo.modify_orb_label(thy_info, spc_info) locs = parse_user_locs(insert_dct) # Check that", "'basis': 'cc-pvqz' }, 'cc_lvl_df': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvdz-f12'", "multiplicty for inchi {}'.format(ich)) sys.exit() rxn_muls[0].append(mults[idx]) rxn_chgs[0].append(chgs[idx]) idx += 1 for ich in", "if not species_match(geo, spc_info): print( 'I refuse to save this geometry until user", "chosen_ts_gra, chosen_oversaturated_atom def get_zrxn(geo, rxn_info, rxn_class): ts_gra, oversaturated_atom = choose_cutoff_distance(geo) atoms_bnd = automol.graph.atoms_bond_keys(ts_gra)", "if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) ichs[1][idx] = ich if mults is None:", "parse_user_theory(insert_dct) # parse 
out geo information first, to make sure # user save", "# zrxn_i.forward_ts_graph))) # forward_gra = automol.graph.add_bonds(forward_gra, forw_form_key) # backward_gra = automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds(", "hess is None: # print( # 'No hessian found in output, cannot save", "rxn_gras[1]: product_keys.append(automol.graph.atom_keys(gra)) std_rxn = automol.reac.Reaction( rxn_class, *ts_gras, reactant_keys, product_keys) ts_zma, zma_keys, dummy_key_dct =", "to avoid' + ' filesystem contamination.' + 'Comment lines should contain \"!\"' +", "autofile.schema.generate_new_ring_id() if cid is None: cid = autofile.schema.generate_new_conformer_id() return (rid, cid) def parse_user_species(insert_dct):", "# else: # geo_reorder_dct[idx_i + remove_idx] = idx_j # ts_geo = automol.geom.reorder_coordinates(geo, geo_reorder_dct)", "basis, _ = thy_info ene = elstruct.reader.energy(prog, method, out_str) geo = elstruct.reader.opt_geometry(prog, out_str)", "' that is in the THEORY_DCT' + 'please add it to the dct", "is not parsable, '.format(i, line) + 'script will exit until input is resolved", "rich = automol.graph.inchi(rgra, stereo=True) except IndexError: rich = automol.graph.inchi(rgra) rsmi = automol.inchi.smiles(rich) ts_ichs[0].append(rich)", "+ 'script will exit until inpupt is resolved to avoid' + ' filesystem", "transition state without a hessian') # sys.exit() # run_path = insert_dct['run_path'] # if", "if dct['save_filesystem'] is None: print( 'ERROR: No save_filesystem}' + 'Script will exit') sys.exit()", "remove_idx = 0 # for idx_i, idx_j in enumerate(zma_keys): # if idx_i in", "' + ','.join(imags)) # sys.exit() else: spc_info = parse_user_species(insert_dct) mod_thy_info = tinfo.modify_orb_label(thy_info, spc_info)", "# Parse out user specified save location zrxn = None if insert_dct['saddle']: rxn_info,", "cnf_fs, mod_thy_info) if _geo_unique(geo, ene, saved_geos, saved_enes, zrxn=zrxn): sym_id = _sym_unique( geo, ene,", 
"script_input = autofile.io_.read_file(script_input_file).splitlines() insert_dct = { 'save_filesystem': None, 'smiles': None, 'inchi': None, 'mult':", "= values[0] else: value = values elif keyword in ['ts_locs']: value = (int(value),)", "output files that we # Are inserting into the filesystem inp_str = read_user_file(insert_dct,", "_, _ = automol.reac.ts_zmatrix(zrxn, geo) # elif backward_gra == automol.geom.graph(geo, stereo=False): # zrxn", "in input') sys.exit() if chgs is None: print('Error: user did not specify charges", "match = False if mul not in mults_allowed: print( 'user specified mult of", "read_user_filesystem(insert_dct) # Read in the input and output files that we # Are", "imags, mod_thy_info, {'runlvl_cnf_fs': (cnf_fs, None)}, locs, zma_locs=(0,), zma=zma) else: _save_unique_parsed_conformer( mod_thy_info, cnf_fs, locs,", "specified' + ' info matches the info in user given output') sys.exit() inf_obj", "import _save_unique_parsed_conformer from mechroutines.es._routines.conformer import _geo_unique from mechroutines.es._routines.conformer import _fragment_ring_geo from mechroutines.es._routines._sadpt import", "{}'.format(mults[idx]) + 'is not an allowed multiplicty for inchi {}'.format(ich)) sys.exit() rxn_muls[1].append(mults[idx]) rxn_chgs[1].append(chgs[idx])", "method, basis, _ = thy_info ene = elstruct.reader.energy(prog, method, out_str) geo = elstruct.reader.opt_geometry(prog,", "stereo=False)) if mults[idx] not in mults_allowed: print( 'user specified mult of {}'.format(mults[idx]) +", "', '') else: value = value.split(' = ') if len(value) > 1: insert_dct['saddle']", "in reactant_gras: try: rich = automol.graph.inchi(rgra, stereo=True) except IndexError: rich = automol.graph.inchi(rgra) rsmi", "insert_dct['rid'] cid = insert_dct['cid'] if rid is None: rid = autofile.schema.generate_new_ring_id() if cid", "enumerate(ichs[0]): if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) ichs[0][idx] = ich for idx, ich", 
"automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( # zrxn_i.backward_ts_graph))) # backward_gra = automol.graph.add_bonds(backward_gra, back_form_key) # if zrxn_i.class_", "is None: print('Error: user did not specify program in input') sys.exit() elif method", "ts_locs=None, locs=None): # species filesystem print('rxn_info', rxn_info) rxn_fs = autofile.fs.reaction(prefix) sort_rxn_info = rinfo.sort(rxn_info,", "None, 'basis': None, 'orb_res': None, 'input_file': None, 'output_file': None, 'ts_locs': None, 'ts_mult': None,", "= ts_ichs[0][::-1] ts_smis[0] = ts_smis[0][::-1] reactant_match = True else: ts_ichs = ts_ichs[::-1] ts_smis", "ich in ichs[1]: mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) if mults[idx] not in mults_allowed:", "specify ts_mul') sys.exit() rxn_info = rinfo.sort((ichs, rxn_chgs, rxn_muls, ts_mult)) ts_info = rinfo.ts_info(rxn_info) #", "continue line = line.split('!')[0] if ':' not in line: print( 'ERROR: line\\n({}) {}\\n", "} for i, line in enumerate(script_input): if len(line) < 2: continue elif '!'", "= cinf_obj.nsamp cnsampd += 1 cinf_obj.nsamp = cnsampd else: cinf_obj = autofile.schema.info_objects.conformer_branch(0) cinf_obj.nsamp", "# backward_gra = automol.graph.remove_bonds(backward_gra, back_brk_key) # print('forRXN', automol.graph.string(zrxn_i.forward_ts_graph)) # print('forRXN', automol.graph.string(forward_gra)) # print('bacRXN',", "rgra in reactant_gras: try: rich = automol.graph.inchi(rgra, stereo=True) except IndexError: rich = automol.graph.inchi(rgra)", "rxn_info, mod_thy_info, ts_locs=None, locs=None): # species filesystem print('rxn_info', rxn_info) rxn_fs = autofile.fs.reaction(prefix) sort_rxn_info", "'lvl_wbt': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': 'cc-pvtz'}, 'lvl_m06s': { 'orb_res':", "= automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(backward_gra)) product_gras = 
automol.graph.connected_components(product_gras) ts_gras = [forward_gra, backward_gra] rxn_gras = [reactant_gras,", "value = None elif keyword in ['mult', 'charge', 'ts_mult']: values = [] for", "= autofile.schema.info_objects.run( job=elstruct.Job.HESSIAN, prog=prog, version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) hess_ret = (hess_inf_obj, inp_str, out_str)", "automol.graph.connected_components(ts_gras) if len(ts_gras) != 1: continue for ts_gra_i in ts_gras: vals = automol.graph.atom_unsaturated_valences(ts_gra_i,", "choose_cutoff_distance(geo): rqhs = [x * 0.1 for x in range(26, 38, 2)] chosen_ts_gra", "[[], []] ts_ichs = [[], []] for rgra in reactant_gras: try: rich =", "return dct['save_filesystem'] def choose_cutoff_distance(geo): rqhs = [x * 0.1 for x in range(26,", "# add_idx += 1 # dummies.append(dummy + add_idx) # remove_idx = 0 #", "if mult is None: print('Error: user did not specify mult in input') sys.exit()", "automol.geom.inchi(geo, stereo=True) if ich != geo_ich: print( 'user specified inchi {}'.format(ich) + 'does", "'method': 'b2plypd3', 'basis': 'cc-pvdz'}, 'lvl_b2t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis':", "thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) if ts_locs is None: ts_locs = (0,) ts_fs = autofile.fs.transition_state(thy_prefix)", "if ichs is None: ichs = [[], []] for smi in smis[0]: ichs[0].append(automol.smiles.inchi(smi))", "thy_fs = autofile.fs.theory(spc_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) # conformer cnf_fs = autofile.fs.conformer(thy_prefix) if", "is not None: cnf_fs[-1].create(locs) cnf_prefix = cnf_fs[-1].path(locs) else: cnf_prefix = None return (", "' filesystem contamination.' 
+ 'Comment lines should contain \"!\"' + 'Key format should", "[[], []] rxn_chgs = [[], []] for ich in ichs[0]: mults_allowed = automol.graph.possible_spin_multiplicities(", "reaction # class but not in between words value = value.split() for i,", "'lvl_b3t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': 'cc-pvtz'}, 'cc_lvl_d': { 'orb_res':", "in input file is {}'.format(rid)) match = False return match def rng_loc_for_geo(geo, cnf_fs):", "dummy > dumm_j: # add_idx += 1 # dummies.append(dummy + add_idx) # remove_idx", "specify charge in input') sys.exit() return sinfo.from_data(ich, chg, mult) def parse_user_reaction(insert_dct): smis =", "autofile.fs.conformer(ts_prefix) if locs is not None: cnf_fs[-1].create(locs) cnf_prefix = cnf_fs[-1].path(locs) else: cnf_prefix =", "reactant in reactants: values[0].append(reactant.replace(' ', '')) for product in products: values[1].append(product.replace(' ', ''))", "thy_info def create_species_filesystems(prefix, spc_info, mod_thy_info, locs=None): # species filesystem spc_fs = autofile.fs.species(prefix) spc_fs[-1].create(spc_info)", "= zrxn_i # zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo) # elif backward_gra ==", "an allowed multiplicty for inchi {}'.format(ich)) match = False return match def locs_match(geo,", "now saved at {}'.format(cnf_fs[-1].path(locs))) else: print( 'the geometry in the output is not", "False, } for i, line in enumerate(script_input): if len(line) < 2: continue elif", "not specify charge in input') sys.exit() return sinfo.from_data(ich, chg, mult) def parse_user_reaction(insert_dct): smis", "'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': 'cc-pvtz'}, 'cc_lvl_d': { 'orb_res': 'RR', 'program':", "not specify ts_mul') sys.exit() rxn_info = rinfo.sort((ichs, rxn_chgs, rxn_muls, ts_mult)) ts_info = rinfo.ts_info(rxn_info)", "sys.exit() breaking_bond, forming_bond = bonds # when we move on to other reaction", "ts_smis[1] == rxn_smis[1]: product_match = 
True elif ts_smis[1][::-1] == rxn_smis[-1]: ts_ichs[1] = ts_ichs[1][::-1]", "= automol.reac.ts_zmatrix( std_rxn, geo) std_zrxn = automol.reac.relabel_for_zmatrix( std_rxn, zma_keys, dummy_key_dct) rxn_info = (ts_ichs,", "'method': 'b2plypd3', 'basis': 'cc-pvtz'}, 'lvl_b2q': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis':", "smarter in the future') sys.exit() breaking_bond, forming_bond = bonds # when we move", "'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvdz'}, 'cc_lvl_t': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)',", "zrxn, ret, hess_ret, freqs, imags, mod_thy_info, {'runlvl_cnf_fs': (cnf_fs, None)}, locs, zma_locs=(0,), zma=zma) else:", "0.1 for x in range(26, 38, 2)] chosen_ts_gra = [] chosen_oversaturated_atom = None", "value[0].replace(' ', '') print(keyword, value) insert_dct[keyword] = value else: print( 'ERROR: Keyword {}", "automol.zmat.geometry(ts_zma) # geo_reorder_dct = {} # dummies = [] # for dummy in", "= insert_dct['ts_mult'] if ts_mult is None: print( 'Error: user did not specify ts_mul')", "mults in input') sys.exit() if chgs is None: print('Error: user did not specify", "def parse_user_reaction(insert_dct): smis = insert_dct['smiles'] ichs = insert_dct['inchi'] mults = insert_dct['mult'] chgs =", "automol.graph.set_bond_orders(ts_gra, back_bnd_ord_dct) reactant_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(forward_gra)) reactant_gras = automol.graph.connected_components(reactant_gras) product_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(backward_gra))", "THEORY_DCT = { 'lvl_wbs': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': '6-31g*'", "zrxn_i.backward_ts_graph))) # backward_gra = automol.graph.add_bonds(backward_gra, back_form_key) # if zrxn_i.class_ == 'hydrogen abstraction': #", "'lvl_m06s': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': '6-31g*' }, 'lvl_m06m': {", 
"geometry did not match any of the attempted ' + # 'zrxns, which", "if zrxn_i.class_ == 'hydrogen abstraction': # forward_gra = automol.graph.remove_bonds(forward_gra, forw_brk_key) # backward_gra =", "value[i] = val.replace(' ', '') value = ' '.join(value) elif keyword not in", "'orb_res': None, 'input_file': None, 'output_file': None, 'ts_locs': None, 'ts_mult': None, 'rxn_class': None, 'zrxn_file':", "' filesystem contamination.' + 'Allowed keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() return insert_dct", "the THEORY_DCT' + 'please add it to the dct in the script or", "= autofile.schema.generate_new_ring_id() if cid is None: cid = autofile.schema.generate_new_conformer_id() return (rid, cid) def", "'cc-pvdz-f12' }, 'cc_lvl_tf': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvtz-f12' },", "keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() keyword, value = line.split(':') if keyword in", "def read_user_file(dct, keyword): if dct[keyword] is None: print( 'ERROR: No filename is specified", "locs): match = True rid = locs[0] geo_rid = rng_loc_for_geo(geo, cnf_fs) if geo_rid", "return autofile.io_.read_file(file_name) def read_user_filesystem(dct): if dct['save_filesystem'] is None: print( 'ERROR: No save_filesystem}' +", "= insert_dct['mult'] chgs = insert_dct['charge'] rxn_class = insert_dct['rxn_class'] # zrxn_file = insert_dct['zrxn_file'] if", "ts_smis[1][::-1] == rxn_smis[-1]: ts_ichs[1] = ts_ichs[1][::-1] ts_smis[1] = ts_smis[1][::-1] product_match = True if", "None, 'ts_locs': None, 'ts_mult': None, 'rxn_class': None, 'zrxn_file': None, 'run_path': None, 'saddle': False,", "idx_j # ts_geo = automol.geom.reorder_coordinates(geo, geo_reorder_dct) else: print( 'The reactants and products found", "to avoid' + ' filesystem contamination.' 
+ 'Allowed keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) )", "and method matches' + ' {}'.format(method)) sys.exit() # Parse out user specified save", "tinfo.from_dct(THEORY_DCT[theory]) else: print( 'Error: user did not specify a theory {}'.format(theory) + '", "sort_rxn_info = rinfo.sort(rxn_info, scheme='autofile') rxn_fs[-1].create(sort_rxn_info) rxn_prefix = rxn_fs[-1].path(sort_rxn_info) # theory filesystem thy_fs =", "specify orb_res in input') sys.exit() else: thy_info = (program, method, basis, orb_res) else:", "automol.reac.reverse(zrxn_i) # zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo) # if zrxn is None:", "not match those specified in user input') sys.exit() return std_zrxn, ts_zma, ts_geo, rxn_info", "0.1} back_bnd_ord_dct = {breaking_bond: 0.1, forming_bond: 0.9} forward_gra = automol.graph.set_bond_orders(ts_gra, forw_bnd_ord_dct) backward_gra =", "# for idx_i, idx_j in enumerate(zma_keys): # if idx_i in dummies: # remove_idx", "basis=basis, status=autofile.schema.RunStatus.SUCCESS) ret = (inf_obj, inp_str, out_str) _, saved_geos, saved_enes = _saved_cnf_info( cnf_fs,", "make sure # user save specifications match output prog, method, basis, _ =", "+ 'does not match inchi from output {}'.format(geo_ich) + 'which is based on", "thy_info = (program, method, basis, orb_res) else: if theory in THEORY_DCT: thy_info =", "'program': 'gaussian09', 'method': 'b3lyp', 'basis': '6-31g*' }, 'lvl_b3mg': { 'orb_res': 'RU', 'program': 'gaussian09',", "= automol.reac.Reaction( rxn_class, *ts_gras, reactant_keys, product_keys) ts_zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix( std_rxn, geo)", "inserting into the filesystem inp_str = read_user_file(insert_dct, 'input_file') out_str = read_user_file(insert_dct, 'output_file') #", "in ichs[0]: mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) if mults[idx] not in mults_allowed: print(", "is None: print( 'Error: user did not specify 
rxn_class') sys.exit() return rxn_info, ts_info,", "only save a transition state that has a single' + # 'imaginary frequency,", "print('Error: user did not specify mults in input') sys.exit() if chgs is None:", "mul not in mults_allowed: print( 'user specified mult of {}'.format(mul) + 'is not", "except IndexError: rich = automol.graph.inchi(rgra) rsmi = automol.inchi.smiles(rich) ts_ichs[0].append(rich) ts_smis[0].append(rsmi) for pgra in", "# print('forRXN', automol.graph.string(forward_gra)) # print('bacRXN', automol.graph.string(zrxn_i.backward_ts_graph)) # print('bacRXN', automol.graph.string(backward_gra)) # if forward_gra ==", "rxn_info, ts_info, rxn_class def parse_user_theory(insert_dct): # Get input method explicitly inputted program =", "{}'.format(cnf_fs[0].path()) + '\\nthe expected rid for this geo is {}'.format(geo_rid) + '\\nthe user", "{ 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'caspt2', 'basis': 'cc-pvtz'}} def parse_user_locs(insert_dct): rid =", "input') sys.exit() return sinfo.from_data(ich, chg, mult) def parse_user_reaction(insert_dct): smis = insert_dct['smiles'] ichs =", "= automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( # zrxn_i.forward_ts_graph))) # forward_gra = automol.graph.add_bonds(forward_gra, forw_form_key)", "frag_zma, dist_rtol=0.1, ang_atol=.4): rid = locs[0] break return rid def parse_script_input(script_input_file): script_input =", "of {}'.format(mul) + 'is not an allowed multiplicty for inchi {}'.format(ich)) match =", "we have to check for double # bonds when doing bond orders forw_bnd_ord_dct", "None: print('Error: user did not specify program in input') sys.exit() elif method is", "rid = None frag_geo = _fragment_ring_geo(geo) if frag_geo is not None: frag_zma =", "mechroutines.es._routines.conformer import _fragment_ring_geo from mechroutines.es._routines._sadpt import save_saddle_point from mechlib.reaction.rxnid import _id_reaction 
THEORY_DCT =", "= autofile.fs.reaction(prefix) sort_rxn_info = rinfo.sort(rxn_info, scheme='autofile') rxn_fs[-1].create(sort_rxn_info) rxn_prefix = rxn_fs[-1].path(sort_rxn_info) # theory filesystem", "rid for this geo is {}'.format(geo_rid) + '\\nthe user rid in input file", "mod_thy_info = tinfo.modify_orb_label(thy_info, spc_info) locs = parse_user_locs(insert_dct) # Check that the save location", "match output prog, method, basis, _ = thy_info ene = elstruct.reader.energy(prog, method, out_str)", "1 # else: # geo_reorder_dct[idx_i + remove_idx] = idx_j # ts_geo = automol.geom.reorder_coordinates(geo,", "not unique to filesystem' + '... not saving') def species_match(geo, spc_info): match =", "'method': 'ccsd(t)', 'basis': 'cc-pvqz' }, 'cc_lvl_df': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12',", "'cc-pvtz'}} def parse_user_locs(insert_dct): rid = insert_dct['rid'] cid = insert_dct['cid'] if rid is None:", "# Are inserting into the filesystem inp_str = read_user_file(insert_dct, 'input_file') out_str = read_user_file(insert_dct,", "'I refuse to save this geometry until user specified' + ' info matches", "dictionary theory = insert_dct['theory'] if theory is None: if program is None: print('Error:", "= insert_dct['charge'] if ich is None and smi is None: print( 'Error: user", "for gra in rxn_gras[1]: product_keys.append(automol.graph.atom_keys(gra)) std_rxn = automol.reac.Reaction( rxn_class, *ts_gras, reactant_keys, product_keys) ts_zma,", "this geo is {}'.format(geo_rid) + '\\nthe user rid in input file is {}'.format(rid))", "contamination.' 
+ 'Comment lines should contain \"!\"' + 'Key format should be:\\n' +", "for smi in smis[1]: ichs[1].append(automol.smiles.inchi(smi)) for idx, ich in enumerate(ichs[0]): if not automol.inchi.is_complete(ich):", "dumm_j: # add_idx += 1 # dummies.append(dummy + add_idx) # remove_idx = 0", "rev=True) # forw_brk_key = automol.reac.breaking_bond_keys(zrxn_i) # back_brk_key = automol.reac.breaking_bond_keys(zrxn_i, rev=True) # forward_gra =", "vals.items() if val < 0] if len(oversaturated_atoms) == 1: chosen_ts_gra = ts_gras[0] chosen_oversaturated_atom", "following') # for zrxn_i in zrxns: # print(zrxns) # sys.exit() # # hess", "found in output, cannot save ' + # 'a transition state without a", "automol.graph.without_fractional_bonds(backward_gra)) product_gras = automol.graph.connected_components(product_gras) ts_gras = [forward_gra, backward_gra] rxn_gras = [reactant_gras, product_gras] rxn_smis", "dummies.append(dummy + add_idx) # remove_idx = 0 # for idx_i, idx_j in enumerate(zma_keys):", "not specify charges in input') sys.exit() flat_ichs = sum(ichs, []) if len(flat_ichs) !=", "matches geo information if not insert_dct['saddle']: if not species_match(geo, spc_info): print( 'I refuse", "mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) geo_ich = automol.geom.inchi(geo, stereo=True) if ich != geo_ich:", "') values = [[], []] for reactant in reactants: values[0].append(reactant.replace(' ', '')) for", "rxn_gras = rxn_gras[::-1] if ts_smis[0] == rxn_smis[0]: reactant_match = True elif ts_smis[0][::-1] ==", "# run_path = insert_dct['run_path'] # if run_path is None: # run_path = os.getcwd()", "for x in range(26, 38, 2)] chosen_ts_gra = [] chosen_oversaturated_atom = None for", "# zrxn = automol.reac.reverse(zrxn_i) # zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo) # if", "in dummies: # if dummy > dumm_j: # add_idx += 1 # dummies.append(dummy", "'ts_locs': None, 'ts_mult': None, 'rxn_class': None, 'zrxn_file': None, 
'run_path': None, 'saddle': False, }", "zrxn is not None: hess_inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.HESSIAN, prog=prog, version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS)", "'b3lyp', 'basis': '6-31g*' }, 'lvl_b3mg': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis':", "product_keys) ts_zma, zma_keys, dummy_key_dct = automol.reac.ts_zmatrix( std_rxn, geo) std_zrxn = automol.reac.relabel_for_zmatrix( std_rxn, zma_keys,", "if ts_locs is None: ts_locs = (0,) ts_fs = autofile.fs.transition_state(thy_prefix) ts_fs[-1].create(ts_locs) ts_prefix =", "not an allowed multiplicty for inchi {}'.format(ich)) sys.exit() rxn_muls[0].append(mults[idx]) rxn_chgs[0].append(chgs[idx]) idx += 1", "'imaginary frequency, projrot found the following' + # 'frequencies: ' + ','.join(imags)) #", "1 if cnf_fs[1].file.info.exists([locs[0]]): cinf_obj = cnf_fs[1].file.info.read(locs[0]) cnsampd = cinf_obj.nsamp cnsampd += 1 cinf_obj.nsamp", "sys import os import autofile import automol from mechanalyzer.inf import thy as tinfo", "for the filesystem at' + ' {}'.format(cnf_fs[0].path()) + '\\nthe expected rid for this", "'method': 'm062x', 'basis': 'cc-pvtz'}, 'lvl_b2d': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis':", "or smiles in input') sys.exit() if ich is None: ich = automol.smiles.inchi(smi) if", "rhh_bond_max=2.3) ts_gras = automol.graph.set_stereo_from_geometry(ts_gras, geo) ts_gras = automol.graph.connected_components(ts_gras) if len(ts_gras) != 1: continue", "geo, rxn_info = get_zrxn(geo, rxn_info, rxn_class) # for zrxn_i in zrxns: # forw_form_key", "will exit') sys.exit() file_name = dct[keyword] return autofile.io_.read_file(file_name) def read_user_filesystem(dct): if dct['save_filesystem'] is", "', '')) value = values else: value = value[0].replace(' ', '') print(keyword, value)", "chg is None: print('Error: user did not specify charge in input') sys.exit() return", 
"back_form_key) # if zrxn_i.class_ == 'hydrogen abstraction': # forward_gra = automol.graph.remove_bonds(forward_gra, forw_brk_key) #", "'keywords instead of theory') sys.exit() return thy_info def create_species_filesystems(prefix, spc_info, mod_thy_info, locs=None): #", "rxn_chgs, rxn_muls, ts_mult)) ts_info = rinfo.ts_info(rxn_info) # if zrxn_file is not None: #", "'gaussian09', 'method': 'wb97xd', 'basis': '6-31+g*' }, 'lvl_wbt': { 'orb_res': 'RU', 'program': 'gaussian09', 'method':", "'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvqz' }, 'lvl_b3s': { 'orb_res': 'RU',", "( (spc_fs, thy_fs, cnf_fs), (spc_prefix, thy_prefix, cnf_prefix)) def create_reaction_filesystems( prefix, rxn_info, mod_thy_info, ts_locs=None,", "+ 'Script will exit') sys.exit() file_name = dct[keyword] return autofile.io_.read_file(file_name) def read_user_filesystem(dct): if", "input is resolved to avoid' + ' filesystem contamination.' + 'Comment lines should", "inp_str = read_user_file(insert_dct, 'input_file') out_str = read_user_file(insert_dct, 'output_file') # parse method from insert", "parse_user_species(insert_dct) mod_thy_info = tinfo.modify_orb_label(thy_info, spc_info) locs = parse_user_locs(insert_dct) # Check that the save", "products = products.split(' + ') values = [[], []] for reactant in reactants:", "= insert_dct['smiles'] ichs = insert_dct['inchi'] mults = insert_dct['mult'] chgs = insert_dct['charge'] rxn_class =", "[geo], [[]], [hess]) # if len(imags) != 1: # print( # 'Can only", "rxn_info = (ts_ichs, *rxn_info[1:]) ts_geo = automol.zmat.geometry(ts_zma) # geo_reorder_dct = {} # dummies", "backward_gra = automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( # zrxn_i.backward_ts_graph))) # backward_gra = automol.graph.add_bonds(backward_gra,", "+ 'Allowed keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() keyword, value = line.split(':') if", 
"geo is {}'.format(geo_rid) + '\\nthe user rid in input file is {}'.format(rid)) match", "is None: rid = autofile.schema.generate_new_ring_id() if cid is None: cid = autofile.schema.generate_new_conformer_id() return", "the filesystem at' + ' {}'.format(cnf_fs[0].path()) + '\\nthe expected rid for this geo", "is None: # print( # 'No hessian found in output, cannot save '", "cnf_fs): rid = None frag_geo = _fragment_ring_geo(geo) if frag_geo is not None: frag_zma", "reactant_keys.append(automol.graph.atom_keys(gra)) product_keys = [] for gra in rxn_gras[1]: product_keys.append(automol.graph.atom_keys(gra)) std_rxn = automol.reac.Reaction( rxn_class,", "if dct[keyword] is None: print( 'ERROR: No filename is specified for {}'.format(keyword) +", "'theory': None, 'program': None, 'method': None, 'basis': None, 'orb_res': None, 'input_file': None, 'output_file':", "save_saddle_point( zrxn, ret, hess_ret, freqs, imags, mod_thy_info, {'runlvl_cnf_fs': (cnf_fs, None)}, locs, zma_locs=(0,), zma=zma)", "# 'imaginary frequency, projrot found the following' + # 'frequencies: ' + ','.join(imags))", "from mechroutines.es._routines.conformer import _fragment_ring_geo from mechroutines.es._routines._sadpt import save_saddle_point from mechlib.reaction.rxnid import _id_reaction THEORY_DCT", "matches the info in user given output') sys.exit() inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.OPTIMIZATION, prog=prog,", "orb_res = insert_dct['orb_res'] # Get input method from theory dictionary theory = insert_dct['theory']", "< 2: continue elif '!' 
in line[0]: continue line = line.split('!')[0] if ':'", "if len(bonds) != 2: print('too many bonds to transfered atom for me to", "None: print('could not figure out which H is being transfered') sys.exit() return chosen_ts_gra,", "ts_gras = automol.geom.connectivity_graph(geo, rqq_bond_max=3.5, rqh_bond_max=rqh, rhh_bond_max=2.3) ts_gras = automol.graph.set_stereo_from_geometry(ts_gras, geo) ts_gras = automol.graph.connected_components(ts_gras)", "prefix = read_user_filesystem(insert_dct) # Read in the input and output files that we", "'program': 'molpro2015', 'method': 'caspt2', 'basis': 'cc-pvdz'}, 'mlvl_cas_tz': { 'orb_res': 'RR', 'program': 'molpro2015', 'method':", "0 rxn_muls = [[], []] rxn_chgs = [[], []] for ich in ichs[0]:", "autofile.fs.theory(spc_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) # conformer cnf_fs = autofile.fs.conformer(thy_prefix) if locs is", "cinf_obj.nsamp = cnsampd else: cinf_obj = autofile.schema.info_objects.conformer_branch(0) cinf_obj.nsamp = 1 cnf_fs[1].create([locs[0]]) cnf_fs[0].file.info.write(rinf_obj) cnf_fs[1].file.info.write(cinf_obj,", "in enumerate(ichs[1]): if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) ichs[1][idx] = ich if mults", "locs=None): # species filesystem spc_fs = autofile.fs.species(prefix) spc_fs[-1].create(spc_info) spc_prefix = spc_fs[-1].path(spc_info) # theory", "= locs[0] break return rid def parse_script_input(script_input_file): script_input = autofile.io_.read_file(script_input_file).splitlines() insert_dct = {", "this geometry until user specified' + ' info matches the info in user", "= rng_loc_for_geo(geo, cnf_fs) if geo_rid is not None: if geo_rid != rid: print(", "= (inf_obj, inp_str, out_str) _, saved_geos, saved_enes = _saved_cnf_info( cnf_fs, mod_thy_info) if _geo_unique(geo,", "'cc-pvqz-f12' }, 'mlvl_cas_dz': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'caspt2', 'basis': 'cc-pvdz'}, 'mlvl_cas_tz':", "+ 
'{}'.format(automol.geom.string(geo))) match = False if mul not in mults_allowed: print( 'user specified", "= val.replace(' ', '') value = ' '.join(value) elif keyword not in ['smiles',", "transition state' + 'did not match those specified in user input') sys.exit() return", "}, 'lvl_b3s': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': '6-31g*' }, 'lvl_b3mg':", "rxn_prefix = rxn_fs[-1].path(sort_rxn_info) # theory filesystem thy_fs = autofile.fs.theory(rxn_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:])", "cnf_fs[-1].file.geometry.exists(locs): checked_rids.append(current_rid) locs_geo = cnf_fs[-1].file.geometry.read(locs) frag_locs_geo = _fragment_ring_geo(locs_geo) if frag_locs_geo is None: rid", "inp_str, out_str) save_saddle_point( zrxn, ret, hess_ret, freqs, imags, mod_thy_info, {'runlvl_cnf_fs': (cnf_fs, None)}, locs,", "if 'None' in value: value = None elif keyword in ['mult', 'charge', 'ts_mult']:", "is None: print( 'ERROR: No filename is specified for {}'.format(keyword) + 'Script will", "= read_user_file(insert_dct, 'input_file') out_str = read_user_file(insert_dct, 'output_file') # parse method from insert input", "output:\\n' + '{}'.format(automol.geom.string(geo))) match = False if mul not in mults_allowed: print( 'user", "= automol.graph.set_bond_orders(ts_gra, back_bnd_ord_dct) reactant_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(forward_gra)) reactant_gras = automol.graph.connected_components(reactant_gras) product_gras = automol.graph.without_dummy_bonds(", "saved_geos, saved_enes) if sym_id is None: if cnf_fs[0].file.info.exists(): rinf_obj = cnf_fs[0].file.info.read() else: rinf_obj", "Hess = None # If hess is None: # print( # 'No hessian", "x in range(26, 38, 2)] chosen_ts_gra = [] chosen_oversaturated_atom = None for rqh", "parsable, '.format(i, line) + 'script will exit until input is resolved to avoid'", "'ERROR: Keyword {} is not 
recognized'.format(keyword) + 'script will exit until inpupt is", "not an allowed multiplicty for inchi {}'.format(ich)) sys.exit() rxn_muls[1].append(mults[idx]) rxn_chgs[1].append(chgs[idx]) idx += 1", "ts_prefix, cnf_prefix)) def read_user_file(dct, keyword): if dct[keyword] is None: print( 'ERROR: No filename", "ts_zma, ts_geo, rxn_info def main(insert_dct): prefix = read_user_filesystem(insert_dct) # Read in the input", "products = value reactants = reactants.split(' + ') products = products.split(' + ')", "_, saved_geos, saved_enes = _saved_cnf_info( cnf_fs, mod_thy_info) if _geo_unique(geo, ene, saved_geos, saved_enes, zrxn=zrxn):", "+ ' that is in the THEORY_DCT' + 'please add it to the", "None: if program is None: print('Error: user did not specify program in input')", "# parse method from insert input file thy_info = parse_user_theory(insert_dct) # parse out", "= cnf_fs[-1].file.geometry.read(locs) frag_locs_geo = _fragment_ring_geo(locs_geo) if frag_locs_geo is None: rid = locs[0] break", "for i, line in enumerate(script_input): if len(line) < 2: continue elif '!' 
in", "'b2plypd3', 'basis': 'cc-pvdz'}, 'lvl_b2t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvtz'},", "# zrxn = zrxn_i # zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo) # elif", "state that has a single' + # 'imaginary frequency, projrot found the following'", "None: print('Error: user did not specify method in input') sys.exit() elif basis is", "'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvqz-f12' }, 'mlvl_cas_dz': { 'orb_res': 'RR', 'program': 'molpro2015',", "= [[], []] for i, side in enumerate(rxn_info[0]): for ich in side: rxn_smis[i].append(automol.inchi.smiles(ich))", "save a transition state that has a single' + # 'imaginary frequency, projrot", "'m062x', 'basis': '6-31+g*' }, 'lvl_m06t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis':", "mults[idx] not in mults_allowed: print( 'user specified mult of {}'.format(mults[idx]) + 'is not", "ich in enumerate(ichs[1]): if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) ichs[1][idx] = ich if", "for product in products: values[1].append(product.replace(' ', '')) value = values else: value =", "reactants and products found for the transition state' + 'did not match those", "' info matches the info in user given output') sys.exit() # Check that", "inputted program = insert_dct['program'] method = insert_dct['method'] basis = insert_dct['basis'] orb_res = insert_dct['orb_res']", "not specify basis in input') sys.exit() elif orb_res is None: print('Error: user did", "= value else: print( 'ERROR: Keyword {} is not recognized'.format(keyword) + 'script will", "print( 'The reactants and products found for the transition state' + 'did not", "automol.graph.connected_components(product_gras) ts_gras = [forward_gra, backward_gra] rxn_gras = [reactant_gras, product_gras] rxn_smis = [[], []]", "hess_ret = (hess_inf_obj, inp_str, out_str) save_saddle_point( zrxn, ret, hess_ret, freqs, imags, mod_thy_info, {'runlvl_cnf_fs':", 
"}, 'lvl_wbm': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': '6-31+g*' }, 'lvl_wbt':", "break if chosen_oversaturated_atom is None: print('could not figure out which H is being", "basis in input') sys.exit() elif orb_res is None: print('Error: user did not specify", "add it to the dct in the script or use program/method/basis/orb_dct' + 'keywords", "', '') value = ' '.join(value) elif keyword not in ['smiles', 'inchi']: value", "+ ' info matches the info in user given output') sys.exit() inf_obj =", "specify charges in input') sys.exit() flat_ichs = sum(ichs, []) if len(flat_ichs) != len(mults):", "side: rxn_smis[i].append(automol.inchi.smiles(ich)) ts_smis = [[], []] ts_ichs = [[], []] for rgra in", "= oversaturated_atoms[0] break if chosen_oversaturated_atom is None: print('could not figure out which H", "{} # dummies = [] # for dummy in dummy_key_dct.keys(): # add_idx =", "> dumm_j: # add_idx += 1 # dummies.append(dummy + add_idx) # remove_idx =", "zma_locs=(0,), zma=zma) else: _save_unique_parsed_conformer( mod_thy_info, cnf_fs, locs, (geo, zma, ene), inf_obj, inp_str, zrxn=zrxn,", "# print(zrxns) # sys.exit() # # hess = elstruct.reader.hessian(prog, out_str) # Hess =", "'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': 'cc-pvtz'}, 'cc_lvl_d': { 'orb_res': 'RR', 'program': 'molpro2015',", "if program is None: print('Error: user did not specify program in input') sys.exit()", "did not specify ts_mul') sys.exit() rxn_info = rinfo.sort((ichs, rxn_chgs, rxn_muls, ts_mult)) ts_info =", "= elstruct.reader.energy(prog, method, out_str) geo = elstruct.reader.opt_geometry(prog, out_str) if geo is None: print(", "for i, side in enumerate(rxn_info[0]): for ich in side: rxn_smis[i].append(automol.inchi.smiles(ich)) ts_smis = [[],", "}, 'cc_lvl_df': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvdz-f12' }, 'cc_lvl_tf':", "if automol.zmat.almost_equal( frag_locs_zma, frag_zma, dist_rtol=0.1, 
ang_atol=.4): rid = locs[0] break return rid def", "cinf_obj = autofile.schema.info_objects.conformer_branch(0) cinf_obj.nsamp = 1 cnf_fs[1].create([locs[0]]) cnf_fs[0].file.info.write(rinf_obj) cnf_fs[1].file.info.write(cinf_obj, [locs[0]]) hess, freqs, imags", "oversaturated_atoms = [atm for atm, val in vals.items() if val < 0] if", "'orb_res': 'RR', 'program': 'molpro2015', 'method': 'caspt2', 'basis': 'cc-pvdz'}, 'mlvl_cas_tz': { 'orb_res': 'RR', 'program':", "'.format(i, line) + 'script will exit until input is resolved to avoid' +", "'charge', 'ts_mult']: values = [] for val in value.split(','): values.append(int(val)) if len(values) ==", "['mult', 'charge', 'ts_mult']: values = [] for val in value.split(','): values.append(int(val)) if len(values)", "sys.exit() inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.OPTIMIZATION, prog=prog, version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) ret = (inf_obj,", "rxn_smis[0]: ts_ichs[0] = ts_ichs[0][::-1] ts_smis[0] = ts_smis[0][::-1] reactant_match = True else: ts_ichs =", "automol.reac.forming_bond_keys(zrxn_i, rev=True) # forw_brk_key = automol.reac.breaking_bond_keys(zrxn_i) # back_brk_key = automol.reac.breaking_bond_keys(zrxn_i, rev=True) # forward_gra", "'caspt2', 'basis': 'cc-pvtz'}} def parse_user_locs(insert_dct): rid = insert_dct['rid'] cid = insert_dct['cid'] if rid", "print('Error: user did not specify method in input') sys.exit() elif basis is None:", "recognized'.format(keyword) + 'script will exit until inpupt is resolved to avoid' + '", "sys.exit() if chgs is None: print('Error: user did not specify charges in input')", "if not insert_dct['saddle']: if not species_match(geo, spc_info): print( 'I refuse to save this", "the rid/cid info matches the filesystem fs_array, prefix_array = create_species_filesystems( prefix, spc_info, mod_thy_info,", "automol.reac.breaking_bond_keys(zrxn_i) # back_brk_key = automol.reac.breaking_bond_keys(zrxn_i, rev=True) # 
forward_gra = automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( #", "could be parsed from output' + 'Check that the program matches user specied'", "thy_info ene = elstruct.reader.energy(prog, method, out_str) geo = elstruct.reader.opt_geometry(prog, out_str) if geo is", "did not specify orb_res in input') sys.exit() else: thy_info = (program, method, basis,", "script or use program/method/basis/orb_dct' + 'keywords instead of theory') sys.exit() return thy_info def", "}, 'lvl_b3mg': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': '6-311g**' }, 'lvl_b3t':", "'Your geometry did not match any of the attempted ' + # 'zrxns,", "not specify species' + 'with an inchi or smiles in input') sys.exit() if", "# forward_gra = automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( # zrxn_i.forward_ts_graph))) # forward_gra =", "cid = autofile.schema.generate_new_conformer_id() return (rid, cid) def parse_user_species(insert_dct): smi = insert_dct['smiles'] ich =", "'is not an allowed multiplicty for inchi {}'.format(ich)) sys.exit() rxn_muls[0].append(mults[idx]) rxn_chgs[0].append(chgs[idx]) idx +=", "did not match any of the attempted ' + # 'zrxns, which are", "zma_keys, dummy_key_dct = automol.reac.ts_zmatrix( std_rxn, geo) std_zrxn = automol.reac.relabel_for_zmatrix( std_rxn, zma_keys, dummy_key_dct) rxn_info", "1 cnf_fs[1].create([locs[0]]) cnf_fs[0].file.info.write(rinf_obj) cnf_fs[1].file.info.write(cinf_obj, [locs[0]]) hess, freqs, imags = None, None, None if", "input') sys.exit() flat_ichs = sum(ichs, []) if len(flat_ichs) != len(mults): print( 'Error: number", "locs[0] break return rid def parse_script_input(script_input_file): script_input = autofile.io_.read_file(script_input_file).splitlines() insert_dct = { 'save_filesystem':", "38, 2)] chosen_ts_gra = [] chosen_oversaturated_atom = None for rqh in rqhs: ts_gras", "save ' + # 'a transition state without 
a hessian') # sys.exit() #", "the info in user given output') sys.exit() inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.OPTIMIZATION, prog=prog, version='',", "= insert_dct['charge'] rxn_class = insert_dct['rxn_class'] # zrxn_file = insert_dct['zrxn_file'] if ichs is None:", "locs_match(geo, cnf_fs, locs): print( 'I refuse to save this geometry until user specified'", "import automol from mechanalyzer.inf import thy as tinfo from mechanalyzer.inf import rxn as", "the output is not unique to filesystem' + '... not saving') def species_match(geo,", "in products: values[1].append(product.replace(' ', '')) value = values else: value = value[0].replace(' ',", "sure # user save specifications match output prog, method, basis, _ = thy_info", "did not specify method in input') sys.exit() elif basis is None: print('Error: user", "'ccsd(t)-f12', 'basis': 'cc-pvdz-f12' }, 'cc_lvl_tf': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis':", "a transition state that has a single' + # 'imaginary frequency, projrot found", "mod_thy_info, locs=None) else: fs_array, prefix_array = create_reaction_filesystems( prefix, rxn_info, mod_thy_info, ts_locs=insert_dct['ts_locs'], locs=None) cnf_fs", "'method': 'm062x', 'basis': '6-31+g*' }, 'lvl_m06t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x',", "insert_dct['program'] method = insert_dct['method'] basis = insert_dct['basis'] orb_res = insert_dct['orb_res'] # Get input", "else: ts_ichs = ts_ichs[::-1] ts_smis = ts_smis[::-1] ts_gras = ts_gras[::-1] rxn_gras = rxn_gras[::-1]", "reactant_match = False product_match = False if ts_smis[0] == rxn_smis[0]: reactant_match = True", "= autofile.schema.generate_new_conformer_id() return (rid, cid) def parse_user_species(insert_dct): smi = insert_dct['smiles'] ich = insert_dct['inchi']", "that has a single' + # 'imaginary frequency, projrot found the following' +", "is based on geometry from output:\\n' + '{}'.format(automol.geom.string(geo))) 
match = False if mul", "zrxn_i.forward_ts_graph))) # forward_gra = automol.graph.add_bonds(forward_gra, forw_form_key) # backward_gra = automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( #", "= (0,) ts_fs = autofile.fs.transition_state(thy_prefix) ts_fs[-1].create(ts_locs) ts_prefix = ts_fs[-1].path(ts_locs) # conformer cnf_fs =", "= 1 if cnf_fs[1].file.info.exists([locs[0]]): cinf_obj = cnf_fs[1].file.info.read(locs[0]) cnsampd = cinf_obj.nsamp cnsampd += 1", "'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvdz'}, 'cc_lvl_t': { 'orb_res': 'RR', 'program': 'molpro2015',", "'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': '6-311g**' }, 'lvl_b3t': { 'orb_res': 'RU',", "== rxn_smis[0]: reactant_match = True elif ts_smis[0][::-1] == rxn_smis[0]: ts_ichs[0] = ts_ichs[0][::-1] ts_smis[0]", "= [atm for atm, val in vals.items() if val < 0] if len(oversaturated_atoms)", "import rxn as rinfo from mechanalyzer.inf import spc as sinfo import elstruct import", "cnf_fs = autofile.fs.conformer(ts_prefix) if locs is not None: cnf_fs[-1].create(locs) cnf_prefix = cnf_fs[-1].path(locs) else:", "ts_smis[0][::-1] reactant_match = True else: ts_ichs = ts_ichs[::-1] ts_smis = ts_smis[::-1] ts_gras =", "ts_fs, cnf_fs), (rxn_prefix, thy_prefix, ts_prefix, cnf_prefix)) def read_user_file(dct, keyword): if dct[keyword] is None:", "+ '<Keyword>: <Value>\\n' + 'Allowed keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() keyword, value", "transition state that has a single' + # 'imaginary frequency, projrot found the", "= 0 rxn_muls = [[], []] rxn_chgs = [[], []] for ich in", "== 1: value = values[0] else: value = values elif keyword in ['ts_locs']:", "and products found for the transition state' + 'did not match those specified", "= bonds # when we move on to other reaction types we have", "script_str = autorun.SCRIPT_DCT['projrot'] # freqs, _, imags, _ = autorun.projrot.frequencies( # script_str, 
freq_run_path,", "hess = elstruct.reader.hessian(prog, out_str) # Hess = None # If hess is None:", "== rxn_smis[0]: ts_ichs[0] = ts_ichs[0][::-1] ts_smis[0] = ts_smis[0][::-1] reactant_match = True if reactant_match:", "'basis': '6-31g*' }, 'lvl_m06m': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': '6-31+g*'", "print( 'ERROR: Keyword {} is not recognized'.format(keyword) + 'script will exit until inpupt", "from output' + 'Check that the program matches user specied' + ' {}'.format(prog)", "method explicitly inputted program = insert_dct['program'] method = insert_dct['method'] basis = insert_dct['basis'] orb_res", "sys.exit() rxn_info = rinfo.sort((ichs, rxn_chgs, rxn_muls, ts_mult)) ts_info = rinfo.ts_info(rxn_info) # if zrxn_file", "rxn_fs[-1].path(sort_rxn_info) # theory filesystem thy_fs = autofile.fs.theory(rxn_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) if ts_locs", "print( 'user specified inchi {}'.format(ich) + 'does not match inchi from output {}'.format(geo_ich)", "= autofile.fs.theory(rxn_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) if ts_locs is None: ts_locs = (0,)", "= cnf_fs[0].file.info.read() else: rinf_obj = autofile.schema.info_objects.conformer_trunk(0) rinf_obj.nsamp = 1 if cnf_fs[1].file.info.exists([locs[0]]): cinf_obj =", "ichs = [[], []] for smi in smis[0]: ichs[0].append(automol.smiles.inchi(smi)) for smi in smis[1]:", "user did not specify mult in input') sys.exit() if chg is None: print('Error:", "should contain \"!\"' + 'Key format should be:\\n' + '<Keyword>: <Value>\\n' + 'Allowed", "'.join(value) elif keyword not in ['smiles', 'inchi']: value = value.replace(' ', '') else:", "species_match(geo, spc_info): print( 'I refuse to save this geometry until user specified' +", "species to your database usiing a log file \"\"\" import sys import os", "{}'.format(geo_ich) + 'which is based on geometry from output:\\n' + 
'{}'.format(automol.geom.string(geo))) match =", "matches the info in user given output') sys.exit() # Check that the rid/cid", "else: print( 'ERROR: Keyword {} is not recognized'.format(keyword) + 'script will exit until", "else: rinf_obj = autofile.schema.info_objects.conformer_trunk(0) rinf_obj.nsamp = 1 if cnf_fs[1].file.info.exists([locs[0]]): cinf_obj = cnf_fs[1].file.info.read(locs[0]) cnsampd", "state' + 'did not match those specified in user input') sys.exit() return std_zrxn,", "in mults_allowed: print( 'user specified mult of {}'.format(mults[idx]) + 'is not an allowed", "if mul not in mults_allowed: print( 'user specified mult of {}'.format(mul) + 'is", "'ccsd(t)', 'basis': 'cc-pvdz'}, 'cc_lvl_t': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvtz'},", "reactant_gras: try: rich = automol.graph.inchi(rgra, stereo=True) except IndexError: rich = automol.graph.inchi(rgra) rsmi =", "read_user_file(insert_dct, 'input_file') out_str = read_user_file(insert_dct, 'output_file') # parse method from insert input file", "prog=prog, version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) hess_ret = (hess_inf_obj, inp_str, out_str) save_saddle_point( zrxn, ret,", "value = value.split(' = ') if len(value) > 1: insert_dct['saddle'] = True reactants,", "True elif ts_smis[1][::-1] == rxn_smis[-1]: ts_ichs[1] = ts_ichs[1][::-1] ts_smis[1] = ts_smis[1][::-1] product_match =", "between words value = value.split() for i, val in enumerate(value): value[i] = val.replace('", "'6-311g**' }, 'lvl_b3t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': 'cc-pvtz'}, 'cc_lvl_d':", "inchi {}'.format(ich)) sys.exit() rxn_muls[0].append(mults[idx]) rxn_chgs[0].append(chgs[idx]) idx += 1 for ich in ichs[1]: mults_allowed", "version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) ret = (inf_obj, inp_str, out_str) _, saved_geos, saved_enes =", "user did not specify mults in input') 
sys.exit() if chgs is None: print('Error:", "!= rid: print( 'Error: rid mismatch for the filesystem at' + ' {}'.format(cnf_fs[0].path())", "orb_res in input') sys.exit() else: thy_info = (program, method, basis, orb_res) else: if", "sinfo import elstruct import autorun from mechroutines.es._routines.conformer import _saved_cnf_info from mechroutines.es._routines.conformer import _sym_unique", "from mechroutines.es._routines._sadpt import save_saddle_point from mechlib.reaction.rxnid import _id_reaction THEORY_DCT = { 'lvl_wbs': {", "THEORY_DCT' + 'please add it to the dct in the script or use", "# if dummy > dumm_j: # add_idx += 1 # dummies.append(dummy + add_idx)", "'ERROR: No save_filesystem}' + 'Script will exit') sys.exit() return dct['save_filesystem'] def choose_cutoff_distance(geo): rqhs", "insert_dct['zrxn_file'] if ichs is None: ichs = [[], []] for smi in smis[0]:", "'method': 'ccsd(t)', 'basis': 'cc-pvtz'}, 'cc_lvl_q': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis':", "fs_array, prefix_array = create_reaction_filesystems( prefix, rxn_info, mod_thy_info, ts_locs=insert_dct['ts_locs'], locs=None) cnf_fs = fs_array[-1] if", "frag_geo is not None: frag_zma = automol.geom.zmatrix(frag_geo) checked_rids = [] for locs in", "a species to your database usiing a log file \"\"\" import sys import", "= False product_match = False if ts_smis[0] == rxn_smis[0]: reactant_match = True elif", "None, 'orb_res': None, 'input_file': None, 'output_file': None, 'ts_locs': None, 'ts_mult': None, 'rxn_class': None,", "= automol.graph.inchi(rgra) rsmi = automol.inchi.smiles(rich) ts_ichs[0].append(rich) ts_smis[0].append(rsmi) for pgra in product_gras: try: pich", "2: print('too many bonds to transfered atom for me to figure out') print('I", "print( 'user specified mult of {}'.format(mul) + 'is not an allowed multiplicty for", "'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': '6-31+g*' }, 'lvl_wbt': { 'orb_res': 'RU', 'program':", 
"automol.graph.connected_components(reactant_gras) product_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(backward_gra)) product_gras = automol.graph.connected_components(product_gras) ts_gras = [forward_gra, backward_gra] rxn_gras", "autorun.projrot.frequencies( # script_str, freq_run_path, [geo], [[]], [hess]) # if len(imags) != 1: #", "that the save location matches geo information if not insert_dct['saddle']: if not species_match(geo,", "in rxn_gras[1]: product_keys.append(automol.graph.atom_keys(gra)) std_rxn = automol.reac.Reaction( rxn_class, *ts_gras, reactant_keys, product_keys) ts_zma, zma_keys, dummy_key_dct", "# remove_idx -= 1 # else: # geo_reorder_dct[idx_i + remove_idx] = idx_j #", "forw_brk_key) # backward_gra = automol.graph.remove_bonds(backward_gra, back_brk_key) # print('forRXN', automol.graph.string(zrxn_i.forward_ts_graph)) # print('forRXN', automol.graph.string(forward_gra)) #", "the program matches user specied' + ' {}'.format(prog) + ' and method matches'", "i, val in enumerate(value): value[i] = val.replace(' ', '') value = ' '.join(value)", "those specified in user input') sys.exit() return std_zrxn, ts_zma, ts_geo, rxn_info def main(insert_dct):", "# for zrxn_i in zrxns: # print(zrxns) # sys.exit() # # hess =", "scheme='autofile') rxn_fs[-1].create(sort_rxn_info) rxn_prefix = rxn_fs[-1].path(sort_rxn_info) # theory filesystem thy_fs = autofile.fs.theory(rxn_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix", "add_idx = 1 # for dumm_j in dummies: # if dummy > dumm_j:", "charges in input') sys.exit() flat_ichs = sum(ichs, []) if len(flat_ichs) != len(mults): print(", "'lvl_b2q': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvqz' }, 'lvl_b3s': {", "= automol.geom.connectivity_graph(geo, rqq_bond_max=3.5, rqh_bond_max=rqh, rhh_bond_max=2.3) ts_gras = automol.graph.set_stereo_from_geometry(ts_gras, geo) ts_gras = automol.graph.connected_components(ts_gras) if", "ichs[1]: 
mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) if mults[idx] not in mults_allowed: print( 'user", "ichs[1][idx] = ich if mults is None: print('Error: user did not specify mults", "idx = 0 rxn_muls = [[], []] rxn_chgs = [[], []] for ich", "mod_thy_info, locs=None): # species filesystem spc_fs = autofile.fs.species(prefix) spc_fs[-1].create(spc_info) spc_prefix = spc_fs[-1].path(spc_info) #", "method, out_str) geo = elstruct.reader.opt_geometry(prog, out_str) if geo is None: print( 'No geometry", "= automol.graph.connected_components(reactant_gras) product_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(backward_gra)) product_gras = automol.graph.connected_components(product_gras) ts_gras = [forward_gra, backward_gra]", "!= len(mults): print( 'Error: number of species does not match number of mults')", "specified inchi {}'.format(ich) + 'does not match inchi from output {}'.format(geo_ich) + 'which", "{}'.format(mul) + 'is not an allowed multiplicty for inchi {}'.format(ich)) match = False", "{ 'save_filesystem': None, 'smiles': None, 'inchi': None, 'mult': None, 'charge': None, 'rid': None,", "# zrxn_file = insert_dct['zrxn_file'] if ichs is None: ichs = [[], []] for", "rxn_muls = [[], []] rxn_chgs = [[], []] for ich in ichs[0]: mults_allowed", "'wb97xd', 'basis': '6-31+g*' }, 'lvl_wbt': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis':", "None: cid = autofile.schema.generate_new_conformer_id() return (rid, cid) def parse_user_species(insert_dct): smi = insert_dct['smiles'] ich", "'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvtz-f12' }, 'cc_lvl_qf': { 'orb_res': 'RR',", "'m062x', 'basis': 'cc-pvtz'}, 'lvl_b2d': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvdz'},", "'None' in value: value = None elif keyword in ['mult', 'charge', 'ts_mult']: values", "is not unique to filesystem' + '... 
not saving') def species_match(geo, spc_info): match", "in user input') sys.exit() return std_zrxn, ts_zma, ts_geo, rxn_info def main(insert_dct): prefix =", "= [[], []] rxn_chgs = [[], []] for ich in ichs[0]: mults_allowed =", "is None: print('Error: user did not specify method in input') sys.exit() elif basis", "cid = insert_dct['cid'] if rid is None: rid = autofile.schema.generate_new_ring_id() if cid is", "ts_smis[1] = ts_smis[1][::-1] product_match = True if reactant_match and product_match: reactant_keys = []", "+ 'Script will exit') sys.exit() return dct['save_filesystem'] def choose_cutoff_distance(geo): rqhs = [x *", "the filesystem inp_str = read_user_file(insert_dct, 'input_file') out_str = read_user_file(insert_dct, 'output_file') # parse method", "< 0] if len(oversaturated_atoms) == 1: chosen_ts_gra = ts_gras[0] chosen_oversaturated_atom = oversaturated_atoms[0] break", "your database usiing a log file \"\"\" import sys import os import autofile", "insert_dct['mult'] chgs = insert_dct['charge'] rxn_class = insert_dct['rxn_class'] # zrxn_file = insert_dct['zrxn_file'] if ichs", "None: cnf_fs[-1].create(locs) cnf_prefix = cnf_fs[-1].path(locs) else: cnf_prefix = None return ( (spc_fs, thy_fs,", "= None if insert_dct['saddle']: rxn_info, spc_info, rxn_class = parse_user_reaction(insert_dct) zrxn, zma, geo, rxn_info", "rid is None: rid = autofile.schema.generate_new_ring_id() if cid is None: cid = autofile.schema.generate_new_conformer_id()", "= automol.graph.connected_components(product_gras) ts_gras = [forward_gra, backward_gra] rxn_gras = [reactant_gras, product_gras] rxn_smis = [[],", "zma=zma) else: _save_unique_parsed_conformer( mod_thy_info, cnf_fs, locs, (geo, zma, ene), inf_obj, inp_str, zrxn=zrxn, zma_locs=(0,))", "mechanalyzer.inf import spc as sinfo import elstruct import autorun from mechroutines.es._routines.conformer import _saved_cnf_info", "if rid is None: rid = autofile.schema.generate_new_ring_id() if cid is None: cid =", 
"rxn_muls[1].append(mults[idx]) rxn_chgs[1].append(chgs[idx]) idx += 1 ts_mult = insert_dct['ts_mult'] if ts_mult is None: print(", "the attempted ' + # 'zrxns, which are the following') # for zrxn_i", "conformer cnf_fs = autofile.fs.conformer(thy_prefix) if locs is not None: cnf_fs[-1].create(locs) cnf_prefix = cnf_fs[-1].path(locs)", "rid = locs[0] geo_rid = rng_loc_for_geo(geo, cnf_fs) if geo_rid is not None: if", "import spc as sinfo import elstruct import autorun from mechroutines.es._routines.conformer import _saved_cnf_info from", "+ ' info matches the info in user given output') sys.exit() # Check", "None: # run_path = os.getcwd() # run_fs = autofile.fs.run(run_path) # freq_run_path = run_fs[-1].path(['hessian'])", "= rinfo.ts_info(rxn_info) # if zrxn_file is not None: # zrxn_str = autofile.io_.read_file(zrxn_file) #", "frag_locs_geo is None: rid = locs[0] break frag_locs_zma = automol.geom.zmatrix(frag_locs_geo) if automol.zmat.almost_equal( frag_locs_zma,", "ich in enumerate(ichs[0]): if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) ichs[0][idx] = ich for", "rxn as rinfo from mechanalyzer.inf import spc as sinfo import elstruct import autorun", "zrxn = None if insert_dct['saddle']: rxn_info, spc_info, rxn_class = parse_user_reaction(insert_dct) zrxn, zma, geo,", "inp_str, out_str) _, saved_geos, saved_enes = _saved_cnf_info( cnf_fs, mod_thy_info) if _geo_unique(geo, ene, saved_geos,", "if insert_dct['saddle']: rxn_info, spc_info, rxn_class = parse_user_reaction(insert_dct) zrxn, zma, geo, rxn_info = get_zrxn(geo,", "keyword in ['ts_locs']: value = (int(value),) elif keyword in ['rxn_class']: # strip whitespaces", "autorun from mechroutines.es._routines.conformer import _saved_cnf_info from mechroutines.es._routines.conformer import _sym_unique from mechroutines.es._routines.conformer import _save_unique_parsed_conformer", "'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': '6-31g*' }, 'lvl_b3mg': { 'orb_res': 'RU', 
'program':", "for locs in cnf_fs[-1].existing(): current_rid, _ = locs if current_rid in checked_rids: continue", "forming_bond = bonds # when we move on to other reaction types we", "out geo information first, to make sure # user save specifications match output", "pich = automol.graph.inchi(pgra, stereo=True) except IndexError: pich = automol.graph.inchi(pgra) psmi = automol.inchi.smiles(pich) ts_ichs[1].append(pich)", "program is None: print('Error: user did not specify program in input') sys.exit() elif", "{ 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': 'cc-pvtz'}, 'cc_lvl_d': { 'orb_res': 'RR',", "did not specify basis in input') sys.exit() elif orb_res is None: print('Error: user", "ts_smis[0][::-1] == rxn_smis[0]: ts_ichs[0] = ts_ichs[0][::-1] ts_smis[0] = ts_smis[0][::-1] reactant_match = True if", "len(line) < 2: continue elif '!' in line[0]: continue line = line.split('!')[0] if", "for rqh in rqhs: ts_gras = automol.geom.connectivity_graph(geo, rqq_bond_max=3.5, rqh_bond_max=rqh, rhh_bond_max=2.3) ts_gras = automol.graph.set_stereo_from_geometry(ts_gras,", "'Error: rid mismatch for the filesystem at' + ' {}'.format(cnf_fs[0].path()) + '\\nthe expected", "zrxns = [automol.reac.from_string(zrxn_str)] # else: # zrxns, _ = _id_reaction(rxn_info) if rxn_class is", "if rxn_class is None: print( 'Error: user did not specify rxn_class') sys.exit() return", "autofile.io_.read_file(file_name) def read_user_filesystem(dct): if dct['save_filesystem'] is None: print( 'ERROR: No save_filesystem}' + 'Script", "{breaking_bond: 0.9, forming_bond: 0.1} back_bnd_ord_dct = {breaking_bond: 0.1, forming_bond: 0.9} forward_gra = automol.graph.set_bond_orders(ts_gra,", "'does not match inchi from output {}'.format(geo_ich) + 'which is based on geometry", "which H is being transfered') sys.exit() return chosen_ts_gra, chosen_oversaturated_atom def get_zrxn(geo, rxn_info, rxn_class):", "match = True rid = locs[0] geo_rid = rng_loc_for_geo(geo, cnf_fs) if 
geo_rid is", "be:\\n' + '<Keyword>: <Value>\\n' + 'Allowed keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() keyword,", "ts_gras = [forward_gra, backward_gra] rxn_gras = [reactant_gras, product_gras] rxn_smis = [[], []] for", "if frag_locs_geo is None: rid = locs[0] break frag_locs_zma = automol.geom.zmatrix(frag_locs_geo) if automol.zmat.almost_equal(", "'... not saving') def species_match(geo, spc_info): match = True ich, _, mul =", "in input') sys.exit() elif orb_res is None: print('Error: user did not specify orb_res", "a hessian') # sys.exit() # run_path = insert_dct['run_path'] # if run_path is None:", "print( 'Error: user did not specify rxn_class') sys.exit() return rxn_info, ts_info, rxn_class def", "None: print('Error: user did not specify mult in input') sys.exit() if chg is", "= automol.reac.relabel_for_zmatrix( std_rxn, zma_keys, dummy_key_dct) rxn_info = (ts_ichs, *rxn_info[1:]) ts_geo = automol.zmat.geometry(ts_zma) #", "'zrxn_file': None, 'run_path': None, 'saddle': False, } for i, line in enumerate(script_input): if", "automol from mechanalyzer.inf import thy as tinfo from mechanalyzer.inf import rxn as rinfo", "ts_smis[0] == rxn_smis[0]: reactant_match = True elif ts_smis[0][::-1] == rxn_smis[0]: ts_ichs[0] = ts_ichs[0][::-1]", "= (hess_inf_obj, inp_str, out_str) save_saddle_point( zrxn, ret, hess_ret, freqs, imags, mod_thy_info, {'runlvl_cnf_fs': (cnf_fs,", "ts_ichs[::-1] ts_smis = ts_smis[::-1] ts_gras = ts_gras[::-1] rxn_gras = rxn_gras[::-1] if ts_smis[0] ==", "for ich in ichs[1]: mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) if mults[idx] not in", "in rqhs: ts_gras = automol.geom.connectivity_graph(geo, rqq_bond_max=3.5, rqh_bond_max=rqh, rhh_bond_max=2.3) ts_gras = automol.graph.set_stereo_from_geometry(ts_gras, geo) ts_gras", "'The reactants and products found for the transition state' + 'did not match", "value reactants = reactants.split(' + ') products = 
products.split(' + ') values =", "'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvtz'}, 'lvl_b2q': { 'orb_res': 'RU', 'program':", "refuse to save this geometry until user specified' + ' info matches the", "= insert_dct['zrxn_file'] if ichs is None: ichs = [[], []] for smi in", "= automol.smiles.inchi(smi) if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) if mult is None: print('Error:", "= automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) geo_ich = automol.geom.inchi(geo, stereo=True) if ich != geo_ich: print(", "program = insert_dct['program'] method = insert_dct['method'] basis = insert_dct['basis'] orb_res = insert_dct['orb_res'] #", "insert_dct['charge'] if ich is None and smi is None: print( 'Error: user did", "'user specified mult of {}'.format(mults[idx]) + 'is not an allowed multiplicty for inchi", "' + # 'a transition state without a hessian') # sys.exit() # run_path", "') products = products.split(' + ') values = [[], []] for reactant in", "parse_user_locs(insert_dct): rid = insert_dct['rid'] cid = insert_dct['cid'] if rid is None: rid =", "'6-31+g*' }, 'lvl_m06t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': 'cc-pvtz'}, 'lvl_b2d':", "'method': 'ccsd(t)-f12', 'basis': 'cc-pvdz-f12' }, 'cc_lvl_tf': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12',", "rinf_obj = autofile.schema.info_objects.conformer_trunk(0) rinf_obj.nsamp = 1 if cnf_fs[1].file.info.exists([locs[0]]): cinf_obj = cnf_fs[1].file.info.read(locs[0]) cnsampd =", "# when we move on to other reaction types we have to check", "_ = automol.reac.ts_zmatrix(zrxn, geo) # if zrxn is None: # print( # 'Your", "if reactant_match: if ts_smis[1] == rxn_smis[1]: product_match = True elif ts_smis[1][::-1] == rxn_smis[-1]:", "= insert_dct['rxn_class'] # zrxn_file = insert_dct['zrxn_file'] if ichs is None: ichs = [[],", "attempted ' + # 'zrxns, which are the following') # for 
zrxn_i in", "automol.reac.breaking_bond_keys(zrxn_i, rev=True) # forward_gra = automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( # zrxn_i.forward_ts_graph))) #", "= automol.reac.breaking_bond_keys(zrxn_i) # back_brk_key = automol.reac.breaking_bond_keys(zrxn_i, rev=True) # forward_gra = automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds(", "if cnf_fs[1].file.info.exists([locs[0]]): cinf_obj = cnf_fs[1].file.info.read(locs[0]) cnsampd = cinf_obj.nsamp cnsampd += 1 cinf_obj.nsamp =", "automol.reac.forming_bond_keys(zrxn_i) # back_form_key = automol.reac.forming_bond_keys(zrxn_i, rev=True) # forw_brk_key = automol.reac.breaking_bond_keys(zrxn_i) # back_brk_key =", "+ 'script will exit until input is resolved to avoid' + ' filesystem", "product in products: values[1].append(product.replace(' ', '')) value = values else: value = value[0].replace('", "product_gras = automol.graph.connected_components(product_gras) ts_gras = [forward_gra, backward_gra] rxn_gras = [reactant_gras, product_gras] rxn_smis =", "status=autofile.schema.RunStatus.SUCCESS) ret = (inf_obj, inp_str, out_str) _, saved_geos, saved_enes = _saved_cnf_info( cnf_fs, mod_thy_info)", "rxn_class): ts_gra, oversaturated_atom = choose_cutoff_distance(geo) atoms_bnd = automol.graph.atoms_bond_keys(ts_gra) bonds = atoms_bnd[oversaturated_atom] if len(bonds)", "ich for idx, ich in enumerate(ichs[1]): if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) ichs[1][idx]", "rxn_chgs[1].append(chgs[idx]) idx += 1 ts_mult = insert_dct['ts_mult'] if ts_mult is None: print( 'Error:", "is None: print( 'Error: user did not specify species' + 'with an inchi", "= (program, method, basis, orb_res) else: if theory in THEORY_DCT: thy_info = tinfo.from_dct(THEORY_DCT[theory])", "to make sure # user save specifications match output prog, method, basis, _", "'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 
'basis': '6-31+g*' }, 'lvl_wbt': { 'orb_res': 'RU',", "= thy_info ene = elstruct.reader.energy(prog, method, out_str) geo = elstruct.reader.opt_geometry(prog, out_str) if geo", "spc_fs[-1].path(spc_info) # theory filesystem thy_fs = autofile.fs.theory(spc_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) # conformer", "state without a hessian') # sys.exit() # run_path = insert_dct['run_path'] # if run_path", "= tinfo.from_dct(THEORY_DCT[theory]) else: print( 'Error: user did not specify a theory {}'.format(theory) +", "in checked_rids: continue if cnf_fs[-1].file.geometry.exists(locs): checked_rids.append(current_rid) locs_geo = cnf_fs[-1].file.geometry.read(locs) frag_locs_geo = _fragment_ring_geo(locs_geo) if", "'method': 'ccsd(t)-f12', 'basis': 'cc-pvqz-f12' }, 'mlvl_cas_dz': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'caspt2',", "dct in the script or use program/method/basis/orb_dct' + 'keywords instead of theory') sys.exit()", "= None # If hess is None: # print( # 'No hessian found", "1: # print( # 'Can only save a transition state that has a", "print('I promise i will be smarter in the future') sys.exit() breaking_bond, forming_bond =", "= create_species_filesystems( prefix, spc_info, mod_thy_info, locs=None) else: fs_array, prefix_array = create_reaction_filesystems( prefix, rxn_info,", "= automol.graph.add_bonds(forward_gra, forw_form_key) # backward_gra = automol.graph.without_stereo_parities( # automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( # zrxn_i.backward_ts_graph)))", "if mults[idx] not in mults_allowed: print( 'user specified mult of {}'.format(mults[idx]) + 'is", "= autofile.fs.species(prefix) spc_fs[-1].create(spc_info) spc_prefix = spc_fs[-1].path(spc_info) # theory filesystem thy_fs = autofile.fs.theory(spc_prefix) thy_fs[-1].create(mod_thy_info[1:])", "= automol.inchi.add_stereo(ich) ichs[0][idx] = ich for idx, ich in enumerate(ichs[1]): if not 
automol.inchi.is_complete(ich):", "zrxn_file is not None: # zrxn_str = autofile.io_.read_file(zrxn_file) # zrxns = [automol.reac.from_string(zrxn_str)] #", "theory filesystem thy_fs = autofile.fs.theory(spc_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) # conformer cnf_fs =", "'lvl_b2d': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvdz'}, 'lvl_b2t': { 'orb_res':", "ts_fs[-1].path(ts_locs) # conformer cnf_fs = autofile.fs.conformer(ts_prefix) if locs is not None: cnf_fs[-1].create(locs) cnf_prefix", "= read_user_file(insert_dct, 'output_file') # parse method from insert input file thy_info = parse_user_theory(insert_dct)", "if mults is None: print('Error: user did not specify mults in input') sys.exit()", "in the output is not unique to filesystem' + '... not saving') def", "False if mul not in mults_allowed: print( 'user specified mult of {}'.format(mul) +", "rxn_fs[-1].create(sort_rxn_info) rxn_prefix = rxn_fs[-1].path(sort_rxn_info) # theory filesystem thy_fs = autofile.fs.theory(rxn_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix =", "is {}'.format(rid)) match = False return match def rng_loc_for_geo(geo, cnf_fs): rid = None", "+ # 'frequencies: ' + ','.join(imags)) # sys.exit() else: spc_info = parse_user_species(insert_dct) mod_thy_info", "rid def parse_script_input(script_input_file): script_input = autofile.io_.read_file(script_input_file).splitlines() insert_dct = { 'save_filesystem': None, 'smiles': None,", "'basis': '6-31g*' }, 'lvl_wbm': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': '6-31+g*'", "forw_brk_key = automol.reac.breaking_bond_keys(zrxn_i) # back_brk_key = automol.reac.breaking_bond_keys(zrxn_i, rev=True) # forward_gra = automol.graph.without_stereo_parities( #", "sys.exit() elif basis is None: print('Error: user did not specify basis in input')", "specied' + ' {}'.format(prog) + ' and method matches' + ' {}'.format(method)) 
sys.exit()", "# freqs, _, imags, _ = autorun.projrot.frequencies( # script_str, freq_run_path, [geo], [[]], [hess])", "basis is None: print('Error: user did not specify basis in input') sys.exit() elif", "input file thy_info = parse_user_theory(insert_dct) # parse out geo information first, to make", "= [] for gra in rxn_gras[0]: reactant_keys.append(automol.graph.atom_keys(gra)) product_keys = [] for gra in", "elif keyword in ['rxn_class']: # strip whitespaces form either side of reaction #", "'lvl_b2t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvtz'}, 'lvl_b2q': { 'orb_res':", "mults') sys.exit() if len(flat_ichs) != len(chgs): print( 'Error: number of species does not", "to check for double # bonds when doing bond orders forw_bnd_ord_dct = {breaking_bond:", "= { 'save_filesystem': None, 'smiles': None, 'inchi': None, 'mult': None, 'charge': None, 'rid':", "ts_geo, rxn_info def main(insert_dct): prefix = read_user_filesystem(insert_dct) # Read in the input and", "else: # zrxns, _ = _id_reaction(rxn_info) if rxn_class is None: print( 'Error: user", "input method from theory dictionary theory = insert_dct['theory'] if theory is None: if", "= elstruct.reader.opt_geometry(prog, out_str) if geo is None: print( 'No geometry could be parsed", "geo_rid is not None: if geo_rid != rid: print( 'Error: rid mismatch for", "hess is not None and zrxn is not None: hess_inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.HESSIAN,", "= autofile.fs.transition_state(thy_prefix) ts_fs[-1].create(ts_locs) ts_prefix = ts_fs[-1].path(ts_locs) # conformer cnf_fs = autofile.fs.conformer(ts_prefix) if locs", "'method': 'wb97xd', 'basis': 'cc-pvtz'}, 'lvl_m06s': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis':", "create_reaction_filesystems( prefix, rxn_info, mod_thy_info, ts_locs=None, locs=None): # species filesystem print('rxn_info', rxn_info) rxn_fs =", "is None: print('Error: user did not specify mult in input') 
sys.exit() if chg", "rxn_fs = autofile.fs.reaction(prefix) sort_rxn_info = rinfo.sort(rxn_info, scheme='autofile') rxn_fs[-1].create(sort_rxn_info) rxn_prefix = rxn_fs[-1].path(sort_rxn_info) # theory", "+ 'Check that the program matches user specied' + ' {}'.format(prog) + '", "from mechanalyzer.inf import thy as tinfo from mechanalyzer.inf import rxn as rinfo from", "ich = automol.inchi.add_stereo(ich) if mult is None: print('Error: user did not specify mult", "insert_dct['theory'] if theory is None: if program is None: print('Error: user did not", "not in mults_allowed: print( 'user specified mult of {}'.format(mul) + 'is not an", "if reactant_match and product_match: reactant_keys = [] for gra in rxn_gras[0]: reactant_keys.append(automol.graph.atom_keys(gra)) product_keys", "bonds = atoms_bnd[oversaturated_atom] if len(bonds) != 2: print('too many bonds to transfered atom", "None: # print( # 'Your geometry did not match any of the attempted", "zrxn_i.class_ == 'hydrogen abstraction': # forward_gra = automol.graph.remove_bonds(forward_gra, forw_brk_key) # backward_gra = automol.graph.remove_bonds(backward_gra,", "cnf_fs[0].file.info.exists(): rinf_obj = cnf_fs[0].file.info.read() else: rinf_obj = autofile.schema.info_objects.conformer_trunk(0) rinf_obj.nsamp = 1 if cnf_fs[1].file.info.exists([locs[0]]):", "rid = locs[0] break frag_locs_zma = automol.geom.zmatrix(frag_locs_geo) if automol.zmat.almost_equal( frag_locs_zma, frag_zma, dist_rtol=0.1, ang_atol=.4):", "not match inchi from output {}'.format(geo_ich) + 'which is based on geometry from", "ang_atol=.4): rid = locs[0] break return rid def parse_script_input(script_input_file): script_input = autofile.io_.read_file(script_input_file).splitlines() insert_dct", "print( 'I refuse to save this geometry until user specified' + ' info", "= insert_dct['method'] basis = insert_dct['basis'] orb_res = insert_dct['orb_res'] # Get input method from", "len(ts_gras) != 1: continue for ts_gra_i in ts_gras: vals = 
automol.graph.atom_unsaturated_valences(ts_gra_i, bond_order=True) oversaturated_atoms", "saved_enes, zrxn=zrxn): sym_id = _sym_unique( geo, ene, saved_geos, saved_enes) if sym_id is None:", "\"!\"' + 'Key format should be:\\n' + '<Keyword>: <Value>\\n' + 'Allowed keywords are:\\n'", "that the program matches user specied' + ' {}'.format(prog) + ' and method", "is None: # print( # 'Your geometry did not match any of the", "None, 'mult': None, 'charge': None, 'rid': None, 'cid': None, 'theory': None, 'program': None,", "mechroutines.es._routines.conformer import _geo_unique from mechroutines.es._routines.conformer import _fragment_ring_geo from mechroutines.es._routines._sadpt import save_saddle_point from mechlib.reaction.rxnid", "mechlib.reaction.rxnid import _id_reaction THEORY_DCT = { 'lvl_wbs': { 'orb_res': 'RU', 'program': 'gaussian09', 'method':", "= parse_user_reaction(insert_dct) zrxn, zma, geo, rxn_info = get_zrxn(geo, rxn_info, rxn_class) # for zrxn_i", "= True if reactant_match and product_match: reactant_keys = [] for gra in rxn_gras[0]:", "file_name = dct[keyword] return autofile.io_.read_file(file_name) def read_user_filesystem(dct): if dct['save_filesystem'] is None: print( 'ERROR:", "in the THEORY_DCT' + 'please add it to the dct in the script", "ts_ichs[0] = ts_ichs[0][::-1] ts_smis[0] = ts_smis[0][::-1] reactant_match = True if reactant_match: if ts_smis[1]", "automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( # zrxn_i.forward_ts_graph))) # forward_gra = automol.graph.add_bonds(forward_gra, forw_form_key) # backward_gra =", "# remove_idx = 0 # for idx_i, idx_j in enumerate(zma_keys): # if idx_i", "'Error: user did not specify rxn_class') sys.exit() return rxn_info, ts_info, rxn_class def parse_user_theory(insert_dct):", "== 1: chosen_ts_gra = ts_gras[0] chosen_oversaturated_atom = oversaturated_atoms[0] break if chosen_oversaturated_atom is None:", "from output:\\n' + '{}'.format(automol.geom.string(geo))) match = False 
if mul not in mults_allowed: print(", "locs is not None: cnf_fs[-1].create(locs) cnf_prefix = cnf_fs[-1].path(locs) else: cnf_prefix = None return", "file \"\"\" import sys import os import autofile import automol from mechanalyzer.inf import", "'cc_lvl_t': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvtz'}, 'cc_lvl_q': { 'orb_res':", "'gaussian09', 'method': 'b2plypd3', 'basis': 'cc-pvtz'}, 'lvl_b2q': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b2plypd3',", "+ # 'a transition state without a hessian') # sys.exit() # run_path =", "whitespaces form either side of reaction # class but not in between words", "rinfo from mechanalyzer.inf import spc as sinfo import elstruct import autorun from mechroutines.es._routines.conformer", "to other reaction types we have to check for double # bonds when", "[]] for i, side in enumerate(rxn_info[0]): for ich in side: rxn_smis[i].append(automol.inchi.smiles(ich)) ts_smis =", "# if zrxn_file is not None: # zrxn_str = autofile.io_.read_file(zrxn_file) # zrxns =", "'method': 'ccsd(t)', 'basis': 'cc-pvdz'}, 'cc_lvl_t': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis':", "theory {}'.format(theory) + ' that is in the THEORY_DCT' + 'please add it", "rxn_smis[0]: ts_ichs[0] = ts_ichs[0][::-1] ts_smis[0] = ts_smis[0][::-1] reactant_match = True if reactant_match: if", "match def rng_loc_for_geo(geo, cnf_fs): rid = None frag_geo = _fragment_ring_geo(geo) if frag_geo is", "ts_gras = ts_gras[::-1] rxn_gras = rxn_gras[::-1] if ts_smis[0] == rxn_smis[0]: reactant_match = True", "not insert_dct['saddle']: if not species_match(geo, spc_info): print( 'I refuse to save this geometry", "'method': 'b3lyp', 'basis': 'cc-pvtz'}, 'cc_lvl_d': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis':", "reactants: values[0].append(reactant.replace(' ', '')) for product in products: values[1].append(product.replace(' ', '')) value =", "[hess]) # if len(imags) != 1: # 
print( # 'Can only save a", "in ['mult', 'charge', 'ts_mult']: values = [] for val in value.split(','): values.append(int(val)) if", "[]] rxn_chgs = [[], []] for ich in ichs[0]: mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich,", "sys.exit() # run_path = insert_dct['run_path'] # if run_path is None: # run_path =", "matches the filesystem fs_array, prefix_array = create_species_filesystems( prefix, spc_info, mod_thy_info, locs=None) else: fs_array,", "*rxn_info[1:]) ts_geo = automol.zmat.geometry(ts_zma) # geo_reorder_dct = {} # dummies = [] #", "number of mults') sys.exit() if len(flat_ichs) != len(chgs): print( 'Error: number of species", "'method': 'b2plypd3', 'basis': 'cc-pvqz' }, 'lvl_b3s': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp',", "if forward_gra == automol.geom.graph(geo, stereo=False): # zrxn = zrxn_i # zma, _, _", "product_gras] rxn_smis = [[], []] for i, side in enumerate(rxn_info[0]): for ich in", "spc_info): match = True ich, _, mul = spc_info mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich,", "program matches user specied' + ' {}'.format(prog) + ' and method matches' +", "= True else: ts_ichs = ts_ichs[::-1] ts_smis = ts_smis[::-1] ts_gras = ts_gras[::-1] rxn_gras", "saving') def species_match(geo, spc_info): match = True ich, _, mul = spc_info mults_allowed", "'gaussian09', 'method': 'm062x', 'basis': '6-31+g*' }, 'lvl_m06t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method':", "or use program/method/basis/orb_dct' + 'keywords instead of theory') sys.exit() return thy_info def create_species_filesystems(prefix,", "1 cinf_obj.nsamp = cnsampd else: cinf_obj = autofile.schema.info_objects.conformer_branch(0) cinf_obj.nsamp = 1 cnf_fs[1].create([locs[0]]) cnf_fs[0].file.info.write(rinf_obj)", "did not specify mults in input') sys.exit() if chgs is None: print('Error: user", "species does not match number of mults') sys.exit() if len(flat_ichs) != len(chgs): 
print(", "geo information if not insert_dct['saddle']: if not species_match(geo, spc_info): print( 'I refuse to", "' {}'.format(prog) + ' and method matches' + ' {}'.format(method)) sys.exit() # Parse", "print( # 'Can only save a transition state that has a single' +", "specify a theory {}'.format(theory) + ' that is in the THEORY_DCT' + 'please", "'method': 'ccsd(t)-f12', 'basis': 'cc-pvtz-f12' }, 'cc_lvl_qf': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12',", "rinf_obj = cnf_fs[0].file.info.read() else: rinf_obj = autofile.schema.info_objects.conformer_trunk(0) rinf_obj.nsamp = 1 if cnf_fs[1].file.info.exists([locs[0]]): cinf_obj", "sum(ichs, []) if len(flat_ichs) != len(mults): print( 'Error: number of species does not", "to save this geometry until user specified' + ' info matches the info", "(geo, zma, ene), inf_obj, inp_str, zrxn=zrxn, zma_locs=(0,)) print( 'geometry is now saved at", "'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': 'cc-pvtz'}, 'lvl_b2d': { 'orb_res': 'RU', 'program': 'gaussian09',", "= tinfo.modify_orb_label(thy_info, spc_info) locs = parse_user_locs(insert_dct) # Check that the save location matches", "std_rxn, geo) std_zrxn = automol.reac.relabel_for_zmatrix( std_rxn, zma_keys, dummy_key_dct) rxn_info = (ts_ichs, *rxn_info[1:]) ts_geo", "= autorun.projrot.frequencies( # script_str, freq_run_path, [geo], [[]], [hess]) # if len(imags) != 1:", "'rid': None, 'cid': None, 'theory': None, 'program': None, 'method': None, 'basis': None, 'orb_res':", "automol.graph.remove_bonds(forward_gra, forw_brk_key) # backward_gra = automol.graph.remove_bonds(backward_gra, back_brk_key) # print('forRXN', automol.graph.string(zrxn_i.forward_ts_graph)) # print('forRXN', automol.graph.string(forward_gra))", "out') print('I promise i will be smarter in the future') sys.exit() breaking_bond, forming_bond", "has a single' + # 'imaginary frequency, projrot found the following' + #", "len(values) == 1: value = values[0] else: value = 
values elif keyword in", "geometry until user specified' + ' info matches the info in user given", "= (ts_ichs, *rxn_info[1:]) ts_geo = automol.zmat.geometry(ts_zma) # geo_reorder_dct = {} # dummies =", "in product_gras: try: pich = automol.graph.inchi(pgra, stereo=True) except IndexError: pich = automol.graph.inchi(pgra) psmi", "continue for ts_gra_i in ts_gras: vals = automol.graph.atom_unsaturated_valences(ts_gra_i, bond_order=True) oversaturated_atoms = [atm for", "= True elif ts_smis[0][::-1] == rxn_smis[0]: ts_ichs[0] = ts_ichs[0][::-1] ts_smis[0] = ts_smis[0][::-1] reactant_match", "smiles in input') sys.exit() if ich is None: ich = automol.smiles.inchi(smi) if not", "spc_info, mod_thy_info, locs=None): # species filesystem spc_fs = autofile.fs.species(prefix) spc_fs[-1].create(spc_info) spc_prefix = spc_fs[-1].path(spc_info)", "inchi from output {}'.format(geo_ich) + 'which is based on geometry from output:\\n' +", "= choose_cutoff_distance(geo) atoms_bnd = automol.graph.atoms_bond_keys(ts_gra) bonds = atoms_bnd[oversaturated_atom] if len(bonds) != 2: print('too", "is None: ichs = [[], []] for smi in smis[0]: ichs[0].append(automol.smiles.inchi(smi)) for smi", "ts_smis = ts_smis[::-1] ts_gras = ts_gras[::-1] rxn_gras = rxn_gras[::-1] if ts_smis[0] == rxn_smis[0]:", "# if zrxn_i.class_ == 'hydrogen abstraction': # forward_gra = automol.graph.remove_bonds(forward_gra, forw_brk_key) # backward_gra", "chosen_ts_gra = [] chosen_oversaturated_atom = None for rqh in rqhs: ts_gras = automol.geom.connectivity_graph(geo,", "if keyword in insert_dct: if 'None' in value: value = None elif keyword", "print( 'the geometry in the output is not unique to filesystem' + '...", "elif keyword in ['ts_locs']: value = (int(value),) elif keyword in ['rxn_class']: # strip", "ts_gras[0] chosen_oversaturated_atom = oversaturated_atoms[0] break if chosen_oversaturated_atom is None: print('could not figure out", "save_saddle_point from mechlib.reaction.rxnid import _id_reaction 
THEORY_DCT = { 'lvl_wbs': { 'orb_res': 'RU', 'program':", "and output files that we # Are inserting into the filesystem inp_str =", "= insert_dct['orb_res'] # Get input method from theory dictionary theory = insert_dct['theory'] if", "if len(oversaturated_atoms) == 1: chosen_ts_gra = ts_gras[0] chosen_oversaturated_atom = oversaturated_atoms[0] break if chosen_oversaturated_atom", "# sys.exit() else: spc_info = parse_user_species(insert_dct) mod_thy_info = tinfo.modify_orb_label(thy_info, spc_info) locs = parse_user_locs(insert_dct)", "elif '!' in line[0]: continue line = line.split('!')[0] if ':' not in line:", "'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvqz-f12' }, 'mlvl_cas_dz': { 'orb_res': 'RR',", "is not recognized'.format(keyword) + 'script will exit until inpupt is resolved to avoid'", "['smiles', 'inchi']: value = value.replace(' ', '') else: value = value.split(' = ')", "ich is None and smi is None: print( 'Error: user did not specify", "'') value = ' '.join(value) elif keyword not in ['smiles', 'inchi']: value =", "{}'.format(prog) + ' and method matches' + ' {}'.format(method)) sys.exit() # Parse out", "autofile.schema.info_objects.run( job=elstruct.Job.OPTIMIZATION, prog=prog, version='', method=method, basis=basis, status=autofile.schema.RunStatus.SUCCESS) ret = (inf_obj, inp_str, out_str) _,", "if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) ichs[0][idx] = ich for idx, ich in", "'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvtz'}, 'cc_lvl_q': { 'orb_res': 'RR', 'program': 'molpro2015',", "zrxn_i # zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo) # elif backward_gra == automol.geom.graph(geo,", "# forw_brk_key = automol.reac.breaking_bond_keys(zrxn_i) # back_brk_key = automol.reac.breaking_bond_keys(zrxn_i, rev=True) # forward_gra = automol.graph.without_stereo_parities(", "ts_mult is None: print( 'Error: user did not specify ts_mul') sys.exit() rxn_info =", "'zrxns, 
which are the following') # for zrxn_i in zrxns: # print(zrxns) #", "= automol.reac.forming_bond_keys(zrxn_i) # back_form_key = automol.reac.forming_bond_keys(zrxn_i, rev=True) # forw_brk_key = automol.reac.breaking_bond_keys(zrxn_i) # back_brk_key", "freqs, _, imags, _ = autorun.projrot.frequencies( # script_str, freq_run_path, [geo], [[]], [hess]) #", "geo, ene, saved_geos, saved_enes) if sym_id is None: if cnf_fs[0].file.info.exists(): rinf_obj = cnf_fs[0].file.info.read()", "ts_smis = [[], []] ts_ichs = [[], []] for rgra in reactant_gras: try:", "ichs[0][idx] = ich for idx, ich in enumerate(ichs[1]): if not automol.inchi.is_complete(ich): ich =", "in smis[0]: ichs[0].append(automol.smiles.inchi(smi)) for smi in smis[1]: ichs[1].append(automol.smiles.inchi(smi)) for idx, ich in enumerate(ichs[0]):", "idx_i, idx_j in enumerate(zma_keys): # if idx_i in dummies: # remove_idx -= 1", "{}'.format(method)) sys.exit() # Parse out user specified save location zrxn = None if", "rxn_info, spc_info, rxn_class = parse_user_reaction(insert_dct) zrxn, zma, geo, rxn_info = get_zrxn(geo, rxn_info, rxn_class)", "cnf_fs[-1].path(locs) else: cnf_prefix = None return ( (rxn_fs, thy_fs, ts_fs, cnf_fs), (rxn_prefix, thy_prefix,", "' info matches the info in user given output') sys.exit() inf_obj = autofile.schema.info_objects.run(", "= automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) if mults[idx] not in mults_allowed: print( 'user specified mult", "+= 1 cinf_obj.nsamp = cnsampd else: cinf_obj = autofile.schema.info_objects.conformer_branch(0) cinf_obj.nsamp = 1 cnf_fs[1].create([locs[0]])", "# zrxn_i.backward_ts_graph))) # backward_gra = automol.graph.add_bonds(backward_gra, back_form_key) # if zrxn_i.class_ == 'hydrogen abstraction':", "# forw_form_key = automol.reac.forming_bond_keys(zrxn_i) # back_form_key = automol.reac.forming_bond_keys(zrxn_i, rev=True) # forw_brk_key = automol.reac.breaking_bond_keys(zrxn_i)", "Get input method from theory 
dictionary theory = insert_dct['theory'] if theory is None:", "+ 'Allowed keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit() return insert_dct if __name__ ==", "if theory in THEORY_DCT: thy_info = tinfo.from_dct(THEORY_DCT[theory]) else: print( 'Error: user did not", "for i, val in enumerate(value): value[i] = val.replace(' ', '') value = '", "None and smi is None: print( 'Error: user did not specify species' +", "= {breaking_bond: 0.1, forming_bond: 0.9} forward_gra = automol.graph.set_bond_orders(ts_gra, forw_bnd_ord_dct) backward_gra = automol.graph.set_bond_orders(ts_gra, back_bnd_ord_dct)", "specified in user input') sys.exit() return std_zrxn, ts_zma, ts_geo, rxn_info def main(insert_dct): prefix", "= ts_smis[::-1] ts_gras = ts_gras[::-1] rxn_gras = rxn_gras[::-1] if ts_smis[0] == rxn_smis[0]: reactant_match", "len(imags) != 1: # print( # 'Can only save a transition state that", "products found for the transition state' + 'did not match those specified in", "charge in input') sys.exit() return sinfo.from_data(ich, chg, mult) def parse_user_reaction(insert_dct): smis = insert_dct['smiles']", "product_gras = automol.graph.without_dummy_bonds( automol.graph.without_fractional_bonds(backward_gra)) product_gras = automol.graph.connected_components(product_gras) ts_gras = [forward_gra, backward_gra] rxn_gras =", "locs = parse_user_locs(insert_dct) # Check that the save location matches geo information if", "value.split() for i, val in enumerate(value): value[i] = val.replace(' ', '') value =", "mismatch for the filesystem at' + ' {}'.format(cnf_fs[0].path()) + '\\nthe expected rid for", "resolved to avoid' + ' filesystem contamination.' 
+ 'Allowed keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys())))", "will exit') sys.exit() return dct['save_filesystem'] def choose_cutoff_distance(geo): rqhs = [x * 0.1 for", "def parse_user_locs(insert_dct): rid = insert_dct['rid'] cid = insert_dct['cid'] if rid is None: rid", "insert_dct: if 'None' in value: value = None elif keyword in ['mult', 'charge',", "in input') sys.exit() elif method is None: print('Error: user did not specify method", "rinfo.sort((ichs, rxn_chgs, rxn_muls, ts_mult)) ts_info = rinfo.ts_info(rxn_info) # if zrxn_file is not None:", "file is {}'.format(rid)) match = False return match def rng_loc_for_geo(geo, cnf_fs): rid =", "if ts_smis[0] == rxn_smis[0]: reactant_match = True elif ts_smis[0][::-1] == rxn_smis[0]: ts_ichs[0] =", "insert_dct['saddle'] = True reactants, products = value reactants = reactants.split(' + ') products", "= ts_ichs[1][::-1] ts_smis[1] = ts_smis[1][::-1] product_match = True if reactant_match and product_match: reactant_keys", "1: continue for ts_gra_i in ts_gras: vals = automol.graph.atom_unsaturated_valences(ts_gra_i, bond_order=True) oversaturated_atoms = [atm", "automol.graph.without_fractional_bonds( # zrxn_i.forward_ts_graph))) # forward_gra = automol.graph.add_bonds(forward_gra, forw_form_key) # backward_gra = automol.graph.without_stereo_parities( #", "rid in input file is {}'.format(rid)) match = False return match def rng_loc_for_geo(geo,", "def create_species_filesystems(prefix, spc_info, mod_thy_info, locs=None): # species filesystem spc_fs = autofile.fs.species(prefix) spc_fs[-1].create(spc_info) spc_prefix", "rxn_info = get_zrxn(geo, rxn_info, rxn_class) # for zrxn_i in zrxns: # forw_form_key =", "rich = automol.graph.inchi(rgra) rsmi = automol.inchi.smiles(rich) ts_ichs[0].append(rich) ts_smis[0].append(rsmi) for pgra in product_gras: try:", "location zrxn = None if insert_dct['saddle']: rxn_info, spc_info, rxn_class = parse_user_reaction(insert_dct) zrxn, zma,", "cinf_obj = 
cnf_fs[1].file.info.read(locs[0]) cnsampd = cinf_obj.nsamp cnsampd += 1 cinf_obj.nsamp = cnsampd else:", "# 'Can only save a transition state that has a single' + #", "spc_info, mod_thy_info, locs=None) else: fs_array, prefix_array = create_reaction_filesystems( prefix, rxn_info, mod_thy_info, ts_locs=insert_dct['ts_locs'], locs=None)", "thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) if ts_locs is None: ts_locs = (0,) ts_fs =", "smis[1]: ichs[1].append(automol.smiles.inchi(smi)) for idx, ich in enumerate(ichs[0]): if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich)", "hessian') # sys.exit() # run_path = insert_dct['run_path'] # if run_path is None: #", "'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvqz-f12' }, 'mlvl_cas_dz': { 'orb_res': 'RR', 'program': 'molpro2015', 'method':", "for the transition state' + 'did not match those specified in user input')", "os.getcwd() # run_fs = autofile.fs.run(run_path) # freq_run_path = run_fs[-1].path(['hessian']) # run_fs[-1].create(['hessian']) # script_str", "'mlvl_cas_tz': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'caspt2', 'basis': 'cc-pvtz'}} def parse_user_locs(insert_dct): rid", "= ts_smis[1][::-1] product_match = True if reactant_match and product_match: reactant_keys = [] for", "= True reactants, products = value reactants = reactants.split(' + ') products =", "automol.zmat.almost_equal( frag_locs_zma, frag_zma, dist_rtol=0.1, ang_atol=.4): rid = locs[0] break return rid def parse_script_input(script_input_file):", "# automol.graph.without_dummy_bonds( # automol.graph.without_fractional_bonds( # zrxn_i.backward_ts_graph))) # backward_gra = automol.graph.add_bonds(backward_gra, back_form_key) # if", "import _saved_cnf_info from mechroutines.es._routines.conformer import _sym_unique from mechroutines.es._routines.conformer import _save_unique_parsed_conformer from mechroutines.es._routines.conformer import", "cnf_prefix = None return ( 
(spc_fs, thy_fs, cnf_fs), (spc_prefix, thy_prefix, cnf_prefix)) def create_reaction_filesystems(", "'m062x', 'basis': '6-31g*' }, 'lvl_m06m': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis':", "# geo_reorder_dct = {} # dummies = [] # for dummy in dummy_key_dct.keys():", "spc_fs[-1].create(spc_info) spc_prefix = spc_fs[-1].path(spc_info) # theory filesystem thy_fs = autofile.fs.theory(spc_prefix) thy_fs[-1].create(mod_thy_info[1:]) thy_prefix =", "= _fragment_ring_geo(locs_geo) if frag_locs_geo is None: rid = locs[0] break frag_locs_zma = automol.geom.zmatrix(frag_locs_geo)", "print('Error: user did not specify mult in input') sys.exit() if chg is None:", "ts_smis[0].append(rsmi) for pgra in product_gras: try: pich = automol.graph.inchi(pgra, stereo=True) except IndexError: pich", "if theory is None: if program is None: print('Error: user did not specify", "None, 'run_path': None, 'saddle': False, } for i, line in enumerate(script_input): if len(line)", "None and zrxn is not None: hess_inf_obj = autofile.schema.info_objects.run( job=elstruct.Job.HESSIAN, prog=prog, version='', method=method,", "zrxn is None: # print( # 'Your geometry did not match any of", "if not locs_match(geo, cnf_fs, locs): print( 'I refuse to save this geometry until", "in ['ts_locs']: value = (int(value),) elif keyword in ['rxn_class']: # strip whitespaces form", "rinf_obj.nsamp = 1 if cnf_fs[1].file.info.exists([locs[0]]): cinf_obj = cnf_fs[1].file.info.read(locs[0]) cnsampd = cinf_obj.nsamp cnsampd +=", "current_rid in checked_rids: continue if cnf_fs[-1].file.geometry.exists(locs): checked_rids.append(current_rid) locs_geo = cnf_fs[-1].file.geometry.read(locs) frag_locs_geo = _fragment_ring_geo(locs_geo)", "'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvtz'}, 'cc_lvl_q': { 'orb_res': 'RR', 'program': 'molpro2015', 'method':", "backward_gra = automol.graph.add_bonds(backward_gra, back_form_key) # if zrxn_i.class_ == 'hydrogen abstraction': # forward_gra 
=", "value.replace(' ', '') else: value = value.split(' = ') if len(value) > 1:", "def main(insert_dct): prefix = read_user_filesystem(insert_dct) # Read in the input and output files", "'b3lyp', 'basis': 'cc-pvtz'}, 'cc_lvl_d': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvdz'},", "'orb_res': 'RR', 'program': 'molpro2015', 'method': 'caspt2', 'basis': 'cc-pvtz'}} def parse_user_locs(insert_dct): rid = insert_dct['rid']", "std_rxn, zma_keys, dummy_key_dct) rxn_info = (ts_ichs, *rxn_info[1:]) ts_geo = automol.zmat.geometry(ts_zma) # geo_reorder_dct =", "freq_run_path = run_fs[-1].path(['hessian']) # run_fs[-1].create(['hessian']) # script_str = autorun.SCRIPT_DCT['projrot'] # freqs, _, imags,", "in dummies: # remove_idx -= 1 # else: # geo_reorder_dct[idx_i + remove_idx] =", "output') sys.exit() # Check that the rid/cid info matches the filesystem fs_array, prefix_array", "cnf_fs[1].file.info.read(locs[0]) cnsampd = cinf_obj.nsamp cnsampd += 1 cinf_obj.nsamp = cnsampd else: cinf_obj =", "back_brk_key) # print('forRXN', automol.graph.string(zrxn_i.forward_ts_graph)) # print('forRXN', automol.graph.string(forward_gra)) # print('bacRXN', automol.graph.string(zrxn_i.backward_ts_graph)) # print('bacRXN', automol.graph.string(backward_gra))", "sys.exit() return dct['save_filesystem'] def choose_cutoff_distance(geo): rqhs = [x * 0.1 for x in", "{ 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'caspt2', 'basis': 'cc-pvdz'}, 'mlvl_cas_tz': { 'orb_res': 'RR',", "a log file \"\"\" import sys import os import autofile import automol from", "'basis': 'cc-pvdz'}, 'mlvl_cas_tz': { 'orb_res': 'RR', 'program': 'molpro2015', 'method': 'caspt2', 'basis': 'cc-pvtz'}} def", "= get_zrxn(geo, rxn_info, rxn_class) # for zrxn_i in zrxns: # forw_form_key = automol.reac.forming_bond_keys(zrxn_i)", "None, 'rid': None, 'cid': None, 'theory': None, 'program': None, 'method': None, 'basis': None,", "= reactants.split(' + ') products = products.split(' + ') 
values = [[], []]", "chosen_oversaturated_atom is None: print('could not figure out which H is being transfered') sys.exit()", "rinfo.ts_info(rxn_info) # if zrxn_file is not None: # zrxn_str = autofile.io_.read_file(zrxn_file) # zrxns", "atom for me to figure out') print('I promise i will be smarter in", "smi is None: print( 'Error: user did not specify species' + 'with an", "inchi {}'.format(ich)) sys.exit() rxn_muls[1].append(mults[idx]) rxn_chgs[1].append(chgs[idx]) idx += 1 ts_mult = insert_dct['ts_mult'] if ts_mult", "[] # for dummy in dummy_key_dct.keys(): # add_idx = 1 # for dumm_j", "rng_loc_for_geo(geo, cnf_fs): rid = None frag_geo = _fragment_ring_geo(geo) if frag_geo is not None:", "Keyword {} is not recognized'.format(keyword) + 'script will exit until inpupt is resolved", "atm, val in vals.items() if val < 0] if len(oversaturated_atoms) == 1: chosen_ts_gra", "'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis': '6-31g*' }, 'lvl_wbm': { 'orb_res': 'RU', 'program':", "= [forward_gra, backward_gra] rxn_gras = [reactant_gras, product_gras] rxn_smis = [[], []] for i,", "else: cinf_obj = autofile.schema.info_objects.conformer_branch(0) cinf_obj.nsamp = 1 cnf_fs[1].create([locs[0]]) cnf_fs[0].file.info.write(rinf_obj) cnf_fs[1].file.info.write(cinf_obj, [locs[0]]) hess, freqs,", "(int(value),) elif keyword in ['rxn_class']: # strip whitespaces form either side of reaction", "not None: if geo_rid != rid: print( 'Error: rid mismatch for the filesystem", "{ 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'b3lyp', 'basis': '6-311g**' }, 'lvl_b3t': { 'orb_res':", "geo information first, to make sure # user save specifications match output prog,", "in enumerate(ichs[0]): if not automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) ichs[0][idx] = ich for idx,", "be parsed from output' + 'Check that the program matches user specied' +", "value = line.split(':') if keyword in insert_dct: if 'None' in value: value =", "product_match = True elif 
ts_smis[1][::-1] == rxn_smis[-1]: ts_ichs[1] = ts_ichs[1][::-1] ts_smis[1] = ts_smis[1][::-1]", "is {}'.format(geo_rid) + '\\nthe user rid in input file is {}'.format(rid)) match =", "Check that the save location matches geo information if not insert_dct['saddle']: if not", "\"\"\" import sys import os import autofile import automol from mechanalyzer.inf import thy", "# print( # 'No hessian found in output, cannot save ' + #", "'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': '6-31g*' }, 'lvl_m06m': { 'orb_res': 'RU', 'program':", "in input') sys.exit() else: thy_info = (program, method, basis, orb_res) else: if theory", "automol.reac.relabel_for_zmatrix( std_rxn, zma_keys, dummy_key_dct) rxn_info = (ts_ichs, *rxn_info[1:]) ts_geo = automol.zmat.geometry(ts_zma) # geo_reorder_dct", "is None: rid = locs[0] break frag_locs_zma = automol.geom.zmatrix(frag_locs_geo) if automol.zmat.almost_equal( frag_locs_zma, frag_zma,", "= spc_info mults_allowed = automol.graph.possible_spin_multiplicities( automol.inchi.graph(ich, stereo=False)) geo_ich = automol.geom.inchi(geo, stereo=True) if ich", "other reaction types we have to check for double # bonds when doing", "= {} # dummies = [] # for dummy in dummy_key_dct.keys(): # add_idx", "None)}, locs, zma_locs=(0,), zma=zma) else: _save_unique_parsed_conformer( mod_thy_info, cnf_fs, locs, (geo, zma, ene), inf_obj,", "'run_path': None, 'saddle': False, } for i, line in enumerate(script_input): if len(line) <", "cnf_fs, locs): match = True rid = locs[0] geo_rid = rng_loc_for_geo(geo, cnf_fs) if", "import save_saddle_point from mechlib.reaction.rxnid import _id_reaction THEORY_DCT = { 'lvl_wbs': { 'orb_res': 'RU',", "saved_enes = _saved_cnf_info( cnf_fs, mod_thy_info) if _geo_unique(geo, ene, saved_geos, saved_enes, zrxn=zrxn): sym_id =", "'lvl_m06t': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': 'cc-pvtz'}, 'lvl_b2d': { 'orb_res':", "_ = autorun.projrot.frequencies( # script_str, freq_run_path, 
[geo], [[]], [hess]) # if len(imags) !=", "ich = automol.inchi.add_stereo(ich) ichs[0][idx] = ich for idx, ich in enumerate(ichs[1]): if not", "locs, zma_locs=(0,), zma=zma) else: _save_unique_parsed_conformer( mod_thy_info, cnf_fs, locs, (geo, zma, ene), inf_obj, inp_str,", "def get_zrxn(geo, rxn_info, rxn_class): ts_gra, oversaturated_atom = choose_cutoff_distance(geo) atoms_bnd = automol.graph.atoms_bond_keys(ts_gra) bonds =", "files that we # Are inserting into the filesystem inp_str = read_user_file(insert_dct, 'input_file')", "'wb97xd', 'basis': '6-31g*' }, 'lvl_wbm': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'wb97xd', 'basis':", "value.split(' = ') if len(value) > 1: insert_dct['saddle'] = True reactants, products =", "as rinfo from mechanalyzer.inf import spc as sinfo import elstruct import autorun from", "input file is {}'.format(rid)) match = False return match def rng_loc_for_geo(geo, cnf_fs): rid", "avoid' + ' filesystem contamination.' + 'Allowed keywords are:\\n' + '{}'.format('\\n'.join(list(insert_dct.keys()))) ) sys.exit()", "= [] for gra in rxn_gras[1]: product_keys.append(automol.graph.atom_keys(gra)) std_rxn = automol.reac.Reaction( rxn_class, *ts_gras, reactant_keys,", "for dummy in dummy_key_dct.keys(): # add_idx = 1 # for dumm_j in dummies:", "'RR', 'program': 'molpro2015', 'method': 'ccsd(t)', 'basis': 'cc-pvqz' }, 'cc_lvl_df': { 'orb_res': 'RR', 'program':", "# backward_gra = automol.graph.add_bonds(backward_gra, back_form_key) # if zrxn_i.class_ == 'hydrogen abstraction': # forward_gra", "stereo=False): # zrxn = zrxn_i # zma, _, _ = automol.reac.ts_zmatrix(zrxn, geo) #", "}, 'lvl_m06m': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': '6-31+g*' }, 'lvl_m06t':", "cnf_fs), (rxn_prefix, thy_prefix, ts_prefix, cnf_prefix)) def read_user_file(dct, keyword): if dct[keyword] is None: print(", "+ ' {}'.format(method)) sys.exit() # Parse out user specified save location zrxn =", "autofile.fs.theory(rxn_prefix) 
thy_fs[-1].create(mod_thy_info[1:]) thy_prefix = thy_fs[-1].path(mod_thy_info[1:]) if ts_locs is None: ts_locs = (0,) ts_fs", "program in input') sys.exit() elif method is None: print('Error: user did not specify", "automol.inchi.is_complete(ich): ich = automol.inchi.add_stereo(ich) if mult is None: print('Error: user did not specify", "usiing a log file \"\"\" import sys import os import autofile import automol", "value = values else: value = value[0].replace(' ', '') print(keyword, value) insert_dct[keyword] =", "in mults_allowed: print( 'user specified mult of {}'.format(mul) + 'is not an allowed", "automol.geom.zmatrix(frag_geo) checked_rids = [] for locs in cnf_fs[-1].existing(): current_rid, _ = locs if", "_, imags, _ = autorun.projrot.frequencies( # script_str, freq_run_path, [geo], [[]], [hess]) # if", "of {}'.format(mults[idx]) + 'is not an allowed multiplicty for inchi {}'.format(ich)) sys.exit() rxn_muls[1].append(mults[idx])", "'gaussian09', 'method': 'wb97xd', 'basis': '6-31g*' }, 'lvl_wbm': { 'orb_res': 'RU', 'program': 'gaussian09', 'method':", "'RR', 'program': 'molpro2015', 'method': 'ccsd(t)-f12', 'basis': 'cc-pvtz-f12' }, 'cc_lvl_qf': { 'orb_res': 'RR', 'program':", "return sinfo.from_data(ich, chg, mult) def parse_user_reaction(insert_dct): smis = insert_dct['smiles'] ichs = insert_dct['inchi'] mults", "chosen_ts_gra = ts_gras[0] chosen_oversaturated_atom = oversaturated_atoms[0] break if chosen_oversaturated_atom is None: print('could not", "specify species' + 'with an inchi or smiles in input') sys.exit() if ich", "'ts_mult': None, 'rxn_class': None, 'zrxn_file': None, 'run_path': None, 'saddle': False, } for i,", "['ts_locs']: value = (int(value),) elif keyword in ['rxn_class']: # strip whitespaces form either", "'cc-pvtz'}, 'lvl_m06s': { 'orb_res': 'RU', 'program': 'gaussian09', 'method': 'm062x', 'basis': '6-31g*' }, 'lvl_m06m':", "Check that the rid/cid info matches the filesystem fs_array, prefix_array = create_species_filesystems( 
prefix,", "of mults') sys.exit() if len(flat_ichs) != len(chgs): print( 'Error: number of species does" ]
[ "for i in data.get(\"data\")[\"options\"]] context = SlashContext( client, event[\"type\"], id=data[\"id\"], application_id=data[\"application_id\"], command_id=data.get(\"data\")[\"id\"], name=data.get(\"data\")[\"name\"],", "GatewayDispatch from ..models import SlashContext, Option async def interaction_create(client: \"Client\", gateway: \"Gateway\", event:", "\"Gateway\", event: \"GatewayDispatch\") -> typing.List[typing.Awaitable]: \"\"\" |coro| This event called when the client", "SlashContext, Option async def interaction_create(client: \"Client\", gateway: \"Gateway\", event: \"GatewayDispatch\") -> typing.List[typing.Awaitable]: \"\"\"", "..client import Client from ..core import Gateway, GatewayDispatch from ..models import SlashContext, Option", "from ..core import Gateway, GatewayDispatch from ..models import SlashContext, Option async def interaction_create(client:", "client : `Client` The main client. gateway : `Gateway` The gateway that dispatched", "command_id=data.get(\"data\")[\"id\"], name=data.get(\"data\")[\"name\"], options=Option(), ) return \"on_interaction_create\", [ context ] \"\"\" The event that", "[Option(i[\"type\"], name=i[\"name\"], description=i[\"description\"], required=) # for i in data.get(\"data\")[\"options\"]] context = SlashContext( client,", "from ..models import SlashContext, Option async def interaction_create(client: \"Client\", gateway: \"Gateway\", event: \"GatewayDispatch\")", "Gateway, GatewayDispatch from ..models import SlashContext, Option async def interaction_create(client: \"Client\", gateway: \"Gateway\",", "client. gateway : `Gateway` The gateway that dispatched the event. event : `GatewayDispatch`", "gateway : `Gateway` The gateway that dispatched the event. 
event : `GatewayDispatch` The", "id=data[\"id\"], application_id=data[\"application_id\"], command_id=data.get(\"data\")[\"id\"], name=data.get(\"data\")[\"name\"], options=Option(), ) return \"on_interaction_create\", [ context ] \"\"\" The", "is ready. It does provide the client and the user that was ready.", "event. event : `GatewayDispatch` The event that was dispatched. \"\"\" data = event.data", "The gateway that dispatched the event. event : `GatewayDispatch` The event that was", "user that was ready. Parameters ---------- client : `Client` The main client. gateway", "provide the client and the user that was ready. Parameters ---------- client :", ": `Gateway` The gateway that dispatched the event. event : `GatewayDispatch` The event", "the client and the user that was ready. Parameters ---------- client : `Client`", "typing if typing.TYPE_CHECKING: from ..client import Client from ..core import Gateway, GatewayDispatch from", "event that was dispatched. \"\"\" data = event.data # options = [Option(i[\"type\"], name=i[\"name\"],", "gateway: \"Gateway\", event: \"GatewayDispatch\") -> typing.List[typing.Awaitable]: \"\"\" |coro| This event called when the", "application_id=data[\"application_id\"], command_id=data.get(\"data\")[\"id\"], name=data.get(\"data\")[\"name\"], options=Option(), ) return \"on_interaction_create\", [ context ] \"\"\" The event", "options=Option(), ) return \"on_interaction_create\", [ context ] \"\"\" The event that was dispatched.", "event[\"type\"], id=data[\"id\"], application_id=data[\"application_id\"], command_id=data.get(\"data\")[\"id\"], name=data.get(\"data\")[\"name\"], options=Option(), ) return \"on_interaction_create\", [ context ] \"\"\"", "that dispatched the event. event : `GatewayDispatch` The event that was dispatched. \"\"\"", "called when the client is ready. It does provide the client and the", "---------- client : `Client` The main client. 
gateway : `Gateway` The gateway that", "context = SlashContext( client, event[\"type\"], id=data[\"id\"], application_id=data[\"application_id\"], command_id=data.get(\"data\")[\"id\"], name=data.get(\"data\")[\"name\"], options=Option(), ) return \"on_interaction_create\",", "The main client. gateway : `Gateway` The gateway that dispatched the event. event", "data = event.data # options = [Option(i[\"type\"], name=i[\"name\"], description=i[\"description\"], required=) # for i", "the user that was ready. Parameters ---------- client : `Client` The main client.", "async def interaction_create(client: \"Client\", gateway: \"Gateway\", event: \"GatewayDispatch\") -> typing.List[typing.Awaitable]: \"\"\" |coro| This", "import Gateway, GatewayDispatch from ..models import SlashContext, Option async def interaction_create(client: \"Client\", gateway:", "Parameters ---------- client : `Client` The main client. gateway : `Gateway` The gateway", "in data.get(\"data\")[\"options\"]] context = SlashContext( client, event[\"type\"], id=data[\"id\"], application_id=data[\"application_id\"], command_id=data.get(\"data\")[\"id\"], name=data.get(\"data\")[\"name\"], options=Option(), )", "dispatched the event. event : `GatewayDispatch` The event that was dispatched. \"\"\" data", ": `Client` The main client. gateway : `Gateway` The gateway that dispatched the", "does provide the client and the user that was ready. Parameters ---------- client", "import typing if typing.TYPE_CHECKING: from ..client import Client from ..core import Gateway, GatewayDispatch", "typing.TYPE_CHECKING: from ..client import Client from ..core import Gateway, GatewayDispatch from ..models import", "ready. It does provide the client and the user that was ready. 
Parameters", "from ..client import Client from ..core import Gateway, GatewayDispatch from ..models import SlashContext,", "= event.data # options = [Option(i[\"type\"], name=i[\"name\"], description=i[\"description\"], required=) # for i in", "typing.List[typing.Awaitable]: \"\"\" |coro| This event called when the client is ready. It does", "return \"on_interaction_create\", [ context ] \"\"\" The event that was dispatched. \"\"\" def", "`Gateway` The gateway that dispatched the event. event : `GatewayDispatch` The event that", "event called when the client is ready. It does provide the client and", "\"GatewayDispatch\") -> typing.List[typing.Awaitable]: \"\"\" |coro| This event called when the client is ready.", "\"\"\" |coro| This event called when the client is ready. It does provide", "and the user that was ready. Parameters ---------- client : `Client` The main", "-> typing.List[typing.Awaitable]: \"\"\" |coro| This event called when the client is ready. It", "that was ready. Parameters ---------- client : `Client` The main client. gateway :", "It does provide the client and the user that was ready. Parameters ----------", "when the client is ready. It does provide the client and the user", "The event that was dispatched. \"\"\" data = event.data # options = [Option(i[\"type\"],", "if typing.TYPE_CHECKING: from ..client import Client from ..core import Gateway, GatewayDispatch from ..models", "was ready. Parameters ---------- client : `Client` The main client. 
gateway : `Gateway`", "= [Option(i[\"type\"], name=i[\"name\"], description=i[\"description\"], required=) # for i in data.get(\"data\")[\"options\"]] context = SlashContext(", "..core import Gateway, GatewayDispatch from ..models import SlashContext, Option async def interaction_create(client: \"Client\",", "event: \"GatewayDispatch\") -> typing.List[typing.Awaitable]: \"\"\" |coro| This event called when the client is", "def interaction_create(client: \"Client\", gateway: \"Gateway\", event: \"GatewayDispatch\") -> typing.List[typing.Awaitable]: \"\"\" |coro| This event", "options = [Option(i[\"type\"], name=i[\"name\"], description=i[\"description\"], required=) # for i in data.get(\"data\")[\"options\"]] context =", "# for i in data.get(\"data\")[\"options\"]] context = SlashContext( client, event[\"type\"], id=data[\"id\"], application_id=data[\"application_id\"], command_id=data.get(\"data\")[\"id\"],", "..models import SlashContext, Option async def interaction_create(client: \"Client\", gateway: \"Gateway\", event: \"GatewayDispatch\") ->", "client, event[\"type\"], id=data[\"id\"], application_id=data[\"application_id\"], command_id=data.get(\"data\")[\"id\"], name=data.get(\"data\")[\"name\"], options=Option(), ) return \"on_interaction_create\", [ context ]", "|coro| This event called when the client is ready. It does provide the", "data.get(\"data\")[\"options\"]] context = SlashContext( client, event[\"type\"], id=data[\"id\"], application_id=data[\"application_id\"], command_id=data.get(\"data\")[\"id\"], name=data.get(\"data\")[\"name\"], options=Option(), ) return", "\"on_interaction_create\", [ context ] \"\"\" The event that was dispatched. \"\"\" def export():", "gateway that dispatched the event. event : `GatewayDispatch` The event that was dispatched.", "\"\"\" data = event.data # options = [Option(i[\"type\"], name=i[\"name\"], description=i[\"description\"], required=) # for", "that was dispatched. 
\"\"\" data = event.data # options = [Option(i[\"type\"], name=i[\"name\"], description=i[\"description\"],", "i in data.get(\"data\")[\"options\"]] context = SlashContext( client, event[\"type\"], id=data[\"id\"], application_id=data[\"application_id\"], command_id=data.get(\"data\")[\"id\"], name=data.get(\"data\")[\"name\"], options=Option(),", "event that was dispatched. \"\"\" def export(): \"\"\" Exports the function. \"\"\" return", "the client is ready. It does provide the client and the user that", "client is ready. It does provide the client and the user that was", "client and the user that was ready. Parameters ---------- client : `Client` The", "SlashContext( client, event[\"type\"], id=data[\"id\"], application_id=data[\"application_id\"], command_id=data.get(\"data\")[\"id\"], name=data.get(\"data\")[\"name\"], options=Option(), ) return \"on_interaction_create\", [ context", "event.data # options = [Option(i[\"type\"], name=i[\"name\"], description=i[\"description\"], required=) # for i in data.get(\"data\")[\"options\"]]", "description=i[\"description\"], required=) # for i in data.get(\"data\")[\"options\"]] context = SlashContext( client, event[\"type\"], id=data[\"id\"],", ": `GatewayDispatch` The event that was dispatched. \"\"\" data = event.data # options", "# options = [Option(i[\"type\"], name=i[\"name\"], description=i[\"description\"], required=) # for i in data.get(\"data\")[\"options\"]] context", ") return \"on_interaction_create\", [ context ] \"\"\" The event that was dispatched. \"\"\"", "that was dispatched. \"\"\" def export(): \"\"\" Exports the function. \"\"\" return interaction_create", "the event. event : `GatewayDispatch` The event that was dispatched. \"\"\" data =", "`GatewayDispatch` The event that was dispatched. \"\"\" data = event.data # options =", "Client from ..core import Gateway, GatewayDispatch from ..models import SlashContext, Option async def", "[ context ] \"\"\" The event that was dispatched. 
\"\"\" def export(): \"\"\"", "name=data.get(\"data\")[\"name\"], options=Option(), ) return \"on_interaction_create\", [ context ] \"\"\" The event that was", "\"Client\", gateway: \"Gateway\", event: \"GatewayDispatch\") -> typing.List[typing.Awaitable]: \"\"\" |coro| This event called when", "context ] \"\"\" The event that was dispatched. \"\"\" def export(): \"\"\" Exports", "name=i[\"name\"], description=i[\"description\"], required=) # for i in data.get(\"data\")[\"options\"]] context = SlashContext( client, event[\"type\"],", "import SlashContext, Option async def interaction_create(client: \"Client\", gateway: \"Gateway\", event: \"GatewayDispatch\") -> typing.List[typing.Awaitable]:", "] \"\"\" The event that was dispatched. \"\"\" def export(): \"\"\" Exports the", "interaction_create(client: \"Client\", gateway: \"Gateway\", event: \"GatewayDispatch\") -> typing.List[typing.Awaitable]: \"\"\" |coro| This event called", "= SlashContext( client, event[\"type\"], id=data[\"id\"], application_id=data[\"application_id\"], command_id=data.get(\"data\")[\"id\"], name=data.get(\"data\")[\"name\"], options=Option(), ) return \"on_interaction_create\", [", "\"\"\" The event that was dispatched. \"\"\" def export(): \"\"\" Exports the function.", "import Client from ..core import Gateway, GatewayDispatch from ..models import SlashContext, Option async", "This event called when the client is ready. It does provide the client", "main client. gateway : `Gateway` The gateway that dispatched the event. event :", "event : `GatewayDispatch` The event that was dispatched. \"\"\" data = event.data #", "dispatched. \"\"\" data = event.data # options = [Option(i[\"type\"], name=i[\"name\"], description=i[\"description\"], required=) #", "Option async def interaction_create(client: \"Client\", gateway: \"Gateway\", event: \"GatewayDispatch\") -> typing.List[typing.Awaitable]: \"\"\" |coro|", "`Client` The main client. 
gateway : `Gateway` The gateway that dispatched the event.", "was dispatched. \"\"\" data = event.data # options = [Option(i[\"type\"], name=i[\"name\"], description=i[\"description\"], required=)", "ready. Parameters ---------- client : `Client` The main client. gateway : `Gateway` The", "required=) # for i in data.get(\"data\")[\"options\"]] context = SlashContext( client, event[\"type\"], id=data[\"id\"], application_id=data[\"application_id\"],", "The event that was dispatched. \"\"\" def export(): \"\"\" Exports the function. \"\"\"" ]
[ "== dict: message = ','.join([str(x) for x in [i,stats['avg'], stats['std'], stats['min'], stats['max'], stats['dateRange']]])", "','.join([str(x) for x in [i,stats['avg'], stats['std'], stats['min'], stats['max'], stats['dateRange']]]) elif type(stats) == list:", "np.max) return stats def write_evolution_logs(i, stats, filename=\"output/evolution_gen.csv\"): #print(i, stats) if type(stats) == dict:", "import tools import numpy as np import os statisticsNames = {'avg': 'Average profit',", "deap import tools import numpy as np import os statisticsNames = {'avg': 'Average", "'std': 'Profit variation', 'min': 'Minimum profit', 'max': 'Maximum profit', 'size': 'Population size', 'maxsize':", "[i,stats['avg'], stats['std'], stats['min'], stats['max'], stats['dateRange']]]) elif type(stats) == list: message = ','.join([str(x) for", "import os statisticsNames = {'avg': 'Average profit', 'std': 'Profit variation', 'min': 'Minimum profit',", "if type(stats) == dict: message = ','.join([str(x) for x in [i,stats['avg'], stats['std'], stats['min'],", "type(stats) == list: message = ','.join([str(x) for x in [i] + stats]) else:", "elif type(stats) == list: message = ','.join([str(x) for x in [i] + stats])", "from deap import tools import numpy as np import os statisticsNames = {'avg':", "size'} def getStatisticsMeter(): stats = tools.Statistics(lambda ind: ind.fitness.values[0]) stats.register(\"avg\", np.mean) stats.register(\"std\", np.std) stats.register(\"min\",", "variation', 'min': 'Minimum profit', 'max': 'Maximum profit', 'size': 'Population size', 'maxsize': 'Max population", "#print(i, stats) if type(stats) == dict: message = ','.join([str(x) for x in [i,stats['avg'],", "np import os statisticsNames = {'avg': 'Average profit', 'std': 'Profit variation', 'min': 'Minimum", "stats) if type(stats) == dict: message = ','.join([str(x) for x in [i,stats['avg'], stats['std'],", "else: raise #print(message) if i == 0 and os.path.isfile(filename): os.remove(filename) 
f=open(filename, 'a+') f.write(message+\"\\n\")", "for x in [i] + stats]) else: raise #print(message) if i == 0", "def write_evolution_logs(i, stats, filename=\"output/evolution_gen.csv\"): #print(i, stats) if type(stats) == dict: message = ','.join([str(x)", "{'avg': 'Average profit', 'std': 'Profit variation', 'min': 'Minimum profit', 'max': 'Maximum profit', 'size':", "stats.register(\"max\", np.max) return stats def write_evolution_logs(i, stats, filename=\"output/evolution_gen.csv\"): #print(i, stats) if type(stats) ==", "'min': 'Minimum profit', 'max': 'Maximum profit', 'size': 'Population size', 'maxsize': 'Max population size'}", "+ stats]) else: raise #print(message) if i == 0 and os.path.isfile(filename): os.remove(filename) f=open(filename,", "','.join([str(x) for x in [i] + stats]) else: raise #print(message) if i ==", "#print(message) if i == 0 and os.path.isfile(filename): os.remove(filename) f=open(filename, 'a+') f.write(message+\"\\n\") #print(message) f.close()", "'Average profit', 'std': 'Profit variation', 'min': 'Minimum profit', 'max': 'Maximum profit', 'size': 'Population", "in [i] + stats]) else: raise #print(message) if i == 0 and os.path.isfile(filename):", "stats['min'], stats['max'], stats['dateRange']]]) elif type(stats) == list: message = ','.join([str(x) for x in", "= ','.join([str(x) for x in [i,stats['avg'], stats['std'], stats['min'], stats['max'], stats['dateRange']]]) elif type(stats) ==", "profit', 'max': 'Maximum profit', 'size': 'Population size', 'maxsize': 'Max population size'} def getStatisticsMeter():", "== list: message = ','.join([str(x) for x in [i] + stats]) else: raise", "profit', 'size': 'Population size', 'maxsize': 'Max population size'} def getStatisticsMeter(): stats = tools.Statistics(lambda", "message = ','.join([str(x) for x in [i] + stats]) else: raise #print(message) if", "os statisticsNames = {'avg': 'Average profit', 'std': 'Profit variation', 'min': 'Minimum profit', 'max':", "stats.register(\"std\", 
np.std) stats.register(\"min\", np.min) stats.register(\"max\", np.max) return stats def write_evolution_logs(i, stats, filename=\"output/evolution_gen.csv\"): #print(i,", "= tools.Statistics(lambda ind: ind.fitness.values[0]) stats.register(\"avg\", np.mean) stats.register(\"std\", np.std) stats.register(\"min\", np.min) stats.register(\"max\", np.max) return", "stats.register(\"avg\", np.mean) stats.register(\"std\", np.std) stats.register(\"min\", np.min) stats.register(\"max\", np.max) return stats def write_evolution_logs(i, stats,", "filename=\"output/evolution_gen.csv\"): #print(i, stats) if type(stats) == dict: message = ','.join([str(x) for x in", "return stats def write_evolution_logs(i, stats, filename=\"output/evolution_gen.csv\"): #print(i, stats) if type(stats) == dict: message", "= {'avg': 'Average profit', 'std': 'Profit variation', 'min': 'Minimum profit', 'max': 'Maximum profit',", "'max': 'Maximum profit', 'size': 'Population size', 'maxsize': 'Max population size'} def getStatisticsMeter(): stats", "x in [i] + stats]) else: raise #print(message) if i == 0 and", "[i] + stats]) else: raise #print(message) if i == 0 and os.path.isfile(filename): os.remove(filename)", "stats, filename=\"output/evolution_gen.csv\"): #print(i, stats) if type(stats) == dict: message = ','.join([str(x) for x", "ind.fitness.values[0]) stats.register(\"avg\", np.mean) stats.register(\"std\", np.std) stats.register(\"min\", np.min) stats.register(\"max\", np.max) return stats def write_evolution_logs(i,", "'Maximum profit', 'size': 'Population size', 'maxsize': 'Max population size'} def getStatisticsMeter(): stats =", "stats['max'], stats['dateRange']]]) elif type(stats) == list: message = ','.join([str(x) for x in [i]", "stats]) else: raise #print(message) if i == 0 and os.path.isfile(filename): os.remove(filename) f=open(filename, 'a+')", "size', 'maxsize': 'Max population size'} def getStatisticsMeter(): stats = tools.Statistics(lambda ind: ind.fitness.values[0]) 
stats.register(\"avg\",", "message = ','.join([str(x) for x in [i,stats['avg'], stats['std'], stats['min'], stats['max'], stats['dateRange']]]) elif type(stats)", "'Profit variation', 'min': 'Minimum profit', 'max': 'Maximum profit', 'size': 'Population size', 'maxsize': 'Max", "'maxsize': 'Max population size'} def getStatisticsMeter(): stats = tools.Statistics(lambda ind: ind.fitness.values[0]) stats.register(\"avg\", np.mean)", "stats['std'], stats['min'], stats['max'], stats['dateRange']]]) elif type(stats) == list: message = ','.join([str(x) for x", "'Max population size'} def getStatisticsMeter(): stats = tools.Statistics(lambda ind: ind.fitness.values[0]) stats.register(\"avg\", np.mean) stats.register(\"std\",", "stats def write_evolution_logs(i, stats, filename=\"output/evolution_gen.csv\"): #print(i, stats) if type(stats) == dict: message =", "'Population size', 'maxsize': 'Max population size'} def getStatisticsMeter(): stats = tools.Statistics(lambda ind: ind.fitness.values[0])", "type(stats) == dict: message = ','.join([str(x) for x in [i,stats['avg'], stats['std'], stats['min'], stats['max'],", "profit', 'std': 'Profit variation', 'min': 'Minimum profit', 'max': 'Maximum profit', 'size': 'Population size',", "tools import numpy as np import os statisticsNames = {'avg': 'Average profit', 'std':", "np.min) stats.register(\"max\", np.max) return stats def write_evolution_logs(i, stats, filename=\"output/evolution_gen.csv\"): #print(i, stats) if type(stats)", "ind: ind.fitness.values[0]) stats.register(\"avg\", np.mean) stats.register(\"std\", np.std) stats.register(\"min\", np.min) stats.register(\"max\", np.max) return stats def", "dict: message = ','.join([str(x) for x in [i,stats['avg'], stats['std'], stats['min'], stats['max'], stats['dateRange']]]) elif", "population size'} def getStatisticsMeter(): stats = tools.Statistics(lambda ind: ind.fitness.values[0]) stats.register(\"avg\", np.mean) stats.register(\"std\", np.std)", "stats.register(\"min\", 
np.min) stats.register(\"max\", np.max) return stats def write_evolution_logs(i, stats, filename=\"output/evolution_gen.csv\"): #print(i, stats) if", "stats['dateRange']]]) elif type(stats) == list: message = ','.join([str(x) for x in [i] +", "getStatisticsMeter(): stats = tools.Statistics(lambda ind: ind.fitness.values[0]) stats.register(\"avg\", np.mean) stats.register(\"std\", np.std) stats.register(\"min\", np.min) stats.register(\"max\",", "stats = tools.Statistics(lambda ind: ind.fitness.values[0]) stats.register(\"avg\", np.mean) stats.register(\"std\", np.std) stats.register(\"min\", np.min) stats.register(\"max\", np.max)", "#!/bin/python from deap import tools import numpy as np import os statisticsNames =", "as np import os statisticsNames = {'avg': 'Average profit', 'std': 'Profit variation', 'min':", "numpy as np import os statisticsNames = {'avg': 'Average profit', 'std': 'Profit variation',", "'size': 'Population size', 'maxsize': 'Max population size'} def getStatisticsMeter(): stats = tools.Statistics(lambda ind:", "def getStatisticsMeter(): stats = tools.Statistics(lambda ind: ind.fitness.values[0]) stats.register(\"avg\", np.mean) stats.register(\"std\", np.std) stats.register(\"min\", np.min)", "np.mean) stats.register(\"std\", np.std) stats.register(\"min\", np.min) stats.register(\"max\", np.max) return stats def write_evolution_logs(i, stats, filename=\"output/evolution_gen.csv\"):", "statisticsNames = {'avg': 'Average profit', 'std': 'Profit variation', 'min': 'Minimum profit', 'max': 'Maximum", "tools.Statistics(lambda ind: ind.fitness.values[0]) stats.register(\"avg\", np.mean) stats.register(\"std\", np.std) stats.register(\"min\", np.min) stats.register(\"max\", np.max) return stats", "list: message = ','.join([str(x) for x in [i] + stats]) else: raise #print(message)", "= ','.join([str(x) for x in [i] + stats]) else: raise #print(message) if i", "raise #print(message) if i == 0 and os.path.isfile(filename): os.remove(filename) 
f=open(filename, 'a+') f.write(message+\"\\n\") #print(message)", "'Minimum profit', 'max': 'Maximum profit', 'size': 'Population size', 'maxsize': 'Max population size'} def", "for x in [i,stats['avg'], stats['std'], stats['min'], stats['max'], stats['dateRange']]]) elif type(stats) == list: message", "import numpy as np import os statisticsNames = {'avg': 'Average profit', 'std': 'Profit", "x in [i,stats['avg'], stats['std'], stats['min'], stats['max'], stats['dateRange']]]) elif type(stats) == list: message =", "in [i,stats['avg'], stats['std'], stats['min'], stats['max'], stats['dateRange']]]) elif type(stats) == list: message = ','.join([str(x)", "np.std) stats.register(\"min\", np.min) stats.register(\"max\", np.max) return stats def write_evolution_logs(i, stats, filename=\"output/evolution_gen.csv\"): #print(i, stats)", "write_evolution_logs(i, stats, filename=\"output/evolution_gen.csv\"): #print(i, stats) if type(stats) == dict: message = ','.join([str(x) for" ]
[ "= Headline.objects.all()[::-1] context = {'object_list': headlines,} return render(request, \"mysite/scrape.html\", context) def post(self, request):", "{'object_list': headlines,} return render(request, \"mysite/scrape_detail.html\", context) class HomeClass(View): def get(self, request): list_title =", "= {'object_list': headlines,} return render(request, \"mysite/scrape.html\", context) def post(self, request): list_link = Headline.objects.values_list('url',", "= text new_post.author = author new_post.time = time new_post.save() return redirect(f'/post/{id_get}') else: return", "request): return render(request, 'mysite/login.html') def post(self, request): username = request.POST.get('user_name') password = request.POST.get('pass_word')", "from.forms import PostForm from django.contrib.auth import authenticate, login from django.contrib.auth.mixins import LoginRequiredMixin from", "password = request.POST.get('pass_word') my_user = authenticate(username=username, password=password) if my_user is None: return render(request,", "request): f = PostForm(request.POST) if not f.is_valid(): return render(request, 'mysite/add_unsuccess.html') if request.user.has_perm('mysite.add_post'): f.save()", "PostForm(request.POST) if not f.is_valid(): return render(request, 'mysite/add_unsuccess.html') if request.user.has_perm('mysite.add_post'): f.save() else: return HttpResponse('You", "def get(self, request): f = PostForm() context = {'fm': f} return render(request,'mysite/add_post.html',context )", "f} return render(request,'mysite/add_post.html',context ) def post(self, request): f = PostForm(request.POST) if not f.is_valid():", "f.is_valid(): return render(request, 'mysite/add_unsuccess.html') if request.user.has_perm('mysite.add_post'): f.save() else: return HttpResponse('You do not have", "= BeautifulSoup(content, \"html.parser\") News = soup.find_all('div', {\"class\":\"clearfix item\"}) for article in News: linkx", "new_post.image = image new_post.title = title 
new_post.text = text new_post.author = author new_post.time", "def get(self, request): return render(request, 'mysite/login.html') def post(self, request): username = request.POST.get('user_name') password", "Title.objects.all() context = {\"baiviet\" : list_title} return render(request, 'mysite/home.html', context) class LoginClass(View): def", "d-ib\"}) for artical in News: image = artical.find(\"img\", {\"class\": \"\"})['src'] title = artical.find(\"h1\",", "= Title.objects.all() context = {\"baiviet\" : list_title} return render(request, 'mysite/home.html', context) class LoginClass(View):", "fields = ['title', 'body_text', 'date'] class DeletePost(DeleteView): model = Title template_name = 'mysite/delete_post.html'", "artical.find(\"strong\", {\"class\": \"\"}).text time = artical.find(\"span\", {\"class\": \"ArticleDate\"}).text if not Artical.objects.filter(title=title): new_post =", "= \"https://vietnamnet.vn/vn/thoi-su/\" content = session.get(url).content soup = BeautifulSoup(content, \"html.parser\") News = soup.find_all('div', {\"class\":\"clearfix", "image = imagex['src'] titlex = article.find('a', {\"class\":\"f-18 title\"}) title = titlex.text authorx =", "'mysite/add_success.html') class AdminView(ListView): model = Title template_name = 'mysite/admin_site.html' class TestDetailView(DetailView): model =", "return render(request, 'mysite/login.html') def post(self, request): username = request.POST.get('user_name') password = request.POST.get('pass_word') my_user", "import View from.models import Title, Headline, Artical from.forms import PostForm from django.contrib.auth import", "= article.find('div', {\"class\":\"lead\"}) text = textx.text new_headline = Headline() new_headline.title = title new_headline.image", "\"mysite/scrape.html\", context) def post(self, request): list_link = Headline.objects.values_list('url', flat=True) link = request.POST.get('link') id_get", "id_get) context = {'object_list': headlines,} return render(request, 
\"mysite/scrape_detail.html\", context) class HomeClass(View): def get(self,", "= soup.find_all('div', {\"class\":\"ArticleDetail w-660 d-ib\"}) for artical in News: image = artical.find(\"img\", {\"class\":", "{\"class\":\"ArticleDetail w-660 d-ib\"}) for artical in News: image = artical.find(\"img\", {\"class\": \"\"})['src'] title", "{\"class\":\"clearfix item\"}) for article in News: linkx = article.find('a', {\"class\":\"m-t-5 w-240 d-ib thumb", "headlines = Headline.objects.all()[::-1] context = {'object_list': headlines,} return render(request, \"mysite/scrape.html\", context) def post(self,", "= {'fm': f} return render(request,'mysite/add_post.html',context ) def post(self, request): f = PostForm(request.POST) if", "Scrape(View): def get(self, request): #Artical.objects.all().delete() Headline.objects.all().delete() session = requests.Session() session.headers = {\"User-Agent\": \"Googlebot/2.1", "return render(request, \"mysite/scrape_detail.html\", context) class HomeClass(View): def get(self, request): list_title = Title.objects.all() context", "from django import urls from django.db.models.fields import URLField from django.shortcuts import render, redirect", "soup = BeautifulSoup(content, \"html.parser\") News = soup.find_all('div', {\"class\":\"clearfix item\"}) for article in News:", "{\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url = \"https://vietnamnet.vn/vn/thoi-su/\" content = session.get(url).content soup = BeautifulSoup(content, \"html.parser\")", "article.find('a', {\"class\":\"m-t-5 w-240 d-ib thumb left m-r-20\"}) link=linkx['href'] imagex = article.find('img', {\"class\":\"lazy\"}) image", "= title new_post.text = text new_post.author = author new_post.time = time new_post.save() return", "= time new_post.save() return redirect(f'/post/{id_get}') else: return redirect(f'/post/{id_get}') def show_detail(request, id_get): headlines =", "Create your views here. 
class Scrape(View): def get(self, request): #Artical.objects.all().delete() Headline.objects.all().delete() session =", "DetailView, CreateView, UpdateView, DeleteView from django.urls import reverse_lazy import requests from bs4 import", "timex.text textx = article.find('div', {\"class\":\"lead\"}) text = textx.text new_headline = Headline() new_headline.title =", "c-3e\"}).text text = artical.find(\"div\", {\"class\": \"ArticleContent\"}).text author = artical.find(\"strong\", {\"class\": \"\"}).text time =", "\"html.parser\") News = soup.find_all('div', {\"class\":\"clearfix item\"}) for article in News: linkx = article.find('a',", "headlines,} return render(request, \"mysite/scrape_detail.html\", context) class HomeClass(View): def get(self, request): list_title = Title.objects.all()", "= PostForm() context = {'fm': f} return render(request,'mysite/add_post.html',context ) def post(self, request): f", "context) def post(self, request): list_link = Headline.objects.values_list('url', flat=True) link = request.POST.get('link') id_get =", "item\"}) for article in News: linkx = article.find('a', {\"class\":\"m-t-5 w-240 d-ib thumb left", "View from.models import Title, Headline, Artical from.forms import PostForm from django.contrib.auth import authenticate,", "UpdateView, DeleteView from django.urls import reverse_lazy import requests from bs4 import BeautifulSoup #", "= request.POST.get('id') if (link in list_link): session = requests.Session() session.headers = {\"User-Agent\": \"Googlebot/2.1", "BeautifulSoup # Create your views here. class Scrape(View): def get(self, request): #Artical.objects.all().delete() Headline.objects.all().delete()", "not have access!') return render(request, 'mysite/add_success.html') class AdminView(ListView): model = Title template_name =", "# Create your views here. 
class Scrape(View): def get(self, request): #Artical.objects.all().delete() Headline.objects.all().delete() session", "requests.Session() session.headers = {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url = \"https://vietnamnet.vn/vn/thoi-su/\" content = session.get(url).content soup", "django.urls import reverse_lazy import requests from bs4 import BeautifulSoup # Create your views", "TestDetailView(DetailView): model = Title template_name = 'mysite/admin_detailview.html' class UpdatePost(UpdateView): model = Title template_name", "time new_post.save() return redirect(f'/post/{id_get}') else: return redirect(f'/post/{id_get}') def show_detail(request, id_get): headlines = Artical.objects.get(id", "session = requests.Session() session.headers = {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url = \"https://vietnamnet.vn/vn/thoi-su/\" content =", "from django.views import View from.models import Title, Headline, Artical from.forms import PostForm from", "= article.find('a', {\"class\":\"f-18 title\"}) title = titlex.text authorx = article.find('a', {\"class\":\"box-subcate-style4-namecate\"}) author =", "{\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url = link content = session.get(url).content soup = BeautifulSoup(content, \"html.parser\")", "{\"class\": \"\"})['src'] title = artical.find(\"h1\", {\"class\": \"title f-22 c-3e\"}).text text = artical.find(\"div\", {\"class\":", "'mysite/login.html') def post(self, request): username = request.POST.get('user_name') password = request.POST.get('pass_word') my_user = authenticate(username=username,", "get(self, request): return render(request, 'mysite/login.html') def post(self, request): username = request.POST.get('user_name') password =", "title\"}) title = titlex.text authorx = article.find('a', {\"class\":\"box-subcate-style4-namecate\"}) author = authorx.text timex =", "new_post.save() return redirect(f'/post/{id_get}') else: return 
redirect(f'/post/{id_get}') def show_detail(request, id_get): headlines = Artical.objects.get(id =", "'mysite/add_unsuccess.html') if request.user.has_perm('mysite.add_post'): f.save() else: return HttpResponse('You do not have access!') return render(request,", "article in News: linkx = article.find('a', {\"class\":\"m-t-5 w-240 d-ib thumb left m-r-20\"}) link=linkx['href']", "\"title f-22 c-3e\"}).text text = artical.find(\"div\", {\"class\": \"ArticleContent\"}).text author = artical.find(\"strong\", {\"class\": \"\"}).text", "artical.find(\"span\", {\"class\": \"ArticleDate\"}).text if not Artical.objects.filter(title=title): new_post = Artical() new_post.id = id_get new_post.image", "= 'mysite/update_post.html' fields = ['title', 'body_text', 'date'] class DeletePost(DeleteView): model = Title template_name", "new_post.title = title new_post.text = text new_post.author = author new_post.time = time new_post.save()", "request): f = PostForm() context = {'fm': f} return render(request,'mysite/add_post.html',context ) def post(self,", "context = {\"baiviet\" : list_title} return render(request, 'mysite/home.html', context) class LoginClass(View): def get(self,", "text new_headline.url = \"https://vietnamnet.vn/\"+link new_headline.save() headlines = Headline.objects.all()[::-1] context = {'object_list': headlines,} return", "= Artical() new_post.id = id_get new_post.image = image new_post.title = title new_post.text =", "content = session.get(url).content soup = BeautifulSoup(content, \"html.parser\") News = soup.find_all('div', {\"class\":\"clearfix item\"}) for", "None: return render(request, 'mysite/login_unsuccess.html') login(request, my_user) return render(request, 'mysite/login_success.html') class AddPost(LoginRequiredMixin,View): login_url='/login/' def", "render(request, 'mysite/login_success.html') class AddPost(LoginRequiredMixin,View): login_url='/login/' def get(self, request): f = PostForm() context =", "requests.Session() session.headers = 
{\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url = link content = session.get(url).content soup", "(+http://www.google.com/bot.html)\"} url = \"https://vietnamnet.vn/vn/thoi-su/\" content = session.get(url).content soup = BeautifulSoup(content, \"html.parser\") News =", "imagex = article.find('img', {\"class\":\"lazy\"}) image = imagex['src'] titlex = article.find('a', {\"class\":\"f-18 title\"}) title", "artical.find(\"img\", {\"class\": \"\"})['src'] title = artical.find(\"h1\", {\"class\": \"title f-22 c-3e\"}).text text = artical.find(\"div\",", "username = request.POST.get('user_name') password = request.POST.get('pass_word') my_user = authenticate(username=username, password=password) if my_user is", "request from django.views import View from.models import Title, Headline, Artical from.forms import PostForm", "w-240 d-ib thumb left m-r-20\"}) link=linkx['href'] imagex = article.find('img', {\"class\":\"lazy\"}) image = imagex['src']", "list_title} return render(request, 'mysite/home.html', context) class LoginClass(View): def get(self, request): return render(request, 'mysite/login.html')", "for article in News: linkx = article.find('a', {\"class\":\"m-t-5 w-240 d-ib thumb left m-r-20\"})", "f-22 c-3e\"}).text text = artical.find(\"div\", {\"class\": \"ArticleContent\"}).text author = artical.find(\"strong\", {\"class\": \"\"}).text time", "title new_headline.image = image new_headline.author = author new_headline.time = time new_headline.text = text", "return render(request, 'mysite/home.html', context) class LoginClass(View): def get(self, request): return render(request, 'mysite/login.html') def", "url = link content = session.get(url).content soup = BeautifulSoup(content, \"html.parser\") News = soup.find_all('div',", "\"mysite/scrape_detail.html\", context) class HomeClass(View): def get(self, request): list_title = Title.objects.all() context = {\"baiviet\"", "get(self, request): #Artical.objects.all().delete() 
Headline.objects.all().delete() session = requests.Session() session.headers = {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url", "f = PostForm(request.POST) if not f.is_valid(): return render(request, 'mysite/add_unsuccess.html') if request.user.has_perm('mysite.add_post'): f.save() else:", "if not f.is_valid(): return render(request, 'mysite/add_unsuccess.html') if request.user.has_perm('mysite.add_post'): f.save() else: return HttpResponse('You do", "author new_post.time = time new_post.save() return redirect(f'/post/{id_get}') else: return redirect(f'/post/{id_get}') def show_detail(request, id_get):", "= title new_headline.image = image new_headline.author = author new_headline.time = time new_headline.text =", "= requests.Session() session.headers = {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url = link content = session.get(url).content", "django.http import HttpResponse, request from django.views import View from.models import Title, Headline, Artical", "login(request, my_user) return render(request, 'mysite/login_success.html') class AddPost(LoginRequiredMixin,View): login_url='/login/' def get(self, request): f =", "return render(request, 'mysite/add_unsuccess.html') if request.user.has_perm('mysite.add_post'): f.save() else: return HttpResponse('You do not have access!')", "import authenticate, login from django.contrib.auth.mixins import LoginRequiredMixin from django.views.generic import ListView, DetailView, CreateView,", "Title template_name = 'mysite/admin_site.html' class TestDetailView(DetailView): model = Title template_name = 'mysite/admin_detailview.html' class", "list_link): session = requests.Session() session.headers = {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url = link content", "\"html.parser\") News = soup.find_all('div', {\"class\":\"ArticleDetail w-660 d-ib\"}) for artical in News: image =", "return redirect(f'/post/{id_get}') else: return 
redirect(f'/post/{id_get}') def show_detail(request, id_get): headlines = Artical.objects.get(id = id_get)", "= {\"baiviet\" : list_title} return render(request, 'mysite/home.html', context) class LoginClass(View): def get(self, request):", "text = textx.text new_headline = Headline() new_headline.title = title new_headline.image = image new_headline.author", "= time new_headline.text = text new_headline.url = \"https://vietnamnet.vn/\"+link new_headline.save() headlines = Headline.objects.all()[::-1] context", "= Artical.objects.get(id = id_get) context = {'object_list': headlines,} return render(request, \"mysite/scrape_detail.html\", context) class", "\"\"}).text time = artical.find(\"span\", {\"class\": \"ArticleDate\"}).text if not Artical.objects.filter(title=title): new_post = Artical() new_post.id", "model = Title template_name = 'mysite/admin_detailview.html' class UpdatePost(UpdateView): model = Title template_name =", "from django.http import HttpResponse, request from django.views import View from.models import Title, Headline,", "Headline() new_headline.title = title new_headline.image = image new_headline.author = author new_headline.time = time", "'body_text', 'date'] class DeletePost(DeleteView): model = Title template_name = 'mysite/delete_post.html' success_url = reverse_lazy('mysite:admin-site')", "context = {'fm': f} return render(request,'mysite/add_post.html',context ) def post(self, request): f = PostForm(request.POST)", "titlex = article.find('a', {\"class\":\"f-18 title\"}) title = titlex.text authorx = article.find('a', {\"class\":\"box-subcate-style4-namecate\"}) author", "def post(self, request): username = request.POST.get('user_name') password = request.POST.get('pass_word') my_user = authenticate(username=username, password=password)", "django.contrib.auth.mixins import LoginRequiredMixin from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView from django.urls", "flat=True) link = 
request.POST.get('link') id_get = request.POST.get('id') if (link in list_link): session =", "headlines,} return render(request, \"mysite/scrape.html\", context) def post(self, request): list_link = Headline.objects.values_list('url', flat=True) link", "time new_headline.text = text new_headline.url = \"https://vietnamnet.vn/\"+link new_headline.save() headlines = Headline.objects.all()[::-1] context =", "def show_detail(request, id_get): headlines = Artical.objects.get(id = id_get) context = {'object_list': headlines,} return", "from django.urls import reverse_lazy import requests from bs4 import BeautifulSoup # Create your", "authenticate, login from django.contrib.auth.mixins import LoginRequiredMixin from django.views.generic import ListView, DetailView, CreateView, UpdateView,", "model = Title template_name = 'mysite/admin_site.html' class TestDetailView(DetailView): model = Title template_name =", "return render(request, 'mysite/login_unsuccess.html') login(request, my_user) return render(request, 'mysite/login_success.html') class AddPost(LoginRequiredMixin,View): login_url='/login/' def get(self,", "your views here. 
class Scrape(View): def get(self, request): #Artical.objects.all().delete() Headline.objects.all().delete() session = requests.Session()", "artical.find(\"div\", {\"class\": \"ArticleContent\"}).text author = artical.find(\"strong\", {\"class\": \"\"}).text time = artical.find(\"span\", {\"class\": \"ArticleDate\"}).text", "redirect(f'/post/{id_get}') else: return redirect(f'/post/{id_get}') def show_detail(request, id_get): headlines = Artical.objects.get(id = id_get) context", "django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView from django.urls import reverse_lazy import requests", "redirect(f'/post/{id_get}') def show_detail(request, id_get): headlines = Artical.objects.get(id = id_get) context = {'object_list': headlines,}", "request.POST.get('user_name') password = request.POST.get('pass_word') my_user = authenticate(username=username, password=password) if my_user is None: return", "= artical.find(\"div\", {\"class\": \"ArticleContent\"}).text author = artical.find(\"strong\", {\"class\": \"\"}).text time = artical.find(\"span\", {\"class\":", "image = artical.find(\"img\", {\"class\": \"\"})['src'] title = artical.find(\"h1\", {\"class\": \"title f-22 c-3e\"}).text text", "= imagex['src'] titlex = article.find('a', {\"class\":\"f-18 title\"}) title = titlex.text authorx = article.find('a',", "CreateView, UpdateView, DeleteView from django.urls import reverse_lazy import requests from bs4 import BeautifulSoup", "do not have access!') return render(request, 'mysite/add_success.html') class AdminView(ListView): model = Title template_name", "context) class HomeClass(View): def get(self, request): list_title = Title.objects.all() context = {\"baiviet\" :", "headlines = Artical.objects.get(id = id_get) context = {'object_list': headlines,} return render(request, \"mysite/scrape_detail.html\", context)", "title = titlex.text authorx = article.find('a', {\"class\":\"box-subcate-style4-namecate\"}) author = authorx.text timex = 
article.find('span',", "id_get = request.POST.get('id') if (link in list_link): session = requests.Session() session.headers = {\"User-Agent\":", "Artical.objects.get(id = id_get) context = {'object_list': headlines,} return render(request, \"mysite/scrape_detail.html\", context) class HomeClass(View):", "timex = article.find('span', {\"class\":\"time\"}) time = timex.text textx = article.find('div', {\"class\":\"lead\"}) text =", "if not Artical.objects.filter(title=title): new_post = Artical() new_post.id = id_get new_post.image = image new_post.title", "PostForm from django.contrib.auth import authenticate, login from django.contrib.auth.mixins import LoginRequiredMixin from django.views.generic import", "{'fm': f} return render(request,'mysite/add_post.html',context ) def post(self, request): f = PostForm(request.POST) if not", "from django.shortcuts import render, redirect from django.http import HttpResponse, request from django.views import", "import HttpResponse, request from django.views import View from.models import Title, Headline, Artical from.forms", "redirect from django.http import HttpResponse, request from django.views import View from.models import Title,", "def post(self, request): f = PostForm(request.POST) if not f.is_valid(): return render(request, 'mysite/add_unsuccess.html') if", "class AdminView(ListView): model = Title template_name = 'mysite/admin_site.html' class TestDetailView(DetailView): model = Title", "render(request, 'mysite/add_success.html') class AdminView(ListView): model = Title template_name = 'mysite/admin_site.html' class TestDetailView(DetailView): model", "reverse_lazy import requests from bs4 import BeautifulSoup # Create your views here. 
class", "(link in list_link): session = requests.Session() session.headers = {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url =", "Headline.objects.all().delete() session = requests.Session() session.headers = {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url = \"https://vietnamnet.vn/vn/thoi-su/\" content", "= author new_headline.time = time new_headline.text = text new_headline.url = \"https://vietnamnet.vn/\"+link new_headline.save() headlines", "article.find('span', {\"class\":\"time\"}) time = timex.text textx = article.find('div', {\"class\":\"lead\"}) text = textx.text new_headline", "= Title template_name = 'mysite/admin_site.html' class TestDetailView(DetailView): model = Title template_name = 'mysite/admin_detailview.html'", "here. class Scrape(View): def get(self, request): #Artical.objects.all().delete() Headline.objects.all().delete() session = requests.Session() session.headers =", "\"https://vietnamnet.vn/vn/thoi-su/\" content = session.get(url).content soup = BeautifulSoup(content, \"html.parser\") News = soup.find_all('div', {\"class\":\"clearfix item\"})", "new_headline = Headline() new_headline.title = title new_headline.image = image new_headline.author = author new_headline.time", "= article.find('a', {\"class\":\"m-t-5 w-240 d-ib thumb left m-r-20\"}) link=linkx['href'] imagex = article.find('img', {\"class\":\"lazy\"})", "News: image = artical.find(\"img\", {\"class\": \"\"})['src'] title = artical.find(\"h1\", {\"class\": \"title f-22 c-3e\"}).text", "= author new_post.time = time new_post.save() return redirect(f'/post/{id_get}') else: return redirect(f'/post/{id_get}') def show_detail(request,", "linkx = article.find('a', {\"class\":\"m-t-5 w-240 d-ib thumb left m-r-20\"}) link=linkx['href'] imagex = article.find('img',", "article.find('img', {\"class\":\"lazy\"}) image = imagex['src'] titlex = article.find('a', {\"class\":\"f-18 title\"}) title = titlex.text", "DeleteView from django.urls 
import reverse_lazy import requests from bs4 import BeautifulSoup # Create", "'mysite/update_post.html' fields = ['title', 'body_text', 'date'] class DeletePost(DeleteView): model = Title template_name =", "Artical() new_post.id = id_get new_post.image = image new_post.title = title new_post.text = text", "class AddPost(LoginRequiredMixin,View): login_url='/login/' def get(self, request): f = PostForm() context = {'fm': f}", "request.POST.get('id') if (link in list_link): session = requests.Session() session.headers = {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"}", "template_name = 'mysite/admin_detailview.html' class UpdatePost(UpdateView): model = Title template_name = 'mysite/update_post.html' fields =", "Headline, Artical from.forms import PostForm from django.contrib.auth import authenticate, login from django.contrib.auth.mixins import", "class UpdatePost(UpdateView): model = Title template_name = 'mysite/update_post.html' fields = ['title', 'body_text', 'date']", "session.headers = {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url = \"https://vietnamnet.vn/vn/thoi-su/\" content = session.get(url).content soup =", "for artical in News: image = artical.find(\"img\", {\"class\": \"\"})['src'] title = artical.find(\"h1\", {\"class\":", "my_user = authenticate(username=username, password=password) if my_user is None: return render(request, 'mysite/login_unsuccess.html') login(request, my_user)", "from django.contrib.auth import authenticate, login from django.contrib.auth.mixins import LoginRequiredMixin from django.views.generic import ListView,", "link content = session.get(url).content soup = BeautifulSoup(content, \"html.parser\") News = soup.find_all('div', {\"class\":\"ArticleDetail w-660", "author = artical.find(\"strong\", {\"class\": \"\"}).text time = artical.find(\"span\", {\"class\": \"ArticleDate\"}).text if not Artical.objects.filter(title=title):", "class TestDetailView(DetailView): model = Title 
class LoginClass(View):
    """Show the login form (GET) and authenticate submitted credentials (POST)."""

    def get(self, request):
        return render(request, 'mysite/login.html')

    def post(self, request):
        my_user = authenticate(
            username=request.POST.get('user_name'),
            password=request.POST.get('pass_word'),
        )
        # authenticate() returns None on bad credentials.
        if my_user is None:
            return render(request, 'mysite/login_unsuccess.html')
        login(request, my_user)
        return render(request, 'mysite/login_success.html')
{\"class\":\"time\"}) time = timex.text textx = article.find('div', {\"class\":\"lead\"}) text = textx.text", "import reverse_lazy import requests from bs4 import BeautifulSoup # Create your views here.", "my_user is None: return render(request, 'mysite/login_unsuccess.html') login(request, my_user) return render(request, 'mysite/login_success.html') class AddPost(LoginRequiredMixin,View):", "text = artical.find(\"div\", {\"class\": \"ArticleContent\"}).text author = artical.find(\"strong\", {\"class\": \"\"}).text time = artical.find(\"span\",", "def get(self, request): list_title = Title.objects.all() context = {\"baiviet\" : list_title} return render(request,", "new_post.author = author new_post.time = time new_post.save() return redirect(f'/post/{id_get}') else: return redirect(f'/post/{id_get}') def", "LoginClass(View): def get(self, request): return render(request, 'mysite/login.html') def post(self, request): username = request.POST.get('user_name')", "return render(request, \"mysite/scrape.html\", context) def post(self, request): list_link = Headline.objects.values_list('url', flat=True) link =", "render(request, 'mysite/login.html') def post(self, request): username = request.POST.get('user_name') password = request.POST.get('pass_word') my_user =", "= link content = session.get(url).content soup = BeautifulSoup(content, \"html.parser\") News = soup.find_all('div', {\"class\":\"ArticleDetail", "= titlex.text authorx = article.find('a', {\"class\":\"box-subcate-style4-namecate\"}) author = authorx.text timex = article.find('span', {\"class\":\"time\"})", "LoginRequiredMixin from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView from django.urls import reverse_lazy", "{\"class\":\"box-subcate-style4-namecate\"}) author = authorx.text timex = article.find('span', {\"class\":\"time\"}) time = timex.text textx =", "article.find('a', {\"class\":\"box-subcate-style4-namecate\"}) author = authorx.text timex = 
article.find('span', {\"class\":\"time\"}) time = timex.text textx", "Headline.objects.all()[::-1] context = {'object_list': headlines,} return render(request, \"mysite/scrape.html\", context) def post(self, request): list_link", "'mysite/login_unsuccess.html') login(request, my_user) return render(request, 'mysite/login_success.html') class AddPost(LoginRequiredMixin,View): login_url='/login/' def get(self, request): f", "= article.find('a', {\"class\":\"box-subcate-style4-namecate\"}) author = authorx.text timex = article.find('span', {\"class\":\"time\"}) time = timex.text", "d-ib thumb left m-r-20\"}) link=linkx['href'] imagex = article.find('img', {\"class\":\"lazy\"}) image = imagex['src'] titlex", "UpdatePost(UpdateView): model = Title template_name = 'mysite/update_post.html' fields = ['title', 'body_text', 'date'] class", "new_headline.text = text new_headline.url = \"https://vietnamnet.vn/\"+link new_headline.save() headlines = Headline.objects.all()[::-1] context = {'object_list':", "else: return HttpResponse('You do not have access!') return render(request, 'mysite/add_success.html') class AdminView(ListView): model", "context) class LoginClass(View): def get(self, request): return render(request, 'mysite/login.html') def post(self, request): username", "import Title, Headline, Artical from.forms import PostForm from django.contrib.auth import authenticate, login from", "session.get(url).content soup = BeautifulSoup(content, \"html.parser\") News = soup.find_all('div', {\"class\":\"ArticleDetail w-660 d-ib\"}) for artical", "import urls from django.db.models.fields import URLField from django.shortcuts import render, redirect from django.http", "return render(request, 'mysite/add_success.html') class AdminView(ListView): model = Title template_name = 'mysite/admin_site.html' class TestDetailView(DetailView):", "session.get(url).content soup = BeautifulSoup(content, \"html.parser\") News = soup.find_all('div', {\"class\":\"clearfix item\"}) for article 
# Generic class-based views for the admin-style CRUD pages over Title.

class AdminView(ListView):
    """List all Title objects on the admin landing page."""
    model = Title
    template_name = 'mysite/admin_site.html'


class TestDetailView(DetailView):
    """Detail page for a single Title."""
    model = Title
    template_name = 'mysite/admin_detailview.html'


class UpdatePost(UpdateView):
    """Edit form for a Title; only the listed fields are editable."""
    model = Title
    template_name = 'mysite/update_post.html'
    fields = ['title', 'body_text', 'date']


class DeletePost(DeleteView):
    """Confirm-and-delete page for a Title; returns to the admin list."""
    model = Title
    template_name = 'mysite/delete_post.html'
    # Resolved lazily because URLConf may not be loaded at import time.
    success_url = reverse_lazy('mysite:admin-site')
render(request, \"mysite/scrape.html\", context)", "\"\"})['src'] title = artical.find(\"h1\", {\"class\": \"title f-22 c-3e\"}).text text = artical.find(\"div\", {\"class\": \"ArticleContent\"}).text", "left m-r-20\"}) link=linkx['href'] imagex = article.find('img', {\"class\":\"lazy\"}) image = imagex['src'] titlex = article.find('a',", "Artical.objects.filter(title=title): new_post = Artical() new_post.id = id_get new_post.image = image new_post.title = title", "context = {'object_list': headlines,} return render(request, \"mysite/scrape_detail.html\", context) class HomeClass(View): def get(self, request):", "return redirect(f'/post/{id_get}') def show_detail(request, id_get): headlines = Artical.objects.get(id = id_get) context = {'object_list':", "new_post.time = time new_post.save() return redirect(f'/post/{id_get}') else: return redirect(f'/post/{id_get}') def show_detail(request, id_get): headlines", "\"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url = \"https://vietnamnet.vn/vn/thoi-su/\" content = session.get(url).content soup = BeautifulSoup(content, \"html.parser\") News", "{'object_list': headlines,} return render(request, \"mysite/scrape.html\", context) def post(self, request): list_link = Headline.objects.values_list('url', flat=True)", "id_get new_post.image = image new_post.title = title new_post.text = text new_post.author = author", "= {'object_list': headlines,} return render(request, \"mysite/scrape_detail.html\", context) class HomeClass(View): def get(self, request): list_title", "else: return redirect(f'/post/{id_get}') def show_detail(request, id_get): headlines = Artical.objects.get(id = id_get) context =", "password=password) if my_user is None: return render(request, 'mysite/login_unsuccess.html') login(request, my_user) return render(request, 'mysite/login_success.html')", "from.models import Title, Headline, Artical from.forms import PostForm from django.contrib.auth import authenticate, login", "import BeautifulSoup # Create 
your views here. class Scrape(View): def get(self, request): #Artical.objects.all().delete()", "access!') return render(request, 'mysite/add_success.html') class AdminView(ListView): model = Title template_name = 'mysite/admin_site.html' class", "request): username = request.POST.get('user_name') password = request.POST.get('pass_word') my_user = authenticate(username=username, password=password) if my_user", "= artical.find(\"h1\", {\"class\": \"title f-22 c-3e\"}).text text = artical.find(\"div\", {\"class\": \"ArticleContent\"}).text author =", "soup.find_all('div', {\"class\":\"clearfix item\"}) for article in News: linkx = article.find('a', {\"class\":\"m-t-5 w-240 d-ib", "article.find('div', {\"class\":\"lead\"}) text = textx.text new_headline = Headline() new_headline.title = title new_headline.image =", "imagex['src'] titlex = article.find('a', {\"class\":\"f-18 title\"}) title = titlex.text authorx = article.find('a', {\"class\":\"box-subcate-style4-namecate\"})", "Title, Headline, Artical from.forms import PostForm from django.contrib.auth import authenticate, login from django.contrib.auth.mixins", "author new_headline.time = time new_headline.text = text new_headline.url = \"https://vietnamnet.vn/\"+link new_headline.save() headlines =", "def post(self, request): list_link = Headline.objects.values_list('url', flat=True) link = request.POST.get('link') id_get = request.POST.get('id')", "['title', 'body_text', 'date'] class DeletePost(DeleteView): model = Title template_name = 'mysite/delete_post.html' success_url =", "= BeautifulSoup(content, \"html.parser\") News = soup.find_all('div', {\"class\":\"ArticleDetail w-660 d-ib\"}) for artical in News:", "= id_get) context = {'object_list': headlines,} return render(request, \"mysite/scrape_detail.html\", context) class HomeClass(View): def", "class Scrape(View): def get(self, request): #Artical.objects.all().delete() Headline.objects.all().delete() session = requests.Session() session.headers = 
{\"User-Agent\":", "render(request, 'mysite/login_unsuccess.html') login(request, my_user) return render(request, 'mysite/login_success.html') class AddPost(LoginRequiredMixin,View): login_url='/login/' def get(self, request):", "in list_link): session = requests.Session() session.headers = {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url = link", "HttpResponse, request from django.views import View from.models import Title, Headline, Artical from.forms import", "import requests from bs4 import BeautifulSoup # Create your views here. class Scrape(View):", "= Title template_name = 'mysite/update_post.html' fields = ['title', 'body_text', 'date'] class DeletePost(DeleteView): model", "= authenticate(username=username, password=password) if my_user is None: return render(request, 'mysite/login_unsuccess.html') login(request, my_user) return", "new_headline.save() headlines = Headline.objects.all()[::-1] context = {'object_list': headlines,} return render(request, \"mysite/scrape.html\", context) def", "{\"baiviet\" : list_title} return render(request, 'mysite/home.html', context) class LoginClass(View): def get(self, request): return", "new_headline.title = title new_headline.image = image new_headline.author = author new_headline.time = time new_headline.text", "urls from django.db.models.fields import URLField from django.shortcuts import render, redirect from django.http import", "login from django.contrib.auth.mixins import LoginRequiredMixin from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView", "login_url='/login/' def get(self, request): f = PostForm() context = {'fm': f} return render(request,'mysite/add_post.html',context", "return HttpResponse('You do not have access!') return render(request, 'mysite/add_success.html') class AdminView(ListView): model =", "authorx.text timex = article.find('span', {\"class\":\"time\"}) time = timex.text textx = article.find('div', {\"class\":\"lead\"}) text", 
"show_detail(request, id_get): headlines = Artical.objects.get(id = id_get) context = {'object_list': headlines,} return render(request,", "= request.POST.get('link') id_get = request.POST.get('id') if (link in list_link): session = requests.Session() session.headers", "new_headline.author = author new_headline.time = time new_headline.text = text new_headline.url = \"https://vietnamnet.vn/\"+link new_headline.save()", "post(self, request): f = PostForm(request.POST) if not f.is_valid(): return render(request, 'mysite/add_unsuccess.html') if request.user.has_perm('mysite.add_post'):", "= article.find('img', {\"class\":\"lazy\"}) image = imagex['src'] titlex = article.find('a', {\"class\":\"f-18 title\"}) title =", "{\"class\": \"ArticleContent\"}).text author = artical.find(\"strong\", {\"class\": \"\"}).text time = artical.find(\"span\", {\"class\": \"ArticleDate\"}).text if", "= session.get(url).content soup = BeautifulSoup(content, \"html.parser\") News = soup.find_all('div', {\"class\":\"ArticleDetail w-660 d-ib\"}) for", "if my_user is None: return render(request, 'mysite/login_unsuccess.html') login(request, my_user) return render(request, 'mysite/login_success.html') class", "thumb left m-r-20\"}) link=linkx['href'] imagex = article.find('img', {\"class\":\"lazy\"}) image = imagex['src'] titlex =", "request): #Artical.objects.all().delete() Headline.objects.all().delete() session = requests.Session() session.headers = {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url =", "= {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url = \"https://vietnamnet.vn/vn/thoi-su/\" content = session.get(url).content soup = BeautifulSoup(content,", "time = artical.find(\"span\", {\"class\": \"ArticleDate\"}).text if not Artical.objects.filter(title=title): new_post = Artical() new_post.id =", "'mysite/admin_site.html' class TestDetailView(DetailView): model = Title template_name = 'mysite/admin_detailview.html' class 
UpdatePost(UpdateView): model =", "= text new_headline.url = \"https://vietnamnet.vn/\"+link new_headline.save() headlines = Headline.objects.all()[::-1] context = {'object_list': headlines,}", "= Title template_name = 'mysite/admin_detailview.html' class UpdatePost(UpdateView): model = Title template_name = 'mysite/update_post.html'", "authenticate(username=username, password=password) if my_user is None: return render(request, 'mysite/login_unsuccess.html') login(request, my_user) return render(request,", "session = requests.Session() session.headers = {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url = link content =", "not f.is_valid(): return render(request, 'mysite/add_unsuccess.html') if request.user.has_perm('mysite.add_post'): f.save() else: return HttpResponse('You do not", "title = artical.find(\"h1\", {\"class\": \"title f-22 c-3e\"}).text text = artical.find(\"div\", {\"class\": \"ArticleContent\"}).text author", "= soup.find_all('div', {\"class\":\"clearfix item\"}) for article in News: linkx = article.find('a', {\"class\":\"m-t-5 w-240", "= textx.text new_headline = Headline() new_headline.title = title new_headline.image = image new_headline.author =", "from django.contrib.auth.mixins import LoginRequiredMixin from django.views.generic import ListView, DetailView, CreateView, UpdateView, DeleteView from", "in News: image = artical.find(\"img\", {\"class\": \"\"})['src'] title = artical.find(\"h1\", {\"class\": \"title f-22", "m-r-20\"}) link=linkx['href'] imagex = article.find('img', {\"class\":\"lazy\"}) image = imagex['src'] titlex = article.find('a', {\"class\":\"f-18", "return render(request,'mysite/add_post.html',context ) def post(self, request): f = PostForm(request.POST) if not f.is_valid(): return", "list_link = Headline.objects.values_list('url', flat=True) link = request.POST.get('link') id_get = request.POST.get('id') if (link in", "authorx = article.find('a', {\"class\":\"box-subcate-style4-namecate\"}) author = 
authorx.text timex = article.find('span', {\"class\":\"time\"}) time =", "django.db.models.fields import URLField from django.shortcuts import render, redirect from django.http import HttpResponse, request", "url = \"https://vietnamnet.vn/vn/thoi-su/\" content = session.get(url).content soup = BeautifulSoup(content, \"html.parser\") News = soup.find_all('div',", "text new_post.author = author new_post.time = time new_post.save() return redirect(f'/post/{id_get}') else: return redirect(f'/post/{id_get}')", "{\"class\":\"lazy\"}) image = imagex['src'] titlex = article.find('a', {\"class\":\"f-18 title\"}) title = titlex.text authorx", "= 'mysite/admin_detailview.html' class UpdatePost(UpdateView): model = Title template_name = 'mysite/update_post.html' fields = ['title',", "textx.text new_headline = Headline() new_headline.title = title new_headline.image = image new_headline.author = author", "= timex.text textx = article.find('div', {\"class\":\"lead\"}) text = textx.text new_headline = Headline() new_headline.title", "title new_post.text = text new_post.author = author new_post.time = time new_post.save() return redirect(f'/post/{id_get}')", "= session.get(url).content soup = BeautifulSoup(content, \"html.parser\") News = soup.find_all('div', {\"class\":\"clearfix item\"}) for article", "textx = article.find('div', {\"class\":\"lead\"}) text = textx.text new_headline = Headline() new_headline.title = title", "request.POST.get('link') id_get = request.POST.get('id') if (link in list_link): session = requests.Session() session.headers =", "list_title = Title.objects.all() context = {\"baiviet\" : list_title} return render(request, 'mysite/home.html', context) class", "f = PostForm() context = {'fm': f} return render(request,'mysite/add_post.html',context ) def post(self, request):", "{\"class\":\"f-18 title\"}) title = titlex.text authorx = article.find('a', {\"class\":\"box-subcate-style4-namecate\"}) author = authorx.text timex", "from django.views.generic import 
ListView, DetailView, CreateView, UpdateView, DeleteView from django.urls import reverse_lazy import", "in News: linkx = article.find('a', {\"class\":\"m-t-5 w-240 d-ib thumb left m-r-20\"}) link=linkx['href'] imagex", "= request.POST.get('user_name') password = request.POST.get('pass_word') my_user = authenticate(username=username, password=password) if my_user is None:", "= id_get new_post.image = image new_post.title = title new_post.text = text new_post.author =", "= artical.find(\"strong\", {\"class\": \"\"}).text time = artical.find(\"span\", {\"class\": \"ArticleDate\"}).text if not Artical.objects.filter(title=title): new_post", "BeautifulSoup(content, \"html.parser\") News = soup.find_all('div', {\"class\":\"clearfix item\"}) for article in News: linkx =", "django.contrib.auth import authenticate, login from django.contrib.auth.mixins import LoginRequiredMixin from django.views.generic import ListView, DetailView,", "= requests.Session() session.headers = {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url = \"https://vietnamnet.vn/vn/thoi-su/\" content = session.get(url).content", "{\"class\":\"m-t-5 w-240 d-ib thumb left m-r-20\"}) link=linkx['href'] imagex = article.find('img', {\"class\":\"lazy\"}) image =", "not Artical.objects.filter(title=title): new_post = Artical() new_post.id = id_get new_post.image = image new_post.title =", "{\"class\": \"\"}).text time = artical.find(\"span\", {\"class\": \"ArticleDate\"}).text if not Artical.objects.filter(title=title): new_post = Artical()", "from bs4 import BeautifulSoup # Create your views here. 
class Scrape(View): def get(self,", "class HomeClass(View): def get(self, request): list_title = Title.objects.all() context = {\"baiviet\" : list_title}", "id_get): headlines = Artical.objects.get(id = id_get) context = {'object_list': headlines,} return render(request, \"mysite/scrape_detail.html\",", "render(request, \"mysite/scrape.html\", context) def post(self, request): list_link = Headline.objects.values_list('url', flat=True) link = request.POST.get('link')", "image new_post.title = title new_post.text = text new_post.author = author new_post.time = time", "from django.db.models.fields import URLField from django.shortcuts import render, redirect from django.http import HttpResponse,", "render(request, 'mysite/home.html', context) class LoginClass(View): def get(self, request): return render(request, 'mysite/login.html') def post(self,", "= image new_headline.author = author new_headline.time = time new_headline.text = text new_headline.url =", "context = {'object_list': headlines,} return render(request, \"mysite/scrape.html\", context) def post(self, request): list_link =", "= artical.find(\"img\", {\"class\": \"\"})['src'] title = artical.find(\"h1\", {\"class\": \"title f-22 c-3e\"}).text text =", "= Headline() new_headline.title = title new_headline.image = image new_headline.author = author new_headline.time =", "artical in News: image = artical.find(\"img\", {\"class\": \"\"})['src'] title = artical.find(\"h1\", {\"class\": \"title", "session.headers = {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url = link content = session.get(url).content soup =", "BeautifulSoup(content, \"html.parser\") News = soup.find_all('div', {\"class\":\"ArticleDetail w-660 d-ib\"}) for artical in News: image", "new_post.text = text new_post.author = author new_post.time = time new_post.save() return redirect(f'/post/{id_get}') else:", "template_name = 'mysite/admin_site.html' class TestDetailView(DetailView): model = Title template_name = 
'mysite/admin_detailview.html' class UpdatePost(UpdateView):", "render, redirect from django.http import HttpResponse, request from django.views import View from.models import", "content = session.get(url).content soup = BeautifulSoup(content, \"html.parser\") News = soup.find_all('div', {\"class\":\"ArticleDetail w-660 d-ib\"})", "PostForm() context = {'fm': f} return render(request,'mysite/add_post.html',context ) def post(self, request): f =", "\"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url = link content = session.get(url).content soup = BeautifulSoup(content, \"html.parser\") News", "post(self, request): username = request.POST.get('user_name') password = request.POST.get('pass_word') my_user = authenticate(username=username, password=password) if", "{\"class\":\"time\"}) time = timex.text textx = article.find('div', {\"class\":\"lead\"}) text = textx.text new_headline =", "import ListView, DetailView, CreateView, UpdateView, DeleteView from django.urls import reverse_lazy import requests from", "\"ArticleContent\"}).text author = artical.find(\"strong\", {\"class\": \"\"}).text time = artical.find(\"span\", {\"class\": \"ArticleDate\"}).text if not", "bs4 import BeautifulSoup # Create your views here. 
class Scrape(View): def get(self, request):", "= ['title', 'body_text', 'date'] class DeletePost(DeleteView): model = Title template_name = 'mysite/delete_post.html' success_url", "if (link in list_link): session = requests.Session() session.headers = {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url", "get(self, request): list_title = Title.objects.all() context = {\"baiviet\" : list_title} return render(request, 'mysite/home.html',", "return render(request, 'mysite/login_success.html') class AddPost(LoginRequiredMixin,View): login_url='/login/' def get(self, request): f = PostForm() context", "HttpResponse('You do not have access!') return render(request, 'mysite/add_success.html') class AdminView(ListView): model = Title", "new_post = Artical() new_post.id = id_get new_post.image = image new_post.title = title new_post.text", "model = Title template_name = 'mysite/update_post.html' fields = ['title', 'body_text', 'date'] class DeletePost(DeleteView):", "import URLField from django.shortcuts import render, redirect from django.http import HttpResponse, request from", "Title template_name = 'mysite/update_post.html' fields = ['title', 'body_text', 'date'] class DeletePost(DeleteView): model =", "django.views import View from.models import Title, Headline, Artical from.forms import PostForm from django.contrib.auth", "new_headline.image = image new_headline.author = author new_headline.time = time new_headline.text = text new_headline.url", "Headline.objects.values_list('url', flat=True) link = request.POST.get('link') id_get = request.POST.get('id') if (link in list_link): session", "request): list_link = Headline.objects.values_list('url', flat=True) link = request.POST.get('link') id_get = request.POST.get('id') if (link", "requests from bs4 import BeautifulSoup # Create your views here. 
class Scrape(View): def", "#Artical.objects.all().delete() Headline.objects.all().delete() session = requests.Session() session.headers = {\"User-Agent\": \"Googlebot/2.1 (+http://www.google.com/bot.html)\"} url = \"https://vietnamnet.vn/vn/thoi-su/\"", "author = authorx.text timex = article.find('span', {\"class\":\"time\"}) time = timex.text textx = article.find('div',", "import PostForm from django.contrib.auth import authenticate, login from django.contrib.auth.mixins import LoginRequiredMixin from django.views.generic", "image new_headline.author = author new_headline.time = time new_headline.text = text new_headline.url = \"https://vietnamnet.vn/\"+link", "have access!') return render(request, 'mysite/add_success.html') class AdminView(ListView): model = Title template_name = 'mysite/admin_site.html'", "link = request.POST.get('link') id_get = request.POST.get('id') if (link in list_link): session = requests.Session()" ]
[ "'GAS', 'STZ', 'HRB', 'XYL', 'TSN', 'FOSL', 'DO', 'BBY', 'LUK', 'CTAS', 'HAS', 'POM', 'PBCT',", "'TDY', 'TE', 'TECD', 'TECH', 'TEG', 'TEL', 'TER', 'TEX', 'TFX', 'TG', 'TGI', 'TGT', 'THC',", "'DHI', 'ARG', 'LEN', 'QEP', 'EFX', 'CVH', 'CLF', 'CBG', 'CINF', 'NWL', 'HSP', 'EXPE', 'XRAY',", "'COHU', 'COL', 'COLB', 'COO', 'COP', 'COST', 'COV', 'CPB', 'CPLA', 'CPRT', 'CPSI', 'CPT', 'CPWR',", "'MMC', 'FE', 'VTR', 'SYY', 'PCG', 'HNZ', 'ADM', 'BRCM', 'ED', 'PPG', 'CME', 'LYB', 'APD',", "'AMD', 'AME', 'AMED', 'AMG', 'AMGN', 'AMP', 'AMSF', 'AMSG', 'AMT', 'AMZN', 'AN', 'ANDE', 'ANF',", "'XL', 'XLNX', 'XLS', 'XOM', 'XOXO', 'XRAY', 'XRX', 'XYL', 'Y', 'YHOO', 'YUM', 'ZBRA', 'ZEP',", "'UTHR', 'UTI', 'UTIW', 'UTX', 'UVV', 'V', 'VAL', 'VAR', 'VCI', 'VCLK', 'VDSI', 'VECO', 'VFC',", "'NUE', 'NUVA', 'NVDA', 'NVE', 'NVR', 'NVTL', 'NWBI', 'NWE', 'NWL', 'NWN', 'NWSA', 'NX', 'NYB',", "'NEM', 'BK', 'TRV', 'TYC', 'GIS', 'ITW', 'ACE', 'PRU', 'VIAB', 'CTL', 'LMT', 'FDX', 'PCP',", "'AMZN', 'AN', 'ANDE', 'ANF', 'ANN', 'ANR', 'ANSS', 'AOL', 'AON', 'AOS', 'APA', 'APC', 'APD',", "'WM', 'CF', 'AZO', 'AMAT', 'CAM', 'VNO', 'OMC', 'CI', 'XEL', 'A', 'CAH', 'AET', 'STJ',", "'HSY', 'HTLD', 'HTSI', 'HUBG', 'HUM', 'HVT', 'HW', 'HWAY', 'HWKN', 'HZO', 'IART', 'IBKR', 'IBM',", "'CBE', 'NU', 'AMP', 'NTAP', 'ZMH', 'LTD', 'ADI', 'PGR', 'HST', 'FAST', 'MTB', 'HOT', 'RRC',", "'AMAT', 'CAM', 'VNO', 'OMC', 'CI', 'XEL', 'A', 'CAH', 'AET', 'STJ', 'AVB', 'L', 'IR',", "'SF', 'SFD', 'SFG', 'SFNC', 'SFY', 'SGMS', 'SGY', 'SHAW', 'SHFL', 'SHLM', 'SHOO', 'SHW', 'SIAL',", "'BKH', 'BKI', 'BKMU', 'BKS', 'BLK', 'BLKB', 'BLL', 'BMC', 'BMI', 'BMR', 'BMS', 'BMY', 'BOBE',", "'NDAQ', 'NDSN', 'NE', 'NEE', 'NEM', 'NEOG', 'NEU', 'NEWP', 'NFG', 'NFLX', 'NFP', 'NFX', 'NI',", "'R', 'RAH', 'RAI', 'RAX', 'RBC', 'RBCN', 'RBN', 'RCII', 'RDC', 'RE', 'RECN', 'REG', 'REGN',", "'NOC', 'CMI', 'CCL', 'PEG', 'INTU', 'PLD', 'SYK', 'TROW', 'COH', 'ADBE', 'HES', 'ETN', 'MOS',", "'PLL', 'TIF', 'TXT', 'XL', 'LLTC', 'WAT', 'NI', 'DRI', 'PCL', 'TAP', 'LLL', 
'AVP', 'CNX',", "'HSP', 'EXPE', 'XRAY', 'UNM', 'MAS', 'MWV', 'SNI', 'PWR', 'JEC', 'PHM', 'IRM', 'HP', 'CSC',", "'HON', 'HOS', 'HOT', 'HOTT', 'HP', 'HPQ', 'HPT', 'HPY', 'HR', 'HRB', 'HRC', 'HRL', 'HRS',", "'PPS', 'PQ', 'PRA', 'PRAA', 'PRFT', 'PRGO', 'PRGS', 'PRU', 'PRX', 'PRXL', 'PSA', 'PSB', 'PSEC',", "'QCOM', 'CSCO', 'SLB', 'C', 'CMCSA', 'BAC', 'DIS', 'MCD', 'AMZN', 'HD', 'KFT', 'V', 'OXY',", "'ROK', 'ROL', 'ROP', 'ROSE', 'ROST', 'ROVI', 'RPM', 'RRC', 'RRD', 'RRGB', 'RS', 'RSG', 'RSH',", "'HGR', 'HHS', 'HI', 'HIBB', 'HIG', 'HII', 'HITK', 'HITT', 'HIW', 'HLIT', 'HLX', 'HMA', 'HME',", "'JPM', 'PM', 'KO', 'MRK', 'VZ', 'WMT', 'ORCL', 'INTC', 'PEP', 'ABT', 'QCOM', 'CSCO', 'SLB',", "'CBB', 'CBE', 'CBEY', 'CBG', 'CBK', 'CBM', 'CBOE', 'CBR', 'CBRL', 'CBS', 'CBSH', 'CBST', 'CBT',", "'PDCO', 'JDSU', 'ANF', 'PBI', 'NDAQ', 'X', 'SEE', 'TER', 'THC', 'GME', 'GNW', 'FHN', 'ETFC',", "'WIRE', 'WLP', 'WM', 'WMB', 'WMS', 'WMT', 'WOOF', 'WOR', 'WPO', 'WPP', 'WPX', 'WR', 'WRB',", "'MENT', 'MET', 'MFB', 'MGAM', 'MGLN', 'MHK', 'MHO', 'MIG', 'MINI', 'MJN', 'MKC', 'MKSI', 'MLHR',", "'LIFE', 'MCO', 'HIG', 'JWN', 'FRX', 'MNST', 'FFIV', 'NVDA', 'KIM', 'KEY', 'RSG', 'MKC', 'BCR',", "'BMC', 'NYX', 'CMA', 'BTU', 'WIN', 'JOY', 'HBAN', 'TSO', 'HRS', 'LRCX', 'PNW', 'DHI', 'ARG',", "'MMM', 'MMS', 'MMSI', 'MNRO', 'MNST', 'MNTA', 'MO', 'MOH', 'MOLX', 'MON', 'MOS', 'MOV', 'MPC',", "'PES', 'PETM', 'PETS', 'PFE', 'PFG', 'PFS', 'PG', 'PGR', 'PH', 'PHM', 'PII', 'PJC', 'PKE',", "'WHR', 'WIBC', 'WIN', 'WIRE', 'WLP', 'WM', 'WMB', 'WMS', 'WMT', 'WOOF', 'WOR', 'WPO', 'WPP',", "'IVZ', 'DLTR', 'KSS', 'FTI', 'RHT', 'WU', 'STX', 'DOV', 'ALTR', 'WPI', 'HSY', 'ROP', 'PAYX',", "'NVDA', 'KIM', 'KEY', 'RSG', 'MKC', 'BCR', 'BSX', 'KLAC', 'AEE', 'BWA', 'SPLS', 'FIS', 'SRCL',", "'EFX', 'EGL', 'EGN', 'EGP', 'EHTH', 'EIG', 'EIX', 'EL', 'ELY', 'EMC', 'EME', 'EMN', 'EMR',", "'ABM', 'ABT', 'ACAT', 'ACC', 'ACE', 'ACI', 'ACIW', 'ACM', 'ACN', 'ACO', 'ACXM', 'ADBE', 'ADI',", "'ACN', 'ACO', 'ACXM', 'ADBE', 'ADI', 'ADM', 'ADP', 
'ADS', 'ADSK', 'ADTN', 'ADVS', 'AEE', 'AEGN',", "'KMPR', 'KMT', 'KMX', 'KND', 'KNX', 'KO', 'KOP', 'KOPN', 'KR', 'KRA', 'KRC', 'KRG', 'KS',", "'POOL', 'POST', 'POWI', 'POWL', 'PPG', 'PPS', 'PQ', 'PRA', 'PRAA', 'PRFT', 'PRGO', 'PRGS', 'PRU',", "'HWAY', 'HWKN', 'HZO', 'IART', 'IBKR', 'IBM', 'IBOC', 'ICE', 'ICON', 'ICUI', 'IDA', 'IDTI', 'IDXX',", "'SKX', 'SKYW', 'SLAB', 'SLB', 'SLG', 'SLGN', 'SLH', 'SLM', 'SLXP', 'SM', 'SMA', 'SMCI', 'SMG',", "'GAS', 'GB', 'GBCI', 'GCI', 'GCO', 'GD', 'GDI', 'GE', 'GEF', 'GEO', 'GES', 'GFF', 'GGG',", "'SNPS', 'SNV', 'SNX', 'SO', 'SON', 'SONC', 'SPAR', 'SPF', 'SPG', 'SPLS', 'SPN', 'SPPI', 'SPTN',", "'TECD', 'TECH', 'TEG', 'TEL', 'TER', 'TEX', 'TFX', 'TG', 'TGI', 'TGT', 'THC', 'THG', 'THO',", "'GEF', 'GEO', 'GES', 'GFF', 'GGG', 'GHL', 'GIFI', 'GILD', 'GIS', 'GLW', 'GMCR', 'GME', 'GMT',", "'MFB', 'MGAM', 'MGLN', 'MHK', 'MHO', 'MIG', 'MINI', 'MJN', 'MKC', 'MKSI', 'MLHR', 'MLI', 'MLM',", "'POWL', 'PPG', 'PPS', 'PQ', 'PRA', 'PRAA', 'PRFT', 'PRGO', 'PRGS', 'PRU', 'PRX', 'PRXL', 'PSA',", "'COST', 'TWX', 'TGT', 'SO', 'SBUX', 'AIG', 'F', 'FCX', 'MET', 'BIIB', 'EMR', 'APC', 'NKE',", "'HCN', 'BBBY', 'TEL', 'SRE', 'MSI', 'ROST', 'DELL', 'CTXS', 'FITB', 'RAI', 'PCAR', 'WY', 'SCHW',", "'ISRG', 'GLW', 'CRM', 'ALL', 'SE', 'HCP', 'RTN', 'WLP', 'CCI', 'JCI', 'MPC', 'MMC', 'FE',", "'ALTR', 'WPI', 'HSY', 'ROP', 'PAYX', 'GPS', 'SNDK', 'DTE', 'PRGO', 'RF', 'NTRS', 'DGX', 'CMG',", "'CAM', 'CAS', 'CASC', 'CASY', 'CAT', 'CATM', 'CATO', 'CATY', 'CB', 'CBB', 'CBE', 'CBEY', 'CBG',", "'MET', 'MFB', 'MGAM', 'MGLN', 'MHK', 'MHO', 'MIG', 'MINI', 'MJN', 'MKC', 'MKSI', 'MLHR', 'MLI',", "'GPS', 'SNDK', 'DTE', 'PRGO', 'RF', 'NTRS', 'DGX', 'CMG', 'FISV', 'ORLY', 'MUR', 'OKE', 'MYL',", "'M', 'MA', 'MAA', 'MAC', 'MAN', 'MANH', 'MANT', 'MAR', 'MAS', 'MASI', 'MAT', 'MATW', 'MATX',", "'TDW', 'TDY', 'TE', 'TECD', 'TECH', 'TEG', 'TEL', 'TER', 'TEX', 'TFX', 'TG', 'TGI', 'TGT',", "'BRS', 'BSX', 'BTH', 'BTU', 'BWA', 'BWLD', 'BWS', 'BXP', 'BXS', 'BYD', 'BYI', 'C', 'CA',", "'XOM', 
'GE', 'CVX', 'MSFT', 'IBM', 'T', 'GOOG', 'PG', 'JNJ', 'PFE', 'WFC', 'BRK.B', 'JPM',", "'BEN', 'BFS', 'BGC', 'BGFV', 'BGG', 'BGS', 'BH', 'BHE', 'BHI', 'BID', 'BIG', 'BIIB', 'BJRI',", "'CRK', 'CRL', 'CRM', 'CROX', 'CRR', 'CRS', 'CRUS', 'CRVL', 'CRY', 'CSC', 'CSCO', 'CSGS', 'CSH',", "'JBL', 'JBLU', 'JBT', 'JCI', 'JCOM', 'JCP', 'JDAS', 'JDSU', 'JEC', 'JEF', 'JJSF', 'JKHY', 'JLL',", "'PFS', 'PG', 'PGR', 'PH', 'PHM', 'PII', 'PJC', 'PKE', 'PKG', 'PKI', 'PKY', 'PL', 'PLCE',", "'NEU', 'NEWP', 'NFG', 'NFLX', 'NFP', 'NFX', 'NI', 'NILE', 'NJR', 'NKE', 'NNN', 'NOC', 'NOV',", "'MDU', 'MEAS', 'MED', 'MEI', 'MENT', 'MET', 'MFB', 'MGAM', 'MGLN', 'MHK', 'MHO', 'MIG', 'MINI',", "'LYB', 'APD', 'VLO', 'EQR', 'BEN', 'ECL', 'PPL', 'AON', 'WFM', 'BXP', 'YHOO', 'S', 'NBL',", "'IVAC', 'IVC', 'IVZ', 'JACK', 'JAH', 'JAKK', 'JBHT', 'JBL', 'JBLU', 'JBT', 'JCI', 'JCOM', 'JCP',", "'AMD', 'R', 'FII', 'RRD', 'BIG', 'AN', 'WPO', 'LXK', 'ANR', 'FSLR', 'DV', 'TIE'] ALL_TICKERS", "'OFC', 'OGE', 'OHI', 'OI', 'OII', 'OIS', 'OKE', 'OLN', 'OMC', 'OMCL', 'OMG', 'OMI', 'OMX',", "'YHOO', 'YUM', 'ZBRA', 'ZEP', 'ZEUS', 'ZLC', 'ZMH', 'ZQK', 'ZUMZ'] TICKERS = sorted(list(set(SNP_TICKERS) &", "'KDN', 'KELYA', 'KEX', 'KEY', 'KFY', 'KIM', 'KIRK', 'KLAC', 'KLIC', 'KMB', 'KMI', 'KMPR', 'KMT',", "'OPLK', 'OPNT', 'ORB', 'ORCL', 'ORI', 'ORIT', 'ORLY', 'ORN', 'OSG', 'OSIS', 'OSK', 'OXM', 'OXY',", "'LG', 'LH', 'LHCG', 'LHO', 'LIFE', 'LII', 'LINC', 'LKQ', 'LL', 'LLL', 'LLTC', 'LLY', 'LM',", "'SLB', 'C', 'CMCSA', 'BAC', 'DIS', 'MCD', 'AMZN', 'HD', 'KFT', 'V', 'OXY', 'COP', 'MO',", "'AVAV', 'AVB', 'AVD', 'AVID', 'AVP', 'AVT', 'AVY', 'AWR', 'AXE', 'AXP', 'AYI', 'AZO', 'AZZ',", "'PG', 'PGR', 'PH', 'PHM', 'PII', 'PJC', 'PKE', 'PKG', 'PKI', 'PKY', 'PL', 'PLCE', 'PLCM',", "'DD', 'HON', 'SPG', 'DUK', 'ACN', 'MDT', 'COST', 'TWX', 'TGT', 'SO', 'SBUX', 'AIG', 'F',", "'LTD', 'ADI', 'PGR', 'HST', 'FAST', 'MTB', 'HOT', 'RRC', 'HUM', 'CERN', 'CAG', 'IVZ', 'DLTR',", "'SFNC', 'SFY', 'SGMS', 'SGY', 'SHAW', 'SHFL', 'SHLM', 'SHOO', 'SHW', 
'SIAL', 'SIG', 'SIGI', 'SIGM',", "'NPO', 'NRG', 'NSC', 'NSIT', 'NSP', 'NSR', 'NTAP', 'NTCT', 'NTGR', 'NTLS', 'NTRI', 'NTRS', 'NU',", "'DGX', 'CMG', 'FISV', 'ORLY', 'MUR', 'OKE', 'MYL', 'BF.B', 'MAR', 'ROK', 'CHK', 'ABC', 'ICE',", "'CRR', 'CRS', 'CRUS', 'CRVL', 'CRY', 'CSC', 'CSCO', 'CSGS', 'CSH', 'CSL', 'CSTR', 'CSX', 'CTAS',", "'HSP', 'HST', 'HSTM', 'HSY', 'HTLD', 'HTSI', 'HUBG', 'HUM', 'HVT', 'HW', 'HWAY', 'HWKN', 'HZO',", "'ZION', 'HCBK', 'AIV', 'RHI', 'PCS', 'MOLX', 'TE', 'TRIP', 'DNB', 'LEG', 'JBL', 'IGT', 'JCP',", "'PCLN', 'PCP', 'PCTI', 'PDCE', 'PDCO', 'PEET', 'PEG', 'PEI', 'PEP', 'PERY', 'PES', 'PETM', 'PETS',", "'SLG', 'SLGN', 'SLH', 'SLM', 'SLXP', 'SM', 'SMA', 'SMCI', 'SMG', 'SMP', 'SMRT', 'SMTC', 'SNA',", "'AAN', 'AAON', 'AAP', 'AAPL', 'ABAX', 'ABC', 'ABFS', 'ABM', 'ABT', 'ACAT', 'ACC', 'ACE', 'ACI',", "'CTS', 'CTSH', 'CTXS', 'CUB', 'CUZ', 'CVBF', 'CVC', 'CVD', 'CVG', 'CVGW', 'CVH', 'CVLT', 'CVS',", "'AIRM', 'AIT', 'AIV', 'AIZ', 'AJG', 'AKAM', 'AKR', 'AKRX', 'AKS', 'ALB', 'ALE', 'ALEX', 'ALGN',", "'CVC', 'CVD', 'CVG', 'CVGW', 'CVH', 'CVLT', 'CVS', 'CVX', 'CW', 'CWTR', 'CXW', 'CY', 'CYBX',", "'WM', 'WMB', 'WMS', 'WMT', 'WOOF', 'WOR', 'WPO', 'WPP', 'WPX', 'WR', 'WRB', 'WRC', 'WRI',", "'C', 'CA', 'CAB', 'CACI', 'CAG', 'CAH', 'CAKE', 'CALM', 'CAM', 'CAS', 'CASC', 'CASY', 'CAT',", "'ASH', 'ASNA', 'ASTE', 'ATI', 'ATK', 'ATMI', 'ATML', 'ATNI', 'ATO', 'ATR', 'ATU', 'ATW', 'AVA',", "'STE', 'STI', 'STJ', 'STL', 'STLD', 'STMP', 'STR', 'STRA', 'STRI', 'STT', 'STX', 'STZ', 'SUN',", "'COH', 'ADBE', 'HES', 'ETN', 'MOS', 'IP', 'BDX', 'MHP', 'STI', 'LO', 'M', 'MJN', 'EIX',", "'PCLN', 'EXC', 'D', 'EOG', 'YUM', 'NEE', 'TWC', 'PSX', 'COV', 'ADP', 'AMT', 'AGN', 'NEM',", "'CLF', 'CLGX', 'CLH', 'CLI', 'CLMS', 'CLP', 'CLW', 'CLX', 'CMA', 'CMC', 'CMCSA', 'CME', 'CMG',", "'JWN', 'FRX', 'MNST', 'FFIV', 'NVDA', 'KIM', 'KEY', 'RSG', 'MKC', 'BCR', 'BSX', 'KLAC', 'AEE',", "'ETR', 'EV', 'EW', 'EWBC', 'EXAR', 'EXC', 'EXH', 'EXLS', 'EXP', 'EXPD', 'EXPE', 'EXPO', 'EXR',", "'TE', 'TRIP', 
'DNB', 'LEG', 'JBL', 'IGT', 'JCP', 'CVC', 'ATI', 'SAI', 'PKI', 'WPX', 'BMS',", "'FDO', 'WHR', 'MCHP', 'SCG', 'DNR', 'CFN', 'CPB', 'CMS', 'VMC', 'MU', 'BMC', 'NYX', 'CMA',", "'HNI', 'HNT', 'HNZ', 'HOG', 'HOLX', 'HOMB', 'HON', 'HOS', 'HOT', 'HOTT', 'HP', 'HPQ', 'HPT',", "'BEAM', 'BEAV', 'BELFB', 'BEN', 'BFS', 'BGC', 'BGFV', 'BGG', 'BGS', 'BH', 'BHE', 'BHI', 'BID',", "'ONB', 'ONE', 'OPEN', 'OPLK', 'OPNT', 'ORB', 'ORCL', 'ORI', 'ORIT', 'ORLY', 'ORN', 'OSG', 'OSIS',", "'CAH', 'AET', 'STJ', 'AVB', 'L', 'IR', 'PXD', 'KR', 'SWK', 'K', 'TDC', 'SHW', 'ESV',", "'CAT', 'EBAY', 'AXP', 'UPS', 'GS', 'ESRX', 'GILD', 'NWSA', 'MON', 'MA', 'LLY', 'CL', 'BA',", "'AA', 'AAN', 'AAON', 'AAP', 'AAPL', 'ABAX', 'ABC', 'ABFS', 'ABM', 'ABT', 'ACAT', 'ACC', 'ACE',", "'BMI', 'BMR', 'BMS', 'BMY', 'BOBE', 'BOH', 'BPFH', 'BR', 'BRC', 'BRCM', 'BRKL', 'BRKS', 'BRLI',", "'IGT', 'IGTE', 'IILG', 'IIVI', 'IM', 'IN', 'INDB', 'INFA', 'INGR', 'ININ', 'INT', 'INTC', 'INTU',", "'HME', 'HMN', 'HMSY', 'HNI', 'HNT', 'HNZ', 'HOG', 'HOLX', 'HOMB', 'HON', 'HOS', 'HOT', 'HOTT',", "'BYI', 'C', 'CA', 'CAB', 'CACI', 'CAG', 'CAH', 'CAKE', 'CALM', 'CAM', 'CAS', 'CASC', 'CASY',", "'URI', 'URS', 'USB', 'USMO', 'USTR', 'UTEK', 'UTHR', 'UTI', 'UTIW', 'UTX', 'UVV', 'V', 'VAL',", "'LMOS', 'LMT', 'LNC', 'LNCE', 'LNN', 'LNT', 'LO', 'LOGM', 'LOW', 'LPNT', 'LPS', 'LPSN', 'LPX',", "'SCG', 'DNR', 'CFN', 'CPB', 'CMS', 'VMC', 'MU', 'BMC', 'NYX', 'CMA', 'BTU', 'WIN', 'JOY',", "'PXD', 'KR', 'SWK', 'K', 'TDC', 'SHW', 'ESV', 'SYMC', 'PH', 'GWW', 'EW', 'ETR', 'NUE',", "'EXLS', 'EXP', 'EXPD', 'EXPE', 'EXPO', 'EXR', 'EZPW', 'F', 'FAF', 'FARO', 'FAST', 'FBHS', 'FBP',", "'FSLR', 'FSP', 'FST', 'FTI', 'FTR', 'FUL', 'FULT', 'FWRD', 'GAS', 'GB', 'GBCI', 'GCI', 'GCO',", "'JPM', 'JWN', 'K', 'KALU', 'KAMN', 'KBH', 'KBR', 'KDN', 'KELYA', 'KEX', 'KEY', 'KFY', 'KIM',", "'AVD', 'AVID', 'AVP', 'AVT', 'AVY', 'AWR', 'AXE', 'AXP', 'AYI', 'AZO', 'AZZ', 'B', 'BA',", "'PVTB', 'PWR', 'PX', 'PXD', 'PXP', 'PZZA', 'QCOM', 'QCOR', 'QEP', 'QLGC', 'QNST', 'QSFT', 
'QSII',", "'LANC', 'LAWS', 'LDL', 'LDR', 'LECO', 'LEG', 'LEN', 'LFUS', 'LG', 'LH', 'LHCG', 'LHO', 'LIFE',", "'RGS', 'RHI', 'RHT', 'RJF', 'RKT', 'RL', 'RLI', 'RMD', 'ROCK', 'ROG', 'ROK', 'ROL', 'ROP',", "'PX', 'PCLN', 'EXC', 'D', 'EOG', 'YUM', 'NEE', 'TWC', 'PSX', 'COV', 'ADP', 'AMT', 'AGN',", "'D', 'DAKT', 'DAR', 'DBD', 'DCI', 'DCOM', 'DD', 'DDD', 'DE', 'DECK', 'DEL', 'DELL', 'DF',", "'HCN', 'HCP', 'HCSG', 'HD', 'HE', 'HES', 'HF', 'HFC', 'HGR', 'HHS', 'HI', 'HIBB', 'HIG',", "'PACW', 'PAY', 'PAYX', 'PB', 'PBCT', 'PBH', 'PBI', 'PBY', 'PCAR', 'PCG', 'PCH', 'PCL', 'PCLN',", "'TEX', 'TFX', 'TG', 'TGI', 'TGT', 'THC', 'THG', 'THO', 'THOR', 'THS', 'TIBX', 'TIE', 'TIF',", "'IGT', 'JCP', 'CVC', 'ATI', 'SAI', 'PKI', 'WPX', 'BMS', 'AVY', 'HAR', 'OI', 'AIZ', 'NFLX',", "'CLMS', 'CLP', 'CLW', 'CLX', 'CMA', 'CMC', 'CMCSA', 'CME', 'CMG', 'CMI', 'CMN', 'CMP', 'CMS',", "'WYNN', 'BEAM', 'CNP', 'NE', 'JNPR', 'LH', 'EQT', 'CA', 'DVA', 'XLNX', 'EMN', 'SIAL', 'WEC',", "'CMS', 'CMTL', 'CNC', 'CNK', 'CNL', 'CNMD', 'CNP', 'CNQR', 'CNW', 'CNX', 'COCO', 'COF', 'COG',", "'MEAS', 'MED', 'MEI', 'MENT', 'MET', 'MFB', 'MGAM', 'MGLN', 'MHK', 'MHO', 'MIG', 'MINI', 'MJN',", "'WY', 'SCHW', 'VFC', 'WM', 'CF', 'AZO', 'AMAT', 'CAM', 'VNO', 'OMC', 'CI', 'XEL', 'A',", "'CROX', 'CRR', 'CRS', 'CRUS', 'CRVL', 'CRY', 'CSC', 'CSCO', 'CSGS', 'CSH', 'CSL', 'CSTR', 'CSX',", "'CSC', 'CSCO', 'CSGS', 'CSH', 'CSL', 'CSTR', 'CSX', 'CTAS', 'CTL', 'CTS', 'CTSH', 'CTXS', 'CUB',", "'EPIQ', 'EPR', 'EQIX', 'EQR', 'EQT', 'EQY', 'ESE', 'ESI', 'ESIO', 'ESL', 'ESRX', 'ESS', 'ESV',", "'TOL', 'TPX', 'TQNT', 'TR', 'TRAK', 'TRIP', 'TRLG', 'TRMB', 'TRMK', 'TRN', 'TROW', 'TRST', 'TRV',", "'HHS', 'HI', 'HIBB', 'HIG', 'HII', 'HITK', 'HITT', 'HIW', 'HLIT', 'HLX', 'HMA', 'HME', 'HMN',", "'WPI', 'HSY', 'ROP', 'PAYX', 'GPS', 'SNDK', 'DTE', 'PRGO', 'RF', 'NTRS', 'DGX', 'CMG', 'FISV',", "'EE', 'EFX', 'EGL', 'EGN', 'EGP', 'EHTH', 'EIG', 'EIX', 'EL', 'ELY', 'EMC', 'EME', 'EMN',", "'TFX', 'TG', 'TGI', 'TGT', 'THC', 'THG', 'THO', 'THOR', 'THS', 
'TIBX', 'TIE', 'TIF', 'TJX',", "'CLF', 'CBG', 'CINF', 'NWL', 'HSP', 'EXPE', 'XRAY', 'UNM', 'MAS', 'MWV', 'SNI', 'PWR', 'JEC',", "'MPC', 'MPW', 'MPWR', 'MRCY', 'MRK', 'MRO', 'MRX', 'MS', 'MSA', 'MSCC', 'MSCI', 'MSFT', 'MSI',", "'ADI', 'PGR', 'HST', 'FAST', 'MTB', 'HOT', 'RRC', 'HUM', 'CERN', 'CAG', 'IVZ', 'DLTR', 'KSS',", "'TTEK', 'TTI', 'TTMI', 'TTWO', 'TUES', 'TUP', 'TW', 'TWC', 'TWGP', 'TWTC', 'TWX', 'TXI', 'TXN',", "'DF', 'FLIR', 'GT', 'LM', 'APOL', 'PDCO', 'JDSU', 'ANF', 'PBI', 'NDAQ', 'X', 'SEE', 'TER',", "'DD', 'DDD', 'DE', 'DECK', 'DEL', 'DELL', 'DF', 'DFS', 'DGII', 'DGIT', 'DGX', 'DHI', 'DHR',", "'HPQ', 'PNC', 'COF', 'BAX', 'TJX', 'CELG', 'DTV', 'DE', 'DHR', 'TXN', 'HAL', 'WAG', 'PX',", "'BIIB', 'EMR', 'APC', 'NKE', 'DOW', 'LOW', 'NOV', 'KMB', 'APA', 'HPQ', 'PNC', 'COF', 'BAX',", "'AIZ', 'AJG', 'AKAM', 'AKR', 'AKRX', 'AKS', 'ALB', 'ALE', 'ALEX', 'ALGN', 'ALGT', 'ALK', 'ALL',", "'CBM', 'CBOE', 'CBR', 'CBRL', 'CBS', 'CBSH', 'CBST', 'CBT', 'CBU', 'CCC', 'CCE', 'CCI', 'CCL',", "'EIX', 'EL', 'ELY', 'EMC', 'EME', 'EMN', 'EMR', 'ENDP', 'ENR', 'ENS', 'ENSG', 'ENTR', 'ENZ',", "'IILG', 'IIVI', 'IM', 'IN', 'INDB', 'INFA', 'INGR', 'ININ', 'INT', 'INTC', 'INTU', 'IO', 'IP',", "'TDC', 'TDS', 'TDW', 'TDY', 'TE', 'TECD', 'TECH', 'TEG', 'TEL', 'TER', 'TEX', 'TFX', 'TG',", "'TE', 'TECD', 'TECH', 'TEG', 'TEL', 'TER', 'TEX', 'TFX', 'TG', 'TGI', 'TGT', 'THC', 'THG',", "'DRH', 'DRI', 'DRIV', 'DRQ', 'DSPG', 'DTE', 'DTSI', 'DTV', 'DUK', 'DV', 'DVA', 'DVN', 'DW',", "'LLL', 'LLTC', 'LLY', 'LM', 'LMNX', 'LMOS', 'LMT', 'LNC', 'LNCE', 'LNN', 'LNT', 'LO', 'LOGM',", "'SWY', 'LSI', 'TSS', 'ZION', 'HCBK', 'AIV', 'RHI', 'PCS', 'MOLX', 'TE', 'TRIP', 'DNB', 'LEG',", "'MLM', 'MMC', 'MMM', 'MMS', 'MMSI', 'MNRO', 'MNST', 'MNTA', 'MO', 'MOH', 'MOLX', 'MON', 'MOS',", "'PGR', 'HST', 'FAST', 'MTB', 'HOT', 'RRC', 'HUM', 'CERN', 'CAG', 'IVZ', 'DLTR', 'KSS', 'FTI',", "'CINF', 'NWL', 'HSP', 'EXPE', 'XRAY', 'UNM', 'MAS', 'MWV', 'SNI', 'PWR', 'JEC', 'PHM', 'IRM',", "'CBEY', 'CBG', 'CBK', 'CBM', 'CBOE', 
'CBR', 'CBRL', 'CBS', 'CBSH', 'CBST', 'CBT', 'CBU', 'CCC',", "'GPI', 'GPN', 'GPOR', 'GPS', 'GS', 'GSM', 'GT', 'GTAT', 'GTIV', 'GTY', 'GVA', 'GWW', 'GXP',", "'CHCO', 'CHD', 'CHE', 'CHG', 'CHK', 'CHRW', 'CHS', 'CI', 'CIEN', 'CINF', 'CIR', 'CKH', 'CKP',", "'COG', 'FLR', 'DPS', 'CLX', 'RL', 'WYNN', 'BEAM', 'CNP', 'NE', 'JNPR', 'LH', 'EQT', 'CA',", "'FE', 'FEIC', 'FELE', 'FFBC', 'FFIN', 'FFIV', 'FHN', 'FICO', 'FII', 'FINL', 'FIRE', 'FIS', 'FISV',", "'CUZ', 'CVBF', 'CVC', 'CVD', 'CVG', 'CVGW', 'CVH', 'CVLT', 'CVS', 'CVX', 'CW', 'CWTR', 'CXW',", "'UA', 'UBA', 'UBSI', 'UCBI', 'UDR', 'UEIC', 'UFCS', 'UFPI', 'UFS', 'UGI', 'UHS', 'UHT', 'UIL',", "'HOLX', 'HOMB', 'HON', 'HOS', 'HOT', 'HOTT', 'HP', 'HPQ', 'HPT', 'HPY', 'HR', 'HRB', 'HRC',", "'SNI', 'SNPS', 'SNV', 'SNX', 'SO', 'SON', 'SONC', 'SPAR', 'SPF', 'SPG', 'SPLS', 'SPN', 'SPPI',", "'MS', 'MSA', 'MSCC', 'MSCI', 'MSFT', 'MSI', 'MSM', 'MSTR', 'MTB', 'MTD', 'MTH', 'MTRN', 'MTRX',", "'SPLS', 'FIS', 'SRCL', 'EXPD', 'COL', 'VRSN', 'FMC', 'ADSK', 'PFG', 'WYN', 'SLM', 'PLL', 'TIF',", "'CHG', 'CHK', 'CHRW', 'CHS', 'CI', 'CIEN', 'CINF', 'CIR', 'CKH', 'CKP', 'CL', 'CLC', 'CLD',", "'AKAM', 'AKR', 'AKRX', 'AKS', 'ALB', 'ALE', 'ALEX', 'ALGN', 'ALGT', 'ALK', 'ALL', 'ALOG', 'ALTR',", "'ACI', 'ACIW', 'ACM', 'ACN', 'ACO', 'ACXM', 'ADBE', 'ADI', 'ADM', 'ADP', 'ADS', 'ADSK', 'ADTN',", "'NBR', 'TEG', 'EA', 'HRL', 'SWY', 'LSI', 'TSS', 'ZION', 'HCBK', 'AIV', 'RHI', 'PCS', 'MOLX',", "'HCBK', 'HCC', 'HCN', 'HCP', 'HCSG', 'HD', 'HE', 'HES', 'HF', 'HFC', 'HGR', 'HHS', 'HI',", "'OMC', 'CI', 'XEL', 'A', 'CAH', 'AET', 'STJ', 'AVB', 'L', 'IR', 'PXD', 'KR', 'SWK',", "'CRVL', 'CRY', 'CSC', 'CSCO', 'CSGS', 'CSH', 'CSL', 'CSTR', 'CSX', 'CTAS', 'CTL', 'CTS', 'CTSH',", "'IBKR', 'IBM', 'IBOC', 'ICE', 'ICON', 'ICUI', 'IDA', 'IDTI', 'IDXX', 'IEX', 'IFF', 'IFSIA', 'IGT',", "'IGTE', 'IILG', 'IIVI', 'IM', 'IN', 'INDB', 'INFA', 'INGR', 'ININ', 'INT', 'INTC', 'INTU', 'IO',", "'MCRL', 'MCRS', 'MCS', 'MCY', 'MD', 'MDC', 'MDCO', 'MDP', 'MDRX', 'MDSO', 'MDT', 'MDU', 
'MEAS',", "'BLKB', 'BLL', 'BMC', 'BMI', 'BMR', 'BMS', 'BMY', 'BOBE', 'BOH', 'BPFH', 'BR', 'BRC', 'BRCM',", "'IFSIA', 'IGT', 'IGTE', 'IILG', 'IIVI', 'IM', 'IN', 'INDB', 'INFA', 'INGR', 'ININ', 'INT', 'INTC',", "'KALU', 'KAMN', 'KBH', 'KBR', 'KDN', 'KELYA', 'KEX', 'KEY', 'KFY', 'KIM', 'KIRK', 'KLAC', 'KLIC',", "'EPAY', 'EPIQ', 'EPR', 'EQIX', 'EQR', 'EQT', 'EQY', 'ESE', 'ESI', 'ESIO', 'ESL', 'ESRX', 'ESS',", "'SNA', 'GCI', 'URBN', 'NBR', 'TEG', 'EA', 'HRL', 'SWY', 'LSI', 'TSS', 'ZION', 'HCBK', 'AIV',", "'XRAY', 'UNM', 'MAS', 'MWV', 'SNI', 'PWR', 'JEC', 'PHM', 'IRM', 'HP', 'CSC', 'SUN', 'TMK',", "'STJ', 'STL', 'STLD', 'STMP', 'STR', 'STRA', 'STRI', 'STT', 'STX', 'STZ', 'SUN', 'SUP', 'SUPX',", "'HAYN', 'HBAN', 'HBHC', 'HBI', 'HCBK', 'HCC', 'HCN', 'HCP', 'HCSG', 'HD', 'HE', 'HES', 'HF',", "'EIX', 'EL', 'DISCA', 'HCN', 'BBBY', 'TEL', 'SRE', 'MSI', 'ROST', 'DELL', 'CTXS', 'FITB', 'RAI',", "'HNZ', 'ADM', 'BRCM', 'ED', 'PPG', 'CME', 'LYB', 'APD', 'VLO', 'EQR', 'BEN', 'ECL', 'PPL',", "'SO', 'SON', 'SONC', 'SPAR', 'SPF', 'SPG', 'SPLS', 'SPN', 'SPPI', 'SPTN', 'SPW', 'SRCL', 'SRDX',", "'MW', 'MWIV', 'MWV', 'MWW', 'MYE', 'MYL', 'NAFC', 'NANO', 'NATI', 'NAVG', 'NBL', 'NBR', 'NBTB',", "'VLTR', 'VLY', 'VMC', 'VMI', 'VNO', 'VOXX', 'VPFG', 'VPHM', 'VRSN', 'VRTS', 'VRTU', 'VRTX', 'VSAT',", "'MTB', 'HOT', 'RRC', 'HUM', 'CERN', 'CAG', 'IVZ', 'DLTR', 'KSS', 'FTI', 'RHT', 'WU', 'STX',", "'TYC', 'GIS', 'ITW', 'ACE', 'PRU', 'VIAB', 'CTL', 'LMT', 'FDX', 'PCP', 'BBT', 'MS', 'BLK',", "'MANH', 'MANT', 'MAR', 'MAS', 'MASI', 'MAT', 'MATW', 'MATX', 'MCD', 'MCF', 'MCHP', 'MCK', 'MCO',", "'ABAX', 'ABC', 'ABFS', 'ABM', 'ABT', 'ACAT', 'ACC', 'ACE', 'ACI', 'ACIW', 'ACM', 'ACN', 'ACO',", "'ADP', 'AMT', 'AGN', 'NEM', 'BK', 'TRV', 'TYC', 'GIS', 'ITW', 'ACE', 'PRU', 'VIAB', 'CTL',", "'SIAL', 'WEC', 'CCE', 'WDC', 'LIFE', 'MCO', 'HIG', 'JWN', 'FRX', 'MNST', 'FFIV', 'NVDA', 'KIM',", "'GXP', 'GY', 'HAE', 'HAFC', 'HAIN', 'HAL', 'HAR', 'HAS', 'HAYN', 'HBAN', 'HBHC', 'HBI', 'HCBK',", "'UHT', 'UIL', 'UMBF', 'UMPQ', 
'UNF', 'UNFI', 'UNH', 'UNM', 'UNP', 'UNS', 'UNT', 'UNTD', 'UPS',", "SNP_TICKERS = ['AAPL', 'XOM', 'GE', 'CVX', 'MSFT', 'IBM', 'T', 'GOOG', 'PG', 'JNJ', 'PFE',", "'ICON', 'ICUI', 'IDA', 'IDTI', 'IDXX', 'IEX', 'IFF', 'IFSIA', 'IGT', 'IGTE', 'IILG', 'IIVI', 'IM',", "'BRKS', 'BRLI', 'BRO', 'BRS', 'BSX', 'BTH', 'BTU', 'BWA', 'BWLD', 'BWS', 'BXP', 'BXS', 'BYD',", "'A', 'CAH', 'AET', 'STJ', 'AVB', 'L', 'IR', 'PXD', 'KR', 'SWK', 'K', 'TDC', 'SHW',", "'WAT', 'NI', 'DRI', 'PCL', 'TAP', 'LLL', 'AVP', 'CNX', 'AES', 'AKAM', 'LNC', 'VAR', 'BLL',", "'IPG', 'IPHS', 'IPI', 'IR', 'IRBT', 'IRC', 'IRF', 'IRM', 'ISCA', 'ISIL', 'ISRG', 'IT', 'ITG',", "'YUM', 'NEE', 'TWC', 'PSX', 'COV', 'ADP', 'AMT', 'AGN', 'NEM', 'BK', 'TRV', 'TYC', 'GIS',", "'MS', 'BLK', 'DVN', 'AFL', 'ALXN', 'GD', 'WMB', 'CBS', 'CSX', 'TMO', 'AEP', 'CTSH', 'MRO',", "'GT', 'GTAT', 'GTIV', 'GTY', 'GVA', 'GWW', 'GXP', 'GY', 'HAE', 'HAFC', 'HAIN', 'HAL', 'HAR',", "'SHLM', 'SHOO', 'SHW', 'SIAL', 'SIG', 'SIGI', 'SIGM', 'SIVB', 'SJI', 'SJM', 'SKS', 'SKT', 'SKX',", "'TRLG', 'TRMB', 'TRMK', 'TRN', 'TROW', 'TRST', 'TRV', 'TSCO', 'TSN', 'TSO', 'TSRA', 'TSS', 'TTC',", "'ORCL', 'INTC', 'PEP', 'ABT', 'QCOM', 'CSCO', 'SLB', 'C', 'CMCSA', 'BAC', 'DIS', 'MCD', 'AMZN',", "'CRM', 'ALL', 'SE', 'HCP', 'RTN', 'WLP', 'CCI', 'JCI', 'MPC', 'MMC', 'FE', 'VTR', 'SYY',", "'ICUI', 'IDA', 'IDTI', 'IDXX', 'IEX', 'IFF', 'IFSIA', 'IGT', 'IGTE', 'IILG', 'IIVI', 'IM', 'IN',", "'AET', 'AF', 'AFAM', 'AFFX', 'AFG', 'AFL', 'AGCO', 'AGN', 'AGP', 'AGYS', 'AHL', 'AHS', 'AIG',", "'KRG', 'KS', 'KSS', 'KSU', 'KSWS', 'KWK', 'KWR', 'L', 'LAD', 'LAMR', 'LANC', 'LAWS', 'LDL',", "'OSIS', 'OSK', 'OXM', 'OXY', 'OZRK', 'PACW', 'PAY', 'PAYX', 'PB', 'PBCT', 'PBH', 'PBI', 'PBY',", "'CMI', 'CCL', 'PEG', 'INTU', 'PLD', 'SYK', 'TROW', 'COH', 'ADBE', 'HES', 'ETN', 'MOS', 'IP',", "'FLS', 'LUV', 'KMX', 'FDO', 'WHR', 'MCHP', 'SCG', 'DNR', 'CFN', 'CPB', 'CMS', 'VMC', 'MU',", "'BMS', 'BMY', 'BOBE', 'BOH', 'BPFH', 'BR', 'BRC', 'BRCM', 'BRKL', 'BRKS', 'BRLI', 'BRO', 'BRS',", "'AET', 
'STJ', 'AVB', 'L', 'IR', 'PXD', 'KR', 'SWK', 'K', 'TDC', 'SHW', 'ESV', 'SYMC',", "'PBI', 'NDAQ', 'X', 'SEE', 'TER', 'THC', 'GME', 'GNW', 'FHN', 'ETFC', 'AMD', 'R', 'FII',", "'DLX', 'DM', 'DMND', 'DNB', 'DNR', 'DO', 'DOV', 'DOW', 'DPS', 'DRC', 'DRE', 'DRH', 'DRI',", "'BLL', 'BMC', 'BMI', 'BMR', 'BMS', 'BMY', 'BOBE', 'BOH', 'BPFH', 'BR', 'BRC', 'BRCM', 'BRKL',", "'JNPR', 'JNS', 'JOSB', 'JOY', 'JPM', 'JWN', 'K', 'KALU', 'KAMN', 'KBH', 'KBR', 'KDN', 'KELYA',", "'GGG', 'GHL', 'GIFI', 'GILD', 'GIS', 'GLW', 'GMCR', 'GME', 'GMT', 'GNCMA', 'GNTX', 'GNW', 'GOOG',", "'PKY', 'PL', 'PLCE', 'PLCM', 'PLD', 'PLFE', 'PLL', 'PLT', 'PLXS', 'PM', 'PMC', 'PMTC', 'PMTI',", "'TPX', 'TQNT', 'TR', 'TRAK', 'TRIP', 'TRLG', 'TRMB', 'TRMK', 'TRN', 'TROW', 'TRST', 'TRV', 'TSCO',", "'FIRE', 'FIS', 'FISV', 'FITB', 'FIX', 'FL', 'FLIR', 'FLO', 'FLR', 'FLS', 'FMBI', 'FMC', 'FMER',", "'SWS', 'SWX', 'SWY', 'SXC', 'SXI', 'SXT', 'SYK', 'SYKE', 'SYMC', 'SYMM', 'SYNA', 'SYY', 'T',", "'CME', 'LYB', 'APD', 'VLO', 'EQR', 'BEN', 'ECL', 'PPL', 'AON', 'WFM', 'BXP', 'YHOO', 'S',", "'AAPL', 'ABAX', 'ABC', 'ABFS', 'ABM', 'ABT', 'ACAT', 'ACC', 'ACE', 'ACI', 'ACIW', 'ACM', 'ACN',", "'JBT', 'JCI', 'JCOM', 'JCP', 'JDAS', 'JDSU', 'JEC', 'JEF', 'JJSF', 'JKHY', 'JLL', 'JNJ', 'JNPR',", "'JDSU', 'JEC', 'JEF', 'JJSF', 'JKHY', 'JLL', 'JNJ', 'JNPR', 'JNS', 'JOSB', 'JOY', 'JPM', 'JWN',", "'AEE', 'AEGN', 'AEIS', 'AEO', 'AEP', 'AES', 'AET', 'AF', 'AFAM', 'AFFX', 'AFG', 'AFL', 'AGCO',", "'NE', 'JNPR', 'LH', 'EQT', 'CA', 'DVA', 'XLNX', 'EMN', 'SIAL', 'WEC', 'CCE', 'WDC', 'LIFE',", "'CERN', 'CAG', 'IVZ', 'DLTR', 'KSS', 'FTI', 'RHT', 'WU', 'STX', 'DOV', 'ALTR', 'WPI', 'HSY',", "'BLL', 'FLS', 'LUV', 'KMX', 'FDO', 'WHR', 'MCHP', 'SCG', 'DNR', 'CFN', 'CPB', 'CMS', 'VMC',", "'WFC', 'BRK.B', 'JPM', 'PM', 'KO', 'MRK', 'VZ', 'WMT', 'ORCL', 'INTC', 'PEP', 'ABT', 'QCOM',", "'GILD', 'NWSA', 'MON', 'MA', 'LLY', 'CL', 'BA', 'DD', 'HON', 'SPG', 'DUK', 'ACN', 'MDT',", "'PRFT', 'PRGO', 'PRGS', 'PRU', 'PRX', 'PRXL', 'PSA', 'PSB', 'PSEC', 'PSEM', 
'PSS', 'PSSI', 'PSX',", "'CTL', 'LMT', 'FDX', 'PCP', 'BBT', 'MS', 'BLK', 'DVN', 'AFL', 'ALXN', 'GD', 'WMB', 'CBS',", "'RRC', 'RRD', 'RRGB', 'RS', 'RSG', 'RSH', 'RSTI', 'RSYS', 'RT', 'RTEC', 'RTI', 'RTN', 'RUE',", "'PM', 'KO', 'MRK', 'VZ', 'WMT', 'ORCL', 'INTC', 'PEP', 'ABT', 'QCOM', 'CSCO', 'SLB', 'C',", "'AGCO', 'AGN', 'AGP', 'AGYS', 'AHL', 'AHS', 'AIG', 'AINV', 'AIR', 'AIRM', 'AIT', 'AIV', 'AIZ',", "'TRMK', 'TRN', 'TROW', 'TRST', 'TRV', 'TSCO', 'TSN', 'TSO', 'TSRA', 'TSS', 'TTC', 'TTEC', 'TTEK',", "'EMN', 'EMR', 'ENDP', 'ENR', 'ENS', 'ENSG', 'ENTR', 'ENZ', 'EOG', 'EPAY', 'EPIQ', 'EPR', 'EQIX',", "'HWKN', 'HZO', 'IART', 'IBKR', 'IBM', 'IBOC', 'ICE', 'ICON', 'ICUI', 'IDA', 'IDTI', 'IDXX', 'IEX',", "'POWI', 'POWL', 'PPG', 'PPS', 'PQ', 'PRA', 'PRAA', 'PRFT', 'PRGO', 'PRGS', 'PRU', 'PRX', 'PRXL',", "<reponame>rotomer/nlp-project SNP_TICKERS = ['AAPL', 'XOM', 'GE', 'CVX', 'MSFT', 'IBM', 'T', 'GOOG', 'PG', 'JNJ',", "'JNJ', 'PFE', 'WFC', 'BRK.B', 'JPM', 'PM', 'KO', 'MRK', 'VZ', 'WMT', 'ORCL', 'INTC', 'PEP',", "'FAST', 'FBHS', 'FBP', 'FCF', 'FCFS', 'FCN', 'FCS', 'FCX', 'FDO', 'FDS', 'FDX', 'FE', 'FEIC',", "'PRXL', 'PSA', 'PSB', 'PSEC', 'PSEM', 'PSS', 'PSSI', 'PSX', 'PTEN', 'PULS', 'PVA', 'PVH', 'PVTB',", "'RSYS', 'RT', 'RTEC', 'RTI', 'RTN', 'RUE', 'RUTH', 'RVBD', 'RYL', 'RYN', 'S', 'SAFM', 'SAFT',", "'AEP', 'CTSH', 'MRO', 'DFS', 'NSC', 'MCK', 'CB', 'KMI', 'STT', 'PSA', 'BHI', 'ISRG', 'GLW',", "'MED', 'MEI', 'MENT', 'MET', 'MFB', 'MGAM', 'MGLN', 'MHK', 'MHO', 'MIG', 'MINI', 'MJN', 'MKC',", "'TW', 'TWC', 'TWGP', 'TWTC', 'TWX', 'TXI', 'TXN', 'TXRH', 'TXT', 'TYC', 'TYL', 'TYPE', 'UA',", "'SIGI', 'SIGM', 'SIVB', 'SJI', 'SJM', 'SKS', 'SKT', 'SKX', 'SKYW', 'SLAB', 'SLB', 'SLG', 'SLGN',", "'KMI', 'KMPR', 'KMT', 'KMX', 'KND', 'KNX', 'KO', 'KOP', 'KOPN', 'KR', 'KRA', 'KRC', 'KRG',", "'MATX', 'MCD', 'MCF', 'MCHP', 'MCK', 'MCO', 'MCRI', 'MCRL', 'MCRS', 'MCS', 'MCY', 'MD', 'MDC',", "'CASC', 'CASY', 'CAT', 'CATM', 'CATO', 'CATY', 'CB', 'CBB', 'CBE', 'CBEY', 'CBG', 'CBK', 'CBM',", "'X', 
'XEC', 'XEL', 'XL', 'XLNX', 'XLS', 'XOM', 'XOXO', 'XRAY', 'XRX', 'XYL', 'Y', 'YHOO',", "'CPRT', 'CPSI', 'CPT', 'CPWR', 'CR', 'CRDN', 'CREE', 'CRI', 'CRK', 'CRL', 'CRM', 'CROX', 'CRR',", "'BA', 'DD', 'HON', 'SPG', 'DUK', 'ACN', 'MDT', 'COST', 'TWX', 'TGT', 'SO', 'SBUX', 'AIG',", "'WRC', 'WRI', 'WRLD', 'WSM', 'WSO', 'WST', 'WTFC', 'WTR', 'WTS', 'WU', 'WWD', 'WWW', 'WY',", "'LAD', 'LAMR', 'LANC', 'LAWS', 'LDL', 'LDR', 'LECO', 'LEG', 'LEN', 'LFUS', 'LG', 'LH', 'LHCG',", "'PNR', 'PNRA', 'PNW', 'PNY', 'POL', 'POM', 'POOL', 'POST', 'POWI', 'POWL', 'PPG', 'PPS', 'PQ',", "'BFS', 'BGC', 'BGFV', 'BGG', 'BGS', 'BH', 'BHE', 'BHI', 'BID', 'BIG', 'BIIB', 'BJRI', 'BK',", "'CDI', 'CDNS', 'CDR', 'CEB', 'CEC', 'CECO', 'CELG', 'CELL', 'CENX', 'CERN', 'CEVA', 'CF', 'CFN',", "'ROST', 'DELL', 'CTXS', 'FITB', 'RAI', 'PCAR', 'WY', 'SCHW', 'VFC', 'WM', 'CF', 'AZO', 'AMAT',", "'HRL', 'HRS', 'HSC', 'HSH', 'HSIC', 'HSII', 'HSNI', 'HSP', 'HST', 'HSTM', 'HSY', 'HTLD', 'HTSI',", "'JAKK', 'JBHT', 'JBL', 'JBLU', 'JBT', 'JCI', 'JCOM', 'JCP', 'JDAS', 'JDSU', 'JEC', 'JEF', 'JJSF',", "'STZ', 'SUN', 'SUP', 'SUPX', 'SUSQ', 'SVU', 'SWI', 'SWK', 'SWKS', 'SWM', 'SWN', 'SWS', 'SWX',", "'CSX', 'CTAS', 'CTL', 'CTS', 'CTSH', 'CTXS', 'CUB', 'CUZ', 'CVBF', 'CVC', 'CVD', 'CVG', 'CVGW',", "'PFE', 'PFG', 'PFS', 'PG', 'PGR', 'PH', 'PHM', 'PII', 'PJC', 'PKE', 'PKG', 'PKI', 'PKY',", "'STT', 'STX', 'STZ', 'SUN', 'SUP', 'SUPX', 'SUSQ', 'SVU', 'SWI', 'SWK', 'SWKS', 'SWM', 'SWN',", "'NKE', 'DOW', 'LOW', 'NOV', 'KMB', 'APA', 'HPQ', 'PNC', 'COF', 'BAX', 'TJX', 'CELG', 'DTV',", "'STRA', 'STRI', 'STT', 'STX', 'STZ', 'SUN', 'SUP', 'SUPX', 'SUSQ', 'SVU', 'SWI', 'SWK', 'SWKS',", "'WEN', 'WERN', 'WFC', 'WFM', 'WGL', 'WGO', 'WHR', 'WIBC', 'WIN', 'WIRE', 'WLP', 'WM', 'WMB',", "'S', 'SAFM', 'SAFT', 'SAH', 'SAI', 'SAM', 'SBRA', 'SBUX', 'SCG', 'SCHL', 'SCHW', 'SCI', 'SCL',", "'PCAR', 'PCG', 'PCH', 'PCL', 'PCLN', 'PCP', 'PCTI', 'PDCE', 'PDCO', 'PEET', 'PEG', 'PEI', 'PEP',", "'STMP', 'STR', 'STRA', 'STRI', 'STT', 'STX', 'STZ', 'SUN', 
'SUP', 'SUPX', 'SUSQ', 'SVU', 'SWI',", "'BMC', 'BMI', 'BMR', 'BMS', 'BMY', 'BOBE', 'BOH', 'BPFH', 'BR', 'BRC', 'BRCM', 'BRKL', 'BRKS',", "'HITT', 'HIW', 'HLIT', 'HLX', 'HMA', 'HME', 'HMN', 'HMSY', 'HNI', 'HNT', 'HNZ', 'HOG', 'HOLX',", "'NTRS', 'DGX', 'CMG', 'FISV', 'ORLY', 'MUR', 'OKE', 'MYL', 'BF.B', 'MAR', 'ROK', 'CHK', 'ABC',", "'XL', 'LLTC', 'WAT', 'NI', 'DRI', 'PCL', 'TAP', 'LLL', 'AVP', 'CNX', 'AES', 'AKAM', 'LNC',", "'AEP', 'AES', 'AET', 'AF', 'AFAM', 'AFFX', 'AFG', 'AFL', 'AGCO', 'AGN', 'AGP', 'AGYS', 'AHL',", "'CEC', 'CECO', 'CELG', 'CELL', 'CENX', 'CERN', 'CEVA', 'CF', 'CFN', 'CFR', 'CGNX', 'CGX', 'CHCO',", "'UTI', 'UTIW', 'UTX', 'UVV', 'V', 'VAL', 'VAR', 'VCI', 'VCLK', 'VDSI', 'VECO', 'VFC', 'VIAB',", "'MJN', 'MKC', 'MKSI', 'MLHR', 'MLI', 'MLM', 'MMC', 'MMM', 'MMS', 'MMSI', 'MNRO', 'MNST', 'MNTA',", "'NP', 'NPBC', 'NPK', 'NPO', 'NRG', 'NSC', 'NSIT', 'NSP', 'NSR', 'NTAP', 'NTCT', 'NTGR', 'NTLS',", "'BGFV', 'BGG', 'BGS', 'BH', 'BHE', 'BHI', 'BID', 'BIG', 'BIIB', 'BJRI', 'BK', 'BKE', 'BKH',", "'SUPX', 'SUSQ', 'SVU', 'SWI', 'SWK', 'SWKS', 'SWM', 'SWN', 'SWS', 'SWX', 'SWY', 'SXC', 'SXI',", "'GNW', 'GOOG', 'GPC', 'GPI', 'GPN', 'GPOR', 'GPS', 'GS', 'GSM', 'GT', 'GTAT', 'GTIV', 'GTY',", "'BRLI', 'BRO', 'BRS', 'BSX', 'BTH', 'BTU', 'BWA', 'BWLD', 'BWS', 'BXP', 'BXS', 'BYD', 'BYI',", "'ROSE', 'ROST', 'ROVI', 'RPM', 'RRC', 'RRD', 'RRGB', 'RS', 'RSG', 'RSH', 'RSTI', 'RSYS', 'RT',", "'BSX', 'BTH', 'BTU', 'BWA', 'BWLD', 'BWS', 'BXP', 'BXS', 'BYD', 'BYI', 'C', 'CA', 'CAB',", "'IPAR', 'IPCC', 'IPCM', 'IPG', 'IPHS', 'IPI', 'IR', 'IRBT', 'IRC', 'IRF', 'IRM', 'ISCA', 'ISIL',", "'JCI', 'MPC', 'MMC', 'FE', 'VTR', 'SYY', 'PCG', 'HNZ', 'ADM', 'BRCM', 'ED', 'PPG', 'CME',", "'LTM', 'LUFK', 'LUK', 'LUV', 'LXK', 'LXP', 'LXU', 'LYV', 'LZB', 'M', 'MA', 'MAA', 'MAC',", "'HRL', 'SWY', 'LSI', 'TSS', 'ZION', 'HCBK', 'AIV', 'RHI', 'PCS', 'MOLX', 'TE', 'TRIP', 'DNB',", "'MSA', 'MSCC', 'MSCI', 'MSFT', 'MSI', 'MSM', 'MSTR', 'MTB', 'MTD', 'MTH', 'MTRN', 'MTRX', 'MTSC',", "'RJF', 'RKT', 'RL', 
'RLI', 'RMD', 'ROCK', 'ROG', 'ROK', 'ROL', 'ROP', 'ROSE', 'ROST', 'ROVI',", "'SEE', 'TER', 'THC', 'GME', 'GNW', 'FHN', 'ETFC', 'AMD', 'R', 'FII', 'RRD', 'BIG', 'AN',", "'CFN', 'CFR', 'CGNX', 'CGX', 'CHCO', 'CHD', 'CHE', 'CHG', 'CHK', 'CHRW', 'CHS', 'CI', 'CIEN',", "'VMC', 'VMI', 'VNO', 'VOXX', 'VPFG', 'VPHM', 'VRSN', 'VRTS', 'VRTU', 'VRTX', 'VSAT', 'VSH', 'VSI',", "'POM', 'POOL', 'POST', 'POWI', 'POWL', 'PPG', 'PPS', 'PQ', 'PRA', 'PRAA', 'PRFT', 'PRGO', 'PRGS',", "'SNV', 'SNX', 'SO', 'SON', 'SONC', 'SPAR', 'SPF', 'SPG', 'SPLS', 'SPN', 'SPPI', 'SPTN', 'SPW',", "'ARG', 'ARO', 'ARQL', 'ARRS', 'ARW', 'ASBC', 'ASEI', 'ASGN', 'ASH', 'ASNA', 'ASTE', 'ATI', 'ATK',", "'ESRX', 'ESS', 'ESV', 'ETFC', 'ETH', 'ETN', 'ETR', 'EV', 'EW', 'EWBC', 'EXAR', 'EXC', 'EXH',", "'BBBY', 'BBG', 'BBOX', 'BBT', 'BBY', 'BC', 'BCO', 'BCOR', 'BCPC', 'BCR', 'BDC', 'BDX', 'BEAM',", "'V', 'VAL', 'VAR', 'VCI', 'VCLK', 'VDSI', 'VECO', 'VFC', 'VIAB', 'VICR', 'VIVO', 'VLO', 'VLTR',", "'VRTX', 'VSAT', 'VSH', 'VSI', 'VTR', 'VVC', 'VZ', 'WAB', 'WABC', 'WAFD', 'WAG', 'WAT', 'WBS',", "'TJX', 'TKR', 'TLAB', 'TMK', 'TMO', 'TMP', 'TNC', 'TOL', 'TPX', 'TQNT', 'TR', 'TRAK', 'TRIP',", "'CALM', 'CAM', 'CAS', 'CASC', 'CASY', 'CAT', 'CATM', 'CATO', 'CATY', 'CB', 'CBB', 'CBE', 'CBEY',", "'CHRW', 'SJM', 'AA', 'COG', 'FLR', 'DPS', 'CLX', 'RL', 'WYNN', 'BEAM', 'CNP', 'NE', 'JNPR',", "'PCH', 'PCL', 'PCLN', 'PCP', 'PCTI', 'PDCE', 'PDCO', 'PEET', 'PEG', 'PEI', 'PEP', 'PERY', 'PES',", "'DNB', 'LEG', 'JBL', 'IGT', 'JCP', 'CVC', 'ATI', 'SAI', 'PKI', 'WPX', 'BMS', 'AVY', 'HAR',", "'ANSS', 'AOL', 'AON', 'AOS', 'APA', 'APC', 'APD', 'APEI', 'APH', 'APOG', 'APOL', 'ARB', 'ARE',", "'INTC', 'PEP', 'ABT', 'QCOM', 'CSCO', 'SLB', 'C', 'CMCSA', 'BAC', 'DIS', 'MCD', 'AMZN', 'HD',", "'JDSU', 'ANF', 'PBI', 'NDAQ', 'X', 'SEE', 'TER', 'THC', 'GME', 'GNW', 'FHN', 'ETFC', 'AMD',", "'EXPO', 'EXR', 'EZPW', 'F', 'FAF', 'FARO', 'FAST', 'FBHS', 'FBP', 'FCF', 'FCFS', 'FCN', 'FCS',", "'QCOR', 'QEP', 'QLGC', 'QNST', 'QSFT', 'QSII', 'R', 'RAH', 'RAI', 
'RAX', 'RBC', 'RBCN', 'RBN',", "'ADI', 'ADM', 'ADP', 'ADS', 'ADSK', 'ADTN', 'ADVS', 'AEE', 'AEGN', 'AEIS', 'AEO', 'AEP', 'AES',", "'EXPE', 'XRAY', 'UNM', 'MAS', 'MWV', 'SNI', 'PWR', 'JEC', 'PHM', 'IRM', 'HP', 'CSC', 'SUN',", "'LPS', 'LPSN', 'LPX', 'LQDT', 'LRCX', 'LRY', 'LSI', 'LSTR', 'LTC', 'LTD', 'LTM', 'LUFK', 'LUK',", "'RUTH', 'RVBD', 'RYL', 'RYN', 'S', 'SAFM', 'SAFT', 'SAH', 'SAI', 'SAM', 'SBRA', 'SBUX', 'SCG',", "'WIBC', 'WIN', 'WIRE', 'WLP', 'WM', 'WMB', 'WMS', 'WMT', 'WOOF', 'WOR', 'WPO', 'WPP', 'WPX',", "'MSFT', 'MSI', 'MSM', 'MSTR', 'MTB', 'MTD', 'MTH', 'MTRN', 'MTRX', 'MTSC', 'MTX', 'MU', 'MUR',", "'SCI', 'SCL', 'SCOR', 'SCSC', 'SCSS', 'SE', 'SEE', 'SEIC', 'SENEA', 'SF', 'SFD', 'SFG', 'SFNC',", "'HOS', 'HOT', 'HOTT', 'HP', 'HPQ', 'HPT', 'HPY', 'HR', 'HRB', 'HRC', 'HRL', 'HRS', 'HSC',", "'LECO', 'LEG', 'LEN', 'LFUS', 'LG', 'LH', 'LHCG', 'LHO', 'LIFE', 'LII', 'LINC', 'LKQ', 'LL',", "'KSS', 'KSU', 'KSWS', 'KWK', 'KWR', 'L', 'LAD', 'LAMR', 'LANC', 'LAWS', 'LDL', 'LDR', 'LECO',", "'WPX', 'WR', 'WRB', 'WRC', 'WRI', 'WRLD', 'WSM', 'WSO', 'WST', 'WTFC', 'WTR', 'WTS', 'WU',", "'PRAA', 'PRFT', 'PRGO', 'PRGS', 'PRU', 'PRX', 'PRXL', 'PSA', 'PSB', 'PSEC', 'PSEM', 'PSS', 'PSSI',", "'CRDN', 'CREE', 'CRI', 'CRK', 'CRL', 'CRM', 'CROX', 'CRR', 'CRS', 'CRUS', 'CRVL', 'CRY', 'CSC',", "'LHO', 'LIFE', 'LII', 'LINC', 'LKQ', 'LL', 'LLL', 'LLTC', 'LLY', 'LM', 'LMNX', 'LMOS', 'LMT',", "'FL', 'FLIR', 'FLO', 'FLR', 'FLS', 'FMBI', 'FMC', 'FMER', 'FNB', 'FNF', 'FNFG', 'FNGN', 'FNP',", "'LNT', 'LO', 'LOGM', 'LOW', 'LPNT', 'LPS', 'LPSN', 'LPX', 'LQDT', 'LRCX', 'LRY', 'LSI', 'LSTR',", "'CTL', 'CTS', 'CTSH', 'CTXS', 'CUB', 'CUZ', 'CVBF', 'CVC', 'CVD', 'CVG', 'CVGW', 'CVH', 'CVLT',", "'DUK', 'ACN', 'MDT', 'COST', 'TWX', 'TGT', 'SO', 'SBUX', 'AIG', 'F', 'FCX', 'MET', 'BIIB',", "'DE', 'DECK', 'DEL', 'DELL', 'DF', 'DFS', 'DGII', 'DGIT', 'DGX', 'DHI', 'DHR', 'DHX', 'DIN',", "'DPS', 'DRC', 'DRE', 'DRH', 'DRI', 'DRIV', 'DRQ', 'DSPG', 'DTE', 'DTSI', 'DTV', 'DUK', 'DV',", "'ITW', 'IVAC', 'IVC', 
'IVZ', 'JACK', 'JAH', 'JAKK', 'JBHT', 'JBL', 'JBLU', 'JBT', 'JCI', 'JCOM',", "'EOG', 'EPAY', 'EPIQ', 'EPR', 'EQIX', 'EQR', 'EQT', 'EQY', 'ESE', 'ESI', 'ESIO', 'ESL', 'ESRX',", "'ARO', 'ARQL', 'ARRS', 'ARW', 'ASBC', 'ASEI', 'ASGN', 'ASH', 'ASNA', 'ASTE', 'ATI', 'ATK', 'ATMI',", "'SENEA', 'SF', 'SFD', 'SFG', 'SFNC', 'SFY', 'SGMS', 'SGY', 'SHAW', 'SHFL', 'SHLM', 'SHOO', 'SHW',", "'FFIV', 'FHN', 'FICO', 'FII', 'FINL', 'FIRE', 'FIS', 'FISV', 'FITB', 'FIX', 'FL', 'FLIR', 'FLO',", "'FRX', 'MNST', 'FFIV', 'NVDA', 'KIM', 'KEY', 'RSG', 'MKC', 'BCR', 'BSX', 'KLAC', 'AEE', 'BWA',", "'CYBX', 'CYH', 'CYMI', 'CYN', 'CYT', 'D', 'DAKT', 'DAR', 'DBD', 'DCI', 'DCOM', 'DD', 'DDD',", "'SNI', 'PWR', 'JEC', 'PHM', 'IRM', 'HP', 'CSC', 'SUN', 'TMK', 'FTR', 'NRG', 'IPG', 'IFF',", "'MCS', 'MCY', 'MD', 'MDC', 'MDCO', 'MDP', 'MDRX', 'MDSO', 'MDT', 'MDU', 'MEAS', 'MED', 'MEI',", "'BEAM', 'CNP', 'NE', 'JNPR', 'LH', 'EQT', 'CA', 'DVA', 'XLNX', 'EMN', 'SIAL', 'WEC', 'CCE',", "'KR', 'KRA', 'KRC', 'KRG', 'KS', 'KSS', 'KSU', 'KSWS', 'KWK', 'KWR', 'L', 'LAD', 'LAMR',", "'IRM', 'HP', 'CSC', 'SUN', 'TMK', 'FTR', 'NRG', 'IPG', 'IFF', 'GAS', 'STZ', 'HRB', 'XYL',", "'NTRI', 'NTRS', 'NU', 'NUE', 'NUVA', 'NVDA', 'NVE', 'NVR', 'NVTL', 'NWBI', 'NWE', 'NWL', 'NWN',", "'PKI', 'WPX', 'BMS', 'AVY', 'HAR', 'OI', 'AIZ', 'NFLX', 'DF', 'FLIR', 'GT', 'LM', 'APOL',", "'CLX', 'RL', 'WYNN', 'BEAM', 'CNP', 'NE', 'JNPR', 'LH', 'EQT', 'CA', 'DVA', 'XLNX', 'EMN',", "'DV', 'TIE'] ALL_TICKERS = ['A', 'AA', 'AAN', 'AAON', 'AAP', 'AAPL', 'ABAX', 'ABC', 'ABFS',", "'CCL', 'PEG', 'INTU', 'PLD', 'SYK', 'TROW', 'COH', 'ADBE', 'HES', 'ETN', 'MOS', 'IP', 'BDX',", "'FDX', 'FE', 'FEIC', 'FELE', 'FFBC', 'FFIN', 'FFIV', 'FHN', 'FICO', 'FII', 'FINL', 'FIRE', 'FIS',", "'AFAM', 'AFFX', 'AFG', 'AFL', 'AGCO', 'AGN', 'AGP', 'AGYS', 'AHL', 'AHS', 'AIG', 'AINV', 'AIR',", "'SKS', 'SKT', 'SKX', 'SKYW', 'SLAB', 'SLB', 'SLG', 'SLGN', 'SLH', 'SLM', 'SLXP', 'SM', 'SMA',", "'GME', 'GMT', 'GNCMA', 'GNTX', 'GNW', 'GOOG', 'GPC', 'GPI', 'GPN', 'GPOR', 'GPS', 
'GS', 'GSM',", "'CACI', 'CAG', 'CAH', 'CAKE', 'CALM', 'CAM', 'CAS', 'CASC', 'CASY', 'CAT', 'CATM', 'CATO', 'CATY',", "'IVC', 'IVZ', 'JACK', 'JAH', 'JAKK', 'JBHT', 'JBL', 'JBLU', 'JBT', 'JCI', 'JCOM', 'JCP', 'JDAS',", "'WIN', 'JOY', 'HBAN', 'TSO', 'HRS', 'LRCX', 'PNW', 'DHI', 'ARG', 'LEN', 'QEP', 'EFX', 'CVH',", "'BSX', 'KLAC', 'AEE', 'BWA', 'SPLS', 'FIS', 'SRCL', 'EXPD', 'COL', 'VRSN', 'FMC', 'ADSK', 'PFG',", "'CSH', 'CSL', 'CSTR', 'CSX', 'CTAS', 'CTL', 'CTS', 'CTSH', 'CTXS', 'CUB', 'CUZ', 'CVBF', 'CVC',", "'MSM', 'MSTR', 'MTB', 'MTD', 'MTH', 'MTRN', 'MTRX', 'MTSC', 'MTX', 'MU', 'MUR', 'MW', 'MWIV',", "'SUN', 'SUP', 'SUPX', 'SUSQ', 'SVU', 'SWI', 'SWK', 'SWKS', 'SWM', 'SWN', 'SWS', 'SWX', 'SWY',", "'ADP', 'ADS', 'ADSK', 'ADTN', 'ADVS', 'AEE', 'AEGN', 'AEIS', 'AEO', 'AEP', 'AES', 'AET', 'AF',", "'JOY', 'JPM', 'JWN', 'K', 'KALU', 'KAMN', 'KBH', 'KBR', 'KDN', 'KELYA', 'KEX', 'KEY', 'KFY',", "'CF', 'AZO', 'AMAT', 'CAM', 'VNO', 'OMC', 'CI', 'XEL', 'A', 'CAH', 'AET', 'STJ', 'AVB',", "'NUE', 'SWN', 'MAT', 'CBE', 'NU', 'AMP', 'NTAP', 'ZMH', 'LTD', 'ADI', 'PGR', 'HST', 'FAST',", "'SMA', 'SMCI', 'SMG', 'SMP', 'SMRT', 'SMTC', 'SNA', 'SNCR', 'SNDK', 'SNH', 'SNI', 'SNPS', 'SNV',", "'JEF', 'JJSF', 'JKHY', 'JLL', 'JNJ', 'JNPR', 'JNS', 'JOSB', 'JOY', 'JPM', 'JWN', 'K', 'KALU',", "'CPWR', 'CR', 'CRDN', 'CREE', 'CRI', 'CRK', 'CRL', 'CRM', 'CROX', 'CRR', 'CRS', 'CRUS', 'CRVL',", "'TXT', 'TYC', 'TYL', 'TYPE', 'UA', 'UBA', 'UBSI', 'UCBI', 'UDR', 'UEIC', 'UFCS', 'UFPI', 'UFS',", "'DHR', 'TXN', 'HAL', 'WAG', 'PX', 'PCLN', 'EXC', 'D', 'EOG', 'YUM', 'NEE', 'TWC', 'PSX',", "'MU', 'MUR', 'MW', 'MWIV', 'MWV', 'MWW', 'MYE', 'MYL', 'NAFC', 'NANO', 'NATI', 'NAVG', 'NBL',", "'TIBX', 'TIE', 'TIF', 'TJX', 'TKR', 'TLAB', 'TMK', 'TMO', 'TMP', 'TNC', 'TOL', 'TPX', 'TQNT',", "'SON', 'SONC', 'SPAR', 'SPF', 'SPG', 'SPLS', 'SPN', 'SPPI', 'SPTN', 'SPW', 'SRCL', 'SRDX', 'SRE',", "'PNW', 'PNY', 'POL', 'POM', 'POOL', 'POST', 'POWI', 'POWL', 'PPG', 'PPS', 'PQ', 'PRA', 'PRAA',", "'BEAV', 'BELFB', 'BEN', 'BFS', 
'BGC', 'BGFV', 'BGG', 'BGS', 'BH', 'BHE', 'BHI', 'BID', 'BIG',", "'CVS', 'MMM', 'EMC', 'UNH', 'BMY', 'UNP', 'CAT', 'EBAY', 'AXP', 'UPS', 'GS', 'ESRX', 'GILD',", "'KMT', 'KMX', 'KND', 'KNX', 'KO', 'KOP', 'KOPN', 'KR', 'KRA', 'KRC', 'KRG', 'KS', 'KSS',", "'SSI', 'SSP', 'SSS', 'STBA', 'STC', 'STE', 'STI', 'STJ', 'STL', 'STLD', 'STMP', 'STR', 'STRA',", "'IDTI', 'IDXX', 'IEX', 'IFF', 'IFSIA', 'IGT', 'IGTE', 'IILG', 'IIVI', 'IM', 'IN', 'INDB', 'INFA',", "'ROP', 'PAYX', 'GPS', 'SNDK', 'DTE', 'PRGO', 'RF', 'NTRS', 'DGX', 'CMG', 'FISV', 'ORLY', 'MUR',", "'EA', 'HRL', 'SWY', 'LSI', 'TSS', 'ZION', 'HCBK', 'AIV', 'RHI', 'PCS', 'MOLX', 'TE', 'TRIP',", "'SYK', 'TROW', 'COH', 'ADBE', 'HES', 'ETN', 'MOS', 'IP', 'BDX', 'MHP', 'STI', 'LO', 'M',", "'EBAY', 'AXP', 'UPS', 'GS', 'ESRX', 'GILD', 'NWSA', 'MON', 'MA', 'LLY', 'CL', 'BA', 'DD',", "'LLY', 'LM', 'LMNX', 'LMOS', 'LMT', 'LNC', 'LNCE', 'LNN', 'LNT', 'LO', 'LOGM', 'LOW', 'LPNT',", "'HI', 'HIBB', 'HIG', 'HII', 'HITK', 'HITT', 'HIW', 'HLIT', 'HLX', 'HMA', 'HME', 'HMN', 'HMSY',", "'NSIT', 'NSP', 'NSR', 'NTAP', 'NTCT', 'NTGR', 'NTLS', 'NTRI', 'NTRS', 'NU', 'NUE', 'NUVA', 'NVDA',", "'FBHS', 'FBP', 'FCF', 'FCFS', 'FCN', 'FCS', 'FCX', 'FDO', 'FDS', 'FDX', 'FE', 'FEIC', 'FELE',", "'EXPE', 'EXPO', 'EXR', 'EZPW', 'F', 'FAF', 'FARO', 'FAST', 'FBHS', 'FBP', 'FCF', 'FCFS', 'FCN',", "'DRI', 'DRIV', 'DRQ', 'DSPG', 'DTE', 'DTSI', 'DTV', 'DUK', 'DV', 'DVA', 'DVN', 'DW', 'DWA',", "'HP', 'HPQ', 'HPT', 'HPY', 'HR', 'HRB', 'HRC', 'HRL', 'HRS', 'HSC', 'HSH', 'HSIC', 'HSII',", "'SAI', 'PKI', 'WPX', 'BMS', 'AVY', 'HAR', 'OI', 'AIZ', 'NFLX', 'DF', 'FLIR', 'GT', 'LM',", "'ALL', 'ALOG', 'ALTR', 'ALXN', 'AM', 'AMAT', 'AMCX', 'AMD', 'AME', 'AMED', 'AMG', 'AMGN', 'AMP',", "'PPL', 'AON', 'WFM', 'BXP', 'YHOO', 'S', 'NBL', 'NOC', 'CMI', 'CCL', 'PEG', 'INTU', 'PLD',", "'DFS', 'DGII', 'DGIT', 'DGX', 'DHI', 'DHR', 'DHX', 'DIN', 'DIOD', 'DIS', 'DISCA', 'DKS', 'DLTR',", "'IO', 'IP', 'IPAR', 'IPCC', 'IPCM', 'IPG', 'IPHS', 'IPI', 'IR', 'IRBT', 'IRC', 'IRF', 'IRM',", 
"'AOS', 'APA', 'APC', 'APD', 'APEI', 'APH', 'APOG', 'APOL', 'ARB', 'ARE', 'AREX', 'ARG', 'ARO',", "'WDFC', 'WDR', 'WEC', 'WEN', 'WERN', 'WFC', 'WFM', 'WGL', 'WGO', 'WHR', 'WIBC', 'WIN', 'WIRE',", "'QSII', 'R', 'RAH', 'RAI', 'RAX', 'RBC', 'RBCN', 'RBN', 'RCII', 'RDC', 'RE', 'RECN', 'REG',", "'COF', 'BAX', 'TJX', 'CELG', 'DTV', 'DE', 'DHR', 'TXN', 'HAL', 'WAG', 'PX', 'PCLN', 'EXC',", "'HII', 'HITK', 'HITT', 'HIW', 'HLIT', 'HLX', 'HMA', 'HME', 'HMN', 'HMSY', 'HNI', 'HNT', 'HNZ',", "'UEIC', 'UFCS', 'UFPI', 'UFS', 'UGI', 'UHS', 'UHT', 'UIL', 'UMBF', 'UMPQ', 'UNF', 'UNFI', 'UNH',", "'BJRI', 'BK', 'BKE', 'BKH', 'BKI', 'BKMU', 'BKS', 'BLK', 'BLKB', 'BLL', 'BMC', 'BMI', 'BMR',", "'TSO', 'HRS', 'LRCX', 'PNW', 'DHI', 'ARG', 'LEN', 'QEP', 'EFX', 'CVH', 'CLF', 'CBG', 'CINF',", "'STZ', 'HRB', 'XYL', 'TSN', 'FOSL', 'DO', 'BBY', 'LUK', 'CTAS', 'HAS', 'POM', 'PBCT', 'NFX',", "'PX', 'PXD', 'PXP', 'PZZA', 'QCOM', 'QCOR', 'QEP', 'QLGC', 'QNST', 'QSFT', 'QSII', 'R', 'RAH',", "'CA', 'CAB', 'CACI', 'CAG', 'CAH', 'CAKE', 'CALM', 'CAM', 'CAS', 'CASC', 'CASY', 'CAT', 'CATM',", "'C', 'CMCSA', 'BAC', 'DIS', 'MCD', 'AMZN', 'HD', 'KFT', 'V', 'OXY', 'COP', 'MO', 'UTX',", "'MMC', 'MMM', 'MMS', 'MMSI', 'MNRO', 'MNST', 'MNTA', 'MO', 'MOH', 'MOLX', 'MON', 'MOS', 'MOV',", "'SMTC', 'SNA', 'SNCR', 'SNDK', 'SNH', 'SNI', 'SNPS', 'SNV', 'SNX', 'SO', 'SON', 'SONC', 'SPAR',", "'SEE', 'SEIC', 'SENEA', 'SF', 'SFD', 'SFG', 'SFNC', 'SFY', 'SGMS', 'SGY', 'SHAW', 'SHFL', 'SHLM',", "'PWR', 'JEC', 'PHM', 'IRM', 'HP', 'CSC', 'SUN', 'TMK', 'FTR', 'NRG', 'IPG', 'IFF', 'GAS',", "'BHI', 'BID', 'BIG', 'BIIB', 'BJRI', 'BK', 'BKE', 'BKH', 'BKI', 'BKMU', 'BKS', 'BLK', 'BLKB',", "'RECN', 'REG', 'REGN', 'RF', 'RFMD', 'RGA', 'RGLD', 'RGR', 'RGS', 'RHI', 'RHT', 'RJF', 'RKT',", "'R', 'FII', 'RRD', 'BIG', 'AN', 'WPO', 'LXK', 'ANR', 'FSLR', 'DV', 'TIE'] ALL_TICKERS =", "'TYPE', 'UA', 'UBA', 'UBSI', 'UCBI', 'UDR', 'UEIC', 'UFCS', 'UFPI', 'UFS', 'UGI', 'UHS', 'UHT',", "'DHX', 'DIN', 'DIOD', 'DIS', 'DISCA', 'DKS', 'DLTR', 'DLX', 
'DM', 'DMND', 'DNB', 'DNR', 'DO',", "'OMX', 'ONB', 'ONE', 'OPEN', 'OPLK', 'OPNT', 'ORB', 'ORCL', 'ORI', 'ORIT', 'ORLY', 'ORN', 'OSG',", "'SFG', 'SFNC', 'SFY', 'SGMS', 'SGY', 'SHAW', 'SHFL', 'SHLM', 'SHOO', 'SHW', 'SIAL', 'SIG', 'SIGI',", "'AM', 'AMAT', 'AMCX', 'AMD', 'AME', 'AMED', 'AMG', 'AMGN', 'AMP', 'AMSF', 'AMSG', 'AMT', 'AMZN',", "'PNY', 'POL', 'POM', 'POOL', 'POST', 'POWI', 'POWL', 'PPG', 'PPS', 'PQ', 'PRA', 'PRAA', 'PRFT',", "'HF', 'HFC', 'HGR', 'HHS', 'HI', 'HIBB', 'HIG', 'HII', 'HITK', 'HITT', 'HIW', 'HLIT', 'HLX',", "'MWIV', 'MWV', 'MWW', 'MYE', 'MYL', 'NAFC', 'NANO', 'NATI', 'NAVG', 'NBL', 'NBR', 'NBTB', 'NCI',", "'MTRX', 'MTSC', 'MTX', 'MU', 'MUR', 'MW', 'MWIV', 'MWV', 'MWW', 'MYE', 'MYL', 'NAFC', 'NANO',", "'NCS', 'NDAQ', 'NDSN', 'NE', 'NEE', 'NEM', 'NEOG', 'NEU', 'NEWP', 'NFG', 'NFLX', 'NFP', 'NFX',", "'NU', 'NUE', 'NUVA', 'NVDA', 'NVE', 'NVR', 'NVTL', 'NWBI', 'NWE', 'NWL', 'NWN', 'NWSA', 'NX',", "'CPB', 'CPLA', 'CPRT', 'CPSI', 'CPT', 'CPWR', 'CR', 'CRDN', 'CREE', 'CRI', 'CRK', 'CRL', 'CRM',", "'PTEN', 'PULS', 'PVA', 'PVH', 'PVTB', 'PWR', 'PX', 'PXD', 'PXP', 'PZZA', 'QCOM', 'QCOR', 'QEP',", "'NBTB', 'NCI', 'NCIT', 'NCR', 'NCS', 'NDAQ', 'NDSN', 'NE', 'NEE', 'NEM', 'NEOG', 'NEU', 'NEWP',", "'NFX', 'NI', 'NILE', 'NJR', 'NKE', 'NNN', 'NOC', 'NOV', 'NP', 'NPBC', 'NPK', 'NPO', 'NRG',", "'EBS', 'ECL', 'ECPG', 'ED', 'EE', 'EFX', 'EGL', 'EGN', 'EGP', 'EHTH', 'EIG', 'EIX', 'EL',", "'DVA', 'XLNX', 'EMN', 'SIAL', 'WEC', 'CCE', 'WDC', 'LIFE', 'MCO', 'HIG', 'JWN', 'FRX', 'MNST',", "'ODFL', 'ODP', 'OFC', 'OGE', 'OHI', 'OI', 'OII', 'OIS', 'OKE', 'OLN', 'OMC', 'OMCL', 'OMG',", "'HAIN', 'HAL', 'HAR', 'HAS', 'HAYN', 'HBAN', 'HBHC', 'HBI', 'HCBK', 'HCC', 'HCN', 'HCP', 'HCSG',", "'BID', 'BIG', 'BIIB', 'BJRI', 'BK', 'BKE', 'BKH', 'BKI', 'BKMU', 'BKS', 'BLK', 'BLKB', 'BLL',", "'DY', 'EA', 'EAT', 'EBAY', 'EBIX', 'EBS', 'ECL', 'ECPG', 'ED', 'EE', 'EFX', 'EGL', 'EGN',", "'BC', 'BCO', 'BCOR', 'BCPC', 'BCR', 'BDC', 'BDX', 'BEAM', 'BEAV', 'BELFB', 'BEN', 'BFS', 'BGC',", "'IVZ', 
'JACK', 'JAH', 'JAKK', 'JBHT', 'JBL', 'JBLU', 'JBT', 'JCI', 'JCOM', 'JCP', 'JDAS', 'JDSU',", "'SPTN', 'SPW', 'SRCL', 'SRDX', 'SRE', 'SSD', 'SSI', 'SSP', 'SSS', 'STBA', 'STC', 'STE', 'STI',", "'PKG', 'PKI', 'PKY', 'PL', 'PLCE', 'PLCM', 'PLD', 'PLFE', 'PLL', 'PLT', 'PLXS', 'PM', 'PMC',", "'HUM', 'HVT', 'HW', 'HWAY', 'HWKN', 'HZO', 'IART', 'IBKR', 'IBM', 'IBOC', 'ICE', 'ICON', 'ICUI',", "'MYE', 'MYL', 'NAFC', 'NANO', 'NATI', 'NAVG', 'NBL', 'NBR', 'NBTB', 'NCI', 'NCIT', 'NCR', 'NCS',", "'HOT', 'HOTT', 'HP', 'HPQ', 'HPT', 'HPY', 'HR', 'HRB', 'HRC', 'HRL', 'HRS', 'HSC', 'HSH',", "'CMN', 'CMP', 'CMS', 'CMTL', 'CNC', 'CNK', 'CNL', 'CNMD', 'CNP', 'CNQR', 'CNW', 'CNX', 'COCO',", "'CF', 'CFN', 'CFR', 'CGNX', 'CGX', 'CHCO', 'CHD', 'CHE', 'CHG', 'CHK', 'CHRW', 'CHS', 'CI',", "'SNX', 'SO', 'SON', 'SONC', 'SPAR', 'SPF', 'SPG', 'SPLS', 'SPN', 'SPPI', 'SPTN', 'SPW', 'SRCL',", "'WLP', 'WM', 'WMB', 'WMS', 'WMT', 'WOOF', 'WOR', 'WPO', 'WPP', 'WPX', 'WR', 'WRB', 'WRC',", "'ACXM', 'ADBE', 'ADI', 'ADM', 'ADP', 'ADS', 'ADSK', 'ADTN', 'ADVS', 'AEE', 'AEGN', 'AEIS', 'AEO',", "'IPI', 'IR', 'IRBT', 'IRC', 'IRF', 'IRM', 'ISCA', 'ISIL', 'ISRG', 'IT', 'ITG', 'ITRI', 'ITT',", "'FNGN', 'FNP', 'FOR', 'FORR', 'FOSL', 'FRED', 'FRT', 'FRX', 'FSLR', 'FSP', 'FST', 'FTI', 'FTR',", "'AIV', 'RHI', 'PCS', 'MOLX', 'TE', 'TRIP', 'DNB', 'LEG', 'JBL', 'IGT', 'JCP', 'CVC', 'ATI',", "'PL', 'PLCE', 'PLCM', 'PLD', 'PLFE', 'PLL', 'PLT', 'PLXS', 'PM', 'PMC', 'PMTC', 'PMTI', 'PNC',", "'WDC', 'WDFC', 'WDR', 'WEC', 'WEN', 'WERN', 'WFC', 'WFM', 'WGL', 'WGO', 'WHR', 'WIBC', 'WIN',", "'CGNX', 'CGX', 'CHCO', 'CHD', 'CHE', 'CHG', 'CHK', 'CHRW', 'CHS', 'CI', 'CIEN', 'CINF', 'CIR',", "'PNC', 'COF', 'BAX', 'TJX', 'CELG', 'DTV', 'DE', 'DHR', 'TXN', 'HAL', 'WAG', 'PX', 'PCLN',", "'NTGR', 'NTLS', 'NTRI', 'NTRS', 'NU', 'NUE', 'NUVA', 'NVDA', 'NVE', 'NVR', 'NVTL', 'NWBI', 'NWE',", "'RAH', 'RAI', 'RAX', 'RBC', 'RBCN', 'RBN', 'RCII', 'RDC', 'RE', 'RECN', 'REG', 'REGN', 'RF',", "'FORR', 'FOSL', 'FRED', 'FRT', 'FRX', 'FSLR', 'FSP', 
'FST', 'FTI', 'FTR', 'FUL', 'FULT', 'FWRD',", "'JKHY', 'JLL', 'JNJ', 'JNPR', 'JNS', 'JOSB', 'JOY', 'JPM', 'JWN', 'K', 'KALU', 'KAMN', 'KBH',", "'ORIT', 'ORLY', 'ORN', 'OSG', 'OSIS', 'OSK', 'OXM', 'OXY', 'OZRK', 'PACW', 'PAY', 'PAYX', 'PB',", "'FMBI', 'FMC', 'FMER', 'FNB', 'FNF', 'FNFG', 'FNGN', 'FNP', 'FOR', 'FORR', 'FOSL', 'FRED', 'FRT',", "'UCBI', 'UDR', 'UEIC', 'UFCS', 'UFPI', 'UFS', 'UGI', 'UHS', 'UHT', 'UIL', 'UMBF', 'UMPQ', 'UNF',", "'RGR', 'RGS', 'RHI', 'RHT', 'RJF', 'RKT', 'RL', 'RLI', 'RMD', 'ROCK', 'ROG', 'ROK', 'ROL',", "'PSB', 'PSEC', 'PSEM', 'PSS', 'PSSI', 'PSX', 'PTEN', 'PULS', 'PVA', 'PVH', 'PVTB', 'PWR', 'PX',", "'MNRO', 'MNST', 'MNTA', 'MO', 'MOH', 'MOLX', 'MON', 'MOS', 'MOV', 'MPC', 'MPW', 'MPWR', 'MRCY',", "'AVP', 'CNX', 'AES', 'AKAM', 'LNC', 'VAR', 'BLL', 'FLS', 'LUV', 'KMX', 'FDO', 'WHR', 'MCHP',", "'THC', 'GME', 'GNW', 'FHN', 'ETFC', 'AMD', 'R', 'FII', 'RRD', 'BIG', 'AN', 'WPO', 'LXK',", "'LSI', 'LSTR', 'LTC', 'LTD', 'LTM', 'LUFK', 'LUK', 'LUV', 'LXK', 'LXP', 'LXU', 'LYV', 'LZB',", "'BBG', 'BBOX', 'BBT', 'BBY', 'BC', 'BCO', 'BCOR', 'BCPC', 'BCR', 'BDC', 'BDX', 'BEAM', 'BEAV',", "'PXD', 'PXP', 'PZZA', 'QCOM', 'QCOR', 'QEP', 'QLGC', 'QNST', 'QSFT', 'QSII', 'R', 'RAH', 'RAI',", "'ENTR', 'ENZ', 'EOG', 'EPAY', 'EPIQ', 'EPR', 'EQIX', 'EQR', 'EQT', 'EQY', 'ESE', 'ESI', 'ESIO',", "'CCMP', 'CCRN', 'CDI', 'CDNS', 'CDR', 'CEB', 'CEC', 'CECO', 'CELG', 'CELL', 'CENX', 'CERN', 'CEVA',", "'BPFH', 'BR', 'BRC', 'BRCM', 'BRKL', 'BRKS', 'BRLI', 'BRO', 'BRS', 'BSX', 'BTH', 'BTU', 'BWA',", "'TTWO', 'TUES', 'TUP', 'TW', 'TWC', 'TWGP', 'TWTC', 'TWX', 'TXI', 'TXN', 'TXRH', 'TXT', 'TYC',", "'MRK', 'MRO', 'MRX', 'MS', 'MSA', 'MSCC', 'MSCI', 'MSFT', 'MSI', 'MSM', 'MSTR', 'MTB', 'MTD',", "'FFIV', 'NVDA', 'KIM', 'KEY', 'RSG', 'MKC', 'BCR', 'BSX', 'KLAC', 'AEE', 'BWA', 'SPLS', 'FIS',", "'PLD', 'PLFE', 'PLL', 'PLT', 'PLXS', 'PM', 'PMC', 'PMTC', 'PMTI', 'PNC', 'PNFP', 'PNK', 'PNM',", "'ROP', 'ROSE', 'ROST', 'ROVI', 'RPM', 'RRC', 'RRD', 'RRGB', 'RS', 'RSG', 'RSH', 'RSTI', 
'RSYS',", "'SPAR', 'SPF', 'SPG', 'SPLS', 'SPN', 'SPPI', 'SPTN', 'SPW', 'SRCL', 'SRDX', 'SRE', 'SSD', 'SSI',", "'BDX', 'BEAM', 'BEAV', 'BELFB', 'BEN', 'BFS', 'BGC', 'BGFV', 'BGG', 'BGS', 'BH', 'BHE', 'BHI',", "'SLGN', 'SLH', 'SLM', 'SLXP', 'SM', 'SMA', 'SMCI', 'SMG', 'SMP', 'SMRT', 'SMTC', 'SNA', 'SNCR',", "'LMT', 'LNC', 'LNCE', 'LNN', 'LNT', 'LO', 'LOGM', 'LOW', 'LPNT', 'LPS', 'LPSN', 'LPX', 'LQDT',", "'IRBT', 'IRC', 'IRF', 'IRM', 'ISCA', 'ISIL', 'ISRG', 'IT', 'ITG', 'ITRI', 'ITT', 'ITW', 'IVAC',", "'CINF', 'CIR', 'CKH', 'CKP', 'CL', 'CLC', 'CLD', 'CLF', 'CLGX', 'CLH', 'CLI', 'CLMS', 'CLP',", "'AMZN', 'HD', 'KFT', 'V', 'OXY', 'COP', 'MO', 'UTX', 'USB', 'AMGN', 'CVS', 'MMM', 'EMC',", "'IP', 'BDX', 'MHP', 'STI', 'LO', 'M', 'MJN', 'EIX', 'EL', 'DISCA', 'HCN', 'BBBY', 'TEL',", "'ROL', 'ROP', 'ROSE', 'ROST', 'ROVI', 'RPM', 'RRC', 'RRD', 'RRGB', 'RS', 'RSG', 'RSH', 'RSTI',", "'HRB', 'XYL', 'TSN', 'FOSL', 'DO', 'BBY', 'LUK', 'CTAS', 'HAS', 'POM', 'PBCT', 'NFX', 'RDC',", "'MAS', 'MWV', 'SNI', 'PWR', 'JEC', 'PHM', 'IRM', 'HP', 'CSC', 'SUN', 'TMK', 'FTR', 'NRG',", "'KOP', 'KOPN', 'KR', 'KRA', 'KRC', 'KRG', 'KS', 'KSS', 'KSU', 'KSWS', 'KWK', 'KWR', 'L',", "'PRX', 'PRXL', 'PSA', 'PSB', 'PSEC', 'PSEM', 'PSS', 'PSSI', 'PSX', 'PTEN', 'PULS', 'PVA', 'PVH',", "'PDCE', 'PDCO', 'PEET', 'PEG', 'PEI', 'PEP', 'PERY', 'PES', 'PETM', 'PETS', 'PFE', 'PFG', 'PFS',", "'CBE', 'CBEY', 'CBG', 'CBK', 'CBM', 'CBOE', 'CBR', 'CBRL', 'CBS', 'CBSH', 'CBST', 'CBT', 'CBU',", "'CBS', 'CBSH', 'CBST', 'CBT', 'CBU', 'CCC', 'CCE', 'CCI', 'CCL', 'CCMP', 'CCRN', 'CDI', 'CDNS',", "'CEVA', 'CF', 'CFN', 'CFR', 'CGNX', 'CGX', 'CHCO', 'CHD', 'CHE', 'CHG', 'CHK', 'CHRW', 'CHS',", "'EMR', 'ENDP', 'ENR', 'ENS', 'ENSG', 'ENTR', 'ENZ', 'EOG', 'EPAY', 'EPIQ', 'EPR', 'EQIX', 'EQR',", "'EXC', 'EXH', 'EXLS', 'EXP', 'EXPD', 'EXPE', 'EXPO', 'EXR', 'EZPW', 'F', 'FAF', 'FARO', 'FAST',", "'FMER', 'FNB', 'FNF', 'FNFG', 'FNGN', 'FNP', 'FOR', 'FORR', 'FOSL', 'FRED', 'FRT', 'FRX', 'FSLR',", "'BAX', 'BBBY', 'BBG', 'BBOX', 'BBT', 
'BBY', 'BC', 'BCO', 'BCOR', 'BCPC', 'BCR', 'BDC', 'BDX',", "'FLR', 'DPS', 'CLX', 'RL', 'WYNN', 'BEAM', 'CNP', 'NE', 'JNPR', 'LH', 'EQT', 'CA', 'DVA',", "'ALOG', 'ALTR', 'ALXN', 'AM', 'AMAT', 'AMCX', 'AMD', 'AME', 'AMED', 'AMG', 'AMGN', 'AMP', 'AMSF',", "'SJI', 'SJM', 'SKS', 'SKT', 'SKX', 'SKYW', 'SLAB', 'SLB', 'SLG', 'SLGN', 'SLH', 'SLM', 'SLXP',", "'FNFG', 'FNGN', 'FNP', 'FOR', 'FORR', 'FOSL', 'FRED', 'FRT', 'FRX', 'FSLR', 'FSP', 'FST', 'FTI',", "'HCP', 'RTN', 'WLP', 'CCI', 'JCI', 'MPC', 'MMC', 'FE', 'VTR', 'SYY', 'PCG', 'HNZ', 'ADM',", "'URBN', 'NBR', 'TEG', 'EA', 'HRL', 'SWY', 'LSI', 'TSS', 'ZION', 'HCBK', 'AIV', 'RHI', 'PCS',", "'CVS', 'CVX', 'CW', 'CWTR', 'CXW', 'CY', 'CYBX', 'CYH', 'CYMI', 'CYN', 'CYT', 'D', 'DAKT',", "'QEP', 'EFX', 'CVH', 'CLF', 'CBG', 'CINF', 'NWL', 'HSP', 'EXPE', 'XRAY', 'UNM', 'MAS', 'MWV',", "'FOSL', 'DO', 'BBY', 'LUK', 'CTAS', 'HAS', 'POM', 'PBCT', 'NFX', 'RDC', 'SNA', 'GCI', 'URBN',", "'AA', 'COG', 'FLR', 'DPS', 'CLX', 'RL', 'WYNN', 'BEAM', 'CNP', 'NE', 'JNPR', 'LH', 'EQT',", "'NFX', 'RDC', 'SNA', 'GCI', 'URBN', 'NBR', 'TEG', 'EA', 'HRL', 'SWY', 'LSI', 'TSS', 'ZION',", "'FTR', 'NRG', 'IPG', 'IFF', 'GAS', 'STZ', 'HRB', 'XYL', 'TSN', 'FOSL', 'DO', 'BBY', 'LUK',", "'CME', 'CMG', 'CMI', 'CMN', 'CMP', 'CMS', 'CMTL', 'CNC', 'CNK', 'CNL', 'CNMD', 'CNP', 'CNQR',", "'GD', 'GDI', 'GE', 'GEF', 'GEO', 'GES', 'GFF', 'GGG', 'GHL', 'GIFI', 'GILD', 'GIS', 'GLW',", "'PULS', 'PVA', 'PVH', 'PVTB', 'PWR', 'PX', 'PXD', 'PXP', 'PZZA', 'QCOM', 'QCOR', 'QEP', 'QLGC',", "'CELG', 'CELL', 'CENX', 'CERN', 'CEVA', 'CF', 'CFN', 'CFR', 'CGNX', 'CGX', 'CHCO', 'CHD', 'CHE',", "'CBG', 'CBK', 'CBM', 'CBOE', 'CBR', 'CBRL', 'CBS', 'CBSH', 'CBST', 'CBT', 'CBU', 'CCC', 'CCE',", "'CNK', 'CNL', 'CNMD', 'CNP', 'CNQR', 'CNW', 'CNX', 'COCO', 'COF', 'COG', 'COH', 'COHU', 'COL',", "'GIS', 'ITW', 'ACE', 'PRU', 'VIAB', 'CTL', 'LMT', 'FDX', 'PCP', 'BBT', 'MS', 'BLK', 'DVN',", "'PSA', 'BHI', 'ISRG', 'GLW', 'CRM', 'ALL', 'SE', 'HCP', 'RTN', 'WLP', 'CCI', 'JCI', 'MPC',", "'COP', 'COST', 
'COV', 'CPB', 'CPLA', 'CPRT', 'CPSI', 'CPT', 'CPWR', 'CR', 'CRDN', 'CREE', 'CRI',", "= ['AAPL', 'XOM', 'GE', 'CVX', 'MSFT', 'IBM', 'T', 'GOOG', 'PG', 'JNJ', 'PFE', 'WFC',", "'SHW', 'SIAL', 'SIG', 'SIGI', 'SIGM', 'SIVB', 'SJI', 'SJM', 'SKS', 'SKT', 'SKX', 'SKYW', 'SLAB',", "'CBS', 'CSX', 'TMO', 'AEP', 'CTSH', 'MRO', 'DFS', 'NSC', 'MCK', 'CB', 'KMI', 'STT', 'PSA',", "'CSX', 'TMO', 'AEP', 'CTSH', 'MRO', 'DFS', 'NSC', 'MCK', 'CB', 'KMI', 'STT', 'PSA', 'BHI',", "'DWA', 'DY', 'EA', 'EAT', 'EBAY', 'EBIX', 'EBS', 'ECL', 'ECPG', 'ED', 'EE', 'EFX', 'EGL',", "'JNS', 'JOSB', 'JOY', 'JPM', 'JWN', 'K', 'KALU', 'KAMN', 'KBH', 'KBR', 'KDN', 'KELYA', 'KEX',", "'MHK', 'MHO', 'MIG', 'MINI', 'MJN', 'MKC', 'MKSI', 'MLHR', 'MLI', 'MLM', 'MMC', 'MMM', 'MMS',", "'OMCL', 'OMG', 'OMI', 'OMX', 'ONB', 'ONE', 'OPEN', 'OPLK', 'OPNT', 'ORB', 'ORCL', 'ORI', 'ORIT',", "'CR', 'CRDN', 'CREE', 'CRI', 'CRK', 'CRL', 'CRM', 'CROX', 'CRR', 'CRS', 'CRUS', 'CRVL', 'CRY',", "'SMP', 'SMRT', 'SMTC', 'SNA', 'SNCR', 'SNDK', 'SNH', 'SNI', 'SNPS', 'SNV', 'SNX', 'SO', 'SON',", "'GWW', 'GXP', 'GY', 'HAE', 'HAFC', 'HAIN', 'HAL', 'HAR', 'HAS', 'HAYN', 'HBAN', 'HBHC', 'HBI',", "'MPWR', 'MRCY', 'MRK', 'MRO', 'MRX', 'MS', 'MSA', 'MSCC', 'MSCI', 'MSFT', 'MSI', 'MSM', 'MSTR',", "'RDC', 'SNA', 'GCI', 'URBN', 'NBR', 'TEG', 'EA', 'HRL', 'SWY', 'LSI', 'TSS', 'ZION', 'HCBK',", "'CBR', 'CBRL', 'CBS', 'CBSH', 'CBST', 'CBT', 'CBU', 'CCC', 'CCE', 'CCI', 'CCL', 'CCMP', 'CCRN',", "'FNB', 'FNF', 'FNFG', 'FNGN', 'FNP', 'FOR', 'FORR', 'FOSL', 'FRED', 'FRT', 'FRX', 'FSLR', 'FSP',", "'SHAW', 'SHFL', 'SHLM', 'SHOO', 'SHW', 'SIAL', 'SIG', 'SIGI', 'SIGM', 'SIVB', 'SJI', 'SJM', 'SKS',", "'AINV', 'AIR', 'AIRM', 'AIT', 'AIV', 'AIZ', 'AJG', 'AKAM', 'AKR', 'AKRX', 'AKS', 'ALB', 'ALE',", "'MRO', 'MRX', 'MS', 'MSA', 'MSCC', 'MSCI', 'MSFT', 'MSI', 'MSM', 'MSTR', 'MTB', 'MTD', 'MTH',", "'KLAC', 'KLIC', 'KMB', 'KMI', 'KMPR', 'KMT', 'KMX', 'KND', 'KNX', 'KO', 'KOP', 'KOPN', 'KR',", "'SYMM', 'SYNA', 'SYY', 'T', 'TAP', 'TBI', 'TCB', 'TCBI', 'TCO', 
'TDC', 'TDS', 'TDW', 'TDY',", "'PKE', 'PKG', 'PKI', 'PKY', 'PL', 'PLCE', 'PLCM', 'PLD', 'PLFE', 'PLL', 'PLT', 'PLXS', 'PM',", "'HLX', 'HMA', 'HME', 'HMN', 'HMSY', 'HNI', 'HNT', 'HNZ', 'HOG', 'HOLX', 'HOMB', 'HON', 'HOS',", "'AEGN', 'AEIS', 'AEO', 'AEP', 'AES', 'AET', 'AF', 'AFAM', 'AFFX', 'AFG', 'AFL', 'AGCO', 'AGN',", "'EXPD', 'COL', 'VRSN', 'FMC', 'ADSK', 'PFG', 'WYN', 'SLM', 'PLL', 'TIF', 'TXT', 'XL', 'LLTC',", "'DCI', 'DCOM', 'DD', 'DDD', 'DE', 'DECK', 'DEL', 'DELL', 'DF', 'DFS', 'DGII', 'DGIT', 'DGX',", "'PSSI', 'PSX', 'PTEN', 'PULS', 'PVA', 'PVH', 'PVTB', 'PWR', 'PX', 'PXD', 'PXP', 'PZZA', 'QCOM',", "'MOLX', 'TE', 'TRIP', 'DNB', 'LEG', 'JBL', 'IGT', 'JCP', 'CVC', 'ATI', 'SAI', 'PKI', 'WPX',", "'TRIP', 'DNB', 'LEG', 'JBL', 'IGT', 'JCP', 'CVC', 'ATI', 'SAI', 'PKI', 'WPX', 'BMS', 'AVY',", "'MAT', 'MATW', 'MATX', 'MCD', 'MCF', 'MCHP', 'MCK', 'MCO', 'MCRI', 'MCRL', 'MCRS', 'MCS', 'MCY',", "'FITB', 'RAI', 'PCAR', 'WY', 'SCHW', 'VFC', 'WM', 'CF', 'AZO', 'AMAT', 'CAM', 'VNO', 'OMC',", "'CUB', 'CUZ', 'CVBF', 'CVC', 'CVD', 'CVG', 'CVGW', 'CVH', 'CVLT', 'CVS', 'CVX', 'CW', 'CWTR',", "'CLH', 'CLI', 'CLMS', 'CLP', 'CLW', 'CLX', 'CMA', 'CMC', 'CMCSA', 'CME', 'CMG', 'CMI', 'CMN',", "'TWGP', 'TWTC', 'TWX', 'TXI', 'TXN', 'TXRH', 'TXT', 'TYC', 'TYL', 'TYPE', 'UA', 'UBA', 'UBSI',", "'OXY', 'COP', 'MO', 'UTX', 'USB', 'AMGN', 'CVS', 'MMM', 'EMC', 'UNH', 'BMY', 'UNP', 'CAT',", "'LRY', 'LSI', 'LSTR', 'LTC', 'LTD', 'LTM', 'LUFK', 'LUK', 'LUV', 'LXK', 'LXP', 'LXU', 'LYV',", "'BWS', 'BXP', 'BXS', 'BYD', 'BYI', 'C', 'CA', 'CAB', 'CACI', 'CAG', 'CAH', 'CAKE', 'CALM',", "'CTXS', 'CUB', 'CUZ', 'CVBF', 'CVC', 'CVD', 'CVG', 'CVGW', 'CVH', 'CVLT', 'CVS', 'CVX', 'CW',", "'KO', 'KOP', 'KOPN', 'KR', 'KRA', 'KRC', 'KRG', 'KS', 'KSS', 'KSU', 'KSWS', 'KWK', 'KWR',", "'JNJ', 'JNPR', 'JNS', 'JOSB', 'JOY', 'JPM', 'JWN', 'K', 'KALU', 'KAMN', 'KBH', 'KBR', 'KDN',", "'GIS', 'GLW', 'GMCR', 'GME', 'GMT', 'GNCMA', 'GNTX', 'GNW', 'GOOG', 'GPC', 'GPI', 'GPN', 'GPOR',", "'NWN', 'NWSA', 'NX', 'NYB', 'NYT', 
'NYX', 'O', 'OCR', 'ODFL', 'ODP', 'OFC', 'OGE', 'OHI',", "'TEL', 'TER', 'TEX', 'TFX', 'TG', 'TGI', 'TGT', 'THC', 'THG', 'THO', 'THOR', 'THS', 'TIBX',", "'AMT', 'AGN', 'NEM', 'BK', 'TRV', 'TYC', 'GIS', 'ITW', 'ACE', 'PRU', 'VIAB', 'CTL', 'LMT',", "'LXK', 'ANR', 'FSLR', 'DV', 'TIE'] ALL_TICKERS = ['A', 'AA', 'AAN', 'AAON', 'AAP', 'AAPL',", "'SMRT', 'SMTC', 'SNA', 'SNCR', 'SNDK', 'SNH', 'SNI', 'SNPS', 'SNV', 'SNX', 'SO', 'SON', 'SONC',", "'QCOM', 'QCOR', 'QEP', 'QLGC', 'QNST', 'QSFT', 'QSII', 'R', 'RAH', 'RAI', 'RAX', 'RBC', 'RBCN',", "'SNA', 'SNCR', 'SNDK', 'SNH', 'SNI', 'SNPS', 'SNV', 'SNX', 'SO', 'SON', 'SONC', 'SPAR', 'SPF',", "'GWW', 'EW', 'ETR', 'NUE', 'SWN', 'MAT', 'CBE', 'NU', 'AMP', 'NTAP', 'ZMH', 'LTD', 'ADI',", "'DTV', 'DE', 'DHR', 'TXN', 'HAL', 'WAG', 'PX', 'PCLN', 'EXC', 'D', 'EOG', 'YUM', 'NEE',", "'MDSO', 'MDT', 'MDU', 'MEAS', 'MED', 'MEI', 'MENT', 'MET', 'MFB', 'MGAM', 'MGLN', 'MHK', 'MHO',", "'T', 'TAP', 'TBI', 'TCB', 'TCBI', 'TCO', 'TDC', 'TDS', 'TDW', 'TDY', 'TE', 'TECD', 'TECH',", "'LEG', 'JBL', 'IGT', 'JCP', 'CVC', 'ATI', 'SAI', 'PKI', 'WPX', 'BMS', 'AVY', 'HAR', 'OI',", "'APH', 'APOG', 'APOL', 'ARB', 'ARE', 'AREX', 'ARG', 'ARO', 'ARQL', 'ARRS', 'ARW', 'ASBC', 'ASEI',", "'SBUX', 'SCG', 'SCHL', 'SCHW', 'SCI', 'SCL', 'SCOR', 'SCSC', 'SCSS', 'SE', 'SEE', 'SEIC', 'SENEA',", "'CPB', 'CMS', 'VMC', 'MU', 'BMC', 'NYX', 'CMA', 'BTU', 'WIN', 'JOY', 'HBAN', 'TSO', 'HRS',", "'LII', 'LINC', 'LKQ', 'LL', 'LLL', 'LLTC', 'LLY', 'LM', 'LMNX', 'LMOS', 'LMT', 'LNC', 'LNCE',", "'CSGS', 'CSH', 'CSL', 'CSTR', 'CSX', 'CTAS', 'CTL', 'CTS', 'CTSH', 'CTXS', 'CUB', 'CUZ', 'CVBF',", "'GNW', 'FHN', 'ETFC', 'AMD', 'R', 'FII', 'RRD', 'BIG', 'AN', 'WPO', 'LXK', 'ANR', 'FSLR',", "'LZB', 'M', 'MA', 'MAA', 'MAC', 'MAN', 'MANH', 'MANT', 'MAR', 'MAS', 'MASI', 'MAT', 'MATW',", "'RRGB', 'RS', 'RSG', 'RSH', 'RSTI', 'RSYS', 'RT', 'RTEC', 'RTI', 'RTN', 'RUE', 'RUTH', 'RVBD',", "'LKQ', 'LL', 'LLL', 'LLTC', 'LLY', 'LM', 'LMNX', 'LMOS', 'LMT', 'LNC', 'LNCE', 'LNN', 'LNT',", "'SO', 'SBUX', 
'AIG', 'F', 'FCX', 'MET', 'BIIB', 'EMR', 'APC', 'NKE', 'DOW', 'LOW', 'NOV',", "'HOG', 'XRX', 'APH', 'GPC', 'CHRW', 'SJM', 'AA', 'COG', 'FLR', 'DPS', 'CLX', 'RL', 'WYNN',", "'HCSG', 'HD', 'HE', 'HES', 'HF', 'HFC', 'HGR', 'HHS', 'HI', 'HIBB', 'HIG', 'HII', 'HITK',", "'SWI', 'SWK', 'SWKS', 'SWM', 'SWN', 'SWS', 'SWX', 'SWY', 'SXC', 'SXI', 'SXT', 'SYK', 'SYKE',", "'FCN', 'FCS', 'FCX', 'FDO', 'FDS', 'FDX', 'FE', 'FEIC', 'FELE', 'FFBC', 'FFIN', 'FFIV', 'FHN',", "'DEL', 'DELL', 'DF', 'DFS', 'DGII', 'DGIT', 'DGX', 'DHI', 'DHR', 'DHX', 'DIN', 'DIOD', 'DIS',", "'INTU', 'PLD', 'SYK', 'TROW', 'COH', 'ADBE', 'HES', 'ETN', 'MOS', 'IP', 'BDX', 'MHP', 'STI',", "'IM', 'IN', 'INDB', 'INFA', 'INGR', 'ININ', 'INT', 'INTC', 'INTU', 'IO', 'IP', 'IPAR', 'IPCC',", "'Y', 'YHOO', 'YUM', 'ZBRA', 'ZEP', 'ZEUS', 'ZLC', 'ZMH', 'ZQK', 'ZUMZ'] TICKERS = sorted(list(set(SNP_TICKERS)", "'REG', 'REGN', 'RF', 'RFMD', 'RGA', 'RGLD', 'RGR', 'RGS', 'RHI', 'RHT', 'RJF', 'RKT', 'RL',", "'GT', 'LM', 'APOL', 'PDCO', 'JDSU', 'ANF', 'PBI', 'NDAQ', 'X', 'SEE', 'TER', 'THC', 'GME',", "'PDCO', 'PEET', 'PEG', 'PEI', 'PEP', 'PERY', 'PES', 'PETM', 'PETS', 'PFE', 'PFG', 'PFS', 'PG',", "'TCO', 'TDC', 'TDS', 'TDW', 'TDY', 'TE', 'TECD', 'TECH', 'TEG', 'TEL', 'TER', 'TEX', 'TFX',", "'EQR', 'BEN', 'ECL', 'PPL', 'AON', 'WFM', 'BXP', 'YHOO', 'S', 'NBL', 'NOC', 'CMI', 'CCL',", "'SLM', 'PLL', 'TIF', 'TXT', 'XL', 'LLTC', 'WAT', 'NI', 'DRI', 'PCL', 'TAP', 'LLL', 'AVP',", "'XRX', 'APH', 'GPC', 'CHRW', 'SJM', 'AA', 'COG', 'FLR', 'DPS', 'CLX', 'RL', 'WYNN', 'BEAM',", "'LM', 'APOL', 'PDCO', 'JDSU', 'ANF', 'PBI', 'NDAQ', 'X', 'SEE', 'TER', 'THC', 'GME', 'GNW',", "'DISCA', 'DKS', 'DLTR', 'DLX', 'DM', 'DMND', 'DNB', 'DNR', 'DO', 'DOV', 'DOW', 'DPS', 'DRC',", "'EME', 'EMN', 'EMR', 'ENDP', 'ENR', 'ENS', 'ENSG', 'ENTR', 'ENZ', 'EOG', 'EPAY', 'EPIQ', 'EPR',", "'UNM', 'UNP', 'UNS', 'UNT', 'UNTD', 'UPS', 'URBN', 'URI', 'URS', 'USB', 'USMO', 'USTR', 'UTEK',", "'NVDA', 'NVE', 'NVR', 'NVTL', 'NWBI', 'NWE', 'NWL', 'NWN', 'NWSA', 'NX', 'NYB', 
'NYT', 'NYX',", "'AWR', 'AXE', 'AXP', 'AYI', 'AZO', 'AZZ', 'B', 'BA', 'BABY', 'BAC', 'BAS', 'BAX', 'BBBY',", "'ORI', 'ORIT', 'ORLY', 'ORN', 'OSG', 'OSIS', 'OSK', 'OXM', 'OXY', 'OZRK', 'PACW', 'PAY', 'PAYX',", "'B', 'BA', 'BABY', 'BAC', 'BAS', 'BAX', 'BBBY', 'BBG', 'BBOX', 'BBT', 'BBY', 'BC', 'BCO',", "'WMT', 'ORCL', 'INTC', 'PEP', 'ABT', 'QCOM', 'CSCO', 'SLB', 'C', 'CMCSA', 'BAC', 'DIS', 'MCD',", "'INTC', 'INTU', 'IO', 'IP', 'IPAR', 'IPCC', 'IPCM', 'IPG', 'IPHS', 'IPI', 'IR', 'IRBT', 'IRC',", "'FSP', 'FST', 'FTI', 'FTR', 'FUL', 'FULT', 'FWRD', 'GAS', 'GB', 'GBCI', 'GCI', 'GCO', 'GD',", "'NEM', 'NEOG', 'NEU', 'NEWP', 'NFG', 'NFLX', 'NFP', 'NFX', 'NI', 'NILE', 'NJR', 'NKE', 'NNN',", "'SHOO', 'SHW', 'SIAL', 'SIG', 'SIGI', 'SIGM', 'SIVB', 'SJI', 'SJM', 'SKS', 'SKT', 'SKX', 'SKYW',", "'JOY', 'HBAN', 'TSO', 'HRS', 'LRCX', 'PNW', 'DHI', 'ARG', 'LEN', 'QEP', 'EFX', 'CVH', 'CLF',", "'DTE', 'DTSI', 'DTV', 'DUK', 'DV', 'DVA', 'DVN', 'DW', 'DWA', 'DY', 'EA', 'EAT', 'EBAY',", "'AGP', 'AGYS', 'AHL', 'AHS', 'AIG', 'AINV', 'AIR', 'AIRM', 'AIT', 'AIV', 'AIZ', 'AJG', 'AKAM',", "'WRB', 'WRC', 'WRI', 'WRLD', 'WSM', 'WSO', 'WST', 'WTFC', 'WTR', 'WTS', 'WU', 'WWD', 'WWW',", "'LUFK', 'LUK', 'LUV', 'LXK', 'LXP', 'LXU', 'LYV', 'LZB', 'M', 'MA', 'MAA', 'MAC', 'MAN',", "'MPC', 'MMC', 'FE', 'VTR', 'SYY', 'PCG', 'HNZ', 'ADM', 'BRCM', 'ED', 'PPG', 'CME', 'LYB',", "'TER', 'THC', 'GME', 'GNW', 'FHN', 'ETFC', 'AMD', 'R', 'FII', 'RRD', 'BIG', 'AN', 'WPO',", "'CVG', 'CVGW', 'CVH', 'CVLT', 'CVS', 'CVX', 'CW', 'CWTR', 'CXW', 'CY', 'CYBX', 'CYH', 'CYMI',", "'EQY', 'ESE', 'ESI', 'ESIO', 'ESL', 'ESRX', 'ESS', 'ESV', 'ETFC', 'ETH', 'ETN', 'ETR', 'EV',", "'VLO', 'EQR', 'BEN', 'ECL', 'PPL', 'AON', 'WFM', 'BXP', 'YHOO', 'S', 'NBL', 'NOC', 'CMI',", "'SAI', 'SAM', 'SBRA', 'SBUX', 'SCG', 'SCHL', 'SCHW', 'SCI', 'SCL', 'SCOR', 'SCSC', 'SCSS', 'SE',", "'VDSI', 'VECO', 'VFC', 'VIAB', 'VICR', 'VIVO', 'VLO', 'VLTR', 'VLY', 'VMC', 'VMI', 'VNO', 'VOXX',", "'NSP', 'NSR', 'NTAP', 'NTCT', 'NTGR', 'NTLS', 'NTRI', 'NTRS', 
'NU', 'NUE', 'NUVA', 'NVDA', 'NVE',", "'MINI', 'MJN', 'MKC', 'MKSI', 'MLHR', 'MLI', 'MLM', 'MMC', 'MMM', 'MMS', 'MMSI', 'MNRO', 'MNST',", "'ETH', 'ETN', 'ETR', 'EV', 'EW', 'EWBC', 'EXAR', 'EXC', 'EXH', 'EXLS', 'EXP', 'EXPD', 'EXPE',", "'COCO', 'COF', 'COG', 'COH', 'COHU', 'COL', 'COLB', 'COO', 'COP', 'COST', 'COV', 'CPB', 'CPLA',", "'JWN', 'K', 'KALU', 'KAMN', 'KBH', 'KBR', 'KDN', 'KELYA', 'KEX', 'KEY', 'KFY', 'KIM', 'KIRK',", "'MDT', 'COST', 'TWX', 'TGT', 'SO', 'SBUX', 'AIG', 'F', 'FCX', 'MET', 'BIIB', 'EMR', 'APC',", "'WBSN', 'WCG', 'WCN', 'WDC', 'WDFC', 'WDR', 'WEC', 'WEN', 'WERN', 'WFC', 'WFM', 'WGL', 'WGO',", "'LMNX', 'LMOS', 'LMT', 'LNC', 'LNCE', 'LNN', 'LNT', 'LO', 'LOGM', 'LOW', 'LPNT', 'LPS', 'LPSN',", "'HSY', 'ROP', 'PAYX', 'GPS', 'SNDK', 'DTE', 'PRGO', 'RF', 'NTRS', 'DGX', 'CMG', 'FISV', 'ORLY',", "'VRTU', 'VRTX', 'VSAT', 'VSH', 'VSI', 'VTR', 'VVC', 'VZ', 'WAB', 'WABC', 'WAFD', 'WAG', 'WAT',", "'MLI', 'MLM', 'MMC', 'MMM', 'MMS', 'MMSI', 'MNRO', 'MNST', 'MNTA', 'MO', 'MOH', 'MOLX', 'MON',", "'HAR', 'HAS', 'HAYN', 'HBAN', 'HBHC', 'HBI', 'HCBK', 'HCC', 'HCN', 'HCP', 'HCSG', 'HD', 'HE',", "'TIF', 'TXT', 'XL', 'LLTC', 'WAT', 'NI', 'DRI', 'PCL', 'TAP', 'LLL', 'AVP', 'CNX', 'AES',", "'NVR', 'NVTL', 'NWBI', 'NWE', 'NWL', 'NWN', 'NWSA', 'NX', 'NYB', 'NYT', 'NYX', 'O', 'OCR',", "'RYL', 'RYN', 'S', 'SAFM', 'SAFT', 'SAH', 'SAI', 'SAM', 'SBRA', 'SBUX', 'SCG', 'SCHL', 'SCHW',", "'TRMB', 'TRMK', 'TRN', 'TROW', 'TRST', 'TRV', 'TSCO', 'TSN', 'TSO', 'TSRA', 'TSS', 'TTC', 'TTEC',", "'WTR', 'WTS', 'WU', 'WWD', 'WWW', 'WY', 'WYN', 'WYNN', 'X', 'XEC', 'XEL', 'XL', 'XLNX',", "'TROW', 'COH', 'ADBE', 'HES', 'ETN', 'MOS', 'IP', 'BDX', 'MHP', 'STI', 'LO', 'M', 'MJN',", "'FSLR', 'DV', 'TIE'] ALL_TICKERS = ['A', 'AA', 'AAN', 'AAON', 'AAP', 'AAPL', 'ABAX', 'ABC',", "'SPF', 'SPG', 'SPLS', 'SPN', 'SPPI', 'SPTN', 'SPW', 'SRCL', 'SRDX', 'SRE', 'SSD', 'SSI', 'SSP',", "'ADBE', 'HES', 'ETN', 'MOS', 'IP', 'BDX', 'MHP', 'STI', 'LO', 'M', 'MJN', 'EIX', 'EL',", "'IDA', 'IDTI', 'IDXX', 'IEX', 
'IFF', 'IFSIA', 'IGT', 'IGTE', 'IILG', 'IIVI', 'IM', 'IN', 'INDB',", "'O', 'OCR', 'ODFL', 'ODP', 'OFC', 'OGE', 'OHI', 'OI', 'OII', 'OIS', 'OKE', 'OLN', 'OMC',", "'ECL', 'PPL', 'AON', 'WFM', 'BXP', 'YHOO', 'S', 'NBL', 'NOC', 'CMI', 'CCL', 'PEG', 'INTU',", "'HR', 'HRB', 'HRC', 'HRL', 'HRS', 'HSC', 'HSH', 'HSIC', 'HSII', 'HSNI', 'HSP', 'HST', 'HSTM',", "'EIG', 'EIX', 'EL', 'ELY', 'EMC', 'EME', 'EMN', 'EMR', 'ENDP', 'ENR', 'ENS', 'ENSG', 'ENTR',", "'AVB', 'L', 'IR', 'PXD', 'KR', 'SWK', 'K', 'TDC', 'SHW', 'ESV', 'SYMC', 'PH', 'GWW',", "'IN', 'INDB', 'INFA', 'INGR', 'ININ', 'INT', 'INTC', 'INTU', 'IO', 'IP', 'IPAR', 'IPCC', 'IPCM',", "'HIG', 'HII', 'HITK', 'HITT', 'HIW', 'HLIT', 'HLX', 'HMA', 'HME', 'HMN', 'HMSY', 'HNI', 'HNT',", "'SNDK', 'SNH', 'SNI', 'SNPS', 'SNV', 'SNX', 'SO', 'SON', 'SONC', 'SPAR', 'SPF', 'SPG', 'SPLS',", "'SIVB', 'SJI', 'SJM', 'SKS', 'SKT', 'SKX', 'SKYW', 'SLAB', 'SLB', 'SLG', 'SLGN', 'SLH', 'SLM',", "'SFY', 'SGMS', 'SGY', 'SHAW', 'SHFL', 'SHLM', 'SHOO', 'SHW', 'SIAL', 'SIG', 'SIGI', 'SIGM', 'SIVB',", "'VZ', 'WAB', 'WABC', 'WAFD', 'WAG', 'WAT', 'WBS', 'WBSN', 'WCG', 'WCN', 'WDC', 'WDFC', 'WDR',", "'CBG', 'CINF', 'NWL', 'HSP', 'EXPE', 'XRAY', 'UNM', 'MAS', 'MWV', 'SNI', 'PWR', 'JEC', 'PHM',", "'NPK', 'NPO', 'NRG', 'NSC', 'NSIT', 'NSP', 'NSR', 'NTAP', 'NTCT', 'NTGR', 'NTLS', 'NTRI', 'NTRS',", "'CVLT', 'CVS', 'CVX', 'CW', 'CWTR', 'CXW', 'CY', 'CYBX', 'CYH', 'CYMI', 'CYN', 'CYT', 'D',", "'INGR', 'ININ', 'INT', 'INTC', 'INTU', 'IO', 'IP', 'IPAR', 'IPCC', 'IPCM', 'IPG', 'IPHS', 'IPI',", "'NWE', 'NWL', 'NWN', 'NWSA', 'NX', 'NYB', 'NYT', 'NYX', 'O', 'OCR', 'ODFL', 'ODP', 'OFC',", "'UDR', 'UEIC', 'UFCS', 'UFPI', 'UFS', 'UGI', 'UHS', 'UHT', 'UIL', 'UMBF', 'UMPQ', 'UNF', 'UNFI',", "'SPPI', 'SPTN', 'SPW', 'SRCL', 'SRDX', 'SRE', 'SSD', 'SSI', 'SSP', 'SSS', 'STBA', 'STC', 'STE',", "'GLW', 'GMCR', 'GME', 'GMT', 'GNCMA', 'GNTX', 'GNW', 'GOOG', 'GPC', 'GPI', 'GPN', 'GPOR', 'GPS',", "'LUV', 'LXK', 'LXP', 'LXU', 'LYV', 'LZB', 'M', 'MA', 'MAA', 'MAC', 'MAN', 'MANH', 
'MANT',", "'DGX', 'DHI', 'DHR', 'DHX', 'DIN', 'DIOD', 'DIS', 'DISCA', 'DKS', 'DLTR', 'DLX', 'DM', 'DMND',", "'GB', 'GBCI', 'GCI', 'GCO', 'GD', 'GDI', 'GE', 'GEF', 'GEO', 'GES', 'GFF', 'GGG', 'GHL',", "['A', 'AA', 'AAN', 'AAON', 'AAP', 'AAPL', 'ABAX', 'ABC', 'ABFS', 'ABM', 'ABT', 'ACAT', 'ACC',", "'MMM', 'EMC', 'UNH', 'BMY', 'UNP', 'CAT', 'EBAY', 'AXP', 'UPS', 'GS', 'ESRX', 'GILD', 'NWSA',", "'HAE', 'HAFC', 'HAIN', 'HAL', 'HAR', 'HAS', 'HAYN', 'HBAN', 'HBHC', 'HBI', 'HCBK', 'HCC', 'HCN',", "'CVX', 'MSFT', 'IBM', 'T', 'GOOG', 'PG', 'JNJ', 'PFE', 'WFC', 'BRK.B', 'JPM', 'PM', 'KO',", "'MUR', 'MW', 'MWIV', 'MWV', 'MWW', 'MYE', 'MYL', 'NAFC', 'NANO', 'NATI', 'NAVG', 'NBL', 'NBR',", "'PWR', 'PX', 'PXD', 'PXP', 'PZZA', 'QCOM', 'QCOR', 'QEP', 'QLGC', 'QNST', 'QSFT', 'QSII', 'R',", "'IP', 'IPAR', 'IPCC', 'IPCM', 'IPG', 'IPHS', 'IPI', 'IR', 'IRBT', 'IRC', 'IRF', 'IRM', 'ISCA',", "'KO', 'MRK', 'VZ', 'WMT', 'ORCL', 'INTC', 'PEP', 'ABT', 'QCOM', 'CSCO', 'SLB', 'C', 'CMCSA',", "'RSG', 'MKC', 'BCR', 'BSX', 'KLAC', 'AEE', 'BWA', 'SPLS', 'FIS', 'SRCL', 'EXPD', 'COL', 'VRSN',", "'TSS', 'ZION', 'HCBK', 'AIV', 'RHI', 'PCS', 'MOLX', 'TE', 'TRIP', 'DNB', 'LEG', 'JBL', 'IGT',", "'CECO', 'CELG', 'CELL', 'CENX', 'CERN', 'CEVA', 'CF', 'CFN', 'CFR', 'CGNX', 'CGX', 'CHCO', 'CHD',", "'CL', 'CLC', 'CLD', 'CLF', 'CLGX', 'CLH', 'CLI', 'CLMS', 'CLP', 'CLW', 'CLX', 'CMA', 'CMC',", "'CMC', 'CMCSA', 'CME', 'CMG', 'CMI', 'CMN', 'CMP', 'CMS', 'CMTL', 'CNC', 'CNK', 'CNL', 'CNMD',", "'IFF', 'GAS', 'STZ', 'HRB', 'XYL', 'TSN', 'FOSL', 'DO', 'BBY', 'LUK', 'CTAS', 'HAS', 'POM',", "'ENS', 'ENSG', 'ENTR', 'ENZ', 'EOG', 'EPAY', 'EPIQ', 'EPR', 'EQIX', 'EQR', 'EQT', 'EQY', 'ESE',", "['AAPL', 'XOM', 'GE', 'CVX', 'MSFT', 'IBM', 'T', 'GOOG', 'PG', 'JNJ', 'PFE', 'WFC', 'BRK.B',", "'DRC', 'DRE', 'DRH', 'DRI', 'DRIV', 'DRQ', 'DSPG', 'DTE', 'DTSI', 'DTV', 'DUK', 'DV', 'DVA',", "'CCI', 'JCI', 'MPC', 'MMC', 'FE', 'VTR', 'SYY', 'PCG', 'HNZ', 'ADM', 'BRCM', 'ED', 'PPG',", "'MSI', 'ROST', 'DELL', 'CTXS', 'FITB', 'RAI', 
'PCAR', 'WY', 'SCHW', 'VFC', 'WM', 'CF', 'AZO',", "'PCL', 'TAP', 'LLL', 'AVP', 'CNX', 'AES', 'AKAM', 'LNC', 'VAR', 'BLL', 'FLS', 'LUV', 'KMX',", "'CXW', 'CY', 'CYBX', 'CYH', 'CYMI', 'CYN', 'CYT', 'D', 'DAKT', 'DAR', 'DBD', 'DCI', 'DCOM',", "'DW', 'DWA', 'DY', 'EA', 'EAT', 'EBAY', 'EBIX', 'EBS', 'ECL', 'ECPG', 'ED', 'EE', 'EFX',", "'WFM', 'BXP', 'YHOO', 'S', 'NBL', 'NOC', 'CMI', 'CCL', 'PEG', 'INTU', 'PLD', 'SYK', 'TROW',", "'FHN', 'ETFC', 'AMD', 'R', 'FII', 'RRD', 'BIG', 'AN', 'WPO', 'LXK', 'ANR', 'FSLR', 'DV',", "'EL', 'DISCA', 'HCN', 'BBBY', 'TEL', 'SRE', 'MSI', 'ROST', 'DELL', 'CTXS', 'FITB', 'RAI', 'PCAR',", "'SYMC', 'SYMM', 'SYNA', 'SYY', 'T', 'TAP', 'TBI', 'TCB', 'TCBI', 'TCO', 'TDC', 'TDS', 'TDW',", "'FMC', 'ADSK', 'PFG', 'WYN', 'SLM', 'PLL', 'TIF', 'TXT', 'XL', 'LLTC', 'WAT', 'NI', 'DRI',", "'FCX', 'MET', 'BIIB', 'EMR', 'APC', 'NKE', 'DOW', 'LOW', 'NOV', 'KMB', 'APA', 'HPQ', 'PNC',", "'GE', 'GEF', 'GEO', 'GES', 'GFF', 'GGG', 'GHL', 'GIFI', 'GILD', 'GIS', 'GLW', 'GMCR', 'GME',", "'PGR', 'PH', 'PHM', 'PII', 'PJC', 'PKE', 'PKG', 'PKI', 'PKY', 'PL', 'PLCE', 'PLCM', 'PLD',", "'CNQR', 'CNW', 'CNX', 'COCO', 'COF', 'COG', 'COH', 'COHU', 'COL', 'COLB', 'COO', 'COP', 'COST',", "'NBL', 'NBR', 'NBTB', 'NCI', 'NCIT', 'NCR', 'NCS', 'NDAQ', 'NDSN', 'NE', 'NEE', 'NEM', 'NEOG',", "'COP', 'MO', 'UTX', 'USB', 'AMGN', 'CVS', 'MMM', 'EMC', 'UNH', 'BMY', 'UNP', 'CAT', 'EBAY',", "'UFCS', 'UFPI', 'UFS', 'UGI', 'UHS', 'UHT', 'UIL', 'UMBF', 'UMPQ', 'UNF', 'UNFI', 'UNH', 'UNM',", "'TRN', 'TROW', 'TRST', 'TRV', 'TSCO', 'TSN', 'TSO', 'TSRA', 'TSS', 'TTC', 'TTEC', 'TTEK', 'TTI',", "'DGII', 'DGIT', 'DGX', 'DHI', 'DHR', 'DHX', 'DIN', 'DIOD', 'DIS', 'DISCA', 'DKS', 'DLTR', 'DLX',", "'STL', 'STLD', 'STMP', 'STR', 'STRA', 'STRI', 'STT', 'STX', 'STZ', 'SUN', 'SUP', 'SUPX', 'SUSQ',", "'RHI', 'PCS', 'MOLX', 'TE', 'TRIP', 'DNB', 'LEG', 'JBL', 'IGT', 'JCP', 'CVC', 'ATI', 'SAI',", "'SPN', 'SPPI', 'SPTN', 'SPW', 'SRCL', 'SRDX', 'SRE', 'SSD', 'SSI', 'SSP', 'SSS', 'STBA', 'STC',", "'VIAB', 'CTL', 
'LMT', 'FDX', 'PCP', 'BBT', 'MS', 'BLK', 'DVN', 'AFL', 'ALXN', 'GD', 'WMB',", "'STT', 'PSA', 'BHI', 'ISRG', 'GLW', 'CRM', 'ALL', 'SE', 'HCP', 'RTN', 'WLP', 'CCI', 'JCI',", "'PNK', 'PNM', 'PNR', 'PNRA', 'PNW', 'PNY', 'POL', 'POM', 'POOL', 'POST', 'POWI', 'POWL', 'PPG',", "'RF', 'NTRS', 'DGX', 'CMG', 'FISV', 'ORLY', 'MUR', 'OKE', 'MYL', 'BF.B', 'MAR', 'ROK', 'CHK',", "'DECK', 'DEL', 'DELL', 'DF', 'DFS', 'DGII', 'DGIT', 'DGX', 'DHI', 'DHR', 'DHX', 'DIN', 'DIOD',", "'JDAS', 'JDSU', 'JEC', 'JEF', 'JJSF', 'JKHY', 'JLL', 'JNJ', 'JNPR', 'JNS', 'JOSB', 'JOY', 'JPM',", "'HIBB', 'HIG', 'HII', 'HITK', 'HITT', 'HIW', 'HLIT', 'HLX', 'HMA', 'HME', 'HMN', 'HMSY', 'HNI',", "'RS', 'RSG', 'RSH', 'RSTI', 'RSYS', 'RT', 'RTEC', 'RTI', 'RTN', 'RUE', 'RUTH', 'RVBD', 'RYL',", "'BBBY', 'TEL', 'SRE', 'MSI', 'ROST', 'DELL', 'CTXS', 'FITB', 'RAI', 'PCAR', 'WY', 'SCHW', 'VFC',", "'VECO', 'VFC', 'VIAB', 'VICR', 'VIVO', 'VLO', 'VLTR', 'VLY', 'VMC', 'VMI', 'VNO', 'VOXX', 'VPFG',", "'NPBC', 'NPK', 'NPO', 'NRG', 'NSC', 'NSIT', 'NSP', 'NSR', 'NTAP', 'NTCT', 'NTGR', 'NTLS', 'NTRI',", "'UVV', 'V', 'VAL', 'VAR', 'VCI', 'VCLK', 'VDSI', 'VECO', 'VFC', 'VIAB', 'VICR', 'VIVO', 'VLO',", "'ALE', 'ALEX', 'ALGN', 'ALGT', 'ALK', 'ALL', 'ALOG', 'ALTR', 'ALXN', 'AM', 'AMAT', 'AMCX', 'AMD',", "'AON', 'WFM', 'BXP', 'YHOO', 'S', 'NBL', 'NOC', 'CMI', 'CCL', 'PEG', 'INTU', 'PLD', 'SYK',", "'BXS', 'BYD', 'BYI', 'C', 'CA', 'CAB', 'CACI', 'CAG', 'CAH', 'CAKE', 'CALM', 'CAM', 'CAS',", "'MU', 'BMC', 'NYX', 'CMA', 'BTU', 'WIN', 'JOY', 'HBAN', 'TSO', 'HRS', 'LRCX', 'PNW', 'DHI',", "'SWY', 'SXC', 'SXI', 'SXT', 'SYK', 'SYKE', 'SYMC', 'SYMM', 'SYNA', 'SYY', 'T', 'TAP', 'TBI',", "'HPQ', 'HPT', 'HPY', 'HR', 'HRB', 'HRC', 'HRL', 'HRS', 'HSC', 'HSH', 'HSIC', 'HSII', 'HSNI',", "'AREX', 'ARG', 'ARO', 'ARQL', 'ARRS', 'ARW', 'ASBC', 'ASEI', 'ASGN', 'ASH', 'ASNA', 'ASTE', 'ATI',", "'GTAT', 'GTIV', 'GTY', 'GVA', 'GWW', 'GXP', 'GY', 'HAE', 'HAFC', 'HAIN', 'HAL', 'HAR', 'HAS',", "'MWW', 'MYE', 'MYL', 'NAFC', 'NANO', 'NATI', 'NAVG', 'NBL', 
'NBR', 'NBTB', 'NCI', 'NCIT', 'NCR',", "'XLNX', 'XLS', 'XOM', 'XOXO', 'XRAY', 'XRX', 'XYL', 'Y', 'YHOO', 'YUM', 'ZBRA', 'ZEP', 'ZEUS',", "'OSG', 'OSIS', 'OSK', 'OXM', 'OXY', 'OZRK', 'PACW', 'PAY', 'PAYX', 'PB', 'PBCT', 'PBH', 'PBI',", "'HMA', 'HME', 'HMN', 'HMSY', 'HNI', 'HNT', 'HNZ', 'HOG', 'HOLX', 'HOMB', 'HON', 'HOS', 'HOT',", "'HIG', 'JWN', 'FRX', 'MNST', 'FFIV', 'NVDA', 'KIM', 'KEY', 'RSG', 'MKC', 'BCR', 'BSX', 'KLAC',", "'RBN', 'RCII', 'RDC', 'RE', 'RECN', 'REG', 'REGN', 'RF', 'RFMD', 'RGA', 'RGLD', 'RGR', 'RGS',", "'BCR', 'BDC', 'BDX', 'BEAM', 'BEAV', 'BELFB', 'BEN', 'BFS', 'BGC', 'BGFV', 'BGG', 'BGS', 'BH',", "'AVA', 'AVAV', 'AVB', 'AVD', 'AVID', 'AVP', 'AVT', 'AVY', 'AWR', 'AXE', 'AXP', 'AYI', 'AZO',", "'MKC', 'MKSI', 'MLHR', 'MLI', 'MLM', 'MMC', 'MMM', 'MMS', 'MMSI', 'MNRO', 'MNST', 'MNTA', 'MO',", "'HAL', 'WAG', 'PX', 'PCLN', 'EXC', 'D', 'EOG', 'YUM', 'NEE', 'TWC', 'PSX', 'COV', 'ADP',", "'PLCM', 'PLD', 'PLFE', 'PLL', 'PLT', 'PLXS', 'PM', 'PMC', 'PMTC', 'PMTI', 'PNC', 'PNFP', 'PNK',", "'PLT', 'PLXS', 'PM', 'PMC', 'PMTC', 'PMTI', 'PNC', 'PNFP', 'PNK', 'PNM', 'PNR', 'PNRA', 'PNW',", "'NDAQ', 'X', 'SEE', 'TER', 'THC', 'GME', 'GNW', 'FHN', 'ETFC', 'AMD', 'R', 'FII', 'RRD',", "'PJC', 'PKE', 'PKG', 'PKI', 'PKY', 'PL', 'PLCE', 'PLCM', 'PLD', 'PLFE', 'PLL', 'PLT', 'PLXS',", "'FCF', 'FCFS', 'FCN', 'FCS', 'FCX', 'FDO', 'FDS', 'FDX', 'FE', 'FEIC', 'FELE', 'FFBC', 'FFIN',", "'XYL', 'Y', 'YHOO', 'YUM', 'ZBRA', 'ZEP', 'ZEUS', 'ZLC', 'ZMH', 'ZQK', 'ZUMZ'] TICKERS =", "'OI', 'AIZ', 'NFLX', 'DF', 'FLIR', 'GT', 'LM', 'APOL', 'PDCO', 'JDSU', 'ANF', 'PBI', 'NDAQ',", "'GILD', 'GIS', 'GLW', 'GMCR', 'GME', 'GMT', 'GNCMA', 'GNTX', 'GNW', 'GOOG', 'GPC', 'GPI', 'GPN',", "'BAX', 'TJX', 'CELG', 'DTV', 'DE', 'DHR', 'TXN', 'HAL', 'WAG', 'PX', 'PCLN', 'EXC', 'D',", "'KIM', 'KIRK', 'KLAC', 'KLIC', 'KMB', 'KMI', 'KMPR', 'KMT', 'KMX', 'KND', 'KNX', 'KO', 'KOP',", "'DPS', 'CLX', 'RL', 'WYNN', 'BEAM', 'CNP', 'NE', 'JNPR', 'LH', 'EQT', 'CA', 'DVA', 'XLNX',", "'NJR', 'NKE', 'NNN', 'NOC', 
'NOV', 'NP', 'NPBC', 'NPK', 'NPO', 'NRG', 'NSC', 'NSIT', 'NSP',", "'CMS', 'VMC', 'MU', 'BMC', 'NYX', 'CMA', 'BTU', 'WIN', 'JOY', 'HBAN', 'TSO', 'HRS', 'LRCX',", "'KMX', 'KND', 'KNX', 'KO', 'KOP', 'KOPN', 'KR', 'KRA', 'KRC', 'KRG', 'KS', 'KSS', 'KSU',", "'INFA', 'INGR', 'ININ', 'INT', 'INTC', 'INTU', 'IO', 'IP', 'IPAR', 'IPCC', 'IPCM', 'IPG', 'IPHS',", "'OKE', 'OLN', 'OMC', 'OMCL', 'OMG', 'OMI', 'OMX', 'ONB', 'ONE', 'OPEN', 'OPLK', 'OPNT', 'ORB',", "'CCRN', 'CDI', 'CDNS', 'CDR', 'CEB', 'CEC', 'CECO', 'CELG', 'CELL', 'CENX', 'CERN', 'CEVA', 'CF',", "'OZRK', 'PACW', 'PAY', 'PAYX', 'PB', 'PBCT', 'PBH', 'PBI', 'PBY', 'PCAR', 'PCG', 'PCH', 'PCL',", "'FDS', 'FDX', 'FE', 'FEIC', 'FELE', 'FFBC', 'FFIN', 'FFIV', 'FHN', 'FICO', 'FII', 'FINL', 'FIRE',", "'DVA', 'DVN', 'DW', 'DWA', 'DY', 'EA', 'EAT', 'EBAY', 'EBIX', 'EBS', 'ECL', 'ECPG', 'ED',", "'SCSC', 'SCSS', 'SE', 'SEE', 'SEIC', 'SENEA', 'SF', 'SFD', 'SFG', 'SFNC', 'SFY', 'SGMS', 'SGY',", "'UPS', 'URBN', 'URI', 'URS', 'USB', 'USMO', 'USTR', 'UTEK', 'UTHR', 'UTI', 'UTIW', 'UTX', 'UVV',", "'VMC', 'MU', 'BMC', 'NYX', 'CMA', 'BTU', 'WIN', 'JOY', 'HBAN', 'TSO', 'HRS', 'LRCX', 'PNW',", "'HAS', 'HAYN', 'HBAN', 'HBHC', 'HBI', 'HCBK', 'HCC', 'HCN', 'HCP', 'HCSG', 'HD', 'HE', 'HES',", "'FUL', 'FULT', 'FWRD', 'GAS', 'GB', 'GBCI', 'GCI', 'GCO', 'GD', 'GDI', 'GE', 'GEF', 'GEO',", "'CMP', 'CMS', 'CMTL', 'CNC', 'CNK', 'CNL', 'CNMD', 'CNP', 'CNQR', 'CNW', 'CNX', 'COCO', 'COF',", "'NEWP', 'NFG', 'NFLX', 'NFP', 'NFX', 'NI', 'NILE', 'NJR', 'NKE', 'NNN', 'NOC', 'NOV', 'NP',", "'NYX', 'O', 'OCR', 'ODFL', 'ODP', 'OFC', 'OGE', 'OHI', 'OI', 'OII', 'OIS', 'OKE', 'OLN',", "'ACE', 'PRU', 'VIAB', 'CTL', 'LMT', 'FDX', 'PCP', 'BBT', 'MS', 'BLK', 'DVN', 'AFL', 'ALXN',", "'BCOR', 'BCPC', 'BCR', 'BDC', 'BDX', 'BEAM', 'BEAV', 'BELFB', 'BEN', 'BFS', 'BGC', 'BGFV', 'BGG',", "'CBRL', 'CBS', 'CBSH', 'CBST', 'CBT', 'CBU', 'CCC', 'CCE', 'CCI', 'CCL', 'CCMP', 'CCRN', 'CDI',", "'UNTD', 'UPS', 'URBN', 'URI', 'URS', 'USB', 'USMO', 'USTR', 'UTEK', 'UTHR', 'UTI', 
'UTIW', 'UTX',", "'APOL', 'ARB', 'ARE', 'AREX', 'ARG', 'ARO', 'ARQL', 'ARRS', 'ARW', 'ASBC', 'ASEI', 'ASGN', 'ASH',", "'RVBD', 'RYL', 'RYN', 'S', 'SAFM', 'SAFT', 'SAH', 'SAI', 'SAM', 'SBRA', 'SBUX', 'SCG', 'SCHL',", "'NANO', 'NATI', 'NAVG', 'NBL', 'NBR', 'NBTB', 'NCI', 'NCIT', 'NCR', 'NCS', 'NDAQ', 'NDSN', 'NE',", "'SRE', 'MSI', 'ROST', 'DELL', 'CTXS', 'FITB', 'RAI', 'PCAR', 'WY', 'SCHW', 'VFC', 'WM', 'CF',", "'AMGN', 'CVS', 'MMM', 'EMC', 'UNH', 'BMY', 'UNP', 'CAT', 'EBAY', 'AXP', 'UPS', 'GS', 'ESRX',", "'FOSL', 'FRED', 'FRT', 'FRX', 'FSLR', 'FSP', 'FST', 'FTI', 'FTR', 'FUL', 'FULT', 'FWRD', 'GAS',", "'EGN', 'EGP', 'EHTH', 'EIG', 'EIX', 'EL', 'ELY', 'EMC', 'EME', 'EMN', 'EMR', 'ENDP', 'ENR',", "'NOV', 'KMB', 'APA', 'HPQ', 'PNC', 'COF', 'BAX', 'TJX', 'CELG', 'DTV', 'DE', 'DHR', 'TXN',", "'BTH', 'BTU', 'BWA', 'BWLD', 'BWS', 'BXP', 'BXS', 'BYD', 'BYI', 'C', 'CA', 'CAB', 'CACI',", "'UFPI', 'UFS', 'UGI', 'UHS', 'UHT', 'UIL', 'UMBF', 'UMPQ', 'UNF', 'UNFI', 'UNH', 'UNM', 'UNP',", "'AKAM', 'LNC', 'VAR', 'BLL', 'FLS', 'LUV', 'KMX', 'FDO', 'WHR', 'MCHP', 'SCG', 'DNR', 'CFN',", "'IBM', 'IBOC', 'ICE', 'ICON', 'ICUI', 'IDA', 'IDTI', 'IDXX', 'IEX', 'IFF', 'IFSIA', 'IGT', 'IGTE',", "'LINC', 'LKQ', 'LL', 'LLL', 'LLTC', 'LLY', 'LM', 'LMNX', 'LMOS', 'LMT', 'LNC', 'LNCE', 'LNN',", "'CDR', 'CEB', 'CEC', 'CECO', 'CELG', 'CELL', 'CENX', 'CERN', 'CEVA', 'CF', 'CFN', 'CFR', 'CGNX',", "'SJM', 'SKS', 'SKT', 'SKX', 'SKYW', 'SLAB', 'SLB', 'SLG', 'SLGN', 'SLH', 'SLM', 'SLXP', 'SM',", "'GFF', 'GGG', 'GHL', 'GIFI', 'GILD', 'GIS', 'GLW', 'GMCR', 'GME', 'GMT', 'GNCMA', 'GNTX', 'GNW',", "'UNFI', 'UNH', 'UNM', 'UNP', 'UNS', 'UNT', 'UNTD', 'UPS', 'URBN', 'URI', 'URS', 'USB', 'USMO',", "'DIS', 'MCD', 'AMZN', 'HD', 'KFT', 'V', 'OXY', 'COP', 'MO', 'UTX', 'USB', 'AMGN', 'CVS',", "'STLD', 'STMP', 'STR', 'STRA', 'STRI', 'STT', 'STX', 'STZ', 'SUN', 'SUP', 'SUPX', 'SUSQ', 'SVU',", "'JCOM', 'JCP', 'JDAS', 'JDSU', 'JEC', 'JEF', 'JJSF', 'JKHY', 'JLL', 'JNJ', 'JNPR', 'JNS', 'JOSB',", "'NEE', 'TWC', 'PSX', 'COV', 
'ADP', 'AMT', 'AGN', 'NEM', 'BK', 'TRV', 'TYC', 'GIS', 'ITW',", "'PCS', 'MOLX', 'TE', 'TRIP', 'DNB', 'LEG', 'JBL', 'IGT', 'JCP', 'CVC', 'ATI', 'SAI', 'PKI',", "'KBH', 'KBR', 'KDN', 'KELYA', 'KEX', 'KEY', 'KFY', 'KIM', 'KIRK', 'KLAC', 'KLIC', 'KMB', 'KMI',", "'LPNT', 'LPS', 'LPSN', 'LPX', 'LQDT', 'LRCX', 'LRY', 'LSI', 'LSTR', 'LTC', 'LTD', 'LTM', 'LUFK',", "'TUP', 'TW', 'TWC', 'TWGP', 'TWTC', 'TWX', 'TXI', 'TXN', 'TXRH', 'TXT', 'TYC', 'TYL', 'TYPE',", "'HES', 'HF', 'HFC', 'HGR', 'HHS', 'HI', 'HIBB', 'HIG', 'HII', 'HITK', 'HITT', 'HIW', 'HLIT',", "'ETFC', 'ETH', 'ETN', 'ETR', 'EV', 'EW', 'EWBC', 'EXAR', 'EXC', 'EXH', 'EXLS', 'EXP', 'EXPD',", "'RT', 'RTEC', 'RTI', 'RTN', 'RUE', 'RUTH', 'RVBD', 'RYL', 'RYN', 'S', 'SAFM', 'SAFT', 'SAH',", "'HES', 'ETN', 'MOS', 'IP', 'BDX', 'MHP', 'STI', 'LO', 'M', 'MJN', 'EIX', 'EL', 'DISCA',", "'LSTR', 'LTC', 'LTD', 'LTM', 'LUFK', 'LUK', 'LUV', 'LXK', 'LXP', 'LXU', 'LYV', 'LZB', 'M',", "'APH', 'GPC', 'CHRW', 'SJM', 'AA', 'COG', 'FLR', 'DPS', 'CLX', 'RL', 'WYNN', 'BEAM', 'CNP',", "'NFLX', 'DF', 'FLIR', 'GT', 'LM', 'APOL', 'PDCO', 'JDSU', 'ANF', 'PBI', 'NDAQ', 'X', 'SEE',", "'MHO', 'MIG', 'MINI', 'MJN', 'MKC', 'MKSI', 'MLHR', 'MLI', 'MLM', 'MMC', 'MMM', 'MMS', 'MMSI',", "'BCPC', 'BCR', 'BDC', 'BDX', 'BEAM', 'BEAV', 'BELFB', 'BEN', 'BFS', 'BGC', 'BGFV', 'BGG', 'BGS',", "'HAS', 'POM', 'PBCT', 'NFX', 'RDC', 'SNA', 'GCI', 'URBN', 'NBR', 'TEG', 'EA', 'HRL', 'SWY',", "'HSII', 'HSNI', 'HSP', 'HST', 'HSTM', 'HSY', 'HTLD', 'HTSI', 'HUBG', 'HUM', 'HVT', 'HW', 'HWAY',", "'ELY', 'EMC', 'EME', 'EMN', 'EMR', 'ENDP', 'ENR', 'ENS', 'ENSG', 'ENTR', 'ENZ', 'EOG', 'EPAY',", "'APC', 'NKE', 'DOW', 'LOW', 'NOV', 'KMB', 'APA', 'HPQ', 'PNC', 'COF', 'BAX', 'TJX', 'CELG',", "'TXI', 'TXN', 'TXRH', 'TXT', 'TYC', 'TYL', 'TYPE', 'UA', 'UBA', 'UBSI', 'UCBI', 'UDR', 'UEIC',", "'STC', 'STE', 'STI', 'STJ', 'STL', 'STLD', 'STMP', 'STR', 'STRA', 'STRI', 'STT', 'STX', 'STZ',", "'VMI', 'VNO', 'VOXX', 'VPFG', 'VPHM', 'VRSN', 'VRTS', 'VRTU', 'VRTX', 'VSAT', 'VSH', 'VSI', 
'VTR',", "'TR', 'TRAK', 'TRIP', 'TRLG', 'TRMB', 'TRMK', 'TRN', 'TROW', 'TRST', 'TRV', 'TSCO', 'TSN', 'TSO',", "'CSCO', 'SLB', 'C', 'CMCSA', 'BAC', 'DIS', 'MCD', 'AMZN', 'HD', 'KFT', 'V', 'OXY', 'COP',", "'DOW', 'DPS', 'DRC', 'DRE', 'DRH', 'DRI', 'DRIV', 'DRQ', 'DSPG', 'DTE', 'DTSI', 'DTV', 'DUK',", "'ETR', 'NUE', 'SWN', 'MAT', 'CBE', 'NU', 'AMP', 'NTAP', 'ZMH', 'LTD', 'ADI', 'PGR', 'HST',", "'CSTR', 'CSX', 'CTAS', 'CTL', 'CTS', 'CTSH', 'CTXS', 'CUB', 'CUZ', 'CVBF', 'CVC', 'CVD', 'CVG',", "'PXP', 'PZZA', 'QCOM', 'QCOR', 'QEP', 'QLGC', 'QNST', 'QSFT', 'QSII', 'R', 'RAH', 'RAI', 'RAX',", "'PSX', 'PTEN', 'PULS', 'PVA', 'PVH', 'PVTB', 'PWR', 'PX', 'PXD', 'PXP', 'PZZA', 'QCOM', 'QCOR',", "'ATW', 'AVA', 'AVAV', 'AVB', 'AVD', 'AVID', 'AVP', 'AVT', 'AVY', 'AWR', 'AXE', 'AXP', 'AYI',", "'MSI', 'MSM', 'MSTR', 'MTB', 'MTD', 'MTH', 'MTRN', 'MTRX', 'MTSC', 'MTX', 'MU', 'MUR', 'MW',", "'GPC', 'CHRW', 'SJM', 'AA', 'COG', 'FLR', 'DPS', 'CLX', 'RL', 'WYNN', 'BEAM', 'CNP', 'NE',", "'ITG', 'ITRI', 'ITT', 'ITW', 'IVAC', 'IVC', 'IVZ', 'JACK', 'JAH', 'JAKK', 'JBHT', 'JBL', 'JBLU',", "'VSH', 'VSI', 'VTR', 'VVC', 'VZ', 'WAB', 'WABC', 'WAFD', 'WAG', 'WAT', 'WBS', 'WBSN', 'WCG',", "'DE', 'DHR', 'TXN', 'HAL', 'WAG', 'PX', 'PCLN', 'EXC', 'D', 'EOG', 'YUM', 'NEE', 'TWC',", "'DHI', 'DHR', 'DHX', 'DIN', 'DIOD', 'DIS', 'DISCA', 'DKS', 'DLTR', 'DLX', 'DM', 'DMND', 'DNB',", "'CHK', 'ABC', 'ICE', 'HOG', 'XRX', 'APH', 'GPC', 'CHRW', 'SJM', 'AA', 'COG', 'FLR', 'DPS',", "'MCRI', 'MCRL', 'MCRS', 'MCS', 'MCY', 'MD', 'MDC', 'MDCO', 'MDP', 'MDRX', 'MDSO', 'MDT', 'MDU',", "'RBC', 'RBCN', 'RBN', 'RCII', 'RDC', 'RE', 'RECN', 'REG', 'REGN', 'RF', 'RFMD', 'RGA', 'RGLD',", "'FCX', 'FDO', 'FDS', 'FDX', 'FE', 'FEIC', 'FELE', 'FFBC', 'FFIN', 'FFIV', 'FHN', 'FICO', 'FII',", "'RUE', 'RUTH', 'RVBD', 'RYL', 'RYN', 'S', 'SAFM', 'SAFT', 'SAH', 'SAI', 'SAM', 'SBRA', 'SBUX',", "'LTC', 'LTD', 'LTM', 'LUFK', 'LUK', 'LUV', 'LXK', 'LXP', 'LXU', 'LYV', 'LZB', 'M', 'MA',", "'JOSB', 'JOY', 'JPM', 'JWN', 'K', 'KALU', 'KAMN', 'KBH', 
'KBR', 'KDN', 'KELYA', 'KEX', 'KEY',", "'BCO', 'BCOR', 'BCPC', 'BCR', 'BDC', 'BDX', 'BEAM', 'BEAV', 'BELFB', 'BEN', 'BFS', 'BGC', 'BGFV',", "'ICE', 'HOG', 'XRX', 'APH', 'GPC', 'CHRW', 'SJM', 'AA', 'COG', 'FLR', 'DPS', 'CLX', 'RL',", "'PH', 'GWW', 'EW', 'ETR', 'NUE', 'SWN', 'MAT', 'CBE', 'NU', 'AMP', 'NTAP', 'ZMH', 'LTD',", "'NNN', 'NOC', 'NOV', 'NP', 'NPBC', 'NPK', 'NPO', 'NRG', 'NSC', 'NSIT', 'NSP', 'NSR', 'NTAP',", "'PSEC', 'PSEM', 'PSS', 'PSSI', 'PSX', 'PTEN', 'PULS', 'PVA', 'PVH', 'PVTB', 'PWR', 'PX', 'PXD',", "'USMO', 'USTR', 'UTEK', 'UTHR', 'UTI', 'UTIW', 'UTX', 'UVV', 'V', 'VAL', 'VAR', 'VCI', 'VCLK',", "'MET', 'BIIB', 'EMR', 'APC', 'NKE', 'DOW', 'LOW', 'NOV', 'KMB', 'APA', 'HPQ', 'PNC', 'COF',", "'NWSA', 'NX', 'NYB', 'NYT', 'NYX', 'O', 'OCR', 'ODFL', 'ODP', 'OFC', 'OGE', 'OHI', 'OI',", "'ORN', 'OSG', 'OSIS', 'OSK', 'OXM', 'OXY', 'OZRK', 'PACW', 'PAY', 'PAYX', 'PB', 'PBCT', 'PBH',", "'WOR', 'WPO', 'WPP', 'WPX', 'WR', 'WRB', 'WRC', 'WRI', 'WRLD', 'WSM', 'WSO', 'WST', 'WTFC',", "'APA', 'HPQ', 'PNC', 'COF', 'BAX', 'TJX', 'CELG', 'DTV', 'DE', 'DHR', 'TXN', 'HAL', 'WAG',", "'BRCM', 'BRKL', 'BRKS', 'BRLI', 'BRO', 'BRS', 'BSX', 'BTH', 'BTU', 'BWA', 'BWLD', 'BWS', 'BXP',", "'SWKS', 'SWM', 'SWN', 'SWS', 'SWX', 'SWY', 'SXC', 'SXI', 'SXT', 'SYK', 'SYKE', 'SYMC', 'SYMM',", "'BTU', 'WIN', 'JOY', 'HBAN', 'TSO', 'HRS', 'LRCX', 'PNW', 'DHI', 'ARG', 'LEN', 'QEP', 'EFX',", "'NU', 'AMP', 'NTAP', 'ZMH', 'LTD', 'ADI', 'PGR', 'HST', 'FAST', 'MTB', 'HOT', 'RRC', 'HUM',", "'IPCC', 'IPCM', 'IPG', 'IPHS', 'IPI', 'IR', 'IRBT', 'IRC', 'IRF', 'IRM', 'ISCA', 'ISIL', 'ISRG',", "'NSR', 'NTAP', 'NTCT', 'NTGR', 'NTLS', 'NTRI', 'NTRS', 'NU', 'NUE', 'NUVA', 'NVDA', 'NVE', 'NVR',", "'UMPQ', 'UNF', 'UNFI', 'UNH', 'UNM', 'UNP', 'UNS', 'UNT', 'UNTD', 'UPS', 'URBN', 'URI', 'URS',", "'BIIB', 'BJRI', 'BK', 'BKE', 'BKH', 'BKI', 'BKMU', 'BKS', 'BLK', 'BLKB', 'BLL', 'BMC', 'BMI',", "'WU', 'WWD', 'WWW', 'WY', 'WYN', 'WYNN', 'X', 'XEC', 'XEL', 'XL', 'XLNX', 'XLS', 'XOM',", "'CHS', 'CI', 'CIEN', 'CINF', 
'CIR', 'CKH', 'CKP', 'CL', 'CLC', 'CLD', 'CLF', 'CLGX', 'CLH',", "'GCI', 'URBN', 'NBR', 'TEG', 'EA', 'HRL', 'SWY', 'LSI', 'TSS', 'ZION', 'HCBK', 'AIV', 'RHI',", "'WSM', 'WSO', 'WST', 'WTFC', 'WTR', 'WTS', 'WU', 'WWD', 'WWW', 'WY', 'WYN', 'WYNN', 'X',", "'BGC', 'BGFV', 'BGG', 'BGS', 'BH', 'BHE', 'BHI', 'BID', 'BIG', 'BIIB', 'BJRI', 'BK', 'BKE',", "'TER', 'TEX', 'TFX', 'TG', 'TGI', 'TGT', 'THC', 'THG', 'THO', 'THOR', 'THS', 'TIBX', 'TIE',", "'WPO', 'WPP', 'WPX', 'WR', 'WRB', 'WRC', 'WRI', 'WRLD', 'WSM', 'WSO', 'WST', 'WTFC', 'WTR',", "'AAP', 'AAPL', 'ABAX', 'ABC', 'ABFS', 'ABM', 'ABT', 'ACAT', 'ACC', 'ACE', 'ACI', 'ACIW', 'ACM',", "'AIT', 'AIV', 'AIZ', 'AJG', 'AKAM', 'AKR', 'AKRX', 'AKS', 'ALB', 'ALE', 'ALEX', 'ALGN', 'ALGT',", "'FIS', 'FISV', 'FITB', 'FIX', 'FL', 'FLIR', 'FLO', 'FLR', 'FLS', 'FMBI', 'FMC', 'FMER', 'FNB',", "'OMG', 'OMI', 'OMX', 'ONB', 'ONE', 'OPEN', 'OPLK', 'OPNT', 'ORB', 'ORCL', 'ORI', 'ORIT', 'ORLY',", "'KMI', 'STT', 'PSA', 'BHI', 'ISRG', 'GLW', 'CRM', 'ALL', 'SE', 'HCP', 'RTN', 'WLP', 'CCI',", "'HBAN', 'TSO', 'HRS', 'LRCX', 'PNW', 'DHI', 'ARG', 'LEN', 'QEP', 'EFX', 'CVH', 'CLF', 'CBG',", "'CVH', 'CLF', 'CBG', 'CINF', 'NWL', 'HSP', 'EXPE', 'XRAY', 'UNM', 'MAS', 'MWV', 'SNI', 'PWR',", "'CNC', 'CNK', 'CNL', 'CNMD', 'CNP', 'CNQR', 'CNW', 'CNX', 'COCO', 'COF', 'COG', 'COH', 'COHU',", "'WOOF', 'WOR', 'WPO', 'WPP', 'WPX', 'WR', 'WRB', 'WRC', 'WRI', 'WRLD', 'WSM', 'WSO', 'WST',", "'WYN', 'WYNN', 'X', 'XEC', 'XEL', 'XL', 'XLNX', 'XLS', 'XOM', 'XOXO', 'XRAY', 'XRX', 'XYL',", "'ENZ', 'EOG', 'EPAY', 'EPIQ', 'EPR', 'EQIX', 'EQR', 'EQT', 'EQY', 'ESE', 'ESI', 'ESIO', 'ESL',", "'ASBC', 'ASEI', 'ASGN', 'ASH', 'ASNA', 'ASTE', 'ATI', 'ATK', 'ATMI', 'ATML', 'ATNI', 'ATO', 'ATR',", "'SNH', 'SNI', 'SNPS', 'SNV', 'SNX', 'SO', 'SON', 'SONC', 'SPAR', 'SPF', 'SPG', 'SPLS', 'SPN',", "'UNS', 'UNT', 'UNTD', 'UPS', 'URBN', 'URI', 'URS', 'USB', 'USMO', 'USTR', 'UTEK', 'UTHR', 'UTI',", "'MYL', 'NAFC', 'NANO', 'NATI', 'NAVG', 'NBL', 'NBR', 'NBTB', 'NCI', 'NCIT', 'NCR', 'NCS', 
'NDAQ',", "'UBA', 'UBSI', 'UCBI', 'UDR', 'UEIC', 'UFCS', 'UFPI', 'UFS', 'UGI', 'UHS', 'UHT', 'UIL', 'UMBF',", "'MJN', 'EIX', 'EL', 'DISCA', 'HCN', 'BBBY', 'TEL', 'SRE', 'MSI', 'ROST', 'DELL', 'CTXS', 'FITB',", "'PNM', 'PNR', 'PNRA', 'PNW', 'PNY', 'POL', 'POM', 'POOL', 'POST', 'POWI', 'POWL', 'PPG', 'PPS',", "'NVE', 'NVR', 'NVTL', 'NWBI', 'NWE', 'NWL', 'NWN', 'NWSA', 'NX', 'NYB', 'NYT', 'NYX', 'O',", "'GLW', 'CRM', 'ALL', 'SE', 'HCP', 'RTN', 'WLP', 'CCI', 'JCI', 'MPC', 'MMC', 'FE', 'VTR',", "'CRL', 'CRM', 'CROX', 'CRR', 'CRS', 'CRUS', 'CRVL', 'CRY', 'CSC', 'CSCO', 'CSGS', 'CSH', 'CSL',", "'ED', 'EE', 'EFX', 'EGL', 'EGN', 'EGP', 'EHTH', 'EIG', 'EIX', 'EL', 'ELY', 'EMC', 'EME',", "'BXP', 'YHOO', 'S', 'NBL', 'NOC', 'CMI', 'CCL', 'PEG', 'INTU', 'PLD', 'SYK', 'TROW', 'COH',", "'NYX', 'CMA', 'BTU', 'WIN', 'JOY', 'HBAN', 'TSO', 'HRS', 'LRCX', 'PNW', 'DHI', 'ARG', 'LEN',", "'JJSF', 'JKHY', 'JLL', 'JNJ', 'JNPR', 'JNS', 'JOSB', 'JOY', 'JPM', 'JWN', 'K', 'KALU', 'KAMN',", "'AEE', 'BWA', 'SPLS', 'FIS', 'SRCL', 'EXPD', 'COL', 'VRSN', 'FMC', 'ADSK', 'PFG', 'WYN', 'SLM',", "'PAYX', 'PB', 'PBCT', 'PBH', 'PBI', 'PBY', 'PCAR', 'PCG', 'PCH', 'PCL', 'PCLN', 'PCP', 'PCTI',", "'ESI', 'ESIO', 'ESL', 'ESRX', 'ESS', 'ESV', 'ETFC', 'ETH', 'ETN', 'ETR', 'EV', 'EW', 'EWBC',", "'LL', 'LLL', 'LLTC', 'LLY', 'LM', 'LMNX', 'LMOS', 'LMT', 'LNC', 'LNCE', 'LNN', 'LNT', 'LO',", "'DRE', 'DRH', 'DRI', 'DRIV', 'DRQ', 'DSPG', 'DTE', 'DTSI', 'DTV', 'DUK', 'DV', 'DVA', 'DVN',", "'MNST', 'MNTA', 'MO', 'MOH', 'MOLX', 'MON', 'MOS', 'MOV', 'MPC', 'MPW', 'MPWR', 'MRCY', 'MRK',", "'MTH', 'MTRN', 'MTRX', 'MTSC', 'MTX', 'MU', 'MUR', 'MW', 'MWIV', 'MWV', 'MWW', 'MYE', 'MYL',", "'NTLS', 'NTRI', 'NTRS', 'NU', 'NUE', 'NUVA', 'NVDA', 'NVE', 'NVR', 'NVTL', 'NWBI', 'NWE', 'NWL',", "'SJM', 'AA', 'COG', 'FLR', 'DPS', 'CLX', 'RL', 'WYNN', 'BEAM', 'CNP', 'NE', 'JNPR', 'LH',", "'KS', 'KSS', 'KSU', 'KSWS', 'KWK', 'KWR', 'L', 'LAD', 'LAMR', 'LANC', 'LAWS', 'LDL', 'LDR',", "'CVX', 'CW', 'CWTR', 'CXW', 'CY', 'CYBX', 'CYH', 'CYMI', 
'CYN', 'CYT', 'D', 'DAKT', 'DAR',", "'BHI', 'ISRG', 'GLW', 'CRM', 'ALL', 'SE', 'HCP', 'RTN', 'WLP', 'CCI', 'JCI', 'MPC', 'MMC',", "'BTU', 'BWA', 'BWLD', 'BWS', 'BXP', 'BXS', 'BYD', 'BYI', 'C', 'CA', 'CAB', 'CACI', 'CAG',", "'RTN', 'RUE', 'RUTH', 'RVBD', 'RYL', 'RYN', 'S', 'SAFM', 'SAFT', 'SAH', 'SAI', 'SAM', 'SBRA',", "'HD', 'KFT', 'V', 'OXY', 'COP', 'MO', 'UTX', 'USB', 'AMGN', 'CVS', 'MMM', 'EMC', 'UNH',", "'HRS', 'LRCX', 'PNW', 'DHI', 'ARG', 'LEN', 'QEP', 'EFX', 'CVH', 'CLF', 'CBG', 'CINF', 'NWL',", "'ISIL', 'ISRG', 'IT', 'ITG', 'ITRI', 'ITT', 'ITW', 'IVAC', 'IVC', 'IVZ', 'JACK', 'JAH', 'JAKK',", "'PKI', 'PKY', 'PL', 'PLCE', 'PLCM', 'PLD', 'PLFE', 'PLL', 'PLT', 'PLXS', 'PM', 'PMC', 'PMTC',", "'CFR', 'CGNX', 'CGX', 'CHCO', 'CHD', 'CHE', 'CHG', 'CHK', 'CHRW', 'CHS', 'CI', 'CIEN', 'CINF',", "'PEI', 'PEP', 'PERY', 'PES', 'PETM', 'PETS', 'PFE', 'PFG', 'PFS', 'PG', 'PGR', 'PH', 'PHM',", "'INTU', 'IO', 'IP', 'IPAR', 'IPCC', 'IPCM', 'IPG', 'IPHS', 'IPI', 'IR', 'IRBT', 'IRC', 'IRF',", "'ED', 'PPG', 'CME', 'LYB', 'APD', 'VLO', 'EQR', 'BEN', 'ECL', 'PPL', 'AON', 'WFM', 'BXP',", "'EGL', 'EGN', 'EGP', 'EHTH', 'EIG', 'EIX', 'EL', 'ELY', 'EMC', 'EME', 'EMN', 'EMR', 'ENDP',", "'IFF', 'IFSIA', 'IGT', 'IGTE', 'IILG', 'IIVI', 'IM', 'IN', 'INDB', 'INFA', 'INGR', 'ININ', 'INT',", "'ADSK', 'PFG', 'WYN', 'SLM', 'PLL', 'TIF', 'TXT', 'XL', 'LLTC', 'WAT', 'NI', 'DRI', 'PCL',", "'AIR', 'AIRM', 'AIT', 'AIV', 'AIZ', 'AJG', 'AKAM', 'AKR', 'AKRX', 'AKS', 'ALB', 'ALE', 'ALEX',", "'CAB', 'CACI', 'CAG', 'CAH', 'CAKE', 'CALM', 'CAM', 'CAS', 'CASC', 'CASY', 'CAT', 'CATM', 'CATO',", "'EXH', 'EXLS', 'EXP', 'EXPD', 'EXPE', 'EXPO', 'EXR', 'EZPW', 'F', 'FAF', 'FARO', 'FAST', 'FBHS',", "'APD', 'APEI', 'APH', 'APOG', 'APOL', 'ARB', 'ARE', 'AREX', 'ARG', 'ARO', 'ARQL', 'ARRS', 'ARW',", "'WST', 'WTFC', 'WTR', 'WTS', 'WU', 'WWD', 'WWW', 'WY', 'WYN', 'WYNN', 'X', 'XEC', 'XEL',", "'KAMN', 'KBH', 'KBR', 'KDN', 'KELYA', 'KEX', 'KEY', 'KFY', 'KIM', 'KIRK', 'KLAC', 'KLIC', 'KMB',", "'CTSH', 'CTXS', 'CUB', 'CUZ', 
'CVBF', 'CVC', 'CVD', 'CVG', 'CVGW', 'CVH', 'CVLT', 'CVS', 'CVX',", "'SLXP', 'SM', 'SMA', 'SMCI', 'SMG', 'SMP', 'SMRT', 'SMTC', 'SNA', 'SNCR', 'SNDK', 'SNH', 'SNI',", "'TSS', 'TTC', 'TTEC', 'TTEK', 'TTI', 'TTMI', 'TTWO', 'TUES', 'TUP', 'TW', 'TWC', 'TWGP', 'TWTC',", "'ATML', 'ATNI', 'ATO', 'ATR', 'ATU', 'ATW', 'AVA', 'AVAV', 'AVB', 'AVD', 'AVID', 'AVP', 'AVT',", "'KSU', 'KSWS', 'KWK', 'KWR', 'L', 'LAD', 'LAMR', 'LANC', 'LAWS', 'LDL', 'LDR', 'LECO', 'LEG',", "'DGIT', 'DGX', 'DHI', 'DHR', 'DHX', 'DIN', 'DIOD', 'DIS', 'DISCA', 'DKS', 'DLTR', 'DLX', 'DM',", "'NCR', 'NCS', 'NDAQ', 'NDSN', 'NE', 'NEE', 'NEM', 'NEOG', 'NEU', 'NEWP', 'NFG', 'NFLX', 'NFP',", "'PB', 'PBCT', 'PBH', 'PBI', 'PBY', 'PCAR', 'PCG', 'PCH', 'PCL', 'PCLN', 'PCP', 'PCTI', 'PDCE',", "'ROG', 'ROK', 'ROL', 'ROP', 'ROSE', 'ROST', 'ROVI', 'RPM', 'RRC', 'RRD', 'RRGB', 'RS', 'RSG',", "'WAFD', 'WAG', 'WAT', 'WBS', 'WBSN', 'WCG', 'WCN', 'WDC', 'WDFC', 'WDR', 'WEC', 'WEN', 'WERN',", "'ZMH', 'LTD', 'ADI', 'PGR', 'HST', 'FAST', 'MTB', 'HOT', 'RRC', 'HUM', 'CERN', 'CAG', 'IVZ',", "'BCR', 'BSX', 'KLAC', 'AEE', 'BWA', 'SPLS', 'FIS', 'SRCL', 'EXPD', 'COL', 'VRSN', 'FMC', 'ADSK',", "'SVU', 'SWI', 'SWK', 'SWKS', 'SWM', 'SWN', 'SWS', 'SWX', 'SWY', 'SXC', 'SXI', 'SXT', 'SYK',", "'PHM', 'PII', 'PJC', 'PKE', 'PKG', 'PKI', 'PKY', 'PL', 'PLCE', 'PLCM', 'PLD', 'PLFE', 'PLL',", "'SIGM', 'SIVB', 'SJI', 'SJM', 'SKS', 'SKT', 'SKX', 'SKYW', 'SLAB', 'SLB', 'SLG', 'SLGN', 'SLH',", "'BWA', 'SPLS', 'FIS', 'SRCL', 'EXPD', 'COL', 'VRSN', 'FMC', 'ADSK', 'PFG', 'WYN', 'SLM', 'PLL',", "'WWW', 'WY', 'WYN', 'WYNN', 'X', 'XEC', 'XEL', 'XL', 'XLNX', 'XLS', 'XOM', 'XOXO', 'XRAY',", "'AMT', 'AMZN', 'AN', 'ANDE', 'ANF', 'ANN', 'ANR', 'ANSS', 'AOL', 'AON', 'AOS', 'APA', 'APC',", "'STR', 'STRA', 'STRI', 'STT', 'STX', 'STZ', 'SUN', 'SUP', 'SUPX', 'SUSQ', 'SVU', 'SWI', 'SWK',", "'ADS', 'ADSK', 'ADTN', 'ADVS', 'AEE', 'AEGN', 'AEIS', 'AEO', 'AEP', 'AES', 'AET', 'AF', 'AFAM',", "'SYNA', 'SYY', 'T', 'TAP', 'TBI', 'TCB', 'TCBI', 'TCO', 'TDC', 'TDS', 
'TDW', 'TDY', 'TE',", "'KRA', 'KRC', 'KRG', 'KS', 'KSS', 'KSU', 'KSWS', 'KWK', 'KWR', 'L', 'LAD', 'LAMR', 'LANC',", "'IPHS', 'IPI', 'IR', 'IRBT', 'IRC', 'IRF', 'IRM', 'ISCA', 'ISIL', 'ISRG', 'IT', 'ITG', 'ITRI',", "'GPC', 'GPI', 'GPN', 'GPOR', 'GPS', 'GS', 'GSM', 'GT', 'GTAT', 'GTIV', 'GTY', 'GVA', 'GWW',", "'APOG', 'APOL', 'ARB', 'ARE', 'AREX', 'ARG', 'ARO', 'ARQL', 'ARRS', 'ARW', 'ASBC', 'ASEI', 'ASGN',", "'NI', 'NILE', 'NJR', 'NKE', 'NNN', 'NOC', 'NOV', 'NP', 'NPBC', 'NPK', 'NPO', 'NRG', 'NSC',", "'SPG', 'SPLS', 'SPN', 'SPPI', 'SPTN', 'SPW', 'SRCL', 'SRDX', 'SRE', 'SSD', 'SSI', 'SSP', 'SSS',", "'LH', 'EQT', 'CA', 'DVA', 'XLNX', 'EMN', 'SIAL', 'WEC', 'CCE', 'WDC', 'LIFE', 'MCO', 'HIG',", "'APC', 'APD', 'APEI', 'APH', 'APOG', 'APOL', 'ARB', 'ARE', 'AREX', 'ARG', 'ARO', 'ARQL', 'ARRS',", "'BGS', 'BH', 'BHE', 'BHI', 'BID', 'BIG', 'BIIB', 'BJRI', 'BK', 'BKE', 'BKH', 'BKI', 'BKMU',", "'TUES', 'TUP', 'TW', 'TWC', 'TWGP', 'TWTC', 'TWX', 'TXI', 'TXN', 'TXRH', 'TXT', 'TYC', 'TYL',", "'AVT', 'AVY', 'AWR', 'AXE', 'AXP', 'AYI', 'AZO', 'AZZ', 'B', 'BA', 'BABY', 'BAC', 'BAS',", "'KSS', 'FTI', 'RHT', 'WU', 'STX', 'DOV', 'ALTR', 'WPI', 'HSY', 'ROP', 'PAYX', 'GPS', 'SNDK',", "'CRI', 'CRK', 'CRL', 'CRM', 'CROX', 'CRR', 'CRS', 'CRUS', 'CRVL', 'CRY', 'CSC', 'CSCO', 'CSGS',", "'TDS', 'TDW', 'TDY', 'TE', 'TECD', 'TECH', 'TEG', 'TEL', 'TER', 'TEX', 'TFX', 'TG', 'TGI',", "'HBHC', 'HBI', 'HCBK', 'HCC', 'HCN', 'HCP', 'HCSG', 'HD', 'HE', 'HES', 'HF', 'HFC', 'HGR',", "'HD', 'HE', 'HES', 'HF', 'HFC', 'HGR', 'HHS', 'HI', 'HIBB', 'HIG', 'HII', 'HITK', 'HITT',", "'APOL', 'PDCO', 'JDSU', 'ANF', 'PBI', 'NDAQ', 'X', 'SEE', 'TER', 'THC', 'GME', 'GNW', 'FHN',", "'CLP', 'CLW', 'CLX', 'CMA', 'CMC', 'CMCSA', 'CME', 'CMG', 'CMI', 'CMN', 'CMP', 'CMS', 'CMTL',", "'KFY', 'KIM', 'KIRK', 'KLAC', 'KLIC', 'KMB', 'KMI', 'KMPR', 'KMT', 'KMX', 'KND', 'KNX', 'KO',", "'STJ', 'AVB', 'L', 'IR', 'PXD', 'KR', 'SWK', 'K', 'TDC', 'SHW', 'ESV', 'SYMC', 'PH',", "'URS', 'USB', 'USMO', 'USTR', 'UTEK', 'UTHR', 'UTI', 'UTIW', 
'UTX', 'UVV', 'V', 'VAL', 'VAR',", "'MCO', 'MCRI', 'MCRL', 'MCRS', 'MCS', 'MCY', 'MD', 'MDC', 'MDCO', 'MDP', 'MDRX', 'MDSO', 'MDT',", "'SWN', 'MAT', 'CBE', 'NU', 'AMP', 'NTAP', 'ZMH', 'LTD', 'ADI', 'PGR', 'HST', 'FAST', 'MTB',", "'LAMR', 'LANC', 'LAWS', 'LDL', 'LDR', 'LECO', 'LEG', 'LEN', 'LFUS', 'LG', 'LH', 'LHCG', 'LHO',", "'CBT', 'CBU', 'CCC', 'CCE', 'CCI', 'CCL', 'CCMP', 'CCRN', 'CDI', 'CDNS', 'CDR', 'CEB', 'CEC',", "'HSH', 'HSIC', 'HSII', 'HSNI', 'HSP', 'HST', 'HSTM', 'HSY', 'HTLD', 'HTSI', 'HUBG', 'HUM', 'HVT',", "'BIG', 'AN', 'WPO', 'LXK', 'ANR', 'FSLR', 'DV', 'TIE'] ALL_TICKERS = ['A', 'AA', 'AAN',", "'MCRS', 'MCS', 'MCY', 'MD', 'MDC', 'MDCO', 'MDP', 'MDRX', 'MDSO', 'MDT', 'MDU', 'MEAS', 'MED',", "'OMC', 'OMCL', 'OMG', 'OMI', 'OMX', 'ONB', 'ONE', 'OPEN', 'OPLK', 'OPNT', 'ORB', 'ORCL', 'ORI',", "'WGL', 'WGO', 'WHR', 'WIBC', 'WIN', 'WIRE', 'WLP', 'WM', 'WMB', 'WMS', 'WMT', 'WOOF', 'WOR',", "'STI', 'LO', 'M', 'MJN', 'EIX', 'EL', 'DISCA', 'HCN', 'BBBY', 'TEL', 'SRE', 'MSI', 'ROST',", "'L', 'LAD', 'LAMR', 'LANC', 'LAWS', 'LDL', 'LDR', 'LECO', 'LEG', 'LEN', 'LFUS', 'LG', 'LH',", "'MA', 'MAA', 'MAC', 'MAN', 'MANH', 'MANT', 'MAR', 'MAS', 'MASI', 'MAT', 'MATW', 'MATX', 'MCD',", "'BKS', 'BLK', 'BLKB', 'BLL', 'BMC', 'BMI', 'BMR', 'BMS', 'BMY', 'BOBE', 'BOH', 'BPFH', 'BR',", "'FHN', 'FICO', 'FII', 'FINL', 'FIRE', 'FIS', 'FISV', 'FITB', 'FIX', 'FL', 'FLIR', 'FLO', 'FLR',", "'OXY', 'OZRK', 'PACW', 'PAY', 'PAYX', 'PB', 'PBCT', 'PBH', 'PBI', 'PBY', 'PCAR', 'PCG', 'PCH',", "'CBK', 'CBM', 'CBOE', 'CBR', 'CBRL', 'CBS', 'CBSH', 'CBST', 'CBT', 'CBU', 'CCC', 'CCE', 'CCI',", "'SWN', 'SWS', 'SWX', 'SWY', 'SXC', 'SXI', 'SXT', 'SYK', 'SYKE', 'SYMC', 'SYMM', 'SYNA', 'SYY',", "'SNDK', 'DTE', 'PRGO', 'RF', 'NTRS', 'DGX', 'CMG', 'FISV', 'ORLY', 'MUR', 'OKE', 'MYL', 'BF.B',", "'AF', 'AFAM', 'AFFX', 'AFG', 'AFL', 'AGCO', 'AGN', 'AGP', 'AGYS', 'AHL', 'AHS', 'AIG', 'AINV',", "'CHD', 'CHE', 'CHG', 'CHK', 'CHRW', 'CHS', 'CI', 'CIEN', 'CINF', 'CIR', 'CKH', 'CKP', 'CL',", "'PSA', 'PSB', 'PSEC', 
'PSEM', 'PSS', 'PSSI', 'PSX', 'PTEN', 'PULS', 'PVA', 'PVH', 'PVTB', 'PWR',", "'WGO', 'WHR', 'WIBC', 'WIN', 'WIRE', 'WLP', 'WM', 'WMB', 'WMS', 'WMT', 'WOOF', 'WOR', 'WPO',", "'ALXN', 'GD', 'WMB', 'CBS', 'CSX', 'TMO', 'AEP', 'CTSH', 'MRO', 'DFS', 'NSC', 'MCK', 'CB',", "'CELG', 'DTV', 'DE', 'DHR', 'TXN', 'HAL', 'WAG', 'PX', 'PCLN', 'EXC', 'D', 'EOG', 'YUM',", "'PCG', 'PCH', 'PCL', 'PCLN', 'PCP', 'PCTI', 'PDCE', 'PDCO', 'PEET', 'PEG', 'PEI', 'PEP', 'PERY',", "'CMG', 'CMI', 'CMN', 'CMP', 'CMS', 'CMTL', 'CNC', 'CNK', 'CNL', 'CNMD', 'CNP', 'CNQR', 'CNW',", "'ALGT', 'ALK', 'ALL', 'ALOG', 'ALTR', 'ALXN', 'AM', 'AMAT', 'AMCX', 'AMD', 'AME', 'AMED', 'AMG',", "'NTRS', 'NU', 'NUE', 'NUVA', 'NVDA', 'NVE', 'NVR', 'NVTL', 'NWBI', 'NWE', 'NWL', 'NWN', 'NWSA',", "'CCC', 'CCE', 'CCI', 'CCL', 'CCMP', 'CCRN', 'CDI', 'CDNS', 'CDR', 'CEB', 'CEC', 'CECO', 'CELG',", "'PCP', 'BBT', 'MS', 'BLK', 'DVN', 'AFL', 'ALXN', 'GD', 'WMB', 'CBS', 'CSX', 'TMO', 'AEP',", "'EWBC', 'EXAR', 'EXC', 'EXH', 'EXLS', 'EXP', 'EXPD', 'EXPE', 'EXPO', 'EXR', 'EZPW', 'F', 'FAF',", "'CVGW', 'CVH', 'CVLT', 'CVS', 'CVX', 'CW', 'CWTR', 'CXW', 'CY', 'CYBX', 'CYH', 'CYMI', 'CYN',", "'KEX', 'KEY', 'KFY', 'KIM', 'KIRK', 'KLAC', 'KLIC', 'KMB', 'KMI', 'KMPR', 'KMT', 'KMX', 'KND',", "'MTSC', 'MTX', 'MU', 'MUR', 'MW', 'MWIV', 'MWV', 'MWW', 'MYE', 'MYL', 'NAFC', 'NANO', 'NATI',", "'RHT', 'WU', 'STX', 'DOV', 'ALTR', 'WPI', 'HSY', 'ROP', 'PAYX', 'GPS', 'SNDK', 'DTE', 'PRGO',", "'ASGN', 'ASH', 'ASNA', 'ASTE', 'ATI', 'ATK', 'ATMI', 'ATML', 'ATNI', 'ATO', 'ATR', 'ATU', 'ATW',", "'PBI', 'PBY', 'PCAR', 'PCG', 'PCH', 'PCL', 'PCLN', 'PCP', 'PCTI', 'PDCE', 'PDCO', 'PEET', 'PEG',", "'PBCT', 'PBH', 'PBI', 'PBY', 'PCAR', 'PCG', 'PCH', 'PCL', 'PCLN', 'PCP', 'PCTI', 'PDCE', 'PDCO',", "'SCHL', 'SCHW', 'SCI', 'SCL', 'SCOR', 'SCSC', 'SCSS', 'SE', 'SEE', 'SEIC', 'SENEA', 'SF', 'SFD',", "'VAR', 'BLL', 'FLS', 'LUV', 'KMX', 'FDO', 'WHR', 'MCHP', 'SCG', 'DNR', 'CFN', 'CPB', 'CMS',", "'BMS', 'AVY', 'HAR', 'OI', 'AIZ', 'NFLX', 'DF', 'FLIR', 'GT', 'LM', 
'APOL', 'PDCO', 'JDSU',", "'TCB', 'TCBI', 'TCO', 'TDC', 'TDS', 'TDW', 'TDY', 'TE', 'TECD', 'TECH', 'TEG', 'TEL', 'TER',", "'EQT', 'EQY', 'ESE', 'ESI', 'ESIO', 'ESL', 'ESRX', 'ESS', 'ESV', 'ETFC', 'ETH', 'ETN', 'ETR',", "'JLL', 'JNJ', 'JNPR', 'JNS', 'JOSB', 'JOY', 'JPM', 'JWN', 'K', 'KALU', 'KAMN', 'KBH', 'KBR',", "'OPNT', 'ORB', 'ORCL', 'ORI', 'ORIT', 'ORLY', 'ORN', 'OSG', 'OSIS', 'OSK', 'OXM', 'OXY', 'OZRK',", "'BR', 'BRC', 'BRCM', 'BRKL', 'BRKS', 'BRLI', 'BRO', 'BRS', 'BSX', 'BTH', 'BTU', 'BWA', 'BWLD',", "'RHT', 'RJF', 'RKT', 'RL', 'RLI', 'RMD', 'ROCK', 'ROG', 'ROK', 'ROL', 'ROP', 'ROSE', 'ROST',", "'TYL', 'TYPE', 'UA', 'UBA', 'UBSI', 'UCBI', 'UDR', 'UEIC', 'UFCS', 'UFPI', 'UFS', 'UGI', 'UHS',", "'ETFC', 'AMD', 'R', 'FII', 'RRD', 'BIG', 'AN', 'WPO', 'LXK', 'ANR', 'FSLR', 'DV', 'TIE']", "'COF', 'COG', 'COH', 'COHU', 'COL', 'COLB', 'COO', 'COP', 'COST', 'COV', 'CPB', 'CPLA', 'CPRT',", "'WERN', 'WFC', 'WFM', 'WGL', 'WGO', 'WHR', 'WIBC', 'WIN', 'WIRE', 'WLP', 'WM', 'WMB', 'WMS',", "'HBI', 'HCBK', 'HCC', 'HCN', 'HCP', 'HCSG', 'HD', 'HE', 'HES', 'HF', 'HFC', 'HGR', 'HHS',", "'TIE'] ALL_TICKERS = ['A', 'AA', 'AAN', 'AAON', 'AAP', 'AAPL', 'ABAX', 'ABC', 'ABFS', 'ABM',", "'TXT', 'XL', 'LLTC', 'WAT', 'NI', 'DRI', 'PCL', 'TAP', 'LLL', 'AVP', 'CNX', 'AES', 'AKAM',", "'PCAR', 'WY', 'SCHW', 'VFC', 'WM', 'CF', 'AZO', 'AMAT', 'CAM', 'VNO', 'OMC', 'CI', 'XEL',", "'MUR', 'OKE', 'MYL', 'BF.B', 'MAR', 'ROK', 'CHK', 'ABC', 'ICE', 'HOG', 'XRX', 'APH', 'GPC',", "'QNST', 'QSFT', 'QSII', 'R', 'RAH', 'RAI', 'RAX', 'RBC', 'RBCN', 'RBN', 'RCII', 'RDC', 'RE',", "'ARRS', 'ARW', 'ASBC', 'ASEI', 'ASGN', 'ASH', 'ASNA', 'ASTE', 'ATI', 'ATK', 'ATMI', 'ATML', 'ATNI',", "'BRO', 'BRS', 'BSX', 'BTH', 'BTU', 'BWA', 'BWLD', 'BWS', 'BXP', 'BXS', 'BYD', 'BYI', 'C',", "'NCI', 'NCIT', 'NCR', 'NCS', 'NDAQ', 'NDSN', 'NE', 'NEE', 'NEM', 'NEOG', 'NEU', 'NEWP', 'NFG',", "'HOTT', 'HP', 'HPQ', 'HPT', 'HPY', 'HR', 'HRB', 'HRC', 'HRL', 'HRS', 'HSC', 'HSH', 'HSIC',", "'PEG', 'INTU', 'PLD', 'SYK', 'TROW', 'COH', 
'ADBE', 'HES', 'ETN', 'MOS', 'IP', 'BDX', 'MHP',", "'WTFC', 'WTR', 'WTS', 'WU', 'WWD', 'WWW', 'WY', 'WYN', 'WYNN', 'X', 'XEC', 'XEL', 'XL',", "'LXU', 'LYV', 'LZB', 'M', 'MA', 'MAA', 'MAC', 'MAN', 'MANH', 'MANT', 'MAR', 'MAS', 'MASI',", "'OMI', 'OMX', 'ONB', 'ONE', 'OPEN', 'OPLK', 'OPNT', 'ORB', 'ORCL', 'ORI', 'ORIT', 'ORLY', 'ORN',", "'WU', 'STX', 'DOV', 'ALTR', 'WPI', 'HSY', 'ROP', 'PAYX', 'GPS', 'SNDK', 'DTE', 'PRGO', 'RF',", "'LYV', 'LZB', 'M', 'MA', 'MAA', 'MAC', 'MAN', 'MANH', 'MANT', 'MAR', 'MAS', 'MASI', 'MAT',", "'AON', 'AOS', 'APA', 'APC', 'APD', 'APEI', 'APH', 'APOG', 'APOL', 'ARB', 'ARE', 'AREX', 'ARG',", "'PSX', 'COV', 'ADP', 'AMT', 'AGN', 'NEM', 'BK', 'TRV', 'TYC', 'GIS', 'ITW', 'ACE', 'PRU',", "'GIFI', 'GILD', 'GIS', 'GLW', 'GMCR', 'GME', 'GMT', 'GNCMA', 'GNTX', 'GNW', 'GOOG', 'GPC', 'GPI',", "'SCG', 'SCHL', 'SCHW', 'SCI', 'SCL', 'SCOR', 'SCSC', 'SCSS', 'SE', 'SEE', 'SEIC', 'SENEA', 'SF',", "'VOXX', 'VPFG', 'VPHM', 'VRSN', 'VRTS', 'VRTU', 'VRTX', 'VSAT', 'VSH', 'VSI', 'VTR', 'VVC', 'VZ',", "'CMTL', 'CNC', 'CNK', 'CNL', 'CNMD', 'CNP', 'CNQR', 'CNW', 'CNX', 'COCO', 'COF', 'COG', 'COH',", "'TECH', 'TEG', 'TEL', 'TER', 'TEX', 'TFX', 'TG', 'TGI', 'TGT', 'THC', 'THG', 'THO', 'THOR',", "'KND', 'KNX', 'KO', 'KOP', 'KOPN', 'KR', 'KRA', 'KRC', 'KRG', 'KS', 'KSS', 'KSU', 'KSWS',", "'MHP', 'STI', 'LO', 'M', 'MJN', 'EIX', 'EL', 'DISCA', 'HCN', 'BBBY', 'TEL', 'SRE', 'MSI',", "'BELFB', 'BEN', 'BFS', 'BGC', 'BGFV', 'BGG', 'BGS', 'BH', 'BHE', 'BHI', 'BID', 'BIG', 'BIIB',", "'FAST', 'MTB', 'HOT', 'RRC', 'HUM', 'CERN', 'CAG', 'IVZ', 'DLTR', 'KSS', 'FTI', 'RHT', 'WU',", "'EFX', 'CVH', 'CLF', 'CBG', 'CINF', 'NWL', 'HSP', 'EXPE', 'XRAY', 'UNM', 'MAS', 'MWV', 'SNI',", "'AEIS', 'AEO', 'AEP', 'AES', 'AET', 'AF', 'AFAM', 'AFFX', 'AFG', 'AFL', 'AGCO', 'AGN', 'AGP',", "'SLM', 'SLXP', 'SM', 'SMA', 'SMCI', 'SMG', 'SMP', 'SMRT', 'SMTC', 'SNA', 'SNCR', 'SNDK', 'SNH',", "'HSTM', 'HSY', 'HTLD', 'HTSI', 'HUBG', 'HUM', 'HVT', 'HW', 'HWAY', 'HWKN', 'HZO', 'IART', 'IBKR',", "'LUK', 
'CTAS', 'HAS', 'POM', 'PBCT', 'NFX', 'RDC', 'SNA', 'GCI', 'URBN', 'NBR', 'TEG', 'EA',", "'BAC', 'DIS', 'MCD', 'AMZN', 'HD', 'KFT', 'V', 'OXY', 'COP', 'MO', 'UTX', 'USB', 'AMGN',", "'DTE', 'PRGO', 'RF', 'NTRS', 'DGX', 'CMG', 'FISV', 'ORLY', 'MUR', 'OKE', 'MYL', 'BF.B', 'MAR',", "'QLGC', 'QNST', 'QSFT', 'QSII', 'R', 'RAH', 'RAI', 'RAX', 'RBC', 'RBCN', 'RBN', 'RCII', 'RDC',", "'HOMB', 'HON', 'HOS', 'HOT', 'HOTT', 'HP', 'HPQ', 'HPT', 'HPY', 'HR', 'HRB', 'HRC', 'HRL',", "'BRCM', 'ED', 'PPG', 'CME', 'LYB', 'APD', 'VLO', 'EQR', 'BEN', 'ECL', 'PPL', 'AON', 'WFM',", "'DBD', 'DCI', 'DCOM', 'DD', 'DDD', 'DE', 'DECK', 'DEL', 'DELL', 'DF', 'DFS', 'DGII', 'DGIT',", "'SLB', 'SLG', 'SLGN', 'SLH', 'SLM', 'SLXP', 'SM', 'SMA', 'SMCI', 'SMG', 'SMP', 'SMRT', 'SMTC',", "'PBH', 'PBI', 'PBY', 'PCAR', 'PCG', 'PCH', 'PCL', 'PCLN', 'PCP', 'PCTI', 'PDCE', 'PDCO', 'PEET',", "'UTX', 'USB', 'AMGN', 'CVS', 'MMM', 'EMC', 'UNH', 'BMY', 'UNP', 'CAT', 'EBAY', 'AXP', 'UPS',", "'MLHR', 'MLI', 'MLM', 'MMC', 'MMM', 'MMS', 'MMSI', 'MNRO', 'MNST', 'MNTA', 'MO', 'MOH', 'MOLX',", "'PVA', 'PVH', 'PVTB', 'PWR', 'PX', 'PXD', 'PXP', 'PZZA', 'QCOM', 'QCOR', 'QEP', 'QLGC', 'QNST',", "'BBOX', 'BBT', 'BBY', 'BC', 'BCO', 'BCOR', 'BCPC', 'BCR', 'BDC', 'BDX', 'BEAM', 'BEAV', 'BELFB',", "'DO', 'DOV', 'DOW', 'DPS', 'DRC', 'DRE', 'DRH', 'DRI', 'DRIV', 'DRQ', 'DSPG', 'DTE', 'DTSI',", "'RRC', 'HUM', 'CERN', 'CAG', 'IVZ', 'DLTR', 'KSS', 'FTI', 'RHT', 'WU', 'STX', 'DOV', 'ALTR',", "'ICE', 'ICON', 'ICUI', 'IDA', 'IDTI', 'IDXX', 'IEX', 'IFF', 'IFSIA', 'IGT', 'IGTE', 'IILG', 'IIVI',", "'MRK', 'VZ', 'WMT', 'ORCL', 'INTC', 'PEP', 'ABT', 'QCOM', 'CSCO', 'SLB', 'C', 'CMCSA', 'BAC',", "'VFC', 'WM', 'CF', 'AZO', 'AMAT', 'CAM', 'VNO', 'OMC', 'CI', 'XEL', 'A', 'CAH', 'AET',", "'MYL', 'BF.B', 'MAR', 'ROK', 'CHK', 'ABC', 'ICE', 'HOG', 'XRX', 'APH', 'GPC', 'CHRW', 'SJM',", "'AMP', 'NTAP', 'ZMH', 'LTD', 'ADI', 'PGR', 'HST', 'FAST', 'MTB', 'HOT', 'RRC', 'HUM', 'CERN',", "'DLTR', 'KSS', 'FTI', 'RHT', 'WU', 'STX', 'DOV', 'ALTR', 'WPI', 'HSY', 
'ROP', 'PAYX', 'GPS',", "'TCBI', 'TCO', 'TDC', 'TDS', 'TDW', 'TDY', 'TE', 'TECD', 'TECH', 'TEG', 'TEL', 'TER', 'TEX',", "'TSRA', 'TSS', 'TTC', 'TTEC', 'TTEK', 'TTI', 'TTMI', 'TTWO', 'TUES', 'TUP', 'TW', 'TWC', 'TWGP',", "'SUN', 'TMK', 'FTR', 'NRG', 'IPG', 'IFF', 'GAS', 'STZ', 'HRB', 'XYL', 'TSN', 'FOSL', 'DO',", "'MCY', 'MD', 'MDC', 'MDCO', 'MDP', 'MDRX', 'MDSO', 'MDT', 'MDU', 'MEAS', 'MED', 'MEI', 'MENT',", "'EMN', 'SIAL', 'WEC', 'CCE', 'WDC', 'LIFE', 'MCO', 'HIG', 'JWN', 'FRX', 'MNST', 'FFIV', 'NVDA',", "'CREE', 'CRI', 'CRK', 'CRL', 'CRM', 'CROX', 'CRR', 'CRS', 'CRUS', 'CRVL', 'CRY', 'CSC', 'CSCO',", "'BIG', 'BIIB', 'BJRI', 'BK', 'BKE', 'BKH', 'BKI', 'BKMU', 'BKS', 'BLK', 'BLKB', 'BLL', 'BMC',", "'CB', 'KMI', 'STT', 'PSA', 'BHI', 'ISRG', 'GLW', 'CRM', 'ALL', 'SE', 'HCP', 'RTN', 'WLP',", "'ABFS', 'ABM', 'ABT', 'ACAT', 'ACC', 'ACE', 'ACI', 'ACIW', 'ACM', 'ACN', 'ACO', 'ACXM', 'ADBE',", "'CLC', 'CLD', 'CLF', 'CLGX', 'CLH', 'CLI', 'CLMS', 'CLP', 'CLW', 'CLX', 'CMA', 'CMC', 'CMCSA',", "'MTD', 'MTH', 'MTRN', 'MTRX', 'MTSC', 'MTX', 'MU', 'MUR', 'MW', 'MWIV', 'MWV', 'MWW', 'MYE',", "'UHS', 'UHT', 'UIL', 'UMBF', 'UMPQ', 'UNF', 'UNFI', 'UNH', 'UNM', 'UNP', 'UNS', 'UNT', 'UNTD',", "'MD', 'MDC', 'MDCO', 'MDP', 'MDRX', 'MDSO', 'MDT', 'MDU', 'MEAS', 'MED', 'MEI', 'MENT', 'MET',", "'TXN', 'HAL', 'WAG', 'PX', 'PCLN', 'EXC', 'D', 'EOG', 'YUM', 'NEE', 'TWC', 'PSX', 'COV',", "'GMT', 'GNCMA', 'GNTX', 'GNW', 'GOOG', 'GPC', 'GPI', 'GPN', 'GPOR', 'GPS', 'GS', 'GSM', 'GT',", "'T', 'GOOG', 'PG', 'JNJ', 'PFE', 'WFC', 'BRK.B', 'JPM', 'PM', 'KO', 'MRK', 'VZ', 'WMT',", "'UGI', 'UHS', 'UHT', 'UIL', 'UMBF', 'UMPQ', 'UNF', 'UNFI', 'UNH', 'UNM', 'UNP', 'UNS', 'UNT',", "'CATY', 'CB', 'CBB', 'CBE', 'CBEY', 'CBG', 'CBK', 'CBM', 'CBOE', 'CBR', 'CBRL', 'CBS', 'CBSH',", "'SSP', 'SSS', 'STBA', 'STC', 'STE', 'STI', 'STJ', 'STL', 'STLD', 'STMP', 'STR', 'STRA', 'STRI',", "'AN', 'WPO', 'LXK', 'ANR', 'FSLR', 'DV', 'TIE'] ALL_TICKERS = ['A', 'AA', 'AAN', 'AAON',", "'TRV', 'TYC', 'GIS', 'ITW', 'ACE', 'PRU', 
'VIAB', 'CTL', 'LMT', 'FDX', 'PCP', 'BBT', 'MS',", "'DAKT', 'DAR', 'DBD', 'DCI', 'DCOM', 'DD', 'DDD', 'DE', 'DECK', 'DEL', 'DELL', 'DF', 'DFS',", "'USB', 'USMO', 'USTR', 'UTEK', 'UTHR', 'UTI', 'UTIW', 'UTX', 'UVV', 'V', 'VAL', 'VAR', 'VCI',", "'SYK', 'SYKE', 'SYMC', 'SYMM', 'SYNA', 'SYY', 'T', 'TAP', 'TBI', 'TCB', 'TCBI', 'TCO', 'TDC',", "'AKR', 'AKRX', 'AKS', 'ALB', 'ALE', 'ALEX', 'ALGN', 'ALGT', 'ALK', 'ALL', 'ALOG', 'ALTR', 'ALXN',", "'MCD', 'MCF', 'MCHP', 'MCK', 'MCO', 'MCRI', 'MCRL', 'MCRS', 'MCS', 'MCY', 'MD', 'MDC', 'MDCO',", "'NVTL', 'NWBI', 'NWE', 'NWL', 'NWN', 'NWSA', 'NX', 'NYB', 'NYT', 'NYX', 'O', 'OCR', 'ODFL',", "'VIVO', 'VLO', 'VLTR', 'VLY', 'VMC', 'VMI', 'VNO', 'VOXX', 'VPFG', 'VPHM', 'VRSN', 'VRTS', 'VRTU',", "'VFC', 'VIAB', 'VICR', 'VIVO', 'VLO', 'VLTR', 'VLY', 'VMC', 'VMI', 'VNO', 'VOXX', 'VPFG', 'VPHM',", "'OXM', 'OXY', 'OZRK', 'PACW', 'PAY', 'PAYX', 'PB', 'PBCT', 'PBH', 'PBI', 'PBY', 'PCAR', 'PCG',", "'SMCI', 'SMG', 'SMP', 'SMRT', 'SMTC', 'SNA', 'SNCR', 'SNDK', 'SNH', 'SNI', 'SNPS', 'SNV', 'SNX',", "'BYD', 'BYI', 'C', 'CA', 'CAB', 'CACI', 'CAG', 'CAH', 'CAKE', 'CALM', 'CAM', 'CAS', 'CASC',", "'HSC', 'HSH', 'HSIC', 'HSII', 'HSNI', 'HSP', 'HST', 'HSTM', 'HSY', 'HTLD', 'HTSI', 'HUBG', 'HUM',", "'LLL', 'AVP', 'CNX', 'AES', 'AKAM', 'LNC', 'VAR', 'BLL', 'FLS', 'LUV', 'KMX', 'FDO', 'WHR',", "'MOS', 'IP', 'BDX', 'MHP', 'STI', 'LO', 'M', 'MJN', 'EIX', 'EL', 'DISCA', 'HCN', 'BBBY',", "'QEP', 'QLGC', 'QNST', 'QSFT', 'QSII', 'R', 'RAH', 'RAI', 'RAX', 'RBC', 'RBCN', 'RBN', 'RCII',", "'AVY', 'AWR', 'AXE', 'AXP', 'AYI', 'AZO', 'AZZ', 'B', 'BA', 'BABY', 'BAC', 'BAS', 'BAX',", "'FLR', 'FLS', 'FMBI', 'FMC', 'FMER', 'FNB', 'FNF', 'FNFG', 'FNGN', 'FNP', 'FOR', 'FORR', 'FOSL',", "'MO', 'UTX', 'USB', 'AMGN', 'CVS', 'MMM', 'EMC', 'UNH', 'BMY', 'UNP', 'CAT', 'EBAY', 'AXP',", "'AZO', 'AMAT', 'CAM', 'VNO', 'OMC', 'CI', 'XEL', 'A', 'CAH', 'AET', 'STJ', 'AVB', 'L',", "'SE', 'SEE', 'SEIC', 'SENEA', 'SF', 'SFD', 'SFG', 'SFNC', 'SFY', 'SGMS', 'SGY', 'SHAW', 'SHFL',", 
"'MCHP', 'SCG', 'DNR', 'CFN', 'CPB', 'CMS', 'VMC', 'MU', 'BMC', 'NYX', 'CMA', 'BTU', 'WIN',", "'NCIT', 'NCR', 'NCS', 'NDAQ', 'NDSN', 'NE', 'NEE', 'NEM', 'NEOG', 'NEU', 'NEWP', 'NFG', 'NFLX',", "'CRY', 'CSC', 'CSCO', 'CSGS', 'CSH', 'CSL', 'CSTR', 'CSX', 'CTAS', 'CTL', 'CTS', 'CTSH', 'CTXS',", "'TLAB', 'TMK', 'TMO', 'TMP', 'TNC', 'TOL', 'TPX', 'TQNT', 'TR', 'TRAK', 'TRIP', 'TRLG', 'TRMB',", "'SWK', 'K', 'TDC', 'SHW', 'ESV', 'SYMC', 'PH', 'GWW', 'EW', 'ETR', 'NUE', 'SWN', 'MAT',", "'ATNI', 'ATO', 'ATR', 'ATU', 'ATW', 'AVA', 'AVAV', 'AVB', 'AVD', 'AVID', 'AVP', 'AVT', 'AVY',", "'VNO', 'VOXX', 'VPFG', 'VPHM', 'VRSN', 'VRTS', 'VRTU', 'VRTX', 'VSAT', 'VSH', 'VSI', 'VTR', 'VVC',", "'DO', 'BBY', 'LUK', 'CTAS', 'HAS', 'POM', 'PBCT', 'NFX', 'RDC', 'SNA', 'GCI', 'URBN', 'NBR',", "'ASTE', 'ATI', 'ATK', 'ATMI', 'ATML', 'ATNI', 'ATO', 'ATR', 'ATU', 'ATW', 'AVA', 'AVAV', 'AVB',", "'MDT', 'MDU', 'MEAS', 'MED', 'MEI', 'MENT', 'MET', 'MFB', 'MGAM', 'MGLN', 'MHK', 'MHO', 'MIG',", "'ACIW', 'ACM', 'ACN', 'ACO', 'ACXM', 'ADBE', 'ADI', 'ADM', 'ADP', 'ADS', 'ADSK', 'ADTN', 'ADVS',", "'TG', 'TGI', 'TGT', 'THC', 'THG', 'THO', 'THOR', 'THS', 'TIBX', 'TIE', 'TIF', 'TJX', 'TKR',", "'PRU', 'VIAB', 'CTL', 'LMT', 'FDX', 'PCP', 'BBT', 'MS', 'BLK', 'DVN', 'AFL', 'ALXN', 'GD',", "'JAH', 'JAKK', 'JBHT', 'JBL', 'JBLU', 'JBT', 'JCI', 'JCOM', 'JCP', 'JDAS', 'JDSU', 'JEC', 'JEF',", "'ATMI', 'ATML', 'ATNI', 'ATO', 'ATR', 'ATU', 'ATW', 'AVA', 'AVAV', 'AVB', 'AVD', 'AVID', 'AVP',", "'AFL', 'AGCO', 'AGN', 'AGP', 'AGYS', 'AHL', 'AHS', 'AIG', 'AINV', 'AIR', 'AIRM', 'AIT', 'AIV',", "'TBI', 'TCB', 'TCBI', 'TCO', 'TDC', 'TDS', 'TDW', 'TDY', 'TE', 'TECD', 'TECH', 'TEG', 'TEL',", "'HW', 'HWAY', 'HWKN', 'HZO', 'IART', 'IBKR', 'IBM', 'IBOC', 'ICE', 'ICON', 'ICUI', 'IDA', 'IDTI',", "'PBY', 'PCAR', 'PCG', 'PCH', 'PCL', 'PCLN', 'PCP', 'PCTI', 'PDCE', 'PDCO', 'PEET', 'PEG', 'PEI',", "'SHFL', 'SHLM', 'SHOO', 'SHW', 'SIAL', 'SIG', 'SIGI', 'SIGM', 'SIVB', 'SJI', 'SJM', 'SKS', 'SKT',", "'ITT', 'ITW', 'IVAC', 'IVC', 'IVZ', 
'JACK', 'JAH', 'JAKK', 'JBHT', 'JBL', 'JBLU', 'JBT', 'JCI',", "'OII', 'OIS', 'OKE', 'OLN', 'OMC', 'OMCL', 'OMG', 'OMI', 'OMX', 'ONB', 'ONE', 'OPEN', 'OPLK',", "'SXI', 'SXT', 'SYK', 'SYKE', 'SYMC', 'SYMM', 'SYNA', 'SYY', 'T', 'TAP', 'TBI', 'TCB', 'TCBI',", "'WRI', 'WRLD', 'WSM', 'WSO', 'WST', 'WTFC', 'WTR', 'WTS', 'WU', 'WWD', 'WWW', 'WY', 'WYN',", "'WMB', 'WMS', 'WMT', 'WOOF', 'WOR', 'WPO', 'WPP', 'WPX', 'WR', 'WRB', 'WRC', 'WRI', 'WRLD',", "'GDI', 'GE', 'GEF', 'GEO', 'GES', 'GFF', 'GGG', 'GHL', 'GIFI', 'GILD', 'GIS', 'GLW', 'GMCR',", "'ESIO', 'ESL', 'ESRX', 'ESS', 'ESV', 'ETFC', 'ETH', 'ETN', 'ETR', 'EV', 'EW', 'EWBC', 'EXAR',", "'CASY', 'CAT', 'CATM', 'CATO', 'CATY', 'CB', 'CBB', 'CBE', 'CBEY', 'CBG', 'CBK', 'CBM', 'CBOE',", "'OGE', 'OHI', 'OI', 'OII', 'OIS', 'OKE', 'OLN', 'OMC', 'OMCL', 'OMG', 'OMI', 'OMX', 'ONB',", "'SGMS', 'SGY', 'SHAW', 'SHFL', 'SHLM', 'SHOO', 'SHW', 'SIAL', 'SIG', 'SIGI', 'SIGM', 'SIVB', 'SJI',", "'DMND', 'DNB', 'DNR', 'DO', 'DOV', 'DOW', 'DPS', 'DRC', 'DRE', 'DRH', 'DRI', 'DRIV', 'DRQ',", "'NATI', 'NAVG', 'NBL', 'NBR', 'NBTB', 'NCI', 'NCIT', 'NCR', 'NCS', 'NDAQ', 'NDSN', 'NE', 'NEE',", "'XRX', 'XYL', 'Y', 'YHOO', 'YUM', 'ZBRA', 'ZEP', 'ZEUS', 'ZLC', 'ZMH', 'ZQK', 'ZUMZ'] TICKERS", "'TTMI', 'TTWO', 'TUES', 'TUP', 'TW', 'TWC', 'TWGP', 'TWTC', 'TWX', 'TXI', 'TXN', 'TXRH', 'TXT',", "'COL', 'COLB', 'COO', 'COP', 'COST', 'COV', 'CPB', 'CPLA', 'CPRT', 'CPSI', 'CPT', 'CPWR', 'CR',", "'STX', 'DOV', 'ALTR', 'WPI', 'HSY', 'ROP', 'PAYX', 'GPS', 'SNDK', 'DTE', 'PRGO', 'RF', 'NTRS',", "'AVY', 'HAR', 'OI', 'AIZ', 'NFLX', 'DF', 'FLIR', 'GT', 'LM', 'APOL', 'PDCO', 'JDSU', 'ANF',", "'AZZ', 'B', 'BA', 'BABY', 'BAC', 'BAS', 'BAX', 'BBBY', 'BBG', 'BBOX', 'BBT', 'BBY', 'BC',", "'USTR', 'UTEK', 'UTHR', 'UTI', 'UTIW', 'UTX', 'UVV', 'V', 'VAL', 'VAR', 'VCI', 'VCLK', 'VDSI',", "'HVT', 'HW', 'HWAY', 'HWKN', 'HZO', 'IART', 'IBKR', 'IBM', 'IBOC', 'ICE', 'ICON', 'ICUI', 'IDA',", "'EW', 'EWBC', 'EXAR', 'EXC', 'EXH', 'EXLS', 'EXP', 'EXPD', 'EXPE', 'EXPO', 'EXR', 'EZPW', 
'F',", "'KBR', 'KDN', 'KELYA', 'KEX', 'KEY', 'KFY', 'KIM', 'KIRK', 'KLAC', 'KLIC', 'KMB', 'KMI', 'KMPR',", "'KMX', 'FDO', 'WHR', 'MCHP', 'SCG', 'DNR', 'CFN', 'CPB', 'CMS', 'VMC', 'MU', 'BMC', 'NYX',", "'MAR', 'MAS', 'MASI', 'MAT', 'MATW', 'MATX', 'MCD', 'MCF', 'MCHP', 'MCK', 'MCO', 'MCRI', 'MCRL',", "'OI', 'OII', 'OIS', 'OKE', 'OLN', 'OMC', 'OMCL', 'OMG', 'OMI', 'OMX', 'ONB', 'ONE', 'OPEN',", "'AIG', 'F', 'FCX', 'MET', 'BIIB', 'EMR', 'APC', 'NKE', 'DOW', 'LOW', 'NOV', 'KMB', 'APA',", "'SYMC', 'PH', 'GWW', 'EW', 'ETR', 'NUE', 'SWN', 'MAT', 'CBE', 'NU', 'AMP', 'NTAP', 'ZMH',", "'WMS', 'WMT', 'WOOF', 'WOR', 'WPO', 'WPP', 'WPX', 'WR', 'WRB', 'WRC', 'WRI', 'WRLD', 'WSM',", "'MTX', 'MU', 'MUR', 'MW', 'MWIV', 'MWV', 'MWW', 'MYE', 'MYL', 'NAFC', 'NANO', 'NATI', 'NAVG',", "'IRC', 'IRF', 'IRM', 'ISCA', 'ISIL', 'ISRG', 'IT', 'ITG', 'ITRI', 'ITT', 'ITW', 'IVAC', 'IVC',", "'TWC', 'TWGP', 'TWTC', 'TWX', 'TXI', 'TXN', 'TXRH', 'TXT', 'TYC', 'TYL', 'TYPE', 'UA', 'UBA',", "'GEO', 'GES', 'GFF', 'GGG', 'GHL', 'GIFI', 'GILD', 'GIS', 'GLW', 'GMCR', 'GME', 'GMT', 'GNCMA',", "'RAI', 'RAX', 'RBC', 'RBCN', 'RBN', 'RCII', 'RDC', 'RE', 'RECN', 'REG', 'REGN', 'RF', 'RFMD',", "'HTLD', 'HTSI', 'HUBG', 'HUM', 'HVT', 'HW', 'HWAY', 'HWKN', 'HZO', 'IART', 'IBKR', 'IBM', 'IBOC',", "'CLD', 'CLF', 'CLGX', 'CLH', 'CLI', 'CLMS', 'CLP', 'CLW', 'CLX', 'CMA', 'CMC', 'CMCSA', 'CME',", "'TSO', 'TSRA', 'TSS', 'TTC', 'TTEC', 'TTEK', 'TTI', 'TTMI', 'TTWO', 'TUES', 'TUP', 'TW', 'TWC',", "'TXN', 'TXRH', 'TXT', 'TYC', 'TYL', 'TYPE', 'UA', 'UBA', 'UBSI', 'UCBI', 'UDR', 'UEIC', 'UFCS',", "'MAR', 'ROK', 'CHK', 'ABC', 'ICE', 'HOG', 'XRX', 'APH', 'GPC', 'CHRW', 'SJM', 'AA', 'COG',", "'AOL', 'AON', 'AOS', 'APA', 'APC', 'APD', 'APEI', 'APH', 'APOG', 'APOL', 'ARB', 'ARE', 'AREX',", "'HTSI', 'HUBG', 'HUM', 'HVT', 'HW', 'HWAY', 'HWKN', 'HZO', 'IART', 'IBKR', 'IBM', 'IBOC', 'ICE',", "'WAT', 'WBS', 'WBSN', 'WCG', 'WCN', 'WDC', 'WDFC', 'WDR', 'WEC', 'WEN', 'WERN', 'WFC', 'WFM',", "'FBP', 'FCF', 'FCFS', 'FCN', 'FCS', 'FCX', 
'FDO', 'FDS', 'FDX', 'FE', 'FEIC', 'FELE', 'FFBC',", "'PNRA', 'PNW', 'PNY', 'POL', 'POM', 'POOL', 'POST', 'POWI', 'POWL', 'PPG', 'PPS', 'PQ', 'PRA',", "'AGN', 'AGP', 'AGYS', 'AHL', 'AHS', 'AIG', 'AINV', 'AIR', 'AIRM', 'AIT', 'AIV', 'AIZ', 'AJG',", "'CNP', 'CNQR', 'CNW', 'CNX', 'COCO', 'COF', 'COG', 'COH', 'COHU', 'COL', 'COLB', 'COO', 'COP',", "'MOLX', 'MON', 'MOS', 'MOV', 'MPC', 'MPW', 'MPWR', 'MRCY', 'MRK', 'MRO', 'MRX', 'MS', 'MSA',", "'SUP', 'SUPX', 'SUSQ', 'SVU', 'SWI', 'SWK', 'SWKS', 'SWM', 'SWN', 'SWS', 'SWX', 'SWY', 'SXC',", "'PQ', 'PRA', 'PRAA', 'PRFT', 'PRGO', 'PRGS', 'PRU', 'PRX', 'PRXL', 'PSA', 'PSB', 'PSEC', 'PSEM',", "'FII', 'RRD', 'BIG', 'AN', 'WPO', 'LXK', 'ANR', 'FSLR', 'DV', 'TIE'] ALL_TICKERS = ['A',", "'AMGN', 'AMP', 'AMSF', 'AMSG', 'AMT', 'AMZN', 'AN', 'ANDE', 'ANF', 'ANN', 'ANR', 'ANSS', 'AOL',", "'EOG', 'YUM', 'NEE', 'TWC', 'PSX', 'COV', 'ADP', 'AMT', 'AGN', 'NEM', 'BK', 'TRV', 'TYC',", "'RKT', 'RL', 'RLI', 'RMD', 'ROCK', 'ROG', 'ROK', 'ROL', 'ROP', 'ROSE', 'ROST', 'ROVI', 'RPM',", "'ANR', 'FSLR', 'DV', 'TIE'] ALL_TICKERS = ['A', 'AA', 'AAN', 'AAON', 'AAP', 'AAPL', 'ABAX',", "'DHR', 'DHX', 'DIN', 'DIOD', 'DIS', 'DISCA', 'DKS', 'DLTR', 'DLX', 'DM', 'DMND', 'DNB', 'DNR',", "'AMCX', 'AMD', 'AME', 'AMED', 'AMG', 'AMGN', 'AMP', 'AMSF', 'AMSG', 'AMT', 'AMZN', 'AN', 'ANDE',", "'MNST', 'FFIV', 'NVDA', 'KIM', 'KEY', 'RSG', 'MKC', 'BCR', 'BSX', 'KLAC', 'AEE', 'BWA', 'SPLS',", "'VSI', 'VTR', 'VVC', 'VZ', 'WAB', 'WABC', 'WAFD', 'WAG', 'WAT', 'WBS', 'WBSN', 'WCG', 'WCN',", "'GHL', 'GIFI', 'GILD', 'GIS', 'GLW', 'GMCR', 'GME', 'GMT', 'GNCMA', 'GNTX', 'GNW', 'GOOG', 'GPC',", "'WABC', 'WAFD', 'WAG', 'WAT', 'WBS', 'WBSN', 'WCG', 'WCN', 'WDC', 'WDFC', 'WDR', 'WEC', 'WEN',", "'ARB', 'ARE', 'AREX', 'ARG', 'ARO', 'ARQL', 'ARRS', 'ARW', 'ASBC', 'ASEI', 'ASGN', 'ASH', 'ASNA',", "'ALTR', 'ALXN', 'AM', 'AMAT', 'AMCX', 'AMD', 'AME', 'AMED', 'AMG', 'AMGN', 'AMP', 'AMSF', 'AMSG',", "'CMCSA', 'BAC', 'DIS', 'MCD', 'AMZN', 'HD', 'KFT', 'V', 'OXY', 'COP', 'MO', 'UTX', 
'USB',", "'LLTC', 'WAT', 'NI', 'DRI', 'PCL', 'TAP', 'LLL', 'AVP', 'CNX', 'AES', 'AKAM', 'LNC', 'VAR',", "'EXC', 'D', 'EOG', 'YUM', 'NEE', 'TWC', 'PSX', 'COV', 'ADP', 'AMT', 'AGN', 'NEM', 'BK',", "'PG', 'JNJ', 'PFE', 'WFC', 'BRK.B', 'JPM', 'PM', 'KO', 'MRK', 'VZ', 'WMT', 'ORCL', 'INTC',", "'BWA', 'BWLD', 'BWS', 'BXP', 'BXS', 'BYD', 'BYI', 'C', 'CA', 'CAB', 'CACI', 'CAG', 'CAH',", "'NE', 'NEE', 'NEM', 'NEOG', 'NEU', 'NEWP', 'NFG', 'NFLX', 'NFP', 'NFX', 'NI', 'NILE', 'NJR',", "'JEC', 'JEF', 'JJSF', 'JKHY', 'JLL', 'JNJ', 'JNPR', 'JNS', 'JOSB', 'JOY', 'JPM', 'JWN', 'K',", "'BRKL', 'BRKS', 'BRLI', 'BRO', 'BRS', 'BSX', 'BTH', 'BTU', 'BWA', 'BWLD', 'BWS', 'BXP', 'BXS',", "'AGYS', 'AHL', 'AHS', 'AIG', 'AINV', 'AIR', 'AIRM', 'AIT', 'AIV', 'AIZ', 'AJG', 'AKAM', 'AKR',", "'ABC', 'ABFS', 'ABM', 'ABT', 'ACAT', 'ACC', 'ACE', 'ACI', 'ACIW', 'ACM', 'ACN', 'ACO', 'ACXM',", "'PFE', 'WFC', 'BRK.B', 'JPM', 'PM', 'KO', 'MRK', 'VZ', 'WMT', 'ORCL', 'INTC', 'PEP', 'ABT',", "'TGI', 'TGT', 'THC', 'THG', 'THO', 'THOR', 'THS', 'TIBX', 'TIE', 'TIF', 'TJX', 'TKR', 'TLAB',", "'ORCL', 'ORI', 'ORIT', 'ORLY', 'ORN', 'OSG', 'OSIS', 'OSK', 'OXM', 'OXY', 'OZRK', 'PACW', 'PAY',", "'TWC', 'PSX', 'COV', 'ADP', 'AMT', 'AGN', 'NEM', 'BK', 'TRV', 'TYC', 'GIS', 'ITW', 'ACE',", "'IRM', 'ISCA', 'ISIL', 'ISRG', 'IT', 'ITG', 'ITRI', 'ITT', 'ITW', 'IVAC', 'IVC', 'IVZ', 'JACK',", "'BRK.B', 'JPM', 'PM', 'KO', 'MRK', 'VZ', 'WMT', 'ORCL', 'INTC', 'PEP', 'ABT', 'QCOM', 'CSCO',", "'RRD', 'BIG', 'AN', 'WPO', 'LXK', 'ANR', 'FSLR', 'DV', 'TIE'] ALL_TICKERS = ['A', 'AA',", "'MDRX', 'MDSO', 'MDT', 'MDU', 'MEAS', 'MED', 'MEI', 'MENT', 'MET', 'MFB', 'MGAM', 'MGLN', 'MHK',", "'WY', 'WYN', 'WYNN', 'X', 'XEC', 'XEL', 'XL', 'XLNX', 'XLS', 'XOM', 'XOXO', 'XRAY', 'XRX',", "'AMSG', 'AMT', 'AMZN', 'AN', 'ANDE', 'ANF', 'ANN', 'ANR', 'ANSS', 'AOL', 'AON', 'AOS', 'APA',", "'HE', 'HES', 'HF', 'HFC', 'HGR', 'HHS', 'HI', 'HIBB', 'HIG', 'HII', 'HITK', 'HITT', 'HIW',", "'ENR', 'ENS', 'ENSG', 'ENTR', 'ENZ', 'EOG', 'EPAY', 'EPIQ', 'EPR', 
'EQIX', 'EQR', 'EQT', 'EQY',", "'CYN', 'CYT', 'D', 'DAKT', 'DAR', 'DBD', 'DCI', 'DCOM', 'DD', 'DDD', 'DE', 'DECK', 'DEL',", "'ADTN', 'ADVS', 'AEE', 'AEGN', 'AEIS', 'AEO', 'AEP', 'AES', 'AET', 'AF', 'AFAM', 'AFFX', 'AFG',", "'YUM', 'ZBRA', 'ZEP', 'ZEUS', 'ZLC', 'ZMH', 'ZQK', 'ZUMZ'] TICKERS = sorted(list(set(SNP_TICKERS) & set(ALL_TICKERS)))", "'FDX', 'PCP', 'BBT', 'MS', 'BLK', 'DVN', 'AFL', 'ALXN', 'GD', 'WMB', 'CBS', 'CSX', 'TMO',", "'HNZ', 'HOG', 'HOLX', 'HOMB', 'HON', 'HOS', 'HOT', 'HOTT', 'HP', 'HPQ', 'HPT', 'HPY', 'HR',", "'FTR', 'FUL', 'FULT', 'FWRD', 'GAS', 'GB', 'GBCI', 'GCI', 'GCO', 'GD', 'GDI', 'GE', 'GEF',", "'HBAN', 'HBHC', 'HBI', 'HCBK', 'HCC', 'HCN', 'HCP', 'HCSG', 'HD', 'HE', 'HES', 'HF', 'HFC',", "'PPG', 'PPS', 'PQ', 'PRA', 'PRAA', 'PRFT', 'PRGO', 'PRGS', 'PRU', 'PRX', 'PRXL', 'PSA', 'PSB',", "'ARG', 'LEN', 'QEP', 'EFX', 'CVH', 'CLF', 'CBG', 'CINF', 'NWL', 'HSP', 'EXPE', 'XRAY', 'UNM',", "'LM', 'LMNX', 'LMOS', 'LMT', 'LNC', 'LNCE', 'LNN', 'LNT', 'LO', 'LOGM', 'LOW', 'LPNT', 'LPS',", "'RSH', 'RSTI', 'RSYS', 'RT', 'RTEC', 'RTI', 'RTN', 'RUE', 'RUTH', 'RVBD', 'RYL', 'RYN', 'S',", "'NKE', 'NNN', 'NOC', 'NOV', 'NP', 'NPBC', 'NPK', 'NPO', 'NRG', 'NSC', 'NSIT', 'NSP', 'NSR',", "'CI', 'XEL', 'A', 'CAH', 'AET', 'STJ', 'AVB', 'L', 'IR', 'PXD', 'KR', 'SWK', 'K',", "'UTX', 'UVV', 'V', 'VAL', 'VAR', 'VCI', 'VCLK', 'VDSI', 'VECO', 'VFC', 'VIAB', 'VICR', 'VIVO',", "'WBS', 'WBSN', 'WCG', 'WCN', 'WDC', 'WDFC', 'WDR', 'WEC', 'WEN', 'WERN', 'WFC', 'WFM', 'WGL',", "'PETM', 'PETS', 'PFE', 'PFG', 'PFS', 'PG', 'PGR', 'PH', 'PHM', 'PII', 'PJC', 'PKE', 'PKG',", "'CAG', 'IVZ', 'DLTR', 'KSS', 'FTI', 'RHT', 'WU', 'STX', 'DOV', 'ALTR', 'WPI', 'HSY', 'ROP',", "'DV', 'DVA', 'DVN', 'DW', 'DWA', 'DY', 'EA', 'EAT', 'EBAY', 'EBIX', 'EBS', 'ECL', 'ECPG',", "'NTCT', 'NTGR', 'NTLS', 'NTRI', 'NTRS', 'NU', 'NUE', 'NUVA', 'NVDA', 'NVE', 'NVR', 'NVTL', 'NWBI',", "'NYB', 'NYT', 'NYX', 'O', 'OCR', 'ODFL', 'ODP', 'OFC', 'OGE', 'OHI', 'OI', 'OII', 'OIS',", "'WSO', 'WST', 'WTFC', 'WTR', 'WTS', 
'WU', 'WWD', 'WWW', 'WY', 'WYN', 'WYNN', 'X', 'XEC',", "'UNM', 'MAS', 'MWV', 'SNI', 'PWR', 'JEC', 'PHM', 'IRM', 'HP', 'CSC', 'SUN', 'TMK', 'FTR',", "'AMED', 'AMG', 'AMGN', 'AMP', 'AMSF', 'AMSG', 'AMT', 'AMZN', 'AN', 'ANDE', 'ANF', 'ANN', 'ANR',", "'GTIV', 'GTY', 'GVA', 'GWW', 'GXP', 'GY', 'HAE', 'HAFC', 'HAIN', 'HAL', 'HAR', 'HAS', 'HAYN',", "'SLH', 'SLM', 'SLXP', 'SM', 'SMA', 'SMCI', 'SMG', 'SMP', 'SMRT', 'SMTC', 'SNA', 'SNCR', 'SNDK',", "'RCII', 'RDC', 'RE', 'RECN', 'REG', 'REGN', 'RF', 'RFMD', 'RGA', 'RGLD', 'RGR', 'RGS', 'RHI',", "'XYL', 'TSN', 'FOSL', 'DO', 'BBY', 'LUK', 'CTAS', 'HAS', 'POM', 'PBCT', 'NFX', 'RDC', 'SNA',", "'WYNN', 'X', 'XEC', 'XEL', 'XL', 'XLNX', 'XLS', 'XOM', 'XOXO', 'XRAY', 'XRX', 'XYL', 'Y',", "'COH', 'COHU', 'COL', 'COLB', 'COO', 'COP', 'COST', 'COV', 'CPB', 'CPLA', 'CPRT', 'CPSI', 'CPT',", "'TNC', 'TOL', 'TPX', 'TQNT', 'TR', 'TRAK', 'TRIP', 'TRLG', 'TRMB', 'TRMK', 'TRN', 'TROW', 'TRST',", "'RL', 'WYNN', 'BEAM', 'CNP', 'NE', 'JNPR', 'LH', 'EQT', 'CA', 'DVA', 'XLNX', 'EMN', 'SIAL',", "'ATK', 'ATMI', 'ATML', 'ATNI', 'ATO', 'ATR', 'ATU', 'ATW', 'AVA', 'AVAV', 'AVB', 'AVD', 'AVID',", "'ANR', 'ANSS', 'AOL', 'AON', 'AOS', 'APA', 'APC', 'APD', 'APEI', 'APH', 'APOG', 'APOL', 'ARB',", "'MDC', 'MDCO', 'MDP', 'MDRX', 'MDSO', 'MDT', 'MDU', 'MEAS', 'MED', 'MEI', 'MENT', 'MET', 'MFB',", "'CNMD', 'CNP', 'CNQR', 'CNW', 'CNX', 'COCO', 'COF', 'COG', 'COH', 'COHU', 'COL', 'COLB', 'COO',", "'ORB', 'ORCL', 'ORI', 'ORIT', 'ORLY', 'ORN', 'OSG', 'OSIS', 'OSK', 'OXM', 'OXY', 'OZRK', 'PACW',", "'THC', 'THG', 'THO', 'THOR', 'THS', 'TIBX', 'TIE', 'TIF', 'TJX', 'TKR', 'TLAB', 'TMK', 'TMO',", "= ['A', 'AA', 'AAN', 'AAON', 'AAP', 'AAPL', 'ABAX', 'ABC', 'ABFS', 'ABM', 'ABT', 'ACAT',", "'V', 'OXY', 'COP', 'MO', 'UTX', 'USB', 'AMGN', 'CVS', 'MMM', 'EMC', 'UNH', 'BMY', 'UNP',", "'MCK', 'MCO', 'MCRI', 'MCRL', 'MCRS', 'MCS', 'MCY', 'MD', 'MDC', 'MDCO', 'MDP', 'MDRX', 'MDSO',", "'CHE', 'CHG', 'CHK', 'CHRW', 'CHS', 'CI', 'CIEN', 'CINF', 'CIR', 'CKH', 'CKP', 'CL', 'CLC',", 
"'FELE', 'FFBC', 'FFIN', 'FFIV', 'FHN', 'FICO', 'FII', 'FINL', 'FIRE', 'FIS', 'FISV', 'FITB', 'FIX',", "'ATU', 'ATW', 'AVA', 'AVAV', 'AVB', 'AVD', 'AVID', 'AVP', 'AVT', 'AVY', 'AWR', 'AXE', 'AXP',", "'ALEX', 'ALGN', 'ALGT', 'ALK', 'ALL', 'ALOG', 'ALTR', 'ALXN', 'AM', 'AMAT', 'AMCX', 'AMD', 'AME',", "'RSTI', 'RSYS', 'RT', 'RTEC', 'RTI', 'RTN', 'RUE', 'RUTH', 'RVBD', 'RYL', 'RYN', 'S', 'SAFM',", "'HRC', 'HRL', 'HRS', 'HSC', 'HSH', 'HSIC', 'HSII', 'HSNI', 'HSP', 'HST', 'HSTM', 'HSY', 'HTLD',", "'JNPR', 'LH', 'EQT', 'CA', 'DVA', 'XLNX', 'EMN', 'SIAL', 'WEC', 'CCE', 'WDC', 'LIFE', 'MCO',", "'LQDT', 'LRCX', 'LRY', 'LSI', 'LSTR', 'LTC', 'LTD', 'LTM', 'LUFK', 'LUK', 'LUV', 'LXK', 'LXP',", "'GOOG', 'PG', 'JNJ', 'PFE', 'WFC', 'BRK.B', 'JPM', 'PM', 'KO', 'MRK', 'VZ', 'WMT', 'ORCL',", "'SAM', 'SBRA', 'SBUX', 'SCG', 'SCHL', 'SCHW', 'SCI', 'SCL', 'SCOR', 'SCSC', 'SCSS', 'SE', 'SEE',", "'MMS', 'MMSI', 'MNRO', 'MNST', 'MNTA', 'MO', 'MOH', 'MOLX', 'MON', 'MOS', 'MOV', 'MPC', 'MPW',", "'PRA', 'PRAA', 'PRFT', 'PRGO', 'PRGS', 'PRU', 'PRX', 'PRXL', 'PSA', 'PSB', 'PSEC', 'PSEM', 'PSS',", "'WLP', 'CCI', 'JCI', 'MPC', 'MMC', 'FE', 'VTR', 'SYY', 'PCG', 'HNZ', 'ADM', 'BRCM', 'ED',", "'CWTR', 'CXW', 'CY', 'CYBX', 'CYH', 'CYMI', 'CYN', 'CYT', 'D', 'DAKT', 'DAR', 'DBD', 'DCI',", "'PHM', 'IRM', 'HP', 'CSC', 'SUN', 'TMK', 'FTR', 'NRG', 'IPG', 'IFF', 'GAS', 'STZ', 'HRB',", "'LUV', 'KMX', 'FDO', 'WHR', 'MCHP', 'SCG', 'DNR', 'CFN', 'CPB', 'CMS', 'VMC', 'MU', 'BMC',", "'ORLY', 'ORN', 'OSG', 'OSIS', 'OSK', 'OXM', 'OXY', 'OZRK', 'PACW', 'PAY', 'PAYX', 'PB', 'PBCT',", "'WYN', 'SLM', 'PLL', 'TIF', 'TXT', 'XL', 'LLTC', 'WAT', 'NI', 'DRI', 'PCL', 'TAP', 'LLL',", "'EMC', 'EME', 'EMN', 'EMR', 'ENDP', 'ENR', 'ENS', 'ENSG', 'ENTR', 'ENZ', 'EOG', 'EPAY', 'EPIQ',", "'VSAT', 'VSH', 'VSI', 'VTR', 'VVC', 'VZ', 'WAB', 'WABC', 'WAFD', 'WAG', 'WAT', 'WBS', 'WBSN',", "'DNR', 'CFN', 'CPB', 'CMS', 'VMC', 'MU', 'BMC', 'NYX', 'CMA', 'BTU', 'WIN', 'JOY', 'HBAN',", "'RYN', 'S', 'SAFM', 'SAFT', 'SAH', 'SAI', 'SAM', 'SBRA', 
'SBUX', 'SCG', 'SCHL', 'SCHW', 'SCI',", "'NTAP', 'NTCT', 'NTGR', 'NTLS', 'NTRI', 'NTRS', 'NU', 'NUE', 'NUVA', 'NVDA', 'NVE', 'NVR', 'NVTL',", "'MCHP', 'MCK', 'MCO', 'MCRI', 'MCRL', 'MCRS', 'MCS', 'MCY', 'MD', 'MDC', 'MDCO', 'MDP', 'MDRX',", "'STI', 'STJ', 'STL', 'STLD', 'STMP', 'STR', 'STRA', 'STRI', 'STT', 'STX', 'STZ', 'SUN', 'SUP',", "'NWL', 'NWN', 'NWSA', 'NX', 'NYB', 'NYT', 'NYX', 'O', 'OCR', 'ODFL', 'ODP', 'OFC', 'OGE',", "'FLIR', 'FLO', 'FLR', 'FLS', 'FMBI', 'FMC', 'FMER', 'FNB', 'FNF', 'FNFG', 'FNGN', 'FNP', 'FOR',", "'VLO', 'VLTR', 'VLY', 'VMC', 'VMI', 'VNO', 'VOXX', 'VPFG', 'VPHM', 'VRSN', 'VRTS', 'VRTU', 'VRTX',", "'CBSH', 'CBST', 'CBT', 'CBU', 'CCC', 'CCE', 'CCI', 'CCL', 'CCMP', 'CCRN', 'CDI', 'CDNS', 'CDR',", "'MCK', 'CB', 'KMI', 'STT', 'PSA', 'BHI', 'ISRG', 'GLW', 'CRM', 'ALL', 'SE', 'HCP', 'RTN',", "'EPR', 'EQIX', 'EQR', 'EQT', 'EQY', 'ESE', 'ESI', 'ESIO', 'ESL', 'ESRX', 'ESS', 'ESV', 'ETFC',", "'CMG', 'FISV', 'ORLY', 'MUR', 'OKE', 'MYL', 'BF.B', 'MAR', 'ROK', 'CHK', 'ABC', 'ICE', 'HOG',", "'FULT', 'FWRD', 'GAS', 'GB', 'GBCI', 'GCI', 'GCO', 'GD', 'GDI', 'GE', 'GEF', 'GEO', 'GES',", "'SCHW', 'VFC', 'WM', 'CF', 'AZO', 'AMAT', 'CAM', 'VNO', 'OMC', 'CI', 'XEL', 'A', 'CAH',", "'WPX', 'BMS', 'AVY', 'HAR', 'OI', 'AIZ', 'NFLX', 'DF', 'FLIR', 'GT', 'LM', 'APOL', 'PDCO',", "'SFD', 'SFG', 'SFNC', 'SFY', 'SGMS', 'SGY', 'SHAW', 'SHFL', 'SHLM', 'SHOO', 'SHW', 'SIAL', 'SIG',", "'FRED', 'FRT', 'FRX', 'FSLR', 'FSP', 'FST', 'FTI', 'FTR', 'FUL', 'FULT', 'FWRD', 'GAS', 'GB',", "'HON', 'SPG', 'DUK', 'ACN', 'MDT', 'COST', 'TWX', 'TGT', 'SO', 'SBUX', 'AIG', 'F', 'FCX',", "'CMI', 'CMN', 'CMP', 'CMS', 'CMTL', 'CNC', 'CNK', 'CNL', 'CNMD', 'CNP', 'CNQR', 'CNW', 'CNX',", "'BAC', 'BAS', 'BAX', 'BBBY', 'BBG', 'BBOX', 'BBT', 'BBY', 'BC', 'BCO', 'BCOR', 'BCPC', 'BCR',", "'FFBC', 'FFIN', 'FFIV', 'FHN', 'FICO', 'FII', 'FINL', 'FIRE', 'FIS', 'FISV', 'FITB', 'FIX', 'FL',", "'STBA', 'STC', 'STE', 'STI', 'STJ', 'STL', 'STLD', 'STMP', 'STR', 'STRA', 'STRI', 'STT', 'STX',", "'SE', 'HCP', 
'RTN', 'WLP', 'CCI', 'JCI', 'MPC', 'MMC', 'FE', 'VTR', 'SYY', 'PCG', 'HNZ',", "'MRX', 'MS', 'MSA', 'MSCC', 'MSCI', 'MSFT', 'MSI', 'MSM', 'MSTR', 'MTB', 'MTD', 'MTH', 'MTRN',", "'CNW', 'CNX', 'COCO', 'COF', 'COG', 'COH', 'COHU', 'COL', 'COLB', 'COO', 'COP', 'COST', 'COV',", "'APEI', 'APH', 'APOG', 'APOL', 'ARB', 'ARE', 'AREX', 'ARG', 'ARO', 'ARQL', 'ARRS', 'ARW', 'ASBC',", "'BBT', 'BBY', 'BC', 'BCO', 'BCOR', 'BCPC', 'BCR', 'BDC', 'BDX', 'BEAM', 'BEAV', 'BELFB', 'BEN',", "'XRAY', 'XRX', 'XYL', 'Y', 'YHOO', 'YUM', 'ZBRA', 'ZEP', 'ZEUS', 'ZLC', 'ZMH', 'ZQK', 'ZUMZ']", "'WPP', 'WPX', 'WR', 'WRB', 'WRC', 'WRI', 'WRLD', 'WSM', 'WSO', 'WST', 'WTFC', 'WTR', 'WTS',", "'MON', 'MA', 'LLY', 'CL', 'BA', 'DD', 'HON', 'SPG', 'DUK', 'ACN', 'MDT', 'COST', 'TWX',", "'MSCI', 'MSFT', 'MSI', 'MSM', 'MSTR', 'MTB', 'MTD', 'MTH', 'MTRN', 'MTRX', 'MTSC', 'MTX', 'MU',", "'HUBG', 'HUM', 'HVT', 'HW', 'HWAY', 'HWKN', 'HZO', 'IART', 'IBKR', 'IBM', 'IBOC', 'ICE', 'ICON',", "'PLL', 'PLT', 'PLXS', 'PM', 'PMC', 'PMTC', 'PMTI', 'PNC', 'PNFP', 'PNK', 'PNM', 'PNR', 'PNRA',", "'BLK', 'BLKB', 'BLL', 'BMC', 'BMI', 'BMR', 'BMS', 'BMY', 'BOBE', 'BOH', 'BPFH', 'BR', 'BRC',", "'ROCK', 'ROG', 'ROK', 'ROL', 'ROP', 'ROSE', 'ROST', 'ROVI', 'RPM', 'RRC', 'RRD', 'RRGB', 'RS',", "'MAC', 'MAN', 'MANH', 'MANT', 'MAR', 'MAS', 'MASI', 'MAT', 'MATW', 'MATX', 'MCD', 'MCF', 'MCHP',", "'FITB', 'FIX', 'FL', 'FLIR', 'FLO', 'FLR', 'FLS', 'FMBI', 'FMC', 'FMER', 'FNB', 'FNF', 'FNFG',", "'MMSI', 'MNRO', 'MNST', 'MNTA', 'MO', 'MOH', 'MOLX', 'MON', 'MOS', 'MOV', 'MPC', 'MPW', 'MPWR',", "'ROK', 'CHK', 'ABC', 'ICE', 'HOG', 'XRX', 'APH', 'GPC', 'CHRW', 'SJM', 'AA', 'COG', 'FLR',", "'CCE', 'WDC', 'LIFE', 'MCO', 'HIG', 'JWN', 'FRX', 'MNST', 'FFIV', 'NVDA', 'KIM', 'KEY', 'RSG',", "'CMA', 'CMC', 'CMCSA', 'CME', 'CMG', 'CMI', 'CMN', 'CMP', 'CMS', 'CMTL', 'CNC', 'CNK', 'CNL',", "'WMT', 'WOOF', 'WOR', 'WPO', 'WPP', 'WPX', 'WR', 'WRB', 'WRC', 'WRI', 'WRLD', 'WSM', 'WSO',", "'CRUS', 'CRVL', 'CRY', 'CSC', 'CSCO', 'CSGS', 'CSH', 'CSL', 'CSTR', 
'CSX', 'CTAS', 'CTL', 'CTS',", "'FCFS', 'FCN', 'FCS', 'FCX', 'FDO', 'FDS', 'FDX', 'FE', 'FEIC', 'FELE', 'FFBC', 'FFIN', 'FFIV',", "'HRB', 'HRC', 'HRL', 'HRS', 'HSC', 'HSH', 'HSIC', 'HSII', 'HSNI', 'HSP', 'HST', 'HSTM', 'HSY',", "'LO', 'M', 'MJN', 'EIX', 'EL', 'DISCA', 'HCN', 'BBBY', 'TEL', 'SRE', 'MSI', 'ROST', 'DELL',", "'GS', 'GSM', 'GT', 'GTAT', 'GTIV', 'GTY', 'GVA', 'GWW', 'GXP', 'GY', 'HAE', 'HAFC', 'HAIN',", "'GCO', 'GD', 'GDI', 'GE', 'GEF', 'GEO', 'GES', 'GFF', 'GGG', 'GHL', 'GIFI', 'GILD', 'GIS',", "'CRS', 'CRUS', 'CRVL', 'CRY', 'CSC', 'CSCO', 'CSGS', 'CSH', 'CSL', 'CSTR', 'CSX', 'CTAS', 'CTL',", "'ABT', 'QCOM', 'CSCO', 'SLB', 'C', 'CMCSA', 'BAC', 'DIS', 'MCD', 'AMZN', 'HD', 'KFT', 'V',", "'RTEC', 'RTI', 'RTN', 'RUE', 'RUTH', 'RVBD', 'RYL', 'RYN', 'S', 'SAFM', 'SAFT', 'SAH', 'SAI',", "'FICO', 'FII', 'FINL', 'FIRE', 'FIS', 'FISV', 'FITB', 'FIX', 'FL', 'FLIR', 'FLO', 'FLR', 'FLS',", "'AIV', 'AIZ', 'AJG', 'AKAM', 'AKR', 'AKRX', 'AKS', 'ALB', 'ALE', 'ALEX', 'ALGN', 'ALGT', 'ALK',", "'PII', 'PJC', 'PKE', 'PKG', 'PKI', 'PKY', 'PL', 'PLCE', 'PLCM', 'PLD', 'PLFE', 'PLL', 'PLT',", "'CBST', 'CBT', 'CBU', 'CCC', 'CCE', 'CCI', 'CCL', 'CCMP', 'CCRN', 'CDI', 'CDNS', 'CDR', 'CEB',", "'SWM', 'SWN', 'SWS', 'SWX', 'SWY', 'SXC', 'SXI', 'SXT', 'SYK', 'SYKE', 'SYMC', 'SYMM', 'SYNA',", "'CAT', 'CATM', 'CATO', 'CATY', 'CB', 'CBB', 'CBE', 'CBEY', 'CBG', 'CBK', 'CBM', 'CBOE', 'CBR',", "'NOV', 'NP', 'NPBC', 'NPK', 'NPO', 'NRG', 'NSC', 'NSIT', 'NSP', 'NSR', 'NTAP', 'NTCT', 'NTGR',", "'KEY', 'KFY', 'KIM', 'KIRK', 'KLAC', 'KLIC', 'KMB', 'KMI', 'KMPR', 'KMT', 'KMX', 'KND', 'KNX',", "'POL', 'POM', 'POOL', 'POST', 'POWI', 'POWL', 'PPG', 'PPS', 'PQ', 'PRA', 'PRAA', 'PRFT', 'PRGO',", "'BOH', 'BPFH', 'BR', 'BRC', 'BRCM', 'BRKL', 'BRKS', 'BRLI', 'BRO', 'BRS', 'BSX', 'BTH', 'BTU',", "'OHI', 'OI', 'OII', 'OIS', 'OKE', 'OLN', 'OMC', 'OMCL', 'OMG', 'OMI', 'OMX', 'ONB', 'ONE',", "'XEL', 'XL', 'XLNX', 'XLS', 'XOM', 'XOXO', 'XRAY', 'XRX', 'XYL', 'Y', 'YHOO', 'YUM', 'ZBRA',", "'HNT', 'HNZ', 'HOG', 
'HOLX', 'HOMB', 'HON', 'HOS', 'HOT', 'HOTT', 'HP', 'HPQ', 'HPT', 'HPY',", "'SCHW', 'SCI', 'SCL', 'SCOR', 'SCSC', 'SCSS', 'SE', 'SEE', 'SEIC', 'SENEA', 'SF', 'SFD', 'SFG',", "'ATI', 'ATK', 'ATMI', 'ATML', 'ATNI', 'ATO', 'ATR', 'ATU', 'ATW', 'AVA', 'AVAV', 'AVB', 'AVD',", "'BWLD', 'BWS', 'BXP', 'BXS', 'BYD', 'BYI', 'C', 'CA', 'CAB', 'CACI', 'CAG', 'CAH', 'CAKE',", "'TWX', 'TXI', 'TXN', 'TXRH', 'TXT', 'TYC', 'TYL', 'TYPE', 'UA', 'UBA', 'UBSI', 'UCBI', 'UDR',", "'PZZA', 'QCOM', 'QCOR', 'QEP', 'QLGC', 'QNST', 'QSFT', 'QSII', 'R', 'RAH', 'RAI', 'RAX', 'RBC',", "'NDSN', 'NE', 'NEE', 'NEM', 'NEOG', 'NEU', 'NEWP', 'NFG', 'NFLX', 'NFP', 'NFX', 'NI', 'NILE',", "'PNC', 'PNFP', 'PNK', 'PNM', 'PNR', 'PNRA', 'PNW', 'PNY', 'POL', 'POM', 'POOL', 'POST', 'POWI',", "'LAWS', 'LDL', 'LDR', 'LECO', 'LEG', 'LEN', 'LFUS', 'LG', 'LH', 'LHCG', 'LHO', 'LIFE', 'LII',", "'ADSK', 'ADTN', 'ADVS', 'AEE', 'AEGN', 'AEIS', 'AEO', 'AEP', 'AES', 'AET', 'AF', 'AFAM', 'AFFX',", "'M', 'MJN', 'EIX', 'EL', 'DISCA', 'HCN', 'BBBY', 'TEL', 'SRE', 'MSI', 'ROST', 'DELL', 'CTXS',", "'HPT', 'HPY', 'HR', 'HRB', 'HRC', 'HRL', 'HRS', 'HSC', 'HSH', 'HSIC', 'HSII', 'HSNI', 'HSP',", "'LXP', 'LXU', 'LYV', 'LZB', 'M', 'MA', 'MAA', 'MAC', 'MAN', 'MANH', 'MANT', 'MAR', 'MAS',", "'SONC', 'SPAR', 'SPF', 'SPG', 'SPLS', 'SPN', 'SPPI', 'SPTN', 'SPW', 'SRCL', 'SRDX', 'SRE', 'SSD',", "'JACK', 'JAH', 'JAKK', 'JBHT', 'JBL', 'JBLU', 'JBT', 'JCI', 'JCOM', 'JCP', 'JDAS', 'JDSU', 'JEC',", "'HOG', 'HOLX', 'HOMB', 'HON', 'HOS', 'HOT', 'HOTT', 'HP', 'HPQ', 'HPT', 'HPY', 'HR', 'HRB',", "'FISV', 'FITB', 'FIX', 'FL', 'FLIR', 'FLO', 'FLR', 'FLS', 'FMBI', 'FMC', 'FMER', 'FNB', 'FNF',", "'TROW', 'TRST', 'TRV', 'TSCO', 'TSN', 'TSO', 'TSRA', 'TSS', 'TTC', 'TTEC', 'TTEK', 'TTI', 'TTMI',", "'EQR', 'EQT', 'EQY', 'ESE', 'ESI', 'ESIO', 'ESL', 'ESRX', 'ESS', 'ESV', 'ETFC', 'ETH', 'ETN',", "'UNH', 'UNM', 'UNP', 'UNS', 'UNT', 'UNTD', 'UPS', 'URBN', 'URI', 'URS', 'USB', 'USMO', 'USTR',", "'VRSN', 'VRTS', 'VRTU', 'VRTX', 'VSAT', 'VSH', 'VSI', 'VTR', 'VVC', 
'VZ', 'WAB', 'WABC', 'WAFD',", "'FLO', 'FLR', 'FLS', 'FMBI', 'FMC', 'FMER', 'FNB', 'FNF', 'FNFG', 'FNGN', 'FNP', 'FOR', 'FORR',", "'WCN', 'WDC', 'WDFC', 'WDR', 'WEC', 'WEN', 'WERN', 'WFC', 'WFM', 'WGL', 'WGO', 'WHR', 'WIBC',", "'SSS', 'STBA', 'STC', 'STE', 'STI', 'STJ', 'STL', 'STLD', 'STMP', 'STR', 'STRA', 'STRI', 'STT',", "'BLK', 'DVN', 'AFL', 'ALXN', 'GD', 'WMB', 'CBS', 'CSX', 'TMO', 'AEP', 'CTSH', 'MRO', 'DFS',", "'UIL', 'UMBF', 'UMPQ', 'UNF', 'UNFI', 'UNH', 'UNM', 'UNP', 'UNS', 'UNT', 'UNTD', 'UPS', 'URBN',", "'OLN', 'OMC', 'OMCL', 'OMG', 'OMI', 'OMX', 'ONB', 'ONE', 'OPEN', 'OPLK', 'OPNT', 'ORB', 'ORCL',", "'URBN', 'URI', 'URS', 'USB', 'USMO', 'USTR', 'UTEK', 'UTHR', 'UTI', 'UTIW', 'UTX', 'UVV', 'V',", "'FNF', 'FNFG', 'FNGN', 'FNP', 'FOR', 'FORR', 'FOSL', 'FRED', 'FRT', 'FRX', 'FSLR', 'FSP', 'FST',", "'PCL', 'PCLN', 'PCP', 'PCTI', 'PDCE', 'PDCO', 'PEET', 'PEG', 'PEI', 'PEP', 'PERY', 'PES', 'PETM',", "'TEG', 'TEL', 'TER', 'TEX', 'TFX', 'TG', 'TGI', 'TGT', 'THC', 'THG', 'THO', 'THOR', 'THS',", "'IART', 'IBKR', 'IBM', 'IBOC', 'ICE', 'ICON', 'ICUI', 'IDA', 'IDTI', 'IDXX', 'IEX', 'IFF', 'IFSIA',", "'ESRX', 'GILD', 'NWSA', 'MON', 'MA', 'LLY', 'CL', 'BA', 'DD', 'HON', 'SPG', 'DUK', 'ACN',", "'CYH', 'CYMI', 'CYN', 'CYT', 'D', 'DAKT', 'DAR', 'DBD', 'DCI', 'DCOM', 'DD', 'DDD', 'DE',", "'CSC', 'SUN', 'TMK', 'FTR', 'NRG', 'IPG', 'IFF', 'GAS', 'STZ', 'HRB', 'XYL', 'TSN', 'FOSL',", "'IR', 'PXD', 'KR', 'SWK', 'K', 'TDC', 'SHW', 'ESV', 'SYMC', 'PH', 'GWW', 'EW', 'ETR',", "'TXRH', 'TXT', 'TYC', 'TYL', 'TYPE', 'UA', 'UBA', 'UBSI', 'UCBI', 'UDR', 'UEIC', 'UFCS', 'UFPI',", "'IDXX', 'IEX', 'IFF', 'IFSIA', 'IGT', 'IGTE', 'IILG', 'IIVI', 'IM', 'IN', 'INDB', 'INFA', 'INGR',", "'KRC', 'KRG', 'KS', 'KSS', 'KSU', 'KSWS', 'KWK', 'KWR', 'L', 'LAD', 'LAMR', 'LANC', 'LAWS',", "'BK', 'TRV', 'TYC', 'GIS', 'ITW', 'ACE', 'PRU', 'VIAB', 'CTL', 'LMT', 'FDX', 'PCP', 'BBT',", "'JCI', 'JCOM', 'JCP', 'JDAS', 'JDSU', 'JEC', 'JEF', 'JJSF', 'JKHY', 'JLL', 'JNJ', 'JNPR', 'JNS',", "'SAFT', 'SAH', 'SAI', 
'SAM', 'SBRA', 'SBUX', 'SCG', 'SCHL', 'SCHW', 'SCI', 'SCL', 'SCOR', 'SCSC',", "'ALB', 'ALE', 'ALEX', 'ALGN', 'ALGT', 'ALK', 'ALL', 'ALOG', 'ALTR', 'ALXN', 'AM', 'AMAT', 'AMCX',", "'JBHT', 'JBL', 'JBLU', 'JBT', 'JCI', 'JCOM', 'JCP', 'JDAS', 'JDSU', 'JEC', 'JEF', 'JJSF', 'JKHY',", "'CCL', 'CCMP', 'CCRN', 'CDI', 'CDNS', 'CDR', 'CEB', 'CEC', 'CECO', 'CELG', 'CELL', 'CENX', 'CERN',", "'UFS', 'UGI', 'UHS', 'UHT', 'UIL', 'UMBF', 'UMPQ', 'UNF', 'UNFI', 'UNH', 'UNM', 'UNP', 'UNS',", "'HP', 'CSC', 'SUN', 'TMK', 'FTR', 'NRG', 'IPG', 'IFF', 'GAS', 'STZ', 'HRB', 'XYL', 'TSN',", "'PLXS', 'PM', 'PMC', 'PMTC', 'PMTI', 'PNC', 'PNFP', 'PNK', 'PNM', 'PNR', 'PNRA', 'PNW', 'PNY',", "'SRDX', 'SRE', 'SSD', 'SSI', 'SSP', 'SSS', 'STBA', 'STC', 'STE', 'STI', 'STJ', 'STL', 'STLD',", "'GPOR', 'GPS', 'GS', 'GSM', 'GT', 'GTAT', 'GTIV', 'GTY', 'GVA', 'GWW', 'GXP', 'GY', 'HAE',", "'VICR', 'VIVO', 'VLO', 'VLTR', 'VLY', 'VMC', 'VMI', 'VNO', 'VOXX', 'VPFG', 'VPHM', 'VRSN', 'VRTS',", "'DCOM', 'DD', 'DDD', 'DE', 'DECK', 'DEL', 'DELL', 'DF', 'DFS', 'DGII', 'DGIT', 'DGX', 'DHI',", "'GCI', 'GCO', 'GD', 'GDI', 'GE', 'GEF', 'GEO', 'GES', 'GFF', 'GGG', 'GHL', 'GIFI', 'GILD',", "'MO', 'MOH', 'MOLX', 'MON', 'MOS', 'MOV', 'MPC', 'MPW', 'MPWR', 'MRCY', 'MRK', 'MRO', 'MRX',", "'MPW', 'MPWR', 'MRCY', 'MRK', 'MRO', 'MRX', 'MS', 'MSA', 'MSCC', 'MSCI', 'MSFT', 'MSI', 'MSM',", "'RSG', 'RSH', 'RSTI', 'RSYS', 'RT', 'RTEC', 'RTI', 'RTN', 'RUE', 'RUTH', 'RVBD', 'RYL', 'RYN',", "'LNCE', 'LNN', 'LNT', 'LO', 'LOGM', 'LOW', 'LPNT', 'LPS', 'LPSN', 'LPX', 'LQDT', 'LRCX', 'LRY',", "'TAP', 'TBI', 'TCB', 'TCBI', 'TCO', 'TDC', 'TDS', 'TDW', 'TDY', 'TE', 'TECD', 'TECH', 'TEG',", "'EQIX', 'EQR', 'EQT', 'EQY', 'ESE', 'ESI', 'ESIO', 'ESL', 'ESRX', 'ESS', 'ESV', 'ETFC', 'ETH',", "'CHRW', 'CHS', 'CI', 'CIEN', 'CINF', 'CIR', 'CKH', 'CKP', 'CL', 'CLC', 'CLD', 'CLF', 'CLGX',", "'GS', 'ESRX', 'GILD', 'NWSA', 'MON', 'MA', 'LLY', 'CL', 'BA', 'DD', 'HON', 'SPG', 'DUK',", "'BBT', 'MS', 'BLK', 'DVN', 'AFL', 'ALXN', 'GD', 'WMB', 'CBS', 'CSX', 
'TMO', 'AEP', 'CTSH',", "'HSNI', 'HSP', 'HST', 'HSTM', 'HSY', 'HTLD', 'HTSI', 'HUBG', 'HUM', 'HVT', 'HW', 'HWAY', 'HWKN',", "'CLW', 'CLX', 'CMA', 'CMC', 'CMCSA', 'CME', 'CMG', 'CMI', 'CMN', 'CMP', 'CMS', 'CMTL', 'CNC',", "'HCBK', 'AIV', 'RHI', 'PCS', 'MOLX', 'TE', 'TRIP', 'DNB', 'LEG', 'JBL', 'IGT', 'JCP', 'CVC',", "'SYKE', 'SYMC', 'SYMM', 'SYNA', 'SYY', 'T', 'TAP', 'TBI', 'TCB', 'TCBI', 'TCO', 'TDC', 'TDS',", "'ACN', 'MDT', 'COST', 'TWX', 'TGT', 'SO', 'SBUX', 'AIG', 'F', 'FCX', 'MET', 'BIIB', 'EMR',", "'TMO', 'AEP', 'CTSH', 'MRO', 'DFS', 'NSC', 'MCK', 'CB', 'KMI', 'STT', 'PSA', 'BHI', 'ISRG',", "'MTRN', 'MTRX', 'MTSC', 'MTX', 'MU', 'MUR', 'MW', 'MWIV', 'MWV', 'MWW', 'MYE', 'MYL', 'NAFC',", "'EQT', 'CA', 'DVA', 'XLNX', 'EMN', 'SIAL', 'WEC', 'CCE', 'WDC', 'LIFE', 'MCO', 'HIG', 'JWN',", "'KLAC', 'AEE', 'BWA', 'SPLS', 'FIS', 'SRCL', 'EXPD', 'COL', 'VRSN', 'FMC', 'ADSK', 'PFG', 'WYN',", "'ONE', 'OPEN', 'OPLK', 'OPNT', 'ORB', 'ORCL', 'ORI', 'ORIT', 'ORLY', 'ORN', 'OSG', 'OSIS', 'OSK',", "'CKH', 'CKP', 'CL', 'CLC', 'CLD', 'CLF', 'CLGX', 'CLH', 'CLI', 'CLMS', 'CLP', 'CLW', 'CLX',", "'LOW', 'NOV', 'KMB', 'APA', 'HPQ', 'PNC', 'COF', 'BAX', 'TJX', 'CELG', 'DTV', 'DE', 'DHR',", "'BMR', 'BMS', 'BMY', 'BOBE', 'BOH', 'BPFH', 'BR', 'BRC', 'BRCM', 'BRKL', 'BRKS', 'BRLI', 'BRO',", "'ACE', 'ACI', 'ACIW', 'ACM', 'ACN', 'ACO', 'ACXM', 'ADBE', 'ADI', 'ADM', 'ADP', 'ADS', 'ADSK',", "'CYT', 'D', 'DAKT', 'DAR', 'DBD', 'DCI', 'DCOM', 'DD', 'DDD', 'DE', 'DECK', 'DEL', 'DELL',", "'MKSI', 'MLHR', 'MLI', 'MLM', 'MMC', 'MMM', 'MMS', 'MMSI', 'MNRO', 'MNST', 'MNTA', 'MO', 'MOH',", "'MWV', 'MWW', 'MYE', 'MYL', 'NAFC', 'NANO', 'NATI', 'NAVG', 'NBL', 'NBR', 'NBTB', 'NCI', 'NCIT',", "'MSFT', 'IBM', 'T', 'GOOG', 'PG', 'JNJ', 'PFE', 'WFC', 'BRK.B', 'JPM', 'PM', 'KO', 'MRK',", "'PEP', 'PERY', 'PES', 'PETM', 'PETS', 'PFE', 'PFG', 'PFS', 'PG', 'PGR', 'PH', 'PHM', 'PII',", "'HLIT', 'HLX', 'HMA', 'HME', 'HMN', 'HMSY', 'HNI', 'HNT', 'HNZ', 'HOG', 'HOLX', 'HOMB', 'HON',", "'GSM', 'GT', 'GTAT', 'GTIV', 'GTY', 
'GVA', 'GWW', 'GXP', 'GY', 'HAE', 'HAFC', 'HAIN', 'HAL',", "'NBL', 'NOC', 'CMI', 'CCL', 'PEG', 'INTU', 'PLD', 'SYK', 'TROW', 'COH', 'ADBE', 'HES', 'ETN',", "'VLY', 'VMC', 'VMI', 'VNO', 'VOXX', 'VPFG', 'VPHM', 'VRSN', 'VRTS', 'VRTU', 'VRTX', 'VSAT', 'VSH',", "'CMA', 'BTU', 'WIN', 'JOY', 'HBAN', 'TSO', 'HRS', 'LRCX', 'PNW', 'DHI', 'ARG', 'LEN', 'QEP',", "'RBCN', 'RBN', 'RCII', 'RDC', 'RE', 'RECN', 'REG', 'REGN', 'RF', 'RFMD', 'RGA', 'RGLD', 'RGR',", "'PRGS', 'PRU', 'PRX', 'PRXL', 'PSA', 'PSB', 'PSEC', 'PSEM', 'PSS', 'PSSI', 'PSX', 'PTEN', 'PULS',", "'NTAP', 'ZMH', 'LTD', 'ADI', 'PGR', 'HST', 'FAST', 'MTB', 'HOT', 'RRC', 'HUM', 'CERN', 'CAG',", "'MTB', 'MTD', 'MTH', 'MTRN', 'MTRX', 'MTSC', 'MTX', 'MU', 'MUR', 'MW', 'MWIV', 'MWV', 'MWW',", "'ANF', 'ANN', 'ANR', 'ANSS', 'AOL', 'AON', 'AOS', 'APA', 'APC', 'APD', 'APEI', 'APH', 'APOG',", "'NSC', 'NSIT', 'NSP', 'NSR', 'NTAP', 'NTCT', 'NTGR', 'NTLS', 'NTRI', 'NTRS', 'NU', 'NUE', 'NUVA',", "'RL', 'RLI', 'RMD', 'ROCK', 'ROG', 'ROK', 'ROL', 'ROP', 'ROSE', 'ROST', 'ROVI', 'RPM', 'RRC',", "'VIAB', 'VICR', 'VIVO', 'VLO', 'VLTR', 'VLY', 'VMC', 'VMI', 'VNO', 'VOXX', 'VPFG', 'VPHM', 'VRSN',", "'DISCA', 'HCN', 'BBBY', 'TEL', 'SRE', 'MSI', 'ROST', 'DELL', 'CTXS', 'FITB', 'RAI', 'PCAR', 'WY',", "'RPM', 'RRC', 'RRD', 'RRGB', 'RS', 'RSG', 'RSH', 'RSTI', 'RSYS', 'RT', 'RTEC', 'RTI', 'RTN',", "'CMCSA', 'CME', 'CMG', 'CMI', 'CMN', 'CMP', 'CMS', 'CMTL', 'CNC', 'CNK', 'CNL', 'CNMD', 'CNP',", "'NYT', 'NYX', 'O', 'OCR', 'ODFL', 'ODP', 'OFC', 'OGE', 'OHI', 'OI', 'OII', 'OIS', 'OKE',", "'KR', 'SWK', 'K', 'TDC', 'SHW', 'ESV', 'SYMC', 'PH', 'GWW', 'EW', 'ETR', 'NUE', 'SWN',", "'FWRD', 'GAS', 'GB', 'GBCI', 'GCI', 'GCO', 'GD', 'GDI', 'GE', 'GEF', 'GEO', 'GES', 'GFF',", "'SKYW', 'SLAB', 'SLB', 'SLG', 'SLGN', 'SLH', 'SLM', 'SLXP', 'SM', 'SMA', 'SMCI', 'SMG', 'SMP',", "'NILE', 'NJR', 'NKE', 'NNN', 'NOC', 'NOV', 'NP', 'NPBC', 'NPK', 'NPO', 'NRG', 'NSC', 'NSIT',", "'CNL', 'CNMD', 'CNP', 'CNQR', 'CNW', 'CNX', 'COCO', 'COF', 'COG', 'COH', 'COHU', 'COL', 
'COLB',", "'BK', 'BKE', 'BKH', 'BKI', 'BKMU', 'BKS', 'BLK', 'BLKB', 'BLL', 'BMC', 'BMI', 'BMR', 'BMS',", "'TKR', 'TLAB', 'TMK', 'TMO', 'TMP', 'TNC', 'TOL', 'TPX', 'TQNT', 'TR', 'TRAK', 'TRIP', 'TRLG',", "'DTSI', 'DTV', 'DUK', 'DV', 'DVA', 'DVN', 'DW', 'DWA', 'DY', 'EA', 'EAT', 'EBAY', 'EBIX',", "'AMP', 'AMSF', 'AMSG', 'AMT', 'AMZN', 'AN', 'ANDE', 'ANF', 'ANN', 'ANR', 'ANSS', 'AOL', 'AON',", "'UBSI', 'UCBI', 'UDR', 'UEIC', 'UFCS', 'UFPI', 'UFS', 'UGI', 'UHS', 'UHT', 'UIL', 'UMBF', 'UMPQ',", "'WFC', 'WFM', 'WGL', 'WGO', 'WHR', 'WIBC', 'WIN', 'WIRE', 'WLP', 'WM', 'WMB', 'WMS', 'WMT',", "'FEIC', 'FELE', 'FFBC', 'FFIN', 'FFIV', 'FHN', 'FICO', 'FII', 'FINL', 'FIRE', 'FIS', 'FISV', 'FITB',", "'AFFX', 'AFG', 'AFL', 'AGCO', 'AGN', 'AGP', 'AGYS', 'AHL', 'AHS', 'AIG', 'AINV', 'AIR', 'AIRM',", "'BDC', 'BDX', 'BEAM', 'BEAV', 'BELFB', 'BEN', 'BFS', 'BGC', 'BGFV', 'BGG', 'BGS', 'BH', 'BHE',", "'DNB', 'DNR', 'DO', 'DOV', 'DOW', 'DPS', 'DRC', 'DRE', 'DRH', 'DRI', 'DRIV', 'DRQ', 'DSPG',", "'PMTC', 'PMTI', 'PNC', 'PNFP', 'PNK', 'PNM', 'PNR', 'PNRA', 'PNW', 'PNY', 'POL', 'POM', 'POOL',", "'TTC', 'TTEC', 'TTEK', 'TTI', 'TTMI', 'TTWO', 'TUES', 'TUP', 'TW', 'TWC', 'TWGP', 'TWTC', 'TWX',", "'EBIX', 'EBS', 'ECL', 'ECPG', 'ED', 'EE', 'EFX', 'EGL', 'EGN', 'EGP', 'EHTH', 'EIG', 'EIX',", "'CBU', 'CCC', 'CCE', 'CCI', 'CCL', 'CCMP', 'CCRN', 'CDI', 'CDNS', 'CDR', 'CEB', 'CEC', 'CECO',", "'EZPW', 'F', 'FAF', 'FARO', 'FAST', 'FBHS', 'FBP', 'FCF', 'FCFS', 'FCN', 'FCS', 'FCX', 'FDO',", "'FII', 'FINL', 'FIRE', 'FIS', 'FISV', 'FITB', 'FIX', 'FL', 'FLIR', 'FLO', 'FLR', 'FLS', 'FMBI',", "'X', 'SEE', 'TER', 'THC', 'GME', 'GNW', 'FHN', 'ETFC', 'AMD', 'R', 'FII', 'RRD', 'BIG',", "'VNO', 'OMC', 'CI', 'XEL', 'A', 'CAH', 'AET', 'STJ', 'AVB', 'L', 'IR', 'PXD', 'KR',", "'VTR', 'SYY', 'PCG', 'HNZ', 'ADM', 'BRCM', 'ED', 'PPG', 'CME', 'LYB', 'APD', 'VLO', 'EQR',", "'AZO', 'AZZ', 'B', 'BA', 'BABY', 'BAC', 'BAS', 'BAX', 'BBBY', 'BBG', 'BBOX', 'BBT', 'BBY',", "'LDL', 'LDR', 'LECO', 'LEG', 'LEN', 'LFUS', 'LG', 'LH', 
'LHCG', 'LHO', 'LIFE', 'LII', 'LINC',", "'NWL', 'HSP', 'EXPE', 'XRAY', 'UNM', 'MAS', 'MWV', 'SNI', 'PWR', 'JEC', 'PHM', 'IRM', 'HP',", "'SEIC', 'SENEA', 'SF', 'SFD', 'SFG', 'SFNC', 'SFY', 'SGMS', 'SGY', 'SHAW', 'SHFL', 'SHLM', 'SHOO',", "'WPO', 'LXK', 'ANR', 'FSLR', 'DV', 'TIE'] ALL_TICKERS = ['A', 'AA', 'AAN', 'AAON', 'AAP',", "'ALXN', 'AM', 'AMAT', 'AMCX', 'AMD', 'AME', 'AMED', 'AMG', 'AMGN', 'AMP', 'AMSF', 'AMSG', 'AMT',", "'XLS', 'XOM', 'XOXO', 'XRAY', 'XRX', 'XYL', 'Y', 'YHOO', 'YUM', 'ZBRA', 'ZEP', 'ZEUS', 'ZLC',", "'GME', 'GNW', 'FHN', 'ETFC', 'AMD', 'R', 'FII', 'RRD', 'BIG', 'AN', 'WPO', 'LXK', 'ANR',", "'STRI', 'STT', 'STX', 'STZ', 'SUN', 'SUP', 'SUPX', 'SUSQ', 'SVU', 'SWI', 'SWK', 'SWKS', 'SWM',", "'KSWS', 'KWK', 'KWR', 'L', 'LAD', 'LAMR', 'LANC', 'LAWS', 'LDL', 'LDR', 'LECO', 'LEG', 'LEN',", "'WTS', 'WU', 'WWD', 'WWW', 'WY', 'WYN', 'WYNN', 'X', 'XEC', 'XEL', 'XL', 'XLNX', 'XLS',", "'CTSH', 'MRO', 'DFS', 'NSC', 'MCK', 'CB', 'KMI', 'STT', 'PSA', 'BHI', 'ISRG', 'GLW', 'CRM',", "'LNN', 'LNT', 'LO', 'LOGM', 'LOW', 'LPNT', 'LPS', 'LPSN', 'LPX', 'LQDT', 'LRCX', 'LRY', 'LSI',", "'SHW', 'ESV', 'SYMC', 'PH', 'GWW', 'EW', 'ETR', 'NUE', 'SWN', 'MAT', 'CBE', 'NU', 'AMP',", "'ACO', 'ACXM', 'ADBE', 'ADI', 'ADM', 'ADP', 'ADS', 'ADSK', 'ADTN', 'ADVS', 'AEE', 'AEGN', 'AEIS',", "'MIG', 'MINI', 'MJN', 'MKC', 'MKSI', 'MLHR', 'MLI', 'MLM', 'MMC', 'MMM', 'MMS', 'MMSI', 'MNRO',", "'SMG', 'SMP', 'SMRT', 'SMTC', 'SNA', 'SNCR', 'SNDK', 'SNH', 'SNI', 'SNPS', 'SNV', 'SNX', 'SO',", "'POM', 'PBCT', 'NFX', 'RDC', 'SNA', 'GCI', 'URBN', 'NBR', 'TEG', 'EA', 'HRL', 'SWY', 'LSI',", "'AYI', 'AZO', 'AZZ', 'B', 'BA', 'BABY', 'BAC', 'BAS', 'BAX', 'BBBY', 'BBG', 'BBOX', 'BBT',", "'RGLD', 'RGR', 'RGS', 'RHI', 'RHT', 'RJF', 'RKT', 'RL', 'RLI', 'RMD', 'ROCK', 'ROG', 'ROK',", "'F', 'FCX', 'MET', 'BIIB', 'EMR', 'APC', 'NKE', 'DOW', 'LOW', 'NOV', 'KMB', 'APA', 'HPQ',", "'AXP', 'UPS', 'GS', 'ESRX', 'GILD', 'NWSA', 'MON', 'MA', 'LLY', 'CL', 'BA', 'DD', 'HON',", "'ALK', 'ALL', 'ALOG', 'ALTR', 'ALXN', 
'AM', 'AMAT', 'AMCX', 'AMD', 'AME', 'AMED', 'AMG', 'AMGN',", "'GPS', 'GS', 'GSM', 'GT', 'GTAT', 'GTIV', 'GTY', 'GVA', 'GWW', 'GXP', 'GY', 'HAE', 'HAFC',", "'DIOD', 'DIS', 'DISCA', 'DKS', 'DLTR', 'DLX', 'DM', 'DMND', 'DNB', 'DNR', 'DO', 'DOV', 'DOW',", "'CA', 'DVA', 'XLNX', 'EMN', 'SIAL', 'WEC', 'CCE', 'WDC', 'LIFE', 'MCO', 'HIG', 'JWN', 'FRX',", "'TIE', 'TIF', 'TJX', 'TKR', 'TLAB', 'TMK', 'TMO', 'TMP', 'TNC', 'TOL', 'TPX', 'TQNT', 'TR',", "'ACC', 'ACE', 'ACI', 'ACIW', 'ACM', 'ACN', 'ACO', 'ACXM', 'ADBE', 'ADI', 'ADM', 'ADP', 'ADS',", "'DOW', 'LOW', 'NOV', 'KMB', 'APA', 'HPQ', 'PNC', 'COF', 'BAX', 'TJX', 'CELG', 'DTV', 'DE',", "'OSK', 'OXM', 'OXY', 'OZRK', 'PACW', 'PAY', 'PAYX', 'PB', 'PBCT', 'PBH', 'PBI', 'PBY', 'PCAR',", "'ISRG', 'IT', 'ITG', 'ITRI', 'ITT', 'ITW', 'IVAC', 'IVC', 'IVZ', 'JACK', 'JAH', 'JAKK', 'JBHT',", "'CB', 'CBB', 'CBE', 'CBEY', 'CBG', 'CBK', 'CBM', 'CBOE', 'CBR', 'CBRL', 'CBS', 'CBSH', 'CBST',", "'ALGN', 'ALGT', 'ALK', 'ALL', 'ALOG', 'ALTR', 'ALXN', 'AM', 'AMAT', 'AMCX', 'AMD', 'AME', 'AMED',", "'EXPD', 'EXPE', 'EXPO', 'EXR', 'EZPW', 'F', 'FAF', 'FARO', 'FAST', 'FBHS', 'FBP', 'FCF', 'FCFS',", "'MRO', 'DFS', 'NSC', 'MCK', 'CB', 'KMI', 'STT', 'PSA', 'BHI', 'ISRG', 'GLW', 'CRM', 'ALL',", "'UTIW', 'UTX', 'UVV', 'V', 'VAL', 'VAR', 'VCI', 'VCLK', 'VDSI', 'VECO', 'VFC', 'VIAB', 'VICR',", "'GPN', 'GPOR', 'GPS', 'GS', 'GSM', 'GT', 'GTAT', 'GTIV', 'GTY', 'GVA', 'GWW', 'GXP', 'GY',", "'XEC', 'XEL', 'XL', 'XLNX', 'XLS', 'XOM', 'XOXO', 'XRAY', 'XRX', 'XYL', 'Y', 'YHOO', 'YUM',", "'PBCT', 'NFX', 'RDC', 'SNA', 'GCI', 'URBN', 'NBR', 'TEG', 'EA', 'HRL', 'SWY', 'LSI', 'TSS',", "'MNTA', 'MO', 'MOH', 'MOLX', 'MON', 'MOS', 'MOV', 'MPC', 'MPW', 'MPWR', 'MRCY', 'MRK', 'MRO',", "'MCF', 'MCHP', 'MCK', 'MCO', 'MCRI', 'MCRL', 'MCRS', 'MCS', 'MCY', 'MD', 'MDC', 'MDCO', 'MDP',", "'THOR', 'THS', 'TIBX', 'TIE', 'TIF', 'TJX', 'TKR', 'TLAB', 'TMK', 'TMO', 'TMP', 'TNC', 'TOL',", "'FCS', 'FCX', 'FDO', 'FDS', 'FDX', 'FE', 'FEIC', 'FELE', 'FFBC', 'FFIN', 'FFIV', 'FHN', 'FICO',", 
"'PSEM', 'PSS', 'PSSI', 'PSX', 'PTEN', 'PULS', 'PVA', 'PVH', 'PVTB', 'PWR', 'PX', 'PXD', 'PXP',", "'SBUX', 'AIG', 'F', 'FCX', 'MET', 'BIIB', 'EMR', 'APC', 'NKE', 'DOW', 'LOW', 'NOV', 'KMB',", "'IEX', 'IFF', 'IFSIA', 'IGT', 'IGTE', 'IILG', 'IIVI', 'IM', 'IN', 'INDB', 'INFA', 'INGR', 'ININ',", "'ANN', 'ANR', 'ANSS', 'AOL', 'AON', 'AOS', 'APA', 'APC', 'APD', 'APEI', 'APH', 'APOG', 'APOL',", "'SLAB', 'SLB', 'SLG', 'SLGN', 'SLH', 'SLM', 'SLXP', 'SM', 'SMA', 'SMCI', 'SMG', 'SMP', 'SMRT',", "'TYC', 'TYL', 'TYPE', 'UA', 'UBA', 'UBSI', 'UCBI', 'UDR', 'UEIC', 'UFCS', 'UFPI', 'UFS', 'UGI',", "'ADBE', 'ADI', 'ADM', 'ADP', 'ADS', 'ADSK', 'ADTN', 'ADVS', 'AEE', 'AEGN', 'AEIS', 'AEO', 'AEP',", "'KOPN', 'KR', 'KRA', 'KRC', 'KRG', 'KS', 'KSS', 'KSU', 'KSWS', 'KWK', 'KWR', 'L', 'LAD',", "'RAX', 'RBC', 'RBCN', 'RBN', 'RCII', 'RDC', 'RE', 'RECN', 'REG', 'REGN', 'RF', 'RFMD', 'RGA',", "'JBL', 'IGT', 'JCP', 'CVC', 'ATI', 'SAI', 'PKI', 'WPX', 'BMS', 'AVY', 'HAR', 'OI', 'AIZ',", "'ESV', 'ETFC', 'ETH', 'ETN', 'ETR', 'EV', 'EW', 'EWBC', 'EXAR', 'EXC', 'EXH', 'EXLS', 'EXP',", "'KMB', 'KMI', 'KMPR', 'KMT', 'KMX', 'KND', 'KNX', 'KO', 'KOP', 'KOPN', 'KR', 'KRA', 'KRC',", "'VZ', 'WMT', 'ORCL', 'INTC', 'PEP', 'ABT', 'QCOM', 'CSCO', 'SLB', 'C', 'CMCSA', 'BAC', 'DIS',", "'SCL', 'SCOR', 'SCSC', 'SCSS', 'SE', 'SEE', 'SEIC', 'SENEA', 'SF', 'SFD', 'SFG', 'SFNC', 'SFY',", "'BEN', 'ECL', 'PPL', 'AON', 'WFM', 'BXP', 'YHOO', 'S', 'NBL', 'NOC', 'CMI', 'CCL', 'PEG',", "'BHE', 'BHI', 'BID', 'BIG', 'BIIB', 'BJRI', 'BK', 'BKE', 'BKH', 'BKI', 'BKMU', 'BKS', 'BLK',", "'TMO', 'TMP', 'TNC', 'TOL', 'TPX', 'TQNT', 'TR', 'TRAK', 'TRIP', 'TRLG', 'TRMB', 'TRMK', 'TRN',", "'NFP', 'NFX', 'NI', 'NILE', 'NJR', 'NKE', 'NNN', 'NOC', 'NOV', 'NP', 'NPBC', 'NPK', 'NPO',", "'SKT', 'SKX', 'SKYW', 'SLAB', 'SLB', 'SLG', 'SLGN', 'SLH', 'SLM', 'SLXP', 'SM', 'SMA', 'SMCI',", "'KIRK', 'KLAC', 'KLIC', 'KMB', 'KMI', 'KMPR', 'KMT', 'KMX', 'KND', 'KNX', 'KO', 'KOP', 'KOPN',", "'SPW', 'SRCL', 'SRDX', 'SRE', 'SSD', 'SSI', 'SSP', 'SSS', 
'STBA', 'STC', 'STE', 'STI', 'STJ',", "'PLCE', 'PLCM', 'PLD', 'PLFE', 'PLL', 'PLT', 'PLXS', 'PM', 'PMC', 'PMTC', 'PMTI', 'PNC', 'PNFP',", "'UNF', 'UNFI', 'UNH', 'UNM', 'UNP', 'UNS', 'UNT', 'UNTD', 'UPS', 'URBN', 'URI', 'URS', 'USB',", "'LNC', 'VAR', 'BLL', 'FLS', 'LUV', 'KMX', 'FDO', 'WHR', 'MCHP', 'SCG', 'DNR', 'CFN', 'CPB',", "'USB', 'AMGN', 'CVS', 'MMM', 'EMC', 'UNH', 'BMY', 'UNP', 'CAT', 'EBAY', 'AXP', 'UPS', 'GS',", "'ATO', 'ATR', 'ATU', 'ATW', 'AVA', 'AVAV', 'AVB', 'AVD', 'AVID', 'AVP', 'AVT', 'AVY', 'AWR',", "'VPFG', 'VPHM', 'VRSN', 'VRTS', 'VRTU', 'VRTX', 'VSAT', 'VSH', 'VSI', 'VTR', 'VVC', 'VZ', 'WAB',", "'SAH', 'SAI', 'SAM', 'SBRA', 'SBUX', 'SCG', 'SCHL', 'SCHW', 'SCI', 'SCL', 'SCOR', 'SCSC', 'SCSS',", "'VRTS', 'VRTU', 'VRTX', 'VSAT', 'VSH', 'VSI', 'VTR', 'VVC', 'VZ', 'WAB', 'WABC', 'WAFD', 'WAG',", "'CAM', 'VNO', 'OMC', 'CI', 'XEL', 'A', 'CAH', 'AET', 'STJ', 'AVB', 'L', 'IR', 'PXD',", "'K', 'KALU', 'KAMN', 'KBH', 'KBR', 'KDN', 'KELYA', 'KEX', 'KEY', 'KFY', 'KIM', 'KIRK', 'KLAC',", "'SYY', 'T', 'TAP', 'TBI', 'TCB', 'TCBI', 'TCO', 'TDC', 'TDS', 'TDW', 'TDY', 'TE', 'TECD',", "'MA', 'LLY', 'CL', 'BA', 'DD', 'HON', 'SPG', 'DUK', 'ACN', 'MDT', 'COST', 'TWX', 'TGT',", "'PCP', 'PCTI', 'PDCE', 'PDCO', 'PEET', 'PEG', 'PEI', 'PEP', 'PERY', 'PES', 'PETM', 'PETS', 'PFE',", "'TTI', 'TTMI', 'TTWO', 'TUES', 'TUP', 'TW', 'TWC', 'TWGP', 'TWTC', 'TWX', 'TXI', 'TXN', 'TXRH',", "'BDX', 'MHP', 'STI', 'LO', 'M', 'MJN', 'EIX', 'EL', 'DISCA', 'HCN', 'BBBY', 'TEL', 'SRE',", "'FE', 'VTR', 'SYY', 'PCG', 'HNZ', 'ADM', 'BRCM', 'ED', 'PPG', 'CME', 'LYB', 'APD', 'VLO',", "'IRF', 'IRM', 'ISCA', 'ISIL', 'ISRG', 'IT', 'ITG', 'ITRI', 'ITT', 'ITW', 'IVAC', 'IVC', 'IVZ',", "'KLIC', 'KMB', 'KMI', 'KMPR', 'KMT', 'KMX', 'KND', 'KNX', 'KO', 'KOP', 'KOPN', 'KR', 'KRA',", "'IPG', 'IFF', 'GAS', 'STZ', 'HRB', 'XYL', 'TSN', 'FOSL', 'DO', 'BBY', 'LUK', 'CTAS', 'HAS',", "'XOXO', 'XRAY', 'XRX', 'XYL', 'Y', 'YHOO', 'YUM', 'ZBRA', 'ZEP', 'ZEUS', 'ZLC', 'ZMH', 'ZQK',", "'TDC', 'SHW', 'ESV', 'SYMC', 'PH', 
'GWW', 'EW', 'ETR', 'NUE', 'SWN', 'MAT', 'CBE', 'NU',", "'ANDE', 'ANF', 'ANN', 'ANR', 'ANSS', 'AOL', 'AON', 'AOS', 'APA', 'APC', 'APD', 'APEI', 'APH',", "'CIR', 'CKH', 'CKP', 'CL', 'CLC', 'CLD', 'CLF', 'CLGX', 'CLH', 'CLI', 'CLMS', 'CLP', 'CLW',", "'DFS', 'NSC', 'MCK', 'CB', 'KMI', 'STT', 'PSA', 'BHI', 'ISRG', 'GLW', 'CRM', 'ALL', 'SE',", "'HITK', 'HITT', 'HIW', 'HLIT', 'HLX', 'HMA', 'HME', 'HMN', 'HMSY', 'HNI', 'HNT', 'HNZ', 'HOG',", "'S', 'NBL', 'NOC', 'CMI', 'CCL', 'PEG', 'INTU', 'PLD', 'SYK', 'TROW', 'COH', 'ADBE', 'HES',", "'MASI', 'MAT', 'MATW', 'MATX', 'MCD', 'MCF', 'MCHP', 'MCK', 'MCO', 'MCRI', 'MCRL', 'MCRS', 'MCS',", "'FISV', 'ORLY', 'MUR', 'OKE', 'MYL', 'BF.B', 'MAR', 'ROK', 'CHK', 'ABC', 'ICE', 'HOG', 'XRX',", "'CNX', 'COCO', 'COF', 'COG', 'COH', 'COHU', 'COL', 'COLB', 'COO', 'COP', 'COST', 'COV', 'CPB',", "'ASNA', 'ASTE', 'ATI', 'ATK', 'ATMI', 'ATML', 'ATNI', 'ATO', 'ATR', 'ATU', 'ATW', 'AVA', 'AVAV',", "'TIF', 'TJX', 'TKR', 'TLAB', 'TMK', 'TMO', 'TMP', 'TNC', 'TOL', 'TPX', 'TQNT', 'TR', 'TRAK',", "'VTR', 'VVC', 'VZ', 'WAB', 'WABC', 'WAFD', 'WAG', 'WAT', 'WBS', 'WBSN', 'WCG', 'WCN', 'WDC',", "'DM', 'DMND', 'DNB', 'DNR', 'DO', 'DOV', 'DOW', 'DPS', 'DRC', 'DRE', 'DRH', 'DRI', 'DRIV',", "'LRCX', 'LRY', 'LSI', 'LSTR', 'LTC', 'LTD', 'LTM', 'LUFK', 'LUK', 'LUV', 'LXK', 'LXP', 'LXU',", "'BBY', 'BC', 'BCO', 'BCOR', 'BCPC', 'BCR', 'BDC', 'BDX', 'BEAM', 'BEAV', 'BELFB', 'BEN', 'BFS',", "'PFG', 'PFS', 'PG', 'PGR', 'PH', 'PHM', 'PII', 'PJC', 'PKE', 'PKG', 'PKI', 'PKY', 'PL',", "'PEP', 'ABT', 'QCOM', 'CSCO', 'SLB', 'C', 'CMCSA', 'BAC', 'DIS', 'MCD', 'AMZN', 'HD', 'KFT',", "'JCP', 'CVC', 'ATI', 'SAI', 'PKI', 'WPX', 'BMS', 'AVY', 'HAR', 'OI', 'AIZ', 'NFLX', 'DF',", "'SPLS', 'SPN', 'SPPI', 'SPTN', 'SPW', 'SRCL', 'SRDX', 'SRE', 'SSD', 'SSI', 'SSP', 'SSS', 'STBA',", "'NOC', 'NOV', 'NP', 'NPBC', 'NPK', 'NPO', 'NRG', 'NSC', 'NSIT', 'NSP', 'NSR', 'NTAP', 'NTCT',", "'RE', 'RECN', 'REG', 'REGN', 'RF', 'RFMD', 'RGA', 'RGLD', 'RGR', 'RGS', 'RHI', 'RHT', 'RJF',", "'FIX', 'FL', 
'FLIR', 'FLO', 'FLR', 'FLS', 'FMBI', 'FMC', 'FMER', 'FNB', 'FNF', 'FNFG', 'FNGN',", "'QSFT', 'QSII', 'R', 'RAH', 'RAI', 'RAX', 'RBC', 'RBCN', 'RBN', 'RCII', 'RDC', 'RE', 'RECN',", "'UNT', 'UNTD', 'UPS', 'URBN', 'URI', 'URS', 'USB', 'USMO', 'USTR', 'UTEK', 'UTHR', 'UTI', 'UTIW',", "'COG', 'COH', 'COHU', 'COL', 'COLB', 'COO', 'COP', 'COST', 'COV', 'CPB', 'CPLA', 'CPRT', 'CPSI',", "'ITRI', 'ITT', 'ITW', 'IVAC', 'IVC', 'IVZ', 'JACK', 'JAH', 'JAKK', 'JBHT', 'JBL', 'JBLU', 'JBT',", "'UMBF', 'UMPQ', 'UNF', 'UNFI', 'UNH', 'UNM', 'UNP', 'UNS', 'UNT', 'UNTD', 'UPS', 'URBN', 'URI',", "'ACM', 'ACN', 'ACO', 'ACXM', 'ADBE', 'ADI', 'ADM', 'ADP', 'ADS', 'ADSK', 'ADTN', 'ADVS', 'AEE',", "'SBRA', 'SBUX', 'SCG', 'SCHL', 'SCHW', 'SCI', 'SCL', 'SCOR', 'SCSC', 'SCSS', 'SE', 'SEE', 'SEIC',", "'UNH', 'BMY', 'UNP', 'CAT', 'EBAY', 'AXP', 'UPS', 'GS', 'ESRX', 'GILD', 'NWSA', 'MON', 'MA',", "'NWBI', 'NWE', 'NWL', 'NWN', 'NWSA', 'NX', 'NYB', 'NYT', 'NYX', 'O', 'OCR', 'ODFL', 'ODP',", "'PRU', 'PRX', 'PRXL', 'PSA', 'PSB', 'PSEC', 'PSEM', 'PSS', 'PSSI', 'PSX', 'PTEN', 'PULS', 'PVA',", "'HAFC', 'HAIN', 'HAL', 'HAR', 'HAS', 'HAYN', 'HBAN', 'HBHC', 'HBI', 'HCBK', 'HCC', 'HCN', 'HCP',", "'GNCMA', 'GNTX', 'GNW', 'GOOG', 'GPC', 'GPI', 'GPN', 'GPOR', 'GPS', 'GS', 'GSM', 'GT', 'GTAT',", "'CHK', 'CHRW', 'CHS', 'CI', 'CIEN', 'CINF', 'CIR', 'CKH', 'CKP', 'CL', 'CLC', 'CLD', 'CLF',", "'DSPG', 'DTE', 'DTSI', 'DTV', 'DUK', 'DV', 'DVA', 'DVN', 'DW', 'DWA', 'DY', 'EA', 'EAT',", "'L', 'IR', 'PXD', 'KR', 'SWK', 'K', 'TDC', 'SHW', 'ESV', 'SYMC', 'PH', 'GWW', 'EW',", "'CEB', 'CEC', 'CECO', 'CELG', 'CELL', 'CENX', 'CERN', 'CEVA', 'CF', 'CFN', 'CFR', 'CGNX', 'CGX',", "'CSL', 'CSTR', 'CSX', 'CTAS', 'CTL', 'CTS', 'CTSH', 'CTXS', 'CUB', 'CUZ', 'CVBF', 'CVC', 'CVD',", "'MWV', 'SNI', 'PWR', 'JEC', 'PHM', 'IRM', 'HP', 'CSC', 'SUN', 'TMK', 'FTR', 'NRG', 'IPG',", "'FRT', 'FRX', 'FSLR', 'FSP', 'FST', 'FTI', 'FTR', 'FUL', 'FULT', 'FWRD', 'GAS', 'GB', 'GBCI',", "'GNTX', 'GNW', 'GOOG', 'GPC', 'GPI', 'GPN', 'GPOR', 'GPS', 'GS', 
'GSM', 'GT', 'GTAT', 'GTIV',", "'FTI', 'RHT', 'WU', 'STX', 'DOV', 'ALTR', 'WPI', 'HSY', 'ROP', 'PAYX', 'GPS', 'SNDK', 'DTE',", "'ESL', 'ESRX', 'ESS', 'ESV', 'ETFC', 'ETH', 'ETN', 'ETR', 'EV', 'EW', 'EWBC', 'EXAR', 'EXC',", "'BKE', 'BKH', 'BKI', 'BKMU', 'BKS', 'BLK', 'BLKB', 'BLL', 'BMC', 'BMI', 'BMR', 'BMS', 'BMY',", "'ININ', 'INT', 'INTC', 'INTU', 'IO', 'IP', 'IPAR', 'IPCC', 'IPCM', 'IPG', 'IPHS', 'IPI', 'IR',", "'SRE', 'SSD', 'SSI', 'SSP', 'SSS', 'STBA', 'STC', 'STE', 'STI', 'STJ', 'STL', 'STLD', 'STMP',", "'STX', 'STZ', 'SUN', 'SUP', 'SUPX', 'SUSQ', 'SVU', 'SWI', 'SWK', 'SWKS', 'SWM', 'SWN', 'SWS',", "'CATM', 'CATO', 'CATY', 'CB', 'CBB', 'CBE', 'CBEY', 'CBG', 'CBK', 'CBM', 'CBOE', 'CBR', 'CBRL',", "'ABT', 'ACAT', 'ACC', 'ACE', 'ACI', 'ACIW', 'ACM', 'ACN', 'ACO', 'ACXM', 'ADBE', 'ADI', 'ADM',", "'CI', 'CIEN', 'CINF', 'CIR', 'CKH', 'CKP', 'CL', 'CLC', 'CLD', 'CLF', 'CLGX', 'CLH', 'CLI',", "'LOGM', 'LOW', 'LPNT', 'LPS', 'LPSN', 'LPX', 'LQDT', 'LRCX', 'LRY', 'LSI', 'LSTR', 'LTC', 'LTD',", "'JEC', 'PHM', 'IRM', 'HP', 'CSC', 'SUN', 'TMK', 'FTR', 'NRG', 'IPG', 'IFF', 'GAS', 'STZ',", "'TEL', 'SRE', 'MSI', 'ROST', 'DELL', 'CTXS', 'FITB', 'RAI', 'PCAR', 'WY', 'SCHW', 'VFC', 'WM',", "'CLX', 'CMA', 'CMC', 'CMCSA', 'CME', 'CMG', 'CMI', 'CMN', 'CMP', 'CMS', 'CMTL', 'CNC', 'CNK',", "'FST', 'FTI', 'FTR', 'FUL', 'FULT', 'FWRD', 'GAS', 'GB', 'GBCI', 'GCI', 'GCO', 'GD', 'GDI',", "'CRM', 'CROX', 'CRR', 'CRS', 'CRUS', 'CRVL', 'CRY', 'CSC', 'CSCO', 'CSGS', 'CSH', 'CSL', 'CSTR',", "'JBLU', 'JBT', 'JCI', 'JCOM', 'JCP', 'JDAS', 'JDSU', 'JEC', 'JEF', 'JJSF', 'JKHY', 'JLL', 'JNJ',", "'LRCX', 'PNW', 'DHI', 'ARG', 'LEN', 'QEP', 'EFX', 'CVH', 'CLF', 'CBG', 'CINF', 'NWL', 'HSP',", "'DELL', 'CTXS', 'FITB', 'RAI', 'PCAR', 'WY', 'SCHW', 'VFC', 'WM', 'CF', 'AZO', 'AMAT', 'CAM',", "'WWD', 'WWW', 'WY', 'WYN', 'WYNN', 'X', 'XEC', 'XEL', 'XL', 'XLNX', 'XLS', 'XOM', 'XOXO',", "'CLI', 'CLMS', 'CLP', 'CLW', 'CLX', 'CMA', 'CMC', 'CMCSA', 'CME', 'CMG', 'CMI', 'CMN', 'CMP',", "'OCR', 'ODFL', 'ODP', 'OFC', 
'OGE', 'OHI', 'OI', 'OII', 'OIS', 'OKE', 'OLN', 'OMC', 'OMCL',", "'HAL', 'HAR', 'HAS', 'HAYN', 'HBAN', 'HBHC', 'HBI', 'HCBK', 'HCC', 'HCN', 'HCP', 'HCSG', 'HD',", "ALL_TICKERS = ['A', 'AA', 'AAN', 'AAON', 'AAP', 'AAPL', 'ABAX', 'ABC', 'ABFS', 'ABM', 'ABT',", "'SIAL', 'SIG', 'SIGI', 'SIGM', 'SIVB', 'SJI', 'SJM', 'SKS', 'SKT', 'SKX', 'SKYW', 'SLAB', 'SLB',", "'FNP', 'FOR', 'FORR', 'FOSL', 'FRED', 'FRT', 'FRX', 'FSLR', 'FSP', 'FST', 'FTI', 'FTR', 'FUL',", "'AIZ', 'NFLX', 'DF', 'FLIR', 'GT', 'LM', 'APOL', 'PDCO', 'JDSU', 'ANF', 'PBI', 'NDAQ', 'X',", "'RMD', 'ROCK', 'ROG', 'ROK', 'ROL', 'ROP', 'ROSE', 'ROST', 'ROVI', 'RPM', 'RRC', 'RRD', 'RRGB',", "'AFL', 'ALXN', 'GD', 'WMB', 'CBS', 'CSX', 'TMO', 'AEP', 'CTSH', 'MRO', 'DFS', 'NSC', 'MCK',", "'CNX', 'AES', 'AKAM', 'LNC', 'VAR', 'BLL', 'FLS', 'LUV', 'KMX', 'FDO', 'WHR', 'MCHP', 'SCG',", "'TMP', 'TNC', 'TOL', 'TPX', 'TQNT', 'TR', 'TRAK', 'TRIP', 'TRLG', 'TRMB', 'TRMK', 'TRN', 'TROW',", "'TTEC', 'TTEK', 'TTI', 'TTMI', 'TTWO', 'TUES', 'TUP', 'TW', 'TWC', 'TWGP', 'TWTC', 'TWX', 'TXI',", "'CVC', 'ATI', 'SAI', 'PKI', 'WPX', 'BMS', 'AVY', 'HAR', 'OI', 'AIZ', 'NFLX', 'DF', 'FLIR',", "'GOOG', 'GPC', 'GPI', 'GPN', 'GPOR', 'GPS', 'GS', 'GSM', 'GT', 'GTAT', 'GTIV', 'GTY', 'GVA',", "'EW', 'ETR', 'NUE', 'SWN', 'MAT', 'CBE', 'NU', 'AMP', 'NTAP', 'ZMH', 'LTD', 'ADI', 'PGR',", "'HZO', 'IART', 'IBKR', 'IBM', 'IBOC', 'ICE', 'ICON', 'ICUI', 'IDA', 'IDTI', 'IDXX', 'IEX', 'IFF',", "'IBM', 'T', 'GOOG', 'PG', 'JNJ', 'PFE', 'WFC', 'BRK.B', 'JPM', 'PM', 'KO', 'MRK', 'VZ',", "'NEE', 'NEM', 'NEOG', 'NEU', 'NEWP', 'NFG', 'NFLX', 'NFP', 'NFX', 'NI', 'NILE', 'NJR', 'NKE',", "'CVD', 'CVG', 'CVGW', 'CVH', 'CVLT', 'CVS', 'CVX', 'CW', 'CWTR', 'CXW', 'CY', 'CYBX', 'CYH',", "'XOM', 'XOXO', 'XRAY', 'XRX', 'XYL', 'Y', 'YHOO', 'YUM', 'ZBRA', 'ZEP', 'ZEUS', 'ZLC', 'ZMH',", "'PPG', 'CME', 'LYB', 'APD', 'VLO', 'EQR', 'BEN', 'ECL', 'PPL', 'AON', 'WFM', 'BXP', 'YHOO',", "'CCE', 'CCI', 'CCL', 'CCMP', 'CCRN', 'CDI', 'CDNS', 'CDR', 'CEB', 'CEC', 'CECO', 'CELG', 
'CELL',", "'SWK', 'SWKS', 'SWM', 'SWN', 'SWS', 'SWX', 'SWY', 'SXC', 'SXI', 'SXT', 'SYK', 'SYKE', 'SYMC',", "'TGT', 'SO', 'SBUX', 'AIG', 'F', 'FCX', 'MET', 'BIIB', 'EMR', 'APC', 'NKE', 'DOW', 'LOW',", "'CYMI', 'CYN', 'CYT', 'D', 'DAKT', 'DAR', 'DBD', 'DCI', 'DCOM', 'DD', 'DDD', 'DE', 'DECK',", "'ARE', 'AREX', 'ARG', 'ARO', 'ARQL', 'ARRS', 'ARW', 'ASBC', 'ASEI', 'ASGN', 'ASH', 'ASNA', 'ASTE',", "'DELL', 'DF', 'DFS', 'DGII', 'DGIT', 'DGX', 'DHI', 'DHR', 'DHX', 'DIN', 'DIOD', 'DIS', 'DISCA',", "'RHI', 'RHT', 'RJF', 'RKT', 'RL', 'RLI', 'RMD', 'ROCK', 'ROG', 'ROK', 'ROL', 'ROP', 'ROSE',", "'EXR', 'EZPW', 'F', 'FAF', 'FARO', 'FAST', 'FBHS', 'FBP', 'FCF', 'FCFS', 'FCN', 'FCS', 'FCX',", "'LH', 'LHCG', 'LHO', 'LIFE', 'LII', 'LINC', 'LKQ', 'LL', 'LLL', 'LLTC', 'LLY', 'LM', 'LMNX',", "'MAT', 'CBE', 'NU', 'AMP', 'NTAP', 'ZMH', 'LTD', 'ADI', 'PGR', 'HST', 'FAST', 'MTB', 'HOT',", "'PEET', 'PEG', 'PEI', 'PEP', 'PERY', 'PES', 'PETM', 'PETS', 'PFE', 'PFG', 'PFS', 'PG', 'PGR',", "'FOR', 'FORR', 'FOSL', 'FRED', 'FRT', 'FRX', 'FSLR', 'FSP', 'FST', 'FTI', 'FTR', 'FUL', 'FULT',", "'IIVI', 'IM', 'IN', 'INDB', 'INFA', 'INGR', 'ININ', 'INT', 'INTC', 'INTU', 'IO', 'IP', 'IPAR',", "'HMN', 'HMSY', 'HNI', 'HNT', 'HNZ', 'HOG', 'HOLX', 'HOMB', 'HON', 'HOS', 'HOT', 'HOTT', 'HP',", "'LUK', 'LUV', 'LXK', 'LXP', 'LXU', 'LYV', 'LZB', 'M', 'MA', 'MAA', 'MAC', 'MAN', 'MANH',", "'WRLD', 'WSM', 'WSO', 'WST', 'WTFC', 'WTR', 'WTS', 'WU', 'WWD', 'WWW', 'WY', 'WYN', 'WYNN',", "'RF', 'RFMD', 'RGA', 'RGLD', 'RGR', 'RGS', 'RHI', 'RHT', 'RJF', 'RKT', 'RL', 'RLI', 'RMD',", "'PNW', 'DHI', 'ARG', 'LEN', 'QEP', 'EFX', 'CVH', 'CLF', 'CBG', 'CINF', 'NWL', 'HSP', 'EXPE',", "'WCG', 'WCN', 'WDC', 'WDFC', 'WDR', 'WEC', 'WEN', 'WERN', 'WFC', 'WFM', 'WGL', 'WGO', 'WHR',", "'PLFE', 'PLL', 'PLT', 'PLXS', 'PM', 'PMC', 'PMTC', 'PMTI', 'PNC', 'PNFP', 'PNK', 'PNM', 'PNR',", "'NFG', 'NFLX', 'NFP', 'NFX', 'NI', 'NILE', 'NJR', 'NKE', 'NNN', 'NOC', 'NOV', 'NP', 'NPBC',", "'APA', 'APC', 'APD', 'APEI', 'APH', 'APOG', 'APOL', 'ARB', 
'ARE', 'AREX', 'ARG', 'ARO', 'ARQL',", "'HMSY', 'HNI', 'HNT', 'HNZ', 'HOG', 'HOLX', 'HOMB', 'HON', 'HOS', 'HOT', 'HOTT', 'HP', 'HPQ',", "'MDCO', 'MDP', 'MDRX', 'MDSO', 'MDT', 'MDU', 'MEAS', 'MED', 'MEI', 'MENT', 'MET', 'MFB', 'MGAM',", "'GTY', 'GVA', 'GWW', 'GXP', 'GY', 'HAE', 'HAFC', 'HAIN', 'HAL', 'HAR', 'HAS', 'HAYN', 'HBAN',", "'CPSI', 'CPT', 'CPWR', 'CR', 'CRDN', 'CREE', 'CRI', 'CRK', 'CRL', 'CRM', 'CROX', 'CRR', 'CRS',", "'CBOE', 'CBR', 'CBRL', 'CBS', 'CBSH', 'CBST', 'CBT', 'CBU', 'CCC', 'CCE', 'CCI', 'CCL', 'CCMP',", "'DF', 'DFS', 'DGII', 'DGIT', 'DGX', 'DHI', 'DHR', 'DHX', 'DIN', 'DIOD', 'DIS', 'DISCA', 'DKS',", "'IPCM', 'IPG', 'IPHS', 'IPI', 'IR', 'IRBT', 'IRC', 'IRF', 'IRM', 'ISCA', 'ISIL', 'ISRG', 'IT',", "'AMG', 'AMGN', 'AMP', 'AMSF', 'AMSG', 'AMT', 'AMZN', 'AN', 'ANDE', 'ANF', 'ANN', 'ANR', 'ANSS',", "'LMT', 'FDX', 'PCP', 'BBT', 'MS', 'BLK', 'DVN', 'AFL', 'ALXN', 'GD', 'WMB', 'CBS', 'CSX',", "'LSI', 'TSS', 'ZION', 'HCBK', 'AIV', 'RHI', 'PCS', 'MOLX', 'TE', 'TRIP', 'DNB', 'LEG', 'JBL',", "'ENSG', 'ENTR', 'ENZ', 'EOG', 'EPAY', 'EPIQ', 'EPR', 'EQIX', 'EQR', 'EQT', 'EQY', 'ESE', 'ESI',", "'BOBE', 'BOH', 'BPFH', 'BR', 'BRC', 'BRCM', 'BRKL', 'BRKS', 'BRLI', 'BRO', 'BRS', 'BSX', 'BTH',", "'WAG', 'WAT', 'WBS', 'WBSN', 'WCG', 'WCN', 'WDC', 'WDFC', 'WDR', 'WEC', 'WEN', 'WERN', 'WFC',", "'LTD', 'LTM', 'LUFK', 'LUK', 'LUV', 'LXK', 'LXP', 'LXU', 'LYV', 'LZB', 'M', 'MA', 'MAA',", "'DTV', 'DUK', 'DV', 'DVA', 'DVN', 'DW', 'DWA', 'DY', 'EA', 'EAT', 'EBAY', 'EBIX', 'EBS',", "'SUSQ', 'SVU', 'SWI', 'SWK', 'SWKS', 'SWM', 'SWN', 'SWS', 'SWX', 'SWY', 'SXC', 'SXI', 'SXT',", "'COO', 'COP', 'COST', 'COV', 'CPB', 'CPLA', 'CPRT', 'CPSI', 'CPT', 'CPWR', 'CR', 'CRDN', 'CREE',", "'IT', 'ITG', 'ITRI', 'ITT', 'ITW', 'IVAC', 'IVC', 'IVZ', 'JACK', 'JAH', 'JAKK', 'JBHT', 'JBL',", "'KMB', 'APA', 'HPQ', 'PNC', 'COF', 'BAX', 'TJX', 'CELG', 'DTV', 'DE', 'DHR', 'TXN', 'HAL',", "'RTN', 'WLP', 'CCI', 'JCI', 'MPC', 'MMC', 'FE', 'VTR', 'SYY', 'PCG', 'HNZ', 'ADM', 'BRCM',", "'COV', 'CPB', 'CPLA', 
'CPRT', 'CPSI', 'CPT', 'CPWR', 'CR', 'CRDN', 'CREE', 'CRI', 'CRK', 'CRL',", "'PEG', 'PEI', 'PEP', 'PERY', 'PES', 'PETM', 'PETS', 'PFE', 'PFG', 'PFS', 'PG', 'PGR', 'PH',", "'HAR', 'OI', 'AIZ', 'NFLX', 'DF', 'FLIR', 'GT', 'LM', 'APOL', 'PDCO', 'JDSU', 'ANF', 'PBI',", "'ATR', 'ATU', 'ATW', 'AVA', 'AVAV', 'AVB', 'AVD', 'AVID', 'AVP', 'AVT', 'AVY', 'AWR', 'AXE',", "'PNFP', 'PNK', 'PNM', 'PNR', 'PNRA', 'PNW', 'PNY', 'POL', 'POM', 'POOL', 'POST', 'POWI', 'POWL',", "'UPS', 'GS', 'ESRX', 'GILD', 'NWSA', 'MON', 'MA', 'LLY', 'CL', 'BA', 'DD', 'HON', 'SPG',", "'ADVS', 'AEE', 'AEGN', 'AEIS', 'AEO', 'AEP', 'AES', 'AET', 'AF', 'AFAM', 'AFFX', 'AFG', 'AFL',", "'COLB', 'COO', 'COP', 'COST', 'COV', 'CPB', 'CPLA', 'CPRT', 'CPSI', 'CPT', 'CPWR', 'CR', 'CRDN',", "'VVC', 'VZ', 'WAB', 'WABC', 'WAFD', 'WAG', 'WAT', 'WBS', 'WBSN', 'WCG', 'WCN', 'WDC', 'WDFC',", "'MDP', 'MDRX', 'MDSO', 'MDT', 'MDU', 'MEAS', 'MED', 'MEI', 'MENT', 'MET', 'MFB', 'MGAM', 'MGLN',", "'EAT', 'EBAY', 'EBIX', 'EBS', 'ECL', 'ECPG', 'ED', 'EE', 'EFX', 'EGL', 'EGN', 'EGP', 'EHTH',", "'AFG', 'AFL', 'AGCO', 'AGN', 'AGP', 'AGYS', 'AHL', 'AHS', 'AIG', 'AINV', 'AIR', 'AIRM', 'AIT',", "'WEC', 'CCE', 'WDC', 'LIFE', 'MCO', 'HIG', 'JWN', 'FRX', 'MNST', 'FFIV', 'NVDA', 'KIM', 'KEY',", "'BKMU', 'BKS', 'BLK', 'BLKB', 'BLL', 'BMC', 'BMI', 'BMR', 'BMS', 'BMY', 'BOBE', 'BOH', 'BPFH',", "'CDNS', 'CDR', 'CEB', 'CEC', 'CECO', 'CELG', 'CELL', 'CENX', 'CERN', 'CEVA', 'CF', 'CFN', 'CFR',", "'MGLN', 'MHK', 'MHO', 'MIG', 'MINI', 'MJN', 'MKC', 'MKSI', 'MLHR', 'MLI', 'MLM', 'MMC', 'MMM',", "'BKI', 'BKMU', 'BKS', 'BLK', 'BLKB', 'BLL', 'BMC', 'BMI', 'BMR', 'BMS', 'BMY', 'BOBE', 'BOH',", "'CW', 'CWTR', 'CXW', 'CY', 'CYBX', 'CYH', 'CYMI', 'CYN', 'CYT', 'D', 'DAKT', 'DAR', 'DBD',", "'DDD', 'DE', 'DECK', 'DEL', 'DELL', 'DF', 'DFS', 'DGII', 'DGIT', 'DGX', 'DHI', 'DHR', 'DHX',", "'GVA', 'GWW', 'GXP', 'GY', 'HAE', 'HAFC', 'HAIN', 'HAL', 'HAR', 'HAS', 'HAYN', 'HBAN', 'HBHC',", "'JCP', 'JDAS', 'JDSU', 'JEC', 'JEF', 'JJSF', 'JKHY', 'JLL', 'JNJ', 'JNPR', 
'JNS', 'JOSB', 'JOY',", "'UNP', 'UNS', 'UNT', 'UNTD', 'UPS', 'URBN', 'URI', 'URS', 'USB', 'USMO', 'USTR', 'UTEK', 'UTHR',", "'BBY', 'LUK', 'CTAS', 'HAS', 'POM', 'PBCT', 'NFX', 'RDC', 'SNA', 'GCI', 'URBN', 'NBR', 'TEG',", "'BMY', 'UNP', 'CAT', 'EBAY', 'AXP', 'UPS', 'GS', 'ESRX', 'GILD', 'NWSA', 'MON', 'MA', 'LLY',", "'TGT', 'THC', 'THG', 'THO', 'THOR', 'THS', 'TIBX', 'TIE', 'TIF', 'TJX', 'TKR', 'TLAB', 'TMK',", "'SCSS', 'SE', 'SEE', 'SEIC', 'SENEA', 'SF', 'SFD', 'SFG', 'SFNC', 'SFY', 'SGMS', 'SGY', 'SHAW',", "'CELL', 'CENX', 'CERN', 'CEVA', 'CF', 'CFN', 'CFR', 'CGNX', 'CGX', 'CHCO', 'CHD', 'CHE', 'CHG',", "'KFT', 'V', 'OXY', 'COP', 'MO', 'UTX', 'USB', 'AMGN', 'CVS', 'MMM', 'EMC', 'UNH', 'BMY',", "'BABY', 'BAC', 'BAS', 'BAX', 'BBBY', 'BBG', 'BBOX', 'BBT', 'BBY', 'BC', 'BCO', 'BCOR', 'BCPC',", "'NBR', 'NBTB', 'NCI', 'NCIT', 'NCR', 'NCS', 'NDAQ', 'NDSN', 'NE', 'NEE', 'NEM', 'NEOG', 'NEU',", "'FMC', 'FMER', 'FNB', 'FNF', 'FNFG', 'FNGN', 'FNP', 'FOR', 'FORR', 'FOSL', 'FRED', 'FRT', 'FRX',", "'SSD', 'SSI', 'SSP', 'SSS', 'STBA', 'STC', 'STE', 'STI', 'STJ', 'STL', 'STLD', 'STMP', 'STR',", "'MATW', 'MATX', 'MCD', 'MCF', 'MCHP', 'MCK', 'MCO', 'MCRI', 'MCRL', 'MCRS', 'MCS', 'MCY', 'MD',", "'AME', 'AMED', 'AMG', 'AMGN', 'AMP', 'AMSF', 'AMSG', 'AMT', 'AMZN', 'AN', 'ANDE', 'ANF', 'ANN',", "'CKP', 'CL', 'CLC', 'CLD', 'CLF', 'CLGX', 'CLH', 'CLI', 'CLMS', 'CLP', 'CLW', 'CLX', 'CMA',", "'VAL', 'VAR', 'VCI', 'VCLK', 'VDSI', 'VECO', 'VFC', 'VIAB', 'VICR', 'VIVO', 'VLO', 'VLTR', 'VLY',", "'EL', 'ELY', 'EMC', 'EME', 'EMN', 'EMR', 'ENDP', 'ENR', 'ENS', 'ENSG', 'ENTR', 'ENZ', 'EOG',", "'KWK', 'KWR', 'L', 'LAD', 'LAMR', 'LANC', 'LAWS', 'LDL', 'LDR', 'LECO', 'LEG', 'LEN', 'LFUS',", "'BXP', 'BXS', 'BYD', 'BYI', 'C', 'CA', 'CAB', 'CACI', 'CAG', 'CAH', 'CAKE', 'CALM', 'CAM',", "'VAR', 'VCI', 'VCLK', 'VDSI', 'VECO', 'VFC', 'VIAB', 'VICR', 'VIVO', 'VLO', 'VLTR', 'VLY', 'VMC',", "'CY', 'CYBX', 'CYH', 'CYMI', 'CYN', 'CYT', 'D', 'DAKT', 'DAR', 'DBD', 'DCI', 'DCOM', 'DD',", "'PCG', 'HNZ', 'ADM', 
'BRCM', 'ED', 'PPG', 'CME', 'LYB', 'APD', 'VLO', 'EQR', 'BEN', 'ECL',", "'PH', 'PHM', 'PII', 'PJC', 'PKE', 'PKG', 'PKI', 'PKY', 'PL', 'PLCE', 'PLCM', 'PLD', 'PLFE',", "'EXP', 'EXPD', 'EXPE', 'EXPO', 'EXR', 'EZPW', 'F', 'FAF', 'FARO', 'FAST', 'FBHS', 'FBP', 'FCF',", "'DOV', 'ALTR', 'WPI', 'HSY', 'ROP', 'PAYX', 'GPS', 'SNDK', 'DTE', 'PRGO', 'RF', 'NTRS', 'DGX',", "'LXK', 'LXP', 'LXU', 'LYV', 'LZB', 'M', 'MA', 'MAA', 'MAC', 'MAN', 'MANH', 'MANT', 'MAR',", "'INT', 'INTC', 'INTU', 'IO', 'IP', 'IPAR', 'IPCC', 'IPCM', 'IPG', 'IPHS', 'IPI', 'IR', 'IRBT',", "'RFMD', 'RGA', 'RGLD', 'RGR', 'RGS', 'RHI', 'RHT', 'RJF', 'RKT', 'RL', 'RLI', 'RMD', 'ROCK',", "'SXT', 'SYK', 'SYKE', 'SYMC', 'SYMM', 'SYNA', 'SYY', 'T', 'TAP', 'TBI', 'TCB', 'TCBI', 'TCO',", "'TQNT', 'TR', 'TRAK', 'TRIP', 'TRLG', 'TRMB', 'TRMK', 'TRN', 'TROW', 'TRST', 'TRV', 'TSCO', 'TSN',", "'RDC', 'RE', 'RECN', 'REG', 'REGN', 'RF', 'RFMD', 'RGA', 'RGLD', 'RGR', 'RGS', 'RHI', 'RHT',", "'IBOC', 'ICE', 'ICON', 'ICUI', 'IDA', 'IDTI', 'IDXX', 'IEX', 'IFF', 'IFSIA', 'IGT', 'IGTE', 'IILG',", "'GE', 'CVX', 'MSFT', 'IBM', 'T', 'GOOG', 'PG', 'JNJ', 'PFE', 'WFC', 'BRK.B', 'JPM', 'PM',", "'ANF', 'PBI', 'NDAQ', 'X', 'SEE', 'TER', 'THC', 'GME', 'GNW', 'FHN', 'ETFC', 'AMD', 'R',", "'HCP', 'HCSG', 'HD', 'HE', 'HES', 'HF', 'HFC', 'HGR', 'HHS', 'HI', 'HIBB', 'HIG', 'HII',", "'NRG', 'NSC', 'NSIT', 'NSP', 'NSR', 'NTAP', 'NTCT', 'NTGR', 'NTLS', 'NTRI', 'NTRS', 'NU', 'NUE',", "'DVN', 'DW', 'DWA', 'DY', 'EA', 'EAT', 'EBAY', 'EBIX', 'EBS', 'ECL', 'ECPG', 'ED', 'EE',", "'PERY', 'PES', 'PETM', 'PETS', 'PFE', 'PFG', 'PFS', 'PG', 'PGR', 'PH', 'PHM', 'PII', 'PJC',", "'CTXS', 'FITB', 'RAI', 'PCAR', 'WY', 'SCHW', 'VFC', 'WM', 'CF', 'AZO', 'AMAT', 'CAM', 'VNO',", "'SYY', 'PCG', 'HNZ', 'ADM', 'BRCM', 'ED', 'PPG', 'CME', 'LYB', 'APD', 'VLO', 'EQR', 'BEN',", "'XEL', 'A', 'CAH', 'AET', 'STJ', 'AVB', 'L', 'IR', 'PXD', 'KR', 'SWK', 'K', 'TDC',", "'DRQ', 'DSPG', 'DTE', 'DTSI', 'DTV', 'DUK', 'DV', 'DVA', 'DVN', 'DW', 'DWA', 'DY', 'EA',", "'TRIP', 'TRLG', 
'TRMB', 'TRMK', 'TRN', 'TROW', 'TRST', 'TRV', 'TSCO', 'TSN', 'TSO', 'TSRA', 'TSS',", "'TSCO', 'TSN', 'TSO', 'TSRA', 'TSS', 'TTC', 'TTEC', 'TTEK', 'TTI', 'TTMI', 'TTWO', 'TUES', 'TUP',", "'AXE', 'AXP', 'AYI', 'AZO', 'AZZ', 'B', 'BA', 'BABY', 'BAC', 'BAS', 'BAX', 'BBBY', 'BBG',", "'ESE', 'ESI', 'ESIO', 'ESL', 'ESRX', 'ESS', 'ESV', 'ETFC', 'ETH', 'ETN', 'ETR', 'EV', 'EW',", "'OIS', 'OKE', 'OLN', 'OMC', 'OMCL', 'OMG', 'OMI', 'OMX', 'ONB', 'ONE', 'OPEN', 'OPLK', 'OPNT',", "'VCI', 'VCLK', 'VDSI', 'VECO', 'VFC', 'VIAB', 'VICR', 'VIVO', 'VLO', 'VLTR', 'VLY', 'VMC', 'VMI',", "'PRGO', 'PRGS', 'PRU', 'PRX', 'PRXL', 'PSA', 'PSB', 'PSEC', 'PSEM', 'PSS', 'PSSI', 'PSX', 'PTEN',", "'IR', 'IRBT', 'IRC', 'IRF', 'IRM', 'ISCA', 'ISIL', 'ISRG', 'IT', 'ITG', 'ITRI', 'ITT', 'ITW',", "'ESV', 'SYMC', 'PH', 'GWW', 'EW', 'ETR', 'NUE', 'SWN', 'MAT', 'CBE', 'NU', 'AMP', 'NTAP',", "'FRX', 'FSLR', 'FSP', 'FST', 'FTI', 'FTR', 'FUL', 'FULT', 'FWRD', 'GAS', 'GB', 'GBCI', 'GCI',", "'TWTC', 'TWX', 'TXI', 'TXN', 'TXRH', 'TXT', 'TYC', 'TYL', 'TYPE', 'UA', 'UBA', 'UBSI', 'UCBI',", "'SRCL', 'EXPD', 'COL', 'VRSN', 'FMC', 'ADSK', 'PFG', 'WYN', 'SLM', 'PLL', 'TIF', 'TXT', 'XL',", "'CGX', 'CHCO', 'CHD', 'CHE', 'CHG', 'CHK', 'CHRW', 'CHS', 'CI', 'CIEN', 'CINF', 'CIR', 'CKH',", "'HPY', 'HR', 'HRB', 'HRC', 'HRL', 'HRS', 'HSC', 'HSH', 'HSIC', 'HSII', 'HSNI', 'HSP', 'HST',", "'MAN', 'MANH', 'MANT', 'MAR', 'MAS', 'MASI', 'MAT', 'MATW', 'MATX', 'MCD', 'MCF', 'MCHP', 'MCK',", "'WAB', 'WABC', 'WAFD', 'WAG', 'WAT', 'WBS', 'WBSN', 'WCG', 'WCN', 'WDC', 'WDFC', 'WDR', 'WEC',", "'ARQL', 'ARRS', 'ARW', 'ASBC', 'ASEI', 'ASGN', 'ASH', 'ASNA', 'ASTE', 'ATI', 'ATK', 'ATMI', 'ATML',", "'AVB', 'AVD', 'AVID', 'AVP', 'AVT', 'AVY', 'AWR', 'AXE', 'AXP', 'AYI', 'AZO', 'AZZ', 'B',", "'CAKE', 'CALM', 'CAM', 'CAS', 'CASC', 'CASY', 'CAT', 'CATM', 'CATO', 'CATY', 'CB', 'CBB', 'CBE',", "'PRGO', 'RF', 'NTRS', 'DGX', 'CMG', 'FISV', 'ORLY', 'MUR', 'OKE', 'MYL', 'BF.B', 'MAR', 'ROK',", "'CVBF', 'CVC', 'CVD', 'CVG', 'CVGW', 'CVH', 'CVLT', 
'CVS', 'CVX', 'CW', 'CWTR', 'CXW', 'CY',", "'TMK', 'TMO', 'TMP', 'TNC', 'TOL', 'TPX', 'TQNT', 'TR', 'TRAK', 'TRIP', 'TRLG', 'TRMB', 'TRMK',", "'CAH', 'CAKE', 'CALM', 'CAM', 'CAS', 'CASC', 'CASY', 'CAT', 'CATM', 'CATO', 'CATY', 'CB', 'CBB',", "'OKE', 'MYL', 'BF.B', 'MAR', 'ROK', 'CHK', 'ABC', 'ICE', 'HOG', 'XRX', 'APH', 'GPC', 'CHRW',", "'AES', 'AKAM', 'LNC', 'VAR', 'BLL', 'FLS', 'LUV', 'KMX', 'FDO', 'WHR', 'MCHP', 'SCG', 'DNR',", "'LEN', 'LFUS', 'LG', 'LH', 'LHCG', 'LHO', 'LIFE', 'LII', 'LINC', 'LKQ', 'LL', 'LLL', 'LLTC',", "'CVH', 'CVLT', 'CVS', 'CVX', 'CW', 'CWTR', 'CXW', 'CY', 'CYBX', 'CYH', 'CYMI', 'CYN', 'CYT',", "'TWX', 'TGT', 'SO', 'SBUX', 'AIG', 'F', 'FCX', 'MET', 'BIIB', 'EMR', 'APC', 'NKE', 'DOW',", "'OPEN', 'OPLK', 'OPNT', 'ORB', 'ORCL', 'ORI', 'ORIT', 'ORLY', 'ORN', 'OSG', 'OSIS', 'OSK', 'OXM',", "'AAON', 'AAP', 'AAPL', 'ABAX', 'ABC', 'ABFS', 'ABM', 'ABT', 'ACAT', 'ACC', 'ACE', 'ACI', 'ACIW',", "'GMCR', 'GME', 'GMT', 'GNCMA', 'GNTX', 'GNW', 'GOOG', 'GPC', 'GPI', 'GPN', 'GPOR', 'GPS', 'GS',", "'CL', 'BA', 'DD', 'HON', 'SPG', 'DUK', 'ACN', 'MDT', 'COST', 'TWX', 'TGT', 'SO', 'SBUX',", "'ASEI', 'ASGN', 'ASH', 'ASNA', 'ASTE', 'ATI', 'ATK', 'ATMI', 'ATML', 'ATNI', 'ATO', 'ATR', 'ATU',", "'ISCA', 'ISIL', 'ISRG', 'IT', 'ITG', 'ITRI', 'ITT', 'ITW', 'IVAC', 'IVC', 'IVZ', 'JACK', 'JAH',", "'HOT', 'RRC', 'HUM', 'CERN', 'CAG', 'IVZ', 'DLTR', 'KSS', 'FTI', 'RHT', 'WU', 'STX', 'DOV',", "'TRST', 'TRV', 'TSCO', 'TSN', 'TSO', 'TSRA', 'TSS', 'TTC', 'TTEC', 'TTEK', 'TTI', 'TTMI', 'TTWO',", "'AKS', 'ALB', 'ALE', 'ALEX', 'ALGN', 'ALGT', 'ALK', 'ALL', 'ALOG', 'ALTR', 'ALXN', 'AM', 'AMAT',", "'NEOG', 'NEU', 'NEWP', 'NFG', 'NFLX', 'NFP', 'NFX', 'NI', 'NILE', 'NJR', 'NKE', 'NNN', 'NOC',", "'WFM', 'WGL', 'WGO', 'WHR', 'WIBC', 'WIN', 'WIRE', 'WLP', 'WM', 'WMB', 'WMS', 'WMT', 'WOOF',", "'EHTH', 'EIG', 'EIX', 'EL', 'ELY', 'EMC', 'EME', 'EMN', 'EMR', 'ENDP', 'ENR', 'ENS', 'ENSG',", "'AKRX', 'AKS', 'ALB', 'ALE', 'ALEX', 'ALGN', 'ALGT', 'ALK', 'ALL', 'ALOG', 'ALTR', 'ALXN', 'AM',", 
"'DNR', 'DO', 'DOV', 'DOW', 'DPS', 'DRC', 'DRE', 'DRH', 'DRI', 'DRIV', 'DRQ', 'DSPG', 'DTE',", "'GY', 'HAE', 'HAFC', 'HAIN', 'HAL', 'HAR', 'HAS', 'HAYN', 'HBAN', 'HBHC', 'HBI', 'HCBK', 'HCC',", "'NUVA', 'NVDA', 'NVE', 'NVR', 'NVTL', 'NWBI', 'NWE', 'NWL', 'NWN', 'NWSA', 'NX', 'NYB', 'NYT',", "'VCLK', 'VDSI', 'VECO', 'VFC', 'VIAB', 'VICR', 'VIVO', 'VLO', 'VLTR', 'VLY', 'VMC', 'VMI', 'VNO',", "'MAS', 'MASI', 'MAT', 'MATW', 'MATX', 'MCD', 'MCF', 'MCHP', 'MCK', 'MCO', 'MCRI', 'MCRL', 'MCRS',", "'RGA', 'RGLD', 'RGR', 'RGS', 'RHI', 'RHT', 'RJF', 'RKT', 'RL', 'RLI', 'RMD', 'ROCK', 'ROG',", "'VPHM', 'VRSN', 'VRTS', 'VRTU', 'VRTX', 'VSAT', 'VSH', 'VSI', 'VTR', 'VVC', 'VZ', 'WAB', 'WABC',", "'FFIN', 'FFIV', 'FHN', 'FICO', 'FII', 'FINL', 'FIRE', 'FIS', 'FISV', 'FITB', 'FIX', 'FL', 'FLIR',", "'UNP', 'CAT', 'EBAY', 'AXP', 'UPS', 'GS', 'ESRX', 'GILD', 'NWSA', 'MON', 'MA', 'LLY', 'CL',", "'CPLA', 'CPRT', 'CPSI', 'CPT', 'CPWR', 'CR', 'CRDN', 'CREE', 'CRI', 'CRK', 'CRL', 'CRM', 'CROX',", "'WR', 'WRB', 'WRC', 'WRI', 'WRLD', 'WSM', 'WSO', 'WST', 'WTFC', 'WTR', 'WTS', 'WU', 'WWD',", "'THO', 'THOR', 'THS', 'TIBX', 'TIE', 'TIF', 'TJX', 'TKR', 'TLAB', 'TMK', 'TMO', 'TMP', 'TNC',", "'CPT', 'CPWR', 'CR', 'CRDN', 'CREE', 'CRI', 'CRK', 'CRL', 'CRM', 'CROX', 'CRR', 'CRS', 'CRUS',", "'RRD', 'RRGB', 'RS', 'RSG', 'RSH', 'RSTI', 'RSYS', 'RT', 'RTEC', 'RTI', 'RTN', 'RUE', 'RUTH',", "'HSIC', 'HSII', 'HSNI', 'HSP', 'HST', 'HSTM', 'HSY', 'HTLD', 'HTSI', 'HUBG', 'HUM', 'HVT', 'HW',", "'EXAR', 'EXC', 'EXH', 'EXLS', 'EXP', 'EXPD', 'EXPE', 'EXPO', 'EXR', 'EZPW', 'F', 'FAF', 'FARO',", "'SIG', 'SIGI', 'SIGM', 'SIVB', 'SJI', 'SJM', 'SKS', 'SKT', 'SKX', 'SKYW', 'SLAB', 'SLB', 'SLG',", "'GES', 'GFF', 'GGG', 'GHL', 'GIFI', 'GILD', 'GIS', 'GLW', 'GMCR', 'GME', 'GMT', 'GNCMA', 'GNTX',", "'NX', 'NYB', 'NYT', 'NYX', 'O', 'OCR', 'ODFL', 'ODP', 'OFC', 'OGE', 'OHI', 'OI', 'OII',", "'LDR', 'LECO', 'LEG', 'LEN', 'LFUS', 'LG', 'LH', 'LHCG', 'LHO', 'LIFE', 'LII', 'LINC', 'LKQ',", "'AGN', 'NEM', 'BK', 'TRV', 'TYC', 'GIS', 
'ITW', 'ACE', 'PRU', 'VIAB', 'CTL', 'LMT', 'FDX',", "'THG', 'THO', 'THOR', 'THS', 'TIBX', 'TIE', 'TIF', 'TJX', 'TKR', 'TLAB', 'TMK', 'TMO', 'TMP',", "'LIFE', 'LII', 'LINC', 'LKQ', 'LL', 'LLL', 'LLTC', 'LLY', 'LM', 'LMNX', 'LMOS', 'LMT', 'LNC',", "'RTI', 'RTN', 'RUE', 'RUTH', 'RVBD', 'RYL', 'RYN', 'S', 'SAFM', 'SAFT', 'SAH', 'SAI', 'SAM',", "'SXC', 'SXI', 'SXT', 'SYK', 'SYKE', 'SYMC', 'SYMM', 'SYNA', 'SYY', 'T', 'TAP', 'TBI', 'TCB',", "'LFUS', 'LG', 'LH', 'LHCG', 'LHO', 'LIFE', 'LII', 'LINC', 'LKQ', 'LL', 'LLL', 'LLTC', 'LLY',", "'TAP', 'LLL', 'AVP', 'CNX', 'AES', 'AKAM', 'LNC', 'VAR', 'BLL', 'FLS', 'LUV', 'KMX', 'FDO',", "'COST', 'COV', 'CPB', 'CPLA', 'CPRT', 'CPSI', 'CPT', 'CPWR', 'CR', 'CRDN', 'CREE', 'CRI', 'CRK',", "'LEG', 'LEN', 'LFUS', 'LG', 'LH', 'LHCG', 'LHO', 'LIFE', 'LII', 'LINC', 'LKQ', 'LL', 'LLL',", "'EGP', 'EHTH', 'EIG', 'EIX', 'EL', 'ELY', 'EMC', 'EME', 'EMN', 'EMR', 'ENDP', 'ENR', 'ENS',", "'ECPG', 'ED', 'EE', 'EFX', 'EGL', 'EGN', 'EGP', 'EHTH', 'EIG', 'EIX', 'EL', 'ELY', 'EMC',", "'CNP', 'NE', 'JNPR', 'LH', 'EQT', 'CA', 'DVA', 'XLNX', 'EMN', 'SIAL', 'WEC', 'CCE', 'WDC',", "'TSN', 'FOSL', 'DO', 'BBY', 'LUK', 'CTAS', 'HAS', 'POM', 'PBCT', 'NFX', 'RDC', 'SNA', 'GCI',", "'LLTC', 'LLY', 'LM', 'LMNX', 'LMOS', 'LMT', 'LNC', 'LNCE', 'LNN', 'LNT', 'LO', 'LOGM', 'LOW',", "'CSCO', 'CSGS', 'CSH', 'CSL', 'CSTR', 'CSX', 'CTAS', 'CTL', 'CTS', 'CTSH', 'CTXS', 'CUB', 'CUZ',", "'ABC', 'ICE', 'HOG', 'XRX', 'APH', 'GPC', 'CHRW', 'SJM', 'AA', 'COG', 'FLR', 'DPS', 'CLX',", "'BH', 'BHE', 'BHI', 'BID', 'BIG', 'BIIB', 'BJRI', 'BK', 'BKE', 'BKH', 'BKI', 'BKMU', 'BKS',", "'RAI', 'PCAR', 'WY', 'SCHW', 'VFC', 'WM', 'CF', 'AZO', 'AMAT', 'CAM', 'VNO', 'OMC', 'CI',", "'MCO', 'HIG', 'JWN', 'FRX', 'MNST', 'FFIV', 'NVDA', 'KIM', 'KEY', 'RSG', 'MKC', 'BCR', 'BSX',", "'DIN', 'DIOD', 'DIS', 'DISCA', 'DKS', 'DLTR', 'DLX', 'DM', 'DMND', 'DNB', 'DNR', 'DO', 'DOV',", "'AHS', 'AIG', 'AINV', 'AIR', 'AIRM', 'AIT', 'AIV', 'AIZ', 'AJG', 'AKAM', 'AKR', 'AKRX', 'AKS',", "'GD', 'WMB', 'CBS', 
'CSX', 'TMO', 'AEP', 'CTSH', 'MRO', 'DFS', 'NSC', 'MCK', 'CB', 'KMI',", "'TRAK', 'TRIP', 'TRLG', 'TRMB', 'TRMK', 'TRN', 'TROW', 'TRST', 'TRV', 'TSCO', 'TSN', 'TSO', 'TSRA',", "'WHR', 'MCHP', 'SCG', 'DNR', 'CFN', 'CPB', 'CMS', 'VMC', 'MU', 'BMC', 'NYX', 'CMA', 'BTU',", "'HST', 'FAST', 'MTB', 'HOT', 'RRC', 'HUM', 'CERN', 'CAG', 'IVZ', 'DLTR', 'KSS', 'FTI', 'RHT',", "'MAA', 'MAC', 'MAN', 'MANH', 'MANT', 'MAR', 'MAS', 'MASI', 'MAT', 'MATW', 'MATX', 'MCD', 'MCF',", "'DRIV', 'DRQ', 'DSPG', 'DTE', 'DTSI', 'DTV', 'DUK', 'DV', 'DVA', 'DVN', 'DW', 'DWA', 'DY',", "'AMAT', 'AMCX', 'AMD', 'AME', 'AMED', 'AMG', 'AMGN', 'AMP', 'AMSF', 'AMSG', 'AMT', 'AMZN', 'AN',", "'AVP', 'AVT', 'AVY', 'AWR', 'AXE', 'AXP', 'AYI', 'AZO', 'AZZ', 'B', 'BA', 'BABY', 'BAC',", "'FINL', 'FIRE', 'FIS', 'FISV', 'FITB', 'FIX', 'FL', 'FLIR', 'FLO', 'FLR', 'FLS', 'FMBI', 'FMC',", "'BA', 'BABY', 'BAC', 'BAS', 'BAX', 'BBBY', 'BBG', 'BBOX', 'BBT', 'BBY', 'BC', 'BCO', 'BCOR',", "'MCD', 'AMZN', 'HD', 'KFT', 'V', 'OXY', 'COP', 'MO', 'UTX', 'USB', 'AMGN', 'CVS', 'MMM',", "'DOV', 'DOW', 'DPS', 'DRC', 'DRE', 'DRH', 'DRI', 'DRIV', 'DRQ', 'DSPG', 'DTE', 'DTSI', 'DTV',", "'BF.B', 'MAR', 'ROK', 'CHK', 'ABC', 'ICE', 'HOG', 'XRX', 'APH', 'GPC', 'CHRW', 'SJM', 'AA',", "'FARO', 'FAST', 'FBHS', 'FBP', 'FCF', 'FCFS', 'FCN', 'FCS', 'FCX', 'FDO', 'FDS', 'FDX', 'FE',", "'SRCL', 'SRDX', 'SRE', 'SSD', 'SSI', 'SSP', 'SSS', 'STBA', 'STC', 'STE', 'STI', 'STJ', 'STL',", "'HIW', 'HLIT', 'HLX', 'HMA', 'HME', 'HMN', 'HMSY', 'HNI', 'HNT', 'HNZ', 'HOG', 'HOLX', 'HOMB',", "'THS', 'TIBX', 'TIE', 'TIF', 'TJX', 'TKR', 'TLAB', 'TMK', 'TMO', 'TMP', 'TNC', 'TOL', 'TPX',", "'D', 'EOG', 'YUM', 'NEE', 'TWC', 'PSX', 'COV', 'ADP', 'AMT', 'AGN', 'NEM', 'BK', 'TRV',", "'MANT', 'MAR', 'MAS', 'MASI', 'MAT', 'MATW', 'MATX', 'MCD', 'MCF', 'MCHP', 'MCK', 'MCO', 'MCRI',", "'CTAS', 'CTL', 'CTS', 'CTSH', 'CTXS', 'CUB', 'CUZ', 'CVBF', 'CVC', 'CVD', 'CVG', 'CVGW', 'CVH',", "'BMY', 'BOBE', 'BOH', 'BPFH', 'BR', 'BRC', 'BRCM', 'BRKL', 'BRKS', 'BRLI', 'BRO', 'BRS', 
'BSX',", "'YHOO', 'S', 'NBL', 'NOC', 'CMI', 'CCL', 'PEG', 'INTU', 'PLD', 'SYK', 'TROW', 'COH', 'ADBE',", "'ETN', 'ETR', 'EV', 'EW', 'EWBC', 'EXAR', 'EXC', 'EXH', 'EXLS', 'EXP', 'EXPD', 'EXPE', 'EXPO',", "'MRCY', 'MRK', 'MRO', 'MRX', 'MS', 'MSA', 'MSCC', 'MSCI', 'MSFT', 'MSI', 'MSM', 'MSTR', 'MTB',", "'AJG', 'AKAM', 'AKR', 'AKRX', 'AKS', 'ALB', 'ALE', 'ALEX', 'ALGN', 'ALGT', 'ALK', 'ALL', 'ALOG',", "'KIM', 'KEY', 'RSG', 'MKC', 'BCR', 'BSX', 'KLAC', 'AEE', 'BWA', 'SPLS', 'FIS', 'SRCL', 'EXPD',", "'K', 'TDC', 'SHW', 'ESV', 'SYMC', 'PH', 'GWW', 'EW', 'ETR', 'NUE', 'SWN', 'MAT', 'CBE',", "'LLY', 'CL', 'BA', 'DD', 'HON', 'SPG', 'DUK', 'ACN', 'MDT', 'COST', 'TWX', 'TGT', 'SO',", "'WAG', 'PX', 'PCLN', 'EXC', 'D', 'EOG', 'YUM', 'NEE', 'TWC', 'PSX', 'COV', 'ADP', 'AMT',", "'MSTR', 'MTB', 'MTD', 'MTH', 'MTRN', 'MTRX', 'MTSC', 'MTX', 'MU', 'MUR', 'MW', 'MWIV', 'MWV',", "'LOW', 'LPNT', 'LPS', 'LPSN', 'LPX', 'LQDT', 'LRCX', 'LRY', 'LSI', 'LSTR', 'LTC', 'LTD', 'LTM',", "'WDC', 'LIFE', 'MCO', 'HIG', 'JWN', 'FRX', 'MNST', 'FFIV', 'NVDA', 'KIM', 'KEY', 'RSG', 'MKC',", "'TRV', 'TSCO', 'TSN', 'TSO', 'TSRA', 'TSS', 'TTC', 'TTEC', 'TTEK', 'TTI', 'TTMI', 'TTWO', 'TUES',", "'CIEN', 'CINF', 'CIR', 'CKH', 'CKP', 'CL', 'CLC', 'CLD', 'CLF', 'CLGX', 'CLH', 'CLI', 'CLMS',", "'FIS', 'SRCL', 'EXPD', 'COL', 'VRSN', 'FMC', 'ADSK', 'PFG', 'WYN', 'SLM', 'PLL', 'TIF', 'TXT',", "'MOH', 'MOLX', 'MON', 'MOS', 'MOV', 'MPC', 'MPW', 'MPWR', 'MRCY', 'MRK', 'MRO', 'MRX', 'MS',", "'PAYX', 'GPS', 'SNDK', 'DTE', 'PRGO', 'RF', 'NTRS', 'DGX', 'CMG', 'FISV', 'ORLY', 'MUR', 'OKE',", "'HFC', 'HGR', 'HHS', 'HI', 'HIBB', 'HIG', 'HII', 'HITK', 'HITT', 'HIW', 'HLIT', 'HLX', 'HMA',", "'TJX', 'CELG', 'DTV', 'DE', 'DHR', 'TXN', 'HAL', 'WAG', 'PX', 'PCLN', 'EXC', 'D', 'EOG',", "'DVN', 'AFL', 'ALXN', 'GD', 'WMB', 'CBS', 'CSX', 'TMO', 'AEP', 'CTSH', 'MRO', 'DFS', 'NSC',", "'FTI', 'FTR', 'FUL', 'FULT', 'FWRD', 'GAS', 'GB', 'GBCI', 'GCI', 'GCO', 'GD', 'GDI', 'GE',", "'SNCR', 'SNDK', 'SNH', 'SNI', 'SNPS', 'SNV', 'SNX', 'SO', 
'SON', 'SONC', 'SPAR', 'SPF', 'SPG',", "'HST', 'HSTM', 'HSY', 'HTLD', 'HTSI', 'HUBG', 'HUM', 'HVT', 'HW', 'HWAY', 'HWKN', 'HZO', 'IART',", "'MOV', 'MPC', 'MPW', 'MPWR', 'MRCY', 'MRK', 'MRO', 'MRX', 'MS', 'MSA', 'MSCC', 'MSCI', 'MSFT',", "'SWX', 'SWY', 'SXC', 'SXI', 'SXT', 'SYK', 'SYKE', 'SYMC', 'SYMM', 'SYNA', 'SYY', 'T', 'TAP',", "'FLIR', 'GT', 'LM', 'APOL', 'PDCO', 'JDSU', 'ANF', 'PBI', 'NDAQ', 'X', 'SEE', 'TER', 'THC',", "'KNX', 'KO', 'KOP', 'KOPN', 'KR', 'KRA', 'KRC', 'KRG', 'KS', 'KSS', 'KSU', 'KSWS', 'KWK',", "'SAFM', 'SAFT', 'SAH', 'SAI', 'SAM', 'SBRA', 'SBUX', 'SCG', 'SCHL', 'SCHW', 'SCI', 'SCL', 'SCOR',", "'VRSN', 'FMC', 'ADSK', 'PFG', 'WYN', 'SLM', 'PLL', 'TIF', 'TXT', 'XL', 'LLTC', 'WAT', 'NI',", "'CFN', 'CPB', 'CMS', 'VMC', 'MU', 'BMC', 'NYX', 'CMA', 'BTU', 'WIN', 'JOY', 'HBAN', 'TSO',", "'CAG', 'CAH', 'CAKE', 'CALM', 'CAM', 'CAS', 'CASC', 'CASY', 'CAT', 'CATM', 'CATO', 'CATY', 'CB',", "'DRI', 'PCL', 'TAP', 'LLL', 'AVP', 'CNX', 'AES', 'AKAM', 'LNC', 'VAR', 'BLL', 'FLS', 'LUV',", "'PFG', 'WYN', 'SLM', 'PLL', 'TIF', 'TXT', 'XL', 'LLTC', 'WAT', 'NI', 'DRI', 'PCL', 'TAP',", "'NSC', 'MCK', 'CB', 'KMI', 'STT', 'PSA', 'BHI', 'ISRG', 'GLW', 'CRM', 'ALL', 'SE', 'HCP',", "'MOS', 'MOV', 'MPC', 'MPW', 'MPWR', 'MRCY', 'MRK', 'MRO', 'MRX', 'MS', 'MSA', 'MSCC', 'MSCI',", "'WIN', 'WIRE', 'WLP', 'WM', 'WMB', 'WMS', 'WMT', 'WOOF', 'WOR', 'WPO', 'WPP', 'WPX', 'WR',", "'ADM', 'BRCM', 'ED', 'PPG', 'CME', 'LYB', 'APD', 'VLO', 'EQR', 'BEN', 'ECL', 'PPL', 'AON',", "'FDO', 'FDS', 'FDX', 'FE', 'FEIC', 'FELE', 'FFBC', 'FFIN', 'FFIV', 'FHN', 'FICO', 'FII', 'FINL',", "'PVH', 'PVTB', 'PWR', 'PX', 'PXD', 'PXP', 'PZZA', 'QCOM', 'QCOR', 'QEP', 'QLGC', 'QNST', 'QSFT',", "'NI', 'DRI', 'PCL', 'TAP', 'LLL', 'AVP', 'CNX', 'AES', 'AKAM', 'LNC', 'VAR', 'BLL', 'FLS',", "'HUM', 'CERN', 'CAG', 'IVZ', 'DLTR', 'KSS', 'FTI', 'RHT', 'WU', 'STX', 'DOV', 'ALTR', 'WPI',", "'WDR', 'WEC', 'WEN', 'WERN', 'WFC', 'WFM', 'WGL', 'WGO', 'WHR', 'WIBC', 'WIN', 'WIRE', 'WLP',", "'ALL', 'SE', 'HCP', 'RTN', 'WLP', 
'CCI', 'JCI', 'MPC', 'MMC', 'FE', 'VTR', 'SYY', 'PCG',", "'GBCI', 'GCI', 'GCO', 'GD', 'GDI', 'GE', 'GEF', 'GEO', 'GES', 'GFF', 'GGG', 'GHL', 'GIFI',", "'ADM', 'ADP', 'ADS', 'ADSK', 'ADTN', 'ADVS', 'AEE', 'AEGN', 'AEIS', 'AEO', 'AEP', 'AES', 'AET',", "'F', 'FAF', 'FARO', 'FAST', 'FBHS', 'FBP', 'FCF', 'FCFS', 'FCN', 'FCS', 'FCX', 'FDO', 'FDS',", "'PMC', 'PMTC', 'PMTI', 'PNC', 'PNFP', 'PNK', 'PNM', 'PNR', 'PNRA', 'PNW', 'PNY', 'POL', 'POM',", "'CTAS', 'HAS', 'POM', 'PBCT', 'NFX', 'RDC', 'SNA', 'GCI', 'URBN', 'NBR', 'TEG', 'EA', 'HRL',", "'ACAT', 'ACC', 'ACE', 'ACI', 'ACIW', 'ACM', 'ACN', 'ACO', 'ACXM', 'ADBE', 'ADI', 'ADM', 'ADP',", "'LEN', 'QEP', 'EFX', 'CVH', 'CLF', 'CBG', 'CINF', 'NWL', 'HSP', 'EXPE', 'XRAY', 'UNM', 'MAS',", "'AES', 'AET', 'AF', 'AFAM', 'AFFX', 'AFG', 'AFL', 'AGCO', 'AGN', 'AGP', 'AGYS', 'AHL', 'AHS',", "'ORLY', 'MUR', 'OKE', 'MYL', 'BF.B', 'MAR', 'ROK', 'CHK', 'ABC', 'ICE', 'HOG', 'XRX', 'APH',", "'KEY', 'RSG', 'MKC', 'BCR', 'BSX', 'KLAC', 'AEE', 'BWA', 'SPLS', 'FIS', 'SRCL', 'EXPD', 'COL',", "'ETN', 'MOS', 'IP', 'BDX', 'MHP', 'STI', 'LO', 'M', 'MJN', 'EIX', 'EL', 'DISCA', 'HCN',", "'ODP', 'OFC', 'OGE', 'OHI', 'OI', 'OII', 'OIS', 'OKE', 'OLN', 'OMC', 'OMCL', 'OMG', 'OMI',", "'PETS', 'PFE', 'PFG', 'PFS', 'PG', 'PGR', 'PH', 'PHM', 'PII', 'PJC', 'PKE', 'PKG', 'PKI',", "'REGN', 'RF', 'RFMD', 'RGA', 'RGLD', 'RGR', 'RGS', 'RHI', 'RHT', 'RJF', 'RKT', 'RL', 'RLI',", "'AHL', 'AHS', 'AIG', 'AINV', 'AIR', 'AIRM', 'AIT', 'AIV', 'AIZ', 'AJG', 'AKAM', 'AKR', 'AKRX',", "'LPSN', 'LPX', 'LQDT', 'LRCX', 'LRY', 'LSI', 'LSTR', 'LTC', 'LTD', 'LTM', 'LUFK', 'LUK', 'LUV',", "'FAF', 'FARO', 'FAST', 'FBHS', 'FBP', 'FCF', 'FCFS', 'FCN', 'FCS', 'FCX', 'FDO', 'FDS', 'FDX',", "'MEI', 'MENT', 'MET', 'MFB', 'MGAM', 'MGLN', 'MHK', 'MHO', 'MIG', 'MINI', 'MJN', 'MKC', 'MKSI',", "'EMR', 'APC', 'NKE', 'DOW', 'LOW', 'NOV', 'KMB', 'APA', 'HPQ', 'PNC', 'COF', 'BAX', 'TJX',", "'ITW', 'ACE', 'PRU', 'VIAB', 'CTL', 'LMT', 'FDX', 'PCP', 'BBT', 'MS', 'BLK', 'DVN', 'AFL',", "'PMTI', 'PNC', 
'PNFP', 'PNK', 'PNM', 'PNR', 'PNRA', 'PNW', 'PNY', 'POL', 'POM', 'POOL', 'POST',", "'BRC', 'BRCM', 'BRKL', 'BRKS', 'BRLI', 'BRO', 'BRS', 'BSX', 'BTH', 'BTU', 'BWA', 'BWLD', 'BWS',", "'EBAY', 'EBIX', 'EBS', 'ECL', 'ECPG', 'ED', 'EE', 'EFX', 'EGL', 'EGN', 'EGP', 'EHTH', 'EIG',", "'XLNX', 'EMN', 'SIAL', 'WEC', 'CCE', 'WDC', 'LIFE', 'MCO', 'HIG', 'JWN', 'FRX', 'MNST', 'FFIV',", "'AN', 'ANDE', 'ANF', 'ANN', 'ANR', 'ANSS', 'AOL', 'AON', 'AOS', 'APA', 'APC', 'APD', 'APEI',", "'EV', 'EW', 'EWBC', 'EXAR', 'EXC', 'EXH', 'EXLS', 'EXP', 'EXPD', 'EXPE', 'EXPO', 'EXR', 'EZPW',", "'TSN', 'TSO', 'TSRA', 'TSS', 'TTC', 'TTEC', 'TTEK', 'TTI', 'TTMI', 'TTWO', 'TUES', 'TUP', 'TW',", "'SM', 'SMA', 'SMCI', 'SMG', 'SMP', 'SMRT', 'SMTC', 'SNA', 'SNCR', 'SNDK', 'SNH', 'SNI', 'SNPS',", "'FLS', 'FMBI', 'FMC', 'FMER', 'FNB', 'FNF', 'FNFG', 'FNGN', 'FNP', 'FOR', 'FORR', 'FOSL', 'FRED',", "'HCC', 'HCN', 'HCP', 'HCSG', 'HD', 'HE', 'HES', 'HF', 'HFC', 'HGR', 'HHS', 'HI', 'HIBB',", "'ATI', 'SAI', 'PKI', 'WPX', 'BMS', 'AVY', 'HAR', 'OI', 'AIZ', 'NFLX', 'DF', 'FLIR', 'GT',", "'EMC', 'UNH', 'BMY', 'UNP', 'CAT', 'EBAY', 'AXP', 'UPS', 'GS', 'ESRX', 'GILD', 'NWSA', 'MON',", "'NWSA', 'MON', 'MA', 'LLY', 'CL', 'BA', 'DD', 'HON', 'SPG', 'DUK', 'ACN', 'MDT', 'COST',", "'UTEK', 'UTHR', 'UTI', 'UTIW', 'UTX', 'UVV', 'V', 'VAL', 'VAR', 'VCI', 'VCLK', 'VDSI', 'VECO',", "'MKC', 'BCR', 'BSX', 'KLAC', 'AEE', 'BWA', 'SPLS', 'FIS', 'SRCL', 'EXPD', 'COL', 'VRSN', 'FMC',", "'CERN', 'CEVA', 'CF', 'CFN', 'CFR', 'CGNX', 'CGX', 'CHCO', 'CHD', 'CHE', 'CHG', 'CHK', 'CHRW',", "'SGY', 'SHAW', 'SHFL', 'SHLM', 'SHOO', 'SHW', 'SIAL', 'SIG', 'SIGI', 'SIGM', 'SIVB', 'SJI', 'SJM',", "'MON', 'MOS', 'MOV', 'MPC', 'MPW', 'MPWR', 'MRCY', 'MRK', 'MRO', 'MRX', 'MS', 'MSA', 'MSCC',", "'PM', 'PMC', 'PMTC', 'PMTI', 'PNC', 'PNFP', 'PNK', 'PNM', 'PNR', 'PNRA', 'PNW', 'PNY', 'POL',", "'NAFC', 'NANO', 'NATI', 'NAVG', 'NBL', 'NBR', 'NBTB', 'NCI', 'NCIT', 'NCR', 'NCS', 'NDAQ', 'NDSN',", "'CLGX', 'CLH', 'CLI', 'CLMS', 'CLP', 'CLW', 'CLX', 'CMA', 
'CMC', 'CMCSA', 'CME', 'CMG', 'CMI',", "'WEC', 'WEN', 'WERN', 'WFC', 'WFM', 'WGL', 'WGO', 'WHR', 'WIBC', 'WIN', 'WIRE', 'WLP', 'WM',", "'PSS', 'PSSI', 'PSX', 'PTEN', 'PULS', 'PVA', 'PVH', 'PVTB', 'PWR', 'PX', 'PXD', 'PXP', 'PZZA',", "'PAY', 'PAYX', 'PB', 'PBCT', 'PBH', 'PBI', 'PBY', 'PCAR', 'PCG', 'PCH', 'PCL', 'PCLN', 'PCP',", "'LPX', 'LQDT', 'LRCX', 'LRY', 'LSI', 'LSTR', 'LTC', 'LTD', 'LTM', 'LUFK', 'LUK', 'LUV', 'LXK',", "'BGG', 'BGS', 'BH', 'BHE', 'BHI', 'BID', 'BIG', 'BIIB', 'BJRI', 'BK', 'BKE', 'BKH', 'BKI',", "'MGAM', 'MGLN', 'MHK', 'MHO', 'MIG', 'MINI', 'MJN', 'MKC', 'MKSI', 'MLHR', 'MLI', 'MLM', 'MMC',", "'ROST', 'ROVI', 'RPM', 'RRC', 'RRD', 'RRGB', 'RS', 'RSG', 'RSH', 'RSTI', 'RSYS', 'RT', 'RTEC',", "'BAS', 'BAX', 'BBBY', 'BBG', 'BBOX', 'BBT', 'BBY', 'BC', 'BCO', 'BCOR', 'BCPC', 'BCR', 'BDC',", "'LNC', 'LNCE', 'LNN', 'LNT', 'LO', 'LOGM', 'LOW', 'LPNT', 'LPS', 'LPSN', 'LPX', 'LQDT', 'LRCX',", "'NRG', 'IPG', 'IFF', 'GAS', 'STZ', 'HRB', 'XYL', 'TSN', 'FOSL', 'DO', 'BBY', 'LUK', 'CTAS',", "'DLTR', 'DLX', 'DM', 'DMND', 'DNB', 'DNR', 'DO', 'DOV', 'DOW', 'DPS', 'DRC', 'DRE', 'DRH',", "'ROVI', 'RPM', 'RRC', 'RRD', 'RRGB', 'RS', 'RSG', 'RSH', 'RSTI', 'RSYS', 'RT', 'RTEC', 'RTI',", "'AMSF', 'AMSG', 'AMT', 'AMZN', 'AN', 'ANDE', 'ANF', 'ANN', 'ANR', 'ANSS', 'AOL', 'AON', 'AOS',", "'CENX', 'CERN', 'CEVA', 'CF', 'CFN', 'CFR', 'CGNX', 'CGX', 'CHCO', 'CHD', 'CHE', 'CHG', 'CHK',", "'HRS', 'HSC', 'HSH', 'HSIC', 'HSII', 'HSNI', 'HSP', 'HST', 'HSTM', 'HSY', 'HTLD', 'HTSI', 'HUBG',", "'WMB', 'CBS', 'CSX', 'TMO', 'AEP', 'CTSH', 'MRO', 'DFS', 'NSC', 'MCK', 'CB', 'KMI', 'STT',", "'COL', 'VRSN', 'FMC', 'ADSK', 'PFG', 'WYN', 'SLM', 'PLL', 'TIF', 'TXT', 'XL', 'LLTC', 'WAT',", "'COV', 'ADP', 'AMT', 'AGN', 'NEM', 'BK', 'TRV', 'TYC', 'GIS', 'ITW', 'ACE', 'PRU', 'VIAB',", "'NFLX', 'NFP', 'NFX', 'NI', 'NILE', 'NJR', 'NKE', 'NNN', 'NOC', 'NOV', 'NP', 'NPBC', 'NPK',", "'CATO', 'CATY', 'CB', 'CBB', 'CBE', 'CBEY', 'CBG', 'CBK', 'CBM', 'CBOE', 'CBR', 'CBRL', 'CBS',", "'DUK', 'DV', 'DVA', 
'DVN', 'DW', 'DWA', 'DY', 'EA', 'EAT', 'EBAY', 'EBIX', 'EBS', 'ECL',", "'KELYA', 'KEX', 'KEY', 'KFY', 'KIM', 'KIRK', 'KLAC', 'KLIC', 'KMB', 'KMI', 'KMPR', 'KMT', 'KMX',", "'PCTI', 'PDCE', 'PDCO', 'PEET', 'PEG', 'PEI', 'PEP', 'PERY', 'PES', 'PETM', 'PETS', 'PFE', 'PFG',", "'CCI', 'CCL', 'CCMP', 'CCRN', 'CDI', 'CDNS', 'CDR', 'CEB', 'CEC', 'CECO', 'CELG', 'CELL', 'CENX',", "'NAVG', 'NBL', 'NBR', 'NBTB', 'NCI', 'NCIT', 'NCR', 'NCS', 'NDAQ', 'NDSN', 'NE', 'NEE', 'NEM',", "'SCOR', 'SCSC', 'SCSS', 'SE', 'SEE', 'SEIC', 'SENEA', 'SF', 'SFD', 'SFG', 'SFNC', 'SFY', 'SGMS',", "'AXP', 'AYI', 'AZO', 'AZZ', 'B', 'BA', 'BABY', 'BAC', 'BAS', 'BAX', 'BBBY', 'BBG', 'BBOX',", "'KWR', 'L', 'LAD', 'LAMR', 'LANC', 'LAWS', 'LDL', 'LDR', 'LECO', 'LEG', 'LEN', 'LFUS', 'LG',", "'SPG', 'DUK', 'ACN', 'MDT', 'COST', 'TWX', 'TGT', 'SO', 'SBUX', 'AIG', 'F', 'FCX', 'MET',", "'ENDP', 'ENR', 'ENS', 'ENSG', 'ENTR', 'ENZ', 'EOG', 'EPAY', 'EPIQ', 'EPR', 'EQIX', 'EQR', 'EQT',", "'CAS', 'CASC', 'CASY', 'CAT', 'CATM', 'CATO', 'CATY', 'CB', 'CBB', 'CBE', 'CBEY', 'CBG', 'CBK',", "'ARW', 'ASBC', 'ASEI', 'ASGN', 'ASH', 'ASNA', 'ASTE', 'ATI', 'ATK', 'ATMI', 'ATML', 'ATNI', 'ATO',", "'DKS', 'DLTR', 'DLX', 'DM', 'DMND', 'DNB', 'DNR', 'DO', 'DOV', 'DOW', 'DPS', 'DRC', 'DRE',", "'DAR', 'DBD', 'DCI', 'DCOM', 'DD', 'DDD', 'DE', 'DECK', 'DEL', 'DELL', 'DF', 'DFS', 'DGII',", "'INDB', 'INFA', 'INGR', 'ININ', 'INT', 'INTC', 'INTU', 'IO', 'IP', 'IPAR', 'IPCC', 'IPCM', 'IPG',", "'AEO', 'AEP', 'AES', 'AET', 'AF', 'AFAM', 'AFFX', 'AFG', 'AFL', 'AGCO', 'AGN', 'AGP', 'AGYS',", "'LO', 'LOGM', 'LOW', 'LPNT', 'LPS', 'LPSN', 'LPX', 'LQDT', 'LRCX', 'LRY', 'LSI', 'LSTR', 'LTC',", "'PLD', 'SYK', 'TROW', 'COH', 'ADBE', 'HES', 'ETN', 'MOS', 'IP', 'BDX', 'MHP', 'STI', 'LO',", "'POST', 'POWI', 'POWL', 'PPG', 'PPS', 'PQ', 'PRA', 'PRAA', 'PRFT', 'PRGO', 'PRGS', 'PRU', 'PRX',", "'LHCG', 'LHO', 'LIFE', 'LII', 'LINC', 'LKQ', 'LL', 'LLL', 'LLTC', 'LLY', 'LM', 'LMNX', 'LMOS',", "'RLI', 'RMD', 'ROCK', 'ROG', 'ROK', 'ROL', 'ROP', 'ROSE', 'ROST', 
'ROVI', 'RPM', 'RRC', 'RRD',", "'TMK', 'FTR', 'NRG', 'IPG', 'IFF', 'GAS', 'STZ', 'HRB', 'XYL', 'TSN', 'FOSL', 'DO', 'BBY',", "'MSCC', 'MSCI', 'MSFT', 'MSI', 'MSM', 'MSTR', 'MTB', 'MTD', 'MTH', 'MTRN', 'MTRX', 'MTSC', 'MTX',", "'APD', 'VLO', 'EQR', 'BEN', 'ECL', 'PPL', 'AON', 'WFM', 'BXP', 'YHOO', 'S', 'NBL', 'NOC',", "'ECL', 'ECPG', 'ED', 'EE', 'EFX', 'EGL', 'EGN', 'EGP', 'EHTH', 'EIG', 'EIX', 'EL', 'ELY',", "'EA', 'EAT', 'EBAY', 'EBIX', 'EBS', 'ECL', 'ECPG', 'ED', 'EE', 'EFX', 'EGL', 'EGN', 'EGP',", "'ESS', 'ESV', 'ETFC', 'ETH', 'ETN', 'ETR', 'EV', 'EW', 'EWBC', 'EXAR', 'EXC', 'EXH', 'EXLS',", "'AIG', 'AINV', 'AIR', 'AIRM', 'AIT', 'AIV', 'AIZ', 'AJG', 'AKAM', 'AKR', 'AKRX', 'AKS', 'ALB',", "'TEG', 'EA', 'HRL', 'SWY', 'LSI', 'TSS', 'ZION', 'HCBK', 'AIV', 'RHI', 'PCS', 'MOLX', 'TE',", "'DIS', 'DISCA', 'DKS', 'DLTR', 'DLX', 'DM', 'DMND', 'DNB', 'DNR', 'DO', 'DOV', 'DOW', 'DPS',", "'AVID', 'AVP', 'AVT', 'AVY', 'AWR', 'AXE', 'AXP', 'AYI', 'AZO', 'AZZ', 'B', 'BA', 'BABY'," ]
[ "another way to redirect streams of spawned programs and are close cousins to", "data, int(data) * 2) \"\"\" ''' the following connects two programs, by piping", "\"\"\" But Python scripts can also provide input to spawned programs’ standard input", "shell world') C:\\...\\PP4E\\System\\Streams> type hello-in.py inp = input() open('hello-in.txt', 'w').write('Hello ' + inp", "to run a shell command line. Other Redirection Options: os.popen and subprocess Revisited", "'w') # 'w'--write to program stdin >>> pipe.write('Gumby\\n') >>> pipe.close() # \\n at", "objects). These tools redirect the streams of a program that a script starts,", "spawned program’s input stream is just as simple, though a bit more complex", "hello-out.py') # 'r' is default--read stdout >>> pipe.read() print(pipe.close()) # exit status: None", "as bidirectional stream communication (accessing both a program’s input and output) and tying", "'\\n') ''' ''' Python scripts can read output from other programs and scripts", "from other programs and scripts like these, too, using code like the following:", "also achieve feats such as bidirectional stream communication (accessing both a program’s input", "can be used to run a shell command line (a string we would", "its pipes are read or write (and not both) prevents us from catching", "(stdout, stderr) >>> pipe.returncode # exit status >>> pipe = Popen('python hello-out.py', stdout=PIPE)", "can be used to control buffering of written text. \"\"\" \"Redirecting input and", "script into another, first with shell syntax, and then with the subprocess module:", "are another way to redirect streams of spawned programs and are close cousins", "spawned program with this module. C:\\...\\PP4E\\System\\Streams> type writer.py print(\"Help! Help! I'm being repressed!\")", "of the default “r”, connects the returned object to the spawned program’s input", "in the desired mode flag, we redirect either a spawned program’s output or", "subprocess module. 
module can emulate os.popen functionality, but it can also achieve feats", "writer.py | python reader.py Got this: \"Help! Help! I'm being repressed!\" The meaning", "from subprocess import Popen, PIPE, call >>> X = call('python hello-out.py') # convenience", "input to spawned programs’ standard input streams—passing a “w” mode argument, instead of", "os.popen and subprocess tools are another way to redirect streams of spawned programs", "0 We can get close to this with os.popen, but that the fact", "# output sent to a file In fact, we can use obtain both", "stdout=PIPE) >>> p2 = Popen('python reader.py', stdin=p1.stdout, stdout=PIPE) >>> output = p2.communicate()[0] >>>", "desired mode flag, we redirect either a spawned program’s output or input streams", "\"Redirecting input and output with subprocess\" ''' For even more control over the", "programs (not calling functions), and the command’s streams are processed in the spawning", "to control buffering of written text. \"\"\" \"Redirecting input and output with subprocess\"", "spawn a program and get both its standard output text and exit status.", "a script to read another program’s output. I suggested that these tools may", "way to redirect another command’s streams from within a Python program. As we", "or csh prompt) but also provide a Python file-like object connected to the", "hello-out.py', stdout=PIPE) >>> pipe.stdout.read() # read all output >>> pipe.wait() # exit status", "both the input and output streams of a spawned program with this module.", "an optional third argument that can be used to control buffering of written", "at the built-in os.popen function and its subprocess.Popen relative, which provide a way", "may be used to tap into input streams as well. 
Because of that,", "subprocess import Popen, PIPE, call >>> X = call('python hello-out.py') # convenience >>>", "the os.popen and subprocess tools are another way to redirect streams of spawned", "subprocess module: C:\\...\\PP4E\\System\\Streams> python writer.py | python reader.py Got this: \"Help! Help! I'm", "of life is', data, int(data) * 2) \"\"\" ''' the following connects two", "subprocess\" ''' For even more control over the streams of spawned programs, we", "to a file The popen call is also smart enough to run the", "a “w” mode argument, instead of the default “r”, connects the returned object", "streams of spawned programs and are close cousins to some of the techniques", "'w') >>> p2.write( p1.read() ) 36 >>> X = p2.close() Got this: \"Help!", "is also smart enough to run the command string as an independent process", "achieve feats such as bidirectional stream communication (accessing both a program’s input and", "hello-in.py inp = input() open('hello-in.txt', 'w').write('Hello ' + inp + '\\n') ''' '''", "the output of one program to the input of another. this module provides", "They are similar in spirit to the redirect function, but are based on", "a way to redirect another command’s streams from within a Python program. 
As", "C:\\...\\PP4E\\System\\Streams> python >>> import os >>> pipe = os.popen('python hello-out.py') # 'r' is", "os.popen('python hello-out.py') # 'r' is default--read stdout >>> pipe.read() print(pipe.close()) # exit status:", "= sys.stdin.readline()[:-1] print('The meaning of life is', data, int(data) * 2) \"\"\" '''", "tools redirect the streams of a program that a script starts, instead of", "effect is much like the shell | command-line pipe syntax for redirecting streams", "default--read stdout >>> pipe.read() print(pipe.close()) # exit status: None is good ''' \"\"\"", "can also achieve feats such as bidirectional stream communication (accessing both a program’s", "# exit status: None is good ''' \"\"\" But Python scripts can also", "Help! I'm being repressed!\") print(42) C:\\...\\PP4E\\System\\Streams> type reader.py print('Got this: \"%s\"' % input())", ">>> pipe.close() # \\n at end is optional >>> open('hello-in.txt').read() # output sent", "the streams of the script itself. ''' \"Redirecting input or output with os.popen\"", "run the command string as an independent process on platforms that support such", "script to read another program’s output. I suggested that these tools may be", "redirect either a spawned program’s output or input streams to a file in", "inp + '\\n') ''' ''' Python scripts can read output from other programs", "functions), and the command’s streams are processed in the spawning script as files", "of spawned programs and are close cousins to some of the techniques we", "some of the techniques we just met. Their effect is much like the", "\"Help! Help! I'm being repressed!\" The meaning of life is 42 84 >>>", "tied to class objects). These tools redirect the streams of a program that", ">>> output b'Got this: \"Help! Help! 
I\\'m being repressed!\"\\r\\nThe meaning of life is", "data = sys.stdin.readline()[:-1] print('The meaning of life is', data, int(data) * 2) \"\"\"", ">>> pipe.communicate()[0] # (stdout, stderr) >>> pipe.returncode # exit status >>> pipe =", "Python script into another, first with shell syntax, and then with the subprocess", "spawning script as files (not tied to class objects). These tools redirect the", "the streams of spawned programs, we can employ the subprocess module. module can", "both) prevents us from catching the second script’s output in our code: >>>", "itself. ''' \"Redirecting input or output with os.popen\" ''' In fact, by passing", "Python program. these tools can be used to run a shell command line.", "hello-out.py print('Hello shell world') C:\\...\\PP4E\\System\\Streams> type hello-in.py inp = input() open('hello-in.txt', 'w').write('Hello '", "similar in spirit to the redirect function, but are based on running programs", "optional third argument that can be used to control buffering of written text.", "module. module can emulate os.popen functionality, but it can also achieve feats such", "program to the input of another. this module provides multiple ways to spawn", "a file The popen call is also smart enough to run the command", "of a spawned program with this module. C:\\...\\PP4E\\System\\Streams> type writer.py print(\"Help! Help! I'm", "command line (a string we would normally type at a DOS or csh", "piping the output of one Python script into another, first with shell syntax,", "shell syntax, and then with the subprocess module: C:\\...\\PP4E\\System\\Streams> python writer.py | python", "normally type at a DOS or csh prompt) but also provide a Python", "but are based on running programs (not calling functions), and the command’s streams", ">>> pipe = Popen('python hello-out.py', stdout=PIPE) >>> pipe.stdout.read() # read all output >>>", "techniques we just met. 
Their effect is much like the shell | command-line", "can also provide input to spawned programs’ standard input streams—passing a “w” mode", "first look at the built-in os.popen function and its subprocess.Popen relative, which provide", "provide input to spawned programs’ standard input streams—passing a “w” mode argument, instead", "error” here). C:\\...\\PP4E\\System\\Streams> type hello-out.py print('Hello shell world') C:\\...\\PP4E\\System\\Streams> type hello-in.py inp =", "output stream— reading the file object allows a script to read another program’s", "and output) and tying the output of one program to the input of", "and provide a file-like interface to piped streams. They are similar in spirit", "pipe.stdin.write(b'Pokey\\n') >>> pipe.stdin.close() >>> pipe.wait() >>> open('hello-in.txt').read() # output sent to a file", "= Popen('python hello-out.py', stdout=PIPE) >>> pipe.stdout.read() # read all output >>> pipe.wait() #", "% input()) import sys data = sys.stdin.readline()[:-1] print('The meaning of life is', data,", "input()) import sys data = sys.stdin.readline()[:-1] print('The meaning of life is', data, int(data)", "C:\\...\\PP4E\\System\\Streams> python writer.py | python reader.py Got this: \"Help! Help! I'm being repressed!\"", "function and its subprocess.Popen relative, which provide a way to redirect another command’s", "well. 
Because of that, the os.popen and subprocess tools are another way to", "obtain both the input and output streams of a spawned program with this", ">>> p2 = os.popen('python reader.py', 'w') >>> p2.write( p1.read() ) 36 >>> X", "Python file-like object connected to the command’s output stream— reading the file object", "is much like the shell | command-line pipe syntax for redirecting streams to", "the end of the preceding chapter, we took a first look at the", "Near the end of the preceding chapter, we took a first look at", "to the command’s output stream— reading the file object allows a script to", "string as an independent process on platforms that support such a notion. It", "of another. this module provides multiple ways to spawn a program and get", "the command’s output stream— reading the file object allows a script to read", "Python scripts can also provide input to spawned programs’ standard input streams—passing a", "subprocess Revisited\" ''' the built-in os.popen function and its subprocess.Popen relative, which provide", "= os.popen('python writer.py', 'r') >>> p2 = os.popen('python reader.py', 'w') >>> p2.write( p1.read()", "But Python scripts can also provide input to spawned programs’ standard input streams—passing", "print('Got this: \"%s\"' % input()) import sys data = sys.stdin.readline()[:-1] print('The meaning of", "we can obtain the spawned program’s exit status code from the close method", "also smart enough to run the command string as an independent process on", "to the spawned program’s input stream. What we write on the spawning end", "input of another. this module provides multiple ways to spawn a program and", "Help! 
I'm being repressed!\" The meaning of life is 42 84 >>> print(X)", ">>> pipe.stdin.write(b'Pokey\\n') >>> pipe.stdin.close() >>> pipe.wait() >>> open('hello-in.txt').read() # output sent to a", "to redirect streams of spawned programs and are close cousins to some of", "os >>> pipe = os.popen('python hello-out.py') # 'r' is default--read stdout >>> pipe.read()", "also provide input to spawned programs’ standard input streams—passing a “w” mode argument,", "shows up as input in the program started: >>> pipe = os.popen('python hello-in.py',", "into input streams as well. Because of that, the os.popen and subprocess tools", "= p2.communicate()[0] >>> output b'Got this: \"Help! Help! I\\'m being repressed!\"\\r\\nThe meaning of", "''' \"Redirecting input or output with os.popen\" ''' In fact, by passing in", "process on platforms that support such a notion. It accepts an optional third", "catching the second script’s output in our code: >>> import os >>> p1", "program. As we saw, these tools can be used to run a shell", "simple, though a bit more complex than the os.popen approach with 'w' file", "programs and are close cousins to some of the techniques we just met.", "programs and scripts like these, too, using code like the following: C:\\...\\PP4E\\System\\Streams> python", "spirit to the redirect function, but are based on running programs (not calling", "preceding chapter, we took a first look at the built-in os.popen function and", "of a program that a script starts, instead of redirecting the streams of", "the command string as an independent process on platforms that support such a", "and exit status. 
C:\\...\\PP4E\\System\\Streams> python >>> from subprocess import Popen, PIPE, call >>>", "+ inp + '\\n') ''' ''' Python scripts can read output from other", "Redirecting and connecting to the spawned program’s input stream is just as simple,", "program’s input stream is just as simple, though a bit more complex than", "a bit more complex than the os.popen approach with 'w' file mode >>>", "as files (not tied to class objects). These tools redirect the streams of", "p2 = Popen('python reader.py', stdin=p1.stdout, stdout=PIPE) >>> output = p2.communicate()[0] >>> output b'Got", "another command’s streams from within a Python program. As we saw, these tools", "we saw, these tools can be used to run a shell command line", "None is good ''' \"\"\" But Python scripts can also provide input to", "+ '\\n') ''' ''' Python scripts can read output from other programs and", "argument that can be used to control buffering of written text. \"\"\" \"Redirecting", "and subprocess Revisited Near the end of the preceding chapter, we took a", "# convenience >>> X >>> pipe = Popen('python hello-out.py', stdout=PIPE) >>> pipe.communicate()[0] #", "and output with subprocess\" ''' For even more control over the streams of", "of the preceding chapter, we took a first look at the built-in os.popen", "reader.py print('Got this: \"%s\"' % input()) import sys data = sys.stdin.readline()[:-1] print('The meaning", "just met. Their effect is much like the shell | command-line pipe syntax", "\"Help! Help! I\\'m being repressed!\"\\r\\nThe meaning of life is 42 84\\r\\n' >>> p2.returncode", "to a file In fact, we can use obtain both the input and", "type hello-in.py inp = input() open('hello-in.txt', 'w').write('Hello ' + inp + '\\n') '''", "the fact that its pipes are read or write (and not both) prevents", "from catching the second script’s output in our code: >>> import os >>>", "input streams as well. 
Because of that, the os.popen and subprocess tools are", "output) and tying the output of one program to the input of another.", "I'm being repressed!\") print(42) C:\\...\\PP4E\\System\\Streams> type reader.py print('Got this: \"%s\"' % input()) import", "and we can obtain the spawned program’s exit status code from the close", "function, but are based on running programs (not calling functions), and the command’s", "fact, we can use obtain both the input and output streams of a", "command’s output stream— reading the file object allows a script to read another", "write (and not both) prevents us from catching the second script’s output in", ">>> from subprocess import Popen, PIPE, call >>> X = call('python hello-out.py') #", ">>> pipe = os.popen('python hello-in.py', 'w') # 'w'--write to program stdin >>> pipe.write('Gumby\\n')", "redirect function, but are based on running programs (not calling functions), and the", "call('python hello-out.py') # convenience >>> X >>> pipe = Popen('python hello-out.py', stdout=PIPE) >>>", "notion. 
It accepts an optional third argument that can be used to control", "pipe.communicate()[0] # (stdout, stderr) >>> pipe.returncode # exit status >>> pipe = Popen('python", "can read output from other programs and scripts like these, too, using code", "script’s output in our code: >>> import os >>> p1 = os.popen('python writer.py',", "sent to a file The popen call is also smart enough to run", ">>> pipe = Popen('python hello-in.py', stdin=PIPE) >>> pipe.stdin.write(b'Pokey\\n') >>> pipe.stdin.close() >>> pipe.wait() >>>", "input stream is just as simple, though a bit more complex than the", "shell | command-line pipe syntax for redirecting streams to programs (in fact, their", "program stdin >>> pipe.write('Gumby\\n') >>> pipe.close() # \\n at end is optional >>>", ">>> pipe.stdin.close() >>> pipe.wait() >>> open('hello-in.txt').read() # output sent to a file In", ">>> p1 = os.popen('python writer.py', 'r') >>> p2 = os.popen('python reader.py', 'w') >>>", "string we would normally type at a DOS or csh prompt) but also", "file object allows a script to read another program’s output. I suggested that", "that support such a notion. It accepts an optional third argument that can", "84\\r\\n' >>> p2.returncode 0 We can get close to this with os.popen, but", "these tools can be used to run a shell command line. Other Redirection", "\"\"\" \"Redirecting input and output with subprocess\" ''' For even more control over", "streams from within a Python program. As we saw, these tools can be", "that can be used to control buffering of written text. \"\"\" \"Redirecting input", "get both its standard output text and exit status. C:\\...\\PP4E\\System\\Streams> python >>> from", "but that the fact that its pipes are read or write (and not", "output from other programs and scripts like these, too, using code like the", "to piped streams. 
They are similar in spirit to the redirect function, but", "it can also achieve feats such as bidirectional stream communication (accessing both a", "input() open('hello-in.txt', 'w').write('Hello ' + inp + '\\n') ''' ''' Python scripts can", "Help! I'm being repressed!\" The meaning of life is 42 84 C:\\...\\PP4E\\System\\Streams> python", "one Python script into another, first with shell syntax, and then with the", "python >>> from subprocess import Popen, PIPE >>> p1 = Popen('python writer.py', stdout=PIPE)", "functionality, but it can also achieve feats such as bidirectional stream communication (accessing", "Popen('python hello-out.py', stdout=PIPE) >>> pipe.stdout.read() # read all output >>> pipe.wait() # exit", "approach with 'w' file mode >>> pipe = Popen('python hello-in.py', stdin=PIPE) >>> pipe.stdin.write(b'Pokey\\n')", "is 42 84\\r\\n' >>> p2.returncode 0 We can get close to this with", "run within a script and provide a file-like interface to piped streams. They", "within a Python program. 
these tools can be used to run a shell", "with the subprocess module: C:\\...\\PP4E\\System\\Streams> python writer.py | python reader.py Got this: \"Help!", "to spawned programs’ standard input streams—passing a “w” mode argument, instead of the", "pipe = Popen('python hello-out.py', stdout=PIPE) >>> pipe.stdout.read() # read all output >>> pipe.wait()", "and its subprocess.Popen relative, which provide a way to redirect another command’s streams", "their names mean “pipe open”), but they are run within a script and", "programs’ standard input streams—passing a “w” mode argument, instead of the default “r”,", "''' the built-in os.popen function and its subprocess.Popen relative, which provide a way", "input and output) and tying the output of one program to the input", "being repressed!\" The meaning of life is 42 84 >>> print(X) None '''", "mode argument, instead of the default “r”, connects the returned object to the", "command’s streams are processed in the spawning script as files (not tied to", "bidirectional stream communication (accessing both a program’s input and output) and tying the", "used to run a shell command line. Other Redirection Options: os.popen and subprocess", "program’s input and output) and tying the output of one program to the", "streams—passing a “w” mode argument, instead of the default “r”, connects the returned", "to class objects). These tools redirect the streams of a program that a", "more control over the streams of spawned programs, we can employ the subprocess", "object to the spawned program’s input stream. What we write on the spawning", "though a bit more complex than the os.popen approach with 'w' file mode", "to read another program’s output. I suggested that these tools may be used", "run a shell command line. Other Redirection Options: os.popen and subprocess Revisited Near", "import os >>> pipe = os.popen('python hello-out.py') # 'r' is default--read stdout >>>", "input stream. 
What we write on the spawning end shows up as input", "command line. Other Redirection Options: os.popen and subprocess Revisited Near the end of", "provide a file-like interface to piped streams. They are similar in spirit to", "= input() open('hello-in.txt', 'w').write('Hello ' + inp + '\\n') ''' ''' Python scripts", "os.popen and subprocess Revisited\" ''' the built-in os.popen function and its subprocess.Popen relative,", "of the techniques we just met. Their effect is much like the shell", "fact, by passing in the desired mode flag, we redirect either a spawned", "here). C:\\...\\PP4E\\System\\Streams> type hello-out.py print('Hello shell world') C:\\...\\PP4E\\System\\Streams> type hello-in.py inp = input()", "mode flag, we redirect either a spawned program’s output or input streams to", "a file-like interface to piped streams. They are similar in spirit to the", "Popen, PIPE, call >>> X = call('python hello-out.py') # convenience >>> X >>>", "run a shell command line (a string we would normally type at a", "subprocess tools are another way to redirect streams of spawned programs and are", "obtain the spawned program’s exit status code from the close method (None means", "“r”, connects the returned object to the spawned program’s input stream. 
What we", "hello-out.py') # convenience >>> X >>> pipe = Popen('python hello-out.py', stdout=PIPE) >>> pipe.communicate()[0]", "syntax for redirecting streams to programs (in fact, their names mean “pipe open”),", "running programs (not calling functions), and the command’s streams are processed in the", "output sent to a file The popen call is also smart enough to", "Revisited\" ''' the built-in os.popen function and its subprocess.Popen relative, which provide a", "by passing in the desired mode flag, we redirect either a spawned program’s", "python >>> import os >>> pipe = os.popen('python hello-out.py') # 'r' is default--read", "Popen('python hello-in.py', stdin=PIPE) >>> pipe.stdin.write(b'Pokey\\n') >>> pipe.stdin.close() >>> pipe.wait() >>> open('hello-in.txt').read() # output", "program and get both its standard output text and exit status. C:\\...\\PP4E\\System\\Streams> python", "two programs, by piping the output of one Python script into another, first", "subprocess.Popen relative, which provide a way to redirect another command’s streams from within", "the program started: >>> pipe = os.popen('python hello-in.py', 'w') # 'w'--write to program", "program with this module. C:\\...\\PP4E\\System\\Streams> type writer.py print(\"Help! Help! I'm being repressed!\") print(42)", "at a DOS or csh prompt) but also provide a Python file-like object", "* 2) \"\"\" ''' the following connects two programs, by piping the output", "to run the command string as an independent process on platforms that support", "saw, these tools can be used to run a shell command line (a", "stdout=PIPE) >>> pipe.stdout.read() # read all output >>> pipe.wait() # exit status '''", "as simple, though a bit more complex than the os.popen approach with 'w'", "the subprocess module: C:\\...\\PP4E\\System\\Streams> python writer.py | python reader.py Got this: \"Help! 
Help!", "= os.popen('python hello-out.py') # 'r' is default--read stdout >>> pipe.read() print(pipe.close()) # exit", ">>> open('hello-in.txt').read() # output sent to a file The popen call is also", "type writer.py print(\"Help! Help! I'm being repressed!\") print(42) C:\\...\\PP4E\\System\\Streams> type reader.py print('Got this:", "code: >>> import os >>> p1 = os.popen('python writer.py', 'r') >>> p2 =", "I suggested that these tools may be used to tap into input streams", "\"%s\"' % input()) import sys data = sys.stdin.readline()[:-1] print('The meaning of life is',", "of spawned programs, we can employ the subprocess module. module can emulate os.popen", "command’s streams from within a Python program. As we saw, these tools can", "good ''' \"\"\" But Python scripts can also provide input to spawned programs’", "In fact, we can use obtain both the input and output streams of", "\"Other Redirection Options: os.popen and subprocess Revisited\" ''' the built-in os.popen function and", "they are run within a script and provide a file-like interface to piped", "streams. They are similar in spirit to the redirect function, but are based", "import os >>> p1 = os.popen('python writer.py', 'r') >>> p2 = os.popen('python reader.py',", "sent to a file In fact, we can use obtain both the input", "calling functions), and the command’s streams are processed in the spawning script as", "python reader.py Got this: \"Help! Help! I'm being repressed!\" The meaning of life", "in spirit to the redirect function, but are based on running programs (not", "os.popen('python writer.py', 'r') >>> p2 = os.popen('python reader.py', 'w') >>> p2.write( p1.read() )", "either a spawned program’s output or input streams to a file in the", "started: >>> pipe = os.popen('python hello-in.py', 'w') # 'w'--write to program stdin >>>", "script as files (not tied to class objects). These tools redirect the streams", "# output sent to a file The popen call is also smart enough", "another. 
this module provides multiple ways to spawn a program and get both", "and scripts like these, too, using code like the following: C:\\...\\PP4E\\System\\Streams> python >>>", ">>> pipe = Popen('python hello-out.py', stdout=PIPE) >>> pipe.communicate()[0] # (stdout, stderr) >>> pipe.returncode", "emulate os.popen functionality, but it can also achieve feats such as bidirectional stream", "as well. Because of that, the os.popen and subprocess tools are another way", "not both) prevents us from catching the second script’s output in our code:", "scripts, and we can obtain the spawned program’s exit status code from the", "to spawn a program and get both its standard output text and exit", "flag, we redirect either a spawned program’s output or input streams to a", "built-in os.popen function and its subprocess.Popen relative, which provide a way to redirect", "Python program. As we saw, these tools can be used to run a", "module: C:\\...\\PP4E\\System\\Streams> python writer.py | python reader.py Got this: \"Help! Help! I'm being", "calling scripts, and we can obtain the spawned program’s exit status code from", "programs, we can employ the subprocess module. module can emulate os.popen functionality, but", "of that, the os.popen and subprocess tools are another way to redirect streams", "import sys data = sys.stdin.readline()[:-1] print('The meaning of life is', data, int(data) *", "can obtain the spawned program’s exit status code from the close method (None", "a shell command line. Other Redirection Options: os.popen and subprocess Revisited Near the", "can get close to this with os.popen, but that the fact that its", "streams are processed in the spawning script as files (not tied to class", "from within a Python program. these tools can be used to run a", "the techniques we just met. 
Their effect is much like the shell |", "These tools redirect the streams of a program that a script starts, instead", "then with the subprocess module: C:\\...\\PP4E\\System\\Streams> python writer.py | python reader.py Got this:", "the built-in os.popen function and its subprocess.Popen relative, which provide a way to", "I'm being repressed!\" The meaning of life is 42 84 C:\\...\\PP4E\\System\\Streams> python >>>", "pipe.wait() # exit status ''' \"\"\" Redirecting and connecting to the spawned program’s", "connecting to the spawned program’s input stream is just as simple, though a", "os.popen approach with 'w' file mode >>> pipe = Popen('python hello-in.py', stdin=PIPE) >>>", "type at a DOS or csh prompt) but also provide a Python file-like", "X = call('python hello-out.py') # convenience >>> X >>> pipe = Popen('python hello-out.py',", "in the program started: >>> pipe = os.popen('python hello-in.py', 'w') # 'w'--write to", "the close method (None means “no error” here). C:\\...\\PP4E\\System\\Streams> type hello-out.py print('Hello shell", "hello-out.py', stdout=PIPE) >>> pipe.communicate()[0] # (stdout, stderr) >>> pipe.returncode # exit status >>>", "of one program to the input of another. this module provides multiple ways", "following connects two programs, by piping the output of one Python script into", "subprocess import Popen, PIPE >>> p1 = Popen('python writer.py', stdout=PIPE) >>> p2 =", "file-like interface to piped streams. 
They are similar in spirit to the redirect", "life is 42 84 C:\\...\\PP4E\\System\\Streams> python >>> from subprocess import Popen, PIPE >>>", "(and not both) prevents us from catching the second script’s output in our", "Their effect is much like the shell | command-line pipe syntax for redirecting", "pipe.write('Gumby\\n') >>> pipe.close() # \\n at end is optional >>> open('hello-in.txt').read() # output", "second script’s output in our code: >>> import os >>> p1 = os.popen('python", "another command’s streams from within a Python program. these tools can be used", "another program’s output. I suggested that these tools may be used to tap", "piped streams. They are similar in spirit to the redirect function, but are", "be used to tap into input streams as well. Because of that, the", "third argument that can be used to control buffering of written text. \"\"\"", "or output with os.popen\" ''' In fact, by passing in the desired mode", "this: \"Help! Help! I\\'m being repressed!\"\\r\\nThe meaning of life is 42 84\\r\\n' >>>", "optional >>> open('hello-in.txt').read() # output sent to a file The popen call is", "can employ the subprocess module. module can emulate os.popen functionality, but it can", "Got this: \"Help! Help! I'm being repressed!\" The meaning of life is 42", "module can emulate os.popen functionality, but it can also achieve feats such as", "a Python file-like object connected to the command’s output stream— reading the file", "streams to programs (in fact, their names mean “pipe open”), but they are", "2) \"\"\" ''' the following connects two programs, by piping the output of", "input streams—passing a “w” mode argument, instead of the default “r”, connects the", "within a script and provide a file-like interface to piped streams. 
They are", "file-like object connected to the command’s output stream— reading the file object allows", "output >>> pipe.wait() # exit status ''' \"\"\" Redirecting and connecting to the", "prompt) but also provide a Python file-like object connected to the command’s output", "the spawning script as files (not tied to class objects). These tools redirect", "being repressed!\") print(42) C:\\...\\PP4E\\System\\Streams> type reader.py print('Got this: \"%s\"' % input()) import sys", "streams as well. Because of that, the os.popen and subprocess tools are another", "on running programs (not calling functions), and the command’s streams are processed in", "open('hello-in.txt').read() # output sent to a file In fact, we can use obtain", "(a string we would normally type at a DOS or csh prompt) but", "input in the program started: >>> pipe = os.popen('python hello-in.py', 'w') # 'w'--write", "stream is just as simple, though a bit more complex than the os.popen", "'w'--write to program stdin >>> pipe.write('Gumby\\n') >>> pipe.close() # \\n at end is", "fact that its pipes are read or write (and not both) prevents us", "the subprocess module. module can emulate os.popen functionality, but it can also achieve", "took a first look at the built-in os.popen function and its subprocess.Popen relative,", "a way to redirect another command’s streams from within a Python program. these", "script and provide a file-like interface to piped streams. They are similar in", "a Python program. As we saw, these tools can be used to run", "(not calling functions), and the command’s streams are processed in the spawning script", "output text and exit status. C:\\...\\PP4E\\System\\Streams> python >>> from subprocess import Popen, PIPE,", "a spawned program with this module. C:\\...\\PP4E\\System\\Streams> type writer.py print(\"Help! Help! 
I'm being", ">>> p1 = Popen('python writer.py', stdout=PIPE) >>> p2 = Popen('python reader.py', stdin=p1.stdout, stdout=PIPE)", "pipe.stdin.close() >>> pipe.wait() >>> open('hello-in.txt').read() # output sent to a file In fact,", "in the spawning script as files (not tied to class objects). These tools", "to this with os.popen, but that the fact that its pipes are read", "spawned program’s exit status code from the close method (None means “no error”", "'r') >>> p2 = os.popen('python reader.py', 'w') >>> p2.write( p1.read() ) 36 >>>", "starts, instead of redirecting the streams of the script itself. ''' \"Redirecting input", "this module. C:\\...\\PP4E\\System\\Streams> type writer.py print(\"Help! Help! I'm being repressed!\") print(42) C:\\...\\PP4E\\System\\Streams> type", "inp = input() open('hello-in.txt', 'w').write('Hello ' + inp + '\\n') ''' ''' Python", "program started: >>> pipe = os.popen('python hello-in.py', 'w') # 'w'--write to program stdin", "the returned object to the spawned program’s input stream. What we write on", "being repressed!\" The meaning of life is 42 84 C:\\...\\PP4E\\System\\Streams> python >>> from", "# exit status >>> pipe = Popen('python hello-out.py', stdout=PIPE) >>> pipe.stdout.read() # read", "read another program’s output. I suggested that these tools may be used to", ">>> pipe.stdout.read() # read all output >>> pipe.wait() # exit status ''' \"\"\"", "are similar in spirit to the redirect function, but are based on running", "line. Other Redirection Options: os.popen and subprocess Revisited Near the end of the", "that a script starts, instead of redirecting the streams of the script itself.", "input streams to a file in the calling scripts, and we can obtain", "p2.close() Got this: \"Help! Help! 
I'm being repressed!\" The meaning of life is", "stderr) >>> pipe.returncode # exit status >>> pipe = Popen('python hello-out.py', stdout=PIPE) >>>", "stdin >>> pipe.write('Gumby\\n') >>> pipe.close() # \\n at end is optional >>> open('hello-in.txt').read()", "streams of the script itself. ''' \"Redirecting input or output with os.popen\" '''", "I'm being repressed!\" The meaning of life is 42 84 >>> print(X) None", "multiple ways to spawn a program and get both its standard output text", "# \\n at end is optional >>> open('hello-in.txt').read() # output sent to a", "the following: C:\\...\\PP4E\\System\\Streams> python >>> import os >>> pipe = os.popen('python hello-out.py') #", "argument, instead of the default “r”, connects the returned object to the spawned", "into another, first with shell syntax, and then with the subprocess module: C:\\...\\PP4E\\System\\Streams>", ">>> p2.returncode 0 We can get close to this with os.popen, but that", "redirect streams of spawned programs and are close cousins to some of the", "line (a string we would normally type at a DOS or csh prompt)", "p1 = Popen('python writer.py', stdout=PIPE) >>> p2 = Popen('python reader.py', stdin=p1.stdout, stdout=PIPE) >>>", "42 84 C:\\...\\PP4E\\System\\Streams> python >>> from subprocess import Popen, PIPE >>> p1 =", "# read all output >>> pipe.wait() # exit status ''' \"\"\" Redirecting and", "output. I suggested that these tools may be used to tap into input", "exit status code from the close method (None means “no error” here). C:\\...\\PP4E\\System\\Streams>", "and get both its standard output text and exit status. C:\\...\\PP4E\\System\\Streams> python >>>", "python writer.py | python reader.py Got this: \"Help! Help! 
I'm being repressed!\" The", "Revisited Near the end of the preceding chapter, we took a first look", "the default “r”, connects the returned object to the spawned program’s input stream.", "As we saw, these tools can be used to run a shell command", "meaning of life is 42 84\\r\\n' >>> p2.returncode 0 We can get close", "''' the following connects two programs, by piping the output of one Python", "a file In fact, we can use obtain both the input and output", "the spawning end shows up as input in the program started: >>> pipe", "look at the built-in os.popen function and its subprocess.Popen relative, which provide a", "file in the calling scripts, and we can obtain the spawned program’s exit", "would normally type at a DOS or csh prompt) but also provide a", "as an independent process on platforms that support such a notion. It accepts", "use obtain both the input and output streams of a spawned program with", "or write (and not both) prevents us from catching the second script’s output", "are based on running programs (not calling functions), and the command’s streams are", "p2.returncode 0 We can get close to this with os.popen, but that the", "'w').write('Hello ' + inp + '\\n') ''' ''' Python scripts can read output", "sys data = sys.stdin.readline()[:-1] print('The meaning of life is', data, int(data) * 2)", "and output streams of a spawned program with this module. C:\\...\\PP4E\\System\\Streams> type writer.py", "used to run a shell command line (a string we would normally type", "stdin=p1.stdout, stdout=PIPE) >>> output = p2.communicate()[0] >>> output b'Got this: \"Help! Help! I\\'m", "spawned program’s input stream. What we write on the spawning end shows up", "stdout=PIPE) >>> pipe.communicate()[0] # (stdout, stderr) >>> pipe.returncode # exit status >>> pipe", "but they are run within a script and provide a file-like interface to", "can emulate os.popen functionality, but it can also achieve feats such as bidirectional", "buffering of written text. 
\"\"\" \"Redirecting input and output with subprocess\" ''' For", "status. C:\\...\\PP4E\\System\\Streams> python >>> from subprocess import Popen, PIPE, call >>> X =", "mean “pipe open”), but they are run within a script and provide a", "In fact, by passing in the desired mode flag, we redirect either a", "program. these tools can be used to run a shell command line. Other", "exit status >>> pipe = Popen('python hello-out.py', stdout=PIPE) >>> pipe.stdout.read() # read all", "can use obtain both the input and output streams of a spawned program", "output with os.popen\" ''' In fact, by passing in the desired mode flag,", "connects the returned object to the spawned program’s input stream. What we write", "interface to piped streams. They are similar in spirit to the redirect function,", "in our code: >>> import os >>> p1 = os.popen('python writer.py', 'r') >>>", "command’s streams from within a Python program. these tools can be used to", "with os.popen, but that the fact that its pipes are read or write", "os.popen('python reader.py', 'w') >>> p2.write( p1.read() ) 36 >>> X = p2.close() Got", "pipe.stdout.read() # read all output >>> pipe.wait() # exit status ''' \"\"\" Redirecting", "to some of the techniques we just met. Their effect is much like", "output streams of a spawned program with this module. 
C:\\...\\PP4E\\System\\Streams> type writer.py print(\"Help!", "connected to the command’s output stream— reading the file object allows a script", "read all output >>> pipe.wait() # exit status ''' \"\"\" Redirecting and connecting", "os >>> p1 = os.popen('python writer.py', 'r') >>> p2 = os.popen('python reader.py', 'w')", "than the os.popen approach with 'w' file mode >>> pipe = Popen('python hello-in.py',", "tools are another way to redirect streams of spawned programs and are close", "the spawned program’s input stream is just as simple, though a bit more", "read output from other programs and scripts like these, too, using code like", "open”), but they are run within a script and provide a file-like interface", "\"Help! Help! I'm being repressed!\" The meaning of life is 42 84 C:\\...\\PP4E\\System\\Streams>", "too, using code like the following: C:\\...\\PP4E\\System\\Streams> python >>> import os >>> pipe", "this with os.popen, but that the fact that its pipes are read or", "communication (accessing both a program’s input and output) and tying the output of", "and then with the subprocess module: C:\\...\\PP4E\\System\\Streams> python writer.py | python reader.py Got", "following: C:\\...\\PP4E\\System\\Streams> python >>> import os >>> pipe = os.popen('python hello-out.py') # 'r'", "we can employ the subprocess module. module can emulate os.popen functionality, but it", "with shell syntax, and then with the subprocess module: C:\\...\\PP4E\\System\\Streams> python writer.py |", "Popen('python writer.py', stdout=PIPE) >>> p2 = Popen('python reader.py', stdin=p1.stdout, stdout=PIPE) >>> output =", "of redirecting the streams of the script itself. ''' \"Redirecting input or output", "or input streams to a file in the calling scripts, and we can", "as input in the program started: >>> pipe = os.popen('python hello-in.py', 'w') #", "# 'r' is default--read stdout >>> pipe.read() print(pipe.close()) # exit status: None is", "with this module. 
C:\\...\\PP4E\\System\\Streams> type writer.py print(\"Help! Help! I'm being repressed!\") print(42) C:\\...\\PP4E\\System\\Streams>", "and subprocess tools are another way to redirect streams of spawned programs and", "tools can be used to run a shell command line. Other Redirection Options:", "close cousins to some of the techniques we just met. Their effect is", "Redirection Options: os.popen and subprocess Revisited Near the end of the preceding chapter,", "cousins to some of the techniques we just met. Their effect is much", "redirect the streams of a program that a script starts, instead of redirecting", "pipe = Popen('python hello-out.py', stdout=PIPE) >>> pipe.communicate()[0] # (stdout, stderr) >>> pipe.returncode #", ">>> p2.write( p1.read() ) 36 >>> X = p2.close() Got this: \"Help! Help!", "be used to control buffering of written text. \"\"\" \"Redirecting input and output", "the second script’s output in our code: >>> import os >>> p1 =", "# exit status ''' \"\"\" Redirecting and connecting to the spawned program’s input", "spawned programs’ standard input streams—passing a “w” mode argument, instead of the default", "meaning of life is', data, int(data) * 2) \"\"\" ''' the following connects", "spawned programs and are close cousins to some of the techniques we just", "program’s exit status code from the close method (None means “no error” here).", "we just met. Their effect is much like the shell | command-line pipe", "life is 42 84\\r\\n' >>> p2.returncode 0 We can get close to this", "reader.py', 'w') >>> p2.write( p1.read() ) 36 >>> X = p2.close() Got this:", "support such a notion. It accepts an optional third argument that can be", "instead of the default “r”, connects the returned object to the spawned program’s", "output b'Got this: \"Help! Help! I\\'m being repressed!\"\\r\\nThe meaning of life is 42", "over the streams of spawned programs, we can employ the subprocess module. 
module", "we redirect either a spawned program’s output or input streams to a file", "that these tools may be used to tap into input streams as well.", "the shell | command-line pipe syntax for redirecting streams to programs (in fact,", "a DOS or csh prompt) but also provide a Python file-like object connected", "provide a Python file-like object connected to the command’s output stream— reading the", "within a Python program. As we saw, these tools can be used to", "these tools can be used to run a shell command line (a string", "repressed!\" The meaning of life is 42 84 C:\\...\\PP4E\\System\\Streams> python >>> from subprocess", "scripts like these, too, using code like the following: C:\\...\\PP4E\\System\\Streams> python >>> import", "of life is 42 84\\r\\n' >>> p2.returncode 0 We can get close to", "hello-in.py', stdin=PIPE) >>> pipe.stdin.write(b'Pokey\\n') >>> pipe.stdin.close() >>> pipe.wait() >>> open('hello-in.txt').read() # output sent", "the os.popen approach with 'w' file mode >>> pipe = Popen('python hello-in.py', stdin=PIPE)", "(None means “no error” here). 
C:\\...\\PP4E\\System\\Streams> type hello-out.py print('Hello shell world') C:\\...\\PP4E\\System\\Streams> type", "''' For even more control over the streams of spawned programs, we can", "''' ''' Python scripts can read output from other programs and scripts like", "The meaning of life is 42 84 C:\\...\\PP4E\\System\\Streams> python >>> from subprocess import", "end is optional >>> open('hello-in.txt').read() # output sent to a file The popen", "a shell command line (a string we would normally type at a DOS", "on the spawning end shows up as input in the program started: >>>", "C:\\...\\PP4E\\System\\Streams> type reader.py print('Got this: \"%s\"' % input()) import sys data = sys.stdin.readline()[:-1]", "even more control over the streams of spawned programs, we can employ the", "another, first with shell syntax, and then with the subprocess module: C:\\...\\PP4E\\System\\Streams> python", "csh prompt) but also provide a Python file-like object connected to the command’s", "with os.popen\" ''' In fact, by passing in the desired mode flag, we", "status code from the close method (None means “no error” here). C:\\...\\PP4E\\System\\Streams> type", "world') C:\\...\\PP4E\\System\\Streams> type hello-in.py inp = input() open('hello-in.txt', 'w').write('Hello ' + inp +", "to run a shell command line (a string we would normally type at", "the script itself. ''' \"Redirecting input or output with os.popen\" ''' In fact,", "= Popen('python hello-out.py', stdout=PIPE) >>> pipe.communicate()[0] # (stdout, stderr) >>> pipe.returncode # exit", "exit status. C:\\...\\PP4E\\System\\Streams> python >>> from subprocess import Popen, PIPE, call >>> X", "sys.stdin.readline()[:-1] print('The meaning of life is', data, int(data) * 2) \"\"\" ''' the", "output of one Python script into another, first with shell syntax, and then", "p1 = os.popen('python writer.py', 'r') >>> p2 = os.popen('python reader.py', 'w') >>> p2.write(", "stream. 
What we write on the spawning end shows up as input in", "stdin=PIPE) >>> pipe.stdin.write(b'Pokey\\n') >>> pipe.stdin.close() >>> pipe.wait() >>> open('hello-in.txt').read() # output sent to", "''' Python scripts can read output from other programs and scripts like these,", "are run within a script and provide a file-like interface to piped streams.", "processed in the spawning script as files (not tied to class objects). These", "import Popen, PIPE, call >>> X = call('python hello-out.py') # convenience >>> X", "all output >>> pipe.wait() # exit status ''' \"\"\" Redirecting and connecting to", "import Popen, PIPE >>> p1 = Popen('python writer.py', stdout=PIPE) >>> p2 = Popen('python", "print('The meaning of life is', data, int(data) * 2) \"\"\" ''' the following", "employ the subprocess module. module can emulate os.popen functionality, but it can also", "os.popen, but that the fact that its pipes are read or write (and", "Because of that, the os.popen and subprocess tools are another way to redirect", "\\n at end is optional >>> open('hello-in.txt').read() # output sent to a file", "to the input of another. this module provides multiple ways to spawn a", "allows a script to read another program’s output. 
I suggested that these tools", "type hello-out.py print('Hello shell world') C:\\...\\PP4E\\System\\Streams> type hello-in.py inp = input() open('hello-in.txt', 'w').write('Hello", "C:\\...\\PP4E\\System\\Streams> python >>> from subprocess import Popen, PIPE, call >>> X = call('python", "bit more complex than the os.popen approach with 'w' file mode >>> pipe", "output in our code: >>> import os >>> p1 = os.popen('python writer.py', 'r')", "C:\\...\\PP4E\\System\\Streams> type hello-out.py print('Hello shell world') C:\\...\\PP4E\\System\\Streams> type hello-in.py inp = input() open('hello-in.txt',", "input or output with os.popen\" ''' In fact, by passing in the desired", "code like the following: C:\\...\\PP4E\\System\\Streams> python >>> import os >>> pipe = os.popen('python", "The popen call is also smart enough to run the command string as", "convenience >>> X >>> pipe = Popen('python hello-out.py', stdout=PIPE) >>> pipe.communicate()[0] # (stdout,", "file In fact, we can use obtain both the input and output streams", "“pipe open”), but they are run within a script and provide a file-like", "pipe syntax for redirecting streams to programs (in fact, their names mean “pipe", "that its pipes are read or write (and not both) prevents us from", "a Python program. 
these tools can be used to run a shell command", "like the following: C:\\...\\PP4E\\System\\Streams> python >>> import os >>> pipe = os.popen('python hello-out.py')", "the following connects two programs, by piping the output of one Python script", "a program’s input and output) and tying the output of one program to", "connects two programs, by piping the output of one Python script into another,", "open('hello-in.txt', 'w').write('Hello ' + inp + '\\n') ''' ''' Python scripts can read", "like these, too, using code like the following: C:\\...\\PP4E\\System\\Streams> python >>> import os", ">>> pipe.returncode # exit status >>> pipe = Popen('python hello-out.py', stdout=PIPE) >>> pipe.stdout.read()", "a program that a script starts, instead of redirecting the streams of the", "os.popen\" ''' In fact, by passing in the desired mode flag, we redirect", ">>> pipe.wait() # exit status ''' \"\"\" Redirecting and connecting to the spawned", "reading the file object allows a script to read another program’s output. I", "way to redirect another command’s streams from within a Python program. these tools", "pipe.close() # \\n at end is optional >>> open('hello-in.txt').read() # output sent to", "output sent to a file In fact, we can use obtain both the", "What we write on the spawning end shows up as input in the", "a first look at the built-in os.popen function and its subprocess.Popen relative, which", "that, the os.popen and subprocess tools are another way to redirect streams of", "syntax, and then with the subprocess module: C:\\...\\PP4E\\System\\Streams> python writer.py | python reader.py", "its standard output text and exit status. C:\\...\\PP4E\\System\\Streams> python >>> from subprocess import", "but also provide a Python file-like object connected to the command’s output stream—", "the input and output streams of a spawned program with this module. 
C:\\...\\PP4E\\System\\Streams>", "writer.py', stdout=PIPE) >>> p2 = Popen('python reader.py', stdin=p1.stdout, stdout=PIPE) >>> output = p2.communicate()[0]", "suggested that these tools may be used to tap into input streams as", "Python scripts can read output from other programs and scripts like these, too,", "programs, by piping the output of one Python script into another, first with", "C:\\...\\PP4E\\System\\Streams> python >>> from subprocess import Popen, PIPE >>> p1 = Popen('python writer.py',", "much like the shell | command-line pipe syntax for redirecting streams to programs", "to the redirect function, but are based on running programs (not calling functions),", "repressed!\"\\r\\nThe meaning of life is 42 84\\r\\n' >>> p2.returncode 0 We can get", "control buffering of written text. \"\"\" \"Redirecting input and output with subprocess\" '''", "close method (None means “no error” here). C:\\...\\PP4E\\System\\Streams> type hello-out.py print('Hello shell world')", "file The popen call is also smart enough to run the command string", "based on running programs (not calling functions), and the command’s streams are processed", "print(42) C:\\...\\PP4E\\System\\Streams> type reader.py print('Got this: \"%s\"' % input()) import sys data =", "standard input streams—passing a “w” mode argument, instead of the default “r”, connects", ">>> pipe.read() print(pipe.close()) # exit status: None is good ''' \"\"\" But Python", "os.popen('python hello-in.py', 'w') # 'w'--write to program stdin >>> pipe.write('Gumby\\n') >>> pipe.close() #", "to a file in the calling scripts, and we can obtain the spawned", "Popen, PIPE >>> p1 = Popen('python writer.py', stdout=PIPE) >>> p2 = Popen('python reader.py',", "we would normally type at a DOS or csh prompt) but also provide", ">>> pipe = os.popen('python hello-out.py') # 'r' is default--read stdout >>> pipe.read() print(pipe.close())", "os.popen function and its subprocess.Popen relative, which provide a way to redirect 
another", "this module provides multiple ways to spawn a program and get both its", "module. C:\\...\\PP4E\\System\\Streams> type writer.py print(\"Help! Help! I'm being repressed!\") print(42) C:\\...\\PP4E\\System\\Streams> type reader.py", "(in fact, their names mean “pipe open”), but they are run within a", "which provide a way to redirect another command’s streams from within a Python", "# 'w'--write to program stdin >>> pipe.write('Gumby\\n') >>> pipe.close() # \\n at end", "reader.py Got this: \"Help! Help! I'm being repressed!\" The meaning of life is", "streams of spawned programs, we can employ the subprocess module. module can emulate", "output = p2.communicate()[0] >>> output b'Got this: \"Help! Help! I\\'m being repressed!\"\\r\\nThe meaning", "of life is 42 84 C:\\...\\PP4E\\System\\Streams> python >>> from subprocess import Popen, PIPE", "PIPE >>> p1 = Popen('python writer.py', stdout=PIPE) >>> p2 = Popen('python reader.py', stdin=p1.stdout,", "p1.read() ) 36 >>> X = p2.close() Got this: \"Help! Help! I'm being", ">>> p2 = Popen('python reader.py', stdin=p1.stdout, stdout=PIPE) >>> output = p2.communicate()[0] >>> output", "p2.communicate()[0] >>> output b'Got this: \"Help! Help! I\\'m being repressed!\"\\r\\nThe meaning of life", "meaning of life is 42 84 C:\\...\\PP4E\\System\\Streams> python >>> from subprocess import Popen,", "\"\"\" Redirecting and connecting to the spawned program’s input stream is just as", "is good ''' \"\"\" But Python scripts can also provide input to spawned", ">>> X = call('python hello-out.py') # convenience >>> X >>> pipe = Popen('python", "tools can be used to run a shell command line (a string we", "C:\\...\\PP4E\\System\\Streams> type writer.py print(\"Help! Help! I'm being repressed!\") print(42) C:\\...\\PP4E\\System\\Streams> type reader.py print('Got", "print(\"Help! Help! I'm being repressed!\") print(42) C:\\...\\PP4E\\System\\Streams> type reader.py print('Got this: \"%s\"' %", "of the script itself. 
''' \"Redirecting input or output with os.popen\" ''' In", "output or input streams to a file in the calling scripts, and we", "program that a script starts, instead of redirecting the streams of the script", "in the calling scripts, and we can obtain the spawned program’s exit status", ">>> output = p2.communicate()[0] >>> output b'Got this: \"Help! Help! I\\'m being repressed!\"\\r\\nThe", "call is also smart enough to run the command string as an independent", "with 'w' file mode >>> pipe = Popen('python hello-in.py', stdin=PIPE) >>> pipe.stdin.write(b'Pokey\\n') >>>", "is 42 84 C:\\...\\PP4E\\System\\Streams> python >>> from subprocess import Popen, PIPE >>> p1", "hello-in.py', 'w') # 'w'--write to program stdin >>> pipe.write('Gumby\\n') >>> pipe.close() # \\n", "file mode >>> pipe = Popen('python hello-in.py', stdin=PIPE) >>> pipe.stdin.write(b'Pokey\\n') >>> pipe.stdin.close() >>>", "Popen('python hello-out.py', stdout=PIPE) >>> pipe.communicate()[0] # (stdout, stderr) >>> pipe.returncode # exit status", "fact, their names mean “pipe open”), but they are run within a script", "pipe = os.popen('python hello-in.py', 'w') # 'w'--write to program stdin >>> pipe.write('Gumby\\n') >>>", "= Popen('python hello-in.py', stdin=PIPE) >>> pipe.stdin.write(b'Pokey\\n') >>> pipe.stdin.close() >>> pipe.wait() >>> open('hello-in.txt').read() #", "status >>> pipe = Popen('python hello-out.py', stdout=PIPE) >>> pipe.stdout.read() # read all output", "status ''' \"\"\" Redirecting and connecting to the spawned program’s input stream is", "more complex than the os.popen approach with 'w' file mode >>> pipe =", "the output of one Python script into another, first with shell syntax, and", "C:\\...\\PP4E\\System\\Streams> type hello-in.py inp = input() open('hello-in.txt', 'w').write('Hello ' + inp + '\\n')", "the streams of a program that a script starts, instead of redirecting the", "being repressed!\"\\r\\nThe meaning of life is 42 84\\r\\n' >>> p2.returncode 0 We can", "“no 
error” here). C:\\...\\PP4E\\System\\Streams> type hello-out.py print('Hello shell world') C:\\...\\PP4E\\System\\Streams> type hello-in.py inp", "and tying the output of one program to the input of another. this", "Help! I\\'m being repressed!\"\\r\\nThe meaning of life is 42 84\\r\\n' >>> p2.returncode 0", "pipes are read or write (and not both) prevents us from catching the", "the input of another. this module provides multiple ways to spawn a program", "from the close method (None means “no error” here). C:\\...\\PP4E\\System\\Streams> type hello-out.py print('Hello", "can be used to run a shell command line. Other Redirection Options: os.popen", "''' \"\"\" But Python scripts can also provide input to spawned programs’ standard", "os.popen functionality, but it can also achieve feats such as bidirectional stream communication", "life is', data, int(data) * 2) \"\"\" ''' the following connects two programs,", "us from catching the second script’s output in our code: >>> import os", "from subprocess import Popen, PIPE >>> p1 = Popen('python writer.py', stdout=PIPE) >>> p2", "to programs (in fact, their names mean “pipe open”), but they are run", "scripts can also provide input to spawned programs’ standard input streams—passing a “w”", "Popen('python reader.py', stdin=p1.stdout, stdout=PIPE) >>> output = p2.communicate()[0] >>> output b'Got this: \"Help!", ">>> open('hello-in.txt').read() # output sent to a file In fact, we can use", "redirecting the streams of the script itself. ''' \"Redirecting input or output with", ">>> pipe.wait() >>> open('hello-in.txt').read() # output sent to a file In fact, we", "these, too, using code like the following: C:\\...\\PP4E\\System\\Streams> python >>> import os >>>", "our code: >>> import os >>> p1 = os.popen('python writer.py', 'r') >>> p2", "''' In fact, by passing in the desired mode flag, we redirect either", "files (not tied to class objects). 
These tools redirect the streams of a", "by piping the output of one Python script into another, first with shell", "the preceding chapter, we took a first look at the built-in os.popen function", "command string as an independent process on platforms that support such a notion.", "both its standard output text and exit status. C:\\...\\PP4E\\System\\Streams> python >>> from subprocess", "input and output with subprocess\" ''' For even more control over the streams", "pipe.read() print(pipe.close()) # exit status: None is good ''' \"\"\" But Python scripts", "stdout >>> pipe.read() print(pipe.close()) # exit status: None is good ''' \"\"\" But", "spawning end shows up as input in the program started: >>> pipe =", "script starts, instead of redirecting the streams of the script itself. ''' \"Redirecting", "is optional >>> open('hello-in.txt').read() # output sent to a file The popen call", "redirect another command’s streams from within a Python program. these tools can be", "this: \"%s\"' % input()) import sys data = sys.stdin.readline()[:-1] print('The meaning of life", "| command-line pipe syntax for redirecting streams to programs (in fact, their names", "repressed!\") print(42) C:\\...\\PP4E\\System\\Streams> type reader.py print('Got this: \"%s\"' % input()) import sys data", "Options: os.popen and subprocess Revisited Near the end of the preceding chapter, we", "= os.popen('python reader.py', 'w') >>> p2.write( p1.read() ) 36 >>> X = p2.close()", "the desired mode flag, we redirect either a spawned program’s output or input", "text. \"\"\" \"Redirecting input and output with subprocess\" ''' For even more control", "one program to the input of another. 
this module provides multiple ways to", "for redirecting streams to programs (in fact, their names mean “pipe open”), but", "Redirection Options: os.popen and subprocess Revisited\" ''' the built-in os.popen function and its", "and subprocess Revisited\" ''' the built-in os.popen function and its subprocess.Popen relative, which", "is', data, int(data) * 2) \"\"\" ''' the following connects two programs, by", "the redirect function, but are based on running programs (not calling functions), and", "the spawned program’s exit status code from the close method (None means “no", "at end is optional >>> open('hello-in.txt').read() # output sent to a file The", "read or write (and not both) prevents us from catching the second script’s", "but it can also achieve feats such as bidirectional stream communication (accessing both", "complex than the os.popen approach with 'w' file mode >>> pipe = Popen('python", "first with shell syntax, and then with the subprocess module: C:\\...\\PP4E\\System\\Streams> python writer.py", "and are close cousins to some of the techniques we just met. Their", "provides multiple ways to spawn a program and get both its standard output", ") 36 >>> X = p2.close() Got this: \"Help! Help! I'm being repressed!\"", "streams from within a Python program. these tools can be used to run", "writer.py print(\"Help! Help! I'm being repressed!\") print(42) C:\\...\\PP4E\\System\\Streams> type reader.py print('Got this: \"%s\"'", "control over the streams of spawned programs, we can employ the subprocess module.", "stream— reading the file object allows a script to read another program’s output.", "on platforms that support such a notion. It accepts an optional third argument", "'w' file mode >>> pipe = Popen('python hello-in.py', stdin=PIPE) >>> pipe.stdin.write(b'Pokey\\n') >>> pipe.stdin.close()", "| python reader.py Got this: \"Help! Help! 
I'm being repressed!\" The meaning of", "I\\'m being repressed!\"\\r\\nThe meaning of life is 42 84\\r\\n' >>> p2.returncode 0 We", "used to tap into input streams as well. Because of that, the os.popen", "spawned programs, we can employ the subprocess module. module can emulate os.popen functionality,", "a program and get both its standard output text and exit status. C:\\...\\PP4E\\System\\Streams>", "42 84\\r\\n' >>> p2.returncode 0 We can get close to this with os.popen,", "code from the close method (None means “no error” here). C:\\...\\PP4E\\System\\Streams> type hello-out.py", "to the spawned program’s input stream is just as simple, though a bit", "both a program’s input and output) and tying the output of one program", "to redirect another command’s streams from within a Python program. As we saw,", "module provides multiple ways to spawn a program and get both its standard", "like the shell | command-line pipe syntax for redirecting streams to programs (in", "and the command’s streams are processed in the spawning script as files (not", "scripts can read output from other programs and scripts like these, too, using", "mode >>> pipe = Popen('python hello-in.py', stdin=PIPE) >>> pipe.stdin.write(b'Pokey\\n') >>> pipe.stdin.close() >>> pipe.wait()", ">>> from subprocess import Popen, PIPE >>> p1 = Popen('python writer.py', stdout=PIPE) >>>", "also provide a Python file-like object connected to the command’s output stream— reading", "int(data) * 2) \"\"\" ''' the following connects two programs, by piping the", "output of one program to the input of another. this module provides multiple", "'r' is default--read stdout >>> pipe.read() print(pipe.close()) # exit status: None is good", "84 C:\\...\\PP4E\\System\\Streams> python >>> from subprocess import Popen, PIPE >>> p1 = Popen('python", "stdout=PIPE) >>> output = p2.communicate()[0] >>> output b'Got this: \"Help! Help! 
I\\'m being", "are read or write (and not both) prevents us from catching the second", "passing in the desired mode flag, we redirect either a spawned program’s output", "Options: os.popen and subprocess Revisited\" ''' the built-in os.popen function and its subprocess.Popen", "the file object allows a script to read another program’s output. I suggested", "relative, which provide a way to redirect another command’s streams from within a", "tools may be used to tap into input streams as well. Because of", "such a notion. It accepts an optional third argument that can be used", "= Popen('python reader.py', stdin=p1.stdout, stdout=PIPE) >>> output = p2.communicate()[0] >>> output b'Got this:", "X >>> pipe = Popen('python hello-out.py', stdout=PIPE) >>> pipe.communicate()[0] # (stdout, stderr) >>>", "' + inp + '\\n') ''' ''' Python scripts can read output from", "print('Hello shell world') C:\\...\\PP4E\\System\\Streams> type hello-in.py inp = input() open('hello-in.txt', 'w').write('Hello ' +", "used to control buffering of written text. \"\"\" \"Redirecting input and output with", "python >>> from subprocess import Popen, PIPE, call >>> X = call('python hello-out.py')", "For even more control over the streams of spawned programs, we can employ", "DOS or csh prompt) but also provide a Python file-like object connected to", "are processed in the spawning script as files (not tied to class objects).", "accepts an optional third argument that can be used to control buffering of", "we took a first look at the built-in os.popen function and its subprocess.Popen", "tap into input streams as well. 
Because of that, the os.popen and subprocess", "way to redirect streams of spawned programs and are close cousins to some", "pipe.returncode # exit status >>> pipe = Popen('python hello-out.py', stdout=PIPE) >>> pipe.stdout.read() #", "status: None is good ''' \"\"\" But Python scripts can also provide input", "of one Python script into another, first with shell syntax, and then with", "a spawned program’s output or input streams to a file in the calling", "streams of a spawned program with this module. C:\\...\\PP4E\\System\\Streams> type writer.py print(\"Help! Help!", "means “no error” here). C:\\...\\PP4E\\System\\Streams> type hello-out.py print('Hello shell world') C:\\...\\PP4E\\System\\Streams> type hello-in.py", "is just as simple, though a bit more complex than the os.popen approach", "input and output streams of a spawned program with this module. C:\\...\\PP4E\\System\\Streams> type", "and connecting to the spawned program’s input stream is just as simple, though", "exit status ''' \"\"\" Redirecting and connecting to the spawned program’s input stream", "pipe.wait() >>> open('hello-in.txt').read() # output sent to a file In fact, we can", "Other Redirection Options: os.popen and subprocess Revisited Near the end of the preceding", "streams of a program that a script starts, instead of redirecting the streams", "an independent process on platforms that support such a notion. It accepts an", "program’s output or input streams to a file in the calling scripts, and", "prevents us from catching the second script’s output in our code: >>> import", "instead of redirecting the streams of the script itself. ''' \"Redirecting input or", "end shows up as input in the program started: >>> pipe = os.popen('python", "to program stdin >>> pipe.write('Gumby\\n') >>> pipe.close() # \\n at end is optional", "provide a way to redirect another command’s streams from within a Python program.", "to tap into input streams as well. 
Because of that, the os.popen and", "using code like the following: C:\\...\\PP4E\\System\\Streams> python >>> import os >>> pipe =", "that the fact that its pipes are read or write (and not both)", "of written text. \"\"\" \"Redirecting input and output with subprocess\" ''' For even", "stream communication (accessing both a program’s input and output) and tying the output", "tying the output of one program to the input of another. this module", "close to this with os.popen, but that the fact that its pipes are", "= call('python hello-out.py') # convenience >>> X >>> pipe = Popen('python hello-out.py', stdout=PIPE)", "end of the preceding chapter, we took a first look at the built-in", "is default--read stdout >>> pipe.read() print(pipe.close()) # exit status: None is good '''", "\"Redirecting input or output with os.popen\" ''' In fact, by passing in the", "subprocess Revisited Near the end of the preceding chapter, we took a first", "= os.popen('python hello-in.py', 'w') # 'w'--write to program stdin >>> pipe.write('Gumby\\n') >>> pipe.close()", "We can get close to this with os.popen, but that the fact that", "reader.py', stdin=p1.stdout, stdout=PIPE) >>> output = p2.communicate()[0] >>> output b'Got this: \"Help! Help!", ">>> X = p2.close() Got this: \"Help! Help! I'm being repressed!\" The meaning", "''' \"\"\" Redirecting and connecting to the spawned program’s input stream is just", ">>> pipe.write('Gumby\\n') >>> pipe.close() # \\n at end is optional >>> open('hello-in.txt').read() #", "script itself. ''' \"Redirecting input or output with os.popen\" ''' In fact, by", "the command’s streams are processed in the spawning script as files (not tied", "program’s output. 
I suggested that these tools may be used to tap into", "p2 = os.popen('python reader.py', 'w') >>> p2.write( p1.read() ) 36 >>> X =", "shell command line (a string we would normally type at a DOS or", "output with subprocess\" ''' For even more control over the streams of spawned", "open('hello-in.txt').read() # output sent to a file The popen call is also smart", "pipe = Popen('python hello-in.py', stdin=PIPE) >>> pipe.stdin.write(b'Pokey\\n') >>> pipe.stdin.close() >>> pipe.wait() >>> open('hello-in.txt').read()", "object connected to the command’s output stream— reading the file object allows a", "the calling scripts, and we can obtain the spawned program’s exit status code", "shell command line. Other Redirection Options: os.popen and subprocess Revisited Near the end", "spawned program’s output or input streams to a file in the calling scripts,", "met. Their effect is much like the shell | command-line pipe syntax for", "popen call is also smart enough to run the command string as an", ">>> import os >>> p1 = os.popen('python writer.py', 'r') >>> p2 = os.popen('python", "are close cousins to some of the techniques we just met. Their effect", "writer.py', 'r') >>> p2 = os.popen('python reader.py', 'w') >>> p2.write( p1.read() ) 36", "print(pipe.close()) # exit status: None is good ''' \"\"\" But Python scripts can", "these tools may be used to tap into input streams as well. Because", "(accessing both a program’s input and output) and tying the output of one", "get close to this with os.popen, but that the fact that its pipes", "feats such as bidirectional stream communication (accessing both a program’s input and output)", "to redirect another command’s streams from within a Python program. these tools can", "enough to run the command string as an independent process on platforms that", "b'Got this: \"Help! Help! 
I\\'m being repressed!\"\\r\\nThe meaning of life is 42 84\\r\\n'", "with subprocess\" ''' For even more control over the streams of spawned programs,", "text and exit status. C:\\...\\PP4E\\System\\Streams> python >>> from subprocess import Popen, PIPE, call", "platforms that support such a notion. It accepts an optional third argument that", "smart enough to run the command string as an independent process on platforms", "written text. \"\"\" \"Redirecting input and output with subprocess\" ''' For even more", "a script and provide a file-like interface to piped streams. They are similar", "call >>> X = call('python hello-out.py') # convenience >>> X >>> pipe =", "X = p2.close() Got this: \"Help! Help! I'm being repressed!\" The meaning of", "= Popen('python writer.py', stdout=PIPE) >>> p2 = Popen('python reader.py', stdin=p1.stdout, stdout=PIPE) >>> output", "from within a Python program. As we saw, these tools can be used", "class objects). These tools redirect the streams of a program that a script", "program’s input stream. What we write on the spawning end shows up as", "“w” mode argument, instead of the default “r”, connects the returned object to", "standard output text and exit status. C:\\...\\PP4E\\System\\Streams> python >>> from subprocess import Popen,", "we write on the spawning end shows up as input in the program", "redirect another command’s streams from within a Python program. As we saw, these", "type reader.py print('Got this: \"%s\"' % input()) import sys data = sys.stdin.readline()[:-1] print('The", "ways to spawn a program and get both its standard output text and", "other programs and scripts like these, too, using code like the following: C:\\...\\PP4E\\System\\Streams>", "a script starts, instead of redirecting the streams of the script itself. 
'''", "command-line pipe syntax for redirecting streams to programs (in fact, their names mean", "os.popen and subprocess Revisited Near the end of the preceding chapter, we took", "object allows a script to read another program’s output. I suggested that these", "method (None means “no error” here). C:\\...\\PP4E\\System\\Streams> type hello-out.py print('Hello shell world') C:\\...\\PP4E\\System\\Streams>", "chapter, we took a first look at the built-in os.popen function and its", "independent process on platforms that support such a notion. It accepts an optional", "we can use obtain both the input and output streams of a spawned", "= p2.close() Got this: \"Help! Help! I'm being repressed!\" The meaning of life", ">>> import os >>> pipe = os.popen('python hello-out.py') # 'r' is default--read stdout", "PIPE, call >>> X = call('python hello-out.py') # convenience >>> X >>> pipe", "default “r”, connects the returned object to the spawned program’s input stream. What", "It accepts an optional third argument that can be used to control buffering", "the spawned program’s input stream. What we write on the spawning end shows", "36 >>> X = p2.close() Got this: \"Help! Help! I'm being repressed!\" The", "its subprocess.Popen relative, which provide a way to redirect another command’s streams from", "a file in the calling scripts, and we can obtain the spawned program’s", "this: \"Help! Help! I'm being repressed!\" The meaning of life is 42 84", "streams to a file in the calling scripts, and we can obtain the", "pipe = os.popen('python hello-out.py') # 'r' is default--read stdout >>> pipe.read() print(pipe.close()) #", "be used to run a shell command line (a string we would normally", "returned object to the spawned program’s input stream. What we write on the", "up as input in the program started: >>> pipe = os.popen('python hello-in.py', 'w')", "write on the spawning end shows up as input in the program started:", "a notion. 
It accepts an optional third argument that can be used to", "just as simple, though a bit more complex than the os.popen approach with", "\"\"\" ''' the following connects two programs, by piping the output of one", "# (stdout, stderr) >>> pipe.returncode # exit status >>> pipe = Popen('python hello-out.py',", "redirecting streams to programs (in fact, their names mean “pipe open”), but they", "programs (in fact, their names mean “pipe open”), but they are run within", "names mean “pipe open”), but they are run within a script and provide", ">>> X >>> pipe = Popen('python hello-out.py', stdout=PIPE) >>> pipe.communicate()[0] # (stdout, stderr)", "exit status: None is good ''' \"\"\" But Python scripts can also provide", "(not tied to class objects). These tools redirect the streams of a program", "p2.write( p1.read() ) 36 >>> X = p2.close() Got this: \"Help! Help! I'm", "be used to run a shell command line. Other Redirection Options: os.popen and", "such as bidirectional stream communication (accessing both a program’s input and output) and" ]
[ "else: break images = np.vstack(images) y_true = np.concatenate(y_true) y_pred = np.concatenate(y_pred) y_true =", "np.argmax(y_pred, axis=1) true_taxonID = [self.label_names[x] for x in y_true] pred_taxonID = [self.label_names[x] for", "Args: experiment: a comet experiment object train_data: a tf data object to generate", "F1Callback(experiment=experiment, y_true=y_true, eval_dataset=validation_data, label_names=label_names, submodel=submodel, train_shp=train_shp) callback_list.append(f1) #if submodel is None: #plot_images =", "domain_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=domain_lists) self.experiment.log_metric(name = \"Within_domain confusion[training]\",", "[self.label_names[x] for x in y_true] pred_taxonID = [self.label_names[x] for x in y_pred] counter", "callback_list.append(confusion_matrix) f1 = F1Callback(experiment=experiment, y_true=y_true, eval_dataset=validation_data, label_names=label_names, submodel=submodel, train_shp=train_shp) callback_list.append(f1) #if submodel is", "with labels annotated\"\"\" #fill until there is atleast 20 images images = []", "in self.dataset: if num_images < limit: pred = self.model.predict(data) images.append(data) if self.submodel: y_pred.append(pred[0])", "predictions to make sure its constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) class ConfusionMatrixCallback(Callback): def __init__(self, experiment,", "validation_data: if submodel in [\"spatial\",\"spectral\"]: label = label[0] y_true.append(label) y_true = np.concatenate(y_true) if", "[\"spectral\",\"spatial\"]: y_pred = y_pred[0] #F1 macro, micro = metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"MicroF1\", micro) self.experiment.log_metric(\"MacroF1\",", "macro, micro = metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"MicroF1\", micro) self.experiment.log_metric(\"MacroF1\", macro) #Log number of predictions", 
"zip(true_taxonID, pred_taxonID, images): figure = visualize.plot_prediction(image=image, prediction=prediction, label=label) self.experiment.log_figure(figure_name=\"{}_{}\".format(label, counter)) counter += 1", "generate data validation_data: a tf data object to generate data train_shp: the original", "label_names self.submodel = submodel def on_train_end(self, epoch, logs={}): \"\"\"Plot sample images with labels", "pandas as pd from datetime import datetime from DeepTreeAttention.utils import metrics from DeepTreeAttention.visualization", "x in y_true] pred_taxonID = [self.label_names[x] for x in y_pred] counter = 0", "the first scientific_dict = self.train_shp.groupby('taxonID')['scientific'].apply(lambda x: x.head(1).values.tolist()).to_dict() genus_confusion = metrics.genus_confusion(y_true = results.true_taxonID, y_pred", "y_pred = results.predicted_taxonID, site_lists=domain_lists) self.experiment.log_metric(name = \"Within_domain confusion[training]\", value = domain_confusion) #Genus of", "submodel self.n = n self.train_shp = train_shp self.y_true = y_true def on_train_end(self, logs={}):", "macro) #Log number of predictions to make sure its constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) results", "label, prediction, image in zip(true_taxonID, pred_taxonID, images): figure = visualize.plot_prediction(image=image, prediction=prediction, label=label) self.experiment.log_figure(figure_name=\"{}_{}\".format(label,", "+= 1 def create(experiment, train_data, validation_data, train_shp, log_dir=None, label_names=None, submodel=False): \"\"\"Create a set", "object to generate data train_shp: the original shapefile for the train data to", "y_pred = y_pred[0] #F1 macro, micro = metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"Final MicroF1\", micro) self.experiment.log_metric(\"Final", "def __init__(self, experiment, dataset, label_names, y_true, submodel): self.experiment = experiment self.dataset = dataset", 
"y_pred = results.predicted_taxonID, site_lists=site_lists) self.experiment.log_metric(name = \"Within_site confusion[training]\", value = site_confusion) plot_lists =", "= ImageCallback(experiment, validation_data, label_names, submodel=submodel) #callback_list.append(plot_images) if log_dir is not None: print(\"saving tensorboard", "import ReduceLROnPlateau from tensorflow.keras.callbacks import Callback, TensorBoard from tensorflow import expand_dims class F1Callback(Callback):", "confused most_confused = results.groupby([\"true_taxonID\",\"predicted_taxonID\"]).size().reset_index(name=\"count\") most_confused = most_confused[~(most_confused.true_taxonID == most_confused.predicted_taxonID)].sort_values(\"count\", ascending=False) self.experiment.log_table(\"most_confused.csv\",most_confused.values) def on_epoch_end(self,", "tf data object to generate data validation_data: a tf data object to generate", "results.predicted.apply(lambda x: self.label_names[x]) #Within site confusion site_lists = self.train_shp.groupby(\"taxonID\").siteID.unique() site_confusion = metrics.site_confusion(y_true =", "self.experiment.log_metric(name = \"Within_domain confusion[training]\", value = domain_confusion) #Genus of all the different taxonID", "x: self.label_names[x]) results[\"predicted_taxonID\"] = results.predicted.apply(lambda x: self.label_names[x]) #Within site confusion site_lists = self.train_shp.groupby(\"taxonID\").siteID.unique()", "site and species matrix y_pred = self.model.predict(self.eval_dataset) if self.submodel in [\"spectral\",\"spatial\"]: y_pred =", "site confusion site_lists = self.train_shp.groupby(\"taxonID\").siteID.unique() site_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID,", "= \"Within Genus confusion\", value = genus_confusion) #Most confused most_confused = results.groupby([\"true_taxonID\",\"predicted_taxonID\"]).size().reset_index(name=\"count\") most_confused", "to make sure its constant 
self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) class ConfusionMatrixCallback(Callback): def __init__(self, experiment, dataset,", "self.dataset: if num_images < limit: pred = self.model.predict(data) images.append(data) if self.submodel: y_pred.append(pred[0]) y_true.append(label[0])", "self.model.predict(data) images.append(data) if self.submodel: y_pred.append(pred[0]) y_true.append(label[0]) else: y_pred.append(pred) y_true.append(label) num_images += label.shape[0] else:", "= np.concatenate(y_pred) y_true = np.argmax(y_true, axis=1) y_pred = np.argmax(y_pred, axis=1) true_taxonID = [self.label_names[x]", "train_data, validation_data, train_shp, log_dir=None, label_names=None, submodel=False): \"\"\"Create a set of callbacks Args: experiment:", "for metadata callback_list = [] reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, min_delta=0.1, min_lr=0.00001, verbose=1)", "y_true.append(label) y_true = np.concatenate(y_true) if not submodel in [\"spatial\",\"spectral\"]: confusion_matrix = ConfusionMatrixCallback(experiment=experiment, y_true=y_true,", "labels since they are not shuffled y_true = [ ] for data, label", "epochs y_true: instead of iterating through the dataset every time, just do it", "self.experiment = experiment self.dataset = dataset self.label_names = label_names self.submodel = submodel self.y_true", "= dataset self.label_names = label_names self.submodel = submodel def on_train_end(self, epoch, logs={}): \"\"\"Plot", "callback_list.append(f1) #if submodel is None: #plot_images = ImageCallback(experiment, validation_data, label_names, submodel=submodel) #callback_list.append(plot_images) if", "metrics.genus_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, scientific_dict=scientific_dict) self.experiment.log_metric(name = \"Within Genus confusion\", value", "= results.predicted_taxonID, scientific_dict=scientific_dict) self.experiment.log_metric(name = \"Within Genus 
confusion\", value = genus_confusion) #Most confused", "experiment, dataset, label_names, submodel=False): self.experiment = experiment self.dataset = dataset self.label_names = label_names", "] for data, label in validation_data: if submodel in [\"spatial\",\"spectral\"]: label = label[0]", "None: #plot_images = ImageCallback(experiment, validation_data, label_names, submodel=submodel) #callback_list.append(plot_images) if log_dir is not None:", "a set of callbacks Args: experiment: a comet experiment object train_data: a tf", "= y_true def on_train_end(self, logs={}): y_pred = [] sites = [] #gather site", "def on_train_end(self, logs={}): y_pred = [] sites = [] #gather site and species", "#F1 macro, micro = metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"Final MicroF1\", micro) self.experiment.log_metric(\"Final MacroF1\", macro) #Log", "confusion site_lists = self.train_shp.groupby(\"taxonID\").siteID.unique() site_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=site_lists)", "submodel=submodel) #callback_list.append(plot_images) if log_dir is not None: print(\"saving tensorboard logs at {}\".format(log_dir)) tensorboard", "will run every 4 epochs y_true: instead of iterating through the dataset every", "results = pd.DataFrame({\"true\":np.argmax(self.y_true, 1),\"predicted\":np.argmax(y_pred, 1)}) #assign labels if self.label_names: results[\"true_taxonID\"] = results.true.apply(lambda x:", "label_names=label_names, submodel=submodel) callback_list.append(confusion_matrix) f1 = F1Callback(experiment=experiment, y_true=y_true, eval_dataset=validation_data, label_names=label_names, submodel=submodel, train_shp=train_shp) callback_list.append(f1) #if", "self.label_names: results[\"true_taxonID\"] = results.true.apply(lambda x: self.label_names[x]) results[\"predicted_taxonID\"] = results.predicted.apply(lambda x: self.label_names[x]) #Within site", "its constant 
self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) class ConfusionMatrixCallback(Callback): def __init__(self, experiment, dataset, label_names, y_true, submodel):", "os import numpy as np import pandas as pd from datetime import datetime", "site_confusion) plot_lists = self.train_shp.groupby(\"taxonID\").plotID.unique() plot_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=plot_lists)", "site_lists=domain_lists) self.experiment.log_metric(name = \"Within_domain confusion[training]\", value = domain_confusion) #Genus of all the different", "results.true.apply(lambda x: self.label_names[x]) results[\"predicted_taxonID\"] = results.predicted.apply(lambda x: self.label_names[x]) #Within site confusion site_lists =", "y_pred, title=name, file_name= name, labels=self.label_names, max_categories=90, max_example_per_cell=1) class ImageCallback(Callback): def __init__(self, experiment, dataset,", "variants should be the same, take the first scientific_dict = self.train_shp.groupby('taxonID')['scientific'].apply(lambda x: x.head(1).values.tolist()).to_dict()", "= [] y_true = [] limit = 20 num_images = 0 for data,", "self.n = n self.train_shp = train_shp self.y_true = y_true def on_train_end(self, logs={}): y_pred", "if log_dir is not None: print(\"saving tensorboard logs at {}\".format(log_dir)) tensorboard = TensorBoard(log_dir=log_dir,", "tensorflow import expand_dims class F1Callback(Callback): def __init__(self, experiment, eval_dataset, y_true, label_names, submodel, train_shp,", "results[\"predicted_taxonID\"] = results.predicted.apply(lambda x: self.label_names[x]) #Within site confusion site_lists = self.train_shp.groupby(\"taxonID\").siteID.unique() site_confusion =", "= self.train_shp.groupby(\"taxonID\").plotID.unique() plot_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=plot_lists) self.experiment.log_metric(name =", 
"limit: pred = self.model.predict(data) images.append(data) if self.submodel: y_pred.append(pred[0]) y_true.append(label[0]) else: y_pred.append(pred) y_true.append(label) num_images", "epoch, logs={}): y_pred = self.model.predict(self.dataset) if self.submodel is \"metadata\": name = \"Metadata Confusion", "submodel in [\"spatial\",\"spectral\"]: confusion_matrix = ConfusionMatrixCallback(experiment=experiment, y_true=y_true, dataset=validation_data, label_names=label_names, submodel=submodel) callback_list.append(confusion_matrix) f1 =", "#Get the true labels since they are not shuffled y_true = [ ]", "results.groupby([\"true_taxonID\",\"predicted_taxonID\"]).size().reset_index(name=\"count\") most_confused = most_confused[~(most_confused.true_taxonID == most_confused.predicted_taxonID)].sort_values(\"count\", ascending=False) self.experiment.log_table(\"most_confused.csv\",most_confused.values) def on_epoch_end(self, epoch, logs={}): if", "counter = 0 for label, prediction, image in zip(true_taxonID, pred_taxonID, images): figure =", "of predictions to make sure its constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) class ConfusionMatrixCallback(Callback): def __init__(self,", "ascending=False) self.experiment.log_table(\"most_confused.csv\",most_confused.values) def on_epoch_end(self, epoch, logs={}): if not epoch % self.n == 0:", "[] #gather site and species matrix y_pred = self.model.predict(self.eval_dataset) if self.submodel in [\"spectral\",\"spatial\"]:", "images with labels annotated\"\"\" #fill until there is atleast 20 images images =", "predictions to make sure its constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) results = pd.DataFrame({\"true\":np.argmax(self.y_true, 1),\"predicted\":np.argmax(y_pred, 1)})", "else: y_pred.append(pred) y_true.append(label) num_images += label.shape[0] else: break images = np.vstack(images) y_true =", "in [\"ensemble\"]: name = \"Ensemble Matrix\" else: name = 
\"Confusion Matrix\" cm =", "= metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"Final MicroF1\", micro) self.experiment.log_metric(\"Final MacroF1\", macro) #Log number of predictions", "to the function \"\"\" self.experiment = experiment self.eval_dataset = eval_dataset self.label_names = label_names", "#if submodel is None: #plot_images = ImageCallback(experiment, validation_data, label_names, submodel=submodel) #callback_list.append(plot_images) if log_dir", "callbacks\"\"\" import os import numpy as np import pandas as pd from datetime", "labels if self.label_names: results[\"true_taxonID\"] = results.true.apply(lambda x: self.label_names[x]) results[\"predicted_taxonID\"] = results.predicted.apply(lambda x: self.label_names[x])", "ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, min_delta=0.1, min_lr=0.00001, verbose=1) callback_list.append(reduce_lr) #Get the true labels since they", "= genus_confusion) #Most confused most_confused = results.groupby([\"true_taxonID\",\"predicted_taxonID\"]).size().reset_index(name=\"count\") most_confused = most_confused[~(most_confused.true_taxonID == most_confused.predicted_taxonID)].sort_values(\"count\", ascending=False)", "label = label[0] y_true.append(label) y_true = np.concatenate(y_true) if not submodel in [\"spatial\",\"spectral\"]: confusion_matrix", "self.submodel in [\"spectral\",\"spatial\"]: y_pred = y_pred[0] #F1 macro, micro = metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"MicroF1\",", "function will run every 4 epochs y_true: instead of iterating through the dataset", "self.label_names = label_names self.submodel = submodel self.n = n self.train_shp = train_shp self.y_true", "self.experiment.log_metric(name = \"Within_plot confusion[training]\", value = plot_confusion) domain_lists = self.train_shp.groupby(\"taxonID\").domainID.unique() domain_confusion = metrics.site_confusion(y_true", "constant self.experiment.log_metric(\"Prediction 
samples\",y_pred.shape[0]) class ConfusionMatrixCallback(Callback): def __init__(self, experiment, dataset, label_names, y_true, submodel): self.experiment", "np.concatenate(y_true) if not submodel in [\"spatial\",\"spectral\"]: confusion_matrix = ConfusionMatrixCallback(experiment=experiment, y_true=y_true, dataset=validation_data, label_names=label_names, submodel=submodel)", "samples\",y_pred.shape[0]) results = pd.DataFrame({\"true\":np.argmax(self.y_true, 1),\"predicted\":np.argmax(y_pred, 1)}) #assign labels if self.label_names: results[\"true_taxonID\"] = results.true.apply(lambda", "site_lists=plot_lists) self.experiment.log_metric(name = \"Within_plot confusion[training]\", value = plot_confusion) domain_lists = self.train_shp.groupby(\"taxonID\").domainID.unique() domain_confusion =", "print(\"saving tensorboard logs at {}\".format(log_dir)) tensorboard = TensorBoard(log_dir=log_dir, histogram_freq=0, profile_batch=30) callback_list.append(tensorboard) return callback_list", "results.predicted_taxonID, site_lists=plot_lists) self.experiment.log_metric(name = \"Within_plot confusion[training]\", value = plot_confusion) domain_lists = self.train_shp.groupby(\"taxonID\").domainID.unique() domain_confusion", "= submodel self.y_true = y_true def on_train_end(self, epoch, logs={}): y_pred = self.model.predict(self.dataset) if", "class F1Callback(Callback): def __init__(self, experiment, eval_dataset, y_true, label_names, submodel, train_shp, n=10): \"\"\"F1 callback", "import datetime from DeepTreeAttention.utils import metrics from DeepTreeAttention.visualization import visualize from tensorflow.keras.callbacks import", "= pd.DataFrame({\"true\":np.argmax(self.y_true, 1),\"predicted\":np.argmax(y_pred, 1)}) #assign labels if self.label_names: results[\"true_taxonID\"] = results.true.apply(lambda x: self.label_names[x])", "max_example_per_cell=1) class ImageCallback(Callback): def __init__(self, experiment, dataset, label_names, submodel=False): self.experiment 
= experiment self.dataset", "self.experiment.log_confusion_matrix( self.y_true, y_pred, title=name, file_name= name, labels=self.label_names, max_categories=90, max_example_per_cell=1) class ImageCallback(Callback): def __init__(self,", "y_true] pred_taxonID = [self.label_names[x] for x in y_pred] counter = 0 for label,", "experiment, dataset, label_names, y_true, submodel): self.experiment = experiment self.dataset = dataset self.label_names =", "4 epochs y_true: instead of iterating through the dataset every time, just do", "\"\"\"F1 callback Args: n: number of epochs to run. If n=4, function will", "num_images += label.shape[0] else: break images = np.vstack(images) y_true = np.concatenate(y_true) y_pred =", "numpy as np import pandas as pd from datetime import datetime from DeepTreeAttention.utils", "y_pred) self.experiment.log_metric(\"MicroF1\", micro) self.experiment.log_metric(\"MacroF1\", macro) #Log number of predictions to make sure its", "to generate data train_shp: the original shapefile for the train data to check", "= results.predicted_taxonID, site_lists=domain_lists) self.experiment.log_metric(name = \"Within_domain confusion[training]\", value = domain_confusion) #Genus of all", "of epochs to run. 
If n=4, function will run every 4 epochs y_true:", "value = plot_confusion) domain_lists = self.train_shp.groupby(\"taxonID\").domainID.unique() domain_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred =", "label[0] y_true.append(label) y_true = np.concatenate(y_true) if not submodel in [\"spatial\",\"spectral\"]: confusion_matrix = ConfusionMatrixCallback(experiment=experiment,", "if not submodel in [\"spatial\",\"spectral\"]: confusion_matrix = ConfusionMatrixCallback(experiment=experiment, y_true=y_true, dataset=validation_data, label_names=label_names, submodel=submodel) callback_list.append(confusion_matrix)", "pd from datetime import datetime from DeepTreeAttention.utils import metrics from DeepTreeAttention.visualization import visualize", "labels=self.label_names, max_categories=90, max_example_per_cell=1) class ImageCallback(Callback): def __init__(self, experiment, dataset, label_names, submodel=False): self.experiment =", "are not shuffled y_true = [ ] for data, label in validation_data: if", "self.experiment = experiment self.eval_dataset = eval_dataset self.label_names = label_names self.submodel = submodel self.n", "the dataset every time, just do it once and pass the true labels", "max_categories=90, max_example_per_cell=1) class ImageCallback(Callback): def __init__(self, experiment, dataset, label_names, submodel=False): self.experiment = experiment", "< limit: pred = self.model.predict(data) images.append(data) if self.submodel: y_pred.append(pred[0]) y_true.append(label[0]) else: y_pred.append(pred) y_true.append(label)", "verbose=1) callback_list.append(reduce_lr) #Get the true labels since they are not shuffled y_true =", "counter)) counter += 1 def create(experiment, train_data, validation_data, train_shp, log_dir=None, label_names=None, submodel=False): \"\"\"Create", "\"Metadata Confusion Matrix\" elif self.submodel in [\"ensemble\"]: name = \"Ensemble Matrix\" else: name", "return None y_pred = [] sites = [] #gather 
site and species matrix", "\"Confusion Matrix\" cm = self.experiment.log_confusion_matrix( self.y_true, y_pred, title=name, file_name= name, labels=self.label_names, max_categories=90, max_example_per_cell=1)", "and pass the true labels to the function \"\"\" self.experiment = experiment self.eval_dataset", "callbacks Args: experiment: a comet experiment object train_data: a tf data object to", "metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=domain_lists) self.experiment.log_metric(name = \"Within_domain confusion[training]\", value =", "self.y_true, y_pred, title=name, file_name= name, labels=self.label_names, max_categories=90, max_example_per_cell=1) class ImageCallback(Callback): def __init__(self, experiment,", "= 0 for data, label in self.dataset: if num_images < limit: pred =", "sure its constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) results = pd.DataFrame({\"true\":np.argmax(self.y_true, 1),\"predicted\":np.argmax(y_pred, 1)}) #assign labels if", "on_epoch_end(self, epoch, logs={}): if not epoch % self.n == 0: return None y_pred", "constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) results = pd.DataFrame({\"true\":np.argmax(self.y_true, 1),\"predicted\":np.argmax(y_pred, 1)}) #assign labels if self.label_names: results[\"true_taxonID\"]", "iterating through the dataset every time, just do it once and pass the", "= \"Within_site confusion[training]\", value = site_confusion) plot_lists = self.train_shp.groupby(\"taxonID\").plotID.unique() plot_confusion = metrics.site_confusion(y_true =", "not epoch % self.n == 0: return None y_pred = [] sites =", "sure its constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) class ConfusionMatrixCallback(Callback): def __init__(self, experiment, dataset, label_names, y_true,", "self.experiment.log_metric(\"Final MacroF1\", macro) #Log number of predictions to make sure its constant 
self.experiment.log_metric(\"Prediction", "as np import pandas as pd from datetime import datetime from DeepTreeAttention.utils import", "y_pred[0] #F1 macro, micro = metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"Final MicroF1\", micro) self.experiment.log_metric(\"Final MacroF1\", macro)", "import metrics from DeepTreeAttention.visualization import visualize from tensorflow.keras.callbacks import ReduceLROnPlateau from tensorflow.keras.callbacks import", "= train_shp self.y_true = y_true def on_train_end(self, logs={}): y_pred = [] sites =", "= results.predicted_taxonID, site_lists=site_lists) self.experiment.log_metric(name = \"Within_site confusion[training]\", value = site_confusion) plot_lists = self.train_shp.groupby(\"taxonID\").plotID.unique()", "= [] limit = 20 num_images = 0 for data, label in self.dataset:", "epochs to run. If n=4, function will run every 4 epochs y_true: instead", "experiment object train_data: a tf data object to generate data validation_data: a tf", "[\"spectral\",\"spatial\"]: y_pred = y_pred[0] #F1 macro, micro = metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"Final MicroF1\", micro)", "label in validation_data: if submodel in [\"spatial\",\"spectral\"]: label = label[0] y_true.append(label) y_true =", "factor=0.5, patience=10, min_delta=0.1, min_lr=0.00001, verbose=1) callback_list.append(reduce_lr) #Get the true labels since they are", "= \"Confusion Matrix\" cm = self.experiment.log_confusion_matrix( self.y_true, y_pred, title=name, file_name= name, labels=self.label_names, max_categories=90,", "name, labels=self.label_names, max_categories=90, max_example_per_cell=1) class ImageCallback(Callback): def __init__(self, experiment, dataset, label_names, submodel=False): self.experiment", "ReduceLROnPlateau from tensorflow.keras.callbacks import Callback, TensorBoard from tensorflow import expand_dims class F1Callback(Callback): def", "in [\"spatial\",\"spectral\"]: confusion_matrix = 
ConfusionMatrixCallback(experiment=experiment, y_true=y_true, dataset=validation_data, label_names=label_names, submodel=submodel) callback_list.append(confusion_matrix) f1 = F1Callback(experiment=experiment,", "data to check site error \"\"\" #turn off callbacks for metadata callback_list =", "images.append(data) if self.submodel: y_pred.append(pred[0]) y_true.append(label[0]) else: y_pred.append(pred) y_true.append(label) num_images += label.shape[0] else: break", "data validation_data: a tf data object to generate data train_shp: the original shapefile", "sample images with labels annotated\"\"\" #fill until there is atleast 20 images images", "#plot_images = ImageCallback(experiment, validation_data, label_names, submodel=submodel) #callback_list.append(plot_images) if log_dir is not None: print(\"saving", "prediction, image in zip(true_taxonID, pred_taxonID, images): figure = visualize.plot_prediction(image=image, prediction=prediction, label=label) self.experiment.log_figure(figure_name=\"{}_{}\".format(label, counter))", "#Log number of predictions to make sure its constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) results =", "= [] y_pred = [] y_true = [] limit = 20 num_images =", "Matrix\" elif self.submodel in [\"ensemble\"]: name = \"Ensemble Matrix\" else: name = \"Confusion", "name = \"Ensemble Matrix\" else: name = \"Confusion Matrix\" cm = self.experiment.log_confusion_matrix( self.y_true,", "n self.train_shp = train_shp self.y_true = y_true def on_train_end(self, logs={}): y_pred = []", "20 num_images = 0 for data, label in self.dataset: if num_images < limit:", "most_confused = most_confused[~(most_confused.true_taxonID == most_confused.predicted_taxonID)].sort_values(\"count\", ascending=False) self.experiment.log_table(\"most_confused.csv\",most_confused.values) def on_epoch_end(self, epoch, logs={}): if not", "self.label_names[x]) #Within site confusion site_lists = self.train_shp.groupby(\"taxonID\").siteID.unique() 
site_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred", "self.submodel = submodel def on_train_end(self, epoch, logs={}): \"\"\"Plot sample images with labels annotated\"\"\"", "samples\",y_pred.shape[0]) class ConfusionMatrixCallback(Callback): def __init__(self, experiment, dataset, label_names, y_true, submodel): self.experiment = experiment", "= results.predicted_taxonID, site_lists=plot_lists) self.experiment.log_metric(name = \"Within_plot confusion[training]\", value = plot_confusion) domain_lists = self.train_shp.groupby(\"taxonID\").domainID.unique()", "pass the true labels to the function \"\"\" self.experiment = experiment self.eval_dataset =", "the true labels to the function \"\"\" self.experiment = experiment self.eval_dataset = eval_dataset", "submodel=False): self.experiment = experiment self.dataset = dataset self.label_names = label_names self.submodel = submodel", "callback Args: n: number of epochs to run. If n=4, function will run", "= results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=plot_lists) self.experiment.log_metric(name = \"Within_plot confusion[training]\", value = plot_confusion)", "original shapefile for the train data to check site error \"\"\" #turn off", "same, take the first scientific_dict = self.train_shp.groupby('taxonID')['scientific'].apply(lambda x: x.head(1).values.tolist()).to_dict() genus_confusion = metrics.genus_confusion(y_true =", "#Within site confusion site_lists = self.train_shp.groupby(\"taxonID\").siteID.unique() site_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred =", "submodel=False): \"\"\"Create a set of callbacks Args: experiment: a comet experiment object train_data:", "def __init__(self, experiment, dataset, label_names, submodel=False): self.experiment = experiment self.dataset = dataset self.label_names", "counter += 1 def create(experiment, train_data, validation_data, train_shp, log_dir=None, label_names=None, submodel=False): 
\"\"\"Create a", "eval_dataset self.label_names = label_names self.submodel = submodel self.n = n self.train_shp = train_shp", "self.experiment.log_metric(name = \"Within Genus confusion\", value = genus_confusion) #Most confused most_confused = results.groupby([\"true_taxonID\",\"predicted_taxonID\"]).size().reset_index(name=\"count\")", "n: number of epochs to run. If n=4, function will run every 4", "self.experiment.log_metric(\"MicroF1\", micro) self.experiment.log_metric(\"MacroF1\", macro) #Log number of predictions to make sure its constant", "0 for label, prediction, image in zip(true_taxonID, pred_taxonID, images): figure = visualize.plot_prediction(image=image, prediction=prediction,", "train_data: a tf data object to generate data validation_data: a tf data object", "[] limit = 20 num_images = 0 for data, label in self.dataset: if", "= submodel self.n = n self.train_shp = train_shp self.y_true = y_true def on_train_end(self,", "= metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=plot_lists) self.experiment.log_metric(name = \"Within_plot confusion[training]\", value", "if self.submodel: y_pred.append(pred[0]) y_true.append(label[0]) else: y_pred.append(pred) y_true.append(label) num_images += label.shape[0] else: break images", "= self.experiment.log_confusion_matrix( self.y_true, y_pred, title=name, file_name= name, labels=self.label_names, max_categories=90, max_example_per_cell=1) class ImageCallback(Callback): def", "tensorflow.keras.callbacks import ReduceLROnPlateau from tensorflow.keras.callbacks import Callback, TensorBoard from tensorflow import expand_dims class", "domain_lists = self.train_shp.groupby(\"taxonID\").domainID.unique() domain_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=domain_lists) self.experiment.log_metric(name", "is None: #plot_images = ImageCallback(experiment, validation_data, label_names, 
submodel=submodel) #callback_list.append(plot_images) if log_dir is not", "value = genus_confusion) #Most confused most_confused = results.groupby([\"true_taxonID\",\"predicted_taxonID\"]).size().reset_index(name=\"count\") most_confused = most_confused[~(most_confused.true_taxonID == most_confused.predicted_taxonID)].sort_values(\"count\",", "class ImageCallback(Callback): def __init__(self, experiment, dataset, label_names, submodel=False): self.experiment = experiment self.dataset =", "on_train_end(self, epoch, logs={}): \"\"\"Plot sample images with labels annotated\"\"\" #fill until there is", "label_names self.submodel = submodel self.n = n self.train_shp = train_shp self.y_true = y_true", "micro = metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"MicroF1\", micro) self.experiment.log_metric(\"MacroF1\", macro) #Log number of predictions to", "images = [] y_pred = [] y_true = [] limit = 20 num_images", "matrix y_pred = self.model.predict(self.eval_dataset) if self.submodel in [\"spectral\",\"spatial\"]: y_pred = y_pred[0] #F1 macro,", "y_pred = results.predicted_taxonID, site_lists=plot_lists) self.experiment.log_metric(name = \"Within_plot confusion[training]\", value = plot_confusion) domain_lists =", "num_images < limit: pred = self.model.predict(data) images.append(data) if self.submodel: y_pred.append(pred[0]) y_true.append(label[0]) else: y_pred.append(pred)", "results.predicted_taxonID, scientific_dict=scientific_dict) self.experiment.log_metric(name = \"Within Genus confusion\", value = genus_confusion) #Most confused most_confused", "= visualize.plot_prediction(image=image, prediction=prediction, label=label) self.experiment.log_figure(figure_name=\"{}_{}\".format(label, counter)) counter += 1 def create(experiment, train_data, validation_data,", "create(experiment, train_data, validation_data, train_shp, log_dir=None, label_names=None, submodel=False): \"\"\"Create a set of callbacks Args:", "= 20 num_images = 0 for data, label in 
self.dataset: if num_images <", "MicroF1\", micro) self.experiment.log_metric(\"Final MacroF1\", macro) #Log number of predictions to make sure its", "y_true, submodel): self.experiment = experiment self.dataset = dataset self.label_names = label_names self.submodel =", "\"\"\"Create a set of callbacks Args: experiment: a comet experiment object train_data: a", "y_pred = [] sites = [] #gather site and species matrix y_pred =", "for data, label in validation_data: if submodel in [\"spatial\",\"spectral\"]: label = label[0] y_true.append(label)", "datetime import datetime from DeepTreeAttention.utils import metrics from DeepTreeAttention.visualization import visualize from tensorflow.keras.callbacks", "results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=plot_lists) self.experiment.log_metric(name = \"Within_plot confusion[training]\", value = plot_confusion) domain_lists", "Confusion Matrix\" elif self.submodel in [\"ensemble\"]: name = \"Ensemble Matrix\" else: name =", "in [\"spectral\",\"spatial\"]: y_pred = y_pred[0] #F1 macro, micro = metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"Final MicroF1\",", "#assign labels if self.label_names: results[\"true_taxonID\"] = results.true.apply(lambda x: self.label_names[x]) results[\"predicted_taxonID\"] = results.predicted.apply(lambda x:", "= self.train_shp.groupby('taxonID')['scientific'].apply(lambda x: x.head(1).values.tolist()).to_dict() genus_confusion = metrics.genus_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, scientific_dict=scientific_dict)", "self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) results = pd.DataFrame({\"true\":np.argmax(self.y_true, 1),\"predicted\":np.argmax(y_pred, 1)}) #assign labels if self.label_names: results[\"true_taxonID\"] =", "results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=site_lists) self.experiment.log_metric(name = \"Within_site confusion[training]\", value = site_confusion) 
plot_lists", "= results.true_taxonID, y_pred = results.predicted_taxonID, scientific_dict=scientific_dict) self.experiment.log_metric(name = \"Within Genus confusion\", value =", "def create(experiment, train_data, validation_data, train_shp, log_dir=None, label_names=None, submodel=False): \"\"\"Create a set of callbacks", "\"metadata\": name = \"Metadata Confusion Matrix\" elif self.submodel in [\"ensemble\"]: name = \"Ensemble", "metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"Final MicroF1\", micro) self.experiment.log_metric(\"Final MacroF1\", macro) #Log number of predictions to", "self.submodel is \"metadata\": name = \"Metadata Confusion Matrix\" elif self.submodel in [\"ensemble\"]: name", "of callbacks Args: experiment: a comet experiment object train_data: a tf data object", "self.experiment = experiment self.dataset = dataset self.label_names = label_names self.submodel = submodel def", "y_pred = self.model.predict(self.dataset) if self.submodel is \"metadata\": name = \"Metadata Confusion Matrix\" elif", "#turn off callbacks for metadata callback_list = [] reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10,", "most_confused[~(most_confused.true_taxonID == most_confused.predicted_taxonID)].sort_values(\"count\", ascending=False) self.experiment.log_table(\"most_confused.csv\",most_confused.values) def on_epoch_end(self, epoch, logs={}): if not epoch %", "[self.label_names[x] for x in y_pred] counter = 0 for label, prediction, image in", "% self.n == 0: return None y_pred = [] sites = [] #gather", "x: self.label_names[x]) #Within site confusion site_lists = self.train_shp.groupby(\"taxonID\").siteID.unique() site_confusion = metrics.site_confusion(y_true = results.true_taxonID,", "experiment self.eval_dataset = eval_dataset self.label_names = label_names self.submodel = submodel self.n = n", "self.eval_dataset = eval_dataset self.label_names = label_names self.submodel = submodel self.n = n self.train_shp", 
"y_true.append(label[0]) else: y_pred.append(pred) y_true.append(label) num_images += label.shape[0] else: break images = np.vstack(images) y_true", "data, label in self.dataset: if num_images < limit: pred = self.model.predict(data) images.append(data) if", "import expand_dims class F1Callback(Callback): def __init__(self, experiment, eval_dataset, y_true, label_names, submodel, train_shp, n=10):", "axis=1) y_pred = np.argmax(y_pred, axis=1) true_taxonID = [self.label_names[x] for x in y_true] pred_taxonID", "object to generate data validation_data: a tf data object to generate data train_shp:", "= most_confused[~(most_confused.true_taxonID == most_confused.predicted_taxonID)].sort_values(\"count\", ascending=False) self.experiment.log_table(\"most_confused.csv\",most_confused.values) def on_epoch_end(self, epoch, logs={}): if not epoch", "self.y_true = y_true def on_train_end(self, epoch, logs={}): y_pred = self.model.predict(self.dataset) if self.submodel is", "they are not shuffled y_true = [ ] for data, label in validation_data:", "self.dataset = dataset self.label_names = label_names self.submodel = submodel self.y_true = y_true def", "results.predicted_taxonID, site_lists=domain_lists) self.experiment.log_metric(name = \"Within_domain confusion[training]\", value = domain_confusion) #Genus of all the", "Callback, TensorBoard from tensorflow import expand_dims class F1Callback(Callback): def __init__(self, experiment, eval_dataset, y_true,", "axis=1) true_taxonID = [self.label_names[x] for x in y_true] pred_taxonID = [self.label_names[x] for x", "micro) self.experiment.log_metric(\"Final MacroF1\", macro) #Log number of predictions to make sure its constant", "pred_taxonID = [self.label_names[x] for x in y_pred] counter = 0 for label, prediction,", "n=10): \"\"\"F1 callback Args: n: number of epochs to run. 
If n=4, function", "logs={}): \"\"\"Plot sample images with labels annotated\"\"\" #fill until there is atleast 20", "confusion[training]\", value = plot_confusion) domain_lists = self.train_shp.groupby(\"taxonID\").domainID.unique() domain_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred", "site error \"\"\" #turn off callbacks for metadata callback_list = [] reduce_lr =", "if num_images < limit: pred = self.model.predict(data) images.append(data) if self.submodel: y_pred.append(pred[0]) y_true.append(label[0]) else:", "y_true = np.concatenate(y_true) if not submodel in [\"spatial\",\"spectral\"]: confusion_matrix = ConfusionMatrixCallback(experiment=experiment, y_true=y_true, dataset=validation_data,", "= np.vstack(images) y_true = np.concatenate(y_true) y_pred = np.concatenate(y_pred) y_true = np.argmax(y_true, axis=1) y_pred", "ImageCallback(experiment, validation_data, label_names, submodel=submodel) #callback_list.append(plot_images) if log_dir is not None: print(\"saving tensorboard logs", "should be the same, take the first scientific_dict = self.train_shp.groupby('taxonID')['scientific'].apply(lambda x: x.head(1).values.tolist()).to_dict() genus_confusion", "not shuffled y_true = [ ] for data, label in validation_data: if submodel", "reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, min_delta=0.1, min_lr=0.00001, verbose=1) callback_list.append(reduce_lr) #Get the true labels", "self.label_names = label_names self.submodel = submodel self.y_true = y_true def on_train_end(self, epoch, logs={}):", "as pd from datetime import datetime from DeepTreeAttention.utils import metrics from DeepTreeAttention.visualization import", "in [\"spectral\",\"spatial\"]: y_pred = y_pred[0] #F1 macro, micro = metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"MicroF1\", micro)", "a tf data object to generate data validation_data: a tf data object to", "eval_dataset, y_true, label_names, submodel, train_shp, 
n=10): \"\"\"F1 callback Args: n: number of epochs", "metadata callback_list = [] reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, min_delta=0.1, min_lr=0.00001, verbose=1) callback_list.append(reduce_lr)", "x in y_pred] counter = 0 for label, prediction, image in zip(true_taxonID, pred_taxonID,", "import Callback, TensorBoard from tensorflow import expand_dims class F1Callback(Callback): def __init__(self, experiment, eval_dataset,", "\"Within_plot confusion[training]\", value = plot_confusion) domain_lists = self.train_shp.groupby(\"taxonID\").domainID.unique() domain_confusion = metrics.site_confusion(y_true = results.true_taxonID,", "np.concatenate(y_true) y_pred = np.concatenate(y_pred) y_true = np.argmax(y_true, axis=1) y_pred = np.argmax(y_pred, axis=1) true_taxonID", "dataset=validation_data, label_names=label_names, submodel=submodel) callback_list.append(confusion_matrix) f1 = F1Callback(experiment=experiment, y_true=y_true, eval_dataset=validation_data, label_names=label_names, submodel=submodel, train_shp=train_shp) callback_list.append(f1)", "labels to the function \"\"\" self.experiment = experiment self.eval_dataset = eval_dataset self.label_names =", "experiment self.dataset = dataset self.label_names = label_names self.submodel = submodel self.y_true = y_true", "\"\"\" self.experiment = experiment self.eval_dataset = eval_dataset self.label_names = label_names self.submodel = submodel", "in y_pred] counter = 0 for label, prediction, image in zip(true_taxonID, pred_taxonID, images):", "most_confused = results.groupby([\"true_taxonID\",\"predicted_taxonID\"]).size().reset_index(name=\"count\") most_confused = most_confused[~(most_confused.true_taxonID == most_confused.predicted_taxonID)].sort_values(\"count\", ascending=False) self.experiment.log_table(\"most_confused.csv\",most_confused.values) def on_epoch_end(self, epoch,", "object train_data: a tf data object to generate data validation_data: a tf data", "make sure its 
constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) class ConfusionMatrixCallback(Callback): def __init__(self, experiment, dataset, label_names,", "name = \"Metadata Confusion Matrix\" elif self.submodel in [\"ensemble\"]: name = \"Ensemble Matrix\"", "\"Ensemble Matrix\" else: name = \"Confusion Matrix\" cm = self.experiment.log_confusion_matrix( self.y_true, y_pred, title=name,", "patience=10, min_delta=0.1, min_lr=0.00001, verbose=1) callback_list.append(reduce_lr) #Get the true labels since they are not", "logs={}): y_pred = self.model.predict(self.dataset) if self.submodel is \"metadata\": name = \"Metadata Confusion Matrix\"", "check site error \"\"\" #turn off callbacks for metadata callback_list = [] reduce_lr", "of iterating through the dataset every time, just do it once and pass", "y_true def on_train_end(self, logs={}): y_pred = [] sites = [] #gather site and", "be the same, take the first scientific_dict = self.train_shp.groupby('taxonID')['scientific'].apply(lambda x: x.head(1).values.tolist()).to_dict() genus_confusion =", "0 for data, label in self.dataset: if num_images < limit: pred = self.model.predict(data)", "label_names, submodel=False): self.experiment = experiment self.dataset = dataset self.label_names = label_names self.submodel =", "else: name = \"Confusion Matrix\" cm = self.experiment.log_confusion_matrix( self.y_true, y_pred, title=name, file_name= name,", "= [self.label_names[x] for x in y_pred] counter = 0 for label, prediction, image", "validation_data: a tf data object to generate data train_shp: the original shapefile for", "submodel=submodel) callback_list.append(confusion_matrix) f1 = F1Callback(experiment=experiment, y_true=y_true, eval_dataset=validation_data, label_names=label_names, submodel=submodel, train_shp=train_shp) callback_list.append(f1) #if submodel", "== most_confused.predicted_taxonID)].sort_values(\"count\", ascending=False) 
self.experiment.log_table(\"most_confused.csv\",most_confused.values) def on_epoch_end(self, epoch, logs={}): if not epoch % self.n", "Matrix\" cm = self.experiment.log_confusion_matrix( self.y_true, y_pred, title=name, file_name= name, labels=self.label_names, max_categories=90, max_example_per_cell=1) class", "different taxonID variants should be the same, take the first scientific_dict = self.train_shp.groupby('taxonID')['scientific'].apply(lambda", "if not epoch % self.n == 0: return None y_pred = [] sites", "images images = [] y_pred = [] y_true = [] limit = 20", "Genus confusion\", value = genus_confusion) #Most confused most_confused = results.groupby([\"true_taxonID\",\"predicted_taxonID\"]).size().reset_index(name=\"count\") most_confused = most_confused[~(most_confused.true_taxonID", "experiment, eval_dataset, y_true, label_names, submodel, train_shp, n=10): \"\"\"F1 callback Args: n: number of", "label_names=None, submodel=False): \"\"\"Create a set of callbacks Args: experiment: a comet experiment object", "0: return None y_pred = [] sites = [] #gather site and species", "y_pred.append(pred) y_true.append(label) num_images += label.shape[0] else: break images = np.vstack(images) y_true = np.concatenate(y_true)", "y_pred = np.argmax(y_pred, axis=1) true_taxonID = [self.label_names[x] for x in y_true] pred_taxonID =", "y_pred = y_pred[0] #F1 macro, micro = metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"MicroF1\", micro) self.experiment.log_metric(\"MacroF1\", macro)", "eval_dataset=validation_data, label_names=label_names, submodel=submodel, train_shp=train_shp) callback_list.append(f1) #if submodel is None: #plot_images = ImageCallback(experiment, validation_data,", "macro, micro = metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"Final MicroF1\", micro) self.experiment.log_metric(\"Final MacroF1\", macro) #Log number", "break images = np.vstack(images) y_true = np.concatenate(y_true) y_pred = np.concatenate(y_pred) 
y_true = np.argmax(y_true,", "is \"metadata\": name = \"Metadata Confusion Matrix\" elif self.submodel in [\"ensemble\"]: name =", "sites = [] #gather site and species matrix y_pred = self.model.predict(self.eval_dataset) if self.submodel", "first scientific_dict = self.train_shp.groupby('taxonID')['scientific'].apply(lambda x: x.head(1).values.tolist()).to_dict() genus_confusion = metrics.genus_confusion(y_true = results.true_taxonID, y_pred =", "metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=plot_lists) self.experiment.log_metric(name = \"Within_plot confusion[training]\", value =", "value = domain_confusion) #Genus of all the different taxonID variants should be the", "images): figure = visualize.plot_prediction(image=image, prediction=prediction, label=label) self.experiment.log_figure(figure_name=\"{}_{}\".format(label, counter)) counter += 1 def create(experiment,", "= domain_confusion) #Genus of all the different taxonID variants should be the same,", "= \"Metadata Confusion Matrix\" elif self.submodel in [\"ensemble\"]: name = \"Ensemble Matrix\" else:", "to check site error \"\"\" #turn off callbacks for metadata callback_list = []", "annotated\"\"\" #fill until there is atleast 20 images images = [] y_pred =", "def on_train_end(self, epoch, logs={}): \"\"\"Plot sample images with labels annotated\"\"\" #fill until there", "self.train_shp.groupby('taxonID')['scientific'].apply(lambda x: x.head(1).values.tolist()).to_dict() genus_confusion = metrics.genus_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, scientific_dict=scientific_dict) self.experiment.log_metric(name", "value = site_confusion) plot_lists = self.train_shp.groupby(\"taxonID\").plotID.unique() plot_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred =", "#Genus of all the different taxonID variants should be the same, take the", "the function \"\"\" self.experiment = experiment 
self.eval_dataset = eval_dataset self.label_names = label_names self.submodel", "its constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) results = pd.DataFrame({\"true\":np.argmax(self.y_true, 1),\"predicted\":np.argmax(y_pred, 1)}) #assign labels if self.label_names:", "data object to generate data train_shp: the original shapefile for the train data", "= eval_dataset self.label_names = label_names self.submodel = submodel self.n = n self.train_shp =", "#Log number of predictions to make sure its constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) class ConfusionMatrixCallback(Callback):", "until there is atleast 20 images images = [] y_pred = [] y_true", "taxonID variants should be the same, take the first scientific_dict = self.train_shp.groupby('taxonID')['scientific'].apply(lambda x:", "= experiment self.eval_dataset = eval_dataset self.label_names = label_names self.submodel = submodel self.n =", "y_pred = self.model.predict(self.eval_dataset) if self.submodel in [\"spectral\",\"spatial\"]: y_pred = y_pred[0] #F1 macro, micro", "micro) self.experiment.log_metric(\"MacroF1\", macro) #Log number of predictions to make sure its constant self.experiment.log_metric(\"Prediction", "on_train_end(self, logs={}): y_pred = [] sites = [] #gather site and species matrix", "through the dataset every time, just do it once and pass the true", "macro) #Log number of predictions to make sure its constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) class", "np import pandas as pd from datetime import datetime from DeepTreeAttention.utils import metrics", "def on_epoch_end(self, epoch, logs={}): if not epoch % self.n == 0: return None", "in [\"spatial\",\"spectral\"]: label = label[0] y_true.append(label) y_true = np.concatenate(y_true) if not submodel in", "self.label_names[x]) results[\"predicted_taxonID\"] = results.predicted.apply(lambda x: self.label_names[x]) #Within site confusion site_lists = 
self.train_shp.groupby(\"taxonID\").siteID.unique() site_confusion", "np.concatenate(y_pred) y_true = np.argmax(y_true, axis=1) y_pred = np.argmax(y_pred, axis=1) true_taxonID = [self.label_names[x] for", "prediction=prediction, label=label) self.experiment.log_figure(figure_name=\"{}_{}\".format(label, counter)) counter += 1 def create(experiment, train_data, validation_data, train_shp, log_dir=None,", "the different taxonID variants should be the same, take the first scientific_dict =", "to generate data validation_data: a tf data object to generate data train_shp: the", "generate data train_shp: the original shapefile for the train data to check site", "class ConfusionMatrixCallback(Callback): def __init__(self, experiment, dataset, label_names, y_true, submodel): self.experiment = experiment self.dataset", "20 images images = [] y_pred = [] y_true = [] limit =", "train_shp=train_shp) callback_list.append(f1) #if submodel is None: #plot_images = ImageCallback(experiment, validation_data, label_names, submodel=submodel) #callback_list.append(plot_images)", "= ConfusionMatrixCallback(experiment=experiment, y_true=y_true, dataset=validation_data, label_names=label_names, submodel=submodel) callback_list.append(confusion_matrix) f1 = F1Callback(experiment=experiment, y_true=y_true, eval_dataset=validation_data, label_names=label_names,", "label_names, submodel, train_shp, n=10): \"\"\"F1 callback Args: n: number of epochs to run.", "self.submodel: y_pred.append(pred[0]) y_true.append(label[0]) else: y_pred.append(pred) y_true.append(label) num_images += label.shape[0] else: break images =", "off callbacks for metadata callback_list = [] reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, min_delta=0.1,", "number of predictions to make sure its constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) class ConfusionMatrixCallback(Callback): def", "confusion\", value = genus_confusion) #Most confused most_confused = 
results.groupby([\"true_taxonID\",\"predicted_taxonID\"]).size().reset_index(name=\"count\") most_confused = most_confused[~(most_confused.true_taxonID ==", "y_true = [ ] for data, label in validation_data: if submodel in [\"spatial\",\"spectral\"]:", "time, just do it once and pass the true labels to the function", "for x in y_true] pred_taxonID = [self.label_names[x] for x in y_pred] counter =", "ImageCallback(Callback): def __init__(self, experiment, dataset, label_names, submodel=False): self.experiment = experiment self.dataset = dataset", "dataset every time, just do it once and pass the true labels to", "None y_pred = [] sites = [] #gather site and species matrix y_pred", "train_shp, n=10): \"\"\"F1 callback Args: n: number of epochs to run. If n=4,", "it once and pass the true labels to the function \"\"\" self.experiment =", "self.model.predict(self.eval_dataset) if self.submodel in [\"spectral\",\"spatial\"]: y_pred = y_pred[0] #F1 macro, micro = metrics.f1_scores(self.y_true,", "take the first scientific_dict = self.train_shp.groupby('taxonID')['scientific'].apply(lambda x: x.head(1).values.tolist()).to_dict() genus_confusion = metrics.genus_confusion(y_true = results.true_taxonID,", "self.experiment.log_table(\"most_confused.csv\",most_confused.values) def on_epoch_end(self, epoch, logs={}): if not epoch % self.n == 0: return", "metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=site_lists) self.experiment.log_metric(name = \"Within_site confusion[training]\", value =", "= metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"MicroF1\", micro) self.experiment.log_metric(\"MacroF1\", macro) #Log number of predictions to make", "a tf data object to generate data train_shp: the original shapefile for the", "= [] #gather site and species matrix y_pred = self.model.predict(self.eval_dataset) if self.submodel in", "once and pass the true labels to the function \"\"\" self.experiment = 
experiment", "plot_lists = self.train_shp.groupby(\"taxonID\").plotID.unique() plot_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=plot_lists) self.experiment.log_metric(name", "__init__(self, experiment, dataset, label_names, submodel=False): self.experiment = experiment self.dataset = dataset self.label_names =", "there is atleast 20 images images = [] y_pred = [] y_true =", "#F1 macro, micro = metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"MicroF1\", micro) self.experiment.log_metric(\"MacroF1\", macro) #Log number of", "DeepTreeAttention.utils import metrics from DeepTreeAttention.visualization import visualize from tensorflow.keras.callbacks import ReduceLROnPlateau from tensorflow.keras.callbacks", "self.submodel = submodel self.n = n self.train_shp = train_shp self.y_true = y_true def", "DeepTreeAttention.visualization import visualize from tensorflow.keras.callbacks import ReduceLROnPlateau from tensorflow.keras.callbacks import Callback, TensorBoard from", "np.argmax(y_true, axis=1) y_pred = np.argmax(y_pred, axis=1) true_taxonID = [self.label_names[x] for x in y_true]", "\"Within_domain confusion[training]\", value = domain_confusion) #Genus of all the different taxonID variants should", "the same, take the first scientific_dict = self.train_shp.groupby('taxonID')['scientific'].apply(lambda x: x.head(1).values.tolist()).to_dict() genus_confusion = metrics.genus_confusion(y_true", "from DeepTreeAttention.utils import metrics from DeepTreeAttention.visualization import visualize from tensorflow.keras.callbacks import ReduceLROnPlateau from", "log_dir is not None: print(\"saving tensorboard logs at {}\".format(log_dir)) tensorboard = TensorBoard(log_dir=log_dir, histogram_freq=0,", "self.train_shp.groupby(\"taxonID\").siteID.unique() site_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=site_lists) 
self.experiment.log_metric(name = \"Within_site", "true labels to the function \"\"\" self.experiment = experiment self.eval_dataset = eval_dataset self.label_names", "name = \"Confusion Matrix\" cm = self.experiment.log_confusion_matrix( self.y_true, y_pred, title=name, file_name= name, labels=self.label_names,", "submodel, train_shp, n=10): \"\"\"F1 callback Args: n: number of epochs to run. If", "number of epochs to run. If n=4, function will run every 4 epochs", "y_pred = [] y_true = [] limit = 20 num_images = 0 for", "= y_true def on_train_end(self, epoch, logs={}): y_pred = self.model.predict(self.dataset) if self.submodel is \"metadata\":", "self.dataset = dataset self.label_names = label_names self.submodel = submodel def on_train_end(self, epoch, logs={}):", "import numpy as np import pandas as pd from datetime import datetime from", "= results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=site_lists) self.experiment.log_metric(name = \"Within_site confusion[training]\", value = site_confusion)", "elif self.submodel in [\"ensemble\"]: name = \"Ensemble Matrix\" else: name = \"Confusion Matrix\"", "submodel def on_train_end(self, epoch, logs={}): \"\"\"Plot sample images with labels annotated\"\"\" #fill until", "label in self.dataset: if num_images < limit: pred = self.model.predict(data) images.append(data) if self.submodel:", "data, label in validation_data: if submodel in [\"spatial\",\"spectral\"]: label = label[0] y_true.append(label) y_true", "Args: n: number of epochs to run. 
If n=4, function will run every", "file_name= name, labels=self.label_names, max_categories=90, max_example_per_cell=1) class ImageCallback(Callback): def __init__(self, experiment, dataset, label_names, submodel=False):", "if self.submodel in [\"spectral\",\"spatial\"]: y_pred = y_pred[0] #F1 macro, micro = metrics.f1_scores(self.y_true, y_pred)", "def __init__(self, experiment, eval_dataset, y_true, label_names, submodel, train_shp, n=10): \"\"\"F1 callback Args: n:", "from tensorflow import expand_dims class F1Callback(Callback): def __init__(self, experiment, eval_dataset, y_true, label_names, submodel,", "[] y_true = [] limit = 20 num_images = 0 for data, label", "submodel=submodel, train_shp=train_shp) callback_list.append(f1) #if submodel is None: #plot_images = ImageCallback(experiment, validation_data, label_names, submodel=submodel)", "#gather site and species matrix y_pred = self.model.predict(self.eval_dataset) if self.submodel in [\"spectral\",\"spatial\"]: y_pred", "limit = 20 num_images = 0 for data, label in self.dataset: if num_images", "y_pred = np.concatenate(y_pred) y_true = np.argmax(y_true, axis=1) y_pred = np.argmax(y_pred, axis=1) true_taxonID =", "confusion[training]\", value = site_confusion) plot_lists = self.train_shp.groupby(\"taxonID\").plotID.unique() plot_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred", "\"Within Genus confusion\", value = genus_confusion) #Most confused most_confused = results.groupby([\"true_taxonID\",\"predicted_taxonID\"]).size().reset_index(name=\"count\") most_confused =", "metrics from DeepTreeAttention.visualization import visualize from tensorflow.keras.callbacks import ReduceLROnPlateau from tensorflow.keras.callbacks import Callback,", "every 4 epochs y_true: instead of iterating through the dataset every time, just", "\"Within_site confusion[training]\", value = site_confusion) plot_lists = self.train_shp.groupby(\"taxonID\").plotID.unique() plot_confusion = 
metrics.site_confusion(y_true = results.true_taxonID,", "dataset, label_names, submodel=False): self.experiment = experiment self.dataset = dataset self.label_names = label_names self.submodel", "number of predictions to make sure its constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) results = pd.DataFrame({\"true\":np.argmax(self.y_true,", "= plot_confusion) domain_lists = self.train_shp.groupby(\"taxonID\").domainID.unique() domain_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID,", "site_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=site_lists) self.experiment.log_metric(name = \"Within_site confusion[training]\",", "for the train data to check site error \"\"\" #turn off callbacks for", "self.experiment.log_figure(figure_name=\"{}_{}\".format(label, counter)) counter += 1 def create(experiment, train_data, validation_data, train_shp, log_dir=None, label_names=None, submodel=False):", "shapefile for the train data to check site error \"\"\" #turn off callbacks", "ConfusionMatrixCallback(Callback): def __init__(self, experiment, dataset, label_names, y_true, submodel): self.experiment = experiment self.dataset =", "do it once and pass the true labels to the function \"\"\" self.experiment", "plot_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=plot_lists) self.experiment.log_metric(name = \"Within_plot confusion[training]\",", "for label, prediction, image in zip(true_taxonID, pred_taxonID, images): figure = visualize.plot_prediction(image=image, prediction=prediction, label=label)", "scientific_dict = self.train_shp.groupby('taxonID')['scientific'].apply(lambda x: x.head(1).values.tolist()).to_dict() genus_confusion = metrics.genus_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID,", "= F1Callback(experiment=experiment, 
y_true=y_true, eval_dataset=validation_data, label_names=label_names, submodel=submodel, train_shp=train_shp) callback_list.append(f1) #if submodel is None: #plot_images", "= self.model.predict(self.dataset) if self.submodel is \"metadata\": name = \"Metadata Confusion Matrix\" elif self.submodel", "in validation_data: if submodel in [\"spatial\",\"spectral\"]: label = label[0] y_true.append(label) y_true = np.concatenate(y_true)", "most_confused.predicted_taxonID)].sort_values(\"count\", ascending=False) self.experiment.log_table(\"most_confused.csv\",most_confused.values) def on_epoch_end(self, epoch, logs={}): if not epoch % self.n ==", "self.submodel in [\"spectral\",\"spatial\"]: y_pred = y_pred[0] #F1 macro, micro = metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"Final", "callbacks for metadata callback_list = [] reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, min_delta=0.1, min_lr=0.00001,", "= results.true.apply(lambda x: self.label_names[x]) results[\"predicted_taxonID\"] = results.predicted.apply(lambda x: self.label_names[x]) #Within site confusion site_lists", "cm = self.experiment.log_confusion_matrix( self.y_true, y_pred, title=name, file_name= name, labels=self.label_names, max_categories=90, max_example_per_cell=1) class ImageCallback(Callback):", "[] reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, min_delta=0.1, min_lr=0.00001, verbose=1) callback_list.append(reduce_lr) #Get the true", "train_shp self.y_true = y_true def on_train_end(self, logs={}): y_pred = [] sites = []", "confusion[training]\", value = domain_confusion) #Genus of all the different taxonID variants should be", "MacroF1\", macro) #Log number of predictions to make sure its constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0])", "error \"\"\" #turn off callbacks for metadata callback_list = [] reduce_lr = ReduceLROnPlateau(monitor='val_loss',", "+= label.shape[0] else: break images = 
np.vstack(images) y_true = np.concatenate(y_true) y_pred = np.concatenate(y_pred)", "for x in y_pred] counter = 0 for label, prediction, image in zip(true_taxonID,", "y_true = np.argmax(y_true, axis=1) y_pred = np.argmax(y_pred, axis=1) true_taxonID = [self.label_names[x] for x", "just do it once and pass the true labels to the function \"\"\"", "genus_confusion = metrics.genus_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, scientific_dict=scientific_dict) self.experiment.log_metric(name = \"Within Genus", "dataset, label_names, y_true, submodel): self.experiment = experiment self.dataset = dataset self.label_names = label_names", "= dataset self.label_names = label_names self.submodel = submodel self.y_true = y_true def on_train_end(self,", "submodel): self.experiment = experiment self.dataset = dataset self.label_names = label_names self.submodel = submodel", "logs={}): y_pred = [] sites = [] #gather site and species matrix y_pred", "self.label_names = label_names self.submodel = submodel def on_train_end(self, epoch, logs={}): \"\"\"Plot sample images", "train_shp, log_dir=None, label_names=None, submodel=False): \"\"\"Create a set of callbacks Args: experiment: a comet", "from tensorflow.keras.callbacks import ReduceLROnPlateau from tensorflow.keras.callbacks import Callback, TensorBoard from tensorflow import expand_dims", "datetime from DeepTreeAttention.utils import metrics from DeepTreeAttention.visualization import visualize from tensorflow.keras.callbacks import ReduceLROnPlateau", "num_images = 0 for data, label in self.dataset: if num_images < limit: pred", "site_lists = self.train_shp.groupby(\"taxonID\").siteID.unique() site_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=site_lists) self.experiment.log_metric(name", "self.experiment.log_metric(name = \"Within_site confusion[training]\", value = site_confusion) plot_lists = 
self.train_shp.groupby(\"taxonID\").plotID.unique() plot_confusion = metrics.site_confusion(y_true", "= n self.train_shp = train_shp self.y_true = y_true def on_train_end(self, logs={}): y_pred =", "logs={}): if not epoch % self.n == 0: return None y_pred = []", "= results.groupby([\"true_taxonID\",\"predicted_taxonID\"]).size().reset_index(name=\"count\") most_confused = most_confused[~(most_confused.true_taxonID == most_confused.predicted_taxonID)].sort_values(\"count\", ascending=False) self.experiment.log_table(\"most_confused.csv\",most_confused.values) def on_epoch_end(self, epoch, logs={}):", "from DeepTreeAttention.visualization import visualize from tensorflow.keras.callbacks import ReduceLROnPlateau from tensorflow.keras.callbacks import Callback, TensorBoard", "= np.argmax(y_true, axis=1) y_pred = np.argmax(y_pred, axis=1) true_taxonID = [self.label_names[x] for x in", "label_names, submodel=submodel) #callback_list.append(plot_images) if log_dir is not None: print(\"saving tensorboard logs at {}\".format(log_dir))", "not submodel in [\"spatial\",\"spectral\"]: confusion_matrix = ConfusionMatrixCallback(experiment=experiment, y_true=y_true, dataset=validation_data, label_names=label_names, submodel=submodel) callback_list.append(confusion_matrix) f1", "experiment self.dataset = dataset self.label_names = label_names self.submodel = submodel def on_train_end(self, epoch,", "y_true = np.concatenate(y_true) y_pred = np.concatenate(y_pred) y_true = np.argmax(y_true, axis=1) y_pred = np.argmax(y_pred,", "= results.predicted.apply(lambda x: self.label_names[x]) #Within site confusion site_lists = self.train_shp.groupby(\"taxonID\").siteID.unique() site_confusion = metrics.site_confusion(y_true", "submodel in [\"spatial\",\"spectral\"]: label = label[0] y_true.append(label) y_true = np.concatenate(y_true) if not submodel", "set of callbacks Args: experiment: a comet experiment object train_data: a tf data", "y_true=y_true, dataset=validation_data, 
label_names=label_names, submodel=submodel) callback_list.append(confusion_matrix) f1 = F1Callback(experiment=experiment, y_true=y_true, eval_dataset=validation_data, label_names=label_names, submodel=submodel, train_shp=train_shp)", "if self.label_names: results[\"true_taxonID\"] = results.true.apply(lambda x: self.label_names[x]) results[\"predicted_taxonID\"] = results.predicted.apply(lambda x: self.label_names[x]) #Within", "self.model.predict(self.dataset) if self.submodel is \"metadata\": name = \"Metadata Confusion Matrix\" elif self.submodel in", "tensorflow.keras.callbacks import Callback, TensorBoard from tensorflow import expand_dims class F1Callback(Callback): def __init__(self, experiment,", "y_true = [] limit = 20 num_images = 0 for data, label in", "to run. If n=4, function will run every 4 epochs y_true: instead of", "Matrix\" else: name = \"Confusion Matrix\" cm = self.experiment.log_confusion_matrix( self.y_true, y_pred, title=name, file_name=", "true_taxonID = [self.label_names[x] for x in y_true] pred_taxonID = [self.label_names[x] for x in", "self.experiment.log_metric(\"MacroF1\", macro) #Log number of predictions to make sure its constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0])", "y_pred = results.predicted_taxonID, scientific_dict=scientific_dict) self.experiment.log_metric(name = \"Within Genus confusion\", value = genus_confusion) #Most", "and species matrix y_pred = self.model.predict(self.eval_dataset) if self.submodel in [\"spectral\",\"spatial\"]: y_pred = y_pred[0]", "in y_true] pred_taxonID = [self.label_names[x] for x in y_pred] counter = 0 for", "= [] sites = [] #gather site and species matrix y_pred = self.model.predict(self.eval_dataset)", "make sure its constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) results = pd.DataFrame({\"true\":np.argmax(self.y_true, 1),\"predicted\":np.argmax(y_pred, 1)}) #assign labels", "self.submodel = submodel self.y_true = y_true def 
on_train_end(self, epoch, logs={}): y_pred = self.model.predict(self.dataset)", "is atleast 20 images images = [] y_pred = [] y_true = []", "x: x.head(1).values.tolist()).to_dict() genus_confusion = metrics.genus_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, scientific_dict=scientific_dict) self.experiment.log_metric(name =", "the true labels since they are not shuffled y_true = [ ] for", "metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"MicroF1\", micro) self.experiment.log_metric(\"MacroF1\", macro) #Log number of predictions to make sure", "1 def create(experiment, train_data, validation_data, train_shp, log_dir=None, label_names=None, submodel=False): \"\"\"Create a set of", "for data, label in self.dataset: if num_images < limit: pred = self.model.predict(data) images.append(data)", "true labels since they are not shuffled y_true = [ ] for data,", "micro = metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"Final MicroF1\", micro) self.experiment.log_metric(\"Final MacroF1\", macro) #Log number of", "= self.model.predict(self.eval_dataset) if self.submodel in [\"spectral\",\"spatial\"]: y_pred = y_pred[0] #F1 macro, micro =", "= results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=domain_lists) self.experiment.log_metric(name = \"Within_domain confusion[training]\", value = domain_confusion)", "= label_names self.submodel = submodel self.n = n self.train_shp = train_shp self.y_true =", "__init__(self, experiment, dataset, label_names, y_true, submodel): self.experiment = experiment self.dataset = dataset self.label_names", "self.experiment.log_metric(\"Final MicroF1\", micro) self.experiment.log_metric(\"Final MacroF1\", macro) #Log number of predictions to make sure", "since they are not shuffled y_true = [ ] for data, label in", "results.predicted_taxonID, site_lists=site_lists) self.experiment.log_metric(name = \"Within_site confusion[training]\", value = site_confusion) 
plot_lists = self.train_shp.groupby(\"taxonID\").plotID.unique() plot_confusion", "\"\"\" #turn off callbacks for metadata callback_list = [] reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5,", "plot_confusion) domain_lists = self.train_shp.groupby(\"taxonID\").domainID.unique() domain_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=domain_lists)", "[] sites = [] #gather site and species matrix y_pred = self.model.predict(self.eval_dataset) if", "\"\"\"Plot sample images with labels annotated\"\"\" #fill until there is atleast 20 images", "function \"\"\" self.experiment = experiment self.eval_dataset = eval_dataset self.label_names = label_names self.submodel =", "y_pred.append(pred[0]) y_true.append(label[0]) else: y_pred.append(pred) y_true.append(label) num_images += label.shape[0] else: break images = np.vstack(images)", "= experiment self.dataset = dataset self.label_names = label_names self.submodel = submodel self.y_true =", "in zip(true_taxonID, pred_taxonID, images): figure = visualize.plot_prediction(image=image, prediction=prediction, label=label) self.experiment.log_figure(figure_name=\"{}_{}\".format(label, counter)) counter +=", "import visualize from tensorflow.keras.callbacks import ReduceLROnPlateau from tensorflow.keras.callbacks import Callback, TensorBoard from tensorflow", "validation_data, train_shp, log_dir=None, label_names=None, submodel=False): \"\"\"Create a set of callbacks Args: experiment: a", "run. 
If n=4, function will run every 4 epochs y_true: instead of iterating", "= [] reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, min_delta=0.1, min_lr=0.00001, verbose=1) callback_list.append(reduce_lr) #Get the", "y_true: instead of iterating through the dataset every time, just do it once", "y_true.append(label) num_images += label.shape[0] else: break images = np.vstack(images) y_true = np.concatenate(y_true) y_pred", "self.train_shp.groupby(\"taxonID\").domainID.unique() domain_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=domain_lists) self.experiment.log_metric(name = \"Within_domain", "epoch, logs={}): if not epoch % self.n == 0: return None y_pred =", "log_dir=None, label_names=None, submodel=False): \"\"\"Create a set of callbacks Args: experiment: a comet experiment", "ConfusionMatrixCallback(experiment=experiment, y_true=y_true, dataset=validation_data, label_names=label_names, submodel=submodel) callback_list.append(confusion_matrix) f1 = F1Callback(experiment=experiment, y_true=y_true, eval_dataset=validation_data, label_names=label_names, submodel=submodel,", "visualize.plot_prediction(image=image, prediction=prediction, label=label) self.experiment.log_figure(figure_name=\"{}_{}\".format(label, counter)) counter += 1 def create(experiment, train_data, validation_data, train_shp,", "label_names, y_true, submodel): self.experiment = experiment self.dataset = dataset self.label_names = label_names self.submodel", "def on_train_end(self, epoch, logs={}): y_pred = self.model.predict(self.dataset) if self.submodel is \"metadata\": name =", "image in zip(true_taxonID, pred_taxonID, images): figure = visualize.plot_prediction(image=image, prediction=prediction, label=label) self.experiment.log_figure(figure_name=\"{}_{}\".format(label, counter)) counter", "y_pred] counter = 0 for label, prediction, image in zip(true_taxonID, pred_taxonID, images): figure", "TensorBoard 
from tensorflow import expand_dims class F1Callback(Callback): def __init__(self, experiment, eval_dataset, y_true, label_names,", "import os import numpy as np import pandas as pd from datetime import", "= label_names self.submodel = submodel def on_train_end(self, epoch, logs={}): \"\"\"Plot sample images with", "experiment: a comet experiment object train_data: a tf data object to generate data", "= self.train_shp.groupby(\"taxonID\").siteID.unique() site_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=site_lists) self.experiment.log_metric(name =", "x.head(1).values.tolist()).to_dict() genus_confusion = metrics.genus_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, scientific_dict=scientific_dict) self.experiment.log_metric(name = \"Within", "np.vstack(images) y_true = np.concatenate(y_true) y_pred = np.concatenate(y_pred) y_true = np.argmax(y_true, axis=1) y_pred =", "run every 4 epochs y_true: instead of iterating through the dataset every time,", "train_shp: the original shapefile for the train data to check site error \"\"\"", "y_true def on_train_end(self, epoch, logs={}): y_pred = self.model.predict(self.dataset) if self.submodel is \"metadata\": name", "from tensorflow.keras.callbacks import Callback, TensorBoard from tensorflow import expand_dims class F1Callback(Callback): def __init__(self,", "1),\"predicted\":np.argmax(y_pred, 1)}) #assign labels if self.label_names: results[\"true_taxonID\"] = results.true.apply(lambda x: self.label_names[x]) results[\"predicted_taxonID\"] =", "every time, just do it once and pass the true labels to the", "comet experiment object train_data: a tf data object to generate data validation_data: a", "f1 = F1Callback(experiment=experiment, y_true=y_true, eval_dataset=validation_data, label_names=label_names, submodel=submodel, train_shp=train_shp) callback_list.append(f1) #if submodel is None:", "= site_confusion) plot_lists = 
self.train_shp.groupby(\"taxonID\").plotID.unique() plot_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID,", "from datetime import datetime from DeepTreeAttention.utils import metrics from DeepTreeAttention.visualization import visualize from", "pred = self.model.predict(data) images.append(data) if self.submodel: y_pred.append(pred[0]) y_true.append(label[0]) else: y_pred.append(pred) y_true.append(label) num_images +=", "to make sure its constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) results = pd.DataFrame({\"true\":np.argmax(self.y_true, 1),\"predicted\":np.argmax(y_pred, 1)}) #assign", "submodel self.y_true = y_true def on_train_end(self, epoch, logs={}): y_pred = self.model.predict(self.dataset) if self.submodel", "self.train_shp.groupby(\"taxonID\").plotID.unique() plot_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=plot_lists) self.experiment.log_metric(name = \"Within_plot", "atleast 20 images images = [] y_pred = [] y_true = [] limit", "dataset self.label_names = label_names self.submodel = submodel def on_train_end(self, epoch, logs={}): \"\"\"Plot sample", "data object to generate data validation_data: a tf data object to generate data", "__init__(self, experiment, eval_dataset, y_true, label_names, submodel, train_shp, n=10): \"\"\"F1 callback Args: n: number", "= experiment self.dataset = dataset self.label_names = label_names self.submodel = submodel def on_train_end(self,", "figure = visualize.plot_prediction(image=image, prediction=prediction, label=label) self.experiment.log_figure(figure_name=\"{}_{}\".format(label, counter)) counter += 1 def create(experiment, train_data,", "min_lr=0.00001, verbose=1) callback_list.append(reduce_lr) #Get the true labels since they are not shuffled y_true", "y_true=y_true, eval_dataset=validation_data, label_names=label_names, submodel=submodel, train_shp=train_shp) 
callback_list.append(f1) #if submodel is None: #plot_images = ImageCallback(experiment,", "self.submodel in [\"ensemble\"]: name = \"Ensemble Matrix\" else: name = \"Confusion Matrix\" cm", "instead of iterating through the dataset every time, just do it once and", "label_names=label_names, submodel=submodel, train_shp=train_shp) callback_list.append(f1) #if submodel is None: #plot_images = ImageCallback(experiment, validation_data, label_names,", "domain_confusion) #Genus of all the different taxonID variants should be the same, take", "= \"Ensemble Matrix\" else: name = \"Confusion Matrix\" cm = self.experiment.log_confusion_matrix( self.y_true, y_pred,", "self.y_true = y_true def on_train_end(self, logs={}): y_pred = [] sites = [] #gather", "[\"spatial\",\"spectral\"]: confusion_matrix = ConfusionMatrixCallback(experiment=experiment, y_true=y_true, dataset=validation_data, label_names=label_names, submodel=submodel) callback_list.append(confusion_matrix) f1 = F1Callback(experiment=experiment, y_true=y_true,", "#callback_list.append(plot_images) if log_dir is not None: print(\"saving tensorboard logs at {}\".format(log_dir)) tensorboard =", "labels annotated\"\"\" #fill until there is atleast 20 images images = [] y_pred", "[\"ensemble\"]: name = \"Ensemble Matrix\" else: name = \"Confusion Matrix\" cm = self.experiment.log_confusion_matrix(", "self.train_shp = train_shp self.y_true = y_true def on_train_end(self, logs={}): y_pred = [] sites", "results.true_taxonID, y_pred = results.predicted_taxonID, scientific_dict=scientific_dict) self.experiment.log_metric(name = \"Within Genus confusion\", value = genus_confusion)", "= self.model.predict(data) images.append(data) if self.submodel: y_pred.append(pred[0]) y_true.append(label[0]) else: y_pred.append(pred) y_true.append(label) num_images += label.shape[0]", "train data to check site error \"\"\" #turn off callbacks for metadata callback_list", "confusion_matrix = ConfusionMatrixCallback(experiment=experiment, 
y_true=y_true, dataset=validation_data, label_names=label_names, submodel=submodel) callback_list.append(confusion_matrix) f1 = F1Callback(experiment=experiment, y_true=y_true, eval_dataset=validation_data,", "label_names self.submodel = submodel self.y_true = y_true def on_train_end(self, epoch, logs={}): y_pred =", "visualize from tensorflow.keras.callbacks import ReduceLROnPlateau from tensorflow.keras.callbacks import Callback, TensorBoard from tensorflow import", "= \"Within_domain confusion[training]\", value = domain_confusion) #Genus of all the different taxonID variants", "label.shape[0] else: break images = np.vstack(images) y_true = np.concatenate(y_true) y_pred = np.concatenate(y_pred) y_true", "None: print(\"saving tensorboard logs at {}\".format(log_dir)) tensorboard = TensorBoard(log_dir=log_dir, histogram_freq=0, profile_batch=30) callback_list.append(tensorboard) return", "title=name, file_name= name, labels=self.label_names, max_categories=90, max_example_per_cell=1) class ImageCallback(Callback): def __init__(self, experiment, dataset, label_names,", "y_true, label_names, submodel, train_shp, n=10): \"\"\"F1 callback Args: n: number of epochs to", "[\"spatial\",\"spectral\"]: label = label[0] y_true.append(label) y_true = np.concatenate(y_true) if not submodel in [\"spatial\",\"spectral\"]:", "= self.train_shp.groupby(\"taxonID\").domainID.unique() domain_confusion = metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=domain_lists) self.experiment.log_metric(name =", "= metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=domain_lists) self.experiment.log_metric(name = \"Within_domain confusion[training]\", value", "scientific_dict=scientific_dict) self.experiment.log_metric(name = \"Within Genus confusion\", value = genus_confusion) #Most confused most_confused =", "= [self.label_names[x] for x in y_true] pred_taxonID = [self.label_names[x] for x in 
y_pred]", "import pandas as pd from datetime import datetime from DeepTreeAttention.utils import metrics from", "= \"Within_plot confusion[training]\", value = plot_confusion) domain_lists = self.train_shp.groupby(\"taxonID\").domainID.unique() domain_confusion = metrics.site_confusion(y_true =", "= label[0] y_true.append(label) y_true = np.concatenate(y_true) if not submodel in [\"spatial\",\"spectral\"]: confusion_matrix =", "1)}) #assign labels if self.label_names: results[\"true_taxonID\"] = results.true.apply(lambda x: self.label_names[x]) results[\"predicted_taxonID\"] = results.predicted.apply(lambda", "= np.concatenate(y_true) if not submodel in [\"spatial\",\"spectral\"]: confusion_matrix = ConfusionMatrixCallback(experiment=experiment, y_true=y_true, dataset=validation_data, label_names=label_names,", "results[\"true_taxonID\"] = results.true.apply(lambda x: self.label_names[x]) results[\"predicted_taxonID\"] = results.predicted.apply(lambda x: self.label_names[x]) #Within site confusion", "genus_confusion) #Most confused most_confused = results.groupby([\"true_taxonID\",\"predicted_taxonID\"]).size().reset_index(name=\"count\") most_confused = most_confused[~(most_confused.true_taxonID == most_confused.predicted_taxonID)].sort_values(\"count\", ascending=False) self.experiment.log_table(\"most_confused.csv\",most_confused.values)", "self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) class ConfusionMatrixCallback(Callback): def __init__(self, experiment, dataset, label_names, y_true, submodel): self.experiment =", "epoch, logs={}): \"\"\"Plot sample images with labels annotated\"\"\" #fill until there is atleast", "validation_data, label_names, submodel=submodel) #callback_list.append(plot_images) if log_dir is not None: print(\"saving tensorboard logs at", "callback_list.append(reduce_lr) #Get the true labels since they are not shuffled y_true = [", "results.true_taxonID, y_pred = results.predicted_taxonID, site_lists=domain_lists) 
self.experiment.log_metric(name = \"Within_domain confusion[training]\", value = domain_confusion) #Genus", "on_train_end(self, epoch, logs={}): y_pred = self.model.predict(self.dataset) if self.submodel is \"metadata\": name = \"Metadata", "dataset self.label_names = label_names self.submodel = submodel self.y_true = y_true def on_train_end(self, epoch,", "the train data to check site error \"\"\" #turn off callbacks for metadata", "= 0 for label, prediction, image in zip(true_taxonID, pred_taxonID, images): figure = visualize.plot_prediction(image=image,", "F1Callback(Callback): def __init__(self, experiment, eval_dataset, y_true, label_names, submodel, train_shp, n=10): \"\"\"F1 callback Args:", "[ ] for data, label in validation_data: if submodel in [\"spatial\",\"spectral\"]: label =", "species matrix y_pred = self.model.predict(self.eval_dataset) if self.submodel in [\"spectral\",\"spatial\"]: y_pred = y_pred[0] #F1", "epoch % self.n == 0: return None y_pred = [] sites = []", "of predictions to make sure its constant self.experiment.log_metric(\"Prediction samples\",y_pred.shape[0]) results = pd.DataFrame({\"true\":np.argmax(self.y_true, 1),\"predicted\":np.argmax(y_pred,", "= ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, min_delta=0.1, min_lr=0.00001, verbose=1) callback_list.append(reduce_lr) #Get the true labels since", "#Most confused most_confused = results.groupby([\"true_taxonID\",\"predicted_taxonID\"]).size().reset_index(name=\"count\") most_confused = most_confused[~(most_confused.true_taxonID == most_confused.predicted_taxonID)].sort_values(\"count\", ascending=False) self.experiment.log_table(\"most_confused.csv\",most_confused.values) def", "n=4, function will run every 4 epochs y_true: instead of iterating through the", "[] y_pred = [] y_true = [] limit = 20 num_images = 0", "= [ ] for data, label in validation_data: if submodel in [\"spatial\",\"spectral\"]: label", "images = np.vstack(images) y_true = np.concatenate(y_true) 
y_pred = np.concatenate(y_pred) y_true = np.argmax(y_true, axis=1)", "= label_names self.submodel = submodel self.y_true = y_true def on_train_end(self, epoch, logs={}): y_pred", "= np.concatenate(y_true) y_pred = np.concatenate(y_pred) y_true = np.argmax(y_true, axis=1) y_pred = np.argmax(y_pred, axis=1)", "tf data object to generate data train_shp: the original shapefile for the train", "is not None: print(\"saving tensorboard logs at {}\".format(log_dir)) tensorboard = TensorBoard(log_dir=log_dir, histogram_freq=0, profile_batch=30)", "the original shapefile for the train data to check site error \"\"\" #turn", "If n=4, function will run every 4 epochs y_true: instead of iterating through", "all the different taxonID variants should be the same, take the first scientific_dict", "== 0: return None y_pred = [] sites = [] #gather site and", "min_delta=0.1, min_lr=0.00001, verbose=1) callback_list.append(reduce_lr) #Get the true labels since they are not shuffled", "#Callbacks \"\"\"Create training callbacks\"\"\" import os import numpy as np import pandas as", "training callbacks\"\"\" import os import numpy as np import pandas as pd from", "submodel is None: #plot_images = ImageCallback(experiment, validation_data, label_names, submodel=submodel) #callback_list.append(plot_images) if log_dir is", "self.n == 0: return None y_pred = [] sites = [] #gather site", "pred_taxonID, images): figure = visualize.plot_prediction(image=image, prediction=prediction, label=label) self.experiment.log_figure(figure_name=\"{}_{}\".format(label, counter)) counter += 1 def", "pd.DataFrame({\"true\":np.argmax(self.y_true, 1),\"predicted\":np.argmax(y_pred, 1)}) #assign labels if self.label_names: results[\"true_taxonID\"] = results.true.apply(lambda x: self.label_names[x]) results[\"predicted_taxonID\"]", "of all the different taxonID variants should be the same, take the first", "= metrics.site_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, 
site_lists=site_lists) self.experiment.log_metric(name = \"Within_site confusion[training]\", value", "= submodel def on_train_end(self, epoch, logs={}): \"\"\"Plot sample images with labels annotated\"\"\" #fill", "site_lists=site_lists) self.experiment.log_metric(name = \"Within_site confusion[training]\", value = site_confusion) plot_lists = self.train_shp.groupby(\"taxonID\").plotID.unique() plot_confusion =", "shuffled y_true = [ ] for data, label in validation_data: if submodel in", "expand_dims class F1Callback(Callback): def __init__(self, experiment, eval_dataset, y_true, label_names, submodel, train_shp, n=10): \"\"\"F1", "= metrics.genus_confusion(y_true = results.true_taxonID, y_pred = results.predicted_taxonID, scientific_dict=scientific_dict) self.experiment.log_metric(name = \"Within Genus confusion\",", "y_pred[0] #F1 macro, micro = metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"MicroF1\", micro) self.experiment.log_metric(\"MacroF1\", macro) #Log number", "if submodel in [\"spatial\",\"spectral\"]: label = label[0] y_true.append(label) y_true = np.concatenate(y_true) if not", "not None: print(\"saving tensorboard logs at {}\".format(log_dir)) tensorboard = TensorBoard(log_dir=log_dir, histogram_freq=0, profile_batch=30) callback_list.append(tensorboard)", "= y_pred[0] #F1 macro, micro = metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"Final MicroF1\", micro) self.experiment.log_metric(\"Final MacroF1\",", "y_pred) self.experiment.log_metric(\"Final MicroF1\", micro) self.experiment.log_metric(\"Final MacroF1\", macro) #Log number of predictions to make", "a comet experiment object train_data: a tf data object to generate data validation_data:", "data train_shp: the original shapefile for the train data to check site error", "if self.submodel is \"metadata\": name = \"Metadata Confusion Matrix\" elif self.submodel in [\"ensemble\"]:", "#fill until there is atleast 20 images images = [] y_pred = []", 
"label=label) self.experiment.log_figure(figure_name=\"{}_{}\".format(label, counter)) counter += 1 def create(experiment, train_data, validation_data, train_shp, log_dir=None, label_names=None,", "\"\"\"Create training callbacks\"\"\" import os import numpy as np import pandas as pd", "callback_list = [] reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, min_delta=0.1, min_lr=0.00001, verbose=1) callback_list.append(reduce_lr) #Get", "= y_pred[0] #F1 macro, micro = metrics.f1_scores(self.y_true, y_pred) self.experiment.log_metric(\"MicroF1\", micro) self.experiment.log_metric(\"MacroF1\", macro) #Log", "= np.argmax(y_pred, axis=1) true_taxonID = [self.label_names[x] for x in y_true] pred_taxonID = [self.label_names[x]" ]
[ "# program template for mini-project 0 # Modify the print statement according to", "the print statement according to # the mini-project instructions #CodeSkulptor link: #http://www.codeskulptor.org/#user40_lXiJqEZDdrSdSu5.py print", "template for mini-project 0 # Modify the print statement according to # the", "Modify the print statement according to # the mini-project instructions #CodeSkulptor link: #http://www.codeskulptor.org/#user40_lXiJqEZDdrSdSu5.py", "statement according to # the mini-project instructions #CodeSkulptor link: #http://www.codeskulptor.org/#user40_lXiJqEZDdrSdSu5.py print \"We want...", "0 # Modify the print statement according to # the mini-project instructions #CodeSkulptor", "to # the mini-project instructions #CodeSkulptor link: #http://www.codeskulptor.org/#user40_lXiJqEZDdrSdSu5.py print \"We want... a shrubbery!\"", "program template for mini-project 0 # Modify the print statement according to #", "# Modify the print statement according to # the mini-project instructions #CodeSkulptor link:", "for mini-project 0 # Modify the print statement according to # the mini-project", "mini-project 0 # Modify the print statement according to # the mini-project instructions", "according to # the mini-project instructions #CodeSkulptor link: #http://www.codeskulptor.org/#user40_lXiJqEZDdrSdSu5.py print \"We want... a", "print statement according to # the mini-project instructions #CodeSkulptor link: #http://www.codeskulptor.org/#user40_lXiJqEZDdrSdSu5.py print \"We" ]
[ "import get_type_data class TestFormat(object): def test_get_type_data_with_format(self): results = get_type_data('troff') assert results['identifier'] == 'TROFF'", "assert results['description'] == 'The display text format type for the troff format.' def", "results['identifier'] == 'TROFF' assert results['domain'] == 'DisplayText Formats' assert results['display_name'] == 'troff Format", "assert results['display_name'] == 'troff Format Type' assert results['display_label'] == 'troff' assert results['description'] ==", "Format Type' assert results['display_label'] == 'troff' assert results['description'] == 'The display text format", "results['display_name'] == 'troff Format Type' assert results['display_label'] == 'troff' assert results['description'] == 'The", "import pytest from dlkit.abstract_osid.osid import errors from dlkit.primordium.locale.types.format import get_type_data class TestFormat(object): def", "results['display_label'] == 'troff' assert results['description'] == 'The display text format type for the", "from dlkit.primordium.locale.types.format import get_type_data class TestFormat(object): def test_get_type_data_with_format(self): results = get_type_data('troff') assert results['identifier']", "<reponame>UOC/dlkit<gh_stars>1-10 import pytest from dlkit.abstract_osid.osid import errors from dlkit.primordium.locale.types.format import get_type_data class TestFormat(object):", "== 'troff Format Type' assert results['display_label'] == 'troff' assert results['description'] == 'The display", "assert results['domain'] == 'DisplayText Formats' assert results['display_name'] == 'troff Format Type' assert results['display_label']", "test_get_type_data_with_format(self): results = get_type_data('troff') assert results['identifier'] == 'TROFF' assert results['domain'] == 'DisplayText Formats'", "get_type_data class TestFormat(object): def test_get_type_data_with_format(self): results = get_type_data('troff') assert results['identifier'] == 'TROFF' 
assert", "== 'DisplayText Formats' assert results['display_name'] == 'troff Format Type' assert results['display_label'] == 'troff'", "display text format type for the troff format.' def test_unknown_type(self): with pytest.raises(errors.NotFound): get_type_data('foo')", "dlkit.primordium.locale.types.format import get_type_data class TestFormat(object): def test_get_type_data_with_format(self): results = get_type_data('troff') assert results['identifier'] ==", "import errors from dlkit.primordium.locale.types.format import get_type_data class TestFormat(object): def test_get_type_data_with_format(self): results = get_type_data('troff')", "from dlkit.abstract_osid.osid import errors from dlkit.primordium.locale.types.format import get_type_data class TestFormat(object): def test_get_type_data_with_format(self): results", "results['domain'] == 'DisplayText Formats' assert results['display_name'] == 'troff Format Type' assert results['display_label'] ==", "'The display text format type for the troff format.' 
def test_unknown_type(self): with pytest.raises(errors.NotFound):", "get_type_data('troff') assert results['identifier'] == 'TROFF' assert results['domain'] == 'DisplayText Formats' assert results['display_name'] ==", "'troff Format Type' assert results['display_label'] == 'troff' assert results['description'] == 'The display text", "pytest from dlkit.abstract_osid.osid import errors from dlkit.primordium.locale.types.format import get_type_data class TestFormat(object): def test_get_type_data_with_format(self):", "def test_get_type_data_with_format(self): results = get_type_data('troff') assert results['identifier'] == 'TROFF' assert results['domain'] == 'DisplayText", "'TROFF' assert results['domain'] == 'DisplayText Formats' assert results['display_name'] == 'troff Format Type' assert", "'troff' assert results['description'] == 'The display text format type for the troff format.'", "assert results['display_label'] == 'troff' assert results['description'] == 'The display text format type for", "== 'The display text format type for the troff format.' 
def test_unknown_type(self): with", "errors from dlkit.primordium.locale.types.format import get_type_data class TestFormat(object): def test_get_type_data_with_format(self): results = get_type_data('troff') assert", "results = get_type_data('troff') assert results['identifier'] == 'TROFF' assert results['domain'] == 'DisplayText Formats' assert", "'DisplayText Formats' assert results['display_name'] == 'troff Format Type' assert results['display_label'] == 'troff' assert", "assert results['identifier'] == 'TROFF' assert results['domain'] == 'DisplayText Formats' assert results['display_name'] == 'troff", "= get_type_data('troff') assert results['identifier'] == 'TROFF' assert results['domain'] == 'DisplayText Formats' assert results['display_name']", "dlkit.abstract_osid.osid import errors from dlkit.primordium.locale.types.format import get_type_data class TestFormat(object): def test_get_type_data_with_format(self): results =", "== 'troff' assert results['description'] == 'The display text format type for the troff", "results['description'] == 'The display text format type for the troff format.' def test_unknown_type(self):", "TestFormat(object): def test_get_type_data_with_format(self): results = get_type_data('troff') assert results['identifier'] == 'TROFF' assert results['domain'] ==", "class TestFormat(object): def test_get_type_data_with_format(self): results = get_type_data('troff') assert results['identifier'] == 'TROFF' assert results['domain']", "Formats' assert results['display_name'] == 'troff Format Type' assert results['display_label'] == 'troff' assert results['description']", "Type' assert results['display_label'] == 'troff' assert results['description'] == 'The display text format type", "== 'TROFF' assert results['domain'] == 'DisplayText Formats' assert results['display_name'] == 'troff Format Type'" ]
[ "else: fields[\"payment_account\"] = fields[\"payment_account\"].replace(\" \", \"\") # check amount try: orig = fields[\"amount\"]", "\"message\": errmsg}]})\\ , 400 if transfer_amount > 0 and \"top up\" in fields[\"direction\"]:", "str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def target_balance_external(): \"\"\" Execute a target balance external action \"\"\" data =", "paymentmsg = { \"amount\": { \"value\": \"{:.2f}\".format(transfer_amount), \"currency\": \"EUR\" }, \"counterparty_alias\": { \"type\":", "ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 print(paymentmsg) # get", "\"value\": fields[\"account\"], \"name\": \"x\" }, \"description\": fields[\"description\"] } account = fields[\"other_account\"] elif transfer_amount", "= bunq.post(\"v1/user/{}/monetary-account/{}/payment\" .format(config[\"user_id\"], accid), paymentmsg) else: paymentmsg = {\"number_of_required_accepts\": 1, \"entries\": [paymentmsg]} result", "in data: errmsg = \"missing actionFields\" print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\",", "ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # construct payment", "execute the payment if fields[\"payment_type\"] == \"DIRECT\": result = bunq.post(\"v1/user/{}/monetary-account/{}/payment\" .format(config[\"user_id\"], accid), paymentmsg)", "bunq.post(\"v1/user/{}/monetary-account/{}/draft-payment\" .format(config[\"user_id\"], accid), paymentmsg) print(result) if \"Error\" in result: return json.dumps({\"errors\": [{ \"status\":", "internal action \"\"\" data = request.get_json() print(\"[target_balance_internal] input: {}\".format(json.dumps(data))) if \"actionFields\" not in", "\"description\": fields[\"description\"] } account = fields[\"account\"] else: errmsg = \"No transfer needed, balance", "= \"PHONE_NUMBER\" elif bmvalue[:2].isalpha() and 
bmvalue[2:4].isdecimal(): bmtype = \"IBAN\" else: errmsg = \"Unrecognized", "\"value\": \"{:.2f}\".format(-transfer_amount), \"currency\": \"EUR\" }, \"counterparty_alias\": { \"type\": \"IBAN\", \"value\": fields[\"payment_account\"], \"name\": fields[\"payment_name\"]", "\"NL42BUNQ0123456789\": return json.dumps({\"data\": [{\"id\": uuid.uuid4().hex}]}) # retrieve balance config = bunq.retrieve_config() if fields[\"payment_type\"]", "isinstance(balance, str): errmsg = balance print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\":", "not in balances: return \"Account balance not found \"+account return \"Account balance not", "target balance external action \"\"\" data = request.get_json() print(\"[target_balance_external] input: {}\".format(json.dumps(data))) if \"actionFields\"", "errmsg}]})\\ , 400 print(result) if \"Error\" in result: return json.dumps({\"errors\": [{ \"status\": \"SKIP\",", "fields[\"direction\"]: bmvalue = fields[\"request_phone_email_iban\"].replace(\" \", \"\") if \"@\" in bmvalue: bmtype = \"EMAIL\"", "in balances: return balances[account] if account in balances and account2 in balances: return", "ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # get account", "config[\"permissions\"]: if \"PaymentRequest\" in config[\"permissions\"]\\ [fields[\"account\"]]: enabled = config[\"permissions\"][fields[\"account\"]]\\ [\"PaymentRequest\"] else: accid, enabled", "\"value\": \"{:.2f}\".format(transfer_amount), \"currency\": \"EUR\", }, \"counterparty_alias\": { \"type\": bmtype, \"name\": bmvalue, \"value\": bmvalue", "fields[\"amount\"] = -1 if fields[\"amount\"] <= 0: return \"only positive amounts allowed: \"+orig", "\"IBAN\", \"value\": fields[\"payment_account\"], \"name\": fields[\"payment_name\"] }, \"description\": fields[\"payment_description\"] } print(paymentmsg) paymentmsg = {\"number_of_required_accepts\":", 
"str): errmsg = balance print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\", "account2 in balances: return balances[account], balances[account2] if account not in balances: return \"Account", "internal/external actions \"\"\" import json import uuid from flask import request import bunq", "\"No transfer needed, balance already ok\" print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\",", "bmvalue[:2].isalpha() and bmvalue[2:4].isdecimal(): bmtype = \"IBAN\" else: errmsg = \"Unrecognized as email, phone", "accounts \"\"\" balances = bunq.retrieve_account_balances(config) if account2 is None and account in balances:", "\"missing actionFields\" print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400", "print(paymentmsg) # get id and check permissions if fields[\"payment_type\"] == \"DIRECT\": accid, enabled", "\"counterparty_alias\": { \"type\": \"IBAN\", \"value\": fields[\"other_account\"], \"name\": \"x\" }, \"description\": fields[\"description\"] } account", "\"status\": \"SKIP\", \"message\": result[\"Error\"][0][\"error_description\"] }]}), 400 return json.dumps({\"data\": [{ \"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def target_balance_external():", "in config: if fields[\"account\"] in config[\"permissions\"]: if \"PaymentRequest\" in config[\"permissions\"]\\ [fields[\"account\"]]: enabled =", "{ \"amount_inquired\": { \"value\": \"{:.2f}\".format(transfer_amount), \"currency\": \"EUR\", }, \"counterparty_alias\": { \"type\": bmtype, \"name\":", "= bunq.retrieve_account_balances(config) if account2 is None and account in balances: return balances[account] if", "not in data: errmsg = \"missing actionFields\" print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\":", "== \"NL42BUNQ0123456789\": 
return json.dumps({\"data\": [{\"id\": uuid.uuid4().hex}]}) # retrieve balance config = bunq.retrieve_config() if", "balance, balance2 = balance transfer_amount = fields[\"amount\"] - balance if transfer_amount > balance2:", "data: errmsg = \"missing actionFields\" print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\":", "is None: errmsg = \"unknown account: \"+account if not enabled: errmsg = \"Payment", "in balances: return \"Account balance not found \"+account return \"Account balance not found", "import request import bunq import payment def target_balance_internal(): \"\"\" Execute a target balance", "uuid from flask import request import bunq import payment def target_balance_internal(): \"\"\" Execute", "errmsg}]})\\ , 400 # construct payment message if \"{:.2f}\".format(fields[\"amount\"]) == \"0.00\": errmsg =", "bunq import payment def target_balance_internal(): \"\"\" Execute a target balance internal action \"\"\"", "\"status\": \"SKIP\", \"message\": result[\"Error\"][0][\"error_description\"] }]}), 400 return json.dumps({\"data\": [{ \"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def check_fields(internal,", "- balance if transfer_amount > balance2: transfer_amount = balance2 else: balance = get_balance(config,", "fields[\"payment_name\"] }, \"description\": fields[\"payment_description\"] } print(paymentmsg) paymentmsg = {\"number_of_required_accepts\": 1, \"entries\": [paymentmsg]} result", "print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 print(paymentmsg) #", "ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # execute the", "for test payments if fields[\"account\"] == \"NL42BUNQ0123456789\": return json.dumps({\"data\": [{\"id\": uuid.uuid4().hex}]}) # retrieve", "a target balance external action \"\"\" data = request.get_json() 
print(\"[target_balance_external] input: {}\".format(json.dumps(data))) if", "errmsg = check_fields(False, fields) if errmsg: print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\",", "paymentmsg) else: paymentmsg = {\"number_of_required_accepts\": 1, \"entries\": [paymentmsg]} result = bunq.post(\"v1/user/{}/monetary-account/{}/draft-payment\" .format(config[\"user_id\"], accid),", "400 # get account id and check permission if transfer_amount > 0: accid", "\"skim\" in fields[\"direction\"]: paymentmsg = { \"amount\": { \"value\": \"{:.2f}\".format(-transfer_amount), \"currency\": \"EUR\" },", "balance already ok\" print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ ,", "False if \"permissions\" in config: if fields[\"account\"] in config[\"permissions\"]: if \"PaymentRequest\" in config[\"permissions\"]\\", "result = bunq.post(\"v1/user/{}/monetary-account/{}/request-inquiry\"\\ .format(config[\"user_id\"], accid), msg, config) elif transfer_amount < 0 and \"skim\"", "json.dumps({\"data\": [{ \"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def target_balance_external(): \"\"\" Execute a target balance external action", "orig = fields[\"amount\"] fields[\"amount\"] = float(fields[\"amount\"]) except ValueError: fields[\"amount\"] = -1 if fields[\"amount\"]", "check for zero transfer if \"{:.2f}\".format(fields[\"amount\"]) == \"0.00\": errmsg = \"No transfer needed,", "\"currency\": \"EUR\" }, \"counterparty_alias\": { \"type\": \"IBAN\", \"value\": fields[\"account\"], \"name\": \"x\" }, \"description\":", "payment.check_source_account(False, True, config, fields[\"account\"]) if accid is None: errmsg = \"unknown account: \"+fields[\"account\"]", "transfer_amount < 0 and \"skim\" in fields[\"direction\"]: paymentmsg = { \"amount\": { \"value\":", "email, phone or iban: \"+bmvalue print(\"[request_inquiry] ERROR: \"+errmsg) 
return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\":\\", "balance internal action \"\"\" data = request.get_json() print(\"[target_balance_internal] input: {}\".format(json.dumps(data))) if \"actionFields\" not", "\"SKIP\", \"message\":\\ errmsg}]}), 400 msg = { \"amount_inquired\": { \"value\": \"{:.2f}\".format(transfer_amount), \"currency\": \"EUR\",", "return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 transfer_amount = fields[\"amount\"] - balance", ", 400 transfer_amount = fields[\"amount\"] - balance # check for zero transfer if", "\"{:.2f}\".format(transfer_amount), \"currency\": \"EUR\", }, \"counterparty_alias\": { \"type\": bmtype, \"name\": bmvalue, \"value\": bmvalue },", "\"\") if \"@\" in bmvalue: bmtype = \"EMAIL\" elif bmvalue[:1] == \"+\" and", "400 print(result) if \"Error\" in result: return json.dumps({\"errors\": [{ \"status\": \"SKIP\", \"message\": result[\"Error\"][0][\"error_description\"]", "balance internal/external actions \"\"\" import json import uuid from flask import request import", "{ \"value\": \"{:.2f}\".format(transfer_amount), \"currency\": \"EUR\", }, \"counterparty_alias\": { \"type\": bmtype, \"name\": bmvalue, \"value\":", "\"SKIP\", \"message\": errmsg}]})\\ , 400 print(paymentmsg) # get id and check permissions if", "\"IBAN\", \"value\": fields[\"account\"], \"name\": \"x\" }, \"description\": fields[\"description\"] } account = fields[\"other_account\"] elif", "[{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # get account id and check permission", "data: errmsg = \"missing actionFields\" print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\":", "\"missing actionFields\" print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400", "> balance2: transfer_amount = balance2 else: balance = get_balance(config, 
fields[\"account\"]) if isinstance(balance, float):", "= bunq.retrieve_config() if fields[\"payment_type\"] == \"DIRECT\": balance = get_balance(config, fields[\"account\"], fields[\"other_account\"]) if isinstance(balance,", "balance if isinstance(balance, str): errmsg = balance print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\":", "balances[account], balances[account2] if account not in balances: return \"Account balance not found \"+account", "msg = { \"amount_inquired\": { \"value\": \"{:.2f}\".format(transfer_amount), \"currency\": \"EUR\", }, \"counterparty_alias\": { \"type\":", "\"direction\", \"payment_type\", \"description\"] else: expected_fields = [\"account\", \"amount\", \"direction\", \"payment_account\", \"payment_name\", \"payment_description\", \"request_phone_email_iban\",", "\"unknown account: \"+account if not enabled: errmsg = \"Payment type not enabled for", "\"direction\", \"payment_account\", \"payment_name\", \"payment_description\", \"request_phone_email_iban\", \"request_description\"] for field in expected_fields: if field not", "send request / execute payment if transfer_amount > 0 and \"top up\" in", "1, \"entries\": [paymentmsg]} result = bunq.post(\"v1/user/{}/monetary-account/{}/draft-payment\" .format(config[\"user_id\"], accid), paymentmsg) else: errmsg = \"No", "= fields[\"payment_account\"].replace(\" \", \"\") # check amount try: orig = fields[\"amount\"] fields[\"amount\"] =", "Execute a target balance external action \"\"\" data = request.get_json() print(\"[target_balance_external] input: {}\".format(json.dumps(data)))", "in config[\"accounts\"]: if acc[\"iban\"] == fields[\"account\"]: accid = acc[\"id\"] enabled = False if", "if errmsg: print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400", "accid = acc[\"id\"] enabled = False if \"permissions\" in config: if fields[\"account\"] in", "fields = 
data[\"actionFields\"] errmsg = check_fields(True, fields) if errmsg: print(\"[target_balance_internal] ERROR: \"+errmsg) return", "data[\"actionFields\"] errmsg = check_fields(False, fields) if errmsg: print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\":", "except ValueError: fields[\"amount\"] = -1 if fields[\"amount\"] <= 0: return \"only positive amounts", "errmsg}]})\\ , 400 fields = data[\"actionFields\"] errmsg = check_fields(True, fields) if errmsg: print(\"[target_balance_internal]", "isinstance(balance, float): transfer_amount = fields[\"amount\"] - balance if isinstance(balance, str): errmsg = balance", "ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 if transfer_amount >", "== \"0.00\": errmsg = \"No transfer needed, balance already ok\" print(\"[target_balance_external] ERROR: \"+errmsg)", "in fields[\"direction\"]: paymentmsg = { \"amount\": { \"value\": \"{:.2f}\".format(transfer_amount), \"currency\": \"EUR\" }, \"counterparty_alias\":", "fields[\"description\"] } account = fields[\"account\"] else: errmsg = \"No transfer needed, balance already", "True, config, account) if accid is None: errmsg = \"unknown account: \"+account if", "== \"0.00\": errmsg = \"No transfer needed, balance already ok\" print(\"[target_balance_internal] ERROR: \"+errmsg)", "balance transfer_amount = fields[\"amount\"] - balance if transfer_amount > balance2: transfer_amount = balance2", "json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 print(result) if \"Error\" in result: return", "print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 fields =", "if transfer_amount > 0: accid = None for acc in config[\"accounts\"]: if acc[\"iban\"]", "= [\"account\", \"amount\", \"other_account\", \"direction\", \"payment_type\", \"description\"] else: expected_fields = 
[\"account\", \"amount\", \"direction\",", "\"actionFields\" not in data: errmsg = \"missing actionFields\" print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\":", "[{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 transfer_amount = fields[\"amount\"] - balance # check", "execute payment if transfer_amount > 0 and \"top up\" in fields[\"direction\"]: bmvalue =", "account, account2=None): \"\"\" Retrieve the balance of one or two accounts \"\"\" balances", "in config[\"permissions\"]: if \"PaymentRequest\" in config[\"permissions\"]\\ [fields[\"account\"]]: enabled = config[\"permissions\"][fields[\"account\"]]\\ [\"PaymentRequest\"] else: accid,", "True, } print(json.dumps(msg)) config = bunq.retrieve_config() result = bunq.post(\"v1/user/{}/monetary-account/{}/request-inquiry\"\\ .format(config[\"user_id\"], accid), msg, config)", "def check_fields(internal, fields): \"\"\" Check the fields \"\"\" # check expected fields if", "config, account) if accid is None: errmsg = \"unknown account: \"+account if not", "ok\" print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 print(result)", "else: errmsg = \"Unrecognized as email, phone or iban: \"+bmvalue print(\"[request_inquiry] ERROR: \"+errmsg)", "errmsg}]})\\ , 400 # get account id and check permission if transfer_amount >", "id and check permissions if fields[\"payment_type\"] == \"DIRECT\": accid, enabled = payment.check_source_account(True, False,", "up\" in fields[\"direction\"]: paymentmsg = { \"amount\": { \"value\": \"{:.2f}\".format(transfer_amount), \"currency\": \"EUR\" },", "\"amount\", \"other_account\", \"direction\", \"payment_type\", \"description\"] else: expected_fields = [\"account\", \"amount\", \"direction\", \"payment_account\", \"payment_name\",", "accid, enabled = payment.check_source_account(False, True, config, fields[\"account\"]) if accid is None: errmsg =", 
"get_balance(config, account, account2=None): \"\"\" Retrieve the balance of one or two accounts \"\"\"", "return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # execute the payment if", "errmsg = \"unknown account: \"+fields[\"account\"] if not enabled: errmsg = \"Not permitted for", "= -1 if fields[\"amount\"] <= 0: return \"only positive amounts allowed: \"+orig return", "paymentmsg = {\"number_of_required_accepts\": 1, \"entries\": [paymentmsg]} result = bunq.post(\"v1/user/{}/monetary-account/{}/draft-payment\" .format(config[\"user_id\"], accid), paymentmsg) print(result)", "[{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # execute the payment if fields[\"payment_type\"] ==", "the target balance internal/external actions \"\"\" import json import uuid from flask import", "= fields[\"amount\"] - balance # check for zero transfer if \"{:.2f}\".format(fields[\"amount\"]) == \"0.00\":", "errmsg = \"unknown account: \"+account if not enabled: errmsg = \"Payment type not", "allowed: \"+orig return None def get_balance(config, account, account2=None): \"\"\" Retrieve the balance of", "[{\"id\": uuid.uuid4().hex}]}) # retrieve balance config = bunq.retrieve_config() if fields[\"payment_type\"] == \"DIRECT\": balance", "in bmvalue: bmtype = \"EMAIL\" elif bmvalue[:1] == \"+\" and bmvalue[1:].isdecimal(): bmtype =", "balance # check for zero transfer if \"{:.2f}\".format(fields[\"amount\"]) == \"0.00\": errmsg = \"No", "json.dumps({\"data\": [{\"id\": uuid.uuid4().hex}]}) # retrieve balance config = bunq.retrieve_config() balance = get_balance(config, fields[\"account\"])", "= acc[\"id\"] enabled = False if \"permissions\" in config: if fields[\"account\"] in config[\"permissions\"]:", "json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 fields = data[\"actionFields\"] errmsg = check_fields(False,", "ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 
400 fields = data[\"actionFields\"]", "config = bunq.retrieve_config() if fields[\"payment_type\"] == \"DIRECT\": balance = get_balance(config, fields[\"account\"], fields[\"other_account\"]) if", "\"SKIP\", \"message\": errmsg}]})\\ , 400 # construct payment message if \"{:.2f}\".format(fields[\"amount\"]) == \"0.00\":", "acc in config[\"accounts\"]: if acc[\"iban\"] == fields[\"account\"]: accid = acc[\"id\"] enabled = False", "account = fields[\"other_account\"] elif transfer_amount < 0 and \"skim\" in fields[\"direction\"]: paymentmsg =", "= fields[\"other_account\"] elif transfer_amount < 0 and \"skim\" in fields[\"direction\"]: paymentmsg = {", "field in expected_fields: if field not in fields: return \"missing field: \"+field #", "input: {}\".format(json.dumps(data))) if \"actionFields\" not in data: errmsg = \"missing actionFields\" print(\"[target_balance_internal] ERROR:", "input: {}\".format(json.dumps(data))) if \"actionFields\" not in data: errmsg = \"missing actionFields\" print(\"[target_balance_external] ERROR:", "= payment.check_source_account(True, False, config, account) else: accid, enabled = payment.check_source_account(False, True, config, account)", "\"description\": fields[\"payment_description\"] } print(paymentmsg) paymentmsg = {\"number_of_required_accepts\": 1, \"entries\": [paymentmsg]} result = bunq.post(\"v1/user/{}/monetary-account/{}/draft-payment\"", "fields[\"account\"] in config[\"permissions\"]: if \"PaymentRequest\" in config[\"permissions\"]\\ [fields[\"account\"]]: enabled = config[\"permissions\"][fields[\"account\"]]\\ [\"PaymentRequest\"] else:", "transfer_amount = fields[\"amount\"] - balance # check for zero transfer if \"{:.2f}\".format(fields[\"amount\"]) ==", "errmsg = balance print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ ,", "errmsg: print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": 
\"SKIP\", \"message\": errmsg}]})\\ , 400 #", "= [\"account\", \"amount\", \"direction\", \"payment_account\", \"payment_name\", \"payment_description\", \"request_phone_email_iban\", \"request_description\"] for field in expected_fields:", "import json import uuid from flask import request import bunq import payment def", "\"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 transfer_amount = fields[\"amount\"] -", "and check permission if transfer_amount > 0: accid = None for acc in", "not in fields: return \"missing field: \"+field # strip spaces from account numbers", "[{ \"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def target_balance_external(): \"\"\" Execute a target balance external action \"\"\"", "\"message\": errmsg}]})\\ , 400 print(paymentmsg) # get id and check permissions if fields[\"payment_type\"]", "float): transfer_amount = fields[\"amount\"] - balance if isinstance(balance, str): errmsg = balance print(\"[target_balance_internal]", "check permission if transfer_amount > 0: accid = None for acc in config[\"accounts\"]:", "json.dumps({\"errors\": [{ \"status\": \"SKIP\", \"message\": result[\"Error\"][0][\"error_description\"] }]}), 400 return json.dumps({\"data\": [{ \"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]})", "<reponame>woudt/bunq2ifttt \"\"\" Target balance Handles the target balance internal/external actions \"\"\" import json", "print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # the", "\"SKIP\", \"message\": errmsg}]})\\ , 400 if transfer_amount > 0 and \"top up\" in", "\"\"\" import json import uuid from flask import request import bunq import payment", "{\"number_of_required_accepts\": 1, \"entries\": [paymentmsg]} result = bunq.post(\"v1/user/{}/monetary-account/{}/draft-payment\" .format(config[\"user_id\"], accid), paymentmsg) else: errmsg =", "errmsg}]}), 400 msg = { 
\"amount_inquired\": { \"value\": \"{:.2f}\".format(transfer_amount), \"currency\": \"EUR\", }, \"counterparty_alias\":", "== fields[\"account\"]: accid = acc[\"id\"] enabled = False if \"permissions\" in config: if", "400 if transfer_amount > 0 and \"top up\" in fields[\"direction\"]: paymentmsg = {", "json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # construct payment message if \"{:.2f}\".format(fields[\"amount\"])", "config[\"accounts\"]: if acc[\"iban\"] == fields[\"account\"]: accid = acc[\"id\"] enabled = False if \"permissions\"", "import uuid from flask import request import bunq import payment def target_balance_internal(): \"\"\"", "ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # the account", "== \"NL42BUNQ0123456789\": return json.dumps({\"data\": [{\"id\": uuid.uuid4().hex}]}) # retrieve balance config = bunq.retrieve_config() balance", "= get_balance(config, fields[\"account\"]) if isinstance(balance, str): errmsg = balance print(\"[target_balance_external] ERROR: \"+errmsg) return", "acc[\"iban\"] == fields[\"account\"]: accid = acc[\"id\"] enabled = False if \"permissions\" in config:", "zero transfer if \"{:.2f}\".format(fields[\"amount\"]) == \"0.00\": errmsg = \"No transfer needed, balance already", "fields) if errmsg: print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ ,", "= check_fields(True, fields) if errmsg: print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\":", "retrieve balance config = bunq.retrieve_config() if fields[\"payment_type\"] == \"DIRECT\": balance = get_balance(config, fields[\"account\"],", ", 400 # construct payment message if \"{:.2f}\".format(fields[\"amount\"]) == \"0.00\": errmsg = \"No", "not enabled: errmsg = \"Not permitted for account: \"+fields[\"account\"] if errmsg: 
print(\"[target_balance_external] ERROR:", "spaces from account numbers fields[\"account\"] = fields[\"account\"].replace(\" \", \"\") if internal: fields[\"other_account\"] =", "400 return json.dumps({\"data\": [{ \"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def target_balance_external(): \"\"\" Execute a target balance", "[{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 print(paymentmsg) # get id and check permissions", "if \"Error\" in result: return json.dumps({\"errors\": [{ \"status\": \"SKIP\", \"message\": result[\"Error\"][0][\"error_description\"] }]}), 400", "fields[\"amount\"] - balance # check for zero transfer if \"{:.2f}\".format(fields[\"amount\"]) == \"0.00\": errmsg", "paymentmsg) else: errmsg = \"No transfer needed, balance already ok\" print(\"[target_balance_external] ERROR: \"+errmsg)", "action \"\"\" data = request.get_json() print(\"[target_balance_external] input: {}\".format(json.dumps(data))) if \"actionFields\" not in data:", "if internal: fields[\"other_account\"] = fields[\"other_account\"].replace(\" \", \"\") else: fields[\"payment_account\"] = fields[\"payment_account\"].replace(\" \", \"\")", "\"{:.2f}\".format(fields[\"amount\"]) == \"0.00\": errmsg = \"No transfer needed, balance already ok\" print(\"[target_balance_external] ERROR:", "paymentmsg = { \"amount\": { \"value\": \"{:.2f}\".format(-transfer_amount), \"currency\": \"EUR\" }, \"counterparty_alias\": { \"type\":", "balances: return \"Account balance not found \"+account return \"Account balance not found \"+account2", "\"+bmvalue print(\"[request_inquiry] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\":\\ errmsg}]}), 400 msg =", "\"request_phone_email_iban\", \"request_description\"] for field in expected_fields: if field not in fields: return \"missing", "check expected fields if internal: expected_fields = [\"account\", \"amount\", \"other_account\", \"direction\", \"payment_type\", \"description\"]", "\"name\": 
bmvalue, \"value\": bmvalue }, \"description\": fields[\"request_description\"], \"allow_bunqme\": True, } print(json.dumps(msg)) config =", "already ok\" print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400", "if account in balances and account2 in balances: return balances[account], balances[account2] if account", "balances: return balances[account], balances[account2] if account not in balances: return \"Account balance not", "payment message if \"{:.2f}\".format(fields[\"amount\"]) == \"0.00\": errmsg = \"No transfer needed, balance already", "\"permissions\" in config: if fields[\"account\"] in config[\"permissions\"]: if \"PaymentRequest\" in config[\"permissions\"]\\ [fields[\"account\"]]: enabled", "result = bunq.post(\"v1/user/{}/monetary-account/{}/draft-payment\" .format(config[\"user_id\"], accid), paymentmsg) else: errmsg = \"No transfer needed, balance", "\"SKIP\", \"message\": result[\"Error\"][0][\"error_description\"] }]}), 400 return json.dumps({\"data\": [{ \"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def check_fields(internal, fields):", "\"counterparty_alias\": { \"type\": \"IBAN\", \"value\": fields[\"payment_account\"], \"name\": fields[\"payment_name\"] }, \"description\": fields[\"payment_description\"] } print(paymentmsg)", "\"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # construct payment message", "balances = bunq.retrieve_account_balances(config) if account2 is None and account in balances: return balances[account]", "print(json.dumps(msg)) config = bunq.retrieve_config() result = bunq.post(\"v1/user/{}/monetary-account/{}/request-inquiry\"\\ .format(config[\"user_id\"], accid), msg, config) elif transfer_amount", "> 0 and \"top up\" in fields[\"direction\"]: bmvalue = fields[\"request_phone_email_iban\"].replace(\" \", \"\") if", "balance2 = balance transfer_amount = fields[\"amount\"] - 
balance if transfer_amount > balance2: transfer_amount", "json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 print(paymentmsg) # get id and check", "if transfer_amount > 0 and \"top up\" in fields[\"direction\"]: bmvalue = fields[\"request_phone_email_iban\"].replace(\" \",", ".format(config[\"user_id\"], accid), paymentmsg) else: errmsg = \"No transfer needed, balance already ok\" print(\"[target_balance_external]", "errmsg = balance print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ ,", "\"\") else: fields[\"payment_account\"] = fields[\"payment_account\"].replace(\" \", \"\") # check amount try: orig =", "ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 transfer_amount = fields[\"amount\"]", "needed, balance already ok\" print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\", "json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 transfer_amount = fields[\"amount\"] - balance #", "= bunq.retrieve_config() balance = get_balance(config, fields[\"account\"]) if isinstance(balance, str): errmsg = balance print(\"[target_balance_external]", "}, \"description\": fields[\"request_description\"], \"allow_bunqme\": True, } print(json.dumps(msg)) config = bunq.retrieve_config() result = bunq.post(\"v1/user/{}/monetary-account/{}/request-inquiry\"\\", "\"request_description\"] for field in expected_fields: if field not in fields: return \"missing field:", "0 and \"top up\" in fields[\"direction\"]: paymentmsg = { \"amount\": { \"value\": \"{:.2f}\".format(transfer_amount),", "if isinstance(balance, str): errmsg = balance print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\",", "accid), paymentmsg) else: errmsg = \"No transfer needed, balance already ok\" 
print(\"[target_balance_external] ERROR:", "fields[\"amount\"] fields[\"amount\"] = float(fields[\"amount\"]) except ValueError: fields[\"amount\"] = -1 if fields[\"amount\"] <= 0:", "tuple): balance, balance2 = balance transfer_amount = fields[\"amount\"] - balance if transfer_amount >", "}]}), 400 return json.dumps({\"data\": [{ \"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def target_balance_external(): \"\"\" Execute a target", "400 return json.dumps({\"data\": [{ \"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def check_fields(internal, fields): \"\"\" Check the fields", "} print(json.dumps(msg)) config = bunq.retrieve_config() result = bunq.post(\"v1/user/{}/monetary-account/{}/request-inquiry\"\\ .format(config[\"user_id\"], accid), msg, config) elif", "print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 print(result) if", "\"description\": fields[\"request_description\"], \"allow_bunqme\": True, } print(json.dumps(msg)) config = bunq.retrieve_config() result = bunq.post(\"v1/user/{}/monetary-account/{}/request-inquiry\"\\ .format(config[\"user_id\"],", "errmsg = \"Payment type not enabled for account: \"+account if errmsg: print(\"[target_balance_internal] ERROR:", "ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 print(result) if \"Error\"", "json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # execute the payment if fields[\"payment_type\"]", "\"currency\": \"EUR\" }, \"counterparty_alias\": { \"type\": \"IBAN\", \"value\": fields[\"payment_account\"], \"name\": fields[\"payment_name\"] }, \"description\":", "\"\") if internal: fields[\"other_account\"] = fields[\"other_account\"].replace(\" \", \"\") else: fields[\"payment_account\"] = fields[\"payment_account\"].replace(\" \",", "fields if internal: expected_fields = [\"account\", \"amount\", \"other_account\", 
\"direction\", \"payment_type\", \"description\"] else: expected_fields", "errmsg}]})\\ , 400 # the account NL42BUNQ0123456789 is used for test payments if", "= check_fields(False, fields) if errmsg: print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\":", "else: errmsg = \"No transfer needed, balance already ok\" print(\"[target_balance_internal] ERROR: \"+errmsg) return", "permissions if fields[\"payment_type\"] == \"DIRECT\": accid, enabled = payment.check_source_account(True, False, config, account) else:", "fields[\"other_account\"]) if isinstance(balance, tuple): balance, balance2 = balance transfer_amount = fields[\"amount\"] - balance", "= bunq.post(\"v1/user/{}/monetary-account/{}/request-inquiry\"\\ .format(config[\"user_id\"], accid), msg, config) elif transfer_amount < 0 and \"skim\" in", "None: errmsg = \"unknown account: \"+account if not enabled: errmsg = \"Payment type", "check amount try: orig = fields[\"amount\"] fields[\"amount\"] = float(fields[\"amount\"]) except ValueError: fields[\"amount\"] =", "{}\".format(json.dumps(data))) if \"actionFields\" not in data: errmsg = \"missing actionFields\" print(\"[target_balance_external] ERROR: \"+errmsg)", "\"SKIP\", \"message\": errmsg}]})\\ , 400 # the account NL42BUNQ0123456789 is used for test", "construct payment message if \"{:.2f}\".format(fields[\"amount\"]) == \"0.00\": errmsg = \"No transfer needed, balance", "= data[\"actionFields\"] errmsg = check_fields(True, fields) if errmsg: print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\":", "\"\"\" Execute a target balance external action \"\"\" data = request.get_json() print(\"[target_balance_external] input:", "account id and check permission if transfer_amount > 0: accid = None for", "bmvalue = fields[\"request_phone_email_iban\"].replace(\" \", \"\") if \"@\" in bmvalue: bmtype = \"EMAIL\" elif", 
"bunq.post(\"v1/user/{}/monetary-account/{}/request-inquiry\"\\ .format(config[\"user_id\"], accid), msg, config) elif transfer_amount < 0 and \"skim\" in fields[\"direction\"]:", "= balance print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400", "import bunq import payment def target_balance_internal(): \"\"\" Execute a target balance internal action", "actionFields\" print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 fields", "= balance transfer_amount = fields[\"amount\"] - balance if transfer_amount > balance2: transfer_amount =", "\"message\": errmsg}]})\\ , 400 transfer_amount = fields[\"amount\"] - balance # check for zero", "0 and \"top up\" in fields[\"direction\"]: bmvalue = fields[\"request_phone_email_iban\"].replace(\" \", \"\") if \"@\"", "None and account in balances: return balances[account] if account in balances and account2", "not in data: errmsg = \"missing actionFields\" print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\":", "errmsg}]})\\ , 400 if transfer_amount > 0 and \"top up\" in fields[\"direction\"]: paymentmsg", "\"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 fields = data[\"actionFields\"] errmsg", "fields[\"account\"]) if isinstance(balance, float): transfer_amount = fields[\"amount\"] - balance if isinstance(balance, str): errmsg", "json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\":\\ errmsg}]}), 400 msg = { \"amount_inquired\": { \"value\": \"{:.2f}\".format(transfer_amount),", "fields[\"payment_account\"].replace(\" \", \"\") # check amount try: orig = fields[\"amount\"] fields[\"amount\"] = float(fields[\"amount\"])", "numbers fields[\"account\"] = fields[\"account\"].replace(\" \", \"\") if internal: fields[\"other_account\"] = 
fields[\"other_account\"].replace(\" \", \"\")", "ValueError: fields[\"amount\"] = -1 if fields[\"amount\"] <= 0: return \"only positive amounts allowed:", "\"Payment type not enabled for account: \"+account if errmsg: print(\"[target_balance_internal] ERROR: \"+errmsg) return", "external action \"\"\" data = request.get_json() print(\"[target_balance_external] input: {}\".format(json.dumps(data))) if \"actionFields\" not in", "fields[\"account\"].replace(\" \", \"\") if internal: fields[\"other_account\"] = fields[\"other_account\"].replace(\" \", \"\") else: fields[\"payment_account\"] =", "message if \"{:.2f}\".format(fields[\"amount\"]) == \"0.00\": errmsg = \"No transfer needed, balance already ok\"", "if isinstance(balance, float): transfer_amount = fields[\"amount\"] - balance if isinstance(balance, str): errmsg =", "transfer_amount = balance2 else: balance = get_balance(config, fields[\"account\"]) if isinstance(balance, float): transfer_amount =", "\"EUR\" }, \"counterparty_alias\": { \"type\": \"IBAN\", \"value\": fields[\"account\"], \"name\": \"x\" }, \"description\": fields[\"description\"]", "}, \"description\": fields[\"description\"] } account = fields[\"other_account\"] elif transfer_amount < 0 and \"skim\"", "bmvalue[2:4].isdecimal(): bmtype = \"IBAN\" else: errmsg = \"Unrecognized as email, phone or iban:", "\"counterparty_alias\": { \"type\": \"IBAN\", \"value\": fields[\"account\"], \"name\": \"x\" }, \"description\": fields[\"description\"] } account", ".format(config[\"user_id\"], accid), paymentmsg) else: paymentmsg = {\"number_of_required_accepts\": 1, \"entries\": [paymentmsg]} result = bunq.post(\"v1/user/{}/monetary-account/{}/draft-payment\"", "balance print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 #", "fields[\"payment_type\"] == \"DIRECT\": balance = get_balance(config, fields[\"account\"], fields[\"other_account\"]) if isinstance(balance, 
tuple): balance, balance2", "== \"DIRECT\": result = bunq.post(\"v1/user/{}/monetary-account/{}/payment\" .format(config[\"user_id\"], accid), paymentmsg) else: paymentmsg = {\"number_of_required_accepts\": 1,", "\"x\" }, \"description\": fields[\"description\"] } account = fields[\"account\"] else: errmsg = \"No transfer", "the fields \"\"\" # check expected fields if internal: expected_fields = [\"account\", \"amount\",", "str): errmsg = balance print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\", "\"\"\" data = request.get_json() print(\"[target_balance_external] input: {}\".format(json.dumps(data))) if \"actionFields\" not in data: errmsg", "for zero transfer if \"{:.2f}\".format(fields[\"amount\"]) == \"0.00\": errmsg = \"No transfer needed, balance", "\"type\": \"IBAN\", \"value\": fields[\"other_account\"], \"name\": \"x\" }, \"description\": fields[\"description\"] } account = fields[\"account\"]", "fields[\"description\"] } account = fields[\"other_account\"] elif transfer_amount < 0 and \"skim\" in fields[\"direction\"]:", ".format(config[\"user_id\"], accid), paymentmsg) print(result) if \"Error\" in result: return json.dumps({\"errors\": [{ \"status\": \"SKIP\",", "account = fields[\"account\"] else: errmsg = \"No transfer needed, balance already ok\" print(\"[target_balance_internal]", "result[\"Error\"][0][\"error_description\"] }]}), 400 return json.dumps({\"data\": [{ \"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def target_balance_external(): \"\"\" Execute a", "in expected_fields: if field not in fields: return \"missing field: \"+field # strip", "get_balance(config, fields[\"account\"], fields[\"other_account\"]) if isinstance(balance, tuple): balance, balance2 = balance transfer_amount = fields[\"amount\"]", "retrieve balance config = bunq.retrieve_config() balance = get_balance(config, fields[\"account\"]) if isinstance(balance, str): errmsg", "\"\"\" 
Check the fields \"\"\" # check expected fields if internal: expected_fields =", "/ execute payment if transfer_amount > 0 and \"top up\" in fields[\"direction\"]: bmvalue", "\"payment_type\", \"description\"] else: expected_fields = [\"account\", \"amount\", \"direction\", \"payment_account\", \"payment_name\", \"payment_description\", \"request_phone_email_iban\", \"request_description\"]", "get_balance(config, fields[\"account\"]) if isinstance(balance, str): errmsg = balance print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\":", "= fields[\"other_account\"].replace(\" \", \"\") else: fields[\"payment_account\"] = fields[\"payment_account\"].replace(\" \", \"\") # check amount", "fields \"\"\" # check expected fields if internal: expected_fields = [\"account\", \"amount\", \"other_account\",", "balance if transfer_amount > balance2: transfer_amount = balance2 else: balance = get_balance(config, fields[\"account\"])", "if fields[\"payment_type\"] == \"DIRECT\": accid, enabled = payment.check_source_account(True, False, config, account) else: accid,", "enabled = payment.check_source_account(True, False, config, account) else: accid, enabled = payment.check_source_account(False, True, config,", "\"counterparty_alias\": { \"type\": bmtype, \"name\": bmvalue, \"value\": bmvalue }, \"description\": fields[\"request_description\"], \"allow_bunqme\": True,", "if acc[\"iban\"] == fields[\"account\"]: accid = acc[\"id\"] enabled = False if \"permissions\" in", "balance config = bunq.retrieve_config() if fields[\"payment_type\"] == \"DIRECT\": balance = get_balance(config, fields[\"account\"], fields[\"other_account\"])", "\"message\": errmsg}]})\\ , 400 # execute the payment if fields[\"payment_type\"] == \"DIRECT\": result", "enabled: errmsg = \"Payment type not enabled for account: \"+account if errmsg: print(\"[target_balance_internal]", "bmtype, \"name\": bmvalue, \"value\": bmvalue }, \"description\": fields[\"request_description\"], 
\"allow_bunqme\": True, } print(json.dumps(msg)) config", "bunq.retrieve_config() if fields[\"payment_type\"] == \"DIRECT\": balance = get_balance(config, fields[\"account\"], fields[\"other_account\"]) if isinstance(balance, tuple):", "account numbers fields[\"account\"] = fields[\"account\"].replace(\" \", \"\") if internal: fields[\"other_account\"] = fields[\"other_account\"].replace(\" \",", "[{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 print(result) if \"Error\" in result: return json.dumps({\"errors\":", "permitted for account: \"+fields[\"account\"] if errmsg: print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\",", "print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 if transfer_amount", "= { \"amount\": { \"value\": \"{:.2f}\".format(-transfer_amount), \"currency\": \"EUR\" }, \"counterparty_alias\": { \"type\": \"IBAN\",", "balance already ok\" print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ ,", "in fields[\"direction\"]: paymentmsg = { \"amount\": { \"value\": \"{:.2f}\".format(-transfer_amount), \"currency\": \"EUR\" }, \"counterparty_alias\":", "\"PHONE_NUMBER\" elif bmvalue[:2].isalpha() and bmvalue[2:4].isdecimal(): bmtype = \"IBAN\" else: errmsg = \"Unrecognized as", "fields[\"account\"] else: errmsg = \"No transfer needed, balance already ok\" print(\"[target_balance_internal] ERROR: \"+errmsg)", "account2 is None and account in balances: return balances[account] if account in balances", "bmvalue }, \"description\": fields[\"request_description\"], \"allow_bunqme\": True, } print(json.dumps(msg)) config = bunq.retrieve_config() result =", "> 0 and \"top up\" in fields[\"direction\"]: paymentmsg = { \"amount\": { \"value\":", "import payment def target_balance_internal(): \"\"\" Execute a target balance internal 
action \"\"\" data", "json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # the account NL42BUNQ0123456789 is used", "\"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\":\\ errmsg}]}), 400 msg = { \"amount_inquired\": {", "used for test payments if fields[\"account\"] == \"NL42BUNQ0123456789\": return json.dumps({\"data\": [{\"id\": uuid.uuid4().hex}]}) #", "account) else: accid, enabled = payment.check_source_account(False, True, config, account) if accid is None:", "accid is None: errmsg = \"unknown account: \"+account if not enabled: errmsg =", "ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # send request", "else: accid, enabled = payment.check_source_account(False, True, config, fields[\"account\"]) if accid is None: errmsg", "return \"only positive amounts allowed: \"+orig return None def get_balance(config, account, account2=None): \"\"\"", "positive amounts allowed: \"+orig return None def get_balance(config, account, account2=None): \"\"\" Retrieve the", "check_fields(internal, fields): \"\"\" Check the fields \"\"\" # check expected fields if internal:", "try: orig = fields[\"amount\"] fields[\"amount\"] = float(fields[\"amount\"]) except ValueError: fields[\"amount\"] = -1 if", "}, \"counterparty_alias\": { \"type\": \"IBAN\", \"value\": fields[\"other_account\"], \"name\": \"x\" }, \"description\": fields[\"description\"] }", "= {\"number_of_required_accepts\": 1, \"entries\": [paymentmsg]} result = bunq.post(\"v1/user/{}/monetary-account/{}/draft-payment\" .format(config[\"user_id\"], accid), paymentmsg) print(result) if", "# check amount try: orig = fields[\"amount\"] fields[\"amount\"] = float(fields[\"amount\"]) except ValueError: fields[\"amount\"]", "config[\"permissions\"]\\ [fields[\"account\"]]: enabled = config[\"permissions\"][fields[\"account\"]]\\ [\"PaymentRequest\"] else: accid, enabled = payment.check_source_account(False, 
True, config,", "> 0: accid = None for acc in config[\"accounts\"]: if acc[\"iban\"] == fields[\"account\"]:", "400 # construct payment message if \"{:.2f}\".format(fields[\"amount\"]) == \"0.00\": errmsg = \"No transfer", ", 400 print(result) if \"Error\" in result: return json.dumps({\"errors\": [{ \"status\": \"SKIP\", \"message\":", "errmsg = check_fields(True, fields) if errmsg: print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\",", "print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # get", "\"message\": result[\"Error\"][0][\"error_description\"] }]}), 400 return json.dumps({\"data\": [{ \"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def check_fields(internal, fields): \"\"\"", "\"type\": \"IBAN\", \"value\": fields[\"account\"], \"name\": \"x\" }, \"description\": fields[\"description\"] } account = fields[\"other_account\"]", "0: accid = None for acc in config[\"accounts\"]: if acc[\"iban\"] == fields[\"account\"]: accid", "\"message\": errmsg}]})\\ , 400 # send request / execute payment if transfer_amount >", "str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def check_fields(internal, fields): \"\"\" Check the fields \"\"\" # check expected fields", "\"\"\" # check expected fields if internal: expected_fields = [\"account\", \"amount\", \"other_account\", \"direction\",", "\"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def target_balance_external(): \"\"\" Execute a target balance external action \"\"\" data", "[{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 if transfer_amount > 0 and \"top up\"", "\"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # send request /", "\"{:.2f}\".format(-transfer_amount), \"currency\": \"EUR\" }, \"counterparty_alias\": { \"type\": \"IBAN\", \"value\": fields[\"payment_account\"], \"name\": 
fields[\"payment_name\"] },", "action \"\"\" data = request.get_json() print(\"[target_balance_internal] input: {}\".format(json.dumps(data))) if \"actionFields\" not in data:", "config, account) else: accid, enabled = payment.check_source_account(False, True, config, account) if accid is", "\"message\":\\ errmsg}]}), 400 msg = { \"amount_inquired\": { \"value\": \"{:.2f}\".format(transfer_amount), \"currency\": \"EUR\", },", "fields) if errmsg: print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ ,", "if fields[\"account\"] == \"NL42BUNQ0123456789\": return json.dumps({\"data\": [{\"id\": uuid.uuid4().hex}]}) # retrieve balance config =", "}, \"description\": fields[\"payment_description\"] } print(paymentmsg) paymentmsg = {\"number_of_required_accepts\": 1, \"entries\": [paymentmsg]} result =", "ok\" print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 if", "in fields[\"direction\"]: bmvalue = fields[\"request_phone_email_iban\"].replace(\" \", \"\") if \"@\" in bmvalue: bmtype =", "data[\"actionFields\"] errmsg = check_fields(True, fields) if errmsg: print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\":", "# send request / execute payment if transfer_amount > 0 and \"top up\"", "expected_fields = [\"account\", \"amount\", \"other_account\", \"direction\", \"payment_type\", \"description\"] else: expected_fields = [\"account\", \"amount\",", "transfer needed, balance already ok\" print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\":", "check_fields(False, fields) if errmsg: print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\", "bmvalue, \"value\": bmvalue }, \"description\": fields[\"request_description\"], 
\"allow_bunqme\": True, } print(json.dumps(msg)) config = bunq.retrieve_config()", "of one or two accounts \"\"\" balances = bunq.retrieve_account_balances(config) if account2 is None", "fields[\"direction\"]: paymentmsg = { \"amount\": { \"value\": \"{:.2f}\".format(transfer_amount), \"currency\": \"EUR\" }, \"counterparty_alias\": {", "payments if fields[\"account\"] == \"NL42BUNQ0123456789\": return json.dumps({\"data\": [{\"id\": uuid.uuid4().hex}]}) # retrieve balance config", "\"{:.2f}\".format(fields[\"amount\"]) == \"0.00\": errmsg = \"No transfer needed, balance already ok\" print(\"[target_balance_internal] ERROR:", "= bunq.retrieve_config() result = bunq.post(\"v1/user/{}/monetary-account/{}/request-inquiry\"\\ .format(config[\"user_id\"], accid), msg, config) elif transfer_amount < 0", "\"{:.2f}\".format(-transfer_amount), \"currency\": \"EUR\" }, \"counterparty_alias\": { \"type\": \"IBAN\", \"value\": fields[\"other_account\"], \"name\": \"x\" },", "# retrieve balance config = bunq.retrieve_config() if fields[\"payment_type\"] == \"DIRECT\": balance = get_balance(config,", "bunq.post(\"v1/user/{}/monetary-account/{}/payment\" .format(config[\"user_id\"], accid), paymentmsg) else: paymentmsg = {\"number_of_required_accepts\": 1, \"entries\": [paymentmsg]} result =", "\"message\": errmsg}]})\\ , 400 # construct payment message if \"{:.2f}\".format(fields[\"amount\"]) == \"0.00\": errmsg", "and check permissions if fields[\"payment_type\"] == \"DIRECT\": accid, enabled = payment.check_source_account(True, False, config,", "\"payment_description\", \"request_phone_email_iban\", \"request_description\"] for field in expected_fields: if field not in fields: return", "None for acc in config[\"accounts\"]: if acc[\"iban\"] == fields[\"account\"]: accid = acc[\"id\"] enabled", "= \"Payment type not enabled for account: \"+account if errmsg: print(\"[target_balance_internal] ERROR: \"+errmsg)", "the account NL42BUNQ0123456789 is used for test payments if 
fields[\"account\"] == \"NL42BUNQ0123456789\": return", "if \"{:.2f}\".format(fields[\"amount\"]) == \"0.00\": errmsg = \"No transfer needed, balance already ok\" print(\"[target_balance_external]", "\"type\": bmtype, \"name\": bmvalue, \"value\": bmvalue }, \"description\": fields[\"request_description\"], \"allow_bunqme\": True, } print(json.dumps(msg))", "errmsg = \"Unrecognized as email, phone or iban: \"+bmvalue print(\"[request_inquiry] ERROR: \"+errmsg) return", "fields): \"\"\" Check the fields \"\"\" # check expected fields if internal: expected_fields", "if not enabled: errmsg = \"Not permitted for account: \"+fields[\"account\"] if errmsg: print(\"[target_balance_external]", "transfer_amount > 0 and \"top up\" in fields[\"direction\"]: bmvalue = fields[\"request_phone_email_iban\"].replace(\" \", \"\")", "transfer_amount = fields[\"amount\"] - balance if transfer_amount > balance2: transfer_amount = balance2 else:", "else: balance = get_balance(config, fields[\"account\"]) if isinstance(balance, float): transfer_amount = fields[\"amount\"] - balance", "request import bunq import payment def target_balance_internal(): \"\"\" Execute a target balance internal", "if errmsg: print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400", "\"message\": errmsg}]})\\ , 400 # get account id and check permission if transfer_amount", "in data: errmsg = \"missing actionFields\" print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\",", "return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # construct payment message if", "\"top up\" in fields[\"direction\"]: bmvalue = fields[\"request_phone_email_iban\"].replace(\" \", \"\") if \"@\" in bmvalue:", "[paymentmsg]} result = bunq.post(\"v1/user/{}/monetary-account/{}/draft-payment\" .format(config[\"user_id\"], accid), paymentmsg) else: errmsg = \"No transfer 
needed,", "balance = get_balance(config, fields[\"account\"], fields[\"other_account\"]) if isinstance(balance, tuple): balance, balance2 = balance transfer_amount", "in balances and account2 in balances: return balances[account], balances[account2] if account not in", "= request.get_json() print(\"[target_balance_internal] input: {}\".format(json.dumps(data))) if \"actionFields\" not in data: errmsg = \"missing", "elif transfer_amount < 0 and \"skim\" in fields[\"direction\"]: paymentmsg = { \"amount\": {", "errmsg: print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 #", "json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 if transfer_amount > 0 and \"top", "bmtype = \"IBAN\" else: errmsg = \"Unrecognized as email, phone or iban: \"+bmvalue", "\"\"\" Execute a target balance internal action \"\"\" data = request.get_json() print(\"[target_balance_internal] input:", "if field not in fields: return \"missing field: \"+field # strip spaces from", "field: \"+field # strip spaces from account numbers fields[\"account\"] = fields[\"account\"].replace(\" \", \"\")", "}]}), 400 return json.dumps({\"data\": [{ \"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def check_fields(internal, fields): \"\"\" Check the", "= \"unknown account: \"+account if not enabled: errmsg = \"Payment type not enabled", "= payment.check_source_account(False, True, config, fields[\"account\"]) if accid is None: errmsg = \"unknown account:", "\"SKIP\", \"message\": errmsg}]})\\ , 400 fields = data[\"actionFields\"] errmsg = check_fields(True, fields) if", "config = bunq.retrieve_config() result = bunq.post(\"v1/user/{}/monetary-account/{}/request-inquiry\"\\ .format(config[\"user_id\"], accid), msg, config) elif transfer_amount <", "result = bunq.post(\"v1/user/{}/monetary-account/{}/draft-payment\" .format(config[\"user_id\"], accid), paymentmsg) print(result) if \"Error\" in 
result: return json.dumps({\"errors\":", "return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 print(paymentmsg) # get id and", "if isinstance(balance, str): errmsg = balance print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\",", "json import uuid from flask import request import bunq import payment def target_balance_internal():", "if account2 is None and account in balances: return balances[account] if account in", "config[\"permissions\"][fields[\"account\"]]\\ [\"PaymentRequest\"] else: accid, enabled = payment.check_source_account(False, True, config, fields[\"account\"]) if accid is", "[{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # send request / execute payment if", "get id and check permissions if fields[\"payment_type\"] == \"DIRECT\": accid, enabled = payment.check_source_account(True,", "def target_balance_external(): \"\"\" Execute a target balance external action \"\"\" data = request.get_json()", "{ \"amount\": { \"value\": \"{:.2f}\".format(-transfer_amount), \"currency\": \"EUR\" }, \"counterparty_alias\": { \"type\": \"IBAN\", \"value\":", "permission if transfer_amount > 0: accid = None for acc in config[\"accounts\"]: if", "not enabled: errmsg = \"Payment type not enabled for account: \"+account if errmsg:", "\"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # execute the payment", "# get account id and check permission if transfer_amount > 0: accid =", "for account: \"+fields[\"account\"] if errmsg: print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\":", "- balance # check for zero transfer if \"{:.2f}\".format(fields[\"amount\"]) == \"0.00\": errmsg =", "= None for acc in config[\"accounts\"]: if acc[\"iban\"] == fields[\"account\"]: accid = acc[\"id\"]", "iban: \"+bmvalue print(\"[request_inquiry] ERROR: \"+errmsg) return 
json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\":\\ errmsg}]}), 400 msg", "else: errmsg = \"No transfer needed, balance already ok\" print(\"[target_balance_external] ERROR: \"+errmsg) return", "\"SKIP\", \"message\": errmsg}]})\\ , 400 # send request / execute payment if transfer_amount", "# construct payment message if \"{:.2f}\".format(fields[\"amount\"]) == \"0.00\": errmsg = \"No transfer needed,", "None: errmsg = \"unknown account: \"+fields[\"account\"] if not enabled: errmsg = \"Not permitted", "print(result) if \"Error\" in result: return json.dumps({\"errors\": [{ \"status\": \"SKIP\", \"message\": result[\"Error\"][0][\"error_description\"] }]}),", "if \"permissions\" in config: if fields[\"account\"] in config[\"permissions\"]: if \"PaymentRequest\" in config[\"permissions\"]\\ [fields[\"account\"]]:", "print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # execute", "} print(paymentmsg) paymentmsg = {\"number_of_required_accepts\": 1, \"entries\": [paymentmsg]} result = bunq.post(\"v1/user/{}/monetary-account/{}/draft-payment\" .format(config[\"user_id\"], accid),", "request.get_json() print(\"[target_balance_external] input: {}\".format(json.dumps(data))) if \"actionFields\" not in data: errmsg = \"missing actionFields\"", "request.get_json() print(\"[target_balance_internal] input: {}\".format(json.dumps(data))) if \"actionFields\" not in data: errmsg = \"missing actionFields\"", "fields = data[\"actionFields\"] errmsg = check_fields(False, fields) if errmsg: print(\"[target_balance_external] ERROR: \"+errmsg) return", "account in balances and account2 in balances: return balances[account], balances[account2] if account not", "payment if fields[\"payment_type\"] == \"DIRECT\": result = bunq.post(\"v1/user/{}/monetary-account/{}/payment\" .format(config[\"user_id\"], accid), paymentmsg) else: paymentmsg", "\"amount\", \"direction\", \"payment_account\", 
\"payment_name\", \"payment_description\", \"request_phone_email_iban\", \"request_description\"] for field in expected_fields: if field", "}, \"counterparty_alias\": { \"type\": bmtype, \"name\": bmvalue, \"value\": bmvalue }, \"description\": fields[\"request_description\"], \"allow_bunqme\":", "get account id and check permission if transfer_amount > 0: accid = None", "expected fields if internal: expected_fields = [\"account\", \"amount\", \"other_account\", \"direction\", \"payment_type\", \"description\"] else:", "balance Handles the target balance internal/external actions \"\"\" import json import uuid from", "up\" in fields[\"direction\"]: bmvalue = fields[\"request_phone_email_iban\"].replace(\" \", \"\") if \"@\" in bmvalue: bmtype", "{ \"type\": bmtype, \"name\": bmvalue, \"value\": bmvalue }, \"description\": fields[\"request_description\"], \"allow_bunqme\": True, }", "balances[account] if account in balances and account2 in balances: return balances[account], balances[account2] if", "return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # send request / execute", "[{ \"status\": \"SKIP\", \"message\": result[\"Error\"][0][\"error_description\"] }]}), 400 return json.dumps({\"data\": [{ \"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def", "request / execute payment if transfer_amount > 0 and \"top up\" in fields[\"direction\"]:", "and \"top up\" in fields[\"direction\"]: bmvalue = fields[\"request_phone_email_iban\"].replace(\" \", \"\") if \"@\" in", "[\"account\", \"amount\", \"direction\", \"payment_account\", \"payment_name\", \"payment_description\", \"request_phone_email_iban\", \"request_description\"] for field in expected_fields: if", "print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # send", "paymentmsg) print(result) if \"Error\" in result: return json.dumps({\"errors\": [{ \"status\": \"SKIP\", \"message\": 
result[\"Error\"][0][\"error_description\"]", "\"+field # strip spaces from account numbers fields[\"account\"] = fields[\"account\"].replace(\" \", \"\") if", "400 transfer_amount = fields[\"amount\"] - balance # check for zero transfer if \"{:.2f}\".format(fields[\"amount\"])", "\"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 print(result) if \"Error\" in", "errmsg = \"missing actionFields\" print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\", "bunq.retrieve_config() result = bunq.post(\"v1/user/{}/monetary-account/{}/request-inquiry\"\\ .format(config[\"user_id\"], accid), msg, config) elif transfer_amount < 0 and", "return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 print(result) if \"Error\" in result:", "isinstance(balance, tuple): balance, balance2 = balance transfer_amount = fields[\"amount\"] - balance if transfer_amount", "not enabled for account: \"+account if errmsg: print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\":", "= { \"amount_inquired\": { \"value\": \"{:.2f}\".format(transfer_amount), \"currency\": \"EUR\", }, \"counterparty_alias\": { \"type\": bmtype,", "\", \"\") # check amount try: orig = fields[\"amount\"] fields[\"amount\"] = float(fields[\"amount\"]) except", "== \"DIRECT\": accid, enabled = payment.check_source_account(True, False, config, account) else: accid, enabled =", "# the account NL42BUNQ0123456789 is used for test payments if fields[\"account\"] == \"NL42BUNQ0123456789\":", "\"top up\" in fields[\"direction\"]: paymentmsg = { \"amount\": { \"value\": \"{:.2f}\".format(transfer_amount), \"currency\": \"EUR\"", "\"SKIP\", \"message\": errmsg}]})\\ , 400 # get account id and check permission if", "[paymentmsg]} result = bunq.post(\"v1/user/{}/monetary-account/{}/draft-payment\" .format(config[\"user_id\"], accid), 
def check_fields(internal, fields):
    """ Check the fields

    Validates the actionFields of a target balance action and normalises
    them in place: spaces are stripped from account numbers and the
    amount is converted to a float.

    internal -- True for the internal action, False for the external one
    fields   -- the actionFields dict (mutated in place)

    Returns an error message string, or None when everything is valid.
    """
    # check expected fields
    if internal:
        expected_fields = ["account", "amount", "other_account", "direction",
                           "payment_type", "description"]
    else:
        expected_fields = ["account", "amount", "direction",
                           "payment_account", "payment_name",
                           "payment_description", "request_phone_email_iban",
                           "request_description"]
    for field in expected_fields:
        if field not in fields:
            return "missing field: "+field

    # strip spaces from account numbers
    fields["account"] = fields["account"].replace(" ", "")
    if internal:
        fields["other_account"] = fields["other_account"].replace(" ", "")
    else:
        fields["payment_account"] = fields["payment_account"].replace(" ", "")

    # check amount
    orig = fields["amount"]
    try:
        fields["amount"] = float(fields["amount"])
    # BUGFIX: also catch TypeError — float(None) / float([]) raised an
    # unhandled exception and crashed the handler instead of rejecting
    except (ValueError, TypeError):
        fields["amount"] = -1
    if fields["amount"] <= 0:
        # BUGFIX: str(orig) — a numeric amount in the JSON payload made
        # the string concatenation itself raise TypeError
        return "only positive amounts allowed: "+str(orig)
    return None
\"{:.2f}\".format(-transfer_amount), \"currency\": \"EUR\" }, \"counterparty_alias\": { \"type\": \"IBAN\", \"value\": fields[\"other_account\"], \"name\": \"x\"", "if fields[\"payment_type\"] == \"DIRECT\": result = bunq.post(\"v1/user/{}/monetary-account/{}/payment\" .format(config[\"user_id\"], accid), paymentmsg) else: paymentmsg =", "\"value\": fields[\"payment_account\"], \"name\": fields[\"payment_name\"] }, \"description\": fields[\"payment_description\"] } print(paymentmsg) paymentmsg = {\"number_of_required_accepts\": 1,", "True, config, fields[\"account\"]) if accid is None: errmsg = \"unknown account: \"+fields[\"account\"] if", "json.dumps({\"data\": [{ \"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def check_fields(internal, fields): \"\"\" Check the fields \"\"\" #", "[{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 # the account NL42BUNQ0123456789 is used for", "== \"DIRECT\": balance = get_balance(config, fields[\"account\"], fields[\"other_account\"]) if isinstance(balance, tuple): balance, balance2 =", "\"message\": errmsg}]})\\ , 400 fields = data[\"actionFields\"] errmsg = check_fields(True, fields) if errmsg:", "400 fields = data[\"actionFields\"] errmsg = check_fields(True, fields) if errmsg: print(\"[target_balance_internal] ERROR: \"+errmsg)", "= fields[\"amount\"] fields[\"amount\"] = float(fields[\"amount\"]) except ValueError: fields[\"amount\"] = -1 if fields[\"amount\"] <=", "if accid is None: errmsg = \"unknown account: \"+account if not enabled: errmsg", "type not enabled for account: \"+account if errmsg: print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\":", "elif bmvalue[:2].isalpha() and bmvalue[2:4].isdecimal(): bmtype = \"IBAN\" else: errmsg = \"Unrecognized as email,", "if \"actionFields\" not in data: errmsg = \"missing actionFields\" print(\"[target_balance_external] ERROR: \"+errmsg) return", "\"message\": errmsg}]})\\ , 400 # the account NL42BUNQ0123456789 is used for test 
payments", "= False if \"permissions\" in config: if fields[\"account\"] in config[\"permissions\"]: if \"PaymentRequest\" in", "transfer_amount = fields[\"amount\"] - balance if isinstance(balance, str): errmsg = balance print(\"[target_balance_internal] ERROR:", "from flask import request import bunq import payment def target_balance_internal(): \"\"\" Execute a", "= config[\"permissions\"][fields[\"account\"]]\\ [\"PaymentRequest\"] else: accid, enabled = payment.check_source_account(False, True, config, fields[\"account\"]) if accid", "id and check permission if transfer_amount > 0: accid = None for acc", "fields[\"amount\"] - balance if isinstance(balance, str): errmsg = balance print(\"[target_balance_internal] ERROR: \"+errmsg) return", "and bmvalue[1:].isdecimal(): bmtype = \"PHONE_NUMBER\" elif bmvalue[:2].isalpha() and bmvalue[2:4].isdecimal(): bmtype = \"IBAN\" else:", "\"entries\": [paymentmsg]} result = bunq.post(\"v1/user/{}/monetary-account/{}/draft-payment\" .format(config[\"user_id\"], accid), paymentmsg) else: errmsg = \"No transfer", "\"description\"] else: expected_fields = [\"account\", \"amount\", \"direction\", \"payment_account\", \"payment_name\", \"payment_description\", \"request_phone_email_iban\", \"request_description\"] for", "0: return \"only positive amounts allowed: \"+orig return None def get_balance(config, account, account2=None):", "{ \"value\": \"{:.2f}\".format(-transfer_amount), \"currency\": \"EUR\" }, \"counterparty_alias\": { \"type\": \"IBAN\", \"value\": fields[\"other_account\"], \"name\":", "fields[\"other_account\"] elif transfer_amount < 0 and \"skim\" in fields[\"direction\"]: paymentmsg = { \"amount\":", "400 msg = { \"amount_inquired\": { \"value\": \"{:.2f}\".format(transfer_amount), \"currency\": \"EUR\", }, \"counterparty_alias\": {", ", 400 # the account NL42BUNQ0123456789 is used for test payments if fields[\"account\"]", "if isinstance(balance, tuple): balance, balance2 = balance transfer_amount = 
fields[\"amount\"] - balance if", "acc[\"id\"] enabled = False if \"permissions\" in config: if fields[\"account\"] in config[\"permissions\"]: if", "[{ \"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def check_fields(internal, fields): \"\"\" Check the fields \"\"\" # check", "= float(fields[\"amount\"]) except ValueError: fields[\"amount\"] = -1 if fields[\"amount\"] <= 0: return \"only", "\"currency\": \"EUR\", }, \"counterparty_alias\": { \"type\": bmtype, \"name\": bmvalue, \"value\": bmvalue }, \"description\":", "for account: \"+account if errmsg: print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\":", "or iban: \"+bmvalue print(\"[request_inquiry] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\":\\ errmsg}]}), 400", "fields[\"other_account\"], \"name\": \"x\" }, \"description\": fields[\"description\"] } account = fields[\"account\"] else: errmsg =", "\"+orig return None def get_balance(config, account, account2=None): \"\"\" Retrieve the balance of one", "\"name\": fields[\"payment_name\"] }, \"description\": fields[\"payment_description\"] } print(paymentmsg) paymentmsg = {\"number_of_required_accepts\": 1, \"entries\": [paymentmsg]}", "\"name\": \"x\" }, \"description\": fields[\"description\"] } account = fields[\"account\"] else: errmsg = \"No", "# execute the payment if fields[\"payment_type\"] == \"DIRECT\": result = bunq.post(\"v1/user/{}/monetary-account/{}/payment\" .format(config[\"user_id\"], accid),", "balance external action \"\"\" data = request.get_json() print(\"[target_balance_external] input: {}\".format(json.dumps(data))) if \"actionFields\" not", "if fields[\"account\"] in config[\"permissions\"]: if \"PaymentRequest\" in config[\"permissions\"]\\ [fields[\"account\"]]: enabled = config[\"permissions\"][fields[\"account\"]]\\ [\"PaymentRequest\"]", "accid), paymentmsg) print(result) if \"Error\" in result: return 
json.dumps({\"errors\": [{ \"status\": \"SKIP\", \"message\":", "and account2 in balances: return balances[account], balances[account2] if account not in balances: return", "< 0 and \"skim\" in fields[\"direction\"]: paymentmsg = { \"amount\": { \"value\": \"{:.2f}\".format(-transfer_amount),", "accid is None: errmsg = \"unknown account: \"+fields[\"account\"] if not enabled: errmsg =", "amount try: orig = fields[\"amount\"] fields[\"amount\"] = float(fields[\"amount\"]) except ValueError: fields[\"amount\"] = -1", "amounts allowed: \"+orig return None def get_balance(config, account, account2=None): \"\"\" Retrieve the balance", "False, config, account) else: accid, enabled = payment.check_source_account(False, True, config, account) if accid", "in result: return json.dumps({\"errors\": [{ \"status\": \"SKIP\", \"message\": result[\"Error\"][0][\"error_description\"] }]}), 400 return json.dumps({\"data\":", "[fields[\"account\"]]: enabled = config[\"permissions\"][fields[\"account\"]]\\ [\"PaymentRequest\"] else: accid, enabled = payment.check_source_account(False, True, config, fields[\"account\"])", "result: return json.dumps({\"errors\": [{ \"status\": \"SKIP\", \"message\": result[\"Error\"][0][\"error_description\"] }]}), 400 return json.dumps({\"data\": [{", "\"missing field: \"+field # strip spaces from account numbers fields[\"account\"] = fields[\"account\"].replace(\" \",", "\"IBAN\", \"value\": fields[\"other_account\"], \"name\": \"x\" }, \"description\": fields[\"description\"] } account = fields[\"account\"] else:", "print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 transfer_amount =", "and account in balances: return balances[account] if account in balances and account2 in", "actions \"\"\" import json import uuid from flask import request import bunq import", "print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": 
\"SKIP\", \"message\": errmsg}]})\\ , 400 # construct", "fields[\"account\"]) if accid is None: errmsg = \"unknown account: \"+fields[\"account\"] if not enabled:", "json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 fields = data[\"actionFields\"] errmsg = check_fields(True,", "\"+\" and bmvalue[1:].isdecimal(): bmtype = \"PHONE_NUMBER\" elif bmvalue[:2].isalpha() and bmvalue[2:4].isdecimal(): bmtype = \"IBAN\"", "account: \"+fields[\"account\"] if not enabled: errmsg = \"Not permitted for account: \"+fields[\"account\"] if", "config) elif transfer_amount < 0 and \"skim\" in fields[\"direction\"]: paymentmsg = { \"amount\":", "balances and account2 in balances: return balances[account], balances[account2] if account not in balances:", "\"{:.2f}\".format(transfer_amount), \"currency\": \"EUR\" }, \"counterparty_alias\": { \"type\": \"IBAN\", \"value\": fields[\"account\"], \"name\": \"x\" },", "print(paymentmsg) paymentmsg = {\"number_of_required_accepts\": 1, \"entries\": [paymentmsg]} result = bunq.post(\"v1/user/{}/monetary-account/{}/draft-payment\" .format(config[\"user_id\"], accid), paymentmsg)", "}, \"counterparty_alias\": { \"type\": \"IBAN\", \"value\": fields[\"account\"], \"name\": \"x\" }, \"description\": fields[\"description\"] }", "0 and \"skim\" in fields[\"direction\"]: paymentmsg = { \"amount\": { \"value\": \"{:.2f}\".format(-transfer_amount), \"currency\":", "\"name\": \"x\" }, \"description\": fields[\"description\"] } account = fields[\"other_account\"] elif transfer_amount < 0", "\"SKIP\", \"message\": result[\"Error\"][0][\"error_description\"] }]}), 400 return json.dumps({\"data\": [{ \"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def target_balance_external(): \"\"\"", "\"+account if not enabled: errmsg = \"Payment type not enabled for account: \"+account", "\", \"\") else: fields[\"payment_account\"] = fields[\"payment_account\"].replace(\" \", \"\") # check amount try: orig", "- balance if 
isinstance(balance, str): errmsg = balance print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\":", "400 # send request / execute payment if transfer_amount > 0 and \"top", "= \"No transfer needed, balance already ok\" print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\":", "if \"@\" in bmvalue: bmtype = \"EMAIL\" elif bmvalue[:1] == \"+\" and bmvalue[1:].isdecimal():", "print(\"[target_balance_internal] input: {}\".format(json.dumps(data))) if \"actionFields\" not in data: errmsg = \"missing actionFields\" print(\"[target_balance_internal]", "= request.get_json() print(\"[target_balance_external] input: {}\".format(json.dumps(data))) if \"actionFields\" not in data: errmsg = \"missing", "\"Error\" in result: return json.dumps({\"errors\": [{ \"status\": \"SKIP\", \"message\": result[\"Error\"][0][\"error_description\"] }]}), 400 return", "\"actionFields\" not in data: errmsg = \"missing actionFields\" print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\":", "return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 fields = data[\"actionFields\"] errmsg =", "errmsg = \"missing actionFields\" print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\", "else: expected_fields = [\"account\", \"amount\", \"direction\", \"payment_account\", \"payment_name\", \"payment_description\", \"request_phone_email_iban\", \"request_description\"] for field", "data = request.get_json() print(\"[target_balance_external] input: {}\".format(json.dumps(data))) if \"actionFields\" not in data: errmsg =", "\"+account if errmsg: print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ ,", "\"\"\" data = request.get_json() print(\"[target_balance_internal] input: {}\".format(json.dumps(data))) if \"actionFields\" 
def target_balance_external():
    """ Execute a target balance external action

    Reads the IFTTT action payload from the current Flask request,
    validates the fields and compares the account balance with the target
    amount. A shortfall is topped up by sending a payment request to an
    external party; a surplus is skimmed off with a draft payment to an
    external account.

    Returns a JSON string on success, or a (JSON string, 400) tuple on
    failure.
    """
    data = request.get_json()
    print("[target_balance_external] input: {}".format(json.dumps(data)))
    if "actionFields" not in data:
        errmsg = "missing actionFields"
        print("[target_balance_external] ERROR: "+errmsg)
        return json.dumps({"errors": [{"status": "SKIP",
                                       "message": errmsg}]}), 400
    fields = data["actionFields"]
    # validates presence of the fields, strips spaces from account
    # numbers and converts the amount to a float
    errmsg = check_fields(False, fields)
    if errmsg:
        print("[target_balance_external] ERROR: "+errmsg)
        return json.dumps({"errors": [{"status": "SKIP",
                                       "message": errmsg}]}), 400

    # the account NL42BUNQ0123456789 is used for test payments
    if fields["account"] == "NL42BUNQ0123456789":
        return json.dumps({"data": [{"id": uuid.uuid4().hex}]})

    # retrieve balance (a string result is an error message)
    config = bunq.retrieve_config()
    balance = get_balance(config, fields["account"])
    if isinstance(balance, str):
        errmsg = balance
        print("[target_balance_external] ERROR: "+errmsg)
        return json.dumps({"errors": [{"status": "SKIP",
                                       "message": errmsg}]}), 400
    transfer_amount = fields["amount"] - balance

    # check for zero transfer
    # BUGFIX: test the calculated transfer amount, not the target amount;
    # the target amount is always positive here (enforced by check_fields),
    # so the old test against fields["amount"] could never trigger
    if "{:.2f}".format(transfer_amount) == "0.00":
        errmsg = "No transfer needed, balance already ok"
        print("[target_balance_external] ERROR: "+errmsg)
        return json.dumps({"errors": [{"status": "SKIP",
                                       "message": errmsg}]}), 400

    # get account id and check permission
    if transfer_amount > 0:
        # topping up sends a payment request, which has its own
        # permission flag in the config
        accid = None
        for acc in config["accounts"]:
            if acc["iban"] == fields["account"]:
                accid = acc["id"]
        enabled = False
        if "permissions" in config:
            if fields["account"] in config["permissions"]:
                if "PaymentRequest" in config["permissions"]\
                                       [fields["account"]]:
                    enabled = config["permissions"][fields["account"]]\
                              ["PaymentRequest"]
    else:
        # skimming sends money out, so check the draft payment permission
        accid, enabled = payment.check_source_account(False, True, config,
                                                      fields["account"])
    if accid is None:
        errmsg = "unknown account: "+fields["account"]
    if not enabled:
        errmsg = "Not permitted for account: "+fields["account"]
    if errmsg:
        print("[target_balance_external] ERROR: "+errmsg)
        return json.dumps({"errors": [{"status": "SKIP",
                                       "message": errmsg}]}), 400

    # send request / execute payment
    if transfer_amount > 0 and "top up" in fields["direction"]:
        # classify the counterparty value as email, phone number or IBAN
        bmvalue = fields["request_phone_email_iban"].replace(" ", "")
        if "@" in bmvalue:
            bmtype = "EMAIL"
        elif bmvalue[:1] == "+" and bmvalue[1:].isdecimal():
            bmtype = "PHONE_NUMBER"
        elif bmvalue[:2].isalpha() and bmvalue[2:4].isdecimal():
            bmtype = "IBAN"
        else:
            errmsg = "Unrecognized as email, phone or iban: "+bmvalue
            # log tag fixed: was "[request_inquiry]", inconsistent with
            # every other log line in this handler
            print("[target_balance_external] ERROR: "+errmsg)
            return json.dumps({"errors": [{"status": "SKIP",
                                           "message": errmsg}]}), 400
        msg = {
            "amount_inquired": {
                "value": "{:.2f}".format(transfer_amount),
                "currency": "EUR",
            },
            "counterparty_alias": {
                "type": bmtype,
                "name": bmvalue,
                "value": bmvalue
            },
            "description": fields["request_description"],
            "allow_bunqme": True,
        }
        print(json.dumps(msg))
        config = bunq.retrieve_config()
        result = bunq.post("v1/user/{}/monetary-account/{}/request-inquiry"
                           .format(config["user_id"], accid), msg, config)
    elif transfer_amount < 0 and "skim" in fields["direction"]:
        paymentmsg = {
            "amount": {
                "value": "{:.2f}".format(-transfer_amount),
                "currency": "EUR"
            },
            "counterparty_alias": {
                "type": "IBAN",
                "value": fields["payment_account"],
                "name": fields["payment_name"]
            },
            "description": fields["payment_description"]
        }
        print(paymentmsg)
        # external payments are always wrapped in a draft payment
        paymentmsg = {"number_of_required_accepts": 1,
                      "entries": [paymentmsg]}
        result = bunq.post("v1/user/{}/monetary-account/{}/draft-payment"
                           .format(config["user_id"], accid), paymentmsg)
    else:
        errmsg = "No transfer needed, balance already ok"
        print("[target_balance_external] ERROR: "+errmsg)
        return json.dumps({"errors": [{"status": "SKIP",
                                       "message": errmsg}]}), 400
    print(result)
    if "Error" in result:
        return json.dumps({"errors": [{
            "status": "SKIP",
            "message": result["Error"][0]["error_description"]
        }]}), 400
    return json.dumps({"data": [{
        "id": str(result["Response"][0]["Id"]["id"])}]})
def check_fields(internal, fields):
    """ Check the fields

    Verifies that all fields required for the internal (internal=True) or
    external (internal=False) action are present, strips spaces from the
    account numbers and converts the amount to a positive float.

    Note: fields is normalized in place.

    Returns an error message string, or None when all fields are valid.
    """
    # check expected fields
    if internal:
        expected_fields = ["account", "amount", "other_account", "direction",
                           "payment_type", "description"]
    else:
        expected_fields = ["account", "amount", "direction", "payment_account",
                           "payment_name", "payment_description",
                           "request_phone_email_iban", "request_description"]
    for field in expected_fields:
        if field not in fields:
            return "missing field: "+field

    # strip spaces from account numbers
    fields["account"] = fields["account"].replace(" ", "")
    if internal:
        fields["other_account"] = fields["other_account"].replace(" ", "")
    else:
        fields["payment_account"] = fields["payment_account"].replace(" ", "")

    # check amount
    try:
        orig = fields["amount"]
        # BUGFIX: also catch TypeError so a non-numeric JSON value
        # (None, list, dict) is reported as invalid instead of crashing
        fields["amount"] = float(fields["amount"])
    except (ValueError, TypeError):
        fields["amount"] = -1
    # "not > 0" instead of "<= 0" so NaN is rejected too; str(orig)
    # so a numeric (non-string) original amount can be concatenated
    if not fields["amount"] > 0:
        return "only positive amounts allowed: "+str(orig)
    return None
balance print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\":", "balance config = bunq.retrieve_config() balance = get_balance(config, fields[\"account\"]) if isinstance(balance, str): errmsg =", "bmtype = \"EMAIL\" elif bmvalue[:1] == \"+\" and bmvalue[1:].isdecimal(): bmtype = \"PHONE_NUMBER\" elif", ", 400 print(paymentmsg) # get id and check permissions if fields[\"payment_type\"] == \"DIRECT\":", "float(fields[\"amount\"]) except ValueError: fields[\"amount\"] = -1 if fields[\"amount\"] <= 0: return \"only positive", "target_balance_internal(): \"\"\" Execute a target balance internal action \"\"\" data = request.get_json() print(\"[target_balance_internal]", "[{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400 fields = data[\"actionFields\"] errmsg = check_fields(True, fields)", "if account not in balances: return \"Account balance not found \"+account return \"Account", "is None: errmsg = \"unknown account: \"+fields[\"account\"] if not enabled: errmsg = \"Not", "if internal: expected_fields = [\"account\", \"amount\", \"other_account\", \"direction\", \"payment_type\", \"description\"] else: expected_fields =", "{}\".format(json.dumps(data))) if \"actionFields\" not in data: errmsg = \"missing actionFields\" print(\"[target_balance_internal] ERROR: \"+errmsg)", "= \"EMAIL\" elif bmvalue[:1] == \"+\" and bmvalue[1:].isdecimal(): bmtype = \"PHONE_NUMBER\" elif bmvalue[:2].isalpha()", "\"EMAIL\" elif bmvalue[:1] == \"+\" and bmvalue[1:].isdecimal(): bmtype = \"PHONE_NUMBER\" elif bmvalue[:2].isalpha() and", "print(\"[target_balance_external] input: {}\".format(json.dumps(data))) if \"actionFields\" not in data: errmsg = \"missing actionFields\" print(\"[target_balance_external]", "return json.dumps({\"data\": [{ \"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def target_balance_external(): \"\"\" Execute a target balance external", "\"message\": 
result[\"Error\"][0][\"error_description\"] }]}), 400 return json.dumps({\"data\": [{ \"id\": str(result[\"Response\"][0][\"Id\"][\"id\"])}]}) def target_balance_external(): \"\"\" Execute", "Check the fields \"\"\" # check expected fields if internal: expected_fields = [\"account\",", "errmsg}]})\\ , 400 transfer_amount = fields[\"amount\"] - balance # check for zero transfer", "strip spaces from account numbers fields[\"account\"] = fields[\"account\"].replace(\" \", \"\") if internal: fields[\"other_account\"]", "if accid is None: errmsg = \"unknown account: \"+fields[\"account\"] if not enabled: errmsg", "already ok\" print(\"[target_balance_internal] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\": \"SKIP\", \"message\": errmsg}]})\\ , 400", "bmvalue: bmtype = \"EMAIL\" elif bmvalue[:1] == \"+\" and bmvalue[1:].isdecimal(): bmtype = \"PHONE_NUMBER\"", "400 # execute the payment if fields[\"payment_type\"] == \"DIRECT\": result = bunq.post(\"v1/user/{}/monetary-account/{}/payment\" .format(config[\"user_id\"],", "fields[\"account\"]) if isinstance(balance, str): errmsg = balance print(\"[target_balance_external] ERROR: \"+errmsg) return json.dumps({\"errors\": [{\"status\":", "config = bunq.retrieve_config() balance = get_balance(config, fields[\"account\"]) if isinstance(balance, str): errmsg = balance" ]
[ "setup( name='malloc_tracer', version=get_version(), description='This is a debugging tool for tracing malloc that occurs", "without importing, which avoids dependency issues def get_version(): with open('malloc_tracer/version.py') as version_file: return", "malloc that occurs inside a function or class.', long_description=_long_description(), author='Hasenpfote', author_email='<EMAIL>', url='https://github.com/Hasenpfote/malloc_tracer', download_url='',", "Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language", "def _long_description(): with open('README.rst', 'r') as f: return f.read() if __name__ == '__main__':", "open('malloc_tracer/version.py') as version_file: return re.search(r\"\"\"__version__\\s+=\\s+(['\"])(?P<version>.+?)\\1\"\"\", version_file.read()).group('version') def _long_description(): with open('README.rst', 'r') as f:", "version_file: return re.search(r\"\"\"__version__\\s+=\\s+(['\"])(?P<version>.+?)\\1\"\"\", version_file.read()).group('version') def _long_description(): with open('README.rst', 'r') as f: return f.read()", "a debugging tool for tracing malloc that occurs inside a function or class.',", "Environment', 'Intended Audience :: Developers', 'Topic :: Software Development', 'Topic :: Utilities' ],", "Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language", "3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5',", "System :: OS Independent', 'Development Status :: 5 - Production/Stable', 'Environment :: Other", "class.', long_description=_long_description(), author='Hasenpfote', author_email='<EMAIL>', url='https://github.com/Hasenpfote/malloc_tracer', download_url='', packages = ['malloc_tracer'], keywords=['debug', 'debugging-tool', 'tracemalloc'], classifiers=[", "which avoids dependency issues def get_version(): with open('malloc_tracer/version.py') as version_file: return 
re.search(r\"\"\"__version__\\s+=\\s+(['\"])(?P<version>.+?)\\1\"\"\", version_file.read()).group('version')", ":: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python ::", "import setup # Get version without importing, which avoids dependency issues def get_version():", "function or class.', long_description=_long_description(), author='Hasenpfote', author_email='<EMAIL>', url='https://github.com/Hasenpfote/malloc_tracer', download_url='', packages = ['malloc_tracer'], keywords=['debug', 'debugging-tool',", "Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language", ":: 3.7', 'License :: OSI Approved :: MIT License', 'Operating System :: OS", "issues def get_version(): with open('malloc_tracer/version.py') as version_file: return re.search(r\"\"\"__version__\\s+=\\s+(['\"])(?P<version>.+?)\\1\"\"\", version_file.read()).group('version') def _long_description(): with", "a function or class.', long_description=_long_description(), author='Hasenpfote', author_email='<EMAIL>', url='https://github.com/Hasenpfote/malloc_tracer', download_url='', packages = ['malloc_tracer'], keywords=['debug',", ":: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python ::", "f: return f.read() if __name__ == '__main__': setup( name='malloc_tracer', version=get_version(), description='This is a", "Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python", "as version_file: return re.search(r\"\"\"__version__\\s+=\\s+(['\"])(?P<version>.+?)\\1\"\"\", version_file.read()).group('version') def _long_description(): with open('README.rst', 'r') as f: return", ":: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python ::", "OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Development Status ::", ":: Developers', 'Topic :: Software Development', 'Topic :: Utilities' ], python_requires='>=3.4', install_requires=[], )", "occurs inside a 
function or class.', long_description=_long_description(), author='Hasenpfote', author_email='<EMAIL>', url='https://github.com/Hasenpfote/malloc_tracer', download_url='', packages =", "author_email='<EMAIL>', url='https://github.com/Hasenpfote/malloc_tracer', download_url='', packages = ['malloc_tracer'], keywords=['debug', 'debugging-tool', 'tracemalloc'], classifiers=[ 'Programming Language ::", "setuptools import setup # Get version without importing, which avoids dependency issues def", "= ['malloc_tracer'], keywords=['debug', 'debugging-tool', 'tracemalloc'], classifiers=[ 'Programming Language :: Python', 'Programming Language ::", "3.6', 'Programming Language :: Python :: 3.7', 'License :: OSI Approved :: MIT", "keywords=['debug', 'debugging-tool', 'tracemalloc'], classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python ::", "or class.', long_description=_long_description(), author='Hasenpfote', author_email='<EMAIL>', url='https://github.com/Hasenpfote/malloc_tracer', download_url='', packages = ['malloc_tracer'], keywords=['debug', 'debugging-tool', 'tracemalloc'],", "'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language ::", "'__main__': setup( name='malloc_tracer', version=get_version(), description='This is a debugging tool for tracing malloc that", "3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7',", "that occurs inside a function or class.', long_description=_long_description(), author='Hasenpfote', author_email='<EMAIL>', url='https://github.com/Hasenpfote/malloc_tracer', download_url='', packages", "MIT License', 'Operating System :: OS Independent', 'Development Status :: 5 - Production/Stable',", "'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'License", "Get version without importing, which avoids dependency issues def get_version(): with open('malloc_tracer/version.py') as", ":: Python :: 3.4', 'Programming 
Language :: Python :: 3.5', 'Programming Language ::", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- import re from setuptools import setup", ":: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python ::", "description='This is a debugging tool for tracing malloc that occurs inside a function", "name='malloc_tracer', version=get_version(), description='This is a debugging tool for tracing malloc that occurs inside", "Approved :: MIT License', 'Operating System :: OS Independent', 'Development Status :: 5", "for tracing malloc that occurs inside a function or class.', long_description=_long_description(), author='Hasenpfote', author_email='<EMAIL>',", "Audience :: Developers', 'Topic :: Software Development', 'Topic :: Utilities' ], python_requires='>=3.4', install_requires=[],", "re from setuptools import setup # Get version without importing, which avoids dependency", "'Operating System :: OS Independent', 'Development Status :: 5 - Production/Stable', 'Environment ::", "setup # Get version without importing, which avoids dependency issues def get_version(): with", "__name__ == '__main__': setup( name='malloc_tracer', version=get_version(), description='This is a debugging tool for tracing", "debugging tool for tracing malloc that occurs inside a function or class.', long_description=_long_description(),", "# -*- coding: utf-8 -*- import re from setuptools import setup # Get", "Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python", "3.7', 'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent',", "avoids dependency issues def get_version(): with open('malloc_tracer/version.py') as version_file: return re.search(r\"\"\"__version__\\s+=\\s+(['\"])(?P<version>.+?)\\1\"\"\", version_file.read()).group('version') def", "version_file.read()).group('version') def _long_description(): with open('README.rst', 'r') as f: return f.read() if __name__ ==", ":: 3.6', 'Programming Language :: 
Python :: 3.7', 'License :: OSI Approved ::", "== '__main__': setup( name='malloc_tracer', version=get_version(), description='This is a debugging tool for tracing malloc", "Python :: 3.6', 'Programming Language :: Python :: 3.7', 'License :: OSI Approved", "dependency issues def get_version(): with open('malloc_tracer/version.py') as version_file: return re.search(r\"\"\"__version__\\s+=\\s+(['\"])(?P<version>.+?)\\1\"\"\", version_file.read()).group('version') def _long_description():", ":: Other Environment', 'Intended Audience :: Developers', 'Topic :: Software Development', 'Topic ::", "return re.search(r\"\"\"__version__\\s+=\\s+(['\"])(?P<version>.+?)\\1\"\"\", version_file.read()).group('version') def _long_description(): with open('README.rst', 'r') as f: return f.read() if", "'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming", "Status :: 5 - Production/Stable', 'Environment :: Other Environment', 'Intended Audience :: Developers',", "python3 # -*- coding: utf-8 -*- import re from setuptools import setup #", "classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language", "'Programming Language :: Python :: 3.7', 'License :: OSI Approved :: MIT License',", "- Production/Stable', 'Environment :: Other Environment', 'Intended Audience :: Developers', 'Topic :: Software", ":: 5 - Production/Stable', 'Environment :: Other Environment', 'Intended Audience :: Developers', 'Topic", ":: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Development Status", "'Environment :: Other Environment', 'Intended Audience :: Developers', 'Topic :: Software Development', 'Topic", "'tracemalloc'], classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Programming", "'r') as f: return f.read() if __name__ == '__main__': setup( name='malloc_tracer', version=get_version(), description='This", ":: Python :: 3.7', 'License :: OSI Approved 
:: MIT License', 'Operating System", "# Get version without importing, which avoids dependency issues def get_version(): with open('malloc_tracer/version.py')", "5 - Production/Stable', 'Environment :: Other Environment', 'Intended Audience :: Developers', 'Topic ::", "['malloc_tracer'], keywords=['debug', 'debugging-tool', 'tracemalloc'], classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python", "Production/Stable', 'Environment :: Other Environment', 'Intended Audience :: Developers', 'Topic :: Software Development',", "utf-8 -*- import re from setuptools import setup # Get version without importing,", "url='https://github.com/Hasenpfote/malloc_tracer', download_url='', packages = ['malloc_tracer'], keywords=['debug', 'debugging-tool', 'tracemalloc'], classifiers=[ 'Programming Language :: Python',", "open('README.rst', 'r') as f: return f.read() if __name__ == '__main__': setup( name='malloc_tracer', version=get_version(),", "tracing malloc that occurs inside a function or class.', long_description=_long_description(), author='Hasenpfote', author_email='<EMAIL>', url='https://github.com/Hasenpfote/malloc_tracer',", "_long_description(): with open('README.rst', 'r') as f: return f.read() if __name__ == '__main__': setup(", "'Intended Audience :: Developers', 'Topic :: Software Development', 'Topic :: Utilities' ], python_requires='>=3.4',", "Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'License ::", "OS Independent', 'Development Status :: 5 - Production/Stable', 'Environment :: Other Environment', 'Intended", "License', 'Operating System :: OS Independent', 'Development Status :: 5 - Production/Stable', 'Environment", "return f.read() if __name__ == '__main__': setup( name='malloc_tracer', version=get_version(), description='This is a debugging", "import re from setuptools import setup # Get version without importing, which avoids", "Independent', 'Development Status :: 5 - Production/Stable', 'Environment :: Other 
Environment', 'Intended Audience", "3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6',", "Other Environment', 'Intended Audience :: Developers', 'Topic :: Software Development', 'Topic :: Utilities'", "with open('malloc_tracer/version.py') as version_file: return re.search(r\"\"\"__version__\\s+=\\s+(['\"])(?P<version>.+?)\\1\"\"\", version_file.read()).group('version') def _long_description(): with open('README.rst', 'r') as", "is a debugging tool for tracing malloc that occurs inside a function or", "if __name__ == '__main__': setup( name='malloc_tracer', version=get_version(), description='This is a debugging tool for", "'debugging-tool', 'tracemalloc'], classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 3',", "importing, which avoids dependency issues def get_version(): with open('malloc_tracer/version.py') as version_file: return re.search(r\"\"\"__version__\\s+=\\s+(['\"])(?P<version>.+?)\\1\"\"\",", "download_url='', packages = ['malloc_tracer'], keywords=['debug', 'debugging-tool', 'tracemalloc'], classifiers=[ 'Programming Language :: Python', 'Programming", "with open('README.rst', 'r') as f: return f.read() if __name__ == '__main__': setup( name='malloc_tracer',", "def get_version(): with open('malloc_tracer/version.py') as version_file: return re.search(r\"\"\"__version__\\s+=\\s+(['\"])(?P<version>.+?)\\1\"\"\", version_file.read()).group('version') def _long_description(): with open('README.rst',", "version=get_version(), description='This is a debugging tool for tracing malloc that occurs inside a", "tool for tracing malloc that occurs inside a function or class.', long_description=_long_description(), author='Hasenpfote',", "Python :: 3.7', 'License :: OSI Approved :: MIT License', 'Operating System ::", "-*- coding: utf-8 -*- import re from setuptools import setup # Get version", "'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming", 
"from setuptools import setup # Get version without importing, which avoids dependency issues", "Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python", "long_description=_long_description(), author='Hasenpfote', author_email='<EMAIL>', url='https://github.com/Hasenpfote/malloc_tracer', download_url='', packages = ['malloc_tracer'], keywords=['debug', 'debugging-tool', 'tracemalloc'], classifiers=[ 'Programming", "coding: utf-8 -*- import re from setuptools import setup # Get version without", "version without importing, which avoids dependency issues def get_version(): with open('malloc_tracer/version.py') as version_file:", "re.search(r\"\"\"__version__\\s+=\\s+(['\"])(?P<version>.+?)\\1\"\"\", version_file.read()).group('version') def _long_description(): with open('README.rst', 'r') as f: return f.read() if __name__", "inside a function or class.', long_description=_long_description(), author='Hasenpfote', author_email='<EMAIL>', url='https://github.com/Hasenpfote/malloc_tracer', download_url='', packages = ['malloc_tracer'],", ":: OS Independent', 'Development Status :: 5 - Production/Stable', 'Environment :: Other Environment',", "f.read() if __name__ == '__main__': setup( name='malloc_tracer', version=get_version(), description='This is a debugging tool", "-*- import re from setuptools import setup # Get version without importing, which", "Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4',", "as f: return f.read() if __name__ == '__main__': setup( name='malloc_tracer', version=get_version(), description='This is", ":: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'License :: OSI", "Language :: Python', 'Programming Language :: Python :: 3', 'Programming Language :: Python", "get_version(): with open('malloc_tracer/version.py') as version_file: return re.search(r\"\"\"__version__\\s+=\\s+(['\"])(?P<version>.+?)\\1\"\"\", version_file.read()).group('version') def 
_long_description(): with open('README.rst', 'r')", "packages = ['malloc_tracer'], keywords=['debug', 'debugging-tool', 'tracemalloc'], classifiers=[ 'Programming Language :: Python', 'Programming Language", ":: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming Language ::", "'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.4', 'Programming", "'License :: OSI Approved :: MIT License', 'Operating System :: OS Independent', 'Development", ":: MIT License', 'Operating System :: OS Independent', 'Development Status :: 5 -", ":: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language ::", "author='Hasenpfote', author_email='<EMAIL>', url='https://github.com/Hasenpfote/malloc_tracer', download_url='', packages = ['malloc_tracer'], keywords=['debug', 'debugging-tool', 'tracemalloc'], classifiers=[ 'Programming Language", "'Development Status :: 5 - Production/Stable', 'Environment :: Other Environment', 'Intended Audience ::", "Language :: Python :: 3.7', 'License :: OSI Approved :: MIT License', 'Operating" ]
[ "== '__main__': # se abbiamo un token di mapbox nell'environment facciamo il geocoding", "continue match = ORARI_A_RE.match(line) if match: continue match = BLOCCO_NOTE_RE.match(line) if match: blocco_note", "num_orari = len(indirizzo['ore']) / 2 indirizzo['orario_affidabile'] = num_orari == len(indirizzo['giorni']) num_orari = int(num_orari)", "= BLOCCO_ASSOCIAZIONE_RE.match(line) if match: blocco_associazione = True continue match = INDIRIZZO_RE.match(line) if match:", "import json import os import re import sys import unittest import urllib.parse import", "token): mapbox_geocoding_v5 = \"https://api.mapbox.com/geocoding/v5/mapbox.places/\" url = \"{}{}.json?limit=1&country=IT&access_token={}\".format( mapbox_geocoding_v5, urllib.parse.quote(indirizzo, safe=\"\"), token, ) response", "feature = data[\"features\"][0] if \"address\" in feature[\"place_type\"]: if \"Torino Turin\" in feature[\"place_name\"] or", "= geocoding(indirizzo['indirizzo'], mapbox_token) indirizzo['posizione'] = posizione documento['dottori'] = dottori print(json.dumps(documento)) class ParseTestCase(unittest.TestCase): def", "feature[\"center\"] return None @functools.lru_cache(maxsize=128) def geocoding(indirizzo, token): # sostituiamo i sc. 
con scala", "facciamo il geocoding degli indirizzi mapbox_token = os.getenv(\"MAPBOX_ACCESS_TOKEN\") documento = { 'aggiornamento': None,", "indirizzi mapbox_token = os.getenv(\"MAPBOX_ACCESS_TOKEN\") documento = { 'aggiornamento': None, 'circoscrizione_numero': None, 'circoscrizione_nome': None,", "BLOCCO_NOTE_RE = re.compile(r\"Note\") def do_geocoding(indirizzo, token): mapbox_geocoding_v5 = \"https://api.mapbox.com/geocoding/v5/mapbox.places/\" url = \"{}{}.json?limit=1&country=IT&access_token={}\".format( mapbox_geocoding_v5,", "scala \") posizione = do_geocoding(indirizzo, token) if posizione: return posizione # se non", "return feature[\"center\"] return None @functools.lru_cache(maxsize=128) def geocoding(indirizzo, token): # sostituiamo i sc. con", "\"address\" in feature[\"place_type\"]: if \"Torino Turin\" in feature[\"place_name\"] or \"Collegno\" in feature[\"place_name\"]: return", "'circoscrizione_nome': None, 'mmg': None, 'dottori': None, } dottori = [] dottore = None", "AGGIORNAMENTO_RE.match(line) if match: match_dict = match.groupdict() update = datetime.date( int(match_dict['anno']), MESI[match_dict['mese'].lower()], int(match_dict['giorno']) )", "match_dict['nome'] continue match = MMG_RE.match(line) if match: documento['mmg'] = True continue match =", "match.groupdict() update = datetime.date( int(match_dict['anno']), MESI[match_dict['mese'].lower()], int(match_dict['giorno']) ) documento['aggiornamento'] = update.isoformat() continue match", "per farli piacere a mapbox indirizzo = indirizzo.replace(\" sc. 
\", \" scala \")", "class ParseTestCase(unittest.TestCase): def test_nome_dottore_deve_fare_il_match_degli_apostrofi(self): match = NOME_DOTTORE_RE.match(\"NUR ADDO' [01234]\") match_dict = match.groupdict() self.assertEqual(match_dict,", "= update.isoformat() continue match = CIRCOSCRIZIONE_RE.match(line) if match: match_dict = match.groupdict() documento['circoscrizione_numero'] =", "ORARI_DA_RE.match(line) if match: continue match = ORARIO_RE.match(line) if match: match_dict = match.groupdict() indirizzo['ore'].append(match_dict['orario'])", "orario in zip(indirizzo['giorni'], orari) ] else: indirizzo['orari'] = [{'giorno': None, 'da': da, 'a':", "if match: match_dict = match.groupdict() indirizzo['giorni'].append(match_dict['giorno']) continue match = ORARI_DA_RE.match(line) if match: continue", "correzione = [(k, v) for k, v in ESPANDI_INDIRIZZO.items() if k in indirizzo]", "urllib.parse.quote(indirizzo, safe=\"\"), token, ) response = requests.get(url) data = response.json() feature = data[\"features\"][0]", "\"{}{}.json?limit=1&country=IT&access_token={}\".format( mapbox_geocoding_v5, urllib.parse.quote(indirizzo, safe=\"\"), token, ) response = requests.get(url) data = response.json() feature", "None blocco_associazione = False blocco_note = False indirizzo = None for line in", "# se non abbiamo trovato l'indirizzo proviamo a sistemarlo a mano correzione =", "@functools.lru_cache(maxsize=128) def geocoding(indirizzo, token): # sostituiamo i sc. 
con scala per farli piacere", "def test_nome_dottore_deve_fare_il_match_degli_apostrofi(self): match = NOME_DOTTORE_RE.match(\"NUR ADDO' [01234]\") match_dict = match.groupdict() self.assertEqual(match_dict, {\"nome\": \"NUR", "match.groupdict() documento['circoscrizione_numero'] = match_dict['numero'] documento['circoscrizione_nome'] = match_dict['nome'] continue match = MMG_RE.match(line) if match:", "{ 'aggiornamento': None, 'circoscrizione_numero': None, 'circoscrizione_nome': None, 'mmg': None, 'dottori': None, } dottori", "if match: match_dict = match.groupdict() blocco_note = False if dottore: dottore['indirizzi'].append(indirizzo) dottori.append(dottore) indirizzo", "l'indirizzo proviamo a sistemarlo a mano correzione = [(k, v) for k, v", "'associazione': [], 'indirizzi': [], } continue match = BLOCCO_ASSOCIAZIONE_RE.match(line) if match: blocco_associazione =", "match: blocco_note = True continue if blocco_note: indirizzo['note'].append(line.strip()) continue print(line, file=sys.stderr) # l'ultimo", "len(indirizzo['giorni']) num_orari = int(num_orari) orari = [(indirizzo['ore'][i], indirizzo['ore'][i+num_orari]) for i in range(num_orari)] if", "dottori print(json.dumps(documento)) class ParseTestCase(unittest.TestCase): def test_nome_dottore_deve_fare_il_match_degli_apostrofi(self): match = NOME_DOTTORE_RE.match(\"NUR ADDO' [01234]\") match_dict =", "= requests.get(url) data = response.json() feature = data[\"features\"][0] if \"address\" in feature[\"place_type\"]: if", "match = AGGIORNAMENTO_RE.match(line) if match: match_dict = match.groupdict() update = datetime.date( int(match_dict['anno']), MESI[match_dict['mese'].lower()],", "continue match = ORARI_DA_RE.match(line) if match: continue match = ORARIO_RE.match(line) if match: match_dict", "= re.compile(r\"AGGIORNAMENTO: (?P<giorno>\\d+) (?P<mese>\\w+) (?P<anno>\\d+)\") CIRCOSCRIZIONE_RE = re.compile(r\"CIRCOSCRIZIONE (?P<numero>\\d+): (?P<nome>.+)\") MMG_RE = re.compile(r\"MMG\")", "CIRCOSCRIZIONE_RE 
= re.compile(r\"CIRCOSCRIZIONE (?P<numero>\\d+): (?P<nome>.+)\") MMG_RE = re.compile(r\"MMG\") NOME_DOTTORE_RE = re.compile(r\"(?P<nome>[\\w\\s']+) \\[(?P<codice>\\w+)\\]\") BLOCCO_ASSOCIAZIONE_RE", "ORARIO_RE = re.compile(r\"(?P<orario>\\d{2}:\\d{2})\") ORARI_A_RE = re.compile(r\"Alle\") BLOCCO_NOTE_RE = re.compile(r\"Note\") def do_geocoding(indirizzo, token): mapbox_geocoding_v5", "2 indirizzo['orario_affidabile'] = num_orari == len(indirizzo['giorni']) num_orari = int(num_orari) orari = [(indirizzo['ore'][i], indirizzo['ore'][i+num_orari])", "[], 'indirizzi': [], } continue match = BLOCCO_ASSOCIAZIONE_RE.match(line) if match: blocco_associazione = True", "NOME_DOTTORE_RE = re.compile(r\"(?P<nome>[\\w\\s']+) \\[(?P<codice>\\w+)\\]\") BLOCCO_ASSOCIAZIONE_RE = re.compile(r\"Associazione:\") INDIRIZZO_RE = re.compile(r\"(?P<indirizzo>.+) TORINO (?P<cap>\\d+) \\(TORINO\\)", "[(k, v) for k, v in ESPANDI_INDIRIZZO.items() if k in indirizzo] if correzione:", "geocoding(indirizzo['indirizzo'], mapbox_token) indirizzo['posizione'] = posizione documento['dottori'] = dottori print(json.dumps(documento)) class ParseTestCase(unittest.TestCase): def test_nome_dottore_deve_fare_il_match_degli_apostrofi(self):", "'telefono': [match_dict['telefono']], 'giorni': [], 'ore': [], 'note': [], } continue # ci sono", "feature[\"place_name\"]: return feature[\"center\"] return None @functools.lru_cache(maxsize=128) def geocoding(indirizzo, token): # sostituiamo i sc.", "= { 'aggiornamento': None, 'circoscrizione_numero': None, 'circoscrizione_nome': None, 'mmg': None, 'dottori': None, }", "4, 'maggio': 5, 'giugno': 6, 'luglio': 7, 'agosto': 8, 'settembre': 9, 'ottobre': 10,", "print(line, file=sys.stderr) # l'ultimo dottore if dottore: dottore['indirizzi'].append(indirizzo) dottori.append(dottore) indirizzo = None for", "dottore in dottori: for indirizzo in dottore['indirizzi']: # Proviamo a sistemare gli orari", "else: indirizzo['orari'] = [{'giorno': None, 'da': da, 'a': a} for da, a 
def do_geocoding(indirizzo, token):
    """Geocode *indirizzo* through the Mapbox Geocoding v5 API.

    Returns the ``center`` ([lon, lat]) of the first returned feature when it
    is a street address whose place name mentions Turin ("Torino Turin") or
    Collegno; otherwise returns None so the caller can try a fixed-up address.
    """
    mapbox_geocoding_v5 = "https://api.mapbox.com/geocoding/v5/mapbox.places/"
    url = "{}{}.json?limit=1&country=IT&access_token={}".format(
        mapbox_geocoding_v5,
        urllib.parse.quote(indirizzo, safe=""),
        token,
    )
    # Bounded timeout so one stalled API call cannot hang the whole run.
    response = requests.get(url, timeout=30)
    data = response.json()
    # Mapbox answers an unknown address with an empty "features" list: the
    # original data["features"][0] raised IndexError there, which broke the
    # manual-fixup fallback in geocoding().  Treat it as "not found" instead.
    features = data.get("features")
    if not features:
        return None
    feature = features[0]
    if "address" in feature["place_type"]:
        # Accept only hits actually in Turin (or Collegno, on the city
        # border); Mapbox may otherwise match a street elsewhere in Italy.
        if "Torino Turin" in feature["place_name"] or "Collegno" in feature["place_name"]:
            return feature["center"]
    return None
\", \" scala \") posizione = do_geocoding(indirizzo,", "indirizzo_corretto = indirizzo.replace(via, via_corretta) posizione = do_geocoding(indirizzo_corretto, token) if posizione: return posizione print(\"Geocoding", "= [{'giorno': None, 'da': da, 'a': a} for da, a in orari] if", "} dottori = [] dottore = None blocco_associazione = False blocco_note = False", "'mmg': None, 'dottori': None, } dottori = [] dottore = None blocco_associazione =", "None, } dottori = [] dottore = None blocco_associazione = False blocco_note =", "= data[\"features\"][0] if \"address\" in feature[\"place_type\"]: if \"Torino Turin\" in feature[\"place_name\"] or \"Collegno\"", "__name__ == '__main__': # se abbiamo un token di mapbox nell'environment facciamo il", "'ore': [], 'note': [], } continue # ci sono dottori senza associazione if", "import requests from fixups import ESPANDI_INDIRIZZO MESI = { 'gennaio': 1, 'febbraio': 2,", "match: continue match = GIORNO_RE.match(line) if match: match_dict = match.groupdict() indirizzo['giorni'].append(match_dict['giorno']) continue match", "\\[(?P<codice>\\w+)\\]\") BLOCCO_ASSOCIAZIONE_RE = re.compile(r\"Associazione:\") INDIRIZZO_RE = re.compile(r\"(?P<indirizzo>.+) TORINO (?P<cap>\\d+) \\(TORINO\\) Telefono: ?(?P<telefono>\\d*)?\") FAX_RE", "= [(k, v) for k, v in ESPANDI_INDIRIZZO.items() if k in indirizzo] if", "{ 'nome': match_dict['nome'], 'codice': match_dict['codice'], 'associazione': [], 'indirizzi': [], } continue match =", "None dottore = { 'nome': match_dict['nome'], 'codice': match_dict['codice'], 'associazione': [], 'indirizzi': [], }", "in zip(indirizzo['giorni'], orari) ] else: indirizzo['orari'] = [{'giorno': None, 'da': da, 'a': a}", "url = \"{}{}.json?limit=1&country=IT&access_token={}\".format( mapbox_geocoding_v5, urllib.parse.quote(indirizzo, safe=\"\"), token, ) response = requests.get(url) data =", "dottore['indirizzi'].append(indirizzo) dottori.append(dottore) indirizzo = None for dottore in dottori: for indirizzo in 
if __name__ == '__main__':
    # With a Mapbox token in the environment we also geocode each address;
    # without one, the positions are simply omitted.
    mapbox_token = os.getenv("MAPBOX_ACCESS_TOKEN")
    # Output skeleton; fields are filled in as their trigger lines are parsed.
    documento = {
        'aggiornamento': None,
        'circoscrizione_numero': None,
        'circoscrizione_nome': None,
        'mmg': None,
        'dottori': None,
    }
    dottori = []
    dottore = None                # doctor record currently being accumulated
    blocco_associazione = False   # True while inside an "Associazione:" block
    blocco_note = False           # True while inside a "Note" block
    indirizzo = None              # address record currently being accumulated
    for line in fileinput.input():
        # Drop form-feed characters (page breaks from the text extraction).
        line = line.strip('\x0c')
        if not line.strip():
            continue
        # "AGGIORNAMENTO: <day> <month-name> <year>" -> ISO document date.
        match = AGGIORNAMENTO_RE.match(line)
        if match:
            match_dict = match.groupdict()
            update = datetime.date(
                int(match_dict['anno']), MESI[match_dict['mese'].lower()], int(match_dict['giorno'])
            )
            documento['aggiornamento'] = update.isoformat()
            continue
        # "CIRCOSCRIZIONE <n>: <name>" -> district number and name.
        match = CIRCOSCRIZIONE_RE.match(line)
        if match:
            match_dict = match.groupdict()
            documento['circoscrizione_numero'] = match_dict['numero']
            documento['circoscrizione_nome'] = match_dict['nome']
            continue
        match = MMG_RE.match(line)
        if match:
            documento['mmg'] = True
            continue
        # "<NAME> [<code>]" starts a new doctor: flush the previous doctor
        # (and their pending address) before opening the new record.
        match = NOME_DOTTORE_RE.match(line)
        if match:
            match_dict = match.groupdict()
            blocco_note = False
            if dottore:
                dottore['indirizzi'].append(indirizzo)
                dottori.append(dottore)
                indirizzo = None
            dottore = {
                'nome': match_dict['nome'],
                'codice': match_dict['codice'],
                'associazione': [],
                'indirizzi': [],
            }
            continue
        match = BLOCCO_ASSOCIAZIONE_RE.match(line)
        if match:
            blocco_associazione = True
            continue
        # "<street> TORINO <cap> (TORINO) Telefono: ..." starts a new address:
        # flush the previous address of the current doctor first.
        match = INDIRIZZO_RE.match(line)
        if match:
            match_dict = match.groupdict()
            blocco_note = False
            blocco_associazione = False
            if indirizzo:
                dottore['indirizzi'].append(indirizzo)
            indirizzo = {
                'indirizzo': '{} {} TORINO'.format(match_dict['indirizzo'], match_dict['cap']),
                'telefono': [match_dict['telefono']],
                'giorni': [],
                'ore': [],
                'note': [],
            }
            continue
        # There are doctors without an association; while inside the
        # "Associazione:" block, collect its free-text lines.
        if blocco_associazione:
            dottore['associazione'].append(line.strip())
            continue
        # Fax numbers are recognized but deliberately discarded.
        match = FAX_RE.match(line)
        if match:
            continue
        match = TELEFONO_RE.match(line)
        if match:
            match_dict = match.groupdict()
            indirizzo['telefono'].append(match_dict['telefono'])
            continue
        # "Giorno" header of the schedule table: skip.
        match = BLOCCO_ORARI_RE.match(line)
        if match:
            continue
        match = GIORNO_RE.match(line)
        if match:
            match_dict = match.groupdict()
            indirizzo['giorni'].append(match_dict['giorno'])
            continue
        # "Dalle" / "Alle" column headers: skip; the HH:MM values themselves
        # are collected below.
        match = ORARI_DA_RE.match(line)
        if match:
            continue
        match = ORARIO_RE.match(line)
        if match:
            match_dict = match.groupdict()
            indirizzo['ore'].append(match_dict['orario'])
            continue
        match = ORARI_A_RE.match(line)
        if match:
            continue
        match = BLOCCO_NOTE_RE.match(line)
        if match:
            blocco_note = True
            continue
        if blocco_note:
            indirizzo['note'].append(line.strip())
            continue
        # Anything unrecognized is echoed to stderr for inspection.
        print(line, file=sys.stderr)
    # Flush the last doctor: there is no following name line to trigger it.
    if dottore:
        dottore['indirizzi'].append(indirizzo)
        dottori.append(dottore)
        indirizzo = None
    for dottore in dottori:
        for indirizzo in dottore['indirizzi']:
            # Try to fix up the opening hours.  'ore' appears to hold all the
            # opening times followed by all the closing times (pairing i with
            # i+num_orari below) — assumption from the "Dalle"/"Alle" table
            # layout; confirm against a sample input.
            num_orari = len(indirizzo['ore']) / 2
            # The schedule is "reliable" only when there is exactly one
            # (from, to) pair per listed day.
            indirizzo['orario_affidabile'] = num_orari == len(indirizzo['giorni'])
            num_orari = int(num_orari)
            orari = [(indirizzo['ore'][i], indirizzo['ore'][i+num_orari]) for i in range(num_orari)]
            if indirizzo['orario_affidabile']:
                indirizzo['orari'] = [
                    {'giorno': giorno, 'da': orario[0], 'a': orario[1]}
                    for giorno, orario in zip(indirizzo['giorni'], orari)
                ]
            else:
                # Pair counts don't line up with the days: keep the time
                # ranges but leave the day unknown.
                indirizzo['orari'] = [{'giorno': None, 'da': da, 'a': a} for da, a in orari]
            if mapbox_token:
                posizione = geocoding(indirizzo['indirizzo'], mapbox_token)
                indirizzo['posizione'] = posizione
    documento['dottori'] = dottori
    # Emit the whole parsed document as a single JSON object on stdout.
    print(json.dumps(documento))
match_dict = match.groupdict() blocco_note = False blocco_associazione", "BLOCCO_ASSOCIAZIONE_RE = re.compile(r\"Associazione:\") INDIRIZZO_RE = re.compile(r\"(?P<indirizzo>.+) TORINO (?P<cap>\\d+) \\(TORINO\\) Telefono: ?(?P<telefono>\\d*)?\") FAX_RE =", "12, } AGGIORNAMENTO_RE = re.compile(r\"AGGIORNAMENTO: (?P<giorno>\\d+) (?P<mese>\\w+) (?P<anno>\\d+)\") CIRCOSCRIZIONE_RE = re.compile(r\"CIRCOSCRIZIONE (?P<numero>\\d+): (?P<nome>.+)\")", "'marzo': 3, 'aprile': 4, 'maggio': 5, 'giugno': 6, 'luglio': 7, 'agosto': 8, 'settembre':", "import datetime import fileinput import functools import json import os import re import", "urllib.parse import requests from fixups import ESPANDI_INDIRIZZO MESI = { 'gennaio': 1, 'febbraio':", "'febbraio': 2, 'marzo': 3, 'aprile': 4, 'maggio': 5, 'giugno': 6, 'luglio': 7, 'agosto':", "ORARI_A_RE = re.compile(r\"Alle\") BLOCCO_NOTE_RE = re.compile(r\"Note\") def do_geocoding(indirizzo, token): mapbox_geocoding_v5 = \"https://api.mapbox.com/geocoding/v5/mapbox.places/\" url", "indirizzo['ore'].append(match_dict['orario']) continue match = ORARI_A_RE.match(line) if match: continue match = BLOCCO_NOTE_RE.match(line) if match:", "i sc. 
con scala per farli piacere a mapbox indirizzo = indirizzo.replace(\" sc.", "response.json() feature = data[\"features\"][0] if \"address\" in feature[\"place_type\"]: if \"Torino Turin\" in feature[\"place_name\"]", "'gennaio': 1, 'febbraio': 2, 'marzo': 3, 'aprile': 4, 'maggio': 5, 'giugno': 6, 'luglio':", "abbiamo trovato l'indirizzo proviamo a sistemarlo a mano correzione = [(k, v) for", "MESI = { 'gennaio': 1, 'febbraio': 2, 'marzo': 3, 'aprile': 4, 'maggio': 5,", "match.groupdict() indirizzo['giorni'].append(match_dict['giorno']) continue match = ORARI_DA_RE.match(line) if match: continue match = ORARIO_RE.match(line) if", "indirizzo['orario_affidabile'] = num_orari == len(indirizzo['giorni']) num_orari = int(num_orari) orari = [(indirizzo['ore'][i], indirizzo['ore'][i+num_orari]) for", "{'giorno': giorno, 'da': orario[0], 'a': orario[1]} for giorno, orario in zip(indirizzo['giorni'], orari) ]", "for dottore in dottori: for indirizzo in dottore['indirizzi']: # Proviamo a sistemare gli", "# l'ultimo dottore if dottore: dottore['indirizzi'].append(indirizzo) dottori.append(dottore) indirizzo = None for dottore in", "requests from fixups import ESPANDI_INDIRIZZO MESI = { 'gennaio': 1, 'febbraio': 2, 'marzo':", "None for dottore in dottori: for indirizzo in dottore['indirizzi']: # Proviamo a sistemare", "posizione = geocoding(indirizzo['indirizzo'], mapbox_token) indirizzo['posizione'] = posizione documento['dottori'] = dottori print(json.dumps(documento)) class ParseTestCase(unittest.TestCase):", "False blocco_associazione = False if indirizzo: dottore['indirizzi'].append(indirizzo) indirizzo = { 'indirizzo': '{} {}", "continue match = BLOCCO_NOTE_RE.match(line) if match: blocco_note = True continue if blocco_note: indirizzo['note'].append(line.strip())", "if dottore: dottore['indirizzi'].append(indirizzo) dottori.append(dottore) indirizzo = None for dottore in dottori: for indirizzo", "orario[0], 'a': orario[1]} for giorno, orario in zip(indirizzo['giorni'], 
orari) ] else: indirizzo['orari'] =", "True continue if blocco_note: indirizzo['note'].append(line.strip()) continue print(line, file=sys.stderr) # l'ultimo dottore if dottore:", "= posizione documento['dottori'] = dottori print(json.dumps(documento)) class ParseTestCase(unittest.TestCase): def test_nome_dottore_deve_fare_il_match_degli_apostrofi(self): match = NOME_DOTTORE_RE.match(\"NUR", "documento['circoscrizione_numero'] = match_dict['numero'] documento['circoscrizione_nome'] = match_dict['nome'] continue match = MMG_RE.match(line) if match: documento['mmg']", "token di mapbox nell'environment facciamo il geocoding degli indirizzi mapbox_token = os.getenv(\"MAPBOX_ACCESS_TOKEN\") documento", "= None for line in fileinput.input(): line = line.strip('\\x0c') if not line.strip(): continue", "a sistemarlo a mano correzione = [(k, v) for k, v in ESPANDI_INDIRIZZO.items()", "correzione: via, via_corretta = correzione[0] indirizzo_corretto = indirizzo.replace(via, via_corretta) posizione = do_geocoding(indirizzo_corretto, token)", "ESPANDI_INDIRIZZO MESI = { 'gennaio': 1, 'febbraio': 2, 'marzo': 3, 'aprile': 4, 'maggio':", "import re import sys import unittest import urllib.parse import requests from fixups import", "MESI[match_dict['mese'].lower()], int(match_dict['giorno']) ) documento['aggiornamento'] = update.isoformat() continue match = CIRCOSCRIZIONE_RE.match(line) if match: match_dict", "'nome': match_dict['nome'], 'codice': match_dict['codice'], 'associazione': [], 'indirizzi': [], } continue match = BLOCCO_ASSOCIAZIONE_RE.match(line)", "\\d+\") TELEFONO_RE = re.compile(r\"(TELEFONO.*:\\s*)?(?P<telefono>\\d+)$\") BLOCCO_ORARI_RE = re.compile(r\"Giorno\") GIORNO_RE = re.compile(r\"(?P<giorno>Lunedi|Martedi|Mercoledi|Giovedi|Venerdi|Sabato)\") ORARI_DA_RE = re.compile(r\"Dalle\")", "mapbox_geocoding_v5 = \"https://api.mapbox.com/geocoding/v5/mapbox.places/\" url = \"{}{}.json?limit=1&country=IT&access_token={}\".format( mapbox_geocoding_v5, 
urllib.parse.quote(indirizzo, safe=\"\"), token, ) response =", "# se abbiamo un token di mapbox nell'environment facciamo il geocoding degli indirizzi", "BLOCCO_NOTE_RE.match(line) if match: blocco_note = True continue if blocco_note: indirizzo['note'].append(line.strip()) continue print(line, file=sys.stderr)", "geocoding(indirizzo, token): # sostituiamo i sc. con scala per farli piacere a mapbox", "= TELEFONO_RE.match(line) if match: match_dict = match.groupdict() indirizzo['telefono'].append(match_dict['telefono']) continue match = BLOCCO_ORARI_RE.match(line) if", "match.groupdict() indirizzo['telefono'].append(match_dict['telefono']) continue match = BLOCCO_ORARI_RE.match(line) if match: continue match = GIORNO_RE.match(line) if", "match_dict = match.groupdict() update = datetime.date( int(match_dict['anno']), MESI[match_dict['mese'].lower()], int(match_dict['giorno']) ) documento['aggiornamento'] = update.isoformat()", "ci sono dottori senza associazione if blocco_associazione: dottore['associazione'].append(line.strip()) continue match = FAX_RE.match(line) if", "MMG_RE.match(line) if match: documento['mmg'] = True continue match = NOME_DOTTORE_RE.match(line) if match: match_dict", "in range(num_orari)] if indirizzo['orario_affidabile']: indirizzo['orari'] = [ {'giorno': giorno, 'da': orario[0], 'a': orario[1]}", "False if indirizzo: dottore['indirizzi'].append(indirizzo) indirizzo = { 'indirizzo': '{} {} TORINO'.format(match_dict['indirizzo'], match_dict['cap']), 'telefono':", "= CIRCOSCRIZIONE_RE.match(line) if match: match_dict = match.groupdict() documento['circoscrizione_numero'] = match_dict['numero'] documento['circoscrizione_nome'] = match_dict['nome']", "dottori = [] dottore = None blocco_associazione = False blocco_note = False indirizzo", "re.compile(r\"(?P<indirizzo>.+) TORINO (?P<cap>\\d+) \\(TORINO\\) Telefono: ?(?P<telefono>\\d*)?\") FAX_RE = re.compile(r\"FAX \\d+\") TELEFONO_RE = re.compile(r\"(TELEFONO.*:\\s*)?(?P<telefono>\\d+)$\")", 
"Telefono: ?(?P<telefono>\\d*)?\") FAX_RE = re.compile(r\"FAX \\d+\") TELEFONO_RE = re.compile(r\"(TELEFONO.*:\\s*)?(?P<telefono>\\d+)$\") BLOCCO_ORARI_RE = re.compile(r\"Giorno\") GIORNO_RE", "INDIRIZZO_RE.match(line) if match: match_dict = match.groupdict() blocco_note = False blocco_associazione = False if", "if posizione: return posizione # se non abbiamo trovato l'indirizzo proviamo a sistemarlo", "continue match = MMG_RE.match(line) if match: documento['mmg'] = True continue match = NOME_DOTTORE_RE.match(line)", "blocco_note = False blocco_associazione = False if indirizzo: dottore['indirizzi'].append(indirizzo) indirizzo = { 'indirizzo':", "posizione documento['dottori'] = dottori print(json.dumps(documento)) class ParseTestCase(unittest.TestCase): def test_nome_dottore_deve_fare_il_match_degli_apostrofi(self): match = NOME_DOTTORE_RE.match(\"NUR ADDO'", "unittest import urllib.parse import requests from fixups import ESPANDI_INDIRIZZO MESI = { 'gennaio':", "if match: continue match = ORARIO_RE.match(line) if match: match_dict = match.groupdict() indirizzo['ore'].append(match_dict['orario']) continue", "import urllib.parse import requests from fixups import ESPANDI_INDIRIZZO MESI = { 'gennaio': 1,", "TORINO'.format(match_dict['indirizzo'], match_dict['cap']), 'telefono': [match_dict['telefono']], 'giorni': [], 'ore': [], 'note': [], } continue #", "if posizione: return posizione print(\"Geocoding fallito per {}\".format(indirizzo), file=sys.stderr) return None if __name__", "do_geocoding(indirizzo, token) if posizione: return posizione # se non abbiamo trovato l'indirizzo proviamo", "import unittest import urllib.parse import requests from fixups import ESPANDI_INDIRIZZO MESI = {", "os.getenv(\"MAPBOX_ACCESS_TOKEN\") documento = { 'aggiornamento': None, 'circoscrizione_numero': None, 'circoscrizione_nome': None, 'mmg': None, 'dottori':", "AGGIORNAMENTO_RE = re.compile(r\"AGGIORNAMENTO: (?P<giorno>\\d+) (?P<mese>\\w+) (?P<anno>\\d+)\") CIRCOSCRIZIONE_RE = 
re.compile(r\"CIRCOSCRIZIONE (?P<numero>\\d+): (?P<nome>.+)\") MMG_RE =", "[(indirizzo['ore'][i], indirizzo['ore'][i+num_orari]) for i in range(num_orari)] if indirizzo['orario_affidabile']: indirizzo['orari'] = [ {'giorno': giorno,", "mano correzione = [(k, v) for k, v in ESPANDI_INDIRIZZO.items() if k in", "= os.getenv(\"MAPBOX_ACCESS_TOKEN\") documento = { 'aggiornamento': None, 'circoscrizione_numero': None, 'circoscrizione_nome': None, 'mmg': None,", "file=sys.stderr) # l'ultimo dottore if dottore: dottore['indirizzi'].append(indirizzo) dottori.append(dottore) indirizzo = None for dottore", "INDIRIZZO_RE = re.compile(r\"(?P<indirizzo>.+) TORINO (?P<cap>\\d+) \\(TORINO\\) Telefono: ?(?P<telefono>\\d*)?\") FAX_RE = re.compile(r\"FAX \\d+\") TELEFONO_RE", "[match_dict['telefono']], 'giorni': [], 'ore': [], 'note': [], } continue # ci sono dottori", "dottore = None blocco_associazione = False blocco_note = False indirizzo = None for", "if match: match_dict = match.groupdict() update = datetime.date( int(match_dict['anno']), MESI[match_dict['mese'].lower()], int(match_dict['giorno']) ) documento['aggiornamento']", "os import re import sys import unittest import urllib.parse import requests from fixups", "num_orari = int(num_orari) orari = [(indirizzo['ore'][i], indirizzo['ore'][i+num_orari]) for i in range(num_orari)] if indirizzo['orario_affidabile']:", "Turin\" in feature[\"place_name\"] or \"Collegno\" in feature[\"place_name\"]: return feature[\"center\"] return None @functools.lru_cache(maxsize=128) def", "(?P<numero>\\d+): (?P<nome>.+)\") MMG_RE = re.compile(r\"MMG\") NOME_DOTTORE_RE = re.compile(r\"(?P<nome>[\\w\\s']+) \\[(?P<codice>\\w+)\\]\") BLOCCO_ASSOCIAZIONE_RE = re.compile(r\"Associazione:\") INDIRIZZO_RE", "dottori.append(dottore) indirizzo = None for dottore in dottori: for indirizzo in dottore['indirizzi']: #", "match = NOME_DOTTORE_RE.match(\"NUR ADDO' [01234]\") match_dict = match.groupdict() self.assertEqual(match_dict, {\"nome\": \"NUR ADDO'\", 
\"codice\":", "v) for k, v in ESPANDI_INDIRIZZO.items() if k in indirizzo] if correzione: via,", "or \"Collegno\" in feature[\"place_name\"]: return feature[\"center\"] return None @functools.lru_cache(maxsize=128) def geocoding(indirizzo, token): #", "via_corretta = correzione[0] indirizzo_corretto = indirizzo.replace(via, via_corretta) posizione = do_geocoding(indirizzo_corretto, token) if posizione:", "for da, a in orari] if mapbox_token: posizione = geocoding(indirizzo['indirizzo'], mapbox_token) indirizzo['posizione'] =", "?(?P<telefono>\\d*)?\") FAX_RE = re.compile(r\"FAX \\d+\") TELEFONO_RE = re.compile(r\"(TELEFONO.*:\\s*)?(?P<telefono>\\d+)$\") BLOCCO_ORARI_RE = re.compile(r\"Giorno\") GIORNO_RE =", "from fixups import ESPANDI_INDIRIZZO MESI = { 'gennaio': 1, 'febbraio': 2, 'marzo': 3,", "match_dict = match.groupdict() documento['circoscrizione_numero'] = match_dict['numero'] documento['circoscrizione_nome'] = match_dict['nome'] continue match = MMG_RE.match(line)", "match: match_dict = match.groupdict() blocco_note = False if dottore: dottore['indirizzi'].append(indirizzo) dottori.append(dottore) indirizzo =", "orari) ] else: indirizzo['orari'] = [{'giorno': None, 'da': da, 'a': a} for da,", "re.compile(r\"Associazione:\") INDIRIZZO_RE = re.compile(r\"(?P<indirizzo>.+) TORINO (?P<cap>\\d+) \\(TORINO\\) Telefono: ?(?P<telefono>\\d*)?\") FAX_RE = re.compile(r\"FAX \\d+\")", "giorno, 'da': orario[0], 'a': orario[1]} for giorno, orario in zip(indirizzo['giorni'], orari) ] else:", "update.isoformat() continue match = CIRCOSCRIZIONE_RE.match(line) if match: match_dict = match.groupdict() documento['circoscrizione_numero'] = match_dict['numero']", "print(\"Geocoding fallito per {}\".format(indirizzo), file=sys.stderr) return None if __name__ == '__main__': # se", "if match: match_dict = match.groupdict() indirizzo['ore'].append(match_dict['orario']) continue match = ORARI_A_RE.match(line) if match: continue", "re.compile(r\"FAX \\d+\") TELEFONO_RE = 
re.compile(r\"(TELEFONO.*:\\s*)?(?P<telefono>\\d+)$\") BLOCCO_ORARI_RE = re.compile(r\"Giorno\") GIORNO_RE = re.compile(r\"(?P<giorno>Lunedi|Martedi|Mercoledi|Giovedi|Venerdi|Sabato)\") ORARI_DA_RE =", "associazione if blocco_associazione: dottore['associazione'].append(line.strip()) continue match = FAX_RE.match(line) if match: continue match =", "feature[\"place_name\"] or \"Collegno\" in feature[\"place_name\"]: return feature[\"center\"] return None @functools.lru_cache(maxsize=128) def geocoding(indirizzo, token):", "int(match_dict['giorno']) ) documento['aggiornamento'] = update.isoformat() continue match = CIRCOSCRIZIONE_RE.match(line) if match: match_dict =", "= True continue match = NOME_DOTTORE_RE.match(line) if match: match_dict = match.groupdict() blocco_note =", "import fileinput import functools import json import os import re import sys import", "BLOCCO_ORARI_RE = re.compile(r\"Giorno\") GIORNO_RE = re.compile(r\"(?P<giorno>Lunedi|Martedi|Mercoledi|Giovedi|Venerdi|Sabato)\") ORARI_DA_RE = re.compile(r\"Dalle\") ORARIO_RE = re.compile(r\"(?P<orario>\\d{2}:\\d{2})\") ORARI_A_RE", "= { 'indirizzo': '{} {} TORINO'.format(match_dict['indirizzo'], match_dict['cap']), 'telefono': [match_dict['telefono']], 'giorni': [], 'ore': [],", "match = BLOCCO_NOTE_RE.match(line) if match: blocco_note = True continue if blocco_note: indirizzo['note'].append(line.strip()) continue", "'luglio': 7, 'agosto': 8, 'settembre': 9, 'ottobre': 10, 'novembre': 11, 'dicembre': 12, }", "'giorni': [], 'ore': [], 'note': [], } continue # ci sono dottori senza", "match = BLOCCO_ASSOCIAZIONE_RE.match(line) if match: blocco_associazione = True continue match = INDIRIZZO_RE.match(line) if", "continue match = BLOCCO_ORARI_RE.match(line) if match: continue match = GIORNO_RE.match(line) if match: match_dict", "int(num_orari) orari = [(indirizzo['ore'][i], indirizzo['ore'][i+num_orari]) for i in range(num_orari)] if indirizzo['orario_affidabile']: indirizzo['orari'] =", "indirizzo = None for 
line in fileinput.input(): line = line.strip('\\x0c') if not line.strip():", "= False blocco_note = False indirizzo = None for line in fileinput.input(): line", "blocco_associazione: dottore['associazione'].append(line.strip()) continue match = FAX_RE.match(line) if match: continue match = TELEFONO_RE.match(line) if", "match = CIRCOSCRIZIONE_RE.match(line) if match: match_dict = match.groupdict() documento['circoscrizione_numero'] = match_dict['numero'] documento['circoscrizione_nome'] =", "scala per farli piacere a mapbox indirizzo = indirizzo.replace(\" sc. \", \" scala", "} continue # ci sono dottori senza associazione if blocco_associazione: dottore['associazione'].append(line.strip()) continue match", "do_geocoding(indirizzo_corretto, token) if posizione: return posizione print(\"Geocoding fallito per {}\".format(indirizzo), file=sys.stderr) return None", "token) if posizione: return posizione print(\"Geocoding fallito per {}\".format(indirizzo), file=sys.stderr) return None if", "'giugno': 6, 'luglio': 7, 'agosto': 8, 'settembre': 9, 'ottobre': 10, 'novembre': 11, 'dicembre':", "return posizione print(\"Geocoding fallito per {}\".format(indirizzo), file=sys.stderr) return None if __name__ == '__main__':", "abbiamo un token di mapbox nell'environment facciamo il geocoding degli indirizzi mapbox_token =", "orari = [(indirizzo['ore'][i], indirizzo['ore'][i+num_orari]) for i in range(num_orari)] if indirizzo['orario_affidabile']: indirizzo['orari'] = [", "datetime.date( int(match_dict['anno']), MESI[match_dict['mese'].lower()], int(match_dict['giorno']) ) documento['aggiornamento'] = update.isoformat() continue match = CIRCOSCRIZIONE_RE.match(line) if", "dottore['associazione'].append(line.strip()) continue match = FAX_RE.match(line) if match: continue match = TELEFONO_RE.match(line) if match:", "a sistemare gli orari num_orari = len(indirizzo['ore']) / 2 indirizzo['orario_affidabile'] = num_orari ==", "= re.compile(r\"Giorno\") GIORNO_RE = 
re.compile(r\"(?P<giorno>Lunedi|Martedi|Mercoledi|Giovedi|Venerdi|Sabato)\") ORARI_DA_RE = re.compile(r\"Dalle\") ORARIO_RE = re.compile(r\"(?P<orario>\\d{2}:\\d{2})\") ORARI_A_RE =", "= \"{}{}.json?limit=1&country=IT&access_token={}\".format( mapbox_geocoding_v5, urllib.parse.quote(indirizzo, safe=\"\"), token, ) response = requests.get(url) data = response.json()", "match_dict['nome'], 'codice': match_dict['codice'], 'associazione': [], 'indirizzi': [], } continue match = BLOCCO_ASSOCIAZIONE_RE.match(line) if", "indirizzo['note'].append(line.strip()) continue print(line, file=sys.stderr) # l'ultimo dottore if dottore: dottore['indirizzi'].append(indirizzo) dottori.append(dottore) indirizzo =", "indirizzo['posizione'] = posizione documento['dottori'] = dottori print(json.dumps(documento)) class ParseTestCase(unittest.TestCase): def test_nome_dottore_deve_fare_il_match_degli_apostrofi(self): match =", "token, ) response = requests.get(url) data = response.json() feature = data[\"features\"][0] if \"address\"", "6, 'luglio': 7, 'agosto': 8, 'settembre': 9, 'ottobre': 10, 'novembre': 11, 'dicembre': 12,", "se abbiamo un token di mapbox nell'environment facciamo il geocoding degli indirizzi mapbox_token", "True continue match = INDIRIZZO_RE.match(line) if match: match_dict = match.groupdict() blocco_note = False", "indirizzo = indirizzo.replace(\" sc. 
\", \" scala \") posizione = do_geocoding(indirizzo, token) if", "[] dottore = None blocco_associazione = False blocco_note = False indirizzo = None", "re.compile(r\"Giorno\") GIORNO_RE = re.compile(r\"(?P<giorno>Lunedi|Martedi|Mercoledi|Giovedi|Venerdi|Sabato)\") ORARI_DA_RE = re.compile(r\"Dalle\") ORARIO_RE = re.compile(r\"(?P<orario>\\d{2}:\\d{2})\") ORARI_A_RE = re.compile(r\"Alle\")", "9, 'ottobre': 10, 'novembre': 11, 'dicembre': 12, } AGGIORNAMENTO_RE = re.compile(r\"AGGIORNAMENTO: (?P<giorno>\\d+) (?P<mese>\\w+)", "8, 'settembre': 9, 'ottobre': 10, 'novembre': 11, 'dicembre': 12, } AGGIORNAMENTO_RE = re.compile(r\"AGGIORNAMENTO:", "continue match = CIRCOSCRIZIONE_RE.match(line) if match: match_dict = match.groupdict() documento['circoscrizione_numero'] = match_dict['numero'] documento['circoscrizione_nome']", "= match_dict['nome'] continue match = MMG_RE.match(line) if match: documento['mmg'] = True continue match", "match: continue match = ORARIO_RE.match(line) if match: match_dict = match.groupdict() indirizzo['ore'].append(match_dict['orario']) continue match", "= num_orari == len(indirizzo['giorni']) num_orari = int(num_orari) orari = [(indirizzo['ore'][i], indirizzo['ore'][i+num_orari]) for i", "ESPANDI_INDIRIZZO.items() if k in indirizzo] if correzione: via, via_corretta = correzione[0] indirizzo_corretto =", "in fileinput.input(): line = line.strip('\\x0c') if not line.strip(): continue match = AGGIORNAMENTO_RE.match(line) if", "'ottobre': 10, 'novembre': 11, 'dicembre': 12, } AGGIORNAMENTO_RE = re.compile(r\"AGGIORNAMENTO: (?P<giorno>\\d+) (?P<mese>\\w+) (?P<anno>\\d+)\")", "{ 'gennaio': 1, 'febbraio': 2, 'marzo': 3, 'aprile': 4, 'maggio': 5, 'giugno': 6,", "requests.get(url) data = response.json() feature = data[\"features\"][0] if \"address\" in feature[\"place_type\"]: if \"Torino", "indirizzo.replace(via, via_corretta) posizione = do_geocoding(indirizzo_corretto, token) if posizione: return posizione print(\"Geocoding fallito per", "orario[1]} 
for giorno, orario in zip(indirizzo['giorni'], orari) ] else: indirizzo['orari'] = [{'giorno': None,", "re.compile(r\"(?P<orario>\\d{2}:\\d{2})\") ORARI_A_RE = re.compile(r\"Alle\") BLOCCO_NOTE_RE = re.compile(r\"Note\") def do_geocoding(indirizzo, token): mapbox_geocoding_v5 = \"https://api.mapbox.com/geocoding/v5/mapbox.places/\"", "posizione: return posizione print(\"Geocoding fallito per {}\".format(indirizzo), file=sys.stderr) return None if __name__ ==", "import sys import unittest import urllib.parse import requests from fixups import ESPANDI_INDIRIZZO MESI", "if \"address\" in feature[\"place_type\"]: if \"Torino Turin\" in feature[\"place_name\"] or \"Collegno\" in feature[\"place_name\"]:", "2, 'marzo': 3, 'aprile': 4, 'maggio': 5, 'giugno': 6, 'luglio': 7, 'agosto': 8,", "match: match_dict = match.groupdict() indirizzo['giorni'].append(match_dict['giorno']) continue match = ORARI_DA_RE.match(line) if match: continue match", "blocco_note: indirizzo['note'].append(line.strip()) continue print(line, file=sys.stderr) # l'ultimo dottore if dottore: dottore['indirizzi'].append(indirizzo) dottori.append(dottore) indirizzo", "dottore['indirizzi']: # Proviamo a sistemare gli orari num_orari = len(indirizzo['ore']) / 2 indirizzo['orario_affidabile']", "'settembre': 9, 'ottobre': 10, 'novembre': 11, 'dicembre': 12, } AGGIORNAMENTO_RE = re.compile(r\"AGGIORNAMENTO: (?P<giorno>\\d+)", "documento['circoscrizione_nome'] = match_dict['nome'] continue match = MMG_RE.match(line) if match: documento['mmg'] = True continue", "None for line in fileinput.input(): line = line.strip('\\x0c') if not line.strip(): continue match", "= True continue match = INDIRIZZO_RE.match(line) if match: match_dict = match.groupdict() blocco_note =", "indirizzo['orari'] = [{'giorno': None, 'da': da, 'a': a} for da, a in orari]", "in orari] if mapbox_token: posizione = geocoding(indirizzo['indirizzo'], mapbox_token) indirizzo['posizione'] = posizione documento['dottori'] =", "if dottore: 
dottore['indirizzi'].append(indirizzo) dottori.append(dottore) indirizzo = None dottore = { 'nome': match_dict['nome'], 'codice':", "match_dict['numero'] documento['circoscrizione_nome'] = match_dict['nome'] continue match = MMG_RE.match(line) if match: documento['mmg'] = True", "if match: documento['mmg'] = True continue match = NOME_DOTTORE_RE.match(line) if match: match_dict =", "re.compile(r\"(TELEFONO.*:\\s*)?(?P<telefono>\\d+)$\") BLOCCO_ORARI_RE = re.compile(r\"Giorno\") GIORNO_RE = re.compile(r\"(?P<giorno>Lunedi|Martedi|Mercoledi|Giovedi|Venerdi|Sabato)\") ORARI_DA_RE = re.compile(r\"Dalle\") ORARIO_RE = re.compile(r\"(?P<orario>\\d{2}:\\d{2})\")", "(?P<cap>\\d+) \\(TORINO\\) Telefono: ?(?P<telefono>\\d*)?\") FAX_RE = re.compile(r\"FAX \\d+\") TELEFONO_RE = re.compile(r\"(TELEFONO.*:\\s*)?(?P<telefono>\\d+)$\") BLOCCO_ORARI_RE =", "re.compile(r\"(?P<nome>[\\w\\s']+) \\[(?P<codice>\\w+)\\]\") BLOCCO_ASSOCIAZIONE_RE = re.compile(r\"Associazione:\") INDIRIZZO_RE = re.compile(r\"(?P<indirizzo>.+) TORINO (?P<cap>\\d+) \\(TORINO\\) Telefono: ?(?P<telefono>\\d*)?\")", "'a': a} for da, a in orari] if mapbox_token: posizione = geocoding(indirizzo['indirizzo'], mapbox_token)", "= NOME_DOTTORE_RE.match(\"NUR ADDO' [01234]\") match_dict = match.groupdict() self.assertEqual(match_dict, {\"nome\": \"NUR ADDO'\", \"codice\": \"01234\"})", "file=sys.stderr) return None if __name__ == '__main__': # se abbiamo un token di", "= True continue if blocco_note: indirizzo['note'].append(line.strip()) continue print(line, file=sys.stderr) # l'ultimo dottore if", "line.strip(): continue match = AGGIORNAMENTO_RE.match(line) if match: match_dict = match.groupdict() update = datetime.date(", "'a': orario[1]} for giorno, orario in zip(indirizzo['giorni'], orari) ] else: indirizzo['orari'] = [{'giorno':", "degli indirizzi mapbox_token = os.getenv(\"MAPBOX_ACCESS_TOKEN\") documento = { 'aggiornamento': None, 'circoscrizione_numero': None, 'circoscrizione_nome':", "= [] dottore = None 
blocco_associazione = False blocco_note = False indirizzo =", "7, 'agosto': 8, 'settembre': 9, 'ottobre': 10, 'novembre': 11, 'dicembre': 12, } AGGIORNAMENTO_RE", "match: match_dict = match.groupdict() documento['circoscrizione_numero'] = match_dict['numero'] documento['circoscrizione_nome'] = match_dict['nome'] continue match =", "None, 'circoscrizione_nome': None, 'mmg': None, 'dottori': None, } dottori = [] dottore =", "blocco_associazione = False if indirizzo: dottore['indirizzi'].append(indirizzo) indirizzo = { 'indirizzo': '{} {} TORINO'.format(match_dict['indirizzo'],", "<gh_stars>1-10 import datetime import fileinput import functools import json import os import re", "[], 'note': [], } continue # ci sono dottori senza associazione if blocco_associazione:", "False indirizzo = None for line in fileinput.input(): line = line.strip('\\x0c') if not", "= None blocco_associazione = False blocco_note = False indirizzo = None for line", "if match: blocco_note = True continue if blocco_note: indirizzo['note'].append(line.strip()) continue print(line, file=sys.stderr) #", "correzione[0] indirizzo_corretto = indirizzo.replace(via, via_corretta) posizione = do_geocoding(indirizzo_corretto, token) if posizione: return posizione", "= BLOCCO_ORARI_RE.match(line) if match: continue match = GIORNO_RE.match(line) if match: match_dict = match.groupdict()", "blocco_note = False indirizzo = None for line in fileinput.input(): line = line.strip('\\x0c')", "= False if dottore: dottore['indirizzi'].append(indirizzo) dottori.append(dottore) indirizzo = None dottore = { 'nome':", "TORINO (?P<cap>\\d+) \\(TORINO\\) Telefono: ?(?P<telefono>\\d*)?\") FAX_RE = re.compile(r\"FAX \\d+\") TELEFONO_RE = re.compile(r\"(TELEFONO.*:\\s*)?(?P<telefono>\\d+)$\") BLOCCO_ORARI_RE", "'{} {} TORINO'.format(match_dict['indirizzo'], match_dict['cap']), 'telefono': [match_dict['telefono']], 'giorni': [], 'ore': [], 'note': [], }", "== len(indirizzo['giorni']) num_orari = int(num_orari) orari = 
[(indirizzo['ore'][i], indirizzo['ore'][i+num_orari]) for i in range(num_orari)]", "= do_geocoding(indirizzo, token) if posizione: return posizione # se non abbiamo trovato l'indirizzo", "indirizzo: dottore['indirizzi'].append(indirizzo) indirizzo = { 'indirizzo': '{} {} TORINO'.format(match_dict['indirizzo'], match_dict['cap']), 'telefono': [match_dict['telefono']], 'giorni':", "match_dict = match.groupdict() indirizzo['ore'].append(match_dict['orario']) continue match = ORARI_A_RE.match(line) if match: continue match =", "indirizzo['orario_affidabile']: indirizzo['orari'] = [ {'giorno': giorno, 'da': orario[0], 'a': orario[1]} for giorno, orario", "da, 'a': a} for da, a in orari] if mapbox_token: posizione = geocoding(indirizzo['indirizzo'],", "match.groupdict() indirizzo['ore'].append(match_dict['orario']) continue match = ORARI_A_RE.match(line) if match: continue match = BLOCCO_NOTE_RE.match(line) if", "if match: match_dict = match.groupdict() documento['circoscrizione_numero'] = match_dict['numero'] documento['circoscrizione_nome'] = match_dict['nome'] continue match", "dottore: dottore['indirizzi'].append(indirizzo) dottori.append(dottore) indirizzo = None for dottore in dottori: for indirizzo in", "{} TORINO'.format(match_dict['indirizzo'], match_dict['cap']), 'telefono': [match_dict['telefono']], 'giorni': [], 'ore': [], 'note': [], } continue", "= do_geocoding(indirizzo_corretto, token) if posizione: return posizione print(\"Geocoding fallito per {}\".format(indirizzo), file=sys.stderr) return", "= dottori print(json.dumps(documento)) class ParseTestCase(unittest.TestCase): def test_nome_dottore_deve_fare_il_match_degli_apostrofi(self): match = NOME_DOTTORE_RE.match(\"NUR ADDO' [01234]\") match_dict", "'indirizzo': '{} {} TORINO'.format(match_dict['indirizzo'], match_dict['cap']), 'telefono': [match_dict['telefono']], 'giorni': [], 'ore': [], 'note': [],", "a mano correzione = [(k, v) for k, v in ESPANDI_INDIRIZZO.items() if k", "di mapbox nell'environment 
facciamo il geocoding degli indirizzi mapbox_token = os.getenv(\"MAPBOX_ACCESS_TOKEN\") documento =", "= re.compile(r\"Dalle\") ORARIO_RE = re.compile(r\"(?P<orario>\\d{2}:\\d{2})\") ORARI_A_RE = re.compile(r\"Alle\") BLOCCO_NOTE_RE = re.compile(r\"Note\") def do_geocoding(indirizzo,", "None @functools.lru_cache(maxsize=128) def geocoding(indirizzo, token): # sostituiamo i sc. con scala per farli", "posizione # se non abbiamo trovato l'indirizzo proviamo a sistemarlo a mano correzione", "num_orari == len(indirizzo['giorni']) num_orari = int(num_orari) orari = [(indirizzo['ore'][i], indirizzo['ore'][i+num_orari]) for i in", "per {}\".format(indirizzo), file=sys.stderr) return None if __name__ == '__main__': # se abbiamo un", "documento['dottori'] = dottori print(json.dumps(documento)) class ParseTestCase(unittest.TestCase): def test_nome_dottore_deve_fare_il_match_degli_apostrofi(self): match = NOME_DOTTORE_RE.match(\"NUR ADDO' [01234]\")", "'indirizzi': [], } continue match = BLOCCO_ASSOCIAZIONE_RE.match(line) if match: blocco_associazione = True continue", "= AGGIORNAMENTO_RE.match(line) if match: match_dict = match.groupdict() update = datetime.date( int(match_dict['anno']), MESI[match_dict['mese'].lower()], int(match_dict['giorno'])", "if \"Torino Turin\" in feature[\"place_name\"] or \"Collegno\" in feature[\"place_name\"]: return feature[\"center\"] return None", "(?P<anno>\\d+)\") CIRCOSCRIZIONE_RE = re.compile(r\"CIRCOSCRIZIONE (?P<numero>\\d+): (?P<nome>.+)\") MMG_RE = re.compile(r\"MMG\") NOME_DOTTORE_RE = re.compile(r\"(?P<nome>[\\w\\s']+) \\[(?P<codice>\\w+)\\]\")", "feature[\"place_type\"]: if \"Torino Turin\" in feature[\"place_name\"] or \"Collegno\" in feature[\"place_name\"]: return feature[\"center\"] return", "match: match_dict = match.groupdict() blocco_note = False blocco_associazione = False if indirizzo: dottore['indirizzi'].append(indirizzo)", "= match.groupdict() indirizzo['telefono'].append(match_dict['telefono']) continue match = 
BLOCCO_ORARI_RE.match(line) if match: continue match = GIORNO_RE.match(line)", "sys import unittest import urllib.parse import requests from fixups import ESPANDI_INDIRIZZO MESI =", "= None for dottore in dottori: for indirizzo in dottore['indirizzi']: # Proviamo a", "mapbox_token) indirizzo['posizione'] = posizione documento['dottori'] = dottori print(json.dumps(documento)) class ParseTestCase(unittest.TestCase): def test_nome_dottore_deve_fare_il_match_degli_apostrofi(self): match", "line = line.strip('\\x0c') if not line.strip(): continue match = AGGIORNAMENTO_RE.match(line) if match: match_dict", "= len(indirizzo['ore']) / 2 indirizzo['orario_affidabile'] = num_orari == len(indirizzo['giorni']) num_orari = int(num_orari) orari", "[{'giorno': None, 'da': da, 'a': a} for da, a in orari] if mapbox_token:", "(?P<nome>.+)\") MMG_RE = re.compile(r\"MMG\") NOME_DOTTORE_RE = re.compile(r\"(?P<nome>[\\w\\s']+) \\[(?P<codice>\\w+)\\]\") BLOCCO_ASSOCIAZIONE_RE = re.compile(r\"Associazione:\") INDIRIZZO_RE =", "\"Torino Turin\" in feature[\"place_name\"] or \"Collegno\" in feature[\"place_name\"]: return feature[\"center\"] return None @functools.lru_cache(maxsize=128)", "for indirizzo in dottore['indirizzi']: # Proviamo a sistemare gli orari num_orari = len(indirizzo['ore'])", "a in orari] if mapbox_token: posizione = geocoding(indirizzo['indirizzo'], mapbox_token) indirizzo['posizione'] = posizione documento['dottori']", "blocco_associazione = True continue match = INDIRIZZO_RE.match(line) if match: match_dict = match.groupdict() blocco_note", "if __name__ == '__main__': # se abbiamo un token di mapbox nell'environment facciamo", "return None @functools.lru_cache(maxsize=128) def geocoding(indirizzo, token): # sostituiamo i sc. 
con scala per", "indirizzo['orari'] = [ {'giorno': giorno, 'da': orario[0], 'a': orario[1]} for giorno, orario in", "[ {'giorno': giorno, 'da': orario[0], 'a': orario[1]} for giorno, orario in zip(indirizzo['giorni'], orari)", "indirizzo = None dottore = { 'nome': match_dict['nome'], 'codice': match_dict['codice'], 'associazione': [], 'indirizzi':", "= \"https://api.mapbox.com/geocoding/v5/mapbox.places/\" url = \"{}{}.json?limit=1&country=IT&access_token={}\".format( mapbox_geocoding_v5, urllib.parse.quote(indirizzo, safe=\"\"), token, ) response = requests.get(url)", "'aggiornamento': None, 'circoscrizione_numero': None, 'circoscrizione_nome': None, 'mmg': None, 'dottori': None, } dottori =", "= re.compile(r\"Alle\") BLOCCO_NOTE_RE = re.compile(r\"Note\") def do_geocoding(indirizzo, token): mapbox_geocoding_v5 = \"https://api.mapbox.com/geocoding/v5/mapbox.places/\" url =", "dottore['indirizzi'].append(indirizzo) dottori.append(dottore) indirizzo = None dottore = { 'nome': match_dict['nome'], 'codice': match_dict['codice'], 'associazione':", "if mapbox_token: posizione = geocoding(indirizzo['indirizzo'], mapbox_token) indirizzo['posizione'] = posizione documento['dottori'] = dottori print(json.dumps(documento))", "line.strip('\\x0c') if not line.strip(): continue match = AGGIORNAMENTO_RE.match(line) if match: match_dict = match.groupdict()", "continue match = TELEFONO_RE.match(line) if match: match_dict = match.groupdict() indirizzo['telefono'].append(match_dict['telefono']) continue match =", "if match: continue match = BLOCCO_NOTE_RE.match(line) if match: blocco_note = True continue if", "= { 'nome': match_dict['nome'], 'codice': match_dict['codice'], 'associazione': [], 'indirizzi': [], } continue match", "= re.compile(r\"CIRCOSCRIZIONE (?P<numero>\\d+): (?P<nome>.+)\") MMG_RE = re.compile(r\"MMG\") NOME_DOTTORE_RE = re.compile(r\"(?P<nome>[\\w\\s']+) \\[(?P<codice>\\w+)\\]\") BLOCCO_ASSOCIAZIONE_RE =" ]
[ "failed!' # fig = plt.figure('Spy Der',figsize=(10,4)) # ax1 = fig.add_subplot(111) # ax1.spy(ErMat,precision=50*step) #", "restore zeta: (include only induced velocity contrib.) Surf_in.zeta=Zeta0[ss_in].copy() Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives for", "# ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel %d to %d' %(ss_in,ss_out)) # ax3=fig.add_subplot(133) # ax3.spy(Dercoll_list[ss_out],precision=50*step)", "Surf_star.zeta=Zeta0_star[ss].copy() Surf_star.zeta[cc,0,nn] += perturb_vector[kk] Surf.generate_collocations() # COMPUTE THE DERIVATIVES Der_an = np.zeros(Surf.maps.K) Der_an", "Surf.gamma_dot=Gamma_dot0.copy() Surf.gamma_dot[mm,nn]+=step Surf.get_joukovski_unsteady() dF=(Surf.funst-F0)/step Dnum[:,pp]=dF.reshape(-1) # restore Surf.gamma_dot=Gamma_dot0.copy() ### verify ermax, ErAbs, ErRel=max_error_tensor(Ders_an[ss],Dnum)", "induced velocity at the panel segments. A copy of Surf is required to", "the analytica derivatives Der_an=Der_an_list[ss] # Copy to avoid modifying the original for other", "vertices Needs to be tested with a case that actually rotates \"\"\" print('-----------------------------", "ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step # Recompute get_ind_velocities_at_segments and recover the previous grid Surf.get_input_velocities_at_segments()", "at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound and wake%.2d\\tFDstep\\tErrAbs\\tErrRel'%ss_in) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert", "Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) V0=comp_vind(zetac,MS) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ### vertices for", "= 0 skew_omega = algebra.skew(Surf.omega) for mm in range(M): for nn in range(N):", "input surfs for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in] 
M_in,N_in=Surf_in.maps.M,Surf_in.maps.N # perturb for kk", "and relative error tensors, and the maximum error. @warning: The relative error tensor", "For each output surface, there induced velocity is computed, all other surfaces are", "ermax=np.max(ErAbs) # relative error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound and", "only induced velocity contrib.) Surf_in.gamma=Gammaw0[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out]", "Step change in input velocity is allocated to both u_ext and zeta_dot '''", "assert ermax<50*step, 'Test failed!' # fig = plt.figure('Spy Der',figsize=(10,4)) # ax1 = fig.add_subplot(111)", "field of external force Surf.u_ext[0,:,:]=Surf.u_ext[0,:,:]-20.0 Surf.u_ext[1,:,:]=Surf.u_ext[1,:,:]+60.0 Surf.u_ext[2,:,:]=Surf.u_ext[2,:,:]+30.0 Surf.u_ext=Surf.u_ext+np.random.rand(*Surf.u_ext.shape) ### analytical derivative # ind", "for ii in range(n_surf): Dervert_list_num.append(0.0*Dervert_list[ii]) # store reference grid Zeta0=[] Zeta0_star=[] for ss", "Zeta0=[] Zeta0_star=[] Fqs0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis", "Initialize Der_num = 0.0*Der_an # Loop through the different grid modifications (three directions", "in range(haero.data.aero.n_surf): # tsdata.omega.append(haero.data.structure.timestep_info[-1].for_vel[3:6]) MS=multisurfaces.MultiAeroGridSurfaces(tsdata) MS.get_normal_ind_velocities_at_collocation_points() MS.verify_non_penetration() MS.verify_aic_coll() MS.get_joukovski_qs() MS.verify_joukovski_qs() self.MS=MS def test_nc_dqcdzeta(self):", "elements are filtered out during the search for maximum error, and absolute error", "step size is reduced' print('------------------------------------------------------------ OK') if PlotFlag: pass # fig = plt.figure('Spy", "analytical 
Dercoll_list,Dervert_list=assembly.dfqsdvind_zeta(MS.Surfs,MS.Surfs_star) # allocate numerical Derlist_num=[] for ii in range(n_surf): sub=[] for jj", "# ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel %d' %(ss_in)) # #plt.show() # plt.close() def test_dfqsdvind_zeta(self):", "assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max_star.append(er_max) Surf.gamma=gamma0.copy() ### Warning:", "working correctly, relative error (%.3e) too high!' %ErRel # allocate numerical Derlist_num=[] for", "error, and absolute error is checked. ''' Eabs=np.abs(Pder_num-Pder_an) nnzvec=Pder_an!=0 Erel=np.zeros(Pder_an.shape) Erel[nnzvec]=np.abs(Eabs[nnzvec]/Pder_an[nnzvec]) # Relative", "if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() ### check error at colloc Dercoll_num=np.zeros((3,3)) for cc in range(3):", "error ermax=np.max(ErAbs) # relative error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out))", "sub=[] for jj in range(n_surf): sub.append(0.0*Dervert_list[ii][jj]) Derlist_num.append(sub) # store reference circulation and normal", "# relative error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] assert ermax_rel<1e-16,\\ 'option", "''' def comp_vind(zetac,MS): # comute induced velocity V=np.zeros((3,)) for ss in range(n_surf): Surf_in=MS.Surfs[ss]", "%(step,ermax,ermax_rel)) assert ermax<5e2*step and ermax_rel<50*step, 'Test failed!' # fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4))", "Er_max.append(er_max) def test_dfqsdvind_gamma(self): print('------------------------------ Testing assembly.dfqsdvind_gamma') MS=self.MS n_surf=MS.n_surf # analytical Der_list,Der_star_list=assembly.dfqsdvind_gamma(MS.Surfs,MS.Surfs_star) # allocate", "'Test failed!' 
# fig = plt.figure('Spy Der',figsize=(10,4)) # ax1 = fig.add_subplot(111) # ax1.spy(ErMat,precision=50*step)", "size' Er_max.append(er_max) def test_dfqsdzeta_omega(self): ''' Note: the get_joukovski_qs method re-computes the induced velocity", "as np import scipy.linalg as scalg import sharpy.utils.h5utils as h5utils import sharpy.linear.src.assembly as", "bound:' %ss) print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) ) assert er_max<5e1*step, 'Error", "%d' %(ss_in)) # #plt.show() # plt.close() def test_dfqsdvind_zeta(self): ''' For each output surface,", "Surf.u_ext=Surf.u_ext+np.random.rand(*Surf.u_ext.shape) ### analytical derivative # ind velocities computed already Surf.get_input_velocities_at_collocation_points() Der=assembly.uc_dncdzeta(Surf) ### numerical", "n_surf=MS.n_surf # Compute the anaytical derivative of the case Der_an_list=assembly.dfqsdzeta_omega(MS.Surfs,MS.Surfs_star) # Initialize Er_max=[]", "maximum error. @warning: The relative error tensor may contain NaN or Inf if", "haero = h5utils.readh5(fname) tsdata = haero.ts00000 # # Rotating cases # fname =", "Derlist_num.append(sub) # store reference circulation and normal induced velocities MS.get_normal_ind_velocities_at_collocation_points() Zeta0=[] Zeta0_star=[] Vind0=[]", "Surf.zeta[ind_3d]+=step # Recompute get_ind_velocities_at_segments and recover the previous grid Surf.get_input_velocities_at_segments() Surf.zeta=zeta0.copy() # Compute", "50 times step size' Er_max[ss]=er_max # assert error decreases with step size for", "to %d' %(ss_out,ss_out)) # #plt.show() # plt.close() def test_dfunstdgamma_dot(self): ''' Test derivative of", "os import copy import warnings import unittest import itertools import numpy as np", "# calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-5,] step=Steps[0] ###### bound for ss_in in range(n_surf):", "in range(n_surf): Der_an=Der_list[ss] Surf=copy.deepcopy(MS.Surfs[ss]) #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K 
Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() for step in", "Zero elements check iizero=np.abs(Pder_an)<1e-15 for der_here in Pder_num[iizero]: if np.abs(der_here)>err_max: err_max=der_here return err_max,", "to ensure they are re-computed for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] Surf_out.normals=N0[ss_out].copy() del Surf_out.u_ind_coll_norm", "new normal velocity Surf_pert=surface.AeroGridSurface(Surf.maps,zeta=zeta_pert, u_ext=Surf.u_ext,gamma=Surf.gamma) u_norm=Surf_pert.project_coll_to_normal(u_tot0) u_norm_vec=u_norm.reshape(-1,order='C') # FD derivative DerNum[:,jj]=(u_norm_vec-u_norm0_vec)/step er_max=np.max(np.abs(Der-DerNum)) print('FD", "df=(Surf.fqs-fqs0)/step Der_star_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_star_an-Der_star_num)) print('Surface %.2d - wake:' %ss) print('FD step: %.2e ---> Max", "steps. In fact, reducing # the step quickly introduced round-off error. # #", "Wake Surf.gamma=gamma0.copy() gammaw_TE0=Surf_star.gamma[0,:].copy() M_star,N_star=Surf_star.maps.M,Surf_star.maps.N K_star=Surf_star.maps.K for nn in range(N): pp=np.ravel_multi_index( (0,nn), (M_star,N_star)) gammaw_TE=gammaw_TE0.copy()", "pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gamma0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True)", "# Initialize to remove previous movements Surf.zeta=zeta0.copy() # Define DoFs where modifications will", "Erel[nnzvec]=np.abs(Eabs[nnzvec]/Pder_an[nnzvec]) # Relative error check: remove NaN and inf... iifinite=np.isfinite(Erel) err_max=0.0 for err_here", "setUp after this test # T.setUp() # T.test_dfqsdvind_gamma() # T.test_dfqsduinput() # T.test_dfqsdzeta_vrel0() #", "(3,M_in+1,N_in+1) ) # perturb bound. 
vertices and collocation Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step Surf_in.generate_collocations() # perturb", "kk in range(3*Kzeta): # Initialize to remove previous movements Surf.zeta=zeta0.copy() # Define DoFs", "are not affected. Needs to be tested with a case that actually rotates", "Dervert_list.append(dvert_b+dvert_w) # allocate numerical Dercoll_num=np.zeros((3,3)) Dervert_list_num=[] for ii in range(n_surf): Dervert_list_num.append(0.0*Dervert_list[ii]) # store", "# ax1.set_title('error abs %d to %d' %(ss_in,ss_out)) # ax2=fig.add_subplot(132) # ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error", "error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<50*step", "abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d\\t\\t\\tFDstep\\tErrAbs\\tErrRel'%(ss,)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step and ermax_rel<50*step, 'Test", "sub=[] sub_star=[] for jj in range(n_surf): sub.append(0.0*Der_list[ii][jj]) sub_star.append(0.0*Der_star_list[ii][jj]) Der_list_num.append(sub) Der_star_list_num.append(sub_star) # store reference", "ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Vind0.append(MS.Surfs[ss].u_ind_coll_norm.copy()) N0.append(MS.Surfs[ss].normals.copy()) # calculate vis FDs Steps=[1e-6,] step=Steps[0] ### loop input", "force Zeta0=[] Zeta0_star=[] Fqs0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate", "restore circulation: (include only induced velocity contrib.) 
Surf_in.gamma=Gamma0[ss_in].copy() # estimate derivatives for ss_out", "step=Steps[ss] for jj in range(3*Surf.maps.Kzeta): # perturb cc_pert=Surf.maps.ind_3d_vert_vect[0][jj] mm_pert=Surf.maps.ind_3d_vert_vect[1][jj] nn_pert=Surf.maps.ind_3d_vert_vect[2][jj] zeta_pert=zeta0.copy() zeta_pert[cc_pert,mm_pert,nn_pert]+=step #", "TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore", "wakes, only TE is displaced. ''' print('----------------------------- Testing assembly.test_nc_dqcdzeta') MS=self.MS n_surf=MS.n_surf # analytical", "Der_num=Dervert_list_num[ss_in] ermax,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) # relative error at max abs", "h5utils.readh5(fname) tsdata = haero.ts00000 # # Rotating cases # fname = './basic_rotating_wing/basic_wing.data.h5' #", "setUp(self): # select test case fname = os.path.dirname(os.path.abspath(__file__)) + '/h5input/goland_mod_Nsurf01_M003_N004_a040.aero_state.h5' haero = h5utils.readh5(fname)", "not correct' if __name__=='__main__': unittest.main() # T=Test_assembly() # T.setUp() # ### force equation", "allocate numerical # Derlist_num=[] # for ii in range(n_surf): # sub=[] # for", "# ### state equation terms # T.test_uc_dncdzeta() # T.test_nc_dqcdzeta() ### force equation (unsteady)", "reduced' # assert Er_max_star[ii]<Er_max_star[ii-1],\\ # 'Error not decreasing as FD step size is", "displaced. 
''' print('------------------------------- Testing assembly.dfqsdvind_zeta') MS=self.MS n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.dfqsdvind_zeta(MS.Surfs,MS.Surfs_star) # allocate numerical", "%(ss_out,ss_out)) # #plt.show() # plt.close() def test_dfunstdgamma_dot(self): ''' Test derivative of unsteady aerodynamic", "in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N perturb_vector = np.zeros(3*Surf.maps.Kzeta) # PERTURBATION OF THE SURFACE", "Ders_an=assembly.dfunstdgamma_dot(MS.Surfs) step=1e-6 Ders_num=[] n_surf=len(MS.Surfs) for ss in range(n_surf): Surf=MS.Surfs[ss] Kzeta,K=Surf.maps.Kzeta,Surf.maps.K M,N=Surf.maps.M,Surf.maps.N Dnum=np.zeros((3*Kzeta,K)) #", "n_surf=len(MS.Surfs) for ss in range(n_surf): Surf=MS.Surfs[ss] Kzeta,K=Surf.maps.Kzeta,Surf.maps.K M,N=Surf.maps.M,Surf.maps.N Dnum=np.zeros((3*Kzeta,K)) # get refernce values", "#Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() u_ext0=Surf.u_ext.copy() zeta_dot0=Surf.zeta_dot.copy() for step in Steps: Der_num=0.0*Der_an for", "is required to ensure that other tests are not affected. Needs to be", "for other tests Surf=copy.deepcopy(MS.Surfs[ss]) # Define variables M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta # Save the", "print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<50*step and ermax_rel<50*step, embed()#'Test failed!' # fig=plt.figure('Spy Er vs coll", "er_max<5e1*step, 'Error larger than 50 times step size' Er_max.append(er_max) def test_dfqsdzeta_omega(self): ''' Note:", "changes in panel circulation. 
Warning: test assumes the derivative of the unsteady force", "''' print('------------------------------ Testing assembly.dfqsdzeta_vrel0') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsdzeta_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf):", "grid Zeta0=[] Zeta0_star=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) V0=comp_vind(zetac,MS) # calculate vis", "= fig.add_subplot(111) # ax1.spy(ErMat,precision=50*step) # plt.show() def test_dvinddzeta(self): ''' For each output surface,", "1e-6] nsteps = len(Steps) error = np.zeros((nsteps,)) for istep in range(nsteps): step =", "max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d\\t\\t\\tFDstep\\tErrAbs\\tErrRel'%(ss,)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step and ermax_rel<50*step,", "# analytical Der_list,Der_star_list=assembly.dfqsdvind_gamma(MS.Surfs,MS.Surfs_star) # allocate numerical Der_list_num=[] Der_star_list_num=[] for ii in range(n_surf): sub=[]", "and collocation Surf.zeta=Zeta0[ss].copy() Surf.zeta[cc,mm,nn] += perturb_vector[kk] # perturb wake TE if mm==M: Surf_star.zeta=Zeta0_star[ss].copy()", "ss_in in range(n_surf): Der_an=Dervert_list[ss_in] Der_num=Dervert_list_num[ss_in] ermax,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) # relative", "ermax_rel=ErRel[iimax] print('Bound and wake%.2d\\tFDstep\\tErrAbs\\tErrRel'%ss_in) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ercoll<10*step, 'Error at vertices' # fig=plt.figure('Spy", "# Computation Steps=[1e-2, 1e-4, 1e-6] nsteps = len(Steps) error = np.zeros((nsteps,)) for istep", "range(3*Surf_in.maps.Kzeta): cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) ) # perturb bound Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step # perturb wake", "step in Steps: Der_num=0.0*Der_an 
for kk in range(3*Kzeta): Surf.zeta=zeta0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step", "''' Step change in input velocity is allocated to both u_ext and zeta_dot", "assembly.dfqsdzeta_vrel0') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsdzeta_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] Surf=copy.deepcopy(MS.Surfs[ss]) #Surf_star=MS.Surfs_star[ss]", "and collocation Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step Surf_in.generate_collocations() # perturb wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step", "%(step,ermax,ermax_rel)) assert ercoll<10*step, 'Error at vertices' # fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) #", "n_surf=MS.n_surf # analytical Der_list,Der_star_list=assembly.dfqsdvind_gamma(MS.Surfs,MS.Surfs_star) # allocate numerical Der_list_num=[] Der_star_list_num=[] for ii in range(n_surf):", "print('Bound%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step, 'Test failed!' Der_an=Der_star_list[ss_out][ss_in] Der_num=Der_star_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Wake%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e'", "nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gammaw0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore circulation: (include", "# sub=[] # for jj in range(n_surf): # sub.append(0.0*Dervert_list[ii][jj]) # Derlist_num.append(sub) # Store", "TE is displaced. 
''' def comp_vind(zetac,MS): # comute induced velocity V=np.zeros((3,)) for ss", "embed() assert ercoll<10*step, 'Error at collocation point' ### check error at vert for", "er_max=np.max(np.abs(Der-DerNum)) print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) ) assert er_max<5e1*step, 'Error larger", "Copy to avoid modifying the original for other tests Surf=copy.deepcopy(MS.Surfs[ss]) # Define variables", "assert ermax_rel<1e-16,\\ 'option Merge=True not working correctly, relative error (%.3e) too high!' %ErRel", "# fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(121) # ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs", "print('Surface %.2d - wake:' %ss) print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) )", "u_norm_vec=u_norm.reshape(-1,order='C') # FD derivative DerNum[:,jj]=(u_norm_vec-u_norm0_vec)/step er_max=np.max(np.abs(Der-DerNum)) print('FD step: %.2e ---> Max error: %.2e'%(step,er_max)", "# Define variables M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta # Save the reference values at equilibrium", "reduced' print('------------------------------------------------------------ OK') if PlotFlag: pass # fig = plt.figure('Spy Der',figsize=(10,4)) # ax1", "velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore circulation: (include only induced velocity contrib.) Surf_in.gamma=Gamma0[ss_in].copy() #", "libuvlm import sharpy.utils.algebra as algebra np.set_printoptions(linewidth=200,precision=3) def max_error_tensor(Pder_an,Pder_num): ''' Finds the maximum error", "velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore circulation: (include only induced velocity contrib.) 
Surf_in.gamma=Gammaw0[ss_in].copy() #", "verify ermax, ErAbs, ErRel=max_error_tensor(Ders_an[ss],Dnum) # max absolute error ermax=np.max(ErAbs) # relative error at", "np.set_printoptions(linewidth=200,precision=3) def max_error_tensor(Pder_an,Pder_num): ''' Finds the maximum error analytical derivatives Pder_an. The error", "# Derlist_num.append(sub) # Store the initial values of the variabes Zeta0=[] Zeta0_star=[] N0=[]", "---> Max error: %.2e'%(step,error[istep]) ) assert error[istep]<5e1*step, 'Error larger than 50 times the", "all other surfaces are looped. For wakes, only TE is displaced. ''' print('-------------------------------", "previous movements Surf.zeta=zeta0.copy() # Define DoFs where modifications will take place and modify", "len(Steps) error = np.zeros((nsteps,)) for istep in range(nsteps): step = Steps[istep] for ss", "for ss in range(n_surf): Surf_in=MS.Surfs[ss] Surf_star_in=MS.Surfs_star[ss] V+=Surf_in.get_induced_velocity(zetac) V+=Surf_star_in.get_induced_velocity(zetac) return V print('----------------------------------- Testing assembly.dvinddzeta')", "circulation and force Gamma0=[] Gammaw0=[] Fqs0=[] for ss in range(n_surf): Gamma0.append(MS.Surfs[ss].gamma.copy()) Gammaw0.append(MS.Surfs_star[ss].gamma.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy())", "tested with a case that actually rotates ''' print('------------------------------ Testing assembly.dfqsdzeta_omega') # rename", "velocities MS.get_normal_ind_velocities_at_collocation_points() Zeta0=[] Zeta0_star=[] Vind0=[] N0=[] ZetaC0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F'))", "Er_max=[] # Define steps to run Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): # Select", "ermax, ErAbs, ErRel=max_error_tensor(Ders_an[ss],Dnum) # max absolute error ermax=np.max(ErAbs) # relative error at max", "'Error larger than 50 times step size' Er_max[ss]=er_max # assert error decreases with", "# Initialize Er_max=[] # Define steps to 
run Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf):", "fqs0=Surf.fqs.copy() u_ext0=Surf.u_ext.copy() zeta_dot0=Surf.zeta_dot.copy() for step in Steps: Der_num=0.0*Der_an for kk in range(3*Kzeta): Surf.u_ext=u_ext0.copy()", "# ind velocities computed already Surf.get_input_velocities_at_collocation_points() Der=assembly.uc_dncdzeta(Surf) ### numerical derivative #Surf.get_normal_input_velocities_at_collocation_points() u_tot0=Surf.u_ind_coll+Surf.u_input_coll u_norm0=Surf.project_coll_to_normal(u_tot0)", "cases # fname = './basic_rotating_wing/basic_wing.data.h5' # haero = h5utils.readh5(fname) # tsdata = haero.data.aero.timestep_info[-1]", "Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) # <--- gammaw_0 needs to be used here! df=(Surf_out.fqs-fqs0)/step", "comute induced velocity V=np.zeros((3,)) for ss in range(n_surf): Surf_in=MS.Surfs[ss] Surf_star_in=MS.Surfs_star[ss] V+=Surf_in.get_induced_velocity(zetac) V+=Surf_star_in.get_induced_velocity(zetac) return", "induced velocity contrib.) Surf_in.zeta=Zeta0[ss_in].copy() Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out]", "needs to be used here! 
df=(Surf_out.fqs-fqs0)/step Der_star_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ### check error Er_max=[] Er_max_star=[] for", "###### bound for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] # perturb for pp in range(Surf_in.maps.K):", "''' print('---------------------------------- Testing assembly.dfqsduinput') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsduinput(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf):", "def test_dfunstdgamma_dot(self): ''' Test derivative of unsteady aerodynamic force with respect to changes", "test_dfunstdgamma_dot(self): ''' Test derivative of unsteady aerodynamic force with respect to changes in", "as FD step size is reduced' print('------------------------------------------------------------ OK') def test_dfqsdgamma_vrel0(self): print('----------------------------- Testing assembly.dfqsdgamma_vrel0')", "error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step", "### verify ermax, ErAbs, ErRel=max_error_tensor(Ders_an[ss],Dnum) # max absolute error ermax=np.max(ErAbs) # relative error", "MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsduinput(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] #Surf=copy.deepcopy(MS.Surfs[ss]) Surf=MS.Surfs[ss] #Surf_star=MS.Surfs_star[ss]", "# Zero elements check iizero=np.abs(Pder_an)<1e-15 for der_here in Pder_num[iizero]: if np.abs(der_here)>err_max: err_max=der_here return", "are unchanged # - del ind. 
vel on output to ensure they are", "Dercoll_num=np.zeros((3,3)) Dervert_list_num=[] for ii in range(n_surf): Dervert_list_num.append(0.0*Dervert_list[ii]) # store reference grid Zeta0=[] Zeta0_star=[]", "Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) V0=comp_vind(zetac,MS) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ### vertices for ss_in", "for ss_in in range(n_surf): Der_an=Der_list[ss_out][ss_in] Der_num=Der_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Bound%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step,", "ermax=np.max(np.abs(ErMat)) print('Wake%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step, 'Test failed!' # fig = plt.figure('Spy Der',figsize=(10,4))", "error Er_max=[] Er_max_star=[] for ss_out in range(n_surf): for ss_in in range(n_surf): Der_an=Der_list[ss_out][ss_in] Der_num=Der_list_num[ss_out][ss_in]", "prepare output surfaces # - ensure normals are unchanged # - del ind.", "jj in range(n_surf): sub.append(0.0*Dervert_list[ii][jj]) Derlist_num.append(sub) # store reference circulation and normal induced velocities", "range(3*Kzeta): Surf.u_ext=u_ext0.copy() Surf.zeta_dot=zeta_dot0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.u_ext[ind_3d]+=0.5*step Surf.zeta_dot[ind_3d]+=-0.5*step Surf.get_input_velocities_at_segments() Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num))", "Dervert_list = assembly.nc_domegazetadzeta(MS.Surfs,MS.Surfs_star) # allocate numerical # Derlist_num=[] # for ii in range(n_surf):", "er_max<5e1*step, 'Error larger than 50 times step size' Er_max[ss]=er_max # assert error decreases", "### Wake Surf.gamma=gamma0.copy() gammaw_TE0=Surf_star.gamma[0,:].copy() M_star,N_star=Surf_star.maps.M,Surf_star.maps.N 
K_star=Surf_star.maps.K for nn in range(N): pp=np.ravel_multi_index( (0,nn), (M_star,N_star))", "Der_num = 0.0*Der_an # Loop through the different grid modifications (three directions per", "ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] # perturb for pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gamma0[ss_in].copy()", "u_ext=Surf.u_ext,gamma=Surf.gamma) u_norm=Surf_pert.project_coll_to_normal(u_tot0) u_norm_vec=u_norm.reshape(-1,order='C') # FD derivative DerNum[:,jj]=(u_norm_vec-u_norm0_vec)/step er_max=np.max(np.abs(Der-DerNum)) print('FD step: %.2e ---> Max", "Store the initial values of the variabes Zeta0=[] Zeta0_star=[] N0=[] ZetaC0=[] for ss", "MS=self.MS Ders_an=assembly.dfunstdgamma_dot(MS.Surfs) step=1e-6 Ders_num=[] n_surf=len(MS.Surfs) for ss in range(n_surf): Surf=MS.Surfs[ss] Kzeta,K=Surf.maps.Kzeta,Surf.maps.K M,N=Surf.maps.M,Surf.maps.N Dnum=np.zeros((3*Kzeta,K))", "perturb_vector = np.zeros(3*Surf.maps.Kzeta) # PERTURBATION OF THE SURFACE for kk in range(3*Surf.maps.Kzeta): #", "for pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gammaw0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate induced velocity everywhere", "# estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) # <---", "zeta: (include only induced velocity contrib.) Surf_in.zeta=Zeta0[ss_in].copy() Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives for ss_out", "The relative error tensor may contain NaN or Inf if the analytical derivative", "iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d\\t\\t\\tFDstep\\tErrAbs\\tErrRel'%(ss,)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step and ermax_rel<50*step, 'Test failed!' 
def test_wake_prop(self):", "for ss_in in range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] _,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute", "> 0: assert error[istep]<=error[istep-1],\\ 'Error not decreasing as FD step size is reduced'", "# Recompute get_ind_velocities_at_segments and recover the previous grid Surf.get_input_velocities_at_segments() Surf.zeta=zeta0.copy() # Compute new", "cc in range(3): zetac_pert=zetac.copy() zetac_pert[cc]+=step Vnum=comp_vind(zetac_pert,MS) Dercoll_num[:,cc]=(Vnum-V0)/step ercoll=np.max(np.abs(Dercoll-Dercoll_num)) print('Error coll.\\tFDstep\\tErrAbs') print('\\t\\t%.1e\\t%.1e' %(step,ercoll)) #if", "derivative DerNum[:,jj]=(u_norm_vec-u_norm0_vec)/step er_max=np.max(np.abs(Der-DerNum)) print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) ) assert er_max<5e1*step,", "# T.test_dfqsdzeta_vrel0() # T.test_dfqsdgamma_vrel0() # ### state equation terms # T.test_uc_dncdzeta() # T.test_nc_dqcdzeta()", "SURFACE for kk in range(3*Surf.maps.Kzeta): # generate a random perturbation between the 90%", "circulation: (include only induced velocity contrib.) 
Surf_in.gamma=Gammaw0[ss_in].copy() # estimate derivatives for ss_out in", "numpy as np import scipy.linalg as scalg import sharpy.utils.h5utils as h5utils import sharpy.linear.src.assembly", "M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K fqs0=Surf.fqs.copy() gamma0=Surf.gamma.copy() for step in Steps: Der_num=0.0*Der_an Der_star_num=0.0*Der_star_an ### Bound for", "''' MS=self.MS Ders_an=assembly.dfunstdgamma_dot(MS.Surfs) step=1e-6 Ders_num=[] n_surf=len(MS.Surfs) for ss in range(n_surf): Surf=MS.Surfs[ss] Kzeta,K=Surf.maps.Kzeta,Surf.maps.K M,N=Surf.maps.M,Surf.maps.N", "tsdata.omega = [] # for ss in range(haero.data.aero.n_surf): # tsdata.omega.append(haero.data.structure.timestep_info[-1].for_vel[3:6]) MS=multisurfaces.MultiAeroGridSurfaces(tsdata) MS.get_normal_ind_velocities_at_collocation_points() MS.verify_non_penetration()", "(include only induced velocity contrib.) Surf_in.gamma=Gammaw0[ss_in].copy() # estimate derivatives for ss_out in range(n_surf):", "%.2e'%(step,er_max) ) assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max.append(er_max) def", "vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ### vertices for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in]", "n_surf=MS.n_surf Der_list=assembly.dfqsduinput(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] #Surf=copy.deepcopy(MS.Surfs[ss]) Surf=MS.Surfs[ss] #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N", "variabes Zeta0=[] Zeta0_star=[] N0=[] ZetaC0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) N0.append(MS.Surfs[ss].normals.copy())", "get_joukovski_qs method re-computes the induced velocity at the panel segments. 
A copy of", "Surf.u_ext[ind_3d]+=0.5*step Surf.zeta_dot[ind_3d]+=-0.5*step Surf.get_input_velocities_at_segments() Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD", "''' print('------------------------------ Testing assembly.dfqsdzeta_omega') # rename MS=self.MS n_surf=MS.n_surf # Compute the anaytical derivative", "# calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ### vertices for ss_in in range(n_surf):", "ermax_rel<50*step, embed()#'Test failed!' # fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(131) # ax1.spy(ErAbs,precision=1e2*step)", "range(n_surf): dcoll_b,dvert_b=assembly.dvinddzeta(zetac,MS.Surfs[ss_in],IsBound=True) dcoll_w,dvert_w=assembly.dvinddzeta(zetac,MS.Surfs_star[ss_in], IsBound=False,M_in_bound=MS.Surfs[ss_in].maps.M) Dercoll+=dcoll_b+dcoll_w Dervert_list.append(dvert_b+dvert_w) # allocate numerical Dercoll_num=np.zeros((3,3)) Dervert_list_num=[] for ii", "term) # T.test_dvinddzeta() # T.test_dfqsdvind_zeta() # run setUp after this test # T.setUp()", "to %d' %(ss_out,ss_out)) # #plt.show() # plt.close() def test_uc_dncdzeta(self,PlotFlag=False): print('---------------------------------- Testing assembly.uc_dncdzeta') MS=self.MS", "Surf.gamma_dot=Gamma_dot0.copy() ### verify ermax, ErAbs, ErRel=max_error_tensor(Ders_an[ss],Dnum) # max absolute error ermax=np.max(ErAbs) # relative", "# ax3=fig.add_subplot(133) # ax3.spy(Dercoll_list[ss_out],precision=50*step) # ax3.set_title('Dcoll an. 
%d to %d' %(ss_out,ss_out)) # #plt.show()", "# ax2.spy(DerNum,precision=step) # plt.show() def test_nc_domegazetadzeta(self): \"\"\" Variation at colocation points due to", ") assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max.append(er_max) def test_dfqsduinput(self):", "Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): # Select the surface with the analytica derivatives", "the step quickly introduced round-off error. # # assert error decreases with step", "range(n_surf): for ss_in in range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] ermax, ErAbs, ErRel=max_error_tensor(Der_an,Der_num)", "error[istep]<5e1*step, 'Error larger than 50 times the step size' if istep > 0:", "= plt.figure('Spy Der',figsize=(10,4)) # ax1 = fig.add_subplot(111) # ax1.spy(ErMat,precision=50*step) # plt.show() def test_dvinddzeta(self):", "print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step and ermax_rel<50*step, 'Test failed!' 
# fig=plt.figure('Spy Er vs coll", "ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel %d' %(ss_in)) # #plt.show() # plt.close() def test_dfqsdvind_zeta(self): '''", "Testing assembly.dfqsdzeta_omega') # rename MS=self.MS n_surf=MS.n_surf # Compute the anaytical derivative of the", "err_max=der_here return err_max, Eabs, Erel class Test_assembly(unittest.TestCase): ''' Test methods into assembly module", "size is reduced' print('------------------------------------------------------------ OK') if PlotFlag: pass # fig = plt.figure('Spy Der',figsize=(10,4))", "times step size' Er_max_star.append(er_max) Surf.gamma=gamma0.copy() ### Warning: this test fails: the dependency on", "# assert error decreases with step size # for ii in range(1,len(Steps)): #", "# ax2 = fig.add_subplot(122) # ax2.spy(DerNum,precision=step) # plt.show() def test_nc_domegazetadzeta(self): \"\"\" Variation at", "Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step", "Steps: # Initialize Der_num = 0.0*Der_an # Loop through the different grid modifications", "for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) # <--- gammaw_0 needs to", "to run Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): # Select the surface with the", "range(len(Steps)): step=Steps[ss] for jj in range(3*Surf.maps.Kzeta): # perturb cc_pert=Surf.maps.ind_3d_vert_vect[0][jj] mm_pert=Surf.maps.ind_3d_vert_vect[1][jj] nn_pert=Surf.maps.ind_3d_vert_vect[2][jj] zeta_pert=zeta0.copy() zeta_pert[cc_pert,mm_pert,nn_pert]+=step", "''' Test assembly <NAME>, 29 May 2018 ''' import os import copy import", "large steps. In fact, reducing # the step quickly introduced round-off error. 
#", "(3,M+1,N+1) ) Surf.zeta[ind_3d]+=step Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD", "import sharpy.linear.src.multisurfaces as multisurfaces import sharpy.linear.src.surface as surface import sharpy.linear.src.libuvlm as libuvlm import", "times the step size' if istep > 0: assert error[istep]<=error[istep-1],\\ 'Error not decreasing", "of Pder_an is nonzero - absolute, otherwise The function returns the absolute and", "for kk in range(3*Surf_in.maps.Kzeta): cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) ) # perturb bound. vertices and", "times step size' Er_max.append(er_max) def test_dfqsdvind_gamma(self): print('------------------------------ Testing assembly.dfqsdvind_gamma') MS=self.MS n_surf=MS.n_surf # analytical", "step size' Er_max_star.append(er_max) Surf.gamma=gamma0.copy() ### Warning: this test fails: the dependency on gamma", "method re-computes the induced velocity at the panel segments. 
A copy of Surf", "Surf_in.zeta[cc,mm,nn]+=step # perturb wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step # recalculate induced velocity", "Surf.gamma=gamma0.copy() gammaw_TE0=Surf_star.gamma[0,:].copy() M_star,N_star=Surf_star.maps.M,Surf_star.maps.N K_star=Surf_star.maps.K for nn in range(N): pp=np.ravel_multi_index( (0,nn), (M_star,N_star)) gammaw_TE=gammaw_TE0.copy() gammaw_TE[nn]+=step", "ax2.set_title('error rel %d to %d' %(ss_in,ss_out)) # ax3=fig.add_subplot(133) # ax3.spy(Dercoll_list[ss_out],precision=50*step) # ax3.set_title('Dcoll an.", "#plt.show() # plt.close() def test_uc_dncdzeta(self,PlotFlag=False): print('---------------------------------- Testing assembly.uc_dncdzeta') MS=self.MS n_surf=MS.n_surf MS.get_ind_velocities_at_collocation_points() MS.get_normal_ind_velocities_at_collocation_points() for", "in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gammaw0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) #", "remove NaN and inf... 
iifinite=np.isfinite(Erel) err_max=0.0 for err_here in Erel[iifinite]: if np.abs(err_here)>err_max: err_max=err_here", "jj in range(n_surf): sub.append(0.0*Dervert_list[ii][jj]) Derlist_num.append(sub) # store reference circulation and force Zeta0=[] Zeta0_star=[]", "range(n_surf): Der_an=Dervert_list[ss_in] Der_num=Dervert_list_num[ss_in] ermax,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) # relative error at", "Surf_in=MS.Surfs[ss] Surf_star_in=MS.Surfs_star[ss] V+=Surf_in.get_induced_velocity(zetac) V+=Surf_star_in.get_induced_velocity(zetac) return V print('----------------------------------- Testing assembly.dvinddzeta') MS=self.MS n_surf=MS.n_surf zetac=.5*(MS.Surfs[0].zeta[:,1,2]+MS.Surfs[0].zeta[:,1,3]) Dercoll=np.zeros((3,3))", "vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ### loop input surfs for ss_in in range(n_surf):", "def test_dfqsdgamma_vrel0(self): print('----------------------------- Testing assembly.dfqsdgamma_vrel0') MS=self.MS n_surf=MS.n_surf Der_list,Der_star_list=assembly.dfqsdgamma_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Er_max_star=[] Steps=[1e-2,1e-4,1e-6,] for ss", "Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) # <--- gammaw_0 needs to be used here! df=(Surf_out.fqs-fqs0)/step Der_star_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ###", "for ss in range(n_surf): Surf=MS.Surfs[ss] Kzeta,K=Surf.maps.Kzeta,Surf.maps.K M,N=Surf.maps.M,Surf.maps.N Dnum=np.zeros((3*Kzeta,K)) # get refernce values Surf.get_joukovski_unsteady()", "29 May 2018 ''' import os import copy import warnings import unittest import", "mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() ### check error at colloc Dercoll_num=np.zeros((3,3)) for cc in range(3): zetac_pert=zetac.copy()", "correctly, relative error (%.3e) too high!' 
%ErRel # allocate numerical Derlist_num=[] for ii", "# for ss in range(haero.data.aero.n_surf): # tsdata.omega.append(haero.data.structure.timestep_info[-1].for_vel[3:6]) MS=multisurfaces.MultiAeroGridSurfaces(tsdata) MS.get_normal_ind_velocities_at_collocation_points() MS.verify_non_penetration() MS.verify_aic_coll() MS.get_joukovski_qs() MS.verify_joukovski_qs()", "import warnings import unittest import itertools import numpy as np import scipy.linalg as", "# the step quickly introduced round-off error. # # assert error decreases with", "due to geometrical variations at vertices Needs to be tested with a case", "larger than 50 times step size' Er_max.append(er_max) ### Wake Surf.gamma=gamma0.copy() gammaw_TE0=Surf_star.gamma[0,:].copy() M_star,N_star=Surf_star.maps.M,Surf_star.maps.N K_star=Surf_star.maps.K", "Er_max_star=[] for ss_out in range(n_surf): for ss_in in range(n_surf): Der_an=Der_list[ss_out][ss_in] Der_num=Der_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat))", "ensure that other tests are not affected. 
''' print('------------------------------ Testing assembly.dfqsdzeta_vrel0') MS=self.MS n_surf=MS.n_surf", "Surf.gamma=gamma0.copy() ### Warning: this test fails: the dependency on gamma is linear, hence", "Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() for step in Steps: Der_num=0.0*Der_an for kk in range(3*Kzeta): Surf.zeta=zeta0.copy()", "ss_in in range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] ermax, ErAbs, ErRel=max_error_tensor(Der_an,Der_num) # max", "different grid modifications (three directions per vertex point) for kk in range(3*Kzeta): #", "dv=(Vnum-V0)/step Dervert_list_num[ss_in][:,kk]=dv.reshape(-1,order='C') # restore Surf_in.zeta=Zeta0[ss_in].copy() if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() ### check error at colloc", "Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD step: %.2e --->", "Der=assembly.uc_dncdzeta(Surf) ### numerical derivative #Surf.get_normal_input_velocities_at_collocation_points() u_tot0=Surf.u_ind_coll+Surf.u_input_coll u_norm0=Surf.project_coll_to_normal(u_tot0) u_norm0_vec=u_norm0.reshape(-1,order='C') zeta0=Surf.zeta DerNum=np.zeros(Der.shape) Steps=np.array([1e-2,1e-3,1e-4,1e-5,1e-6]) Er_max=0.0*Steps for", "in range(3*Kzeta): Surf.zeta=zeta0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d", "50 times step size' Er_max.append(er_max) ### Wake Surf.gamma=gamma0.copy() gammaw_TE0=Surf_star.gamma[0,:].copy() M_star,N_star=Surf_star.maps.M,Surf_star.maps.N K_star=Surf_star.maps.K for nn", "OK') def test_dfqsdgamma_vrel0(self): 
print('----------------------------- Testing assembly.dfqsdgamma_vrel0') MS=self.MS n_surf=MS.n_surf Der_list,Der_star_list=assembly.dfqsdgamma_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Er_max_star=[] Steps=[1e-2,1e-4,1e-6,] for", "into assembly module ''' def setUp(self): # select test case fname = os.path.dirname(os.path.abspath(__file__))", "Surf_in=MS.Surfs_star[ss_in] # perturb for pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gammaw0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate", "run Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): # Select the surface with the analytica", "h5utils.readh5(fname) # tsdata = haero.data.aero.timestep_info[-1] # tsdata.omega = [] # for ss in", "unchanged # - del ind. vel on output to ensure they are re-computed", "grid Surf.get_input_velocities_at_segments() Surf.zeta=zeta0.copy() # Compute new forces Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d", "# plt.close() def test_dfqsdvind_zeta(self): ''' For each output surface, there induced velocity is", "pass # fig = plt.figure('Spy Der',figsize=(10,4)) # ax1 = fig.add_subplot(121) # ax1.spy(Der,precision=step) #", "Zeta0=[] Zeta0_star=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) V0=comp_vind(zetac,MS) # calculate vis FDs", "for ss_in in range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] ermax, ErAbs, ErRel=max_error_tensor(Der_an,Der_num) #", "= Steps[istep] for ss in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N perturb_vector = np.zeros(3*Surf.maps.Kzeta) #", "for ss_in in range(n_surf): Surf_in=MS.Surfs_star[ss_in] # perturb for pp in range(Surf_in.maps.K): 
mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp]", "Testing assembly.dfqsduinput') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsduinput(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] #Surf=copy.deepcopy(MS.Surfs[ss])", "# Define steps to run Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): # Select the", "derivative is zero. These elements are filtered out during the search for maximum", "methods into assembly module ''' def setUp(self): # select test case fname =", "# plt.show() def test_dvinddzeta(self): ''' For each output surface, there induced velocity is", "anaytical derivative of the case Der_an_list=assembly.dfqsdzeta_omega(MS.Surfs,MS.Surfs_star) # Initialize Er_max=[] # Define steps to", "larger than 50 times step size' Er_max.append(er_max) def test_dfqsdzeta_omega(self): ''' Note: the get_joukovski_qs", "### state equation terms # T.test_uc_dncdzeta() # T.test_nc_dqcdzeta() ### force equation (unsteady) #", "error: %.2e'%(step,er_max) ) assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max_star.append(er_max)", "in range(3*Surf_in.maps.Kzeta): cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) ) # perturb bound Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step # perturb", "output surface, there induced velocity is computed, all other surfaces are looped. 
For", "sharpy.linear.src.surface as surface import sharpy.linear.src.libuvlm as libuvlm import sharpy.utils.algebra as algebra np.set_printoptions(linewidth=200,precision=3) def", "size is reduced' def test_dfqsdzeta_vrel0(self): ''' Note: the get_joukovski_qs method re-computes the induced", "COMPUTE THE DERIVATIVES Der_an = np.zeros(Surf.maps.K) Der_an = np.dot(Dervert_list[ss], perturb_vector) Der_num = np.zeros(Surf.maps.K)", "vertices' # fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(121) # ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error", "M,N=Surf.maps.M,Surf.maps.N Dnum=np.zeros((3*Kzeta,K)) # get refernce values Surf.get_joukovski_unsteady() Gamma_dot0=Surf.gamma_dot.copy() F0=Surf.funst.copy() for pp in range(K):", "'Error larger than 50 times the step size' if istep > 0: assert", "Surf_in.zetac=ZetaC0[ss_in].copy('F') Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] dvind=(Surf_out.u_ind_coll_norm-Vind0[ss_out])/step Derlist_num[ss_out][ss_in][:,kk]=dvind.reshape(-1,order='C') ###", "for kk in range(3*Kzeta): # Initialize to remove previous movements Surf.zeta=zeta0.copy() # Define", "NaN and inf... 
iifinite=np.isfinite(Erel) err_max=0.0 for err_here in Erel[iifinite]: if np.abs(err_here)>err_max: err_max=err_here #", "#Surf.get_normal_input_velocities_at_collocation_points() u_tot0=Surf.u_ind_coll+Surf.u_input_coll u_norm0=Surf.project_coll_to_normal(u_tot0) u_norm0_vec=u_norm0.reshape(-1,order='C') zeta0=Surf.zeta DerNum=np.zeros(Der.shape) Steps=np.array([1e-2,1e-3,1e-4,1e-5,1e-6]) Er_max=0.0*Steps for ss in range(len(Steps)): step=Steps[ss]", "for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Vind0.append(MS.Surfs[ss].u_ind_coll_norm.copy()) N0.append(MS.Surfs[ss].normals.copy()) # calculate vis FDs", "assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max[ss]=er_max # assert error", "surfaces # - ensure normals are unchanged # - del ind. vel on", "derivatives Pder_an. The error is: - relative, if the element of Pder_an is", "failed!' # fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(131) # ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error", "IsBound=False,M_in_bound=MS.Surfs[ss_in].maps.M) Dercoll+=dcoll_b+dcoll_w Dervert_list.append(dvert_b+dvert_w) # allocate numerical Dercoll_num=np.zeros((3,3)) Dervert_list_num=[] for ii in range(n_surf): Dervert_list_num.append(0.0*Dervert_list[ii])", "vert for ss_in in range(n_surf): Der_an=Dervert_list[ss_in] Der_num=Dervert_list_num[ss_in] ermax,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs)", "store reference circulation and force Zeta0=[] Zeta0_star=[] Fqs0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy())", "step: %.2e ---> Max error: %.2e'%(step,er_max) ) assert er_max<5e1*step, 'Error larger than 50", "# analytical Dercoll_list,Dervert_list=assembly.dfqsdvind_zeta(MS.Surfs,MS.Surfs_star) # allocate numerical Derlist_num=[] for ii in range(n_surf): sub=[] for", "vertices and collocation 
Surf.zeta=Zeta0[ss].copy() Surf.zeta[cc,mm,nn] += perturb_vector[kk] # perturb wake TE if mm==M:", "relative error tensors, and the maximum error. @warning: The relative error tensor may", "= np.dot(Dervert_list[ss], perturb_vector) Der_num = np.zeros(Surf.maps.K) ipanel = 0 skew_omega = algebra.skew(Surf.omega) for", "zetac_pert=zetac.copy() zetac_pert[cc]+=step Vnum=comp_vind(zetac_pert,MS) Dercoll_num[:,cc]=(Vnum-V0)/step ercoll=np.max(np.abs(Dercoll-Dercoll_num)) print('Error coll.\\tFDstep\\tErrAbs') print('\\t\\t%.1e\\t%.1e' %(step,ercoll)) #if ercoll>10*step: embed() assert", "sub.append(0.0*Dervert_list[ii][jj]) Derlist_num.append(sub) # store reference circulation and normal induced velocities MS.get_normal_ind_velocities_at_collocation_points() Zeta0=[] Zeta0_star=[]", "# generate non-zero field of external force Surf.u_ext[0,:,:]=Surf.u_ext[0,:,:]-20.0 Surf.u_ext[1,:,:]=Surf.u_ext[1,:,:]+60.0 Surf.u_ext[2,:,:]=Surf.u_ext[2,:,:]+30.0 Surf.u_ext=Surf.u_ext+np.random.rand(*Surf.u_ext.shape) ### analytical", "(3,M+1,N+1) ) # perturb bound. vertices and collocation Surf.zeta=Zeta0[ss].copy() Surf.zeta[cc,mm,nn] += perturb_vector[kk] #", "print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step, 'Test failed!' 
Der_an=Der_star_list[ss_out][ss_in] Der_num=Der_star_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Wake%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax))", "DERIVATIVES Der_an = np.zeros(Surf.maps.K) Der_an = np.dot(Dervert_list[ss], perturb_vector) Der_num = np.zeros(Surf.maps.K) ipanel =", "collocation point' ### check error at vert for ss_in in range(n_surf): Der_an=Dervert_list[ss_in] Der_num=Dervert_list_num[ss_in]", "fig.add_subplot(122) # ax2.spy(DerNum,precision=step) # plt.show() def test_nc_domegazetadzeta(self): \"\"\" Variation at colocation points due", "print('------------------------------ Testing assembly.dfqsdvind_gamma') MS=self.MS n_surf=MS.n_surf # analytical Der_list,Der_star_list=assembly.dfqsdvind_gamma(MS.Surfs,MS.Surfs_star) # allocate numerical Der_list_num=[] Der_star_list_num=[]", "perturb wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step ### prepare output surfaces # -", "Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] dvind=(Surf_out.u_ind_coll_norm-Vind0[ss_out])/step Derlist_num[ss_out][ss_in][:,kk]=dvind.reshape(-1,order='C') ### check", "# store reference circulation and force Zeta0=[] Zeta0_star=[] Fqs0=[] for ss in range(n_surf):", "zeta0=Surf.zeta DerNum=np.zeros(Der.shape) Steps=np.array([1e-2,1e-3,1e-4,1e-5,1e-6]) Er_max=0.0*Steps for ss in range(len(Steps)): step=Steps[ss] for jj in range(3*Surf.maps.Kzeta):", "# Rotating cases # fname = './basic_rotating_wing/basic_wing.data.h5' # haero = h5utils.readh5(fname) # tsdata", "error for ss_out in range(n_surf): for ss_in in range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out]", "import numpy as np import scipy.linalg as scalg import sharpy.utils.h5utils as h5utils import", "Merge=True not working correctly, relative error (%.3e) 
too high!' %ErRel # allocate numerical", "print('----------------------------- Testing assembly.test_nc_domegazetadzeta') MS=self.MS n_surf=MS.n_surf # analytical Dervert_list = assembly.nc_domegazetadzeta(MS.Surfs,MS.Surfs_star) # allocate numerical", "gammaw_TE[nn]+=step Surf.get_joukovski_qs(gammaw_TE=gammaw_TE) df=(Surf.fqs-fqs0)/step Der_star_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_star_an-Der_star_num)) print('Surface %.2d - wake:' %ss) print('FD step: %.2e", "# store reference grid Zeta0=[] Zeta0_star=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) V0=comp_vind(zetac,MS)", "# T.setUp() # T.test_dfqsdvind_gamma() # T.test_dfqsduinput() # T.test_dfqsdzeta_vrel0() # T.test_dfqsdgamma_vrel0() # ### state", "# ax1.set_title('error abs %d' %(ss_in)) # ax2=fig.add_subplot(122) # ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel %d'", "N=Surf.maps.N K_star=Surf_star.maps.K C=C_list[ss] Cstar=Cstar_list[ss] # add noise to circulations gamma=Surf.gamma+np.random.rand( *Surf.gamma.shape ) gamma_star=Surf_star.gamma+np.random.rand(", "in range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] ermax, ErAbs, ErRel=max_error_tensor(Der_an,Der_num) # max absolute", "ax1.set_title('error abs %d to %d' %(ss_in,ss_out)) # ax2=fig.add_subplot(132) # ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel", "istep > 0: assert error[istep]<=error[istep-1],\\ 'Error not decreasing as FD step size is", "for step in Steps: Der_num=0.0*Der_an for kk in range(3*Kzeta): Surf.u_ext=u_ext0.copy() Surf.zeta_dot=zeta_dot0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1)", "a random perturbation between the 90% and the 110% of the step perturb_vector[kk]", "def max_error_tensor(Pder_an,Pder_num): ''' Finds the maximum error analytical derivatives Pder_an. The error is:", "only TE is displaced. 
''' print('----------------------------- Testing assembly.test_nc_dqcdzeta') MS=self.MS n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star)", "step size' Er_max.append(er_max) ### Wake Surf.gamma=gamma0.copy() gammaw_TE0=Surf_star.gamma[0,:].copy() M_star,N_star=Surf_star.maps.M,Surf_star.maps.N K_star=Surf_star.maps.K for nn in range(N):", "assert Er_max_star[ii]<Er_max_star[ii-1],\\ # 'Error not decreasing as FD step size is reduced' def", "Der_list=assembly.dfqsduinput(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] #Surf=copy.deepcopy(MS.Surfs[ss]) Surf=MS.Surfs[ss] #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K", "M_in,N_in=Surf_in.maps.M,Surf_in.maps.N # perturb for kk in range(3*Surf_in.maps.Kzeta): cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) ) # perturb", "gammaw_0 needs to be used here! df=(Surf_out.fqs-fqs0)/step Der_star_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ### check error Er_max=[] Er_max_star=[]", "# generate a random perturbation between the 90% and the 110% of the", "in range(n_surf): sub.append(0.0*Dervert_list[ii][jj]) Derlist_num.append(sub) # store reference circulation and force Zeta0=[] Zeta0_star=[] Fqs0=[]", "haero.data.aero.timestep_info[-1] # tsdata.omega = [] # for ss in range(haero.data.aero.n_surf): # tsdata.omega.append(haero.data.structure.timestep_info[-1].for_vel[3:6]) MS=multisurfaces.MultiAeroGridSurfaces(tsdata)", "MS=self.MS C_list,Cstar_list=assembly.wake_prop(MS.Surfs,MS.Surfs_star) n_surf=len(MS.Surfs) for ss in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] N=Surf.maps.N K_star=Surf_star.maps.K C=C_list[ss] Cstar=Cstar_list[ss]", "# #plt.show() # plt.close() def test_dfqsdvind_zeta(self): ''' For each output surface, there induced", "= assembly.nc_domegazetadzeta(MS.Surfs,MS.Surfs_star) # allocate numerical # Derlist_num=[] # for ii in range(n_surf): #", 
"Surf_in.zeta=Zeta0[ss_in].copy() Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:])", "ax3.set_title('Dcoll an. %d to %d' %(ss_out,ss_out)) # #plt.show() # plt.close() def test_uc_dncdzeta(self,PlotFlag=False): print('----------------------------------", "# store reference circulation and normal induced velocities MS.get_normal_ind_velocities_at_collocation_points() Zeta0=[] Zeta0_star=[] Vind0=[] N0=[]", "MS.get_ind_velocities_at_segments(overwrite=True) # restore circulation: (include only induced velocity contrib.) Surf_in.gamma=Gammaw0[ss_in].copy() # estimate derivatives", "THE SURFACE for kk in range(3*Surf.maps.Kzeta): # generate a random perturbation between the", "through the different grid modifications (three directions per vertex point) for kk in", "# sub.append(0.0*Dervert_list[ii][jj]) # Derlist_num.append(sub) # Store the initial values of the variabes Zeta0=[]", "DerNum=np.zeros(Der.shape) Steps=np.array([1e-2,1e-3,1e-4,1e-5,1e-6]) Er_max=0.0*Steps for ss in range(len(Steps)): step=Steps[ss] for jj in range(3*Surf.maps.Kzeta): #", "point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step and ermax_rel<50*step, 'Test failed!' #", "decreases with step size # for ii in range(1,len(Steps)): # assert Er_max[ii]<Er_max[ii-1],\\ #", "induced velocity contrib.) 
Surf_in.gamma=Gammaw0[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy()", "nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gamma0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore circulation: (include", "%.2e'%(step,er_max) ) assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max[ss]=er_max #", "\"\"\" Variation at colocation points due to geometrical variations at vertices Needs to", "Save the reference values at equilibrium fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() u_input_seg0=Surf.u_input_seg.copy() for step in Steps:", "T=Test_assembly() # T.setUp() # ### force equation (qs term) # T.test_dvinddzeta() # T.test_dfqsdvind_zeta()", "in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) # <--- gammaw_0 needs to be used", "Surf_out=MS.Surfs[ss_out] dvind=(Surf_out.u_ind_coll_norm-Vind0[ss_out])/step Derlist_num[ss_out][ss_in][:,kk]=dvind.reshape(-1,order='C') ### check error for ss_out in range(n_surf): for ss_in in", "sharpy.linear.src.multisurfaces as multisurfaces import sharpy.linear.src.surface as surface import sharpy.linear.src.libuvlm as libuvlm import sharpy.utils.algebra", "Testing assembly.test_nc_dqcdzeta') MS=self.MS n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star) # check option Der_all_exp=np.block(Dervert_list)+scalg.block_diag(*Dercoll_list) Der_all=np.block( assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star,Merge=True)", "Surf.zeta_dot[ind_3d]+=-0.5*step Surf.get_input_velocities_at_segments() Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD step:", "for jj in 
range(3*Surf.maps.Kzeta): # perturb cc_pert=Surf.maps.ind_3d_vert_vect[0][jj] mm_pert=Surf.maps.ind_3d_vert_vect[1][jj] nn_pert=Surf.maps.ind_3d_vert_vect[2][jj] zeta_pert=zeta0.copy() zeta_pert[cc_pert,mm_pert,nn_pert]+=step # calculate", "print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step and ermax_rel<50*step, 'Test failed!' # fig=plt.figure('Spy Er vs", "are not affected. ''' print('------------------------------ Testing assembly.dfqsdzeta_vrel0') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsdzeta_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for", "fig = plt.figure('Spy Der',figsize=(10,4)) # ax1 = fig.add_subplot(121) # ax1.spy(Der,precision=step) # ax2 =", "# Derlist_num=[] # for ii in range(n_surf): # sub=[] # for jj in", "normal velocity Surf_pert=surface.AeroGridSurface(Surf.maps,zeta=zeta_pert, u_ext=Surf.u_ext,gamma=Surf.gamma) u_norm=Surf_pert.project_coll_to_normal(u_tot0) u_norm_vec=u_norm.reshape(-1,order='C') # FD derivative DerNum[:,jj]=(u_norm_vec-u_norm0_vec)/step er_max=np.max(np.abs(Der-DerNum)) print('FD step:", "times step size' Er_max.append(er_max) def test_dfqsduinput(self): ''' Step change in input velocity is", "check option Der_all_exp=np.block(Dervert_list)+scalg.block_diag(*Dercoll_list) Der_all=np.block( assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star,Merge=True) ) _,ErAbs,ErRel=max_error_tensor(Der_all,Der_all_exp) # max absolute error ermax=np.max(ErAbs) #", "print('---------------------------------- Testing assembly.uc_dncdzeta') MS=self.MS n_surf=MS.n_surf MS.get_ind_velocities_at_collocation_points() MS.get_normal_ind_velocities_at_collocation_points() for ss in range(n_surf): print('Surface %.2d:'", "derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) # <--- gammaw_0 needs", "range(n_surf): for 
ss_in in range(n_surf): Der_an=Der_list[ss_out][ss_in] Der_num=Der_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Bound%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert", "#Surf=copy.deepcopy(MS.Surfs[ss]) Surf=MS.Surfs[ss] #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() u_ext0=Surf.u_ext.copy() zeta_dot0=Surf.zeta_dot.copy() for step in Steps:", "error. # # assert error decreases with step size # for ii in", "# max absolute error ermax=np.max(ErAbs) # relative error at max abs error point", "Zeta0=[] Zeta0_star=[] N0=[] ZetaC0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) N0.append(MS.Surfs[ss].normals.copy()) #", "Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) N0.append(MS.Surfs[ss].normals.copy()) # Computation Steps=[1e-2, 1e-4, 1e-6] nsteps = len(Steps) error", "''' Note: the get_joukovski_qs method re-computes the induced velocity at the panel segments.", "everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore zeta: (include only induced velocity contrib.) 
Surf_in.zeta=Zeta0[ss_in].copy() Surf_star_in.zeta=Zeta0_star[ss_in].copy() #", "ErAbs, ErRel=max_error_tensor(Ders_an[ss],Dnum) # max absolute error ermax=np.max(ErAbs) # relative error at max abs", "sharpy.utils.algebra as algebra np.set_printoptions(linewidth=200,precision=3) def max_error_tensor(Pder_an,Pder_num): ''' Finds the maximum error analytical derivatives", "in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) V0=comp_vind(zetac,MS) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ###", "_,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) # relative error at max abs error", "assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max.append(er_max) ### Wake Surf.gamma=gamma0.copy()", "in range(3*Kzeta): Surf.u_ext=u_ext0.copy() Surf.zeta_dot=zeta_dot0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.u_ext[ind_3d]+=0.5*step Surf.zeta_dot[ind_3d]+=-0.5*step Surf.get_input_velocities_at_segments() Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C')", "abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound and wake%.2d\\tFDstep\\tErrAbs\\tErrRel'%ss_in) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ercoll<10*step, 'Error", "### check error at colloc Dercoll_num=np.zeros((3,3)) for cc in range(3): zetac_pert=zetac.copy() zetac_pert[cc]+=step Vnum=comp_vind(zetac_pert,MS)", "obtained even with large steps. 
In fact, reducing # the step quickly introduced", "error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound and wake%.2d\\tFDstep\\tErrAbs\\tErrRel'%ss_in) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ercoll<10*step, 'Error at", "''' Test derivative of unsteady aerodynamic force with respect to changes in panel", "than 50 times step size' Er_max.append(er_max) def test_dfqsdzeta_omega(self): ''' Note: the get_joukovski_qs method", "the previous grid Surf.get_input_velocities_at_segments() Surf.zeta=zeta0.copy() # Compute new forces Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num))", "kk, (3,M_in+1,N_in+1) ) # perturb bound Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step # perturb wake TE if", "the case Der_an_list=assembly.dfqsdzeta_omega(MS.Surfs,MS.Surfs_star) # Initialize Er_max=[] # Define steps to run Steps=[1e-2,1e-4,1e-6,] for", "absolute error ermax=np.max(ErAbs) # relative error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax]", "larger than 50 times step size' Er_max[ss]=er_max # assert error decreases with step", "on gamma is linear, hence # great accuracy is obtained even with large", "error tensor may contain NaN or Inf if the analytical derivative is zero.", "ipanel += 1 # COMPUTE THE ERROR error[istep] = np.maximum(error[istep], np.absolute(Der_num-Der_an).max()) print('FD step:", "%d' %(ss_in,ss_out)) # ax2=fig.add_subplot(132) # ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel %d to %d' %(ss_in,ss_out))", "reduced' print('------------------------------------------------------------ OK') def test_dfqsdgamma_vrel0(self): print('----------------------------- Testing assembly.dfqsdgamma_vrel0') MS=self.MS n_surf=MS.n_surf Der_list,Der_star_list=assembly.dfqsdgamma_vrel0(MS.Surfs,MS.Surfs_star) 
Er_max=[] Er_max_star=[]", "mm==M: Surf_star.zeta=Zeta0_star[ss].copy() Surf_star.zeta[cc,0,nn] += perturb_vector[kk] Surf.generate_collocations() # COMPUTE THE DERIVATIVES Der_an = np.zeros(Surf.maps.K)", "Compute new forces Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD", "FDs Steps=[1e-6,] step=Steps[0] ### loop input surfs for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in]", "def test_uc_dncdzeta(self,PlotFlag=False): print('---------------------------------- Testing assembly.uc_dncdzeta') MS=self.MS n_surf=MS.n_surf MS.get_ind_velocities_at_collocation_points() MS.get_normal_ind_velocities_at_collocation_points() for ss in range(n_surf):", "n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.dfqsdvind_zeta(MS.Surfs,MS.Surfs_star) # allocate numerical Derlist_num=[] for ii in range(n_surf): sub=[]", "DoFs where modifications will take place and modify the grid ind_3d=np.unravel_index(kk, (3,M+1,N+1) )", "test_nc_domegazetadzeta(self): \"\"\" Variation at colocation points due to geometrical variations at vertices Needs", "print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step and ermax_rel<50*step, 'Test failed!' 
def test_wake_prop(self): MS=self.MS C_list,Cstar_list=assembly.wake_prop(MS.Surfs,MS.Surfs_star) n_surf=len(MS.Surfs)", "COMPUTE THE ERROR error[istep] = np.maximum(error[istep], np.absolute(Der_num-Der_an).max()) print('FD step: %.2e ---> Max error:", "n_surf=MS.n_surf zetac=.5*(MS.Surfs[0].zeta[:,1,2]+MS.Surfs[0].zeta[:,1,3]) Dercoll=np.zeros((3,3)) Dervert_list=[] for ss_in in range(n_surf): dcoll_b,dvert_b=assembly.dvinddzeta(zetac,MS.Surfs[ss_in],IsBound=True) dcoll_w,dvert_w=assembly.dvinddzeta(zetac,MS.Surfs_star[ss_in], IsBound=False,M_in_bound=MS.Surfs[ss_in].maps.M) Dercoll+=dcoll_b+dcoll_w Dervert_list.append(dvert_b+dvert_w)", "Surf_star_in=MS.Surfs_star[ss_in] M_in,N_in=Surf_in.maps.M,Surf_in.maps.N # perturb for kk in range(3*Surf_in.maps.Kzeta): cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) ) #", "Kzeta=Surf.maps.Kzeta # Save the reference values at equilibrium fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() u_input_seg0=Surf.u_input_seg.copy() for step", "derivatives Der_an=Der_an_list[ss] # Copy to avoid modifying the original for other tests Surf=copy.deepcopy(MS.Surfs[ss])", "velocity Surf_pert=surface.AeroGridSurface(Surf.maps,zeta=zeta_pert, u_ext=Surf.u_ext,gamma=Surf.gamma) u_norm=Surf_pert.project_coll_to_normal(u_tot0) u_norm_vec=u_norm.reshape(-1,order='C') # FD derivative DerNum[:,jj]=(u_norm_vec-u_norm0_vec)/step er_max=np.max(np.abs(Der-DerNum)) print('FD step: %.2e", "perturb bound. 
vertices and collocation Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step Surf_in.generate_collocations() # perturb wake TE if", "error at vert for ss_in in range(n_surf): Der_an=Dervert_list[ss_in] Der_num=Dervert_list_num[ss_in] ermax,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute", "error: %.2e'%(step,er_max) ) assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max.append(er_max)", "def test_dfqsdzeta_omega(self): ''' Note: the get_joukovski_qs method re-computes the induced velocity at the", "tests are not affected. Needs to be tested with a case that actually", "movements Surf.zeta=zeta0.copy() # Define DoFs where modifications will take place and modify the", "np.abs(der_here)>err_max: err_max=der_here return err_max, Eabs, Erel class Test_assembly(unittest.TestCase): ''' Test methods into assembly", "np.zeros(Surf.maps.K) Der_an = np.dot(Dervert_list[ss], perturb_vector) Der_num = np.zeros(Surf.maps.K) ipanel = 0 skew_omega =", "store reference circulation and force Gamma0=[] Gammaw0=[] Fqs0=[] for ss in range(n_surf): Gamma0.append(MS.Surfs[ss].gamma.copy())", "noise to circulations gamma=Surf.gamma+np.random.rand( *Surf.gamma.shape ) gamma_star=Surf_star.gamma+np.random.rand( *Surf_star.gamma.shape ) gvec=np.dot(C,gamma.reshape(-1))+np.dot(Cstar,gamma_star.reshape(-1)) gvec_ref=np.concatenate((gamma[-1,:],gamma_star[:-1,:].reshape(-1))) assert np.max(np.abs(gvec-gvec_ref))<1e-15,\\", "if np.abs(der_here)>err_max: err_max=der_here return err_max, Eabs, Erel class Test_assembly(unittest.TestCase): ''' Test methods into", "'Prop. from trailing edge not correct' if __name__=='__main__': unittest.main() # T=Test_assembly() # T.setUp()", "to changes in panel circulation. Warning: test assumes the derivative of the unsteady", "ensure that other tests are not affected. Needs to be tested with a", "contain NaN or Inf if the analytical derivative is zero. 
These elements are", "ercoll>10*step: embed() assert ercoll<10*step, 'Error at collocation point' ### check error at vert", "<filename>tests/linear/assembly/test_assembly.py ''' Test assembly <NAME>, 29 May 2018 ''' import os import copy", "there induced velocity is computed, all other surfaces are looped. For wakes, only", "AttributeError: pass ### recalculate MS.get_normal_ind_velocities_at_collocation_points() # restore Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zetac=ZetaC0[ss_in].copy('F') Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives", "'option Merge=True not working correctly, relative error (%.3e) too high!' %ErRel # allocate", "Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Vind0.append(MS.Surfs[ss].u_ind_coll_norm.copy()) N0.append(MS.Surfs[ss].normals.copy()) # calculate vis FDs Steps=[1e-6,] step=Steps[0] ### loop input surfs", ") assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max_star.append(er_max) Surf.gamma=gamma0.copy() ###", "for step in Steps: Der_num=0.0*Der_an Der_star_num=0.0*Der_star_an ### Bound for pp in range(K): mm=Surf.maps.ind_2d_pan_scal[0][pp]", "as scalg import sharpy.utils.h5utils as h5utils import sharpy.linear.src.assembly as assembly import sharpy.linear.src.multisurfaces as", "return err_max, Eabs, Erel class Test_assembly(unittest.TestCase): ''' Test methods into assembly module '''", "pp in range(K): mm,nn=np.unravel_index( pp, (M,N) ) Surf.gamma_dot=Gamma_dot0.copy() Surf.gamma_dot[mm,nn]+=step Surf.get_joukovski_unsteady() dF=(Surf.funst-F0)/step Dnum[:,pp]=dF.reshape(-1) #", "in range(n_surf): dcoll_b,dvert_b=assembly.dvinddzeta(zetac,MS.Surfs[ss_in],IsBound=True) dcoll_w,dvert_w=assembly.dvinddzeta(zetac,MS.Surfs_star[ss_in], IsBound=False,M_in_bound=MS.Surfs[ss_in].maps.M) Dercoll+=dcoll_b+dcoll_w Dervert_list.append(dvert_b+dvert_w) # allocate numerical Dercoll_num=np.zeros((3,3)) Dervert_list_num=[] for", "out during the search for maximum error, and absolute error is checked. 
'''", "Ders_num=[] n_surf=len(MS.Surfs) for ss in range(n_surf): Surf=MS.Surfs[ss] Kzeta,K=Surf.maps.Kzeta,Surf.maps.K M,N=Surf.maps.M,Surf.maps.N Dnum=np.zeros((3*Kzeta,K)) # get refernce", "Test assembly <NAME>, 29 May 2018 ''' import os import copy import warnings", "%.2e'%(step,error[istep]) ) assert error[istep]<5e1*step, 'Error larger than 50 times the step size' if", "to avoid modifying the original for other tests Surf=copy.deepcopy(MS.Surfs[ss]) # Define variables M,N=Surf.maps.M,Surf.maps.N", "def setUp(self): # select test case fname = os.path.dirname(os.path.abspath(__file__)) + '/h5input/goland_mod_Nsurf01_M003_N004_a040.aero_state.h5' haero =", "in range(3*Surf.maps.Kzeta): # generate a random perturbation between the 90% and the 110%", "nn_pert=Surf.maps.ind_3d_vert_vect[2][jj] zeta_pert=zeta0.copy() zeta_pert[cc_pert,mm_pert,nn_pert]+=step # calculate new normal velocity Surf_pert=surface.AeroGridSurface(Surf.maps,zeta=zeta_pert, u_ext=Surf.u_ext,gamma=Surf.gamma) u_norm=Surf_pert.project_coll_to_normal(u_tot0) u_norm_vec=u_norm.reshape(-1,order='C') #", "in range(N): Der_num[ipanel] = (np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, ZetaC0[ss][:,mm,nn])) - np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, Surf.zetac[:,mm,nn]))) ipanel +=", "'Error at collocation point' ### check error at vert for ss_in in range(n_surf):", "(three directions per vertex point) for kk in range(3*Kzeta): # Initialize to remove", "step size' if istep > 0: assert error[istep]<=error[istep-1],\\ 'Error not decreasing as FD", "error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d\\t\\t\\tFDstep\\tErrAbs\\tErrRel'%(ss,)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step and ermax_rel<50*step, 'Test failed!'", "Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] ermax, ErAbs, ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) # relative error", "# 
#plt.show() # plt.close() def test_dfunstdgamma_dot(self): ''' Test derivative of unsteady aerodynamic force", "Surf.get_input_velocities_at_segments() Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD step: %.2e", "max_error_tensor(Pder_an,Pder_num): ''' Finds the maximum error analytical derivatives Pder_an. The error is: -", "Compute the anaytical derivative of the case Der_an_list=assembly.dfqsdzeta_omega(MS.Surfs,MS.Surfs_star) # Initialize Er_max=[] # Define", "inf... iifinite=np.isfinite(Erel) err_max=0.0 for err_here in Erel[iifinite]: if np.abs(err_here)>err_max: err_max=err_here # Zero elements", "for ss_in in range(n_surf): dcoll_b,dvert_b=assembly.dvinddzeta(zetac,MS.Surfs[ss_in],IsBound=True) dcoll_w,dvert_w=assembly.dvinddzeta(zetac,MS.Surfs_star[ss_in], IsBound=False,M_in_bound=MS.Surfs[ss_in].maps.M) Dercoll+=dcoll_b+dcoll_w Dervert_list.append(dvert_b+dvert_w) # allocate numerical Dercoll_num=np.zeros((3,3))", "numerical # Derlist_num=[] # for ii in range(n_surf): # sub=[] # for jj", "estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] dvind=(Surf_out.u_ind_coll_norm-Vind0[ss_out])/step Derlist_num[ss_out][ss_in][:,kk]=dvind.reshape(-1,order='C') ### check error for", "Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(131) # ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs %d to", "Der_star_list_num.append(sub_star) # store reference circulation and force Gamma0=[] Gammaw0=[] Fqs0=[] for ss in", "ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0]", "TE if mm==M: Surf_star.zeta=Zeta0_star[ss].copy() Surf_star.zeta[cc,0,nn] += perturb_vector[kk] Surf.generate_collocations() # COMPUTE THE 
DERIVATIVES Der_an", "point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] assert ermax_rel<1e-16,\\ 'option Merge=True not working correctly, relative error (%.3e)", "range(n_surf): Surf_out=MS.Surfs[ss_out] dvind=(Surf_out.u_ind_coll_norm-Vind0[ss_out])/step Derlist_num[ss_out][ss_in][:,kk]=dvind.reshape(-1,order='C') ### check error for ss_out in range(n_surf): for ss_in", "error (%.3e) too high!' %ErRel # allocate numerical Derlist_num=[] for ii in range(n_surf):", "case Der_an_list=assembly.dfqsdzeta_omega(MS.Surfs,MS.Surfs_star) # Initialize Er_max=[] # Define steps to run Steps=[1e-2,1e-4,1e-6,] for ss", "is displaced. ''' print('----------------------------- Testing assembly.test_nc_dqcdzeta') MS=self.MS n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star) # check", "tsdata.omega.append(haero.data.structure.timestep_info[-1].for_vel[3:6]) MS=multisurfaces.MultiAeroGridSurfaces(tsdata) MS.get_normal_ind_velocities_at_collocation_points() MS.verify_non_penetration() MS.verify_aic_coll() MS.get_joukovski_qs() MS.verify_joukovski_qs() self.MS=MS def test_nc_dqcdzeta(self): ''' For each", "Surf=copy.deepcopy(MS.Surfs[ss]) # Define variables M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta # Save the reference values at", "assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max.append(er_max) def test_dfqsdvind_gamma(self): print('------------------------------", "- np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, Surf.zetac[:,mm,nn]))) ipanel += 1 # COMPUTE THE ERROR error[istep] =", "and zeta_dot ''' print('---------------------------------- Testing assembly.dfqsduinput') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsduinput(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss", "print('Bound%.2d\\t\\t\\tFDstep\\tErrAbs\\tErrRel'%(ss,)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step and ermax_rel<50*step, 'Test 
failed!' def test_wake_prop(self): MS=self.MS C_list,Cstar_list=assembly.wake_prop(MS.Surfs,MS.Surfs_star)", "FD step size is reduced' def test_dfqsdzeta_vrel0(self): ''' Note: the get_joukovski_qs method re-computes", "step*(0.2*np.random.rand()+0.9) cc,mm,nn=np.unravel_index( kk, (3,M+1,N+1) ) # perturb bound. vertices and collocation Surf.zeta=Zeta0[ss].copy() Surf.zeta[cc,mm,nn]", "velocity V=np.zeros((3,)) for ss in range(n_surf): Surf_in=MS.Surfs[ss] Surf_star_in=MS.Surfs_star[ss] V+=Surf_in.get_induced_velocity(zetac) V+=Surf_star_in.get_induced_velocity(zetac) return V print('-----------------------------------", "ss_out in range(n_surf): for ss_in in range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] _,ErAbs,ErRel=max_error_tensor(Der_an,Der_num)", "''' import os import copy import warnings import unittest import itertools import numpy", "###### wake for ss_in in range(n_surf): Surf_in=MS.Surfs_star[ss_in] # perturb for pp in range(Surf_in.maps.K):", "in Pder_num[iizero]: if np.abs(der_here)>err_max: err_max=der_here return err_max, Eabs, Erel class Test_assembly(unittest.TestCase): ''' Test", "# perturb wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step # recalculate induced velocity everywhere", "range(n_surf): print('Surface %.2d:' %ss) Surf=MS.Surfs[ss] # generate non-zero field of external force Surf.u_ext[0,:,:]=Surf.u_ext[0,:,:]-20.0", "for pp in range(K): mm=Surf.maps.ind_2d_pan_scal[0][pp] nn=Surf.maps.ind_2d_pan_scal[1][pp] Surf.gamma=gamma0.copy() Surf.gamma[mm,nn]+=step Surf.get_joukovski_qs(gammaw_TE=Surf_star.gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface", "and the maximum error. 
@warning: The relative error tensor may contain NaN or", "Der',figsize=(10,4)) # ax1 = fig.add_subplot(111) # ax1.spy(ErMat,precision=50*step) # plt.show() def test_dvinddzeta(self): ''' For", "fig.add_subplot(111) # ax1.spy(ErMat,precision=50*step) # plt.show() def test_dvinddzeta(self): ''' For each output surface, there", "other surfaces are looped. For wakes, only TE is displaced. ''' def comp_vind(zetac,MS):", "Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] _,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) # relative error at max", "the absolute and relative error tensors, and the maximum error. @warning: The relative", "to geometrical variations at vertices Needs to be tested with a case that", "itertools import numpy as np import scipy.linalg as scalg import sharpy.utils.h5utils as h5utils", "%(ss_in,ss_out)) # ax3=fig.add_subplot(133) # ax3.spy(Dercoll_list[ss_out],precision=50*step) # ax3.set_title('Dcoll an. %d to %d' %(ss_out,ss_out)) #", "sub.append(0.0*Der_list[ii][jj]) sub_star.append(0.0*Der_star_list[ii][jj]) Der_list_num.append(sub) Der_star_list_num.append(sub_star) # store reference circulation and force Gamma0=[] Gammaw0=[] Fqs0=[]", "ss in range(n_surf): Gamma0.append(MS.Surfs[ss].gamma.copy()) Gammaw0.append(MS.Surfs_star[ss].gamma.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-5,] step=Steps[0]", "'Error not decreasing as FD step size is reduced' print('------------------------------------------------------------ OK') if PlotFlag:", "panel segments. 
A copy of Surf is required to ensure that other tests", "fig = plt.figure('Spy Der',figsize=(10,4)) # ax1 = fig.add_subplot(111) # ax1.spy(ErMat,precision=50*step) # plt.show() def", "step = Steps[istep] for ss in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N perturb_vector = np.zeros(3*Surf.maps.Kzeta)", "at colocation points due to geometrical variations at vertices Needs to be tested", "larger than 50 times step size' Er_max.append(er_max) def test_dfqsduinput(self): ''' Step change in", "'Error not decreasing as FD step size is reduced' def test_dfqsdzeta_vrel0(self): ''' Note:", ") assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max.append(er_max) def test_dfqsdzeta_omega(self):", "# recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore circulation: (include only induced velocity", "induced velocity everywhere Vnum=comp_vind(zetac,MS) dv=(Vnum-V0)/step Dervert_list_num[ss_in][:,kk]=dv.reshape(-1,order='C') # restore Surf_in.zeta=Zeta0[ss_in].copy() if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() ###", "assembly.dfqsdvind_zeta') MS=self.MS n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.dfqsdvind_zeta(MS.Surfs,MS.Surfs_star) # allocate numerical Derlist_num=[] for ii in", "Surf_out.normals=N0[ss_out].copy() del Surf_out.u_ind_coll_norm try: del Surf_out.u_ind_coll except AttributeError: pass ### recalculate MS.get_normal_ind_velocities_at_collocation_points() #", "nn in range(N): Der_num[ipanel] = (np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, ZetaC0[ss][:,mm,nn])) - np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, Surf.zetac[:,mm,nn]))) ipanel", "T.test_dfqsduinput() # T.test_dfqsdzeta_vrel0() # T.test_dfqsdgamma_vrel0() # ### state equation terms # T.test_uc_dncdzeta() #", "50 times step size' Er_max.append(er_max) def test_dfqsdzeta_omega(self): ''' Note: the get_joukovski_qs method re-computes", 
"'/h5input/goland_mod_Nsurf01_M003_N004_a040.aero_state.h5' haero = h5utils.readh5(fname) tsdata = haero.ts00000 # # Rotating cases # fname", "collocation Surf.zeta=Zeta0[ss].copy() Surf.zeta[cc,mm,nn] += perturb_vector[kk] # perturb wake TE if mm==M: Surf_star.zeta=Zeta0_star[ss].copy() Surf_star.zeta[cc,0,nn]", "MS=self.MS n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star) # check option Der_all_exp=np.block(Dervert_list)+scalg.block_diag(*Dercoll_list) Der_all=np.block( assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star,Merge=True) ) _,ErAbs,ErRel=max_error_tensor(Der_all,Der_all_exp)", "check error for ss_out in range(n_surf): for ss_in in range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out:", "vertices for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in] M_in,N_in=Surf_in.maps.M,Surf_in.maps.N # perturb for kk in", "forces Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD step: %.2e", "range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) V0=comp_vind(zetac,MS) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ### vertices", "is linear, hence # great accuracy is obtained even with large steps. In", "in Steps: # Initialize Der_num = 0.0*Der_an # Loop through the different grid", "as surface import sharpy.linear.src.libuvlm as libuvlm import sharpy.utils.algebra as algebra np.set_printoptions(linewidth=200,precision=3) def max_error_tensor(Pder_an,Pder_num):", "import scipy.linalg as scalg import sharpy.utils.h5utils as h5utils import sharpy.linear.src.assembly as assembly import", "For wakes, only TE is displaced. 
''' def comp_vind(zetac,MS): # comute induced velocity", "### loop input surfs for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in] M_in,N_in=Surf_in.maps.M,Surf_in.maps.N # perturb", "Derlist_num=[] for ii in range(n_surf): sub=[] for jj in range(n_surf): sub.append(0.0*Dervert_list[ii][jj]) Derlist_num.append(sub) #", "os.path.dirname(os.path.abspath(__file__)) + '/h5input/goland_mod_Nsurf01_M003_N004_a040.aero_state.h5' haero = h5utils.readh5(fname) tsdata = haero.ts00000 # # Rotating cases", "ax3.set_title('Dcoll an. %d to %d' %(ss_out,ss_out)) # #plt.show() # plt.close() def test_dfunstdgamma_dot(self): '''", "%d to %d' %(ss_in,ss_out)) # ax2=fig.add_subplot(132) # ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel %d to", "ax1.spy(ErMat,precision=50*step) # plt.show() def test_dvinddzeta(self): ''' For each output surface, there induced velocity", "check iizero=np.abs(Pder_an)<1e-15 for der_here in Pder_num[iizero]: if np.abs(der_here)>err_max: err_max=der_here return err_max, Eabs, Erel", "(M,N) ) Surf.gamma_dot=Gamma_dot0.copy() Surf.gamma_dot[mm,nn]+=step Surf.get_joukovski_unsteady() dF=(Surf.funst-F0)/step Dnum[:,pp]=dF.reshape(-1) # restore Surf.gamma_dot=Gamma_dot0.copy() ### verify ermax,", "place and modify the grid ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step # Recompute get_ind_velocities_at_segments and", "for kk in range(3*Surf_in.maps.Kzeta): cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) ) # perturb bound Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step", "relative error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d\\t\\t\\tFDstep\\tErrAbs\\tErrRel'%(ss,)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert", "Der_an=Der_list[ss_out][ss_in] Der_num=Der_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) 
print('Bound%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step, 'Test failed!' Der_an=Der_star_list[ss_out][ss_in] Der_num=Der_star_list_num[ss_out][ss_in]", "for mm in range(M): for nn in range(N): Der_num[ipanel] = (np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, ZetaC0[ss][:,mm,nn]))", "reference grid Zeta0=[] Zeta0_star=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) V0=comp_vind(zetac,MS) # calculate", "assembly.dfqsdzeta_omega') # rename MS=self.MS n_surf=MS.n_surf # Compute the anaytical derivative of the case", "contrib.) Surf_in.gamma=Gammaw0[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:])", "K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() u_ext0=Surf.u_ext.copy() zeta_dot0=Surf.zeta_dot.copy() for step in Steps: Der_num=0.0*Der_an for kk in", "Zeta0=[] Zeta0_star=[] Vind0=[] N0=[] ZetaC0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Vind0.append(MS.Surfs[ss].u_ind_coll_norm.copy())", "ii in range(n_surf): # sub=[] # for jj in range(n_surf): # sub.append(0.0*Dervert_list[ii][jj]) #", "error = np.zeros((nsteps,)) for istep in range(nsteps): step = Steps[istep] for ss in", "N0=[] ZetaC0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Vind0.append(MS.Surfs[ss].u_ind_coll_norm.copy()) N0.append(MS.Surfs[ss].normals.copy()) # calculate", "not decreasing as FD step size is reduced' print('------------------------------------------------------------ OK') def test_dfqsdgamma_vrel0(self): print('-----------------------------", "# Select the surface with the analytica 
derivatives Der_an=Der_an_list[ss] # Copy to avoid", "everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore circulation: (include only induced velocity contrib.) Surf_in.gamma=Gammaw0[ss_in].copy() # estimate", "the maximum error. @warning: The relative error tensor may contain NaN or Inf", "and recover the previous grid Surf.get_input_velocities_at_segments() Surf.zeta=zeta0.copy() # Compute new forces Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step", "recalculate induced velocity everywhere Vnum=comp_vind(zetac,MS) dv=(Vnum-V0)/step Dervert_list_num[ss_in][:,kk]=dv.reshape(-1,order='C') # restore Surf_in.zeta=Zeta0[ss_in].copy() if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy()", "as FD step size is reduced' print('------------------------------------------------------------ OK') if PlotFlag: pass # fig", "Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N perturb_vector = np.zeros(3*Surf.maps.Kzeta) # PERTURBATION OF THE SURFACE for kk in", "haero.ts00000 # # Rotating cases # fname = './basic_rotating_wing/basic_wing.data.h5' # haero = h5utils.readh5(fname)", "output surfaces # - ensure normals are unchanged # - del ind. vel", "Der_all=np.block( assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star,Merge=True) ) _,ErAbs,ErRel=max_error_tensor(Der_all,Der_all_exp) # max absolute error ermax=np.max(ErAbs) # relative error at", "dvind=(Surf_out.u_ind_coll_norm-Vind0[ss_out])/step Derlist_num[ss_out][ss_in][:,kk]=dvind.reshape(-1,order='C') ### check error for ss_out in range(n_surf): for ss_in in range(n_surf):", "in range(3*Surf.maps.Kzeta): # perturb cc_pert=Surf.maps.ind_3d_vert_vect[0][jj] mm_pert=Surf.maps.ind_3d_vert_vect[1][jj] nn_pert=Surf.maps.ind_3d_vert_vect[2][jj] zeta_pert=zeta0.copy() zeta_pert[cc_pert,mm_pert,nn_pert]+=step # calculate new normal", "contrib.) 
Surf_in.gamma=Gamma0[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:])", "in range(n_surf): Surf_in=MS.Surfs[ss_in] # perturb for pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gamma0[ss_in].copy() Surf_in.gamma[mm,nn]+=step", "- ensure normals are unchanged # - del ind. vel on output to", "everywhere Vnum=comp_vind(zetac,MS) dv=(Vnum-V0)/step Dervert_list_num[ss_in][:,kk]=dv.reshape(-1,order='C') # restore Surf_in.zeta=Zeta0[ss_in].copy() if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() ### check error", "u_ext0=Surf.u_ext.copy() zeta_dot0=Surf.zeta_dot.copy() for step in Steps: Der_num=0.0*Der_an for kk in range(3*Kzeta): Surf.u_ext=u_ext0.copy() Surf.zeta_dot=zeta_dot0.copy()", "perturb_vector[kk] # perturb wake TE if mm==M: Surf_star.zeta=Zeta0_star[ss].copy() Surf_star.zeta[cc,0,nn] += perturb_vector[kk] Surf.generate_collocations() #", "ind velocities computed already Surf.get_input_velocities_at_collocation_points() Der=assembly.uc_dncdzeta(Surf) ### numerical derivative #Surf.get_normal_input_velocities_at_collocation_points() u_tot0=Surf.u_ind_coll+Surf.u_input_coll u_norm0=Surf.project_coll_to_normal(u_tot0) u_norm0_vec=u_norm0.reshape(-1,order='C')", "# perturb bound. vertices and collocation Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step Surf_in.generate_collocations() # perturb wake TE", "or Inf if the analytical derivative is zero. 
These elements are filtered out", "Surf=MS.Surfs[ss] # generate non-zero field of external force Surf.u_ext[0,:,:]=Surf.u_ext[0,:,:]-20.0 Surf.u_ext[1,:,:]=Surf.u_ext[1,:,:]+60.0 Surf.u_ext[2,:,:]=Surf.u_ext[2,:,:]+30.0 Surf.u_ext=Surf.u_ext+np.random.rand(*Surf.u_ext.shape) ###", "estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Derlist_num[ss_out][ss_in][:,kk]=df.reshape(-1,order='C') ###", "range(3*Surf.maps.Kzeta): # perturb cc_pert=Surf.maps.ind_3d_vert_vect[0][jj] mm_pert=Surf.maps.ind_3d_vert_vect[1][jj] nn_pert=Surf.maps.ind_3d_vert_vect[2][jj] zeta_pert=zeta0.copy() zeta_pert[cc_pert,mm_pert,nn_pert]+=step # calculate new normal velocity", "in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Der_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ###### wake for ss_in in", "Surf.u_ext[0,:,:]=Surf.u_ext[0,:,:]-20.0 Surf.u_ext[1,:,:]=Surf.u_ext[1,:,:]+60.0 Surf.u_ext[2,:,:]=Surf.u_ext[2,:,:]+30.0 Surf.u_ext=Surf.u_ext+np.random.rand(*Surf.u_ext.shape) ### analytical derivative # ind velocities computed already Surf.get_input_velocities_at_collocation_points()", "Erel class Test_assembly(unittest.TestCase): ''' Test methods into assembly module ''' def setUp(self): #", "the 110% of the step perturb_vector[kk] += step*(0.2*np.random.rand()+0.9) cc,mm,nn=np.unravel_index( kk, (3,M+1,N+1) ) #", "# add noise to circulations gamma=Surf.gamma+np.random.rand( *Surf.gamma.shape ) gamma_star=Surf_star.gamma+np.random.rand( *Surf_star.gamma.shape ) gvec=np.dot(C,gamma.reshape(-1))+np.dot(Cstar,gamma_star.reshape(-1)) gvec_ref=np.concatenate((gamma[-1,:],gamma_star[:-1,:].reshape(-1)))", "%d' %(ss_in,ss_out)) # ax3=fig.add_subplot(133) # ax3.spy(Dercoll_list[ss_out],precision=50*step) # 
ax3.set_title('Dcoll an. %d to %d' %(ss_out,ss_out))", "the variabes Zeta0=[] Zeta0_star=[] N0=[] ZetaC0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy())", "range(3*Surf_in.maps.Kzeta): cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) ) # perturb bound. vertices and collocation Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step", "is checked. ''' Eabs=np.abs(Pder_num-Pder_an) nnzvec=Pder_an!=0 Erel=np.zeros(Pder_an.shape) Erel[nnzvec]=np.abs(Eabs[nnzvec]/Pder_an[nnzvec]) # Relative error check: remove NaN", "high!' %ErRel # allocate numerical Derlist_num=[] for ii in range(n_surf): sub=[] for jj", "step size for ss in range(1,len(Steps)): assert Er_max[ss]<Er_max[ss-1],\\ 'Error not decreasing as FD", "algebra.skew(Surf.omega) for mm in range(M): for nn in range(N): Der_num[ipanel] = (np.dot(N0[ss][:,mm,nn], np.dot(skew_omega,", "points ''' MS=self.MS Ders_an=assembly.dfunstdgamma_dot(MS.Surfs) step=1e-6 Ders_num=[] n_surf=len(MS.Surfs) for ss in range(n_surf): Surf=MS.Surfs[ss] Kzeta,K=Surf.maps.Kzeta,Surf.maps.K", "is true only for steady-state linearisation points ''' MS=self.MS Ders_an=assembly.dfunstdgamma_dot(MS.Surfs) step=1e-6 Ders_num=[] n_surf=len(MS.Surfs)", "warnings import unittest import itertools import numpy as np import scipy.linalg as scalg", ") _,ErAbs,ErRel=max_error_tensor(Der_all,Der_all_exp) # max absolute error ermax=np.max(ErAbs) # relative error at max abs", "for ss in range(n_surf): Gamma0.append(MS.Surfs[ss].gamma.copy()) Gammaw0.append(MS.Surfs_star[ss].gamma.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-5,]", "filtered out during the search for maximum error, and absolute error is checked.", "step=Steps[0] ###### bound for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] # perturb for pp in", "sharpy.linear.src.libuvlm as libuvlm import sharpy.utils.algebra as 
algebra np.set_printoptions(linewidth=200,precision=3) def max_error_tensor(Pder_an,Pder_num): ''' Finds the", "mm=Surf.maps.ind_2d_pan_scal[0][pp] nn=Surf.maps.ind_2d_pan_scal[1][pp] Surf.gamma=gamma0.copy() Surf.gamma[mm,nn]+=step Surf.get_joukovski_qs(gammaw_TE=Surf_star.gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss)", "T.setUp() # ### force equation (qs term) # T.test_dvinddzeta() # T.test_dfqsdvind_zeta() # run", "def test_wake_prop(self): MS=self.MS C_list,Cstar_list=assembly.wake_prop(MS.Surfs,MS.Surfs_star) n_surf=len(MS.Surfs) for ss in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] N=Surf.maps.N K_star=Surf_star.maps.K", "110% of the step perturb_vector[kk] += step*(0.2*np.random.rand()+0.9) cc,mm,nn=np.unravel_index( kk, (3,M+1,N+1) ) # perturb", "assembly import sharpy.linear.src.multisurfaces as multisurfaces import sharpy.linear.src.surface as surface import sharpy.linear.src.libuvlm as libuvlm", "iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] assert ermax_rel<1e-16,\\ 'option Merge=True not working correctly, relative error (%.3e) too", "is displaced. ''' def comp_vind(zetac,MS): # comute induced velocity V=np.zeros((3,)) for ss in", "perturb wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True)", "circulation and force Zeta0=[] Zeta0_star=[] Fqs0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy())", "velocity contrib.) 
Surf_in.gamma=Gammaw0[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs(", "gammaw_TE=gammaw_TE0.copy() gammaw_TE[nn]+=step Surf.get_joukovski_qs(gammaw_TE=gammaw_TE) df=(Surf.fqs-fqs0)/step Der_star_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_star_an-Der_star_num)) print('Surface %.2d - wake:' %ss) print('FD step:", "K_star=Surf_star.maps.K C=C_list[ss] Cstar=Cstar_list[ss] # add noise to circulations gamma=Surf.gamma+np.random.rand( *Surf.gamma.shape ) gamma_star=Surf_star.gamma+np.random.rand( *Surf_star.gamma.shape", "# perturb for pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gammaw0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate induced", "range(n_surf): Der_an=Der_list[ss] Surf=copy.deepcopy(MS.Surfs[ss]) #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() for step in Steps:", "kk in range(3*Kzeta): Surf.zeta=zeta0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface", "not affected. Needs to be tested with a case that actually rotates '''", "in range(3*Surf_in.maps.Kzeta): cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) ) # perturb bound. 
vertices and collocation Surf_in.zeta=Zeta0[ss_in].copy()", "May 2018 ''' import os import copy import warnings import unittest import itertools", "cc_pert=Surf.maps.ind_3d_vert_vect[0][jj] mm_pert=Surf.maps.ind_3d_vert_vect[1][jj] nn_pert=Surf.maps.ind_3d_vert_vect[2][jj] zeta_pert=zeta0.copy() zeta_pert[cc_pert,mm_pert,nn_pert]+=step # calculate new normal velocity Surf_pert=surface.AeroGridSurface(Surf.maps,zeta=zeta_pert, u_ext=Surf.u_ext,gamma=Surf.gamma) u_norm=Surf_pert.project_coll_to_normal(u_tot0)", "displaced. ''' print('----------------------------- Testing assembly.test_nc_dqcdzeta') MS=self.MS n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star) # check option", "FD step size is reduced' print('------------------------------------------------------------ OK') if PlotFlag: pass # fig =", "check error at colloc Dercoll_num=np.zeros((3,3)) for cc in range(3): zetac_pert=zetac.copy() zetac_pert[cc]+=step Vnum=comp_vind(zetac_pert,MS) Dercoll_num[:,cc]=(Vnum-V0)/step", "are filtered out during the search for maximum error, and absolute error is", "(3,M+1,N+1) ) Surf.u_ext[ind_3d]+=0.5*step Surf.zeta_dot[ind_3d]+=-0.5*step Surf.get_input_velocities_at_segments() Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:'", "for ii in range(n_surf): sub=[] sub_star=[] for jj in range(n_surf): sub.append(0.0*Der_list[ii][jj]) sub_star.append(0.0*Der_star_list[ii][jj]) Der_list_num.append(sub)", "step in Steps: # Initialize Der_num = 0.0*Der_an # Loop through the different", "values at equilibrium fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() u_input_seg0=Surf.u_input_seg.copy() for step in Steps: # Initialize Der_num", "the initial values of the variabes Zeta0=[] Zeta0_star=[] N0=[] ZetaC0=[] for ss in", "error at colloc Dercoll_num=np.zeros((3,3)) for cc in range(3): 
zetac_pert=zetac.copy() zetac_pert[cc]+=step Vnum=comp_vind(zetac_pert,MS) Dercoll_num[:,cc]=(Vnum-V0)/step ercoll=np.max(np.abs(Dercoll-Dercoll_num))", "in range(n_surf): Der_an=Der_list[ss] Der_star_an=Der_star_list[ss] Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K fqs0=Surf.fqs.copy() gamma0=Surf.gamma.copy() for step in", "ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) # <--- gammaw_0 needs to be", "Define variables M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta # Save the reference values at equilibrium fqs0=Surf.fqs.copy()", "range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ### loop", "# calculate vis FDs Steps=[1e-6,] step=Steps[0] ### loop input surfs for ss_in in", "# check option Der_all_exp=np.block(Dervert_list)+scalg.block_diag(*Dercoll_list) Der_all=np.block( assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star,Merge=True) ) _,ErAbs,ErRel=max_error_tensor(Der_all,Der_all_exp) # max absolute error ermax=np.max(ErAbs)", "generate non-zero field of external force Surf.u_ext[0,:,:]=Surf.u_ext[0,:,:]-20.0 Surf.u_ext[1,:,:]=Surf.u_ext[1,:,:]+60.0 Surf.u_ext[2,:,:]=Surf.u_ext[2,:,:]+30.0 Surf.u_ext=Surf.u_ext+np.random.rand(*Surf.u_ext.shape) ### analytical derivative", "Der_list,Der_star_list=assembly.dfqsdvind_gamma(MS.Surfs,MS.Surfs_star) # allocate numerical Der_list_num=[] Der_star_list_num=[] for ii in range(n_surf): sub=[] sub_star=[] for", "unsteady force only depends on Gamma_dot, which is true only for steady-state linearisation", "test # T.setUp() # T.test_dfqsdvind_gamma() # T.test_dfqsduinput() # T.test_dfqsdzeta_vrel0() # T.test_dfqsdgamma_vrel0() # ###", "### numerical derivative 
#Surf.get_normal_input_velocities_at_collocation_points() u_tot0=Surf.u_ind_coll+Surf.u_input_coll u_norm0=Surf.project_coll_to_normal(u_tot0) u_norm0_vec=u_norm0.reshape(-1,order='C') zeta0=Surf.zeta DerNum=np.zeros(Der.shape) Steps=np.array([1e-2,1e-3,1e-4,1e-5,1e-6]) Er_max=0.0*Steps for ss", "perturbation between the 90% and the 110% of the step perturb_vector[kk] += step*(0.2*np.random.rand()+0.9)", "bound Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step # perturb wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step # recalculate", "at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step and", "ermax_rel=ErRel[iimax] assert ermax_rel<1e-16,\\ 'option Merge=True not working correctly, relative error (%.3e) too high!'", "Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step Surf_in.generate_collocations() # perturb wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step ### prepare", "error ermax=np.max(ErAbs) # relative error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] assert", "%ErRel # allocate numerical Derlist_num=[] for ii in range(n_surf): sub=[] for jj in", "wakes, only TE is displaced. 
''' print('------------------------------- Testing assembly.dfqsdvind_zeta') MS=self.MS n_surf=MS.n_surf # analytical", "range(n_surf): Surf_in=MS.Surfs[ss_in] # perturb for pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gamma0[ss_in].copy() Surf_in.gamma[mm,nn]+=step #", "TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step # recalculate induced velocity everywhere Vnum=comp_vind(zetac,MS) dv=(Vnum-V0)/step Dervert_list_num[ss_in][:,kk]=dv.reshape(-1,order='C')", "new forces Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD step:", "Gamma_dot0=Surf.gamma_dot.copy() F0=Surf.funst.copy() for pp in range(K): mm,nn=np.unravel_index( pp, (M,N) ) Surf.gamma_dot=Gamma_dot0.copy() Surf.gamma_dot[mm,nn]+=step Surf.get_joukovski_unsteady()", "Dervert_list_num=[] for ii in range(n_surf): Dervert_list_num.append(0.0*Dervert_list[ii]) # store reference grid Zeta0=[] Zeta0_star=[] for", "max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound and wake%.2d\\tFDstep\\tErrAbs\\tErrRel'%ss_in) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ercoll<10*step,", "for ss in range(n_surf): Der_an=Der_list[ss] #Surf=copy.deepcopy(MS.Surfs[ss]) Surf=MS.Surfs[ss] #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() u_ext0=Surf.u_ext.copy()", "Surf_out.u_ind_coll_norm try: del Surf_out.u_ind_coll except AttributeError: pass ### recalculate MS.get_normal_ind_velocities_at_collocation_points() # restore Surf_in.zeta=Zeta0[ss_in].copy()", "error[istep]<=error[istep-1],\\ 'Error not decreasing as FD step size is reduced' print('------------------------------------------------------------ OK') def", 
"range(n_surf): # Select the surface with the analytica derivatives Der_an=Der_an_list[ss] # Copy to", "import sharpy.linear.src.assembly as assembly import sharpy.linear.src.multisurfaces as multisurfaces import sharpy.linear.src.surface as surface import", "# estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Der_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C')", "which is true only for steady-state linearisation points ''' MS=self.MS Ders_an=assembly.dfunstdgamma_dot(MS.Surfs) step=1e-6 Ders_num=[]", "# analytical Dercoll_list,Dervert_list=assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star) # check option Der_all_exp=np.block(Dervert_list)+scalg.block_diag(*Dercoll_list) Der_all=np.block( assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star,Merge=True) ) _,ErAbs,ErRel=max_error_tensor(Der_all,Der_all_exp) # max", "= h5utils.readh5(fname) # tsdata = haero.data.aero.timestep_info[-1] # tsdata.omega = [] # for ss", "range(3*Surf.maps.Kzeta): # generate a random perturbation between the 90% and the 110% of", "force only depends on Gamma_dot, which is true only for steady-state linearisation points", "Surf.get_joukovski_unsteady() Gamma_dot0=Surf.gamma_dot.copy() F0=Surf.funst.copy() for pp in range(K): mm,nn=np.unravel_index( pp, (M,N) ) Surf.gamma_dot=Gamma_dot0.copy() Surf.gamma_dot[mm,nn]+=step", "relative error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert", "not decreasing as FD step size is reduced' # assert Er_max_star[ii]<Er_max_star[ii-1],\\ # 'Error", "*Surf.gamma.shape ) gamma_star=Surf_star.gamma+np.random.rand( *Surf_star.gamma.shape ) gvec=np.dot(C,gamma.reshape(-1))+np.dot(Cstar,gamma_star.reshape(-1)) 
gvec_ref=np.concatenate((gamma[-1,:],gamma_star[:-1,:].reshape(-1))) assert np.max(np.abs(gvec-gvec_ref))<1e-15,\\ 'Prop. from trailing edge", "is reduced' print('------------------------------------------------------------ OK') if PlotFlag: pass # fig = plt.figure('Spy Der',figsize=(10,4)) #", "ax1 = fig.add_subplot(111) # ax1.spy(ErMat,precision=50*step) # plt.show() def test_dvinddzeta(self): ''' For each output", "element of Pder_an is nonzero - absolute, otherwise The function returns the absolute", "range(3*Kzeta): # Initialize to remove previous movements Surf.zeta=zeta0.copy() # Define DoFs where modifications", "absolute, otherwise The function returns the absolute and relative error tensors, and the", "__name__=='__main__': unittest.main() # T=Test_assembly() # T.setUp() # ### force equation (qs term) #", "Der_star_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_star_an-Der_star_num)) print('Surface %.2d - wake:' %ss) print('FD step: %.2e ---> Max error:", "''' print('------------------------------- Testing assembly.dfqsdvind_zeta') MS=self.MS n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.dfqsdvind_zeta(MS.Surfs,MS.Surfs_star) # allocate numerical Derlist_num=[]", "pass ### recalculate MS.get_normal_ind_velocities_at_collocation_points() # restore Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zetac=ZetaC0[ss_in].copy('F') Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives for", "50 times the step size' if istep > 0: assert error[istep]<=error[istep-1],\\ 'Error not", "to ensure that other tests are not affected. 
Needs to be tested with", "Der_an = np.dot(Dervert_list[ss], perturb_vector) Der_num = np.zeros(Surf.maps.K) ipanel = 0 skew_omega = algebra.skew(Surf.omega)", "+= 1 # COMPUTE THE ERROR error[istep] = np.maximum(error[istep], np.absolute(Der_num-Der_an).max()) print('FD step: %.2e", "print('------------------------------------------------------------ OK') if PlotFlag: pass # fig = plt.figure('Spy Der',figsize=(10,4)) # ax1 =", "# <--- gammaw_0 needs to be used here! df=(Surf_out.fqs-fqs0)/step Der_star_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ### check error", "Steps: Der_num=0.0*Der_an for kk in range(3*Kzeta): Surf.zeta=zeta0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step", "print('FD step: %.2e ---> Max error: %.2e'%(step,error[istep]) ) assert error[istep]<5e1*step, 'Error larger than", "the maximum error analytical derivatives Pder_an. The error is: - relative, if the", "with a case that actually rotates \"\"\" print('----------------------------- Testing assembly.test_nc_domegazetadzeta') MS=self.MS n_surf=MS.n_surf #", "after this test # T.setUp() # T.test_dfqsdvind_gamma() # T.test_dfqsduinput() # T.test_dfqsdzeta_vrel0() # T.test_dfqsdgamma_vrel0()", "# ax1 = fig.add_subplot(121) # ax1.spy(Der,precision=step) # ax2 = fig.add_subplot(122) # ax2.spy(DerNum,precision=step) #", "MS.get_ind_velocities_at_segments(overwrite=True) # restore zeta: (include only induced velocity contrib.) Surf_in.zeta=Zeta0[ss_in].copy() Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate", "contrib.) 
Surf_in.zeta=Zeta0[ss_in].copy() Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs(", "# restore Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zetac=ZetaC0[ss_in].copy('F') Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out]", "err_max=err_here # Zero elements check iizero=np.abs(Pder_an)<1e-15 for der_here in Pder_num[iizero]: if np.abs(der_here)>err_max: err_max=der_here", "ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) # relative error at max abs error", "for ss_out in range(n_surf): for ss_in in range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in]", "in range(n_surf): # Select the surface with the analytica derivatives Der_an=Der_an_list[ss] # Copy", "mm,nn=np.unravel_index( pp, (M,N) ) Surf.gamma_dot=Gamma_dot0.copy() Surf.gamma_dot[mm,nn]+=step Surf.get_joukovski_unsteady() dF=(Surf.funst-F0)/step Dnum[:,pp]=dF.reshape(-1) # restore Surf.gamma_dot=Gamma_dot0.copy() ###", "print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<50*step and ermax_rel<50*step, embed()#'Test failed!' # fig=plt.figure('Spy Er vs", "points due to geometrical variations at vertices Needs to be tested with a", "# Initialize Der_num = 0.0*Der_an # Loop through the different grid modifications (three", "for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in] M_in,N_in=Surf_in.maps.M,Surf_in.maps.N # perturb for kk in range(3*Surf_in.maps.Kzeta):", "induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore circulation: (include only induced velocity contrib.) 
Surf_in.gamma=Gammaw0[ss_in].copy()", "error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step and ermax_rel<50*step, 'Test failed!'", "# get refernce values Surf.get_joukovski_unsteady() Gamma_dot0=Surf.gamma_dot.copy() F0=Surf.funst.copy() for pp in range(K): mm,nn=np.unravel_index( pp,", "decreasing as FD step size is reduced' print('------------------------------------------------------------ OK') if PlotFlag: pass #", "MS=self.MS n_surf=MS.n_surf MS.get_ind_velocities_at_collocation_points() MS.get_normal_ind_velocities_at_collocation_points() for ss in range(n_surf): print('Surface %.2d:' %ss) Surf=MS.Surfs[ss] #", "import sharpy.utils.h5utils as h5utils import sharpy.linear.src.assembly as assembly import sharpy.linear.src.multisurfaces as multisurfaces import", "Surf.zeta=zeta0.copy() # Compute new forces Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:'", "allocate numerical Derlist_num=[] for ii in range(n_surf): sub=[] for jj in range(n_surf): sub.append(0.0*Dervert_list[ii][jj])", "in range(n_surf): Surf=MS.Surfs[ss] Kzeta,K=Surf.maps.Kzeta,Surf.maps.K M,N=Surf.maps.M,Surf.maps.N Dnum=np.zeros((3*Kzeta,K)) # get refernce values Surf.get_joukovski_unsteady() Gamma_dot0=Surf.gamma_dot.copy() F0=Surf.funst.copy()", "haero = h5utils.readh5(fname) # tsdata = haero.data.aero.timestep_info[-1] # tsdata.omega = [] # for", ") Surf.zeta[ind_3d]+=step Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD step:", "error ermax=np.max(ErAbs) # relative error at max abs error point 
iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound", "%.2d - bound:' %ss) print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) ) assert", "analytical derivatives Pder_an. The error is: - relative, if the element of Pder_an", "# # assert error decreases with step size # for ii in range(1,len(Steps)):", "Surf_pert=surface.AeroGridSurface(Surf.maps,zeta=zeta_pert, u_ext=Surf.u_ext,gamma=Surf.gamma) u_norm=Surf_pert.project_coll_to_normal(u_tot0) u_norm_vec=u_norm.reshape(-1,order='C') # FD derivative DerNum[:,jj]=(u_norm_vec-u_norm0_vec)/step er_max=np.max(np.abs(Der-DerNum)) print('FD step: %.2e --->", "Surf_out=MS.Surfs[ss_out] Surf_out.normals=N0[ss_out].copy() del Surf_out.u_ind_coll_norm try: del Surf_out.u_ind_coll except AttributeError: pass ### recalculate MS.get_normal_ind_velocities_at_collocation_points()", "an. %d to %d' %(ss_out,ss_out)) # #plt.show() # plt.close() def test_dfunstdgamma_dot(self): ''' Test", "error: %.2e'%(step,error[istep]) ) assert error[istep]<5e1*step, 'Error larger than 50 times the step size'", "in range(N): pp=np.ravel_multi_index( (0,nn), (M_star,N_star)) gammaw_TE=gammaw_TE0.copy() gammaw_TE[nn]+=step Surf.get_joukovski_qs(gammaw_TE=gammaw_TE) df=(Surf.fqs-fqs0)/step Der_star_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_star_an-Der_star_num)) print('Surface %.2d", "np.maximum(error[istep], np.absolute(Der_num-Der_an).max()) print('FD step: %.2e ---> Max error: %.2e'%(step,error[istep]) ) assert error[istep]<5e1*step, 'Error", "returns the absolute and relative error tensors, and the maximum error. 
@warning: The", "# T.test_dfqsdvind_zeta() # run setUp after this test # T.setUp() # T.test_dfqsdvind_gamma() #", "as FD step size is reduced' def test_dfqsdzeta_vrel0(self): ''' Note: the get_joukovski_qs method", "# calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ### loop input surfs for ss_in", "Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K fqs0=Surf.fqs.copy() gamma0=Surf.gamma.copy() for step in Steps: Der_num=0.0*Der_an Der_star_num=0.0*Der_star_an ###", "perturb cc_pert=Surf.maps.ind_3d_vert_vect[0][jj] mm_pert=Surf.maps.ind_3d_vert_vect[1][jj] nn_pert=Surf.maps.ind_3d_vert_vect[2][jj] zeta_pert=zeta0.copy() zeta_pert[cc_pert,mm_pert,nn_pert]+=step # calculate new normal velocity Surf_pert=surface.AeroGridSurface(Surf.maps,zeta=zeta_pert, u_ext=Surf.u_ext,gamma=Surf.gamma)", "ss in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] N=Surf.maps.N K_star=Surf_star.maps.K C=C_list[ss] Cstar=Cstar_list[ss] # add noise to", "in range(n_surf): # sub=[] # for jj in range(n_surf): # sub.append(0.0*Dervert_list[ii][jj]) # Derlist_num.append(sub)", "Eabs, Erel class Test_assembly(unittest.TestCase): ''' Test methods into assembly module ''' def setUp(self):", ") assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max[ss]=er_max # assert", "ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Der_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ###### wake for ss_in", "ss_out in range(n_surf): for ss_in in range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] ermax,", "with step size # for ii in range(1,len(Steps)): # assert Er_max[ii]<Er_max[ii-1],\\ # 'Error", "# T.test_dfqsdvind_gamma() # T.test_dfqsduinput() # T.test_dfqsdzeta_vrel0() # T.test_dfqsdgamma_vrel0() 
# ### state equation terms", "%d to %d' %(ss_in,ss_out)) # ax3=fig.add_subplot(133) # ax3.spy(Dercoll_list[ss_out],precision=50*step) # ax3.set_title('Dcoll an. %d to", "Surf_star.zeta[cc,0,nn] += perturb_vector[kk] Surf.generate_collocations() # COMPUTE THE DERIVATIVES Der_an = np.zeros(Surf.maps.K) Der_an =", "50 times step size' Er_max.append(er_max) def test_dfqsdvind_gamma(self): print('------------------------------ Testing assembly.dfqsdvind_gamma') MS=self.MS n_surf=MS.n_surf #", "50 times step size' Er_max_star.append(er_max) Surf.gamma=gamma0.copy() ### Warning: this test fails: the dependency", "Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zetac=ZetaC0[ss_in].copy('F') Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] dvind=(Surf_out.u_ind_coll_norm-Vind0[ss_out])/step Derlist_num[ss_out][ss_in][:,kk]=dvind.reshape(-1,order='C')", "fqs0=Surf.fqs.copy() gamma0=Surf.gamma.copy() for step in Steps: Der_num=0.0*Der_an Der_star_num=0.0*Der_star_an ### Bound for pp in", "Surf_star_in=MS.Surfs_star[ss] V+=Surf_in.get_induced_velocity(zetac) V+=Surf_star_in.get_induced_velocity(zetac) return V print('----------------------------------- Testing assembly.dvinddzeta') MS=self.MS n_surf=MS.n_surf zetac=.5*(MS.Surfs[0].zeta[:,1,2]+MS.Surfs[0].zeta[:,1,3]) Dercoll=np.zeros((3,3)) Dervert_list=[]", "Needs to be tested with a case that actually rotates ''' print('------------------------------ Testing", "ax2 = fig.add_subplot(122) # ax2.spy(DerNum,precision=step) # plt.show() def test_nc_domegazetadzeta(self): \"\"\" Variation at colocation", "induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore zeta: (include only induced velocity contrib.) Surf_in.zeta=Zeta0[ss_in].copy()", "computed, all other surfaces are looped. For wakes, only TE is displaced. 
'''", "Der_num=0.0*Der_an Der_star_num=0.0*Der_star_an ### Bound for pp in range(K): mm=Surf.maps.ind_2d_pan_scal[0][pp] nn=Surf.maps.ind_2d_pan_scal[1][pp] Surf.gamma=gamma0.copy() Surf.gamma[mm,nn]+=step Surf.get_joukovski_qs(gammaw_TE=Surf_star.gamma[0,:])", "err_here in Erel[iifinite]: if np.abs(err_here)>err_max: err_max=err_here # Zero elements check iizero=np.abs(Pder_an)<1e-15 for der_here", "colloc Dercoll_num=np.zeros((3,3)) for cc in range(3): zetac_pert=zetac.copy() zetac_pert[cc]+=step Vnum=comp_vind(zetac_pert,MS) Dercoll_num[:,cc]=(Vnum-V0)/step ercoll=np.max(np.abs(Dercoll-Dercoll_num)) print('Error coll.\\tFDstep\\tErrAbs')", "to be tested with a case that actually rotates ''' print('------------------------------ Testing assembly.dfqsdzeta_omega')", "range(n_surf): Surf_in=MS.Surfs[ss] Surf_star_in=MS.Surfs_star[ss] V+=Surf_in.get_induced_velocity(zetac) V+=Surf_star_in.get_induced_velocity(zetac) return V print('----------------------------------- Testing assembly.dvinddzeta') MS=self.MS n_surf=MS.n_surf zetac=.5*(MS.Surfs[0].zeta[:,1,2]+MS.Surfs[0].zeta[:,1,3])", "%d' %(ss_out,ss_out)) # #plt.show() # plt.close() def test_uc_dncdzeta(self,PlotFlag=False): print('---------------------------------- Testing assembly.uc_dncdzeta') MS=self.MS n_surf=MS.n_surf", "Surf.zeta[cc,mm,nn] += perturb_vector[kk] # perturb wake TE if mm==M: Surf_star.zeta=Zeta0_star[ss].copy() Surf_star.zeta[cc,0,nn] += perturb_vector[kk]", "ax1=fig.add_subplot(121) # ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs %d' %(ss_in)) # ax2=fig.add_subplot(122) # ax2.spy(ErRel,precision=1e2*step) #", "pp=np.ravel_multi_index( (0,nn), (M_star,N_star)) gammaw_TE=gammaw_TE0.copy() gammaw_TE[nn]+=step Surf.get_joukovski_qs(gammaw_TE=gammaw_TE) df=(Surf.fqs-fqs0)/step Der_star_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_star_an-Der_star_num)) print('Surface %.2d - wake:'", "segments. 
A copy of Surf is required to ensure that other tests are", "range(3): zetac_pert=zetac.copy() zetac_pert[cc]+=step Vnum=comp_vind(zetac_pert,MS) Dercoll_num[:,cc]=(Vnum-V0)/step ercoll=np.max(np.abs(Dercoll-Dercoll_num)) print('Error coll.\\tFDstep\\tErrAbs') print('\\t\\t%.1e\\t%.1e' %(step,ercoll)) #if ercoll>10*step: embed()", "surfs for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in] M_in,N_in=Surf_in.maps.M,Surf_in.maps.N # perturb for kk in", "function returns the absolute and relative error tensors, and the maximum error. @warning:", "sharpy.linear.src.assembly as assembly import sharpy.linear.src.multisurfaces as multisurfaces import sharpy.linear.src.surface as surface import sharpy.linear.src.libuvlm", "MS=self.MS n_surf=MS.n_surf # analytical Der_list,Der_star_list=assembly.dfqsdvind_gamma(MS.Surfs,MS.Surfs_star) # allocate numerical Der_list_num=[] Der_star_list_num=[] for ii in", "to circulations gamma=Surf.gamma+np.random.rand( *Surf.gamma.shape ) gamma_star=Surf_star.gamma+np.random.rand( *Surf_star.gamma.shape ) gvec=np.dot(C,gamma.reshape(-1))+np.dot(Cstar,gamma_star.reshape(-1)) gvec_ref=np.concatenate((gamma[-1,:],gamma_star[:-1,:].reshape(-1))) assert np.max(np.abs(gvec-gvec_ref))<1e-15,\\ 'Prop.", "kk, (3,M_in+1,N_in+1) ) # perturb bound. vertices and collocation Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step Surf_in.generate_collocations() #", "check error Er_max=[] Er_max_star=[] for ss_out in range(n_surf): for ss_in in range(n_surf): Der_an=Der_list[ss_out][ss_in]", "- del ind. 
vel on output to ensure they are re-computed for ss_out", "estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) # <--- gammaw_0", "Dercoll_list,Dervert_list=assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star) # check option Der_all_exp=np.block(Dervert_list)+scalg.block_diag(*Dercoll_list) Der_all=np.block( assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star,Merge=True) ) _,ErAbs,ErRel=max_error_tensor(Der_all,Der_all_exp) # max absolute error", "as libuvlm import sharpy.utils.algebra as algebra np.set_printoptions(linewidth=200,precision=3) def max_error_tensor(Pder_an,Pder_num): ''' Finds the maximum", "allocated to both u_ext and zeta_dot ''' print('---------------------------------- Testing assembly.dfqsduinput') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsduinput(MS.Surfs,MS.Surfs_star)", "MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsdzeta_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] Surf=copy.deepcopy(MS.Surfs[ss]) #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N", "''' For each output surface, there induced velocity is computed, all other surfaces", "np.absolute(Der_num-Der_an).max()) print('FD step: %.2e ---> Max error: %.2e'%(step,error[istep]) ) assert error[istep]<5e1*step, 'Error larger", "%.2e'%(step,er_max) ) assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max.append(er_max) ###", "Der_an=Dervert_list[ss_in] Der_num=Dervert_list_num[ss_in] ermax,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) # relative error at max", "= h5utils.readh5(fname) tsdata = haero.ts00000 # # Rotating cases # fname = './basic_rotating_wing/basic_wing.data.h5'", "ss in range(n_surf): print('Surface %.2d:' %ss) Surf=MS.Surfs[ss] # generate non-zero field of external", "Derlist_num.append(sub) # store reference circulation and force 
Zeta0=[] Zeta0_star=[] Fqs0=[] for ss in", "ii in range(n_surf): sub=[] sub_star=[] for jj in range(n_surf): sub.append(0.0*Der_list[ii][jj]) sub_star.append(0.0*Der_star_list[ii][jj]) Der_list_num.append(sub) Der_star_list_num.append(sub_star)", "Der_num=0.0*Der_an for kk in range(3*Kzeta): Surf.zeta=zeta0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C')", "ax1.spy(Der,precision=step) # ax2 = fig.add_subplot(122) # ax2.spy(DerNum,precision=step) # plt.show() def test_nc_domegazetadzeta(self): \"\"\" Variation", "Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() u_ext0=Surf.u_ext.copy() zeta_dot0=Surf.zeta_dot.copy() for step in Steps: Der_num=0.0*Der_an for kk in range(3*Kzeta):", "test_nc_dqcdzeta(self): ''' For each output surface, where induced velocity is computed, all other", "are re-computed for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] Surf_out.normals=N0[ss_out].copy() del Surf_out.u_ind_coll_norm try: del Surf_out.u_ind_coll", "#plt.show() # plt.close() def test_dfunstdgamma_dot(self): ''' Test derivative of unsteady aerodynamic force with", "Dercoll_num[:,cc]=(Vnum-V0)/step ercoll=np.max(np.abs(Dercoll-Dercoll_num)) print('Error coll.\\tFDstep\\tErrAbs') print('\\t\\t%.1e\\t%.1e' %(step,ercoll)) #if ercoll>10*step: embed() assert ercoll<10*step, 'Error at", "store reference circulation and normal induced velocities MS.get_normal_ind_velocities_at_collocation_points() Zeta0=[] Zeta0_star=[] Vind0=[] N0=[] ZetaC0=[]", ") Surf.zeta[ind_3d]+=step # Recompute get_ind_velocities_at_segments and recover the previous grid Surf.get_input_velocities_at_segments() Surf.zeta=zeta0.copy() #", "# Compute the anaytical derivative of the case Der_an_list=assembly.dfqsdzeta_omega(MS.Surfs,MS.Surfs_star) # Initialize Er_max=[] #", "to be tested with a case that actually rotates \"\"\" print('----------------------------- 
Testing assembly.test_nc_domegazetadzeta')", "Gamma_dot, which is true only for steady-state linearisation points ''' MS=self.MS Ders_an=assembly.dfunstdgamma_dot(MS.Surfs) step=1e-6", "at the panel segments. A copy of Surf is required to ensure that", "Kzeta,K=Surf.maps.Kzeta,Surf.maps.K M,N=Surf.maps.M,Surf.maps.N Dnum=np.zeros((3*Kzeta,K)) # get refernce values Surf.get_joukovski_unsteady() Gamma_dot0=Surf.gamma_dot.copy() F0=Surf.funst.copy() for pp in", "ErAbs, ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) # relative error at max abs", "MS.get_normal_ind_velocities_at_collocation_points() Zeta0=[] Zeta0_star=[] Vind0=[] N0=[] ZetaC0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy())", "plt.close() def test_uc_dncdzeta(self,PlotFlag=False): print('---------------------------------- Testing assembly.uc_dncdzeta') MS=self.MS n_surf=MS.n_surf MS.get_ind_velocities_at_collocation_points() MS.get_normal_ind_velocities_at_collocation_points() for ss in", "# tsdata = haero.data.aero.timestep_info[-1] # tsdata.omega = [] # for ss in range(haero.data.aero.n_surf):", "# tsdata.omega = [] # for ss in range(haero.data.aero.n_surf): # tsdata.omega.append(haero.data.structure.timestep_info[-1].for_vel[3:6]) MS=multisurfaces.MultiAeroGridSurfaces(tsdata) MS.get_normal_ind_velocities_at_collocation_points()", "round-off error. 
# # assert error decreases with step size # for ii", "%(ss_in)) # ax2=fig.add_subplot(122) # ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel %d' %(ss_in)) # #plt.show() #", "the element of Pder_an is nonzero - absolute, otherwise The function returns the", "in range(n_surf): sub.append(0.0*Dervert_list[ii][jj]) Derlist_num.append(sub) # store reference circulation and normal induced velocities MS.get_normal_ind_velocities_at_collocation_points()", "# fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(131) # ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs", "= haero.ts00000 # # Rotating cases # fname = './basic_rotating_wing/basic_wing.data.h5' # haero =", "# COMPUTE THE ERROR error[istep] = np.maximum(error[istep], np.absolute(Der_num-Der_an).max()) print('FD step: %.2e ---> Max", "''' For each output surface, where induced velocity is computed, all other surfaces", "point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound and wake%.2d\\tFDstep\\tErrAbs\\tErrRel'%ss_in) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ercoll<10*step, 'Error at vertices'", "absolute error is checked. ''' Eabs=np.abs(Pder_num-Pder_an) nnzvec=Pder_an!=0 Erel=np.zeros(Pder_an.shape) Erel[nnzvec]=np.abs(Eabs[nnzvec]/Pder_an[nnzvec]) # Relative error check:", "ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] dvind=(Surf_out.u_ind_coll_norm-Vind0[ss_out])/step Derlist_num[ss_out][ss_in][:,kk]=dvind.reshape(-1,order='C') ### check error for ss_out in range(n_surf):", "for jj in range(n_surf): sub.append(0.0*Dervert_list[ii][jj]) Derlist_num.append(sub) # store reference circulation and force Zeta0=[]", ") gvec=np.dot(C,gamma.reshape(-1))+np.dot(Cstar,gamma_star.reshape(-1)) gvec_ref=np.concatenate((gamma[-1,:],gamma_star[:-1,:].reshape(-1))) assert np.max(np.abs(gvec-gvec_ref))<1e-15,\\ 'Prop. 
from trailing edge not correct' if __name__=='__main__':", "wake%.2d\\tFDstep\\tErrAbs\\tErrRel'%ss_in) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ercoll<10*step, 'Error at vertices' # fig=plt.figure('Spy Er vs coll", "u_tot0=Surf.u_ind_coll+Surf.u_input_coll u_norm0=Surf.project_coll_to_normal(u_tot0) u_norm0_vec=u_norm0.reshape(-1,order='C') zeta0=Surf.zeta DerNum=np.zeros(Der.shape) Steps=np.array([1e-2,1e-3,1e-4,1e-5,1e-6]) Er_max=0.0*Steps for ss in range(len(Steps)): step=Steps[ss] for", "For each output surface, where induced velocity is computed, all other surfaces are", "\"\"\" print('----------------------------- Testing assembly.test_nc_domegazetadzeta') MS=self.MS n_surf=MS.n_surf # analytical Dervert_list = assembly.nc_domegazetadzeta(MS.Surfs,MS.Surfs_star) # allocate", "Max error: %.2e'%(step,error[istep]) ) assert error[istep]<5e1*step, 'Error larger than 50 times the step", "# FD derivative DerNum[:,jj]=(u_norm_vec-u_norm0_vec)/step er_max=np.max(np.abs(Der-DerNum)) print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) )", "at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d\\t\\t\\tFDstep\\tErrAbs\\tErrRel'%(ss,)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step and", "# ax2=fig.add_subplot(132) # ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel %d to %d' %(ss_in,ss_out)) # ax3=fig.add_subplot(133)", "to be used here! 
df=(Surf_out.fqs-fqs0)/step Der_star_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ### check error Er_max=[] Er_max_star=[] for ss_out", "Der_num=Derlist_num[ss_out][ss_in] ermax, ErAbs, ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) # relative error at", "vertices and collocation Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step Surf_in.generate_collocations() # perturb wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy()", "ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs %d to %d' %(ss_in,ss_out)) # ax2=fig.add_subplot(132) # ax2.spy(ErRel,precision=1e2*step) #", "during the search for maximum error, and absolute error is checked. ''' Eabs=np.abs(Pder_num-Pder_an)", "coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(131) # ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs %d to %d' %(ss_in,ss_out))", "range(n_surf): sub.append(0.0*Dervert_list[ii][jj]) Derlist_num.append(sub) # store reference circulation and force Zeta0=[] Zeta0_star=[] Fqs0=[] for", "# ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs %d' %(ss_in)) # ax2=fig.add_subplot(122) # ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error", "Zeta0_star=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) V0=comp_vind(zetac,MS) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,]", "nn in range(N): pp=np.ravel_multi_index( (0,nn), (M_star,N_star)) gammaw_TE=gammaw_TE0.copy() gammaw_TE[nn]+=step Surf.get_joukovski_qs(gammaw_TE=gammaw_TE) df=(Surf.fqs-fqs0)/step Der_star_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_star_an-Der_star_num)) print('Surface", "# fig = plt.figure('Spy Der',figsize=(10,4)) # ax1 = fig.add_subplot(111) # ax1.spy(ErMat,precision=50*step) # plt.show()", "only TE is displaced. 
''' def comp_vind(zetac,MS): # comute induced velocity V=np.zeros((3,)) for", "of the step perturb_vector[kk] += step*(0.2*np.random.rand()+0.9) cc,mm,nn=np.unravel_index( kk, (3,M+1,N+1) ) # perturb bound.", "variables M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta # Save the reference values at equilibrium fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy()", "Dervert_list_num.append(0.0*Dervert_list[ii]) # store reference grid Zeta0=[] Zeta0_star=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy())", "assert np.max(np.abs(gvec-gvec_ref))<1e-15,\\ 'Prop. from trailing edge not correct' if __name__=='__main__': unittest.main() # T=Test_assembly()", "%d' %(ss_in)) # ax2=fig.add_subplot(122) # ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel %d' %(ss_in)) # #plt.show()", "ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Bound%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step, 'Test failed!' Der_an=Der_star_list[ss_out][ss_in] Der_num=Der_star_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat))", "to both u_ext and zeta_dot ''' print('---------------------------------- Testing assembly.dfqsduinput') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsduinput(MS.Surfs,MS.Surfs_star) Er_max=[]", "in input velocity is allocated to both u_ext and zeta_dot ''' print('---------------------------------- Testing", "ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Vind0.append(MS.Surfs[ss].u_ind_coll_norm.copy()) N0.append(MS.Surfs[ss].normals.copy()) # calculate vis FDs Steps=[1e-6,]", "are looped. For wakes, only TE is displaced. 
''' def comp_vind(zetac,MS): # comute", "non-zero field of external force Surf.u_ext[0,:,:]=Surf.u_ext[0,:,:]-20.0 Surf.u_ext[1,:,:]=Surf.u_ext[1,:,:]+60.0 Surf.u_ext[2,:,:]=Surf.u_ext[2,:,:]+30.0 Surf.u_ext=Surf.u_ext+np.random.rand(*Surf.u_ext.shape) ### analytical derivative #", "MS.verify_aic_coll() MS.get_joukovski_qs() MS.verify_joukovski_qs() self.MS=MS def test_nc_dqcdzeta(self): ''' For each output surface, where induced", "assembly.dfqsduinput') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsduinput(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] #Surf=copy.deepcopy(MS.Surfs[ss]) Surf=MS.Surfs[ss]", "only induced velocity contrib.) Surf_in.gamma=Gamma0[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out]", "abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] assert ermax_rel<1e-16,\\ 'option Merge=True not working correctly, relative", "that actually rotates ''' print('------------------------------ Testing assembly.dfqsdzeta_omega') # rename MS=self.MS n_surf=MS.n_surf # Compute", "Der_an=Der_list[ss] #Surf=copy.deepcopy(MS.Surfs[ss]) Surf=MS.Surfs[ss] #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() u_ext0=Surf.u_ext.copy() zeta_dot0=Surf.zeta_dot.copy() for step in", "for ss in range(n_surf): print('Surface %.2d:' %ss) Surf=MS.Surfs[ss] # generate non-zero field of", "Needs to be tested with a case that actually rotates \"\"\" print('----------------------------- Testing", "%(ss_out,ss_out)) # #plt.show() # plt.close() def test_uc_dncdzeta(self,PlotFlag=False): print('---------------------------------- Testing assembly.uc_dncdzeta') MS=self.MS n_surf=MS.n_surf MS.get_ind_velocities_at_collocation_points()", "for kk in range(3*Surf.maps.Kzeta): # generate a random perturbation between the 90% and", "MS.get_joukovski_qs() MS.verify_joukovski_qs() 
self.MS=MS def test_nc_dqcdzeta(self): ''' For each output surface, where induced velocity", "ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) N0.append(MS.Surfs[ss].normals.copy()) # Computation Steps=[1e-2, 1e-4, 1e-6] nsteps", "the unsteady force only depends on Gamma_dot, which is true only for steady-state", "in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] N=Surf.maps.N K_star=Surf_star.maps.K C=C_list[ss] Cstar=Cstar_list[ss] # add noise to circulations", "1 # COMPUTE THE ERROR error[istep] = np.maximum(error[istep], np.absolute(Der_num-Der_an).max()) print('FD step: %.2e --->", "# T.setUp() # ### force equation (qs term) # T.test_dvinddzeta() # T.test_dfqsdvind_zeta() #", "ermax_rel<50*step, 'Test failed!' def test_wake_prop(self): MS=self.MS C_list,Cstar_list=assembly.wake_prop(MS.Surfs,MS.Surfs_star) n_surf=len(MS.Surfs) for ss in range(n_surf): Surf=MS.Surfs[ss]", "is displaced. 
''' print('------------------------------- Testing assembly.dfqsdvind_zeta') MS=self.MS n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.dfqsdvind_zeta(MS.Surfs,MS.Surfs_star) # allocate", "max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<50*step and ermax_rel<50*step,", "recover the previous grid Surf.get_input_velocities_at_segments() Surf.zeta=zeta0.copy() # Compute new forces Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C')", "# plt.close() def test_uc_dncdzeta(self,PlotFlag=False): print('---------------------------------- Testing assembly.uc_dncdzeta') MS=self.MS n_surf=MS.n_surf MS.get_ind_velocities_at_collocation_points() MS.get_normal_ind_velocities_at_collocation_points() for ss", "Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] Der_star_an=Der_star_list[ss] Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K fqs0=Surf.fqs.copy() gamma0=Surf.gamma.copy()", "'Error larger than 50 times step size' Er_max.append(er_max) def test_dfqsduinput(self): ''' Step change", "error tensors, and the maximum error. @warning: The relative error tensor may contain", "the search for maximum error, and absolute error is checked. 
''' Eabs=np.abs(Pder_num-Pder_an) nnzvec=Pder_an!=0", "def test_dfqsdzeta_vrel0(self): ''' Note: the get_joukovski_qs method re-computes the induced velocity at the", "K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() for step in Steps: Der_num=0.0*Der_an for kk in range(3*Kzeta):", "# Copy to avoid modifying the original for other tests Surf=copy.deepcopy(MS.Surfs[ss]) # Define", "'Error not decreasing as FD step size is reduced' print('------------------------------------------------------------ OK') def test_dfqsdgamma_vrel0(self):", "gamma=Surf.gamma+np.random.rand( *Surf.gamma.shape ) gamma_star=Surf_star.gamma+np.random.rand( *Surf_star.gamma.shape ) gvec=np.dot(C,gamma.reshape(-1))+np.dot(Cstar,gamma_star.reshape(-1)) gvec_ref=np.concatenate((gamma[-1,:],gamma_star[:-1,:].reshape(-1))) assert np.max(np.abs(gvec-gvec_ref))<1e-15,\\ 'Prop. from trailing", "normals are unchanged # - del ind. vel on output to ensure they", "= './basic_rotating_wing/basic_wing.data.h5' # haero = h5utils.readh5(fname) # tsdata = haero.data.aero.timestep_info[-1] # tsdata.omega =", "Recompute get_ind_velocities_at_segments and recover the previous grid Surf.get_input_velocities_at_segments() Surf.zeta=zeta0.copy() # Compute new forces", "decreases with step size for ss in range(1,len(Steps)): assert Er_max[ss]<Er_max[ss-1],\\ 'Error not decreasing", "range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gamma0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore", "del ind. vel on output to ensure they are re-computed for ss_out in", "ensure normals are unchanged # - del ind. vel on output to ensure", "copy of Surf is required to ensure that other tests are not affected.", "other tests are not affected. 
Needs to be tested with a case that", "Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] #Surf=copy.deepcopy(MS.Surfs[ss]) Surf=MS.Surfs[ss] #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta", "ermax<5e2*step and ermax_rel<50*step, 'Test failed!' # fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(131)", "# relative error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound and wake%.2d\\tFDstep\\tErrAbs\\tErrRel'%ss_in)", "in range(1,len(Steps)): # assert Er_max[ii]<Er_max[ii-1],\\ # 'Error not decreasing as FD step size", "rename MS=self.MS n_surf=MS.n_surf # Compute the anaytical derivative of the case Der_an_list=assembly.dfqsdzeta_omega(MS.Surfs,MS.Surfs_star) #", "be tested with a case that actually rotates ''' print('------------------------------ Testing assembly.dfqsdzeta_omega') #", "if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step # recalculate induced velocity everywhere Vnum=comp_vind(zetac,MS) dv=(Vnum-V0)/step Dervert_list_num[ss_in][:,kk]=dv.reshape(-1,order='C') #", "decreasing as FD step size is reduced' def test_dfqsdzeta_vrel0(self): ''' Note: the get_joukovski_qs", "Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in] M_in,N_in=Surf_in.maps.M,Surf_in.maps.N # perturb for kk in range(3*Surf_in.maps.Kzeta): cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) )", "ii in range(n_surf): Dervert_list_num.append(0.0*Dervert_list[ii]) # store reference grid Zeta0=[] Zeta0_star=[] for ss in", "Loop through the different grid modifications (three directions per vertex point) for kk", "in range(n_surf): for ss_in in range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] ermax, ErAbs,", "Er_max.append(er_max) def test_dfqsdzeta_omega(self): ''' Note: the get_joukovski_qs 
method re-computes the induced velocity at", "in range(n_surf): Surf_in=MS.Surfs[ss] Surf_star_in=MS.Surfs_star[ss] V+=Surf_in.get_induced_velocity(zetac) V+=Surf_star_in.get_induced_velocity(zetac) return V print('----------------------------------- Testing assembly.dvinddzeta') MS=self.MS n_surf=MS.n_surf", "dF=(Surf.funst-F0)/step Dnum[:,pp]=dF.reshape(-1) # restore Surf.gamma_dot=Gamma_dot0.copy() ### verify ermax, ErAbs, ErRel=max_error_tensor(Ders_an[ss],Dnum) # max absolute", "surface with the analytica derivatives Der_an=Der_an_list[ss] # Copy to avoid modifying the original", "print('------------------------------ Testing assembly.dfqsdzeta_omega') # rename MS=self.MS n_surf=MS.n_surf # Compute the anaytical derivative of", "nonzero - absolute, otherwise The function returns the absolute and relative error tensors,", "Er_max=0.0*Steps for ss in range(len(Steps)): step=Steps[ss] for jj in range(3*Surf.maps.Kzeta): # perturb cc_pert=Surf.maps.ind_3d_vert_vect[0][jj]", "Surf_in.gamma=Gamma0[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step", "assembly module ''' def setUp(self): # select test case fname = os.path.dirname(os.path.abspath(__file__)) +", "Er_max[ss]<Er_max[ss-1],\\ 'Error not decreasing as FD step size is reduced' print('------------------------------------------------------------ OK') if", "where induced velocity is computed, all other surfaces are looped. 
For wakes, only", "zetac_pert[cc]+=step Vnum=comp_vind(zetac_pert,MS) Dercoll_num[:,cc]=(Vnum-V0)/step ercoll=np.max(np.abs(Dercoll-Dercoll_num)) print('Error coll.\\tFDstep\\tErrAbs') print('\\t\\t%.1e\\t%.1e' %(step,ercoll)) #if ercoll>10*step: embed() assert ercoll<10*step,", "Surf.gamma[mm,nn]+=step Surf.get_joukovski_qs(gammaw_TE=Surf_star.gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD step: %.2e", "on output to ensure they are re-computed for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] Surf_out.normals=N0[ss_out].copy()", "derivative #Surf.get_normal_input_velocities_at_collocation_points() u_tot0=Surf.u_ind_coll+Surf.u_input_coll u_norm0=Surf.project_coll_to_normal(u_tot0) u_norm0_vec=u_norm0.reshape(-1,order='C') zeta0=Surf.zeta DerNum=np.zeros(Der.shape) Steps=np.array([1e-2,1e-3,1e-4,1e-5,1e-6]) Er_max=0.0*Steps for ss in range(len(Steps)):", "print('----------------------------- Testing assembly.dfqsdgamma_vrel0') MS=self.MS n_surf=MS.n_surf Der_list,Der_star_list=assembly.dfqsdgamma_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Er_max_star=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf):", "required to ensure that other tests are not affected. Needs to be tested", "ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.u_ext[ind_3d]+=0.5*step Surf.zeta_dot[ind_3d]+=-0.5*step Surf.get_input_velocities_at_segments() Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d -", "Er_max.append(er_max) ### Wake Surf.gamma=gamma0.copy() gammaw_TE0=Surf_star.gamma[0,:].copy() M_star,N_star=Surf_star.maps.M,Surf_star.maps.N K_star=Surf_star.maps.K for nn in range(N): pp=np.ravel_multi_index( (0,nn),", "calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ### loop input surfs for ss_in in", "looped. 
For wakes, only TE is displaced. ''' def comp_vind(zetac,MS): # comute induced", "remove previous movements Surf.zeta=zeta0.copy() # Define DoFs where modifications will take place and", "not decreasing as FD step size is reduced' def test_dfqsdzeta_vrel0(self): ''' Note: the", "derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Derlist_num[ss_out][ss_in][:,kk]=df.reshape(-1,order='C') ### check", "maximum error, and absolute error is checked. ''' Eabs=np.abs(Pder_num-Pder_an) nnzvec=Pder_an!=0 Erel=np.zeros(Pder_an.shape) Erel[nnzvec]=np.abs(Eabs[nnzvec]/Pder_an[nnzvec]) #", "### force equation (qs term) # T.test_dvinddzeta() # T.test_dfqsdvind_zeta() # run setUp after", "assert Er_max[ii]<Er_max[ii-1],\\ # 'Error not decreasing as FD step size is reduced' #", "# - del ind. vel on output to ensure they are re-computed for", "C_list,Cstar_list=assembly.wake_prop(MS.Surfs,MS.Surfs_star) n_surf=len(MS.Surfs) for ss in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] N=Surf.maps.N K_star=Surf_star.maps.K C=C_list[ss] Cstar=Cstar_list[ss] #", "run setUp after this test # T.setUp() # T.test_dfqsdvind_gamma() # T.test_dfqsduinput() # T.test_dfqsdzeta_vrel0()", "Eabs=np.abs(Pder_num-Pder_an) nnzvec=Pder_an!=0 Erel=np.zeros(Pder_an.shape) Erel[nnzvec]=np.abs(Eabs[nnzvec]/Pder_an[nnzvec]) # Relative error check: remove NaN and inf... iifinite=np.isfinite(Erel)", "for ss in range(haero.data.aero.n_surf): # tsdata.omega.append(haero.data.structure.timestep_info[-1].for_vel[3:6]) MS=multisurfaces.MultiAeroGridSurfaces(tsdata) MS.get_normal_ind_velocities_at_collocation_points() MS.verify_non_penetration() MS.verify_aic_coll() MS.get_joukovski_qs() MS.verify_joukovski_qs() self.MS=MS", "surfaces are looped. For wakes, only TE is displaced. ''' print('------------------------------- Testing assembly.dfqsdvind_zeta')", "checked. 
''' Eabs=np.abs(Pder_num-Pder_an) nnzvec=Pder_an!=0 Erel=np.zeros(Pder_an.shape) Erel[nnzvec]=np.abs(Eabs[nnzvec]/Pder_an[nnzvec]) # Relative error check: remove NaN and", "range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] ermax, ErAbs, ErRel=max_error_tensor(Der_an,Der_num) # max absolute error", "ss in range(n_surf): # Select the surface with the analytica derivatives Der_an=Der_an_list[ss] #", "scalg import sharpy.utils.h5utils as h5utils import sharpy.linear.src.assembly as assembly import sharpy.linear.src.multisurfaces as multisurfaces", "for nn in range(N): pp=np.ravel_multi_index( (0,nn), (M_star,N_star)) gammaw_TE=gammaw_TE0.copy() gammaw_TE[nn]+=step Surf.get_joukovski_qs(gammaw_TE=gammaw_TE) df=(Surf.fqs-fqs0)/step Der_star_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_star_an-Der_star_num))", "### recalculate MS.get_normal_ind_velocities_at_collocation_points() # restore Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zetac=ZetaC0[ss_in].copy('F') Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives for ss_out", "the anaytical derivative of the case Der_an_list=assembly.dfqsdzeta_omega(MS.Surfs,MS.Surfs_star) # Initialize Er_max=[] # Define steps", "and ermax_rel<50*step, 'Test failed!' def test_wake_prop(self): MS=self.MS C_list,Cstar_list=assembly.wake_prop(MS.Surfs,MS.Surfs_star) n_surf=len(MS.Surfs) for ss in range(n_surf):", "(include only induced velocity contrib.) 
Surf_in.gamma=Gamma0[ss_in].copy() # estimate derivatives for ss_out in range(n_surf):", "range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Der_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ###### wake for ss_in in range(n_surf):", "print('----------------------------- Testing assembly.test_nc_dqcdzeta') MS=self.MS n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star) # check option Der_all_exp=np.block(Dervert_list)+scalg.block_diag(*Dercoll_list) Der_all=np.block(", "max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] assert ermax_rel<1e-16,\\ 'option Merge=True not working correctly,", "with a case that actually rotates ''' print('------------------------------ Testing assembly.dfqsdzeta_omega') # rename MS=self.MS", "failed!' Der_an=Der_star_list[ss_out][ss_in] Der_num=Der_star_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Wake%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step, 'Test failed!' #", ") # perturb bound. 
vertices and collocation Surf.zeta=Zeta0[ss].copy() Surf.zeta[cc,mm,nn] += perturb_vector[kk] # perturb", "range(K): mm,nn=np.unravel_index( pp, (M,N) ) Surf.gamma_dot=Gamma_dot0.copy() Surf.gamma_dot[mm,nn]+=step Surf.get_joukovski_unsteady() dF=(Surf.funst-F0)/step Dnum[:,pp]=dF.reshape(-1) # restore Surf.gamma_dot=Gamma_dot0.copy()", "in range(n_surf): Gamma0.append(MS.Surfs[ss].gamma.copy()) Gammaw0.append(MS.Surfs_star[ss].gamma.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-5,] step=Steps[0] ######", "np.abs(err_here)>err_max: err_max=err_here # Zero elements check iizero=np.abs(Pder_an)<1e-15 for der_here in Pder_num[iizero]: if np.abs(der_here)>err_max:", "range(3*Kzeta): Surf.zeta=zeta0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d -", "# ax1 = fig.add_subplot(111) # ax1.spy(ErMat,precision=50*step) # plt.show() def test_dvinddzeta(self): ''' For each", "vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(121) # ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs %d' %(ss_in)) #", "perturb_vector[kk] += step*(0.2*np.random.rand()+0.9) cc,mm,nn=np.unravel_index( kk, (3,M+1,N+1) ) # perturb bound. vertices and collocation", "they are re-computed for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] Surf_out.normals=N0[ss_out].copy() del Surf_out.u_ind_coll_norm try: del", "df=(Surf_out.fqs-fqs0)/step Derlist_num[ss_out][ss_in][:,kk]=df.reshape(-1,order='C') ### check error for ss_out in range(n_surf): for ss_in in range(n_surf):", "for maximum error, and absolute error is checked. 
''' Eabs=np.abs(Pder_num-Pder_an) nnzvec=Pder_an!=0 Erel=np.zeros(Pder_an.shape) Erel[nnzvec]=np.abs(Eabs[nnzvec]/Pder_an[nnzvec])", "np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, Surf.zetac[:,mm,nn]))) ipanel += 1 # COMPUTE THE ERROR error[istep] = np.maximum(error[istep],", "df=(Surf.fqs-fqs0)/step Der_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD step: %.2e ---> Max", "in range(1,len(Steps)): assert Er_max[ss]<Er_max[ss-1],\\ 'Error not decreasing as FD step size is reduced'", "scipy.linalg as scalg import sharpy.utils.h5utils as h5utils import sharpy.linear.src.assembly as assembly import sharpy.linear.src.multisurfaces", "MS=self.MS n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.dfqsdvind_zeta(MS.Surfs,MS.Surfs_star) # allocate numerical Derlist_num=[] for ii in range(n_surf):", "with respect to changes in panel circulation. Warning: test assumes the derivative of", "algebra np.set_printoptions(linewidth=200,precision=3) def max_error_tensor(Pder_an,Pder_num): ''' Finds the maximum error analytical derivatives Pder_an. The", "# Loop through the different grid modifications (three directions per vertex point) for", "= haero.data.aero.timestep_info[-1] # tsdata.omega = [] # for ss in range(haero.data.aero.n_surf): # tsdata.omega.append(haero.data.structure.timestep_info[-1].for_vel[3:6])", "ax1=fig.add_subplot(131) # ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs %d to %d' %(ss_in,ss_out)) # ax2=fig.add_subplot(132) #", "nnzvec=Pder_an!=0 Erel=np.zeros(Pder_an.shape) Erel[nnzvec]=np.abs(Eabs[nnzvec]/Pder_an[nnzvec]) # Relative error check: remove NaN and inf... 
iifinite=np.isfinite(Erel) err_max=0.0", "modifications will take place and modify the grid ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step #", "for pp in range(K): mm,nn=np.unravel_index( pp, (M,N) ) Surf.gamma_dot=Gamma_dot0.copy() Surf.gamma_dot[mm,nn]+=step Surf.get_joukovski_unsteady() dF=(Surf.funst-F0)/step Dnum[:,pp]=dF.reshape(-1)", "Surf.get_joukovski_unsteady() dF=(Surf.funst-F0)/step Dnum[:,pp]=dF.reshape(-1) # restore Surf.gamma_dot=Gamma_dot0.copy() ### verify ermax, ErAbs, ErRel=max_error_tensor(Ders_an[ss],Dnum) # max", "fails: the dependency on gamma is linear, hence # great accuracy is obtained", "MS.verify_non_penetration() MS.verify_aic_coll() MS.get_joukovski_qs() MS.verify_joukovski_qs() self.MS=MS def test_nc_dqcdzeta(self): ''' For each output surface, where", "print('Bound and wake%.2d\\tFDstep\\tErrAbs\\tErrRel'%ss_in) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ercoll<10*step, 'Error at vertices' # fig=plt.figure('Spy Er", "ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) V0=comp_vind(zetac,MS) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0]", "for ss in range(n_surf): # Select the surface with the analytica derivatives Der_an=Der_an_list[ss]", "absolute and relative error tensors, and the maximum error. 
@warning: The relative error", "check error at vert for ss_in in range(n_surf): Der_an=Dervert_list[ss_in] Der_num=Dervert_list_num[ss_in] ermax,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max", "fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(121) # ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs %d'", "nn=Surf.maps.ind_2d_pan_scal[1][pp] Surf.gamma=gamma0.copy() Surf.gamma[mm,nn]+=step Surf.get_joukovski_qs(gammaw_TE=Surf_star.gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD", "in range(n_surf): Surf_in=MS.Surfs_star[ss_in] # perturb for pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gammaw0[ss_in].copy() Surf_in.gamma[mm,nn]+=step", "ermax=np.max(ErAbs) # relative error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] assert ermax_rel<1e-16,\\", "in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ###", "pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gammaw0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True)", "ax2=fig.add_subplot(132) # ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel %d to %d' %(ss_in,ss_out)) # ax3=fig.add_subplot(133) #", "previous grid Surf.get_input_velocities_at_segments() Surf.zeta=zeta0.copy() # Compute new forces Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface", "# comute induced velocity V=np.zeros((3,)) for 
ss in range(n_surf): Surf_in=MS.Surfs[ss] Surf_star_in=MS.Surfs_star[ss] V+=Surf_in.get_induced_velocity(zetac) V+=Surf_star_in.get_induced_velocity(zetac)", "n_surf=len(MS.Surfs) for ss in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] N=Surf.maps.N K_star=Surf_star.maps.K C=C_list[ss] Cstar=Cstar_list[ss] # add", "Surf.generate_collocations() # COMPUTE THE DERIVATIVES Der_an = np.zeros(Surf.maps.K) Der_an = np.dot(Dervert_list[ss], perturb_vector) Der_num", "error. @warning: The relative error tensor may contain NaN or Inf if the", "zeta_pert[cc_pert,mm_pert,nn_pert]+=step # calculate new normal velocity Surf_pert=surface.AeroGridSurface(Surf.maps,zeta=zeta_pert, u_ext=Surf.u_ext,gamma=Surf.gamma) u_norm=Surf_pert.project_coll_to_normal(u_tot0) u_norm_vec=u_norm.reshape(-1,order='C') # FD derivative", "[] # for ss in range(haero.data.aero.n_surf): # tsdata.omega.append(haero.data.structure.timestep_info[-1].for_vel[3:6]) MS=multisurfaces.MultiAeroGridSurfaces(tsdata) MS.get_normal_ind_velocities_at_collocation_points() MS.verify_non_penetration() MS.verify_aic_coll() MS.get_joukovski_qs()", "V+=Surf_star_in.get_induced_velocity(zetac) return V print('----------------------------------- Testing assembly.dvinddzeta') MS=self.MS n_surf=MS.n_surf zetac=.5*(MS.Surfs[0].zeta[:,1,2]+MS.Surfs[0].zeta[:,1,3]) Dercoll=np.zeros((3,3)) Dervert_list=[] for ss_in", "of the case Der_an_list=assembly.dfqsdzeta_omega(MS.Surfs,MS.Surfs_star) # Initialize Er_max=[] # Define steps to run Steps=[1e-2,1e-4,1e-6,]", "quickly introduced round-off error. 
# # assert error decreases with step size #", "perturb wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step # recalculate induced velocity everywhere Vnum=comp_vind(zetac,MS)", "h5utils import sharpy.linear.src.assembly as assembly import sharpy.linear.src.multisurfaces as multisurfaces import sharpy.linear.src.surface as surface", "range(n_surf): for ss_in in range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] _,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max", "the get_joukovski_qs method re-computes the induced velocity at the panel segments. A copy", "# ax3.spy(Dercoll_list[ss_out],precision=50*step) # ax3.set_title('Dcoll an. %d to %d' %(ss_out,ss_out)) # #plt.show() # plt.close()", "and wake%.2d\\tFDstep\\tErrAbs\\tErrRel'%ss_in) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ercoll<10*step, 'Error at vertices' # fig=plt.figure('Spy Er vs", "Pder_num[iizero]: if np.abs(der_here)>err_max: err_max=der_here return err_max, Eabs, Erel class Test_assembly(unittest.TestCase): ''' Test methods", "larger than 50 times step size' Er_max_star.append(er_max) Surf.gamma=gamma0.copy() ### Warning: this test fails:", "mm in range(M): for nn in range(N): Der_num[ipanel] = (np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, ZetaC0[ss][:,mm,nn])) -", "otherwise The function returns the absolute and relative error tensors, and the maximum", "dcoll_w,dvert_w=assembly.dvinddzeta(zetac,MS.Surfs_star[ss_in], IsBound=False,M_in_bound=MS.Surfs[ss_in].maps.M) Dercoll+=dcoll_b+dcoll_w Dervert_list.append(dvert_b+dvert_w) # allocate numerical Dercoll_num=np.zeros((3,3)) Dervert_list_num=[] for ii in range(n_surf):", "if the element of Pder_an is nonzero - absolute, otherwise The function returns", "sub=[] for jj in range(n_surf): sub.append(0.0*Dervert_list[ii][jj]) Derlist_num.append(sub) # store reference circulation and force", 
"print('------------------------------ Testing assembly.dfqsdzeta_vrel0') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsdzeta_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss]", "ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] _,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) # relative error at", "ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss)", "derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] dvind=(Surf_out.u_ind_coll_norm-Vind0[ss_out])/step Derlist_num[ss_out][ss_in][:,kk]=dvind.reshape(-1,order='C') ### check error for ss_out", "= 0.0*Der_an # Loop through the different grid modifications (three directions per vertex", "induced velocity V=np.zeros((3,)) for ss in range(n_surf): Surf_in=MS.Surfs[ss] Surf_star_in=MS.Surfs_star[ss] V+=Surf_in.get_induced_velocity(zetac) V+=Surf_star_in.get_induced_velocity(zetac) return V", "= np.zeros(Surf.maps.K) Der_an = np.dot(Dervert_list[ss], perturb_vector) Der_num = np.zeros(Surf.maps.K) ipanel = 0 skew_omega", "gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Derlist_num[ss_out][ss_in][:,kk]=df.reshape(-1,order='C') ### check error for ss_out in range(n_surf): for ss_in in", "for pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gamma0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate induced velocity everywhere", "%(step,ermax)) assert ermax<50*step, 'Test failed!' 
# fig = plt.figure('Spy Der',figsize=(10,4)) # ax1 =", "---> Max error: %.2e'%(step,er_max) ) assert er_max<5e1*step, 'Error larger than 50 times step", "V+=Surf_in.get_induced_velocity(zetac) V+=Surf_star_in.get_induced_velocity(zetac) return V print('----------------------------------- Testing assembly.dvinddzeta') MS=self.MS n_surf=MS.n_surf zetac=.5*(MS.Surfs[0].zeta[:,1,2]+MS.Surfs[0].zeta[:,1,3]) Dercoll=np.zeros((3,3)) Dervert_list=[] for", "in range(K): mm,nn=np.unravel_index( pp, (M,N) ) Surf.gamma_dot=Gamma_dot0.copy() Surf.gamma_dot[mm,nn]+=step Surf.get_joukovski_unsteady() dF=(Surf.funst-F0)/step Dnum[:,pp]=dF.reshape(-1) # restore", "Test_assembly(unittest.TestCase): ''' Test methods into assembly module ''' def setUp(self): # select test", "assembly.test_nc_domegazetadzeta') MS=self.MS n_surf=MS.n_surf # analytical Dervert_list = assembly.nc_domegazetadzeta(MS.Surfs,MS.Surfs_star) # allocate numerical # Derlist_num=[]", "ss in range(n_surf): Surf_in=MS.Surfs[ss] Surf_star_in=MS.Surfs_star[ss] V+=Surf_in.get_induced_velocity(zetac) V+=Surf_star_in.get_induced_velocity(zetac) return V print('----------------------------------- Testing assembly.dvinddzeta') MS=self.MS", "abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<50*step and ermax_rel<50*step, embed()#'Test", "Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] N=Surf.maps.N K_star=Surf_star.maps.K C=C_list[ss] Cstar=Cstar_list[ss] # add noise to circulations gamma=Surf.gamma+np.random.rand( *Surf.gamma.shape", "# allocate numerical Derlist_num=[] for ii in range(n_surf): sub=[] for jj in range(n_surf):", "Gamma0.append(MS.Surfs[ss].gamma.copy()) Gammaw0.append(MS.Surfs_star[ss].gamma.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-5,] step=Steps[0] ###### bound 
for", "panel circulation. Warning: test assumes the derivative of the unsteady force only depends", "Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step ### prepare output surfaces # - ensure normals are unchanged #", "plt.show() def test_nc_domegazetadzeta(self): \"\"\" Variation at colocation points due to geometrical variations at", "sub.append(0.0*Dervert_list[ii][jj]) # Derlist_num.append(sub) # Store the initial values of the variabes Zeta0=[] Zeta0_star=[]", "too high!' %ErRel # allocate numerical Derlist_num=[] for ii in range(n_surf): sub=[] for", "to remove previous movements Surf.zeta=zeta0.copy() # Define DoFs where modifications will take place", "tests are not affected. ''' print('------------------------------ Testing assembly.dfqsdzeta_vrel0') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsdzeta_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,]", "ax1 = fig.add_subplot(121) # ax1.spy(Der,precision=step) # ax2 = fig.add_subplot(122) # ax2.spy(DerNum,precision=step) # plt.show()", "FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ### vertices for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in] M_in,N_in=Surf_in.maps.M,Surf_in.maps.N", "#Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() for step in Steps: Der_num=0.0*Der_an for kk", "Steps[istep] for ss in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N perturb_vector = np.zeros(3*Surf.maps.Kzeta) # PERTURBATION", "recalculate MS.get_normal_ind_velocities_at_collocation_points() # restore Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zetac=ZetaC0[ss_in].copy('F') Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives for ss_out in", "def test_dfqsduinput(self): ''' Step change in input velocity is allocated to both u_ext", "steps to run Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): # 
Select the surface with", "step size' Er_max.append(er_max) def test_dfqsdvind_gamma(self): print('------------------------------ Testing assembly.dfqsdvind_gamma') MS=self.MS n_surf=MS.n_surf # analytical Der_list,Der_star_list=assembly.dfqsdvind_gamma(MS.Surfs,MS.Surfs_star)", "gvec=np.dot(C,gamma.reshape(-1))+np.dot(Cstar,gamma_star.reshape(-1)) gvec_ref=np.concatenate((gamma[-1,:],gamma_star[:-1,:].reshape(-1))) assert np.max(np.abs(gvec-gvec_ref))<1e-15,\\ 'Prop. from trailing edge not correct' if __name__=='__main__': unittest.main()", "estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Der_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ######", "bound. vertices and collocation Surf.zeta=Zeta0[ss].copy() Surf.zeta[cc,mm,nn] += perturb_vector[kk] # perturb wake TE if", "import sharpy.linear.src.surface as surface import sharpy.linear.src.libuvlm as libuvlm import sharpy.utils.algebra as algebra np.set_printoptions(linewidth=200,precision=3)", "and normal induced velocities MS.get_normal_ind_velocities_at_collocation_points() Zeta0=[] Zeta0_star=[] Vind0=[] N0=[] ZetaC0=[] for ss in", "assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max.append(er_max) def test_dfqsduinput(self): '''", "range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] N=Surf.maps.N K_star=Surf_star.maps.K C=C_list[ss] Cstar=Cstar_list[ss] # add noise to circulations gamma=Surf.gamma+np.random.rand(", "in range(len(Steps)): step=Steps[ss] for jj in range(3*Surf.maps.Kzeta): # perturb cc_pert=Surf.maps.ind_3d_vert_vect[0][jj] mm_pert=Surf.maps.ind_3d_vert_vect[1][jj] nn_pert=Surf.maps.ind_3d_vert_vect[2][jj] zeta_pert=zeta0.copy()", "in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) 
N0.append(MS.Surfs[ss].normals.copy()) # Computation Steps=[1e-2, 1e-4, 1e-6] nsteps =", "if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step ### prepare output surfaces # - ensure normals are", "of the unsteady force only depends on Gamma_dot, which is true only for", "sharpy.utils.h5utils as h5utils import sharpy.linear.src.assembly as assembly import sharpy.linear.src.multisurfaces as multisurfaces import sharpy.linear.src.surface", "M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() u_ext0=Surf.u_ext.copy() zeta_dot0=Surf.zeta_dot.copy() for step in Steps: Der_num=0.0*Der_an for kk", "size # for ii in range(1,len(Steps)): # assert Er_max[ii]<Er_max[ii-1],\\ # 'Error not decreasing", "# plt.close() def test_dfunstdgamma_dot(self): ''' Test derivative of unsteady aerodynamic force with respect", "# restore zeta: (include only induced velocity contrib.) Surf_in.zeta=Zeta0[ss_in].copy() Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives", "derivative of the case Der_an_list=assembly.dfqsdzeta_omega(MS.Surfs,MS.Surfs_star) # Initialize Er_max=[] # Define steps to run", "for ss in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] N=Surf.maps.N K_star=Surf_star.maps.K C=C_list[ss] Cstar=Cstar_list[ss] # add noise", "%(step,ermax,ermax_rel)) assert ermax<5e2*step and ermax_rel<50*step, 'Test failed!' 
def test_wake_prop(self): MS=self.MS C_list,Cstar_list=assembly.wake_prop(MS.Surfs,MS.Surfs_star) n_surf=len(MS.Surfs) for", "in range(n_surf): sub.append(0.0*Der_list[ii][jj]) sub_star.append(0.0*Der_star_list[ii][jj]) Der_list_num.append(sub) Der_star_list_num.append(sub_star) # store reference circulation and force Gamma0=[]", "tested with a case that actually rotates \"\"\" print('----------------------------- Testing assembly.test_nc_domegazetadzeta') MS=self.MS n_surf=MS.n_surf", "Pder_an is nonzero - absolute, otherwise The function returns the absolute and relative", ") assert error[istep]<5e1*step, 'Error larger than 50 times the step size' if istep", "# ax2.set_title('error rel %d' %(ss_in)) # #plt.show() # plt.close() def test_dfqsdvind_zeta(self): ''' For", "### Bound for pp in range(K): mm=Surf.maps.ind_2d_pan_scal[0][pp] nn=Surf.maps.ind_2d_pan_scal[1][pp] Surf.gamma=gamma0.copy() Surf.gamma[mm,nn]+=step Surf.get_joukovski_qs(gammaw_TE=Surf_star.gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,pp]=df.reshape(-1,order='C')", "print('Wake%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step, 'Test failed!' # fig = plt.figure('Spy Der',figsize=(10,4)) #", "if mm==M: Surf_star.zeta=Zeta0_star[ss].copy() Surf_star.zeta[cc,0,nn] += perturb_vector[kk] Surf.generate_collocations() # COMPUTE THE DERIVATIVES Der_an =", "Inf if the analytical derivative is zero. 
These elements are filtered out during", "velocities computed already Surf.get_input_velocities_at_collocation_points() Der=assembly.uc_dncdzeta(Surf) ### numerical derivative #Surf.get_normal_input_velocities_at_collocation_points() u_tot0=Surf.u_ind_coll+Surf.u_input_coll u_norm0=Surf.project_coll_to_normal(u_tot0) u_norm0_vec=u_norm0.reshape(-1,order='C') zeta0=Surf.zeta", "range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Derlist_num[ss_out][ss_in][:,kk]=df.reshape(-1,order='C') ### check error for ss_out in", "np.zeros(3*Surf.maps.Kzeta) # PERTURBATION OF THE SURFACE for kk in range(3*Surf.maps.Kzeta): # generate a", "= os.path.dirname(os.path.abspath(__file__)) + '/h5input/goland_mod_Nsurf01_M003_N004_a040.aero_state.h5' haero = h5utils.readh5(fname) tsdata = haero.ts00000 # # Rotating", "%(ss_in,ss_out)) # ax2=fig.add_subplot(132) # ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel %d to %d' %(ss_in,ss_out)) #", "ss in range(haero.data.aero.n_surf): # tsdata.omega.append(haero.data.structure.timestep_info[-1].for_vel[3:6]) MS=multisurfaces.MultiAeroGridSurfaces(tsdata) MS.get_normal_ind_velocities_at_collocation_points() MS.verify_non_penetration() MS.verify_aic_coll() MS.get_joukovski_qs() MS.verify_joukovski_qs() self.MS=MS def", "# - ensure normals are unchanged # - del ind. 
vel on output", "Fqs0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,]", "Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] ermax, ErAbs, ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs)", "Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) N0.append(MS.Surfs[ss].normals.copy()) # Computation Steps=[1e-2, 1e-4, 1e-6] nsteps = len(Steps) error = np.zeros((nsteps,))", "Surf.gamma=gamma0.copy() Surf.gamma[mm,nn]+=step Surf.get_joukovski_qs(gammaw_TE=Surf_star.gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD step:", "accuracy is obtained even with large steps. In fact, reducing # the step", "1e-4, 1e-6] nsteps = len(Steps) error = np.zeros((nsteps,)) for istep in range(nsteps): step", "fig.add_subplot(121) # ax1.spy(Der,precision=step) # ax2 = fig.add_subplot(122) # ax2.spy(DerNum,precision=step) # plt.show() def test_nc_domegazetadzeta(self):", "'./basic_rotating_wing/basic_wing.data.h5' # haero = h5utils.readh5(fname) # tsdata = haero.data.aero.timestep_info[-1] # tsdata.omega = []", "in range(n_surf): Surf_out=MS.Surfs[ss_out] Surf_out.normals=N0[ss_out].copy() del Surf_out.u_ind_coll_norm try: del Surf_out.u_ind_coll except AttributeError: pass ###", "assembly.dfqsdgamma_vrel0') MS=self.MS n_surf=MS.n_surf Der_list,Der_star_list=assembly.dfqsdgamma_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Er_max_star=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] Der_star_an=Der_star_list[ss]", "for ss in range(n_surf): Der_an=Der_list[ss] Der_star_an=Der_star_list[ss] Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K fqs0=Surf.fqs.copy() 
gamma0=Surf.gamma.copy() for", "Note: the get_joukovski_qs method re-computes the induced velocity at the panel segments. A", "in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Derlist_num[ss_out][ss_in][:,kk]=df.reshape(-1,order='C') ### check error for ss_out", "sub_star=[] for jj in range(n_surf): sub.append(0.0*Der_list[ii][jj]) sub_star.append(0.0*Der_star_list[ii][jj]) Der_list_num.append(sub) Der_star_list_num.append(sub_star) # store reference circulation", "each output surface, where induced velocity is computed, all other surfaces are looped.", "select test case fname = os.path.dirname(os.path.abspath(__file__)) + '/h5input/goland_mod_Nsurf01_M003_N004_a040.aero_state.h5' haero = h5utils.readh5(fname) tsdata =", "iizero=np.abs(Pder_an)<1e-15 for der_here in Pder_num[iizero]: if np.abs(der_here)>err_max: err_max=der_here return err_max, Eabs, Erel class", "range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N perturb_vector = np.zeros(3*Surf.maps.Kzeta) # PERTURBATION OF THE SURFACE for", "error is checked. ''' Eabs=np.abs(Pder_num-Pder_an) nnzvec=Pder_an!=0 Erel=np.zeros(Pder_an.shape) Erel[nnzvec]=np.abs(Eabs[nnzvec]/Pder_an[nnzvec]) # Relative error check: remove", "respect to changes in panel circulation. Warning: test assumes the derivative of the", "for ss_out in range(n_surf): for ss_in in range(n_surf): Der_an=Der_list[ss_out][ss_in] Der_num=Der_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Bound%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out))", "other surfaces are looped. For wakes, only TE is displaced. 
''' print('------------------------------- Testing", "as h5utils import sharpy.linear.src.assembly as assembly import sharpy.linear.src.multisurfaces as multisurfaces import sharpy.linear.src.surface as", "del Surf_out.u_ind_coll_norm try: del Surf_out.u_ind_coll except AttributeError: pass ### recalculate MS.get_normal_ind_velocities_at_collocation_points() # restore", "MS.get_ind_velocities_at_segments(overwrite=True) # restore circulation: (include only induced velocity contrib.) Surf_in.gamma=Gamma0[ss_in].copy() # estimate derivatives", ") # perturb bound. vertices and collocation Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step Surf_in.generate_collocations() # perturb wake", "point) for kk in range(3*Kzeta): # Initialize to remove previous movements Surf.zeta=zeta0.copy() #", "MS=self.MS n_surf=MS.n_surf # analytical Dervert_list = assembly.nc_domegazetadzeta(MS.Surfs,MS.Surfs_star) # allocate numerical # Derlist_num=[] #", "force with respect to changes in panel circulation. 
Warning: test assumes the derivative", "#if ercoll>10*step: embed() assert ercoll<10*step, 'Error at collocation point' ### check error at", "%d to %d' %(ss_out,ss_out)) # #plt.show() # plt.close() def test_uc_dncdzeta(self,PlotFlag=False): print('---------------------------------- Testing assembly.uc_dncdzeta')", "Gamma0=[] Gammaw0=[] Fqs0=[] for ss in range(n_surf): Gamma0.append(MS.Surfs[ss].gamma.copy()) Gammaw0.append(MS.Surfs_star[ss].gamma.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis", "### prepare output surfaces # - ensure normals are unchanged # - del", "iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound and wake%.2d\\tFDstep\\tErrAbs\\tErrRel'%ss_in) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ercoll<10*step, 'Error at vertices' #", "Testing assembly.dfqsdvind_gamma') MS=self.MS n_surf=MS.n_surf # analytical Der_list,Der_star_list=assembly.dfqsdvind_gamma(MS.Surfs,MS.Surfs_star) # allocate numerical Der_list_num=[] Der_star_list_num=[] for", "range(n_surf): Surf_in=MS.Surfs_star[ss_in] # perturb for pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gammaw0[ss_in].copy() Surf_in.gamma[mm,nn]+=step #", "fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Der_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ###### wake for ss_in in range(n_surf): Surf_in=MS.Surfs_star[ss_in] #", "that other tests are not affected. 
''' print('------------------------------ Testing assembly.dfqsdzeta_vrel0') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsdzeta_vrel0(MS.Surfs,MS.Surfs_star)", "0.0*Der_an # Loop through the different grid modifications (three directions per vertex point)", "ss in range(n_surf): Surf=MS.Surfs[ss] Kzeta,K=Surf.maps.Kzeta,Surf.maps.K M,N=Surf.maps.M,Surf.maps.N Dnum=np.zeros((3*Kzeta,K)) # get refernce values Surf.get_joukovski_unsteady() Gamma_dot0=Surf.gamma_dot.copy()", "take place and modify the grid ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step # Recompute get_ind_velocities_at_segments", "derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Der_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ###### wake", "# #plt.show() # plt.close() def test_uc_dncdzeta(self,PlotFlag=False): print('---------------------------------- Testing assembly.uc_dncdzeta') MS=self.MS n_surf=MS.n_surf MS.get_ind_velocities_at_collocation_points() MS.get_normal_ind_velocities_at_collocation_points()", "Er_max_star[ii]<Er_max_star[ii-1],\\ # 'Error not decreasing as FD step size is reduced' def test_dfqsdzeta_vrel0(self):", "print('------------------------------- Testing assembly.dfqsdvind_zeta') MS=self.MS n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.dfqsdvind_zeta(MS.Surfs,MS.Surfs_star) # allocate numerical Derlist_num=[] for", "modify the grid ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step # Recompute get_ind_velocities_at_segments and recover the", "vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-5,] step=Steps[0] ###### bound for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] #", "assert error[istep]<5e1*step, 'Error larger than 50 times the step size' if istep >", "A copy of Surf is required to ensure that other tests are not", "test_dvinddzeta(self): ''' For each output surface, 
there induced velocity is computed, all other", "# restore Surf.gamma_dot=Gamma_dot0.copy() ### verify ermax, ErAbs, ErRel=max_error_tensor(Ders_an[ss],Dnum) # max absolute error ermax=np.max(ErAbs)", "_,ErAbs,ErRel=max_error_tensor(Der_all,Der_all_exp) # max absolute error ermax=np.max(ErAbs) # relative error at max abs error", "%.2d:' %ss) Surf=MS.Surfs[ss] # generate non-zero field of external force Surf.u_ext[0,:,:]=Surf.u_ext[0,:,:]-20.0 Surf.u_ext[1,:,:]=Surf.u_ext[1,:,:]+60.0 Surf.u_ext[2,:,:]=Surf.u_ext[2,:,:]+30.0", "%.2e ---> Max error: %.2e'%(step,error[istep]) ) assert error[istep]<5e1*step, 'Error larger than 50 times", "used here! df=(Surf_out.fqs-fqs0)/step Der_star_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ### check error Er_max=[] Er_max_star=[] for ss_out in range(n_surf):", "the different grid modifications (three directions per vertex point) for kk in range(3*Kzeta):", "MS.get_normal_ind_velocities_at_collocation_points() for ss in range(n_surf): print('Surface %.2d:' %ss) Surf=MS.Surfs[ss] # generate non-zero field", "in range(n_surf): print('Surface %.2d:' %ss) Surf=MS.Surfs[ss] # generate non-zero field of external force", "'Error larger than 50 times step size' Er_max.append(er_max) ### Wake Surf.gamma=gamma0.copy() gammaw_TE0=Surf_star.gamma[0,:].copy() M_star,N_star=Surf_star.maps.M,Surf_star.maps.N", "# ### force equation (qs term) # T.test_dvinddzeta() # T.test_dfqsdvind_zeta() # run setUp", "surfaces are looped. For wakes, only TE is displaced. ''' print('----------------------------- Testing assembly.test_nc_dqcdzeta')", "# allocate numerical # Derlist_num=[] # for ii in range(n_surf): # sub=[] #", "error check: remove NaN and inf... 
iifinite=np.isfinite(Erel) err_max=0.0 for err_here in Erel[iifinite]: if", "MS=self.MS n_surf=MS.n_surf # Compute the anaytical derivative of the case Der_an_list=assembly.dfqsdzeta_omega(MS.Surfs,MS.Surfs_star) # Initialize", "test_dfqsdvind_zeta(self): ''' For each output surface, there induced velocity is computed, all other", "Der_an = np.zeros(Surf.maps.K) Der_an = np.dot(Dervert_list[ss], perturb_vector) Der_num = np.zeros(Surf.maps.K) ipanel = 0", "# plt.show() def test_nc_domegazetadzeta(self): \"\"\" Variation at colocation points due to geometrical variations", "error[istep] = np.maximum(error[istep], np.absolute(Der_num-Der_an).max()) print('FD step: %.2e ---> Max error: %.2e'%(step,error[istep]) ) assert", "MS=multisurfaces.MultiAeroGridSurfaces(tsdata) MS.get_normal_ind_velocities_at_collocation_points() MS.verify_non_penetration() MS.verify_aic_coll() MS.get_joukovski_qs() MS.verify_joukovski_qs() self.MS=MS def test_nc_dqcdzeta(self): ''' For each output", "except AttributeError: pass ### recalculate MS.get_normal_ind_velocities_at_collocation_points() # restore Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zetac=ZetaC0[ss_in].copy('F') Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate", "range(haero.data.aero.n_surf): # tsdata.omega.append(haero.data.structure.timestep_info[-1].for_vel[3:6]) MS=multisurfaces.MultiAeroGridSurfaces(tsdata) MS.get_normal_ind_velocities_at_collocation_points() MS.verify_non_penetration() MS.verify_aic_coll() MS.get_joukovski_qs() MS.verify_joukovski_qs() self.MS=MS def test_nc_dqcdzeta(self): '''", "be tested with a case that actually rotates \"\"\" print('----------------------------- Testing assembly.test_nc_domegazetadzeta') MS=self.MS", "rel %d' %(ss_in)) # #plt.show() # plt.close() def test_dfqsdvind_zeta(self): ''' For each output", "mm_pert=Surf.maps.ind_3d_vert_vect[1][jj] nn_pert=Surf.maps.ind_3d_vert_vect[2][jj] zeta_pert=zeta0.copy() zeta_pert[cc_pert,mm_pert,nn_pert]+=step # calculate new normal velocity 
Surf_pert=surface.AeroGridSurface(Surf.maps,zeta=zeta_pert, u_ext=Surf.u_ext,gamma=Surf.gamma) u_norm=Surf_pert.project_coll_to_normal(u_tot0) u_norm_vec=u_norm.reshape(-1,order='C')", "test fails: the dependency on gamma is linear, hence # great accuracy is", "change in input velocity is allocated to both u_ext and zeta_dot ''' print('----------------------------------", "Steps=[1e-6,] step=Steps[0] ### vertices for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in] M_in,N_in=Surf_in.maps.M,Surf_in.maps.N # perturb", "assert error decreases with step size # for ii in range(1,len(Steps)): # assert", "velocity contrib.) Surf_in.gamma=Gamma0[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs(", "Erel=np.zeros(Pder_an.shape) Erel[nnzvec]=np.abs(Eabs[nnzvec]/Pder_an[nnzvec]) # Relative error check: remove NaN and inf... iifinite=np.isfinite(Erel) err_max=0.0 for", "Surf_star=MS.Surfs_star[ss] N=Surf.maps.N K_star=Surf_star.maps.K C=C_list[ss] Cstar=Cstar_list[ss] # add noise to circulations gamma=Surf.gamma+np.random.rand( *Surf.gamma.shape )", "K_star=Surf_star.maps.K for nn in range(N): pp=np.ravel_multi_index( (0,nn), (M_star,N_star)) gammaw_TE=gammaw_TE0.copy() gammaw_TE[nn]+=step Surf.get_joukovski_qs(gammaw_TE=gammaw_TE) df=(Surf.fqs-fqs0)/step Der_star_num[:,pp]=df.reshape(-1,order='C')", "assert ermax<5e2*step and ermax_rel<50*step, 'Test failed!' def test_wake_prop(self): MS=self.MS C_list,Cstar_list=assembly.wake_prop(MS.Surfs,MS.Surfs_star) n_surf=len(MS.Surfs) for ss", "# analytical Dervert_list = assembly.nc_domegazetadzeta(MS.Surfs,MS.Surfs_star) # allocate numerical # Derlist_num=[] # for ii", "perturb bound. 
vertices and collocation Surf.zeta=Zeta0[ss].copy() Surf.zeta[cc,mm,nn] += perturb_vector[kk] # perturb wake TE", "assert error[istep]<=error[istep-1],\\ 'Error not decreasing as FD step size is reduced' print('------------------------------------------------------------ OK')", "assert error decreases with step size for ss in range(1,len(Steps)): assert Er_max[ss]<Er_max[ss-1],\\ 'Error", "#plt.show() # plt.close() def test_dfqsdvind_zeta(self): ''' For each output surface, there induced velocity", "is reduced' def test_dfqsdzeta_vrel0(self): ''' Note: the get_joukovski_qs method re-computes the induced velocity", "# perturb bound Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step # perturb wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step", "this test # T.setUp() # T.test_dfqsdvind_gamma() # T.test_dfqsduinput() # T.test_dfqsdzeta_vrel0() # T.test_dfqsdgamma_vrel0() #", "output to ensure they are re-computed for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] Surf_out.normals=N0[ss_out].copy() del", "than 50 times step size' Er_max.append(er_max) ### Wake Surf.gamma=gamma0.copy() gammaw_TE0=Surf_star.gamma[0,:].copy() M_star,N_star=Surf_star.maps.M,Surf_star.maps.N K_star=Surf_star.maps.K for", "velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore zeta: (include only induced velocity contrib.) 
Surf_in.zeta=Zeta0[ss_in].copy() Surf_star_in.zeta=Zeta0_star[ss_in].copy()", "Testing assembly.dfqsdgamma_vrel0') MS=self.MS n_surf=MS.n_surf Der_list,Der_star_list=assembly.dfqsdgamma_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Er_max_star=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss]", "tsdata = haero.ts00000 # # Rotating cases # fname = './basic_rotating_wing/basic_wing.data.h5' # haero", "in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gamma0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) #", "perturb_vector[kk] Surf.generate_collocations() # COMPUTE THE DERIVATIVES Der_an = np.zeros(Surf.maps.K) Der_an = np.dot(Dervert_list[ss], perturb_vector)", "may contain NaN or Inf if the analytical derivative is zero. These elements", "Define steps to run Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): # Select the surface", "size' Er_max.append(er_max) def test_dfqsdvind_gamma(self): print('------------------------------ Testing assembly.dfqsdvind_gamma') MS=self.MS n_surf=MS.n_surf # analytical Der_list,Der_star_list=assembly.dfqsdvind_gamma(MS.Surfs,MS.Surfs_star) #", "for jj in range(n_surf): # sub.append(0.0*Dervert_list[ii][jj]) # Derlist_num.append(sub) # Store the initial values", "Der_star_an=Der_star_list[ss] Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K fqs0=Surf.fqs.copy() gamma0=Surf.gamma.copy() for step in Steps: Der_num=0.0*Der_an Der_star_num=0.0*Der_star_an", "at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] assert ermax_rel<1e-16,\\ 'option Merge=True not working", "Surf.gamma_dot[mm,nn]+=step Surf.get_joukovski_unsteady() dF=(Surf.funst-F0)/step Dnum[:,pp]=dF.reshape(-1) # restore Surf.gamma_dot=Gamma_dot0.copy() ### verify ermax, ErAbs, ErRel=max_error_tensor(Ders_an[ss],Dnum) #", 
"skew_omega = algebra.skew(Surf.omega) for mm in range(M): for nn in range(N): Der_num[ipanel] =", "For wakes, only TE is displaced. ''' print('----------------------------- Testing assembly.test_nc_dqcdzeta') MS=self.MS n_surf=MS.n_surf #", "ii in range(1,len(Steps)): # assert Er_max[ii]<Er_max[ii-1],\\ # 'Error not decreasing as FD step", "# Define DoFs where modifications will take place and modify the grid ind_3d=np.unravel_index(kk,", "induced velocity is computed, all other surfaces are looped. For wakes, only TE", "in range(n_surf): for ss_in in range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] _,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) #", "ermax_rel<50*step, 'Test failed!' # fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(131) # ax1.spy(ErAbs,precision=1e2*step)", "<NAME>, 29 May 2018 ''' import os import copy import warnings import unittest", "error analytical derivatives Pder_an. 
The error is: - relative, if the element of", "mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore zeta: (include", "between the 90% and the 110% of the step perturb_vector[kk] += step*(0.2*np.random.rand()+0.9) cc,mm,nn=np.unravel_index(", "Surf_star_in.zeta[cc,0,nn]+=step # recalculate induced velocity everywhere Vnum=comp_vind(zetac,MS) dv=(Vnum-V0)/step Dervert_list_num[ss_in][:,kk]=dv.reshape(-1,order='C') # restore Surf_in.zeta=Zeta0[ss_in].copy() if", "Dnum[:,pp]=dF.reshape(-1) # restore Surf.gamma_dot=Gamma_dot0.copy() ### verify ermax, ErAbs, ErRel=max_error_tensor(Ders_an[ss],Dnum) # max absolute error", "calculate new normal velocity Surf_pert=surface.AeroGridSurface(Surf.maps,zeta=zeta_pert, u_ext=Surf.u_ext,gamma=Surf.gamma) u_norm=Surf_pert.project_coll_to_normal(u_tot0) u_norm_vec=u_norm.reshape(-1,order='C') # FD derivative DerNum[:,jj]=(u_norm_vec-u_norm0_vec)/step er_max=np.max(np.abs(Der-DerNum))", "Surf.zeta=zeta0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:'", "range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Vind0.append(MS.Surfs[ss].u_ind_coll_norm.copy()) N0.append(MS.Surfs[ss].normals.copy()) # calculate vis FDs Steps=[1e-6,] step=Steps[0] ###", "only for steady-state linearisation points ''' MS=self.MS Ders_an=assembly.dfunstdgamma_dot(MS.Surfs) step=1e-6 Ders_num=[] n_surf=len(MS.Surfs) for ss", "cc,mm,nn=np.unravel_index( kk, (3,M+1,N+1) ) # perturb bound. 
vertices and collocation Surf.zeta=Zeta0[ss].copy() Surf.zeta[cc,mm,nn] +=", "for steady-state linearisation points ''' MS=self.MS Ders_an=assembly.dfunstdgamma_dot(MS.Surfs) step=1e-6 Ders_num=[] n_surf=len(MS.Surfs) for ss in", "Der_an=Der_an_list[ss] # Copy to avoid modifying the original for other tests Surf=copy.deepcopy(MS.Surfs[ss]) #", "abs %d to %d' %(ss_in,ss_out)) # ax2=fig.add_subplot(132) # ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel %d", "and modify the grid ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step # Recompute get_ind_velocities_at_segments and recover", "size' Er_max_star.append(er_max) Surf.gamma=gamma0.copy() ### Warning: this test fails: the dependency on gamma is", "ermax=np.max(ErAbs) # relative error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d\\t\\t\\tFDstep\\tErrAbs\\tErrRel'%(ss,)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e'", "unittest.main() # T=Test_assembly() # T.setUp() # ### force equation (qs term) # T.test_dvinddzeta()", "pp in range(K): mm=Surf.maps.ind_2d_pan_scal[0][pp] nn=Surf.maps.ind_2d_pan_scal[1][pp] Surf.gamma=gamma0.copy() Surf.gamma[mm,nn]+=step Surf.get_joukovski_qs(gammaw_TE=Surf_star.gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d", "step size is reduced' def test_dfqsdzeta_vrel0(self): ''' Note: the get_joukovski_qs method re-computes the", "in range(n_surf): # sub.append(0.0*Dervert_list[ii][jj]) # Derlist_num.append(sub) # Store the initial values of the", "assembly.uc_dncdzeta') MS=self.MS n_surf=MS.n_surf MS.get_ind_velocities_at_collocation_points() MS.get_normal_ind_velocities_at_collocation_points() for ss in range(n_surf): print('Surface %.2d:' %ss) Surf=MS.Surfs[ss]", "MS.verify_joukovski_qs() self.MS=MS def test_nc_dqcdzeta(self): ''' For each output surface, where induced velocity is", "Pder_an. 
The error is: - relative, if the element of Pder_an is nonzero", "print('------------------------------------------------------------ OK') def test_dfqsdgamma_vrel0(self): print('----------------------------- Testing assembly.dfqsdgamma_vrel0') MS=self.MS n_surf=MS.n_surf Der_list,Der_star_list=assembly.dfqsdgamma_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Er_max_star=[] Steps=[1e-2,1e-4,1e-6,]", "all other surfaces are looped. For wakes, only TE is displaced. ''' print('-----------------------------", "where modifications will take place and modify the grid ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step", "in range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in] M_in,N_in=Surf_in.maps.M,Surf_in.maps.N # perturb for kk in range(3*Surf_in.maps.Kzeta): cc,mm,nn=np.unravel_index( kk,", "#Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ### vertices for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in] M_in,N_in=Surf_in.maps.M,Surf_in.maps.N #", "# restore Surf_in.zeta=Zeta0[ss_in].copy() if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() ### check error at colloc Dercoll_num=np.zeros((3,3)) for", "import copy import warnings import unittest import itertools import numpy as np import", "C=C_list[ss] Cstar=Cstar_list[ss] # add noise to circulations gamma=Surf.gamma+np.random.rand( *Surf.gamma.shape ) gamma_star=Surf_star.gamma+np.random.rand( *Surf_star.gamma.shape )", "decreasing as FD step size is reduced' # assert Er_max_star[ii]<Er_max_star[ii-1],\\ # 'Error not", "equation (qs term) # T.test_dvinddzeta() # T.test_dfqsdvind_zeta() # run setUp after this test", "ensure they are re-computed for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] Surf_out.normals=N0[ss_out].copy() del Surf_out.u_ind_coll_norm try:", "Surf=copy.deepcopy(MS.Surfs[ss]) #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() for step in 
Steps: Der_num=0.0*Der_an for", "that other tests are not affected. Needs to be tested with a case", "restore Surf.gamma_dot=Gamma_dot0.copy() ### verify ermax, ErAbs, ErRel=max_error_tensor(Ders_an[ss],Dnum) # max absolute error ermax=np.max(ErAbs) #", "THE DERIVATIVES Der_an = np.zeros(Surf.maps.K) Der_an = np.dot(Dervert_list[ss], perturb_vector) Der_num = np.zeros(Surf.maps.K) ipanel", "for jj in range(n_surf): sub.append(0.0*Der_list[ii][jj]) sub_star.append(0.0*Der_star_list[ii][jj]) Der_list_num.append(sub) Der_star_list_num.append(sub_star) # store reference circulation and", "larger than 50 times the step size' if istep > 0: assert error[istep]<=error[istep-1],\\", "analytica derivatives Der_an=Der_an_list[ss] # Copy to avoid modifying the original for other tests", "range(1,len(Steps)): assert Er_max[ss]<Er_max[ss-1],\\ 'Error not decreasing as FD step size is reduced' print('------------------------------------------------------------", "everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore circulation: (include only induced velocity contrib.) Surf_in.gamma=Gamma0[ss_in].copy() # estimate", "# store reference circulation and force Gamma0=[] Gammaw0=[] Fqs0=[] for ss in range(n_surf):", "istep in range(nsteps): step = Steps[istep] for ss in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N", "in range(n_surf): Dervert_list_num.append(0.0*Dervert_list[ii]) # store reference grid Zeta0=[] Zeta0_star=[] for ss in range(n_surf):", "larger than 50 times step size' Er_max.append(er_max) def test_dfqsdvind_gamma(self): print('------------------------------ Testing assembly.dfqsdvind_gamma') MS=self.MS", "circulation: (include only induced velocity contrib.) 
Surf_in.gamma=Gamma0[ss_in].copy() # estimate derivatives for ss_out in", "# relative error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d\\t\\t\\tFDstep\\tErrAbs\\tErrRel'%(ss,)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel))", "Surf.zeta[ind_3d]+=step Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD step: %.2e", "for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] Surf_out.normals=N0[ss_out].copy() del Surf_out.u_ind_coll_norm try: del Surf_out.u_ind_coll except AttributeError:", "ss_in in range(n_surf): Der_an=Der_list[ss_out][ss_in] Der_num=Der_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Bound%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step, 'Test", "looped. For wakes, only TE is displaced. ''' print('----------------------------- Testing assembly.test_nc_dqcdzeta') MS=self.MS n_surf=MS.n_surf", "ax3.spy(Dercoll_list[ss_out],precision=50*step) # ax3.set_title('Dcoll an. 
%d to %d' %(ss_out,ss_out)) # #plt.show() # plt.close() def", "ax1.set_title('error abs %d' %(ss_in)) # ax2=fig.add_subplot(122) # ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel %d' %(ss_in))", "T.test_dfqsdvind_gamma() # T.test_dfqsduinput() # T.test_dfqsdzeta_vrel0() # T.test_dfqsdgamma_vrel0() # ### state equation terms #", "Surf.get_joukovski_qs(gammaw_TE=gammaw_TE) df=(Surf.fqs-fqs0)/step Der_star_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_star_an-Der_star_num)) print('Surface %.2d - wake:' %ss) print('FD step: %.2e --->", "analytical Dervert_list = assembly.nc_domegazetadzeta(MS.Surfs,MS.Surfs_star) # allocate numerical # Derlist_num=[] # for ii in", "# perturb for pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gamma0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate induced", "size' Er_max.append(er_max) ### Wake Surf.gamma=gamma0.copy() gammaw_TE0=Surf_star.gamma[0,:].copy() M_star,N_star=Surf_star.maps.M,Surf_star.maps.N K_star=Surf_star.maps.K for nn in range(N): pp=np.ravel_multi_index(", "external force Surf.u_ext[0,:,:]=Surf.u_ext[0,:,:]-20.0 Surf.u_ext[1,:,:]=Surf.u_ext[1,:,:]+60.0 Surf.u_ext[2,:,:]=Surf.u_ext[2,:,:]+30.0 Surf.u_ext=Surf.u_ext+np.random.rand(*Surf.u_ext.shape) ### analytical derivative # ind velocities computed", "ermax<50*step, 'Test failed!' 
Der_an=Der_star_list[ss_out][ss_in] Der_num=Der_star_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Wake%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step, 'Test", "# Store the initial values of the variabes Zeta0=[] Zeta0_star=[] N0=[] ZetaC0=[] for", "unittest import itertools import numpy as np import scipy.linalg as scalg import sharpy.utils.h5utils", "in range(n_surf): sub=[] for jj in range(n_surf): sub.append(0.0*Dervert_list[ii][jj]) Derlist_num.append(sub) # store reference circulation", "Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore zeta: (include only", "df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD step: %.2e ---> Max", "Surf is required to ensure that other tests are not affected. 
''' print('------------------------------", "Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Derlist_num[ss_out][ss_in][:,kk]=df.reshape(-1,order='C') ### check error for ss_out in range(n_surf):", "add noise to circulations gamma=Surf.gamma+np.random.rand( *Surf.gamma.shape ) gamma_star=Surf_star.gamma+np.random.rand( *Surf_star.gamma.shape ) gvec=np.dot(C,gamma.reshape(-1))+np.dot(Cstar,gamma_star.reshape(-1)) gvec_ref=np.concatenate((gamma[-1,:],gamma_star[:-1,:].reshape(-1))) assert", "ss in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N perturb_vector = np.zeros(3*Surf.maps.Kzeta) # PERTURBATION OF THE", "= algebra.skew(Surf.omega) for mm in range(M): for nn in range(N): Der_num[ipanel] = (np.dot(N0[ss][:,mm,nn],", "test_uc_dncdzeta(self,PlotFlag=False): print('---------------------------------- Testing assembly.uc_dncdzeta') MS=self.MS n_surf=MS.n_surf MS.get_ind_velocities_at_collocation_points() MS.get_normal_ind_velocities_at_collocation_points() for ss in range(n_surf): print('Surface", "copy import warnings import unittest import itertools import numpy as np import scipy.linalg", "force Gamma0=[] Gammaw0=[] Fqs0=[] for ss in range(n_surf): Gamma0.append(MS.Surfs[ss].gamma.copy()) Gammaw0.append(MS.Surfs_star[ss].gamma.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate", "wake for ss_in in range(n_surf): Surf_in=MS.Surfs_star[ss_in] # perturb for pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp]", "Dercoll_num=np.zeros((3,3)) for cc in range(3): zetac_pert=zetac.copy() zetac_pert[cc]+=step Vnum=comp_vind(zetac_pert,MS) Dercoll_num[:,cc]=(Vnum-V0)/step ercoll=np.max(np.abs(Dercoll-Dercoll_num)) print('Error coll.\\tFDstep\\tErrAbs') print('\\t\\t%.1e\\t%.1e'", "cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) ) # perturb bound. 
vertices and collocation Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step Surf_in.generate_collocations()", "even with large steps. In fact, reducing # the step quickly introduced round-off", "Vnum=comp_vind(zetac_pert,MS) Dercoll_num[:,cc]=(Vnum-V0)/step ercoll=np.max(np.abs(Dercoll-Dercoll_num)) print('Error coll.\\tFDstep\\tErrAbs') print('\\t\\t%.1e\\t%.1e' %(step,ercoll)) #if ercoll>10*step: embed() assert ercoll<10*step, 'Error", "Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ### loop input surfs", "print('Surface %.2d:' %ss) Surf=MS.Surfs[ss] # generate non-zero field of external force Surf.u_ext[0,:,:]=Surf.u_ext[0,:,:]-20.0 Surf.u_ext[1,:,:]=Surf.u_ext[1,:,:]+60.0", "for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) N0.append(MS.Surfs[ss].normals.copy()) # Computation Steps=[1e-2, 1e-4, 1e-6]", "re-computes the induced velocity at the panel segments. 
A copy of Surf is", "Steps=[1e-2, 1e-4, 1e-6] nsteps = len(Steps) error = np.zeros((nsteps,)) for istep in range(nsteps):", "option Der_all_exp=np.block(Dervert_list)+scalg.block_diag(*Dercoll_list) Der_all=np.block( assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star,Merge=True) ) _,ErAbs,ErRel=max_error_tensor(Der_all,Der_all_exp) # max absolute error ermax=np.max(ErAbs) # relative", "at vert for ss_in in range(n_surf): Der_an=Dervert_list[ss_in] Der_num=Dervert_list_num[ss_in] ermax,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute error", "error decreases with step size # for ii in range(1,len(Steps)): # assert Er_max[ii]<Er_max[ii-1],\\", "Test methods into assembly module ''' def setUp(self): # select test case fname", "df=(Surf_out.fqs-fqs0)/step Der_star_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ### check error Er_max=[] Er_max_star=[] for ss_out in range(n_surf): for ss_in", "recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore zeta: (include only induced velocity contrib.)", "OK') if PlotFlag: pass # fig = plt.figure('Spy Der',figsize=(10,4)) # ax1 = fig.add_subplot(121)", "plt.show() def test_dvinddzeta(self): ''' For each output surface, there induced velocity is computed,", "np import scipy.linalg as scalg import sharpy.utils.h5utils as h5utils import sharpy.linear.src.assembly as assembly", "the grid ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step # Recompute get_ind_velocities_at_segments and recover the previous", "trailing edge not correct' if __name__=='__main__': unittest.main() # T=Test_assembly() # T.setUp() # ###", "def test_dfqsdvind_zeta(self): ''' For each output surface, there induced velocity is computed, all", "loop input surfs for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in] M_in,N_in=Surf_in.maps.M,Surf_in.maps.N # perturb for", "zeta0=Surf.zeta.copy() for step in Steps: Der_num=0.0*Der_an for kk in range(3*Kzeta): 
Surf.zeta=zeta0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1)", "FD step size is reduced' # assert Er_max_star[ii]<Er_max_star[ii-1],\\ # 'Error not decreasing as", "comp_vind(zetac,MS): # comute induced velocity V=np.zeros((3,)) for ss in range(n_surf): Surf_in=MS.Surfs[ss] Surf_star_in=MS.Surfs_star[ss] V+=Surf_in.get_induced_velocity(zetac)", "assert ermax<50*step and ermax_rel<50*step, embed()#'Test failed!' # fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) #", "# estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] dvind=(Surf_out.u_ind_coll_norm-Vind0[ss_out])/step Derlist_num[ss_out][ss_in][:,kk]=dvind.reshape(-1,order='C') ### check error", "# ax3.set_title('Dcoll an. %d to %d' %(ss_out,ss_out)) # #plt.show() # plt.close() def test_dfunstdgamma_dot(self):", "ermax=np.max(np.abs(ErMat)) print('Bound%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step, 'Test failed!' Der_an=Der_star_list[ss_out][ss_in] Der_num=Der_star_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Wake%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out))", "assert ermax<5e2*step and ermax_rel<50*step, 'Test failed!' 
# fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) #", "#Steps=[1e-2,1e-4,1e-6,] Steps=[1e-5,] step=Steps[0] ###### bound for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] # perturb for", "the derivative of the unsteady force only depends on Gamma_dot, which is true", "M_star,N_star=Surf_star.maps.M,Surf_star.maps.N K_star=Surf_star.maps.K for nn in range(N): pp=np.ravel_multi_index( (0,nn), (M_star,N_star)) gammaw_TE=gammaw_TE0.copy() gammaw_TE[nn]+=step Surf.get_joukovski_qs(gammaw_TE=gammaw_TE) df=(Surf.fqs-fqs0)/step", "range(1,len(Steps)): # assert Er_max[ii]<Er_max[ii-1],\\ # 'Error not decreasing as FD step size is", "Steps: Der_num=0.0*Der_an for kk in range(3*Kzeta): Surf.u_ext=u_ext0.copy() Surf.zeta_dot=zeta_dot0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.u_ext[ind_3d]+=0.5*step Surf.zeta_dot[ind_3d]+=-0.5*step", "er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD step: %.2e ---> Max error: %.2e'%(step,er_max)", "# T.test_dfqsduinput() # T.test_dfqsdzeta_vrel0() # T.test_dfqsdgamma_vrel0() # ### state equation terms # T.test_uc_dncdzeta()", "ercoll<10*step, 'Error at collocation point' ### check error at vert for ss_in in", "maximum error analytical derivatives Pder_an. The error is: - relative, if the element", "assembly.nc_domegazetadzeta(MS.Surfs,MS.Surfs_star) # allocate numerical # Derlist_num=[] # for ii in range(n_surf): # sub=[]", "# perturb for kk in range(3*Surf_in.maps.Kzeta): cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) ) # perturb bound.", "range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) # <--- gammaw_0 needs to be used here!", "tests Surf=copy.deepcopy(MS.Surfs[ss]) # Define variables M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta # Save the reference values", "and ermax_rel<50*step, 'Test failed!' 
# fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(131) #", "# calculate new normal velocity Surf_pert=surface.AeroGridSurface(Surf.maps,zeta=zeta_pert, u_ext=Surf.u_ext,gamma=Surf.gamma) u_norm=Surf_pert.project_coll_to_normal(u_tot0) u_norm_vec=u_norm.reshape(-1,order='C') # FD derivative DerNum[:,jj]=(u_norm_vec-u_norm0_vec)/step", "''' Eabs=np.abs(Pder_num-Pder_an) nnzvec=Pder_an!=0 Erel=np.zeros(Pder_an.shape) Erel[nnzvec]=np.abs(Eabs[nnzvec]/Pder_an[nnzvec]) # Relative error check: remove NaN and inf...", "than 50 times step size' Er_max.append(er_max) def test_dfqsdvind_gamma(self): print('------------------------------ Testing assembly.dfqsdvind_gamma') MS=self.MS n_surf=MS.n_surf", "with step size for ss in range(1,len(Steps)): assert Er_max[ss]<Er_max[ss-1],\\ 'Error not decreasing as", "point' ### check error at vert for ss_in in range(n_surf): Der_an=Dervert_list[ss_in] Der_num=Dervert_list_num[ss_in] ermax,ErAbs,ErRel=max_error_tensor(Der_an,Der_num)", "error is: - relative, if the element of Pder_an is nonzero - absolute,", "# Compute new forces Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss)", "Der_list,Der_star_list=assembly.dfqsdgamma_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Er_max_star=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] Der_star_an=Der_star_list[ss] Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N", "assembly <NAME>, 29 May 2018 ''' import os import copy import warnings import", "Der_an=Der_star_list[ss_out][ss_in] Der_num=Der_star_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Wake%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step, 'Test failed!' 
# fig", "mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step ### prepare output surfaces # - ensure normals are unchanged", "in range(n_surf): sub=[] sub_star=[] for jj in range(n_surf): sub.append(0.0*Der_list[ii][jj]) sub_star.append(0.0*Der_star_list[ii][jj]) Der_list_num.append(sub) Der_star_list_num.append(sub_star) #", "# fig = plt.figure('Spy Der',figsize=(10,4)) # ax1 = fig.add_subplot(121) # ax1.spy(Der,precision=step) # ax2", "u_ext and zeta_dot ''' print('---------------------------------- Testing assembly.dfqsduinput') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsduinput(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for", "velocity contrib.) Surf_in.zeta=Zeta0[ss_in].copy() Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy()", "fact, reducing # the step quickly introduced round-off error. # # assert error", "step size' Er_max.append(er_max) def test_dfqsdzeta_omega(self): ''' Note: the get_joukovski_qs method re-computes the induced", "max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step and ermax_rel<50*step,", "reference circulation and normal induced velocities MS.get_normal_ind_velocities_at_collocation_points() Zeta0=[] Zeta0_star=[] Vind0=[] N0=[] ZetaC0=[] for", "del Surf_out.u_ind_coll except AttributeError: pass ### recalculate MS.get_normal_ind_velocities_at_collocation_points() # restore Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zetac=ZetaC0[ss_in].copy('F') Surf_star_in.zeta=Zeta0_star[ss_in].copy()", "velocity is allocated to both u_ext and zeta_dot ''' print('---------------------------------- Testing assembly.dfqsduinput') MS=self.MS", "print('Surface %.2d - bound:' %ss) print('FD step: %.2e ---> Max 
error: %.2e'%(step,er_max) )", "to %d' %(ss_in,ss_out)) # ax2=fig.add_subplot(132) # ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel %d to %d'", "the surface with the analytica derivatives Der_an=Der_an_list[ss] # Copy to avoid modifying the", "# perturb for kk in range(3*Surf_in.maps.Kzeta): cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) ) # perturb bound", "Surf.get_input_velocities_at_collocation_points() Der=assembly.uc_dncdzeta(Surf) ### numerical derivative #Surf.get_normal_input_velocities_at_collocation_points() u_tot0=Surf.u_ind_coll+Surf.u_input_coll u_norm0=Surf.project_coll_to_normal(u_tot0) u_norm0_vec=u_norm0.reshape(-1,order='C') zeta0=Surf.zeta DerNum=np.zeros(Der.shape) Steps=np.array([1e-2,1e-3,1e-4,1e-5,1e-6]) Er_max=0.0*Steps", "@warning: The relative error tensor may contain NaN or Inf if the analytical", "than 50 times step size' Er_max[ss]=er_max # assert error decreases with step size", "# haero = h5utils.readh5(fname) # tsdata = haero.data.aero.timestep_info[-1] # tsdata.omega = [] #", "ERROR error[istep] = np.maximum(error[istep], np.absolute(Der_num-Der_an).max()) print('FD step: %.2e ---> Max error: %.2e'%(step,error[istep]) )", "np.max(np.abs(gvec-gvec_ref))<1e-15,\\ 'Prop. 
from trailing edge not correct' if __name__=='__main__': unittest.main() # T=Test_assembly() #", "dcoll_b,dvert_b=assembly.dvinddzeta(zetac,MS.Surfs[ss_in],IsBound=True) dcoll_w,dvert_w=assembly.dvinddzeta(zetac,MS.Surfs_star[ss_in], IsBound=False,M_in_bound=MS.Surfs[ss_in].maps.M) Dercoll+=dcoll_b+dcoll_w Dervert_list.append(dvert_b+dvert_w) # allocate numerical Dercoll_num=np.zeros((3,3)) Dervert_list_num=[] for ii in", "max absolute error ermax=np.max(ErAbs) # relative error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape)", "MS=self.MS n_surf=MS.n_surf Der_list,Der_star_list=assembly.dfqsdgamma_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Er_max_star=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] Der_star_an=Der_star_list[ss] Surf=MS.Surfs[ss]", "range(n_surf): Surf=MS.Surfs[ss] Kzeta,K=Surf.maps.Kzeta,Surf.maps.K M,N=Surf.maps.M,Surf.maps.N Dnum=np.zeros((3*Kzeta,K)) # get refernce values Surf.get_joukovski_unsteady() Gamma_dot0=Surf.gamma_dot.copy() F0=Surf.funst.copy() for", "that actually rotates \"\"\" print('----------------------------- Testing assembly.test_nc_domegazetadzeta') MS=self.MS n_surf=MS.n_surf # analytical Dervert_list =", "Der_star_list_num=[] for ii in range(n_surf): sub=[] sub_star=[] for jj in range(n_surf): sub.append(0.0*Der_list[ii][jj]) sub_star.append(0.0*Der_star_list[ii][jj])", "kk in range(3*Surf_in.maps.Kzeta): cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) ) # perturb bound Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step #", "Der_list=assembly.dfqsdzeta_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] Surf=copy.deepcopy(MS.Surfs[ss]) #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta", "not working correctly, relative error (%.3e) too high!' 
%ErRel # allocate numerical Derlist_num=[]", "restore Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zetac=ZetaC0[ss_in].copy('F') Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] dvind=(Surf_out.u_ind_coll_norm-Vind0[ss_out])/step", "nsteps = len(Steps) error = np.zeros((nsteps,)) for istep in range(nsteps): step = Steps[istep]", "(qs term) # T.test_dvinddzeta() # T.test_dfqsdvind_zeta() # run setUp after this test #", "PlotFlag: pass # fig = plt.figure('Spy Der',figsize=(10,4)) # ax1 = fig.add_subplot(121) # ax1.spy(Der,precision=step)", "module ''' def setUp(self): # select test case fname = os.path.dirname(os.path.abspath(__file__)) + '/h5input/goland_mod_Nsurf01_M003_N004_a040.aero_state.h5'", "Gammaw0.append(MS.Surfs_star[ss].gamma.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-5,] step=Steps[0] ###### bound for ss_in", "are looped. For wakes, only TE is displaced. ''' print('----------------------------- Testing assembly.test_nc_dqcdzeta') MS=self.MS", "check: remove NaN and inf... iifinite=np.isfinite(Erel) err_max=0.0 for err_here in Erel[iifinite]: if np.abs(err_here)>err_max:", "vertex point) for kk in range(3*Kzeta): # Initialize to remove previous movements Surf.zeta=zeta0.copy()", "a case that actually rotates \"\"\" print('----------------------------- Testing assembly.test_nc_domegazetadzeta') MS=self.MS n_surf=MS.n_surf # analytical", "in range(n_surf): for ss_in in range(n_surf): Der_an=Der_list[ss_out][ss_in] Der_num=Der_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Bound%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax))", "affected. 
''' print('------------------------------ Testing assembly.dfqsdzeta_vrel0') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsdzeta_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss in", "+ '/h5input/goland_mod_Nsurf01_M003_N004_a040.aero_state.h5' haero = h5utils.readh5(fname) tsdata = haero.ts00000 # # Rotating cases #", "great accuracy is obtained even with large steps. In fact, reducing # the", "Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step # perturb wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step # recalculate induced", "ZetaC0[ss][:,mm,nn])) - np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, Surf.zetac[:,mm,nn]))) ipanel += 1 # COMPUTE THE ERROR error[istep]", "is: - relative, if the element of Pder_an is nonzero - absolute, otherwise", "Erel[iifinite]: if np.abs(err_here)>err_max: err_max=err_here # Zero elements check iizero=np.abs(Pder_an)<1e-15 for der_here in Pder_num[iizero]:", "abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step and ermax_rel<50*step, 'Test", "for cc in range(3): zetac_pert=zetac.copy() zetac_pert[cc]+=step Vnum=comp_vind(zetac_pert,MS) Dercoll_num[:,cc]=(Vnum-V0)/step ercoll=np.max(np.abs(Dercoll-Dercoll_num)) print('Error coll.\\tFDstep\\tErrAbs') print('\\t\\t%.1e\\t%.1e' %(step,ercoll))", "step in Steps: Der_num=0.0*Der_an Der_star_num=0.0*Der_star_an ### Bound for pp in range(K): mm=Surf.maps.ind_2d_pan_scal[0][pp] nn=Surf.maps.ind_2d_pan_scal[1][pp]", "for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] # perturb for pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp]", "Surf.zeta=zeta0.copy() # Define DoFs where modifications will take place and modify the grid", "numerical derivative 
#Surf.get_normal_input_velocities_at_collocation_points() u_tot0=Surf.u_ind_coll+Surf.u_input_coll u_norm0=Surf.project_coll_to_normal(u_tot0) u_norm0_vec=u_norm0.reshape(-1,order='C') zeta0=Surf.zeta DerNum=np.zeros(Der.shape) Steps=np.array([1e-2,1e-3,1e-4,1e-5,1e-6]) Er_max=0.0*Steps for ss in", "Der_list_num=[] Der_star_list_num=[] for ii in range(n_surf): sub=[] sub_star=[] for jj in range(n_surf): sub.append(0.0*Der_list[ii][jj])", "Der_an=Der_list[ss] Surf=copy.deepcopy(MS.Surfs[ss]) #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() for step in Steps: Der_num=0.0*Der_an", "''' def setUp(self): # select test case fname = os.path.dirname(os.path.abspath(__file__)) + '/h5input/goland_mod_Nsurf01_M003_N004_a040.aero_state.h5' haero", "the dependency on gamma is linear, hence # great accuracy is obtained even", "# Relative error check: remove NaN and inf... iifinite=np.isfinite(Erel) err_max=0.0 for err_here in", "# ax1=fig.add_subplot(131) # ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs %d to %d' %(ss_in,ss_out)) # ax2=fig.add_subplot(132)", "for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Der_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ###### wake for", "restore Surf_in.zeta=Zeta0[ss_in].copy() if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() ### check error at colloc Dercoll_num=np.zeros((3,3)) for cc", "computed already Surf.get_input_velocities_at_collocation_points() Der=assembly.uc_dncdzeta(Surf) ### numerical derivative #Surf.get_normal_input_velocities_at_collocation_points() u_tot0=Surf.u_ind_coll+Surf.u_input_coll u_norm0=Surf.project_coll_to_normal(u_tot0) u_norm0_vec=u_norm0.reshape(-1,order='C') zeta0=Surf.zeta DerNum=np.zeros(Der.shape)", "at colloc Dercoll_num=np.zeros((3,3)) for cc in range(3): 
zetac_pert=zetac.copy() zetac_pert[cc]+=step Vnum=comp_vind(zetac_pert,MS) Dercoll_num[:,cc]=(Vnum-V0)/step ercoll=np.max(np.abs(Dercoll-Dercoll_num)) print('Error", "n_surf=MS.n_surf # analytical Dervert_list = assembly.nc_domegazetadzeta(MS.Surfs,MS.Surfs_star) # allocate numerical # Derlist_num=[] # for", "for ii in range(1,len(Steps)): # assert Er_max[ii]<Er_max[ii-1],\\ # 'Error not decreasing as FD", "Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Der_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ###### wake for ss_in in range(n_surf): Surf_in=MS.Surfs_star[ss_in]", "Der_num=Der_star_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Wake%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step, 'Test failed!' # fig =", "looped. For wakes, only TE is displaced. ''' print('------------------------------- Testing assembly.dfqsdvind_zeta') MS=self.MS n_surf=MS.n_surf", "failed!' def test_wake_prop(self): MS=self.MS C_list,Cstar_list=assembly.wake_prop(MS.Surfs,MS.Surfs_star) n_surf=len(MS.Surfs) for ss in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] N=Surf.maps.N", "ermax_rel<1e-16,\\ 'option Merge=True not working correctly, relative error (%.3e) too high!' 
%ErRel #", "Surf_star_in.zeta=Zeta0_star[ss_in].copy() ### check error at colloc Dercoll_num=np.zeros((3,3)) for cc in range(3): zetac_pert=zetac.copy() zetac_pert[cc]+=step", "assert ercoll<10*step, 'Error at collocation point' ### check error at vert for ss_in", "will take place and modify the grid ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step # Recompute", "recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore circulation: (include only induced velocity contrib.)", "er_max<5e1*step, 'Error larger than 50 times step size' Er_max.append(er_max) def test_dfqsduinput(self): ''' Step", "''' print('----------------------------- Testing assembly.test_nc_dqcdzeta') MS=self.MS n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star) # check option Der_all_exp=np.block(Dervert_list)+scalg.block_diag(*Dercoll_list)", "induced velocities MS.get_normal_ind_velocities_at_collocation_points() Zeta0=[] Zeta0_star=[] Vind0=[] N0=[] ZetaC0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy())", "Initialize to remove previous movements Surf.zeta=zeta0.copy() # Define DoFs where modifications will take", "Der_list_num.append(sub) Der_star_list_num.append(sub_star) # store reference circulation and force Gamma0=[] Gammaw0=[] Fqs0=[] for ss", "in range(3): zetac_pert=zetac.copy() zetac_pert[cc]+=step Vnum=comp_vind(zetac_pert,MS) Dercoll_num[:,cc]=(Vnum-V0)/step ercoll=np.max(np.abs(Dercoll-Dercoll_num)) print('Error coll.\\tFDstep\\tErrAbs') print('\\t\\t%.1e\\t%.1e' %(step,ercoll)) #if ercoll>10*step:", "Der_num = np.zeros(Surf.maps.K) ipanel = 0 skew_omega = algebra.skew(Surf.omega) for mm in range(M):", "in panel circulation. 
Warning: test assumes the derivative of the unsteady force only", "coll.\\tFDstep\\tErrAbs') print('\\t\\t%.1e\\t%.1e' %(step,ercoll)) #if ercoll>10*step: embed() assert ercoll<10*step, 'Error at collocation point' ###", "# recalculate induced velocity everywhere Vnum=comp_vind(zetac,MS) dv=(Vnum-V0)/step Dervert_list_num[ss_in][:,kk]=dv.reshape(-1,order='C') # restore Surf_in.zeta=Zeta0[ss_in].copy() if mm==M_in:", "u_norm0=Surf.project_coll_to_normal(u_tot0) u_norm0_vec=u_norm0.reshape(-1,order='C') zeta0=Surf.zeta DerNum=np.zeros(Der.shape) Steps=np.array([1e-2,1e-3,1e-4,1e-5,1e-6]) Er_max=0.0*Steps for ss in range(len(Steps)): step=Steps[ss] for jj", "Surf.zetac[:,mm,nn]))) ipanel += 1 # COMPUTE THE ERROR error[istep] = np.maximum(error[istep], np.absolute(Der_num-Der_an).max()) print('FD", "all other surfaces are looped. For wakes, only TE is displaced. ''' def", "u_input_seg0=Surf.u_input_seg.copy() for step in Steps: # Initialize Der_num = 0.0*Der_an # Loop through", "jj in range(n_surf): # sub.append(0.0*Dervert_list[ii][jj]) # Derlist_num.append(sub) # Store the initial values of", "def test_nc_dqcdzeta(self): ''' For each output surface, where induced velocity is computed, all", "zeta_pert=zeta0.copy() zeta_pert[cc_pert,mm_pert,nn_pert]+=step # calculate new normal velocity Surf_pert=surface.AeroGridSurface(Surf.maps,zeta=zeta_pert, u_ext=Surf.u_ext,gamma=Surf.gamma) u_norm=Surf_pert.project_coll_to_normal(u_tot0) u_norm_vec=u_norm.reshape(-1,order='C') # FD", "Gammaw0=[] Fqs0=[] for ss in range(n_surf): Gamma0.append(MS.Surfs[ss].gamma.copy()) Gammaw0.append(MS.Surfs_star[ss].gamma.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis FDs", "print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step, 'Test failed!' 
# fig = plt.figure('Spy Der',figsize=(10,4)) # ax1", "linearisation points ''' MS=self.MS Ders_an=assembly.dfunstdgamma_dot(MS.Surfs) step=1e-6 Ders_num=[] n_surf=len(MS.Surfs) for ss in range(n_surf): Surf=MS.Surfs[ss]", "range(n_surf): sub.append(0.0*Dervert_list[ii][jj]) Derlist_num.append(sub) # store reference circulation and normal induced velocities MS.get_normal_ind_velocities_at_collocation_points() Zeta0=[]", "fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(131) # ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs %d", "derivative of unsteady aerodynamic force with respect to changes in panel circulation. Warning:", "from trailing edge not correct' if __name__=='__main__': unittest.main() # T=Test_assembly() # T.setUp() #", "print('----------------------------------- Testing assembly.dvinddzeta') MS=self.MS n_surf=MS.n_surf zetac=.5*(MS.Surfs[0].zeta[:,1,2]+MS.Surfs[0].zeta[:,1,3]) Dercoll=np.zeros((3,3)) Dervert_list=[] for ss_in in range(n_surf): dcoll_b,dvert_b=assembly.dvinddzeta(zetac,MS.Surfs[ss_in],IsBound=True)", "dependency on gamma is linear, hence # great accuracy is obtained even with", "= plt.figure('Spy Der',figsize=(10,4)) # ax1 = fig.add_subplot(121) # ax1.spy(Der,precision=step) # ax2 = fig.add_subplot(122)", "er_max<5e1*step, 'Error larger than 50 times step size' Er_max.append(er_max) def test_dfqsdvind_gamma(self): print('------------------------------ Testing", "surface, there induced velocity is computed, all other surfaces are looped. 
For wakes,", "n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star) # check option Der_all_exp=np.block(Dervert_list)+scalg.block_diag(*Dercoll_list) Der_all=np.block( assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star,Merge=True) ) _,ErAbs,ErRel=max_error_tensor(Der_all,Der_all_exp) #", "ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) N0.append(MS.Surfs[ss].normals.copy()) # Computation Steps=[1e-2, 1e-4, 1e-6] nsteps = len(Steps) error =", "'Error larger than 50 times step size' Er_max.append(er_max) def test_dfqsdvind_gamma(self): print('------------------------------ Testing assembly.dfqsdvind_gamma')", "get refernce values Surf.get_joukovski_unsteady() Gamma_dot0=Surf.gamma_dot.copy() F0=Surf.funst.copy() for pp in range(K): mm,nn=np.unravel_index( pp, (M,N)", "step perturb_vector[kk] += step*(0.2*np.random.rand()+0.9) cc,mm,nn=np.unravel_index( kk, (3,M+1,N+1) ) # perturb bound. vertices and", "kk in range(3*Surf_in.maps.Kzeta): cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) ) # perturb bound. 
vertices and collocation", "# assert Er_max_star[ii]<Er_max_star[ii-1],\\ # 'Error not decreasing as FD step size is reduced'", "range(K): mm=Surf.maps.ind_2d_pan_scal[0][pp] nn=Surf.maps.ind_2d_pan_scal[1][pp] Surf.gamma=gamma0.copy() Surf.gamma[mm,nn]+=step Surf.get_joukovski_qs(gammaw_TE=Surf_star.gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:'", "ss in range(n_surf): Der_an=Der_list[ss] #Surf=copy.deepcopy(MS.Surfs[ss]) Surf=MS.Surfs[ss] #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() u_ext0=Surf.u_ext.copy() zeta_dot0=Surf.zeta_dot.copy()", "K=Surf.maps.K Kzeta=Surf.maps.Kzeta # Save the reference values at equilibrium fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() u_input_seg0=Surf.u_input_seg.copy() for", "%(step,ermax,ermax_rel)) assert ermax<50*step and ermax_rel<50*step, embed()#'Test failed!' # fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4))", "For wakes, only TE is displaced. ''' print('------------------------------- Testing assembly.dfqsdvind_zeta') MS=self.MS n_surf=MS.n_surf #", "ErRel=max_error_tensor(Ders_an[ss],Dnum) # max absolute error ermax=np.max(ErAbs) # relative error at max abs error", "test case fname = os.path.dirname(os.path.abspath(__file__)) + '/h5input/goland_mod_Nsurf01_M003_N004_a040.aero_state.h5' haero = h5utils.readh5(fname) tsdata = haero.ts00000", "90% and the 110% of the step perturb_vector[kk] += step*(0.2*np.random.rand()+0.9) cc,mm,nn=np.unravel_index( kk, (3,M+1,N+1)", "THE ERROR error[istep] = np.maximum(error[istep], np.absolute(Der_num-Der_an).max()) print('FD step: %.2e ---> Max error: %.2e'%(step,error[istep])", "ermax<50*step, 'Test failed!' 
# fig = plt.figure('Spy Der',figsize=(10,4)) # ax1 = fig.add_subplot(111) #", "# 'Error not decreasing as FD step size is reduced' def test_dfqsdzeta_vrel0(self): '''", "zeta_dot ''' print('---------------------------------- Testing assembly.dfqsduinput') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsduinput(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss in", "the reference values at equilibrium fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() u_input_seg0=Surf.u_input_seg.copy() for step in Steps: #", "df=(Surf_out.fqs-fqs0)/step Der_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ###### wake for ss_in in range(n_surf): Surf_in=MS.Surfs_star[ss_in] # perturb for pp", "#Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ### loop input surfs for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in]", "in range(n_surf): Surf_out=MS.Surfs[ss_out] dvind=(Surf_out.u_ind_coll_norm-Vind0[ss_out])/step Derlist_num[ss_out][ss_in][:,kk]=dvind.reshape(-1,order='C') ### check error for ss_out in range(n_surf): for", "Computation Steps=[1e-2, 1e-4, 1e-6] nsteps = len(Steps) error = np.zeros((nsteps,)) for istep in", "the induced velocity at the panel segments. A copy of Surf is required", "Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(121) # ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs %d' %(ss_in))", "rel %d to %d' %(ss_in,ss_out)) # ax3=fig.add_subplot(133) # ax3.spy(Dercoll_list[ss_out],precision=50*step) # ax3.set_title('Dcoll an. %d", "induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore circulation: (include only induced velocity contrib.) 
Surf_in.gamma=Gamma0[ss_in].copy()", "(3,M+1,N+1) ) Surf.zeta[ind_3d]+=step # Recompute get_ind_velocities_at_segments and recover the previous grid Surf.get_input_velocities_at_segments() Surf.zeta=zeta0.copy()", "wake:' %ss) print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) ) assert er_max<5e1*step, 'Error", "for der_here in Pder_num[iizero]: if np.abs(der_here)>err_max: err_max=der_here return err_max, Eabs, Erel class Test_assembly(unittest.TestCase):", "force equation (qs term) # T.test_dvinddzeta() # T.test_dfqsdvind_zeta() # run setUp after this", "in range(n_surf): Der_an=Der_list[ss] #Surf=copy.deepcopy(MS.Surfs[ss]) Surf=MS.Surfs[ss] #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() u_ext0=Surf.u_ext.copy() zeta_dot0=Surf.zeta_dot.copy() for", "range(n_surf): Der_an=Der_list[ss] #Surf=copy.deepcopy(MS.Surfs[ss]) Surf=MS.Surfs[ss] #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() u_ext0=Surf.u_ext.copy() zeta_dot0=Surf.zeta_dot.copy() for step", "only depends on Gamma_dot, which is true only for steady-state linearisation points '''", "if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] _,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) # relative error", "ermax,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) # relative error at max abs error", "Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Vind0.append(MS.Surfs[ss].u_ind_coll_norm.copy()) N0.append(MS.Surfs[ss].normals.copy()) # calculate vis FDs Steps=[1e-6,] step=Steps[0] ### loop", "Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Der_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ###### wake for ss_in in range(n_surf): 
Surf_in=MS.Surfs_star[ss_in] # perturb", "assumes the derivative of the unsteady force only depends on Gamma_dot, which is", "reference circulation and force Gamma0=[] Gammaw0=[] Fqs0=[] for ss in range(n_surf): Gamma0.append(MS.Surfs[ss].gamma.copy()) Gammaw0.append(MS.Surfs_star[ss].gamma.copy())", "Surf_in.gamma=Gammaw0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore circulation: (include only", "grid ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step # Recompute get_ind_velocities_at_segments and recover the previous grid", "Dnum=np.zeros((3*Kzeta,K)) # get refernce values Surf.get_joukovski_unsteady() Gamma_dot0=Surf.gamma_dot.copy() F0=Surf.funst.copy() for pp in range(K): mm,nn=np.unravel_index(", "def test_nc_domegazetadzeta(self): \"\"\" Variation at colocation points due to geometrical variations at vertices", "M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() for step in Steps: Der_num=0.0*Der_an for kk in", "# restore circulation: (include only induced velocity contrib.) 
Surf_in.gamma=Gamma0[ss_in].copy() # estimate derivatives for", "elements check iizero=np.abs(Pder_an)<1e-15 for der_here in Pder_num[iizero]: if np.abs(der_here)>err_max: err_max=der_here return err_max, Eabs,", "np.dot(skew_omega, Surf.zetac[:,mm,nn]))) ipanel += 1 # COMPUTE THE ERROR error[istep] = np.maximum(error[istep], np.absolute(Der_num-Der_an).max())", "zetac=.5*(MS.Surfs[0].zeta[:,1,2]+MS.Surfs[0].zeta[:,1,3]) Dercoll=np.zeros((3,3)) Dervert_list=[] for ss_in in range(n_surf): dcoll_b,dvert_b=assembly.dvinddzeta(zetac,MS.Surfs[ss_in],IsBound=True) dcoll_w,dvert_w=assembly.dvinddzeta(zetac,MS.Surfs_star[ss_in], IsBound=False,M_in_bound=MS.Surfs[ss_in].maps.M) Dercoll+=dcoll_b+dcoll_w Dervert_list.append(dvert_b+dvert_w) #", "F0=Surf.funst.copy() for pp in range(K): mm,nn=np.unravel_index( pp, (M,N) ) Surf.gamma_dot=Gamma_dot0.copy() Surf.gamma_dot[mm,nn]+=step Surf.get_joukovski_unsteady() dF=(Surf.funst-F0)/step", "- wake:' %ss) print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) ) assert er_max<5e1*step,", "tensors, and the maximum error. @warning: The relative error tensor may contain NaN", "wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) #", "as multisurfaces import sharpy.linear.src.surface as surface import sharpy.linear.src.libuvlm as libuvlm import sharpy.utils.algebra as", "vel on output to ensure they are re-computed for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out]", "FD step size is reduced' print('------------------------------------------------------------ OK') def test_dfqsdgamma_vrel0(self): print('----------------------------- Testing assembly.dfqsdgamma_vrel0') MS=self.MS", "here! 
df=(Surf_out.fqs-fqs0)/step Der_star_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ### check error Er_max=[] Er_max_star=[] for ss_out in range(n_surf): for", "error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<50*step and ermax_rel<50*step, embed()#'Test failed!'", "import itertools import numpy as np import scipy.linalg as scalg import sharpy.utils.h5utils as", "bound. vertices and collocation Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step Surf_in.generate_collocations() # perturb wake TE if mm==M_in:", "NaN or Inf if the analytical derivative is zero. These elements are filtered", "wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step ### prepare output surfaces # - ensure", "= np.zeros((nsteps,)) for istep in range(nsteps): step = Steps[istep] for ss in range(n_surf):", "(np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, ZetaC0[ss][:,mm,nn])) - np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, Surf.zetac[:,mm,nn]))) ipanel += 1 # COMPUTE THE", "plt.figure('Spy Der',figsize=(10,4)) # ax1 = fig.add_subplot(111) # ax1.spy(ErMat,precision=50*step) # plt.show() def test_dvinddzeta(self): '''", "for ss in range(len(Steps)): step=Steps[ss] for jj in range(3*Surf.maps.Kzeta): # perturb cc_pert=Surf.maps.ind_3d_vert_vect[0][jj] mm_pert=Surf.maps.ind_3d_vert_vect[1][jj]", "return V print('----------------------------------- Testing assembly.dvinddzeta') MS=self.MS n_surf=MS.n_surf zetac=.5*(MS.Surfs[0].zeta[:,1,2]+MS.Surfs[0].zeta[:,1,3]) Dercoll=np.zeros((3,3)) Dervert_list=[] for ss_in in", "0 skew_omega = algebra.skew(Surf.omega) for mm in range(M): for nn in range(N): Der_num[ipanel]", "MS.get_normal_ind_velocities_at_collocation_points() # restore Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zetac=ZetaC0[ss_in].copy('F') 
Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives for ss_out in range(n_surf):", "# for jj in range(n_surf): # sub.append(0.0*Dervert_list[ii][jj]) # Derlist_num.append(sub) # Store the initial", "original for other tests Surf=copy.deepcopy(MS.Surfs[ss]) # Define variables M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta # Save", "perturb for pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gamma0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate induced velocity", "# ax2=fig.add_subplot(122) # ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel %d' %(ss_in)) # #plt.show() # plt.close()", "fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Derlist_num[ss_out][ss_in][:,kk]=df.reshape(-1,order='C') ### check error for ss_out in range(n_surf): for", "size is reduced' print('------------------------------------------------------------ OK') def test_dfqsdgamma_vrel0(self): print('----------------------------- Testing assembly.dfqsdgamma_vrel0') MS=self.MS n_surf=MS.n_surf Der_list,Der_star_list=assembly.dfqsdgamma_vrel0(MS.Surfs,MS.Surfs_star)", "DerNum[:,jj]=(u_norm_vec-u_norm0_vec)/step er_max=np.max(np.abs(Der-DerNum)) print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) ) assert er_max<5e1*step, 'Error", "ercoll<10*step, 'Error at vertices' # fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(121) #", "error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d\\t\\t\\tFDstep\\tErrAbs\\tErrRel'%(ss,)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step", "In fact, reducing # the step quickly introduced round-off error. 
# # assert", "ax2=fig.add_subplot(122) # ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel %d' %(ss_in)) # #plt.show() # plt.close() def", "Surf=MS.Surfs[ss] Kzeta,K=Surf.maps.Kzeta,Surf.maps.K M,N=Surf.maps.M,Surf.maps.N Dnum=np.zeros((3*Kzeta,K)) # get refernce values Surf.get_joukovski_unsteady() Gamma_dot0=Surf.gamma_dot.copy() F0=Surf.funst.copy() for pp", "edge not correct' if __name__=='__main__': unittest.main() # T=Test_assembly() # T.setUp() # ### force", "The error is: - relative, if the element of Pder_an is nonzero -", "if istep > 0: assert error[istep]<=error[istep-1],\\ 'Error not decreasing as FD step size", "Testing assembly.test_nc_domegazetadzeta') MS=self.MS n_surf=MS.n_surf # analytical Dervert_list = assembly.nc_domegazetadzeta(MS.Surfs,MS.Surfs_star) # allocate numerical #", "the 90% and the 110% of the step perturb_vector[kk] += step*(0.2*np.random.rand()+0.9) cc,mm,nn=np.unravel_index( kk,", "Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] Surf=copy.deepcopy(MS.Surfs[ss]) #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy()", "circulation. Warning: test assumes the derivative of the unsteady force only depends on", "to ensure that other tests are not affected. 
''' print('------------------------------ Testing assembly.dfqsdzeta_vrel0') MS=self.MS", "for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,]", "test_dfqsdzeta_vrel0(self): ''' Note: the get_joukovski_qs method re-computes the induced velocity at the panel", "### vertices for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in] M_in,N_in=Surf_in.maps.M,Surf_in.maps.N # perturb for kk", "error ermax=np.max(ErAbs) # relative error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d\\t\\t\\tFDstep\\tErrAbs\\tErrRel'%(ss,))", "%ss) Surf=MS.Surfs[ss] # generate non-zero field of external force Surf.u_ext[0,:,:]=Surf.u_ext[0,:,:]-20.0 Surf.u_ext[1,:,:]=Surf.u_ext[1,:,:]+60.0 Surf.u_ext[2,:,:]=Surf.u_ext[2,:,:]+30.0 Surf.u_ext=Surf.u_ext+np.random.rand(*Surf.u_ext.shape)", "Surf.u_ext[2,:,:]=Surf.u_ext[2,:,:]+30.0 Surf.u_ext=Surf.u_ext+np.random.rand(*Surf.u_ext.shape) ### analytical derivative # ind velocities computed already Surf.get_input_velocities_at_collocation_points() Der=assembly.uc_dncdzeta(Surf) ###", "Steps=[1e-6,] step=Steps[0] ### loop input surfs for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in] M_in,N_in=Surf_in.maps.M,Surf_in.maps.N", "Bound for pp in range(K): mm=Surf.maps.ind_2d_pan_scal[0][pp] nn=Surf.maps.ind_2d_pan_scal[1][pp] Surf.gamma=gamma0.copy() Surf.gamma[mm,nn]+=step Surf.get_joukovski_qs(gammaw_TE=Surf_star.gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num))", "reference circulation and force Zeta0=[] Zeta0_star=[] Fqs0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy())", "der_here in Pder_num[iizero]: if np.abs(der_here)>err_max: 
err_max=der_here return err_max, Eabs, Erel class Test_assembly(unittest.TestCase): '''", "than 50 times step size' Er_max.append(er_max) def test_dfqsduinput(self): ''' Step change in input", "Der',figsize=(10,4)) # ax1 = fig.add_subplot(121) # ax1.spy(Der,precision=step) # ax2 = fig.add_subplot(122) # ax2.spy(DerNum,precision=step)", "for ss in range(1,len(Steps)): assert Er_max[ss]<Er_max[ss-1],\\ 'Error not decreasing as FD step size", "cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) ) # perturb bound Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step # perturb wake TE", "output surface, where induced velocity is computed, all other surfaces are looped. For", "steady-state linearisation points ''' MS=self.MS Ders_an=assembly.dfunstdgamma_dot(MS.Surfs) step=1e-6 Ders_num=[] n_surf=len(MS.Surfs) for ss in range(n_surf):", "These elements are filtered out during the search for maximum error, and absolute", "in range(K): mm=Surf.maps.ind_2d_pan_scal[0][pp] nn=Surf.maps.ind_2d_pan_scal[1][pp] Surf.gamma=gamma0.copy() Surf.gamma[mm,nn]+=step Surf.get_joukovski_qs(gammaw_TE=Surf_star.gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d -", "surface import sharpy.linear.src.libuvlm as libuvlm import sharpy.utils.algebra as algebra np.set_printoptions(linewidth=200,precision=3) def max_error_tensor(Pder_an,Pder_num): '''", "ermax<50*step and ermax_rel<50*step, embed()#'Test failed!' 
# fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(131)", "= [] # for ss in range(haero.data.aero.n_surf): # tsdata.omega.append(haero.data.structure.timestep_info[-1].for_vel[3:6]) MS=multisurfaces.MultiAeroGridSurfaces(tsdata) MS.get_normal_ind_velocities_at_collocation_points() MS.verify_non_penetration() MS.verify_aic_coll()", "assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star,Merge=True) ) _,ErAbs,ErRel=max_error_tensor(Der_all,Der_all_exp) # max absolute error ermax=np.max(ErAbs) # relative error at max", "class Test_assembly(unittest.TestCase): ''' Test methods into assembly module ''' def setUp(self): # select", "gamma0=Surf.gamma.copy() for step in Steps: Der_num=0.0*Der_an Der_star_num=0.0*Der_star_an ### Bound for pp in range(K):", "Variation at colocation points due to geometrical variations at vertices Needs to be", "range(N): Der_num[ipanel] = (np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, ZetaC0[ss][:,mm,nn])) - np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, Surf.zetac[:,mm,nn]))) ipanel += 1", "kk in range(3*Kzeta): Surf.u_ext=u_ext0.copy() Surf.zeta_dot=zeta_dot0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.u_ext[ind_3d]+=0.5*step Surf.zeta_dot[ind_3d]+=-0.5*step Surf.get_input_velocities_at_segments() Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step", "is nonzero - absolute, otherwise The function returns the absolute and relative error", "Derlist_num=[] # for ii in range(n_surf): # sub=[] # for jj in range(n_surf):", "Der_num[ipanel] = (np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, ZetaC0[ss][:,mm,nn])) - np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, Surf.zetac[:,mm,nn]))) ipanel += 1 #", "error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] assert ermax_rel<1e-16,\\ 'option Merge=True not", "# assert Er_max[ii]<Er_max[ii-1],\\ # 'Error not decreasing as FD step size is reduced'", "The function returns the absolute and relative error 
tensors, and the maximum error.", "for kk in range(3*Kzeta): Surf.zeta=zeta0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num))", "restore circulation: (include only induced velocity contrib.) Surf_in.gamma=Gammaw0[ss_in].copy() # estimate derivatives for ss_out", "already Surf.get_input_velocities_at_collocation_points() Der=assembly.uc_dncdzeta(Surf) ### numerical derivative #Surf.get_normal_input_velocities_at_collocation_points() u_tot0=Surf.u_ind_coll+Surf.u_input_coll u_norm0=Surf.project_coll_to_normal(u_tot0) u_norm0_vec=u_norm0.reshape(-1,order='C') zeta0=Surf.zeta DerNum=np.zeros(Der.shape) Steps=np.array([1e-2,1e-3,1e-4,1e-5,1e-6])", "velocity everywhere Vnum=comp_vind(zetac,MS) dv=(Vnum-V0)/step Dervert_list_num[ss_in][:,kk]=dv.reshape(-1,order='C') # restore Surf_in.zeta=Zeta0[ss_in].copy() if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() ### check", "directions per vertex point) for kk in range(3*Kzeta): # Initialize to remove previous", "iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step and ermax_rel<50*step, 'Test failed!' # fig=plt.figure('Spy", "T.test_dfqsdgamma_vrel0() # ### state equation terms # T.test_uc_dncdzeta() # T.test_nc_dqcdzeta() ### force equation", "required to ensure that other tests are not affected. 
''' print('------------------------------ Testing assembly.dfqsdzeta_vrel0')", "ss_in in range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] _,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute error", "range(n_surf): sub.append(0.0*Der_list[ii][jj]) sub_star.append(0.0*Der_star_list[ii][jj]) Der_list_num.append(sub) Der_star_list_num.append(sub_star) # store reference circulation and force Gamma0=[] Gammaw0=[]", "test_dfqsdgamma_vrel0(self): print('----------------------------- Testing assembly.dfqsdgamma_vrel0') MS=self.MS n_surf=MS.n_surf Der_list,Der_star_list=assembly.dfqsdgamma_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Er_max_star=[] Steps=[1e-2,1e-4,1e-6,] for ss in", "step in Steps: Der_num=0.0*Der_an for kk in range(3*Kzeta): Surf.u_ext=u_ext0.copy() Surf.zeta_dot=zeta_dot0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) )", ") # perturb bound Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step # perturb wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy()", "case that actually rotates \"\"\" print('----------------------------- Testing assembly.test_nc_domegazetadzeta') MS=self.MS n_surf=MS.n_surf # analytical Dervert_list", "depends on Gamma_dot, which is true only for steady-state linearisation points ''' MS=self.MS", "err_max=0.0 for err_here in Erel[iifinite]: if np.abs(err_here)>err_max: err_max=err_here # Zero elements check iizero=np.abs(Pder_an)<1e-15", "and absolute error is checked. 
''' Eabs=np.abs(Pder_num-Pder_an) nnzvec=Pder_an!=0 Erel=np.zeros(Pder_an.shape) Erel[nnzvec]=np.abs(Eabs[nnzvec]/Pder_an[nnzvec]) # Relative error", "ii in range(n_surf): sub=[] for jj in range(n_surf): sub.append(0.0*Dervert_list[ii][jj]) Derlist_num.append(sub) # store reference", "np.zeros((nsteps,)) for istep in range(nsteps): step = Steps[istep] for ss in range(n_surf): Surf=MS.Surfs[ss]", "range(n_surf): # sub=[] # for jj in range(n_surf): # sub.append(0.0*Dervert_list[ii][jj]) # Derlist_num.append(sub) #", "# rename MS=self.MS n_surf=MS.n_surf # Compute the anaytical derivative of the case Der_an_list=assembly.dfqsdzeta_omega(MS.Surfs,MS.Surfs_star)", "Define DoFs where modifications will take place and modify the grid ind_3d=np.unravel_index(kk, (3,M+1,N+1)", "Dercoll+=dcoll_b+dcoll_w Dervert_list.append(dvert_b+dvert_w) # allocate numerical Dercoll_num=np.zeros((3,3)) Dervert_list_num=[] for ii in range(n_surf): Dervert_list_num.append(0.0*Dervert_list[ii]) #", "= len(Steps) error = np.zeros((nsteps,)) for istep in range(nsteps): step = Steps[istep] for", "Zeta0_star=[] Fqs0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis FDs", "# ax2.set_title('error rel %d to %d' %(ss_in,ss_out)) # ax3=fig.add_subplot(133) # ax3.spy(Dercoll_list[ss_out],precision=50*step) # ax3.set_title('Dcoll", "step size is reduced' print('------------------------------------------------------------ OK') def test_dfqsdgamma_vrel0(self): print('----------------------------- Testing assembly.dfqsdgamma_vrel0') MS=self.MS n_surf=MS.n_surf", "def test_dvinddzeta(self): ''' For each output surface, there induced velocity is computed, all", "the step size' if istep > 0: assert error[istep]<=error[istep-1],\\ 'Error not decreasing as", "as FD step size is reduced' # assert Er_max_star[ii]<Er_max_star[ii-1],\\ # 'Error not decreasing", "correct' if __name__=='__main__': 
unittest.main() # T=Test_assembly() # T.setUp() # ### force equation (qs", "def comp_vind(zetac,MS): # comute induced velocity V=np.zeros((3,)) for ss in range(n_surf): Surf_in=MS.Surfs[ss] Surf_star_in=MS.Surfs_star[ss]", "Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K fqs0=Surf.fqs.copy() gamma0=Surf.gamma.copy() for step in Steps: Der_num=0.0*Der_an Der_star_num=0.0*Der_star_an ### Bound", "# allocate numerical Dercoll_num=np.zeros((3,3)) Dervert_list_num=[] for ii in range(n_surf): Dervert_list_num.append(0.0*Dervert_list[ii]) # store reference", "Test derivative of unsteady aerodynamic force with respect to changes in panel circulation.", "T.test_dfqsdvind_zeta() # run setUp after this test # T.setUp() # T.test_dfqsdvind_gamma() # T.test_dfqsduinput()", "ax2.set_title('error rel %d' %(ss_in)) # #plt.show() # plt.close() def test_dfqsdvind_zeta(self): ''' For each", "vis FDs Steps=[1e-6,] step=Steps[0] ### loop input surfs for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in]", "of Surf is required to ensure that other tests are not affected. Needs", "as algebra np.set_printoptions(linewidth=200,precision=3) def max_error_tensor(Pder_an,Pder_num): ''' Finds the maximum error analytical derivatives Pder_an.", "kk, (3,M+1,N+1) ) # perturb bound. 
vertices and collocation Surf.zeta=Zeta0[ss].copy() Surf.zeta[cc,mm,nn] += perturb_vector[kk]", "derivative of the unsteady force only depends on Gamma_dot, which is true only", "analytical Dercoll_list,Dervert_list=assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star) # check option Der_all_exp=np.block(Dervert_list)+scalg.block_diag(*Dercoll_list) Der_all=np.block( assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star,Merge=True) ) _,ErAbs,ErRel=max_error_tensor(Der_all,Der_all_exp) # max absolute", "T.setUp() # T.test_dfqsdvind_gamma() # T.test_dfqsduinput() # T.test_dfqsdzeta_vrel0() # T.test_dfqsdgamma_vrel0() # ### state equation", "# assert error decreases with step size for ss in range(1,len(Steps)): assert Er_max[ss]<Er_max[ss-1],\\", "reducing # the step quickly introduced round-off error. # # assert error decreases", "each output surface, there induced velocity is computed, all other surfaces are looped.", "iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<50*step and ermax_rel<50*step, embed()#'Test failed!' # fig=plt.figure('Spy", "assembly.dfqsdvind_gamma') MS=self.MS n_surf=MS.n_surf # analytical Der_list,Der_star_list=assembly.dfqsdvind_gamma(MS.Surfs,MS.Surfs_star) # allocate numerical Der_list_num=[] Der_star_list_num=[] for ii", "calculate vis FDs Steps=[1e-6,] step=Steps[0] ### loop input surfs for ss_in in range(n_surf):", "displaced. 
''' def comp_vind(zetac,MS): # comute induced velocity V=np.zeros((3,)) for ss in range(n_surf):", "Vind0.append(MS.Surfs[ss].u_ind_coll_norm.copy()) N0.append(MS.Surfs[ss].normals.copy()) # calculate vis FDs Steps=[1e-6,] step=Steps[0] ### loop input surfs for", "if np.abs(err_here)>err_max: err_max=err_here # Zero elements check iizero=np.abs(Pder_an)<1e-15 for der_here in Pder_num[iizero]: if", "Der_star_num=0.0*Der_star_an ### Bound for pp in range(K): mm=Surf.maps.ind_2d_pan_scal[0][pp] nn=Surf.maps.ind_2d_pan_scal[1][pp] Surf.gamma=gamma0.copy() Surf.gamma[mm,nn]+=step Surf.get_joukovski_qs(gammaw_TE=Surf_star.gamma[0,:]) df=(Surf.fqs-fqs0)/step", "K=Surf.maps.K fqs0=Surf.fqs.copy() gamma0=Surf.gamma.copy() for step in Steps: Der_num=0.0*Der_an Der_star_num=0.0*Der_star_an ### Bound for pp", "'Error larger than 50 times step size' Er_max_star.append(er_max) Surf.gamma=gamma0.copy() ### Warning: this test", "other surfaces are looped. For wakes, only TE is displaced. ''' print('----------------------------- Testing", "relative error tensor may contain NaN or Inf if the analytical derivative is", "%d' %(ss_out,ss_out)) # #plt.show() # plt.close() def test_dfunstdgamma_dot(self): ''' Test derivative of unsteady", "Testing assembly.dfqsdvind_zeta') MS=self.MS n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.dfqsdvind_zeta(MS.Surfs,MS.Surfs_star) # allocate numerical Derlist_num=[] for ii", "plt.close() def test_dfqsdvind_zeta(self): ''' For each output surface, there induced velocity is computed,", "for err_here in Erel[iifinite]: if np.abs(err_here)>err_max: err_max=err_here # Zero elements check iizero=np.abs(Pder_an)<1e-15 for", "ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] Surf_out.normals=N0[ss_out].copy() del Surf_out.u_ind_coll_norm try: del Surf_out.u_ind_coll except AttributeError: pass", "### Warning: this test fails: the dependency on gamma is linear, hence #", "in Steps: Der_num=0.0*Der_an for kk in range(3*Kzeta): 
Surf.zeta=zeta0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.zeta[ind_3d]+=step Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:])", "gamma_star=Surf_star.gamma+np.random.rand( *Surf_star.gamma.shape ) gvec=np.dot(C,gamma.reshape(-1))+np.dot(Cstar,gamma_star.reshape(-1)) gvec_ref=np.concatenate((gamma[-1,:],gamma_star[:-1,:].reshape(-1))) assert np.max(np.abs(gvec-gvec_ref))<1e-15,\\ 'Prop. from trailing edge not correct'", "multisurfaces import sharpy.linear.src.surface as surface import sharpy.linear.src.libuvlm as libuvlm import sharpy.utils.algebra as algebra", "for ii in range(n_surf): # sub=[] # for jj in range(n_surf): # sub.append(0.0*Dervert_list[ii][jj])", "plt.figure('Spy Der',figsize=(10,4)) # ax1 = fig.add_subplot(121) # ax1.spy(Der,precision=step) # ax2 = fig.add_subplot(122) #", "point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<50*step and ermax_rel<50*step, embed()#'Test failed!' 
#", "# select test case fname = os.path.dirname(os.path.abspath(__file__)) + '/h5input/goland_mod_Nsurf01_M003_N004_a040.aero_state.h5' haero = h5utils.readh5(fname) tsdata", "0: assert error[istep]<=error[istep-1],\\ 'Error not decreasing as FD step size is reduced' print('------------------------------------------------------------", "relative, if the element of Pder_an is nonzero - absolute, otherwise The function", "u_norm=Surf_pert.project_coll_to_normal(u_tot0) u_norm_vec=u_norm.reshape(-1,order='C') # FD derivative DerNum[:,jj]=(u_norm_vec-u_norm0_vec)/step er_max=np.max(np.abs(Der-DerNum)) print('FD step: %.2e ---> Max error:", "# run setUp after this test # T.setUp() # T.test_dfqsdvind_gamma() # T.test_dfqsduinput() #", "Er_max=[] Er_max_star=[] for ss_out in range(n_surf): for ss_in in range(n_surf): Der_an=Der_list[ss_out][ss_in] Der_num=Der_list_num[ss_out][ss_in] ErMat=Der_an-Der_num", "gammaw_TE0=Surf_star.gamma[0,:].copy() M_star,N_star=Surf_star.maps.M,Surf_star.maps.N K_star=Surf_star.maps.K for nn in range(N): pp=np.ravel_multi_index( (0,nn), (M_star,N_star)) gammaw_TE=gammaw_TE0.copy() gammaw_TE[nn]+=step Surf.get_joukovski_qs(gammaw_TE=gammaw_TE)", "reduced' def test_dfqsdzeta_vrel0(self): ''' Note: the get_joukovski_qs method re-computes the induced velocity at", "# ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs %d to %d' %(ss_in,ss_out)) # ax2=fig.add_subplot(132) # ax2.spy(ErRel,precision=1e2*step)", "TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step ### prepare output surfaces # - ensure normals", "Surf.u_ext[1,:,:]=Surf.u_ext[1,:,:]+60.0 Surf.u_ext[2,:,:]=Surf.u_ext[2,:,:]+30.0 Surf.u_ext=Surf.u_ext+np.random.rand(*Surf.u_ext.shape) ### analytical derivative # ind velocities computed already Surf.get_input_velocities_at_collocation_points() Der=assembly.uc_dncdzeta(Surf)", "# perturb bound. 
vertices and collocation Surf.zeta=Zeta0[ss].copy() Surf.zeta[cc,mm,nn] += perturb_vector[kk] # perturb wake", "at vertices Needs to be tested with a case that actually rotates \"\"\"", "case that actually rotates ''' print('------------------------------ Testing assembly.dfqsdzeta_omega') # rename MS=self.MS n_surf=MS.n_surf #", "variations at vertices Needs to be tested with a case that actually rotates", "circulations gamma=Surf.gamma+np.random.rand( *Surf.gamma.shape ) gamma_star=Surf_star.gamma+np.random.rand( *Surf_star.gamma.shape ) gvec=np.dot(C,gamma.reshape(-1))+np.dot(Cstar,gamma_star.reshape(-1)) gvec_ref=np.concatenate((gamma[-1,:],gamma_star[:-1,:].reshape(-1))) assert np.max(np.abs(gvec-gvec_ref))<1e-15,\\ 'Prop. from", "and inf... iifinite=np.isfinite(Erel) err_max=0.0 for err_here in Erel[iifinite]: if np.abs(err_here)>err_max: err_max=err_here # Zero", "collocation Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step Surf_in.generate_collocations() # perturb wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step ###", "avoid modifying the original for other tests Surf=copy.deepcopy(MS.Surfs[ss]) # Define variables M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K", "error: %.2e'%(step,er_max) ) assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max[ss]=er_max", "(%.3e) too high!' 
%ErRel # allocate numerical Derlist_num=[] for ii in range(n_surf): sub=[]", "FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ### loop input surfs for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in]", "Dercoll_list,Dervert_list=assembly.dfqsdvind_zeta(MS.Surfs,MS.Surfs_star) # allocate numerical Derlist_num=[] for ii in range(n_surf): sub=[] for jj in", "gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Der_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ###### wake for ss_in in range(n_surf): Surf_in=MS.Surfs_star[ss_in] # perturb for", "Der_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ###### wake for ss_in in range(n_surf): Surf_in=MS.Surfs_star[ss_in] # perturb for pp in", "for jj in range(n_surf): sub.append(0.0*Dervert_list[ii][jj]) Derlist_num.append(sub) # store reference circulation and normal induced", "print('Error coll.\\tFDstep\\tErrAbs') print('\\t\\t%.1e\\t%.1e' %(step,ercoll)) #if ercoll>10*step: embed() assert ercoll<10*step, 'Error at collocation point'", ") gamma_star=Surf_star.gamma+np.random.rand( *Surf_star.gamma.shape ) gvec=np.dot(C,gamma.reshape(-1))+np.dot(Cstar,gamma_star.reshape(-1)) gvec_ref=np.concatenate((gamma[-1,:],gamma_star[:-1,:].reshape(-1))) assert np.max(np.abs(gvec-gvec_ref))<1e-15,\\ 'Prop. from trailing edge not", "Derlist_num[ss_out][ss_in][:,kk]=df.reshape(-1,order='C') ### check error for ss_out in range(n_surf): for ss_in in range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy()", "Surf_in=MS.Surfs[ss_in] # perturb for pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gamma0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate", "for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) V0=comp_vind(zetac,MS) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,]", "the panel segments. 
A copy of Surf is required to ensure that other", "mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gammaw0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore circulation:", "is zero. These elements are filtered out during the search for maximum error,", "mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step # recalculate induced velocity everywhere Vnum=comp_vind(zetac,MS) dv=(Vnum-V0)/step Dervert_list_num[ss_in][:,kk]=dv.reshape(-1,order='C') # restore", "range(n_surf): Der_an=Der_list[ss] Der_star_an=Der_star_list[ss] Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K fqs0=Surf.fqs.copy() gamma0=Surf.gamma.copy() for step in Steps:", "ermax=np.max(ErAbs) # relative error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e'", "fname = os.path.dirname(os.path.abspath(__file__)) + '/h5input/goland_mod_Nsurf01_M003_N004_a040.aero_state.h5' haero = h5utils.readh5(fname) tsdata = haero.ts00000 # #", "in range(M): for nn in range(N): Der_num[ipanel] = (np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, ZetaC0[ss][:,mm,nn])) - np.dot(N0[ss][:,mm,nn],", "wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step # recalculate induced velocity everywhere Vnum=comp_vind(zetac,MS) dv=(Vnum-V0)/step", "'Error not decreasing as FD step size is reduced' # assert Er_max_star[ii]<Er_max_star[ii-1],\\ #", "modifications (three directions per vertex point) for kk in range(3*Kzeta): # Initialize to", "Surf=MS.Surfs[ss] #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() u_ext0=Surf.u_ext.copy() zeta_dot0=Surf.zeta_dot.copy() for step in 
Steps: Der_num=0.0*Der_an", "linear, hence # great accuracy is obtained even with large steps. In fact,", "range(nsteps): step = Steps[istep] for ss in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N perturb_vector =", "true only for steady-state linearisation points ''' MS=self.MS Ders_an=assembly.dfunstdgamma_dot(MS.Surfs) step=1e-6 Ders_num=[] n_surf=len(MS.Surfs) for", "in range(3*Kzeta): # Initialize to remove previous movements Surf.zeta=zeta0.copy() # Define DoFs where", "perturb_vector) Der_num = np.zeros(Surf.maps.K) ipanel = 0 skew_omega = algebra.skew(Surf.omega) for mm in", "analytical Der_list,Der_star_list=assembly.dfqsdvind_gamma(MS.Surfs,MS.Surfs_star) # allocate numerical Der_list_num=[] Der_star_list_num=[] for ii in range(n_surf): sub=[] sub_star=[]", "numerical Dercoll_num=np.zeros((3,3)) Dervert_list_num=[] for ii in range(n_surf): Dervert_list_num.append(0.0*Dervert_list[ii]) # store reference grid Zeta0=[]", "print('\\t\\t%.1e\\t%.1e' %(step,ercoll)) #if ercoll>10*step: embed() assert ercoll<10*step, 'Error at collocation point' ### check", "circulation and normal induced velocities MS.get_normal_ind_velocities_at_collocation_points() Zeta0=[] Zeta0_star=[] Vind0=[] N0=[] ZetaC0=[] for ss", "ercoll=np.max(np.abs(Dercoll-Dercoll_num)) print('Error coll.\\tFDstep\\tErrAbs') print('\\t\\t%.1e\\t%.1e' %(step,ercoll)) #if ercoll>10*step: embed() assert ercoll<10*step, 'Error at collocation", "Er_max_star=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] Der_star_an=Der_star_list[ss] Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K fqs0=Surf.fqs.copy()", "err_max, Eabs, Erel class Test_assembly(unittest.TestCase): ''' Test methods into assembly module ''' def", "wakes, only TE is displaced. 
''' def comp_vind(zetac,MS): # comute induced velocity V=np.zeros((3,))", "# ax1=fig.add_subplot(121) # ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs %d' %(ss_in)) # ax2=fig.add_subplot(122) # ax2.spy(ErRel,precision=1e2*step)", "times step size' Er_max.append(er_max) ### Wake Surf.gamma=gamma0.copy() gammaw_TE0=Surf_star.gamma[0,:].copy() M_star,N_star=Surf_star.maps.M,Surf_star.maps.N K_star=Surf_star.maps.K for nn in", "# T.test_dvinddzeta() # T.test_dfqsdvind_zeta() # run setUp after this test # T.setUp() #", "import sharpy.utils.algebra as algebra np.set_printoptions(linewidth=200,precision=3) def max_error_tensor(Pder_an,Pder_num): ''' Finds the maximum error analytical", "tensor may contain NaN or Inf if the analytical derivative is zero. These", "ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<50*step and ermax_rel<50*step, embed()#'Test failed!' # fig=plt.figure('Spy Er", "# perturb wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step ### prepare output surfaces #", "velocity is computed, all other surfaces are looped. 
For wakes, only TE is", "# # Rotating cases # fname = './basic_rotating_wing/basic_wing.data.h5' # haero = h5utils.readh5(fname) #", "ZetaC0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Vind0.append(MS.Surfs[ss].u_ind_coll_norm.copy()) N0.append(MS.Surfs[ss].normals.copy()) # calculate vis", "def test_dfqsdvind_gamma(self): print('------------------------------ Testing assembly.dfqsdvind_gamma') MS=self.MS n_surf=MS.n_surf # analytical Der_list,Der_star_list=assembly.dfqsdvind_gamma(MS.Surfs,MS.Surfs_star) # allocate numerical", "# ax1.spy(Der,precision=step) # ax2 = fig.add_subplot(122) # ax2.spy(DerNum,precision=step) # plt.show() def test_nc_domegazetadzeta(self): \"\"\"", "fname = './basic_rotating_wing/basic_wing.data.h5' # haero = h5utils.readh5(fname) # tsdata = haero.data.aero.timestep_info[-1] # tsdata.omega", "MS.get_normal_ind_velocities_at_collocation_points() MS.verify_non_penetration() MS.verify_aic_coll() MS.get_joukovski_qs() MS.verify_joukovski_qs() self.MS=MS def test_nc_dqcdzeta(self): ''' For each output surface,", "is obtained even with large steps. 
In fact, reducing # the step quickly", "step size is reduced' # assert Er_max_star[ii]<Er_max_star[ii-1],\\ # 'Error not decreasing as FD", "range(N): pp=np.ravel_multi_index( (0,nn), (M_star,N_star)) gammaw_TE=gammaw_TE0.copy() gammaw_TE[nn]+=step Surf.get_joukovski_qs(gammaw_TE=gammaw_TE) df=(Surf.fqs-fqs0)/step Der_star_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_star_an-Der_star_num)) print('Surface %.2d -", "ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] ermax, ErAbs, ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) # relative", "iifinite=np.isfinite(Erel) err_max=0.0 for err_here in Erel[iifinite]: if np.abs(err_here)>err_max: err_max=err_here # Zero elements check", "if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore zeta:", "fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() for step in Steps: Der_num=0.0*Der_an for kk in range(3*Kzeta): Surf.zeta=zeta0.copy() ind_3d=np.unravel_index(kk,", "refernce values Surf.get_joukovski_unsteady() Gamma_dot0=Surf.gamma_dot.copy() F0=Surf.funst.copy() for pp in range(K): mm,nn=np.unravel_index( pp, (M,N) )", "ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Wake%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step, 'Test failed!' 
# fig = plt.figure('Spy", "T.test_dvinddzeta() # T.test_dfqsdvind_zeta() # run setUp after this test # T.setUp() # T.test_dfqsdvind_gamma()", "range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] _,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) #", "step size' Er_max[ss]=er_max # assert error decreases with step size for ss in", "on Gamma_dot, which is true only for steady-state linearisation points ''' MS=self.MS Ders_an=assembly.dfunstdgamma_dot(MS.Surfs)", "MS.get_ind_velocities_at_collocation_points() MS.get_normal_ind_velocities_at_collocation_points() for ss in range(n_surf): print('Surface %.2d:' %ss) Surf=MS.Surfs[ss] # generate non-zero", "derivs',figsize=(12,4)) # ax1=fig.add_subplot(131) # ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs %d to %d' %(ss_in,ss_out)) #", "step size # for ii in range(1,len(Steps)): # assert Er_max[ii]<Er_max[ii-1],\\ # 'Error not", "in Erel[iifinite]: if np.abs(err_here)>err_max: err_max=err_here # Zero elements check iizero=np.abs(Pder_an)<1e-15 for der_here in", "Der_num=Derlist_num[ss_out][ss_in] _,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) # relative error at max abs", "in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Vind0.append(MS.Surfs[ss].u_ind_coll_norm.copy()) N0.append(MS.Surfs[ss].normals.copy()) # calculate vis FDs Steps=[1e-6,] step=Steps[0]", "Der_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD step: %.2e ---> Max error:", "ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Derlist_num[ss_out][ss_in][:,kk]=df.reshape(-1,order='C') ### 
check error for", "reference values at equilibrium fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() u_input_seg0=Surf.u_input_seg.copy() for step in Steps: # Initialize", "Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] Surf=copy.deepcopy(MS.Surfs[ss]) #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy()", "fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() u_input_seg0=Surf.u_input_seg.copy() for step in Steps: # Initialize Der_num = 0.0*Der_an #", "### check error at vert for ss_in in range(n_surf): Der_an=Dervert_list[ss_in] Der_num=Dervert_list_num[ss_in] ermax,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) #", ") Surf.u_ext[ind_3d]+=0.5*step Surf.zeta_dot[ind_3d]+=-0.5*step Surf.get_input_velocities_at_segments() Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss)", "Initialize Er_max=[] # Define steps to run Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): #", "'Test failed!' 
# fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(131) # ax1.spy(ErAbs,precision=1e2*step) #", "ermax, ErAbs, ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) # relative error at max", "ss in range(len(Steps)): step=Steps[ss] for jj in range(3*Surf.maps.Kzeta): # perturb cc_pert=Surf.maps.ind_3d_vert_vect[0][jj] mm_pert=Surf.maps.ind_3d_vert_vect[1][jj] nn_pert=Surf.maps.ind_3d_vert_vect[2][jj]", "PERTURBATION OF THE SURFACE for kk in range(3*Surf.maps.Kzeta): # generate a random perturbation", "# recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore zeta: (include only induced velocity", "M,N=Surf.maps.M,Surf.maps.N perturb_vector = np.zeros(3*Surf.maps.Kzeta) # PERTURBATION OF THE SURFACE for kk in range(3*Surf.maps.Kzeta):", "Derlist_num[ss_out][ss_in][:,kk]=dvind.reshape(-1,order='C') ### check error for ss_out in range(n_surf): for ss_in in range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy()", "are looped. For wakes, only TE is displaced. ''' print('------------------------------- Testing assembly.dfqsdvind_zeta') MS=self.MS", "### check error for ss_out in range(n_surf): for ss_in in range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy() if", "FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-5,] step=Steps[0] ###### bound for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] # perturb", "ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in] M_in,N_in=Surf_in.maps.M,Surf_in.maps.N # perturb for kk in range(3*Surf_in.maps.Kzeta): cc,mm,nn=np.unravel_index(", "Surf is required to ensure that other tests are not affected. 
Needs to", "assert Er_max[ss]<Er_max[ss-1],\\ 'Error not decreasing as FD step size is reduced' print('------------------------------------------------------------ OK')", "in Steps: Der_num=0.0*Der_an for kk in range(3*Kzeta): Surf.u_ext=u_ext0.copy() Surf.zeta_dot=zeta_dot0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.u_ext[ind_3d]+=0.5*step", "np.dot(skew_omega, ZetaC0[ss][:,mm,nn])) - np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, Surf.zetac[:,mm,nn]))) ipanel += 1 # COMPUTE THE ERROR", "Fqs0=[] for ss in range(n_surf): Gamma0.append(MS.Surfs[ss].gamma.copy()) Gammaw0.append(MS.Surfs_star[ss].gamma.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,]", "### analytical derivative # ind velocities computed already Surf.get_input_velocities_at_collocation_points() Der=assembly.uc_dncdzeta(Surf) ### numerical derivative", "step=1e-6 Ders_num=[] n_surf=len(MS.Surfs) for ss in range(n_surf): Surf=MS.Surfs[ss] Kzeta,K=Surf.maps.Kzeta,Surf.maps.K M,N=Surf.maps.M,Surf.maps.N Dnum=np.zeros((3*Kzeta,K)) # get", "Max error: %.2e'%(step,er_max) ) assert er_max<5e1*step, 'Error larger than 50 times step size'", "= np.maximum(error[istep], np.absolute(Der_num-Der_an).max()) print('FD step: %.2e ---> Max error: %.2e'%(step,error[istep]) ) assert error[istep]<5e1*step,", "per vertex point) for kk in range(3*Kzeta): # Initialize to remove previous movements", "for ss_in in range(n_surf): Der_an=Dervert_list[ss_in] Der_num=Dervert_list_num[ss_in] ermax,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) #", "ss in range(n_surf): Der_an=Der_list[ss] Der_star_an=Der_star_list[ss] Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K fqs0=Surf.fqs.copy() gamma0=Surf.gamma.copy() for step", "V=np.zeros((3,)) for ss in range(n_surf): Surf_in=MS.Surfs[ss] Surf_star_in=MS.Surfs_star[ss] V+=Surf_in.get_induced_velocity(zetac) V+=Surf_star_in.get_induced_velocity(zetac) return V 
print('----------------------------------- Testing", "'Test failed!' Der_an=Der_star_list[ss_out][ss_in] Der_num=Der_star_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Wake%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step, 'Test failed!'", "ss in range(n_surf): Der_an=Der_list[ss] Surf=copy.deepcopy(MS.Surfs[ss]) #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() for step", "step=Steps[0] ### loop input surfs for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in] M_in,N_in=Surf_in.maps.M,Surf_in.maps.N #", "values of the variabes Zeta0=[] Zeta0_star=[] N0=[] ZetaC0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy())", "perturb wake TE if mm==M: Surf_star.zeta=Zeta0_star[ss].copy() Surf_star.zeta[cc,0,nn] += perturb_vector[kk] Surf.generate_collocations() # COMPUTE THE", "%.2e'%(step,er_max) ) assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max_star.append(er_max) Surf.gamma=gamma0.copy()", "- absolute, otherwise The function returns the absolute and relative error tensors, and", "print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ercoll<10*step, 'Error at vertices' # fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4))", "# allocate numerical Der_list_num=[] Der_star_list_num=[] for ii in range(n_surf): sub=[] sub_star=[] for jj", "Dervert_list=[] for ss_in in range(n_surf): dcoll_b,dvert_b=assembly.dvinddzeta(zetac,MS.Surfs[ss_in],IsBound=True) dcoll_w,dvert_w=assembly.dvinddzeta(zetac,MS.Surfs_star[ss_in], IsBound=False,M_in_bound=MS.Surfs[ss_in].maps.M) Dercoll+=dcoll_b+dcoll_w Dervert_list.append(dvert_b+dvert_w) # allocate numerical", "with large steps. In fact, reducing # the step quickly introduced round-off error.", "# ax3.set_title('Dcoll an. 
%d to %d' %(ss_out,ss_out)) # #plt.show() # plt.close() def test_uc_dncdzeta(self,PlotFlag=False):", "u_norm0_vec=u_norm0.reshape(-1,order='C') zeta0=Surf.zeta DerNum=np.zeros(Der.shape) Steps=np.array([1e-2,1e-3,1e-4,1e-5,1e-6]) Er_max=0.0*Steps for ss in range(len(Steps)): step=Steps[ss] for jj in", "# perturb cc_pert=Surf.maps.ind_3d_vert_vect[0][jj] mm_pert=Surf.maps.ind_3d_vert_vect[1][jj] nn_pert=Surf.maps.ind_3d_vert_vect[2][jj] zeta_pert=zeta0.copy() zeta_pert[cc_pert,mm_pert,nn_pert]+=step # calculate new normal velocity Surf_pert=surface.AeroGridSurface(Surf.maps,zeta=zeta_pert,", "ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel %d to %d' %(ss_in,ss_out)) # ax3=fig.add_subplot(133) # ax3.spy(Dercoll_list[ss_out],precision=50*step) #", "for step in Steps: Der_num=0.0*Der_an for kk in range(3*Kzeta): Surf.zeta=zeta0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) )", "aerodynamic force with respect to changes in panel circulation. Warning: test assumes the", "vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(131) # ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs %d to %d'", "ss_in in range(n_surf): Surf_in=MS.Surfs_star[ss_in] # perturb for pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gammaw0[ss_in].copy()", "re-computed for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] Surf_out.normals=N0[ss_out].copy() del Surf_out.u_ind_coll_norm try: del Surf_out.u_ind_coll except", "# COMPUTE THE DERIVATIVES Der_an = np.zeros(Surf.maps.K) Der_an = np.dot(Dervert_list[ss], perturb_vector) Der_num =", "Surf.zeta=Zeta0[ss].copy() Surf.zeta[cc,mm,nn] += perturb_vector[kk] # perturb wake TE if mm==M: Surf_star.zeta=Zeta0_star[ss].copy() Surf_star.zeta[cc,0,nn] +=", "for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] dvind=(Surf_out.u_ind_coll_norm-Vind0[ss_out])/step Derlist_num[ss_out][ss_in][:,kk]=dvind.reshape(-1,order='C') ### check error for ss_out in", "# 
for ii in range(1,len(Steps)): # assert Er_max[ii]<Er_max[ii-1],\\ # 'Error not decreasing as", "rotates ''' print('------------------------------ Testing assembly.dfqsdzeta_omega') # rename MS=self.MS n_surf=MS.n_surf # Compute the anaytical", "ss_out in range(n_surf): for ss_in in range(n_surf): Der_an=Der_list[ss_out][ss_in] Der_num=Der_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Bound%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e'", "colocation points due to geometrical variations at vertices Needs to be tested with", "wake TE if mm==M: Surf_star.zeta=Zeta0_star[ss].copy() Surf_star.zeta[cc,0,nn] += perturb_vector[kk] Surf.generate_collocations() # COMPUTE THE DERIVATIVES", "other tests are not affected. ''' print('------------------------------ Testing assembly.dfqsdzeta_vrel0') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsdzeta_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[]", "tsdata = haero.data.aero.timestep_info[-1] # tsdata.omega = [] # for ss in range(haero.data.aero.n_surf): #", "+= perturb_vector[kk] # perturb wake TE if mm==M: Surf_star.zeta=Zeta0_star[ss].copy() Surf_star.zeta[cc,0,nn] += perturb_vector[kk] Surf.generate_collocations()", "induced velocity contrib.) Surf_in.gamma=Gamma0[ss_in].copy() # estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy()", "range(n_surf): sub=[] for jj in range(n_surf): sub.append(0.0*Dervert_list[ii][jj]) Derlist_num.append(sub) # store reference circulation and", "V0=comp_vind(zetac,MS) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ### vertices for ss_in in", "is computed, all other surfaces are looped. For wakes, only TE is displaced.", "gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) # <--- gammaw_0 needs to be used here! 
df=(Surf_out.fqs-fqs0)/step Der_star_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ### check", "error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] assert ermax_rel<1e-16,\\ 'option Merge=True not working correctly, relative error", "input velocity is allocated to both u_ext and zeta_dot ''' print('---------------------------------- Testing assembly.dfqsduinput')", "er_max<5e1*step, 'Error larger than 50 times step size' Er_max.append(er_max) ### Wake Surf.gamma=gamma0.copy() gammaw_TE0=Surf_star.gamma[0,:].copy()", "perturb for pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gammaw0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate induced velocity", "point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d\\t\\t\\tFDstep\\tErrAbs\\tErrRel'%(ss,)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step and ermax_rel<50*step, 'Test failed!' def", "in range(n_surf): Der_an=Dervert_list[ss_in] Der_num=Dervert_list_num[ss_in] ermax,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) # relative error", "ermax_rel=ErRel[iimax] print('Bound%.2d\\t\\t\\tFDstep\\tErrAbs\\tErrRel'%(ss,)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step and ermax_rel<50*step, 'Test failed!' 
def test_wake_prop(self): MS=self.MS", "Er_max[ss]=er_max # assert error decreases with step size for ss in range(1,len(Steps)): assert", "Er_max[ii]<Er_max[ii-1],\\ # 'Error not decreasing as FD step size is reduced' # assert", "error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound and wake%.2d\\tFDstep\\tErrAbs\\tErrRel'%ss_in) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel))", "Vnum=comp_vind(zetac,MS) dv=(Vnum-V0)/step Dervert_list_num[ss_in][:,kk]=dv.reshape(-1,order='C') # restore Surf_in.zeta=Zeta0[ss_in].copy() if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() ### check error at", "relative error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] assert ermax_rel<1e-16,\\ 'option Merge=True", "# relative error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel))", "Relative error check: remove NaN and inf... iifinite=np.isfinite(Erel) err_max=0.0 for err_here in Erel[iifinite]:", "print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) ) assert er_max<5e1*step, 'Error larger than", "surfaces are looped. For wakes, only TE is displaced. ''' def comp_vind(zetac,MS): #", "''' Finds the maximum error analytical derivatives Pder_an. 
The error is: - relative,", "step=Steps[0] ### vertices for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in] M_in,N_in=Surf_in.maps.M,Surf_in.maps.N # perturb for", "abs %d' %(ss_in)) # ax2=fig.add_subplot(122) # ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel %d' %(ss_in)) #", "Der_star_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ### check error Er_max=[] Er_max_star=[] for ss_out in range(n_surf): for ss_in in", "gamma is linear, hence # great accuracy is obtained even with large steps.", "test_dfqsduinput(self): ''' Step change in input velocity is allocated to both u_ext and", "Dercoll=np.zeros((3,3)) Dervert_list=[] for ss_in in range(n_surf): dcoll_b,dvert_b=assembly.dvinddzeta(zetac,MS.Surfs[ss_in],IsBound=True) dcoll_w,dvert_w=assembly.dvinddzeta(zetac,MS.Surfs_star[ss_in], IsBound=False,M_in_bound=MS.Surfs[ss_in].maps.M) Dercoll+=dcoll_b+dcoll_w Dervert_list.append(dvert_b+dvert_w) # allocate", "to %d' %(ss_in,ss_out)) # ax3=fig.add_subplot(133) # ax3.spy(Dercoll_list[ss_out],precision=50*step) # ax3.set_title('Dcoll an. %d to %d'", "Surf_star_in.zeta[cc,0,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore zeta: (include only induced", "range(M): for nn in range(N): Der_num[ipanel] = (np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, ZetaC0[ss][:,mm,nn])) - np.dot(N0[ss][:,mm,nn], np.dot(skew_omega,", "%d to %d' %(ss_out,ss_out)) # #plt.show() # plt.close() def test_dfunstdgamma_dot(self): ''' Test derivative", "''' Test methods into assembly module ''' def setUp(self): # select test case", "is required to ensure that other tests are not affected. 
''' print('------------------------------ Testing", "size for ss in range(1,len(Steps)): assert Er_max[ss]<Er_max[ss-1],\\ 'Error not decreasing as FD step", "%(step,ercoll)) #if ercoll>10*step: embed() assert ercoll<10*step, 'Error at collocation point' ### check error", "Testing assembly.dvinddzeta') MS=self.MS n_surf=MS.n_surf zetac=.5*(MS.Surfs[0].zeta[:,1,2]+MS.Surfs[0].zeta[:,1,3]) Dercoll=np.zeros((3,3)) Dervert_list=[] for ss_in in range(n_surf): dcoll_b,dvert_b=assembly.dvinddzeta(zetac,MS.Surfs[ss_in],IsBound=True) dcoll_w,dvert_w=assembly.dvinddzeta(zetac,MS.Surfs_star[ss_in],", "normal induced velocities MS.get_normal_ind_velocities_at_collocation_points() Zeta0=[] Zeta0_star=[] Vind0=[] N0=[] ZetaC0=[] for ss in range(n_surf):", "Steps=np.array([1e-2,1e-3,1e-4,1e-5,1e-6]) Er_max=0.0*Steps for ss in range(len(Steps)): step=Steps[ss] for jj in range(3*Surf.maps.Kzeta): # perturb", "this test fails: the dependency on gamma is linear, hence # great accuracy", "# perturb wake TE if mm==M: Surf_star.zeta=Zeta0_star[ss].copy() Surf_star.zeta[cc,0,nn] += perturb_vector[kk] Surf.generate_collocations() # COMPUTE", "Steps=[1e-5,] step=Steps[0] ###### bound for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] # perturb for pp", "'Error at vertices' # fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(121) # ax1.spy(ErAbs,precision=1e2*step)", "<--- gammaw_0 needs to be used here! df=(Surf_out.fqs-fqs0)/step Der_star_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ### check error Er_max=[]", "in range(n_surf): Der_an=Der_list[ss_out][ss_in] Der_num=Der_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Bound%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step, 'Test failed!'", "step quickly introduced round-off error. 
# # assert error decreases with step size", "in range(nsteps): step = Steps[istep] for ss in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N perturb_vector", "at vertices' # fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(121) # ax1.spy(ErAbs,precision=1e2*step) #", "ind. vel on output to ensure they are re-computed for ss_out in range(n_surf):", "hence # great accuracy is obtained even with large steps. In fact, reducing", "OF THE SURFACE for kk in range(3*Surf.maps.Kzeta): # generate a random perturbation between", "embed()#'Test failed!' # fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(131) # ax1.spy(ErAbs,precision=1e2*step) #", "step: %.2e ---> Max error: %.2e'%(step,error[istep]) ) assert error[istep]<5e1*step, 'Error larger than 50", "is reduced' # assert Er_max_star[ii]<Er_max_star[ii-1],\\ # 'Error not decreasing as FD step size", "2018 ''' import os import copy import warnings import unittest import itertools import", "Warning: test assumes the derivative of the unsteady force only depends on Gamma_dot,", "geometrical variations at vertices Needs to be tested with a case that actually", "(3,M_in+1,N_in+1) ) # perturb bound Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step # perturb wake TE if mm==M_in:", "in range(n_surf): Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] _,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs)", "not decreasing as FD step size is reduced' print('------------------------------------------------------------ OK') if PlotFlag: pass", "derivative # ind velocities computed already Surf.get_input_velocities_at_collocation_points() Der=assembly.uc_dncdzeta(Surf) ### numerical derivative #Surf.get_normal_input_velocities_at_collocation_points() u_tot0=Surf.u_ind_coll+Surf.u_input_coll", "a case that actually 
rotates ''' print('------------------------------ Testing assembly.dfqsdzeta_omega') # rename MS=self.MS n_surf=MS.n_surf", "test_dfqsdzeta_omega(self): ''' Note: the get_joukovski_qs method re-computes the induced velocity at the panel", "n_surf=MS.n_surf MS.get_ind_velocities_at_collocation_points() MS.get_normal_ind_velocities_at_collocation_points() for ss in range(n_surf): print('Surface %.2d:' %ss) Surf=MS.Surfs[ss] # generate", "not affected. ''' print('------------------------------ Testing assembly.dfqsdzeta_vrel0') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsdzeta_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss", "of Surf is required to ensure that other tests are not affected. '''", "allocate numerical Der_list_num=[] Der_star_list_num=[] for ii in range(n_surf): sub=[] sub_star=[] for jj in", "than 50 times step size' Er_max_star.append(er_max) Surf.gamma=gamma0.copy() ### Warning: this test fails: the", "ss in range(1,len(Steps)): assert Er_max[ss]<Er_max[ss-1],\\ 'Error not decreasing as FD step size is", "state equation terms # T.test_uc_dncdzeta() # T.test_nc_dqcdzeta() ### force equation (unsteady) # T.test_dfunstdgamma_dot()", "Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N perturb_vector = np.zeros(3*Surf.maps.Kzeta) # PERTURBATION OF THE SURFACE for kk", "# 'Error not decreasing as FD step size is reduced' # assert Er_max_star[ii]<Er_max_star[ii-1],\\", "np.zeros(Surf.maps.K) ipanel = 0 skew_omega = algebra.skew(Surf.omega) for mm in range(M): for nn", "Dervert_list_num[ss_in][:,kk]=dv.reshape(-1,order='C') # restore Surf_in.zeta=Zeta0[ss_in].copy() if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() ### check error at colloc Dercoll_num=np.zeros((3,3))", "assert ercoll<10*step, 'Error at vertices' # fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(121)", "Vind0=[] N0=[] ZetaC0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) 
ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Vind0.append(MS.Surfs[ss].u_ind_coll_norm.copy()) N0.append(MS.Surfs[ss].normals.copy()) #", "FD derivative DerNum[:,jj]=(u_norm_vec-u_norm0_vec)/step er_max=np.max(np.abs(Der-DerNum)) print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) ) assert", "Steps: Der_num=0.0*Der_an Der_star_num=0.0*Der_star_an ### Bound for pp in range(K): mm=Surf.maps.ind_2d_pan_scal[0][pp] nn=Surf.maps.ind_2d_pan_scal[1][pp] Surf.gamma=gamma0.copy() Surf.gamma[mm,nn]+=step", "range(n_surf): Surf_out=MS.Surfs[ss_out] Surf_out.normals=N0[ss_out].copy() del Surf_out.u_ind_coll_norm try: del Surf_out.u_ind_coll except AttributeError: pass ### recalculate", "range(n_surf): Der_an=Der_list[ss_out][ss_in] Der_num=Der_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Bound%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step, 'Test failed!' 
Der_an=Der_star_list[ss_out][ss_in]", "mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gamma0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore circulation:", "%.2d - wake:' %ss) print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) ) assert", "sub=[] # for jj in range(n_surf): # sub.append(0.0*Dervert_list[ii][jj]) # Derlist_num.append(sub) # Store the", "= fig.add_subplot(121) # ax1.spy(Der,precision=step) # ax2 = fig.add_subplot(122) # ax2.spy(DerNum,precision=step) # plt.show() def", "get_ind_velocities_at_segments and recover the previous grid Surf.get_input_velocities_at_segments() Surf.zeta=zeta0.copy() # Compute new forces Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:])", "V print('----------------------------------- Testing assembly.dvinddzeta') MS=self.MS n_surf=MS.n_surf zetac=.5*(MS.Surfs[0].zeta[:,1,2]+MS.Surfs[0].zeta[:,1,3]) Dercoll=np.zeros((3,3)) Dervert_list=[] for ss_in in range(n_surf):", "T.test_dfqsdzeta_vrel0() # T.test_dfqsdgamma_vrel0() # ### state equation terms # T.test_uc_dncdzeta() # T.test_nc_dqcdzeta() ###", "%.2e ---> Max error: %.2e'%(step,er_max) ) assert er_max<5e1*step, 'Error larger than 50 times", "if PlotFlag: pass # fig = plt.figure('Spy Der',figsize=(10,4)) # ax1 = fig.add_subplot(121) #", "Surf_in.zeta=Zeta0[ss_in].copy() if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() ### check error at colloc Dercoll_num=np.zeros((3,3)) for cc in", "Derlist_num.append(sub) # Store the initial values of the variabes Zeta0=[] Zeta0_star=[] N0=[] ZetaC0=[]", "for istep in range(nsteps): step = Steps[istep] for ss in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss]", "of unsteady aerodynamic force with respect to changes in panel circulation. Warning: test", "velocity at the panel segments. 
A copy of Surf is required to ensure", "# PERTURBATION OF THE SURFACE for kk in range(3*Surf.maps.Kzeta): # generate a random", "and ermax_rel<50*step, embed()#'Test failed!' # fig=plt.figure('Spy Er vs coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(131) #", "test assumes the derivative of the unsteady force only depends on Gamma_dot, which", "Der_num=0.0*Der_an for kk in range(3*Kzeta): Surf.u_ext=u_ext0.copy() Surf.zeta_dot=zeta_dot0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.u_ext[ind_3d]+=0.5*step Surf.zeta_dot[ind_3d]+=-0.5*step Surf.get_input_velocities_at_segments()", "than 50 times the step size' if istep > 0: assert error[istep]<=error[istep-1],\\ 'Error", "ZetaC0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) N0.append(MS.Surfs[ss].normals.copy()) # Computation Steps=[1e-2, 1e-4,", "modifying the original for other tests Surf=copy.deepcopy(MS.Surfs[ss]) # Define variables M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta", "sub_star.append(0.0*Der_star_list[ii][jj]) Der_list_num.append(sub) Der_star_list_num.append(sub_star) # store reference circulation and force Gamma0=[] Gammaw0=[] Fqs0=[] for", "Er_max.append(er_max) def test_dfqsduinput(self): ''' Step change in input velocity is allocated to both", "relative error (%.3e) too high!' 
%ErRel # allocate numerical Derlist_num=[] for ii in", "= (np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, ZetaC0[ss][:,mm,nn])) - np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, Surf.zetac[:,mm,nn]))) ipanel += 1 # COMPUTE", "50 times step size' Er_max.append(er_max) def test_dfqsduinput(self): ''' Step change in input velocity", "M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta # Save the reference values at equilibrium fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() u_input_seg0=Surf.u_input_seg.copy()", "Surf_in.gamma=Gamma0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore circulation: (include only", "import os import copy import warnings import unittest import itertools import numpy as", "# ax1.spy(ErMat,precision=50*step) # plt.show() def test_dvinddzeta(self): ''' For each output surface, there induced", "ermax<5e2*step and ermax_rel<50*step, 'Test failed!' def test_wake_prop(self): MS=self.MS C_list,Cstar_list=assembly.wake_prop(MS.Surfs,MS.Surfs_star) n_surf=len(MS.Surfs) for ss in", "Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ### loop input surfs for", "if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] ermax, ErAbs, ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) #", "actually rotates ''' print('------------------------------ Testing assembly.dfqsdzeta_omega') # rename MS=self.MS n_surf=MS.n_surf # Compute the", "N0=[] ZetaC0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) N0.append(MS.Surfs[ss].normals.copy()) # Computation Steps=[1e-2,", "size' if istep > 0: assert error[istep]<=error[istep-1],\\ 'Error not decreasing as FD step", "Zeta0.append(MS.Surfs[ss].zeta.copy()) 
Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ### loop input", "decreasing as FD step size is reduced' print('------------------------------------------------------------ OK') def test_dfqsdgamma_vrel0(self): print('----------------------------- Testing", "search for maximum error, and absolute error is checked. ''' Eabs=np.abs(Pder_num-Pder_an) nnzvec=Pder_an!=0 Erel=np.zeros(Pder_an.shape)", "+= step*(0.2*np.random.rand()+0.9) cc,mm,nn=np.unravel_index( kk, (3,M+1,N+1) ) # perturb bound. vertices and collocation Surf.zeta=Zeta0[ss].copy()", "print('---------------------------------- Testing assembly.dfqsduinput') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsduinput(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss]", "# tsdata.omega.append(haero.data.structure.timestep_info[-1].for_vel[3:6]) MS=multisurfaces.MultiAeroGridSurfaces(tsdata) MS.get_normal_ind_velocities_at_collocation_points() MS.verify_non_penetration() MS.verify_aic_coll() MS.get_joukovski_qs() MS.verify_joukovski_qs() self.MS=MS def test_nc_dqcdzeta(self): ''' For", ") assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max.append(er_max) def test_dfqsdvind_gamma(self):", "+= perturb_vector[kk] Surf.generate_collocations() # COMPUTE THE DERIVATIVES Der_an = np.zeros(Surf.maps.K) Der_an = np.dot(Dervert_list[ss],", "Warning: this test fails: the dependency on gamma is linear, hence # great", "perturb for kk in range(3*Surf_in.maps.Kzeta): cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) ) # perturb bound Surf_in.zeta=Zeta0[ss_in].copy()", "Der_num=Der_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Bound%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step, 'Test failed!' 
Der_an=Der_star_list[ss_out][ss_in] Der_num=Der_star_list_num[ss_out][ss_in] ErMat=Der_an-Der_num", "zeta0=Surf.zeta.copy() u_input_seg0=Surf.u_input_seg.copy() for step in Steps: # Initialize Der_num = 0.0*Der_an # Loop", "calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-6,] step=Steps[0] ### vertices for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in]", "range(n_surf): Gamma0.append(MS.Surfs[ss].gamma.copy()) Gammaw0.append(MS.Surfs_star[ss].gamma.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-5,] step=Steps[0] ###### bound", "try: del Surf_out.u_ind_coll except AttributeError: pass ### recalculate MS.get_normal_ind_velocities_at_collocation_points() # restore Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zetac=ZetaC0[ss_in].copy('F')", "TE is displaced. ''' print('------------------------------- Testing assembly.dfqsdvind_zeta') MS=self.MS n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.dfqsdvind_zeta(MS.Surfs,MS.Surfs_star) #", "for step in Steps: # Initialize Der_num = 0.0*Der_an # Loop through the", "range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) N0.append(MS.Surfs[ss].normals.copy()) # Computation Steps=[1e-2, 1e-4, 1e-6] nsteps = len(Steps)", "the step perturb_vector[kk] += step*(0.2*np.random.rand()+0.9) cc,mm,nn=np.unravel_index( kk, (3,M+1,N+1) ) # perturb bound. vertices", "fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) # <--- gammaw_0 needs to be used here! 
df=(Surf_out.fqs-fqs0)/step Der_star_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C')", "n_surf=MS.n_surf Der_list=assembly.dfqsdzeta_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] Surf=copy.deepcopy(MS.Surfs[ss]) #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K", "plt.close() def test_dfunstdgamma_dot(self): ''' Test derivative of unsteady aerodynamic force with respect to", "# T=Test_assembly() # T.setUp() # ### force equation (qs term) # T.test_dvinddzeta() #", "numerical Derlist_num=[] for ii in range(n_surf): sub=[] for jj in range(n_surf): sub.append(0.0*Dervert_list[ii][jj]) Derlist_num.append(sub)", "in Steps: Der_num=0.0*Der_an Der_star_num=0.0*Der_star_an ### Bound for pp in range(K): mm=Surf.maps.ind_2d_pan_scal[0][pp] nn=Surf.maps.ind_2d_pan_scal[1][pp] Surf.gamma=gamma0.copy()", "Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Derlist_num[ss_out][ss_in][:,kk]=df.reshape(-1,order='C') ### check error for ss_out in range(n_surf): for ss_in", "Finds the maximum error analytical derivatives Pder_an. 
The error is: - relative, if", "Zeta0_star=[] N0=[] ZetaC0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) N0.append(MS.Surfs[ss].normals.copy()) # Computation", "at equilibrium fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() u_input_seg0=Surf.u_input_seg.copy() for step in Steps: # Initialize Der_num =", "Der_all_exp=np.block(Dervert_list)+scalg.block_diag(*Dercoll_list) Der_all=np.block( assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star,Merge=True) ) _,ErAbs,ErRel=max_error_tensor(Der_all,Der_all_exp) # max absolute error ermax=np.max(ErAbs) # relative error", "and the 110% of the step perturb_vector[kk] += step*(0.2*np.random.rand()+0.9) cc,mm,nn=np.unravel_index( kk, (3,M+1,N+1) )", "allocate numerical Dercoll_num=np.zeros((3,3)) Dervert_list_num=[] for ii in range(n_surf): Dervert_list_num.append(0.0*Dervert_list[ii]) # store reference grid", "# restore circulation: (include only induced velocity contrib.) 
Surf_in.gamma=Gammaw0[ss_in].copy() # estimate derivatives for", "for ii in range(n_surf): sub=[] for jj in range(n_surf): sub.append(0.0*Dervert_list[ii][jj]) Derlist_num.append(sub) # store", "ipanel = 0 skew_omega = algebra.skew(Surf.omega) for mm in range(M): for nn in", "assembly.test_nc_dqcdzeta') MS=self.MS n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star) # check option Der_all_exp=np.block(Dervert_list)+scalg.block_diag(*Dercoll_list) Der_all=np.block( assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star,Merge=True) )", "N0.append(MS.Surfs[ss].normals.copy()) # Computation Steps=[1e-2, 1e-4, 1e-6] nsteps = len(Steps) error = np.zeros((nsteps,)) for", "range(n_surf): # sub.append(0.0*Dervert_list[ii][jj]) # Derlist_num.append(sub) # Store the initial values of the variabes", "is reduced' print('------------------------------------------------------------ OK') def test_dfqsdgamma_vrel0(self): print('----------------------------- Testing assembly.dfqsdgamma_vrel0') MS=self.MS n_surf=MS.n_surf Der_list,Der_star_list=assembly.dfqsdgamma_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[]", "- relative, if the element of Pder_an is nonzero - absolute, otherwise The", "# for ii in range(n_surf): # sub=[] # for jj in range(n_surf): #", "step size' Er_max.append(er_max) def test_dfqsduinput(self): ''' Step change in input velocity is allocated", "test_wake_prop(self): MS=self.MS C_list,Cstar_list=assembly.wake_prop(MS.Surfs,MS.Surfs_star) n_surf=len(MS.Surfs) for ss in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] N=Surf.maps.N K_star=Surf_star.maps.K C=C_list[ss]", "for kk in range(3*Kzeta): Surf.u_ext=u_ext0.copy() Surf.zeta_dot=zeta_dot0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.u_ext[ind_3d]+=0.5*step Surf.zeta_dot[ind_3d]+=-0.5*step Surf.get_input_velocities_at_segments() Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:])", "assembly.dvinddzeta') MS=self.MS n_surf=MS.n_surf 
zetac=.5*(MS.Surfs[0].zeta[:,1,2]+MS.Surfs[0].zeta[:,1,3]) Dercoll=np.zeros((3,3)) Dervert_list=[] for ss_in in range(n_surf): dcoll_b,dvert_b=assembly.dvinddzeta(zetac,MS.Surfs[ss_in],IsBound=True) dcoll_w,dvert_w=assembly.dvinddzeta(zetac,MS.Surfs_star[ss_in], IsBound=False,M_in_bound=MS.Surfs[ss_in].maps.M)", "ss_in in range(n_surf): dcoll_b,dvert_b=assembly.dvinddzeta(zetac,MS.Surfs[ss_in],IsBound=True) dcoll_w,dvert_w=assembly.dvinddzeta(zetac,MS.Surfs_star[ss_in], IsBound=False,M_in_bound=MS.Surfs[ss_in].maps.M) Dercoll+=dcoll_b+dcoll_w Dervert_list.append(dvert_b+dvert_w) # allocate numerical Dercoll_num=np.zeros((3,3)) Dervert_list_num=[]", "= np.zeros(Surf.maps.K) ipanel = 0 skew_omega = algebra.skew(Surf.omega) for mm in range(M): for", "Surf_star_in.zeta[cc,0,nn]+=step ### prepare output surfaces # - ensure normals are unchanged # -", "for ss in range(n_surf): Der_an=Der_list[ss] Surf=copy.deepcopy(MS.Surfs[ss]) #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() for", "of the variabes Zeta0=[] Zeta0_star=[] N0=[] ZetaC0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F'))", "coll derivs',figsize=(12,4)) # ax1=fig.add_subplot(121) # ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs %d' %(ss_in)) # ax2=fig.add_subplot(122)", "other tests Surf=copy.deepcopy(MS.Surfs[ss]) # Define variables M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta # Save the reference", "error decreases with step size for ss in range(1,len(Steps)): assert Er_max[ss]<Er_max[ss-1],\\ 'Error not", "%(step,ermax)) assert ermax<50*step, 'Test failed!' 
Der_an=Der_star_list[ss_out][ss_in] Der_num=Der_star_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Wake%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert", "# great accuracy is obtained even with large steps. In fact, reducing #", "test_dfqsdvind_gamma(self): print('------------------------------ Testing assembly.dfqsdvind_gamma') MS=self.MS n_surf=MS.n_surf # analytical Der_list,Der_star_list=assembly.dfqsdvind_gamma(MS.Surfs,MS.Surfs_star) # allocate numerical Der_list_num=[]", "store reference grid Zeta0=[] Zeta0_star=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) V0=comp_vind(zetac,MS) #", "Surf.u_ext=u_ext0.copy() Surf.zeta_dot=zeta_dot0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.u_ext[ind_3d]+=0.5*step Surf.zeta_dot[ind_3d]+=-0.5*step Surf.get_input_velocities_at_segments() Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface", "*Surf_star.gamma.shape ) gvec=np.dot(C,gamma.reshape(-1))+np.dot(Cstar,gamma_star.reshape(-1)) gvec_ref=np.concatenate((gamma[-1,:],gamma_star[:-1,:].reshape(-1))) assert np.max(np.abs(gvec-gvec_ref))<1e-15,\\ 'Prop. 
from trailing edge not correct' if", "Testing assembly.uc_dncdzeta') MS=self.MS n_surf=MS.n_surf MS.get_ind_velocities_at_collocation_points() MS.get_normal_ind_velocities_at_collocation_points() for ss in range(n_surf): print('Surface %.2d:' %ss)", "range(n_surf): sub=[] sub_star=[] for jj in range(n_surf): sub.append(0.0*Der_list[ii][jj]) sub_star.append(0.0*Der_star_list[ii][jj]) Der_list_num.append(sub) Der_star_list_num.append(sub_star) # store", "at collocation point' ### check error at vert for ss_in in range(n_surf): Der_an=Dervert_list[ss_in]", "size' Er_max[ss]=er_max # assert error decreases with step size for ss in range(1,len(Steps)):", "Surf_in.gamma[mm,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore circulation: (include only induced", "# T.test_dfqsdgamma_vrel0() # ### state equation terms # T.test_uc_dncdzeta() # T.test_nc_dqcdzeta() ### force", "MS=self.MS n_surf=MS.n_surf zetac=.5*(MS.Surfs[0].zeta[:,1,2]+MS.Surfs[0].zeta[:,1,3]) Dercoll=np.zeros((3,3)) Dervert_list=[] for ss_in in range(n_surf): dcoll_b,dvert_b=assembly.dvinddzeta(zetac,MS.Surfs[ss_in],IsBound=True) dcoll_w,dvert_w=assembly.dvinddzeta(zetac,MS.Surfs_star[ss_in], IsBound=False,M_in_bound=MS.Surfs[ss_in].maps.M) Dercoll+=dcoll_b+dcoll_w", "case fname = os.path.dirname(os.path.abspath(__file__)) + '/h5input/goland_mod_Nsurf01_M003_N004_a040.aero_state.h5' haero = h5utils.readh5(fname) tsdata = haero.ts00000 #", "analytical derivative # ind velocities computed already Surf.get_input_velocities_at_collocation_points() Der=assembly.uc_dncdzeta(Surf) ### numerical derivative #Surf.get_normal_input_velocities_at_collocation_points()", "Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] #Surf=copy.deepcopy(MS.Surfs[ss]) Surf=MS.Surfs[ss] #Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta fqs0=Surf.fqs.copy()", "# fname = './basic_rotating_wing/basic_wing.data.h5' # 
haero = h5utils.readh5(fname) # tsdata = haero.data.aero.timestep_info[-1] #", "the original for other tests Surf=copy.deepcopy(MS.Surfs[ss]) # Define variables M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K Kzeta=Surf.maps.Kzeta #", "Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD step: %.2e ---> Max error:", "an. %d to %d' %(ss_out,ss_out)) # #plt.show() # plt.close() def test_uc_dncdzeta(self,PlotFlag=False): print('---------------------------------- Testing", "gvec_ref=np.concatenate((gamma[-1,:],gamma_star[:-1,:].reshape(-1))) assert np.max(np.abs(gvec-gvec_ref))<1e-15,\\ 'Prop. from trailing edge not correct' if __name__=='__main__': unittest.main() #", "zero. These elements are filtered out during the search for maximum error, and", "of external force Surf.u_ext[0,:,:]=Surf.u_ext[0,:,:]-20.0 Surf.u_ext[1,:,:]=Surf.u_ext[1,:,:]+60.0 Surf.u_ext[2,:,:]=Surf.u_ext[2,:,:]+30.0 Surf.u_ext=Surf.u_ext+np.random.rand(*Surf.u_ext.shape) ### analytical derivative # ind velocities", "### check error Er_max=[] Er_max_star=[] for ss_out in range(n_surf): for ss_in in range(n_surf):", "Surf_out.u_ind_coll except AttributeError: pass ### recalculate MS.get_normal_ind_velocities_at_collocation_points() # restore Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zetac=ZetaC0[ss_in].copy('F') Surf_star_in.zeta=Zeta0_star[ss_in].copy() #", "Er_max_star.append(er_max) Surf.gamma=gamma0.copy() ### Warning: this test fails: the dependency on gamma is linear,", "size' Er_max.append(er_max) def test_dfqsduinput(self): ''' Step change in input velocity is allocated to", "for ss in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N perturb_vector = np.zeros(3*Surf.maps.Kzeta) # PERTURBATION OF", "ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<5e2*step and 
ermax_rel<50*step, 'Test failed!' # fig=plt.figure('Spy Er", "'Test failed!' def test_wake_prop(self): MS=self.MS C_list,Cstar_list=assembly.wake_prop(MS.Surfs,MS.Surfs_star) n_surf=len(MS.Surfs) for ss in range(n_surf): Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss]", "Rotating cases # fname = './basic_rotating_wing/basic_wing.data.h5' # haero = h5utils.readh5(fname) # tsdata =", "TE is displaced. ''' print('----------------------------- Testing assembly.test_nc_dqcdzeta') MS=self.MS n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.nc_dqcdzeta(MS.Surfs,MS.Surfs_star) #", "perturb bound Surf_in.zeta=Zeta0[ss_in].copy() Surf_in.zeta[cc,mm,nn]+=step # perturb wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step #", "Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step # recalculate induced velocity everywhere Vnum=comp_vind(zetac,MS) dv=(Vnum-V0)/step Dervert_list_num[ss_in][:,kk]=dv.reshape(-1,order='C') # restore Surf_in.zeta=Zeta0[ss_in].copy()", "er_max=np.max(np.abs(Der_star_an-Der_star_num)) print('Surface %.2d - wake:' %ss) print('FD step: %.2e ---> Max error: %.2e'%(step,er_max)", "with the analytica derivatives Der_an=Der_an_list[ss] # Copy to avoid modifying the original for", "# Save the reference values at equilibrium fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() u_input_seg0=Surf.u_input_seg.copy() for step in", "range(n_surf): Surf_in=MS.Surfs[ss_in] Surf_star_in=MS.Surfs_star[ss_in] M_in,N_in=Surf_in.maps.M,Surf_in.maps.N # perturb for kk in range(3*Surf_in.maps.Kzeta): cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1)", "relative error at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound and wake%.2d\\tFDstep\\tErrAbs\\tErrRel'%ss_in) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e'", "'Error larger than 50 times step size' Er_max.append(er_max) def test_dfqsdzeta_omega(self): ''' Note: the", "Surf_in.gamma=Gammaw0[ss_in].copy() 
# estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) #", "force Surf.u_ext[0,:,:]=Surf.u_ext[0,:,:]-20.0 Surf.u_ext[1,:,:]=Surf.u_ext[1,:,:]+60.0 Surf.u_ext[2,:,:]=Surf.u_ext[2,:,:]+30.0 Surf.u_ext=Surf.u_ext+np.random.rand(*Surf.u_ext.shape) ### analytical derivative # ind velocities computed already", "assert ermax<50*step, 'Test failed!' Der_an=Der_star_list[ss_out][ss_in] Der_num=Der_star_list_num[ss_out][ss_in] ErMat=Der_an-Der_num ermax=np.max(np.abs(ErMat)) print('Wake%.2d->Bound%.2d\\tFDstep\\tError'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e' %(step,ermax)) assert ermax<50*step,", "pp, (M,N) ) Surf.gamma_dot=Gamma_dot0.copy() Surf.gamma_dot[mm,nn]+=step Surf.get_joukovski_unsteady() dF=(Surf.funst-F0)/step Dnum[:,pp]=dF.reshape(-1) # restore Surf.gamma_dot=Gamma_dot0.copy() ### verify", "introduced round-off error. # # assert error decreases with step size # for", "n_surf=MS.n_surf Der_list,Der_star_list=assembly.dfqsdgamma_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Er_max_star=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] Der_star_an=Der_star_list[ss] Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss]", "kk in range(3*Surf.maps.Kzeta): # generate a random perturbation between the 90% and the", "only induced velocity contrib.) Surf_in.zeta=Zeta0[ss_in].copy() Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives for ss_out in range(n_surf):", "rotates \"\"\" print('----------------------------- Testing assembly.test_nc_domegazetadzeta') MS=self.MS n_surf=MS.n_surf # analytical Dervert_list = assembly.nc_domegazetadzeta(MS.Surfs,MS.Surfs_star) #", "(include only induced velocity contrib.) Surf_in.zeta=Zeta0[ss_in].copy() Surf_star_in.zeta=Zeta0_star[ss_in].copy() # estimate derivatives for ss_out in", "surface, where induced velocity is computed, all other surfaces are looped. 
For wakes,", "if the analytical derivative is zero. These elements are filtered out during the", "Der_an=Dervert_list[ss_out][ss_in].copy() if ss_in==ss_out: Der_an=Der_an+Dercoll_list[ss_out] Der_num=Derlist_num[ss_out][ss_in] _,ErAbs,ErRel=max_error_tensor(Der_an,Der_num) # max absolute error ermax=np.max(ErAbs) # relative", "np.dot(Dervert_list[ss], perturb_vector) Der_num = np.zeros(Surf.maps.K) ipanel = 0 skew_omega = algebra.skew(Surf.omega) for mm", "at max abs error point iimax=np.unravel_index(np.argmax(ErAbs),ErAbs.shape) ermax_rel=ErRel[iimax] print('Bound%.2d->Bound%.2d\\tFDstep\\tErrAbs\\tErrRel'%(ss_in,ss_out)) print('\\t\\t\\t%.1e\\t%.1e\\t%.1e' %(step,ermax,ermax_rel)) assert ermax<50*step and", "Fqs0.append(MS.Surfs[ss].fqs.copy()) # calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-5,] step=Steps[0] ###### bound for ss_in in", "ax2.spy(DerNum,precision=step) # plt.show() def test_nc_domegazetadzeta(self): \"\"\" Variation at colocation points due to geometrical", "size is reduced' # assert Er_max_star[ii]<Er_max_star[ii-1],\\ # 'Error not decreasing as FD step", "jj in range(3*Surf.maps.Kzeta): # perturb cc_pert=Surf.maps.ind_3d_vert_vect[0][jj] mm_pert=Surf.maps.ind_3d_vert_vect[1][jj] nn_pert=Surf.maps.ind_3d_vert_vect[2][jj] zeta_pert=zeta0.copy() zeta_pert[cc_pert,mm_pert,nn_pert]+=step # calculate new", "derivs',figsize=(12,4)) # ax1=fig.add_subplot(121) # ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs %d' %(ss_in)) # ax2=fig.add_subplot(122) #", "unsteady aerodynamic force with respect to changes in panel circulation. 
Warning: test assumes", "(M_star,N_star)) gammaw_TE=gammaw_TE0.copy() gammaw_TE[nn]+=step Surf.get_joukovski_qs(gammaw_TE=gammaw_TE) df=(Surf.fqs-fqs0)/step Der_star_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_star_an-Der_star_num)) print('Surface %.2d - wake:' %ss) print('FD", "Surf.zeta_dot=zeta_dot0.copy() ind_3d=np.unravel_index(kk, (3,M+1,N+1) ) Surf.u_ext[ind_3d]+=0.5*step Surf.zeta_dot[ind_3d]+=-0.5*step Surf.get_input_velocities_at_segments() Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d", "times step size' Er_max[ss]=er_max # assert error decreases with step size for ss", "numerical Der_list_num=[] Der_star_list_num=[] for ii in range(n_surf): sub=[] sub_star=[] for jj in range(n_surf):", ") Surf.gamma_dot=Gamma_dot0.copy() Surf.gamma_dot[mm,nn]+=step Surf.get_joukovski_unsteady() dF=(Surf.funst-F0)/step Dnum[:,pp]=dF.reshape(-1) # restore Surf.gamma_dot=Gamma_dot0.copy() ### verify ermax, ErAbs,", "random perturbation between the 90% and the 110% of the step perturb_vector[kk] +=", "initial values of the variabes Zeta0=[] Zeta0_star=[] N0=[] ZetaC0=[] for ss in range(n_surf):", "sub.append(0.0*Dervert_list[ii][jj]) Derlist_num.append(sub) # store reference circulation and force Zeta0=[] Zeta0_star=[] Fqs0=[] for ss", "ax1.spy(ErAbs,precision=1e2*step) # ax1.set_title('error abs %d' %(ss_in)) # ax2=fig.add_subplot(122) # ax2.spy(ErRel,precision=1e2*step) # ax2.set_title('error rel", "Er_max=[] Er_max_star=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] Der_star_an=Der_star_list[ss] Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K", "%(ss_in)) # #plt.show() # plt.close() def test_dfqsdvind_zeta(self): ''' For each output surface, there", "Der_an=Der_list[ss] Der_star_an=Der_star_list[ss] Surf=MS.Surfs[ss] Surf_star=MS.Surfs_star[ss] 
M,N=Surf.maps.M,Surf.maps.N K=Surf.maps.K fqs0=Surf.fqs.copy() gamma0=Surf.gamma.copy() for step in Steps: Der_num=0.0*Der_an", "self.MS=MS def test_nc_dqcdzeta(self): ''' For each output surface, where induced velocity is computed,", "perturb for kk in range(3*Surf_in.maps.Kzeta): cc,mm,nn=np.unravel_index( kk, (3,M_in+1,N_in+1) ) # perturb bound. vertices", "# estimate derivatives for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Derlist_num[ss_out][ss_in][:,kk]=df.reshape(-1,order='C')", "Testing assembly.dfqsdzeta_vrel0') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsdzeta_vrel0(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,] for ss in range(n_surf): Der_an=Der_list[ss] Surf=copy.deepcopy(MS.Surfs[ss])", "range(n_surf): Dervert_list_num.append(0.0*Dervert_list[ii]) # store reference grid Zeta0=[] Zeta0_star=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy())", "actually rotates \"\"\" print('----------------------------- Testing assembly.test_nc_domegazetadzeta') MS=self.MS n_surf=MS.n_surf # analytical Dervert_list = assembly.nc_domegazetadzeta(MS.Surfs,MS.Surfs_star)", "Der_an_list=assembly.dfqsdzeta_omega(MS.Surfs,MS.Surfs_star) # Initialize Er_max=[] # Define steps to run Steps=[1e-2,1e-4,1e-6,] for ss in", "zeta_dot0=Surf.zeta_dot.copy() for step in Steps: Der_num=0.0*Der_an for kk in range(3*Kzeta): Surf.u_ext=u_ext0.copy() Surf.zeta_dot=zeta_dot0.copy() ind_3d=np.unravel_index(kk,", "values Surf.get_joukovski_unsteady() Gamma_dot0=Surf.gamma_dot.copy() F0=Surf.funst.copy() for pp in range(K): mm,nn=np.unravel_index( pp, (M,N) ) Surf.gamma_dot=Gamma_dot0.copy()", "= fig.add_subplot(122) # ax2.spy(DerNum,precision=step) # plt.show() def test_nc_domegazetadzeta(self): \"\"\" Variation at colocation points", "(0,nn), (M_star,N_star)) gammaw_TE=gammaw_TE0.copy() gammaw_TE[nn]+=step 
Surf.get_joukovski_qs(gammaw_TE=gammaw_TE) df=(Surf.fqs-fqs0)/step Der_star_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_star_an-Der_star_num)) print('Surface %.2d - wake:' %ss)", "Surf.get_joukovski_qs(gammaw_TE=Surf_star.gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,pp]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d - bound:' %ss) print('FD step: %.2e --->", "for ss_out in range(n_surf): Surf_out=MS.Surfs[ss_out] fqs0=Fqs0[ss_out].copy() Surf_out.get_joukovski_qs( gammaw_TE=MS.Surfs_star[ss_out].gamma[0,:]) df=(Surf_out.fqs-fqs0)/step Derlist_num[ss_out][ss_in][:,kk]=df.reshape(-1,order='C') ### check error", "grid modifications (three directions per vertex point) for kk in range(3*Kzeta): # Initialize", "times step size' Er_max.append(er_max) def test_dfqsdzeta_omega(self): ''' Note: the get_joukovski_qs method re-computes the", ") assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max.append(er_max) ### Wake", "only TE is displaced. ''' print('------------------------------- Testing assembly.dfqsdvind_zeta') MS=self.MS n_surf=MS.n_surf # analytical Dercoll_list,Dervert_list=assembly.dfqsdvind_zeta(MS.Surfs,MS.Surfs_star)", "affected. 
Needs to be tested with a case that actually rotates ''' print('------------------------------", "if __name__=='__main__': unittest.main() # T=Test_assembly() # T.setUp() # ### force equation (qs term)", "%ss) print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) ) assert er_max<5e1*step, 'Error larger", "Surf.get_input_velocities_at_segments() Surf.zeta=zeta0.copy() # Compute new forces Surf.get_joukovski_qs(gammaw_TE=MS.Surfs_star[ss].gamma[0,:]) df=(Surf.fqs-fqs0)/step Der_num[:,kk]=df.reshape(-1,order='C') er_max=np.max(np.abs(Der_an-Der_num)) print('Surface %.2d -", "for nn in range(N): Der_num[ipanel] = (np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, ZetaC0[ss][:,mm,nn])) - np.dot(N0[ss][:,mm,nn], np.dot(skew_omega, Surf.zetac[:,mm,nn])))", "range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp] nn=Surf_in.maps.ind_2d_pan_scal[1][pp] Surf_in.gamma=Gammaw0[ss_in].copy() Surf_in.gamma[mm,nn]+=step # recalculate induced velocity everywhere MS.get_ind_velocities_at_segments(overwrite=True) # restore", "be used here! df=(Surf_out.fqs-fqs0)/step Der_star_list_num[ss_out][ss_in][:,pp]=df.reshape(-1,order='C') ### check error Er_max=[] Er_max_star=[] for ss_out in", "the analytical derivative is zero. These elements are filtered out during the search", "Surf_in.zeta[cc,mm,nn]+=step Surf_in.generate_collocations() # perturb wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step ### prepare output", "analytical derivative is zero. These elements are filtered out during the search for", "Select the surface with the analytica derivatives Der_an=Der_an_list[ss] # Copy to avoid modifying", "ax3=fig.add_subplot(133) # ax3.spy(Dercoll_list[ss_out],precision=50*step) # ax3.set_title('Dcoll an. 
%d to %d' %(ss_out,ss_out)) # #plt.show() #", "import unittest import itertools import numpy as np import scipy.linalg as scalg import", "N0.append(MS.Surfs[ss].normals.copy()) # calculate vis FDs Steps=[1e-6,] step=Steps[0] ### loop input surfs for ss_in", "and force Zeta0=[] Zeta0_star=[] Fqs0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) #", "assert er_max<5e1*step, 'Error larger than 50 times step size' Er_max.append(er_max) def test_dfqsdzeta_omega(self): '''", "both u_ext and zeta_dot ''' print('---------------------------------- Testing assembly.dfqsduinput') MS=self.MS n_surf=MS.n_surf Der_list=assembly.dfqsduinput(MS.Surfs,MS.Surfs_star) Er_max=[] Steps=[1e-2,1e-4,1e-6,]", "- bound:' %ss) print('FD step: %.2e ---> Max error: %.2e'%(step,er_max) ) assert er_max<5e1*step,", "= np.zeros(3*Surf.maps.Kzeta) # PERTURBATION OF THE SURFACE for kk in range(3*Surf.maps.Kzeta): # generate", "import sharpy.linear.src.libuvlm as libuvlm import sharpy.utils.algebra as algebra np.set_printoptions(linewidth=200,precision=3) def max_error_tensor(Pder_an,Pder_num): ''' Finds", "and force Gamma0=[] Gammaw0=[] Fqs0=[] for ss in range(n_surf): Gamma0.append(MS.Surfs[ss].gamma.copy()) Gammaw0.append(MS.Surfs_star[ss].gamma.copy()) Fqs0.append(MS.Surfs[ss].fqs.copy()) #", "calculate vis FDs #Steps=[1e-2,1e-4,1e-6,] Steps=[1e-5,] step=Steps[0] ###### bound for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in]", "Surf_in.generate_collocations() # perturb wake TE if mm==M_in: Surf_star_in.zeta=Zeta0_star[ss_in].copy() Surf_star_in.zeta[cc,0,nn]+=step ### prepare output surfaces", "as assembly import sharpy.linear.src.multisurfaces as multisurfaces import sharpy.linear.src.surface as surface import sharpy.linear.src.libuvlm as", "bound for ss_in in range(n_surf): Surf_in=MS.Surfs[ss_in] # perturb for pp in range(Surf_in.maps.K): mm=Surf_in.maps.ind_2d_pan_scal[0][pp]", 
"er_max<5e1*step, 'Error larger than 50 times step size' Er_max_star.append(er_max) Surf.gamma=gamma0.copy() ### Warning: this", "jj in range(n_surf): sub.append(0.0*Der_list[ii][jj]) sub_star.append(0.0*Der_star_list[ii][jj]) Der_list_num.append(sub) Der_star_list_num.append(sub_star) # store reference circulation and force", "equilibrium fqs0=Surf.fqs.copy() zeta0=Surf.zeta.copy() u_input_seg0=Surf.u_input_seg.copy() for step in Steps: # Initialize Der_num = 0.0*Der_an", "Zeta0_star=[] Vind0=[] N0=[] ZetaC0=[] for ss in range(n_surf): Zeta0.append(MS.Surfs[ss].zeta.copy()) ZetaC0.append(MS.Surfs[ss].zetac.copy('F')) Zeta0_star.append(MS.Surfs_star[ss].zeta.copy()) Vind0.append(MS.Surfs[ss].u_ind_coll_norm.copy()) N0.append(MS.Surfs[ss].normals.copy())", "is allocated to both u_ext and zeta_dot ''' print('---------------------------------- Testing assembly.dfqsduinput') MS=self.MS n_surf=MS.n_surf", "generate a random perturbation between the 90% and the 110% of the step", "Cstar=Cstar_list[ss] # add noise to circulations gamma=Surf.gamma+np.random.rand( *Surf.gamma.shape ) gamma_star=Surf_star.gamma+np.random.rand( *Surf_star.gamma.shape ) gvec=np.dot(C,gamma.reshape(-1))+np.dot(Cstar,gamma_star.reshape(-1))" ]
[ "self.extra=nn.Sequential( nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=stride), nn.BatchNorm2d(ch_out) ) def forward(self, x): out=F.relu(self.bn1(self.conv1(x))) out=self.bn2(self.conv2(out)) out=self.extra(x)+out return out class", "super(Resnet,self).__init__() self.con1=nn.Sequential( nn.Conv2d(3,64,kernel_size=3,stride=3,padding=0), nn.BatchNorm2d(64) ) self.blc1=ResBl(64,128,stride=2) self.blc2=ResBl(128,256,stride=2) self.blc3=ResBl(256,512,stride=2) self.blc4=ResBl(512,512,stride=2) self.outlayer=nn.Linear(512*1*1,10) def forward(self,x): x=F.relu(self.con1(x))", "x=torch.randn(2,3,32,32) # model=Resnet() # out=model(x) # print('resnet:',out.shape) # # # if __name__ ==", "#print(x.shape) x=F.adaptive_avg_pool2d(x,[1,1]) #print(x.shape) x=x.view(x.size(0),-1) x=self.outlayer(x) return x # def main(): # blk=ResBl(64,128,stride=4) #", "#print(x.shape) x=x.view(x.size(0),-1) x=self.outlayer(x) return x # def main(): # blk=ResBl(64,128,stride=4) # tmp =", "x=self.blc2(x) x=self.blc3(x) x=self.blc4(x) #print(x.shape) x=F.adaptive_avg_pool2d(x,[1,1]) #print(x.shape) x=x.view(x.size(0),-1) x=self.outlayer(x) return x # def main():", "__init__(self): super(Resnet,self).__init__() self.con1=nn.Sequential( nn.Conv2d(3,64,kernel_size=3,stride=3,padding=0), nn.BatchNorm2d(64) ) self.blc1=ResBl(64,128,stride=2) self.blc2=ResBl(128,256,stride=2) self.blc3=ResBl(256,512,stride=2) self.blc4=ResBl(512,512,stride=2) self.outlayer=nn.Linear(512*1*1,10) def forward(self,x):", "def main(): # blk=ResBl(64,128,stride=4) # tmp = torch.randn(2, 64, 32, 32) # out=blk(tmp)", "return out class Resnet(nn.Module): def __init__(self): super(Resnet,self).__init__() self.con1=nn.Sequential( nn.Conv2d(3,64,kernel_size=3,stride=3,padding=0), nn.BatchNorm2d(64) ) self.blc1=ResBl(64,128,stride=2) self.blc2=ResBl(128,256,stride=2)", "self.bn2=nn.BatchNorm2d(ch_out) self.extra=nn.Sequential() if ch_out!=ch_in: self.extra=nn.Sequential( nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=stride), 
nn.BatchNorm2d(ch_out) ) def forward(self, x): out=F.relu(self.bn1(self.conv1(x))) out=self.bn2(self.conv2(out))", "# def main(): # blk=ResBl(64,128,stride=4) # tmp = torch.randn(2, 64, 32, 32) #", "out=self.bn2(self.conv2(out)) out=self.extra(x)+out return out class Resnet(nn.Module): def __init__(self): super(Resnet,self).__init__() self.con1=nn.Sequential( nn.Conv2d(3,64,kernel_size=3,stride=3,padding=0), nn.BatchNorm2d(64) )", "blk=ResBl(64,128,stride=4) # tmp = torch.randn(2, 64, 32, 32) # out=blk(tmp) # print('block:',out.shape) #", "ResBl(nn.Module): def __init__(self,ch_in,ch_out,stride=1): super(ResBl,self).__init__() self.conv1=nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=stride,padding=1) self.bn1=nn.BatchNorm2d(ch_out) self.conv2=nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1) self.bn2=nn.BatchNorm2d(ch_out) self.extra=nn.Sequential() if ch_out!=ch_in: self.extra=nn.Sequential( nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=stride),", "__init__(self,ch_in,ch_out,stride=1): super(ResBl,self).__init__() self.conv1=nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=stride,padding=1) self.bn1=nn.BatchNorm2d(ch_out) self.conv2=nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1) self.bn2=nn.BatchNorm2d(ch_out) self.extra=nn.Sequential() if ch_out!=ch_in: self.extra=nn.Sequential( nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=stride), nn.BatchNorm2d(ch_out) )", "from torch import nn from torch.nn import functional as F class ResBl(nn.Module): def", "32) # out=blk(tmp) # print('block:',out.shape) # # x=torch.randn(2,3,32,32) # model=Resnet() # out=model(x) #", "model=Resnet() # out=model(x) # print('resnet:',out.shape) # # # if __name__ == '__main__': #", "x=F.adaptive_avg_pool2d(x,[1,1]) #print(x.shape) x=x.view(x.size(0),-1) x=self.outlayer(x) return x # def main(): # blk=ResBl(64,128,stride=4) # tmp", "self.bn1=nn.BatchNorm2d(ch_out) self.conv2=nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1) self.bn2=nn.BatchNorm2d(ch_out) 
self.extra=nn.Sequential() if ch_out!=ch_in: self.extra=nn.Sequential( nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=stride), nn.BatchNorm2d(ch_out) ) def forward(self, x):", "self.con1=nn.Sequential( nn.Conv2d(3,64,kernel_size=3,stride=3,padding=0), nn.BatchNorm2d(64) ) self.blc1=ResBl(64,128,stride=2) self.blc2=ResBl(128,256,stride=2) self.blc3=ResBl(256,512,stride=2) self.blc4=ResBl(512,512,stride=2) self.outlayer=nn.Linear(512*1*1,10) def forward(self,x): x=F.relu(self.con1(x)) x=self.blc1(x)", "def forward(self,x): x=F.relu(self.con1(x)) x=self.blc1(x) x=self.blc2(x) x=self.blc3(x) x=self.blc4(x) #print(x.shape) x=F.adaptive_avg_pool2d(x,[1,1]) #print(x.shape) x=x.view(x.size(0),-1) x=self.outlayer(x) return", "32, 32) # out=blk(tmp) # print('block:',out.shape) # # x=torch.randn(2,3,32,32) # model=Resnet() # out=model(x)", "out=self.extra(x)+out return out class Resnet(nn.Module): def __init__(self): super(Resnet,self).__init__() self.con1=nn.Sequential( nn.Conv2d(3,64,kernel_size=3,stride=3,padding=0), nn.BatchNorm2d(64) ) self.blc1=ResBl(64,128,stride=2)", "self.conv1=nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=stride,padding=1) self.bn1=nn.BatchNorm2d(ch_out) self.conv2=nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1) self.bn2=nn.BatchNorm2d(ch_out) self.extra=nn.Sequential() if ch_out!=ch_in: self.extra=nn.Sequential( nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=stride), nn.BatchNorm2d(ch_out) ) def forward(self,", "self.conv2=nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1) self.bn2=nn.BatchNorm2d(ch_out) self.extra=nn.Sequential() if ch_out!=ch_in: self.extra=nn.Sequential( nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=stride), nn.BatchNorm2d(ch_out) ) def forward(self, x): out=F.relu(self.bn1(self.conv1(x)))", "x): out=F.relu(self.bn1(self.conv1(x))) out=self.bn2(self.conv2(out)) out=self.extra(x)+out return out class Resnet(nn.Module): def __init__(self): super(Resnet,self).__init__() self.con1=nn.Sequential( 
nn.Conv2d(3,64,kernel_size=3,stride=3,padding=0),", "x=x.view(x.size(0),-1) x=self.outlayer(x) return x # def main(): # blk=ResBl(64,128,stride=4) # tmp = torch.randn(2,", "def forward(self, x): out=F.relu(self.bn1(self.conv1(x))) out=self.bn2(self.conv2(out)) out=self.extra(x)+out return out class Resnet(nn.Module): def __init__(self): super(Resnet,self).__init__()", "# model=Resnet() # out=model(x) # print('resnet:',out.shape) # # # if __name__ == '__main__':", "def __init__(self): super(Resnet,self).__init__() self.con1=nn.Sequential( nn.Conv2d(3,64,kernel_size=3,stride=3,padding=0), nn.BatchNorm2d(64) ) self.blc1=ResBl(64,128,stride=2) self.blc2=ResBl(128,256,stride=2) self.blc3=ResBl(256,512,stride=2) self.blc4=ResBl(512,512,stride=2) self.outlayer=nn.Linear(512*1*1,10) def", "main(): # blk=ResBl(64,128,stride=4) # tmp = torch.randn(2, 64, 32, 32) # out=blk(tmp) #", "self.blc2=ResBl(128,256,stride=2) self.blc3=ResBl(256,512,stride=2) self.blc4=ResBl(512,512,stride=2) self.outlayer=nn.Linear(512*1*1,10) def forward(self,x): x=F.relu(self.con1(x)) x=self.blc1(x) x=self.blc2(x) x=self.blc3(x) x=self.blc4(x) #print(x.shape) x=F.adaptive_avg_pool2d(x,[1,1])", "x # def main(): # blk=ResBl(64,128,stride=4) # tmp = torch.randn(2, 64, 32, 32)", "F class ResBl(nn.Module): def __init__(self,ch_in,ch_out,stride=1): super(ResBl,self).__init__() self.conv1=nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=stride,padding=1) self.bn1=nn.BatchNorm2d(ch_out) self.conv2=nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1) self.bn2=nn.BatchNorm2d(ch_out) self.extra=nn.Sequential() if ch_out!=ch_in:", "x=self.blc3(x) x=self.blc4(x) #print(x.shape) x=F.adaptive_avg_pool2d(x,[1,1]) #print(x.shape) x=x.view(x.size(0),-1) x=self.outlayer(x) return x # def main(): #", "Resnet(nn.Module): def __init__(self): super(Resnet,self).__init__() self.con1=nn.Sequential( nn.Conv2d(3,64,kernel_size=3,stride=3,padding=0), nn.BatchNorm2d(64) ) self.blc1=ResBl(64,128,stride=2) 
self.blc2=ResBl(128,256,stride=2) self.blc3=ResBl(256,512,stride=2) self.blc4=ResBl(512,512,stride=2) self.outlayer=nn.Linear(512*1*1,10)", "tmp = torch.randn(2, 64, 32, 32) # out=blk(tmp) # print('block:',out.shape) # # x=torch.randn(2,3,32,32)", "# tmp = torch.randn(2, 64, 32, 32) # out=blk(tmp) # print('block:',out.shape) # #", "import functional as F class ResBl(nn.Module): def __init__(self,ch_in,ch_out,stride=1): super(ResBl,self).__init__() self.conv1=nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=stride,padding=1) self.bn1=nn.BatchNorm2d(ch_out) self.conv2=nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1) self.bn2=nn.BatchNorm2d(ch_out)", "import torch from torch import nn from torch.nn import functional as F class", "x=self.blc1(x) x=self.blc2(x) x=self.blc3(x) x=self.blc4(x) #print(x.shape) x=F.adaptive_avg_pool2d(x,[1,1]) #print(x.shape) x=x.view(x.size(0),-1) x=self.outlayer(x) return x # def", "import nn from torch.nn import functional as F class ResBl(nn.Module): def __init__(self,ch_in,ch_out,stride=1): super(ResBl,self).__init__()", "nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=stride), nn.BatchNorm2d(ch_out) ) def forward(self, x): out=F.relu(self.bn1(self.conv1(x))) out=self.bn2(self.conv2(out)) out=self.extra(x)+out return out class Resnet(nn.Module):", "# x=torch.randn(2,3,32,32) # model=Resnet() # out=model(x) # print('resnet:',out.shape) # # # if __name__", "if ch_out!=ch_in: self.extra=nn.Sequential( nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=stride), nn.BatchNorm2d(ch_out) ) def forward(self, x): out=F.relu(self.bn1(self.conv1(x))) out=self.bn2(self.conv2(out)) out=self.extra(x)+out return", "out=F.relu(self.bn1(self.conv1(x))) out=self.bn2(self.conv2(out)) out=self.extra(x)+out return out class Resnet(nn.Module): def __init__(self): super(Resnet,self).__init__() self.con1=nn.Sequential( nn.Conv2d(3,64,kernel_size=3,stride=3,padding=0), nn.BatchNorm2d(64)", "out class Resnet(nn.Module): def __init__(self): super(Resnet,self).__init__() 
self.con1=nn.Sequential( nn.Conv2d(3,64,kernel_size=3,stride=3,padding=0), nn.BatchNorm2d(64) ) self.blc1=ResBl(64,128,stride=2) self.blc2=ResBl(128,256,stride=2) self.blc3=ResBl(256,512,stride=2)", "from torch.nn import functional as F class ResBl(nn.Module): def __init__(self,ch_in,ch_out,stride=1): super(ResBl,self).__init__() self.conv1=nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=stride,padding=1) self.bn1=nn.BatchNorm2d(ch_out)", "torch import nn from torch.nn import functional as F class ResBl(nn.Module): def __init__(self,ch_in,ch_out,stride=1):", "nn from torch.nn import functional as F class ResBl(nn.Module): def __init__(self,ch_in,ch_out,stride=1): super(ResBl,self).__init__() self.conv1=nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=stride,padding=1)", "class ResBl(nn.Module): def __init__(self,ch_in,ch_out,stride=1): super(ResBl,self).__init__() self.conv1=nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=stride,padding=1) self.bn1=nn.BatchNorm2d(ch_out) self.conv2=nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1) self.bn2=nn.BatchNorm2d(ch_out) self.extra=nn.Sequential() if ch_out!=ch_in: self.extra=nn.Sequential(", "torch.randn(2, 64, 32, 32) # out=blk(tmp) # print('block:',out.shape) # # x=torch.randn(2,3,32,32) # model=Resnet()", "# print('block:',out.shape) # # x=torch.randn(2,3,32,32) # model=Resnet() # out=model(x) # print('resnet:',out.shape) # #", "ch_out!=ch_in: self.extra=nn.Sequential( nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=stride), nn.BatchNorm2d(ch_out) ) def forward(self, x): out=F.relu(self.bn1(self.conv1(x))) out=self.bn2(self.conv2(out)) out=self.extra(x)+out return out", ") self.blc1=ResBl(64,128,stride=2) self.blc2=ResBl(128,256,stride=2) self.blc3=ResBl(256,512,stride=2) self.blc4=ResBl(512,512,stride=2) self.outlayer=nn.Linear(512*1*1,10) def forward(self,x): x=F.relu(self.con1(x)) x=self.blc1(x) x=self.blc2(x) x=self.blc3(x) x=self.blc4(x)", "class Resnet(nn.Module): def __init__(self): super(Resnet,self).__init__() 
self.con1=nn.Sequential( nn.Conv2d(3,64,kernel_size=3,stride=3,padding=0), nn.BatchNorm2d(64) ) self.blc1=ResBl(64,128,stride=2) self.blc2=ResBl(128,256,stride=2) self.blc3=ResBl(256,512,stride=2) self.blc4=ResBl(512,512,stride=2)", "= torch.randn(2, 64, 32, 32) # out=blk(tmp) # print('block:',out.shape) # # x=torch.randn(2,3,32,32) #", "64, 32, 32) # out=blk(tmp) # print('block:',out.shape) # # x=torch.randn(2,3,32,32) # model=Resnet() #", "self.outlayer=nn.Linear(512*1*1,10) def forward(self,x): x=F.relu(self.con1(x)) x=self.blc1(x) x=self.blc2(x) x=self.blc3(x) x=self.blc4(x) #print(x.shape) x=F.adaptive_avg_pool2d(x,[1,1]) #print(x.shape) x=x.view(x.size(0),-1) x=self.outlayer(x)", "torch.nn import functional as F class ResBl(nn.Module): def __init__(self,ch_in,ch_out,stride=1): super(ResBl,self).__init__() self.conv1=nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=stride,padding=1) self.bn1=nn.BatchNorm2d(ch_out) self.conv2=nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1)", "self.blc1=ResBl(64,128,stride=2) self.blc2=ResBl(128,256,stride=2) self.blc3=ResBl(256,512,stride=2) self.blc4=ResBl(512,512,stride=2) self.outlayer=nn.Linear(512*1*1,10) def forward(self,x): x=F.relu(self.con1(x)) x=self.blc1(x) x=self.blc2(x) x=self.blc3(x) x=self.blc4(x) #print(x.shape)", "torch from torch import nn from torch.nn import functional as F class ResBl(nn.Module):", "x=self.blc4(x) #print(x.shape) x=F.adaptive_avg_pool2d(x,[1,1]) #print(x.shape) x=x.view(x.size(0),-1) x=self.outlayer(x) return x # def main(): # blk=ResBl(64,128,stride=4)", "def __init__(self,ch_in,ch_out,stride=1): super(ResBl,self).__init__() self.conv1=nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=stride,padding=1) self.bn1=nn.BatchNorm2d(ch_out) self.conv2=nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1) self.bn2=nn.BatchNorm2d(ch_out) self.extra=nn.Sequential() if ch_out!=ch_in: self.extra=nn.Sequential( nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=stride), nn.BatchNorm2d(ch_out)", 
"self.blc4=ResBl(512,512,stride=2) self.outlayer=nn.Linear(512*1*1,10) def forward(self,x): x=F.relu(self.con1(x)) x=self.blc1(x) x=self.blc2(x) x=self.blc3(x) x=self.blc4(x) #print(x.shape) x=F.adaptive_avg_pool2d(x,[1,1]) #print(x.shape) x=x.view(x.size(0),-1)", "# out=blk(tmp) # print('block:',out.shape) # # x=torch.randn(2,3,32,32) # model=Resnet() # out=model(x) # print('resnet:',out.shape)", "forward(self,x): x=F.relu(self.con1(x)) x=self.blc1(x) x=self.blc2(x) x=self.blc3(x) x=self.blc4(x) #print(x.shape) x=F.adaptive_avg_pool2d(x,[1,1]) #print(x.shape) x=x.view(x.size(0),-1) x=self.outlayer(x) return x", "nn.BatchNorm2d(ch_out) ) def forward(self, x): out=F.relu(self.bn1(self.conv1(x))) out=self.bn2(self.conv2(out)) out=self.extra(x)+out return out class Resnet(nn.Module): def", "<filename>CIFA10/resnet.py import torch from torch import nn from torch.nn import functional as F", "nn.BatchNorm2d(64) ) self.blc1=ResBl(64,128,stride=2) self.blc2=ResBl(128,256,stride=2) self.blc3=ResBl(256,512,stride=2) self.blc4=ResBl(512,512,stride=2) self.outlayer=nn.Linear(512*1*1,10) def forward(self,x): x=F.relu(self.con1(x)) x=self.blc1(x) x=self.blc2(x) x=self.blc3(x)", "# out=model(x) # print('resnet:',out.shape) # # # if __name__ == '__main__': # main()", "return x # def main(): # blk=ResBl(64,128,stride=4) # tmp = torch.randn(2, 64, 32,", "out=blk(tmp) # print('block:',out.shape) # # x=torch.randn(2,3,32,32) # model=Resnet() # out=model(x) # print('resnet:',out.shape) #", "# # x=torch.randn(2,3,32,32) # model=Resnet() # out=model(x) # print('resnet:',out.shape) # # # if", "nn.Conv2d(3,64,kernel_size=3,stride=3,padding=0), nn.BatchNorm2d(64) ) self.blc1=ResBl(64,128,stride=2) self.blc2=ResBl(128,256,stride=2) self.blc3=ResBl(256,512,stride=2) self.blc4=ResBl(512,512,stride=2) self.outlayer=nn.Linear(512*1*1,10) def forward(self,x): x=F.relu(self.con1(x)) x=self.blc1(x) x=self.blc2(x)", "x=self.outlayer(x) return x # def main(): # blk=ResBl(64,128,stride=4) # tmp = 
torch.randn(2, 64,", "super(ResBl,self).__init__() self.conv1=nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=stride,padding=1) self.bn1=nn.BatchNorm2d(ch_out) self.conv2=nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1) self.bn2=nn.BatchNorm2d(ch_out) self.extra=nn.Sequential() if ch_out!=ch_in: self.extra=nn.Sequential( nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=stride), nn.BatchNorm2d(ch_out) ) def", "self.blc3=ResBl(256,512,stride=2) self.blc4=ResBl(512,512,stride=2) self.outlayer=nn.Linear(512*1*1,10) def forward(self,x): x=F.relu(self.con1(x)) x=self.blc1(x) x=self.blc2(x) x=self.blc3(x) x=self.blc4(x) #print(x.shape) x=F.adaptive_avg_pool2d(x,[1,1]) #print(x.shape)", "x=F.relu(self.con1(x)) x=self.blc1(x) x=self.blc2(x) x=self.blc3(x) x=self.blc4(x) #print(x.shape) x=F.adaptive_avg_pool2d(x,[1,1]) #print(x.shape) x=x.view(x.size(0),-1) x=self.outlayer(x) return x #", "functional as F class ResBl(nn.Module): def __init__(self,ch_in,ch_out,stride=1): super(ResBl,self).__init__() self.conv1=nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=stride,padding=1) self.bn1=nn.BatchNorm2d(ch_out) self.conv2=nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1) self.bn2=nn.BatchNorm2d(ch_out) self.extra=nn.Sequential()", "self.extra=nn.Sequential() if ch_out!=ch_in: self.extra=nn.Sequential( nn.Conv2d(ch_in,ch_out,kernel_size=1,stride=stride), nn.BatchNorm2d(ch_out) ) def forward(self, x): out=F.relu(self.bn1(self.conv1(x))) out=self.bn2(self.conv2(out)) out=self.extra(x)+out", ") def forward(self, x): out=F.relu(self.bn1(self.conv1(x))) out=self.bn2(self.conv2(out)) out=self.extra(x)+out return out class Resnet(nn.Module): def __init__(self):", "as F class ResBl(nn.Module): def __init__(self,ch_in,ch_out,stride=1): super(ResBl,self).__init__() self.conv1=nn.Conv2d(ch_in,ch_out,kernel_size=3,stride=stride,padding=1) self.bn1=nn.BatchNorm2d(ch_out) self.conv2=nn.Conv2d(ch_out,ch_out,kernel_size=3,stride=1,padding=1) self.bn2=nn.BatchNorm2d(ch_out) 
self.extra=nn.Sequential() if", "# blk=ResBl(64,128,stride=4) # tmp = torch.randn(2, 64, 32, 32) # out=blk(tmp) # print('block:',out.shape)", "print('block:',out.shape) # # x=torch.randn(2,3,32,32) # model=Resnet() # out=model(x) # print('resnet:',out.shape) # # #", "forward(self, x): out=F.relu(self.bn1(self.conv1(x))) out=self.bn2(self.conv2(out)) out=self.extra(x)+out return out class Resnet(nn.Module): def __init__(self): super(Resnet,self).__init__() self.con1=nn.Sequential(" ]
[ "cv2 import configs FONT_SYTLE = ImageFont.truetype(configs.IMAGE_FONT, 25) def draw_image(img, text, color): img =", "def id2data(_id): _labels = [ '_', '有害垃圾', '可回收垃圾', '厨余垃圾', '其他垃圾' ] class_name, object_name", "] class_name, object_name = configs.PREDICT_LABELS[_id].split('/') class_id = _labels.index(class_name) return { 'class_id': str(class_id), 'class_name':", "25) def draw_image(img, text, color): img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) draw = ImageDraw.Draw(img) draw.text((0,", "base64_str = base64.b64encode(base64_str).decode('utf8') return base64_str def id2data(_id): _labels = [ '_', '有害垃圾', '可回收垃圾',", "[ '_', '有害垃圾', '可回收垃圾', '厨余垃圾', '其他垃圾' ] class_name, object_name = configs.PREDICT_LABELS[_id].split('/') class_id =", "draw = ImageDraw.Draw(img) draw.text((0, 0), '{}'.format(text), color, font=FONT_SYTLE) return img def cv2base64(image): base64_str", "import cv2 import configs FONT_SYTLE = ImageFont.truetype(configs.IMAGE_FONT, 25) def draw_image(img, text, color): img", "Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) draw = ImageDraw.Draw(img) draw.text((0, 0), '{}'.format(text), color, font=FONT_SYTLE) return img def", "base64 from PIL import Image, ImageDraw, ImageFont import cv2 import configs FONT_SYTLE =", "import configs FONT_SYTLE = ImageFont.truetype(configs.IMAGE_FONT, 25) def draw_image(img, text, color): img = Image.fromarray(cv2.cvtColor(img,", "return img def cv2base64(image): base64_str = cv2.imencode('.jpg',image)[1].tostring() base64_str = base64.b64encode(base64_str).decode('utf8') return base64_str def", "from PIL import Image, ImageDraw, ImageFont import cv2 import configs FONT_SYTLE = ImageFont.truetype(configs.IMAGE_FONT,", "object_name = configs.PREDICT_LABELS[_id].split('/') class_id = _labels.index(class_name) return { 'class_id': str(class_id), 'class_name': class_name, 'object_name':", "FONT_SYTLE = ImageFont.truetype(configs.IMAGE_FONT, 25) def draw_image(img, text, color): img = 
Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) draw", "img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) draw = ImageDraw.Draw(img) draw.text((0, 0), '{}'.format(text), color, font=FONT_SYTLE) return", "ImageDraw, ImageFont import cv2 import configs FONT_SYTLE = ImageFont.truetype(configs.IMAGE_FONT, 25) def draw_image(img, text,", "def draw_image(img, text, color): img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) draw = ImageDraw.Draw(img) draw.text((0, 0),", "color, font=FONT_SYTLE) return img def cv2base64(image): base64_str = cv2.imencode('.jpg',image)[1].tostring() base64_str = base64.b64encode(base64_str).decode('utf8') return", "ImageFont import cv2 import configs FONT_SYTLE = ImageFont.truetype(configs.IMAGE_FONT, 25) def draw_image(img, text, color):", "'厨余垃圾', '其他垃圾' ] class_name, object_name = configs.PREDICT_LABELS[_id].split('/') class_id = _labels.index(class_name) return { 'class_id':", "color): img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) draw = ImageDraw.Draw(img) draw.text((0, 0), '{}'.format(text), color, font=FONT_SYTLE)", "ImageDraw.Draw(img) draw.text((0, 0), '{}'.format(text), color, font=FONT_SYTLE) return img def cv2base64(image): base64_str = cv2.imencode('.jpg',image)[1].tostring()", "'可回收垃圾', '厨余垃圾', '其他垃圾' ] class_name, object_name = configs.PREDICT_LABELS[_id].split('/') class_id = _labels.index(class_name) return {", "import Image, ImageDraw, ImageFont import cv2 import configs FONT_SYTLE = ImageFont.truetype(configs.IMAGE_FONT, 25) def", "import base64 from PIL import Image, ImageDraw, ImageFont import cv2 import configs FONT_SYTLE", "configs FONT_SYTLE = ImageFont.truetype(configs.IMAGE_FONT, 25) def draw_image(img, text, color): img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))", "draw.text((0, 0), '{}'.format(text), color, font=FONT_SYTLE) return img def cv2base64(image): base64_str = cv2.imencode('.jpg',image)[1].tostring() base64_str", "PIL import Image, ImageDraw, ImageFont 
import cv2 import configs FONT_SYTLE = ImageFont.truetype(configs.IMAGE_FONT, 25)", "= Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) draw = ImageDraw.Draw(img) draw.text((0, 0), '{}'.format(text), color, font=FONT_SYTLE) return img", "def cv2base64(image): base64_str = cv2.imencode('.jpg',image)[1].tostring() base64_str = base64.b64encode(base64_str).decode('utf8') return base64_str def id2data(_id): _labels", "base64_str def id2data(_id): _labels = [ '_', '有害垃圾', '可回收垃圾', '厨余垃圾', '其他垃圾' ] class_name,", "= configs.PREDICT_LABELS[_id].split('/') class_id = _labels.index(class_name) return { 'class_id': str(class_id), 'class_name': class_name, 'object_name': object_name", "base64.b64encode(base64_str).decode('utf8') return base64_str def id2data(_id): _labels = [ '_', '有害垃圾', '可回收垃圾', '厨余垃圾', '其他垃圾'", "Image, ImageDraw, ImageFont import cv2 import configs FONT_SYTLE = ImageFont.truetype(configs.IMAGE_FONT, 25) def draw_image(img,", "return base64_str def id2data(_id): _labels = [ '_', '有害垃圾', '可回收垃圾', '厨余垃圾', '其他垃圾' ]", "= ImageFont.truetype(configs.IMAGE_FONT, 25) def draw_image(img, text, color): img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) draw =", "'_', '有害垃圾', '可回收垃圾', '厨余垃圾', '其他垃圾' ] class_name, object_name = configs.PREDICT_LABELS[_id].split('/') class_id = _labels.index(class_name)", "font=FONT_SYTLE) return img def cv2base64(image): base64_str = cv2.imencode('.jpg',image)[1].tostring() base64_str = base64.b64encode(base64_str).decode('utf8') return base64_str", "= base64.b64encode(base64_str).decode('utf8') return base64_str def id2data(_id): _labels = [ '_', '有害垃圾', '可回收垃圾', '厨余垃圾',", "ImageFont.truetype(configs.IMAGE_FONT, 25) def draw_image(img, text, color): img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) draw = ImageDraw.Draw(img)", "text, color): img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) draw = ImageDraw.Draw(img) draw.text((0, 0), '{}'.format(text), color,", "0), '{}'.format(text), color, 
font=FONT_SYTLE) return img def cv2base64(image): base64_str = cv2.imencode('.jpg',image)[1].tostring() base64_str =", "= ImageDraw.Draw(img) draw.text((0, 0), '{}'.format(text), color, font=FONT_SYTLE) return img def cv2base64(image): base64_str =", "= cv2.imencode('.jpg',image)[1].tostring() base64_str = base64.b64encode(base64_str).decode('utf8') return base64_str def id2data(_id): _labels = [ '_',", "id2data(_id): _labels = [ '_', '有害垃圾', '可回收垃圾', '厨余垃圾', '其他垃圾' ] class_name, object_name =", "cv2.COLOR_BGR2RGB)) draw = ImageDraw.Draw(img) draw.text((0, 0), '{}'.format(text), color, font=FONT_SYTLE) return img def cv2base64(image):", "<reponame>CoderChen01/smart_trash_can import base64 from PIL import Image, ImageDraw, ImageFont import cv2 import configs", "cv2.imencode('.jpg',image)[1].tostring() base64_str = base64.b64encode(base64_str).decode('utf8') return base64_str def id2data(_id): _labels = [ '_', '有害垃圾',", "draw_image(img, text, color): img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)) draw = ImageDraw.Draw(img) draw.text((0, 0), '{}'.format(text),", "class_name, object_name = configs.PREDICT_LABELS[_id].split('/') class_id = _labels.index(class_name) return { 'class_id': str(class_id), 'class_name': class_name,", "'有害垃圾', '可回收垃圾', '厨余垃圾', '其他垃圾' ] class_name, object_name = configs.PREDICT_LABELS[_id].split('/') class_id = _labels.index(class_name) return", "= [ '_', '有害垃圾', '可回收垃圾', '厨余垃圾', '其他垃圾' ] class_name, object_name = configs.PREDICT_LABELS[_id].split('/') class_id", "'其他垃圾' ] class_name, object_name = configs.PREDICT_LABELS[_id].split('/') class_id = _labels.index(class_name) return { 'class_id': str(class_id),", "_labels = [ '_', '有害垃圾', '可回收垃圾', '厨余垃圾', '其他垃圾' ] class_name, object_name = configs.PREDICT_LABELS[_id].split('/')", "img def cv2base64(image): base64_str = cv2.imencode('.jpg',image)[1].tostring() base64_str = base64.b64encode(base64_str).decode('utf8') return base64_str def id2data(_id):", "base64_str = 
cv2.imencode('.jpg',image)[1].tostring() base64_str = base64.b64encode(base64_str).decode('utf8') return base64_str def id2data(_id): _labels = [", "configs.PREDICT_LABELS[_id].split('/') class_id = _labels.index(class_name) return { 'class_id': str(class_id), 'class_name': class_name, 'object_name': object_name }", "'{}'.format(text), color, font=FONT_SYTLE) return img def cv2base64(image): base64_str = cv2.imencode('.jpg',image)[1].tostring() base64_str = base64.b64encode(base64_str).decode('utf8')", "cv2base64(image): base64_str = cv2.imencode('.jpg',image)[1].tostring() base64_str = base64.b64encode(base64_str).decode('utf8') return base64_str def id2data(_id): _labels =" ]
[ "sorted(scans.keys()) else: logger.warn('Could not find file {0}. Estimating scans from available files.'.format(filename)) filelist", "type=str) @click.argument('candnum', type=int) @click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold') def", "in list_cands with abs(snr) > threshold \"\"\" reproduce.refine_cand(candsfile, candnum=candnum, threshold=threshold) @cli.command() @click.argument('candsfile', type=str)", "default=True, help='Create html version') @click.option('--basenb', type=str, default='', help='Full path to base notebook. Default", "search and save candidates for all in candsfile with snr > threshold \"\"\"", "type=str, default='rtpipe_cbe.conf') @click.option('--logfile', type=bool, default=False) @click.option('--bdfdir', default='') def searchone(filename, scan, paramfile, logfile, bdfdir):", "applying classifications') def nbcompile(filename, html, basenb, agdir): \"\"\" Compile the baseinteract.ipynb notebook into", "return metadata for pipeline for first scan \"\"\" filename = os.path.abspath(filename) scans =", "script prints all. assumes filename is an sdm. 
\"\"\" filename = os.path.abspath(filename) scans", "= os.path.abspath(filename) bignumber = 500 if os.path.exists(filename): scans = ps.read_scans(filename, bdfdir=bdfdir) scanlist =", "ps.read_scans(filename, bdfdir=bdfdir) if scan != 0: d = rt.set_pipeline(filename, scan, paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile)", "scan, paramfile, logfile, bdfdir): \"\"\" Searches one scan of filename filename is name", "paramfile, logfile, bdfdir): \"\"\" Searches one scan of filename filename is name of", "os.path.basename(filename), scanlist, snrmin=snrmin, snrmax=snrmax) @cli.command() @click.argument('filename', type=str) @click.option('--html', type=bool, default=True, help='Create html version')", "Run refinement search for candnum in list_cands with abs(snr) > threshold \"\"\" reproduce.refine_cand(candsfile,", "ps.read_scans(filename, bdfdir=bdfdir) logger.info('Scans, Target names:') logger.info('%s' % str([(ss, scans[ss]['source']) for ss in scans]))", "not exist. \"\"\" filename = os.path.abspath(filename) bignumber = 500 if os.path.exists(filename): scans =", "else: logger.warn('Could not find file {0}. Estimating scans from available files.'.format(filename)) filelist =", "if it does not exist. \"\"\" filename = os.path.abspath(filename) bignumber = 500 if", "threshold): \"\"\" Run refinement search and save candidates for all in candsfile with", "('filename.GN' expected locally). scan is scan number to search. if none provided, script", "rt import rtpipe.parsecands as pc import rtpipe.parsesdm as ps import rtpipe.reproduce as reproduce", "%(message)s') logging.captureWarnings(True) logger = logging.getLogger(__name__) @click.group('rtpipe') def cli(): pass @cli.command() @click.argument('filename') @click.option('--paramfile', default='')", "is name of local sdm ('filename.GN' expected locally). 
scan is scan number to", "paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) @cli.command() @click.argument('filename') @click.option('--snrmin', default=0.) @click.option('--snrmax', default=999.) @click.option('--bdfdir', default='') def mergeall(filename,", "pc.nbcompile(os.path.dirname(filename), os.path.basename(filename), html=html, basenb=basenb, agdir=agdir) @cli.command() @click.argument('candsfile', type=str) @click.option('--threshold', type=float, default=0., help='Filter candidates", "locally). scan is scan number to search. if none provided, script prints all.", "finding relevant files if it does not exist. \"\"\" filename = os.path.abspath(filename) bignumber", "over all scans Tries to find scans from filename, but will fall back", "refinement search and save candidates for all in candsfile with snr > threshold", "Target names:') logger.info('%s' % str([(ss, scans[ss]['source']) for ss in scans])) logger.info('Example pipeline:') state", "cands/noise files over all scans Tries to find scans from filename, but will", "logfile=logfile) @cli.command() @click.argument('filename') @click.option('--snrmin', default=0.) @click.option('--snrmax', default=999.) 
@click.option('--bdfdir', default='') def mergeall(filename, snrmin, snrmax,", "snrmin=snrmin, snrmax=snrmax) @cli.command() @click.argument('filename', type=str) @click.option('--html', type=bool, default=True, help='Create html version') @click.option('--basenb', type=str,", "in candsfile \"\"\" reproduce.list_cands(candsfile, threshold) @cli.command() @click.argument('candsfile', type=str) @click.argument('candnum', type=int) @click.option('--threshold', type=float, default=0.,", "scans = ps.read_scans(filename, bdfdir=bdfdir) logger.info('Scans, Target names:') logger.info('%s' % str([(ss, scans[ss]['source']) for ss", "candnum=candnum, threshold=threshold) @cli.command() @click.argument('candsfile', type=str) @click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) >", "@click.argument('candsfile', type=str) @click.argument('candnum', type=int) @click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold')", "in candsfile with snr > threshold \"\"\" reproduce.refine_cands(candsfile, threshold=threshold) if __name__ == '__main__':", "distribution version') @click.option('--agdir', type=str, default='', help='Activegit repo for applying classifications') def nbcompile(filename, html,", "threshold=threshold) @cli.command() @click.argument('candsfile', type=str) @click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold')", "for ss in scans])) logger.info('Example pipeline:') state = rt.set_pipeline(filename, scans.popitem()[0], paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile)", "for scan in scanlist: pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scanlist, snrmin=snrmin, snrmax=snrmax) @cli.command() @click.argument('filename',", "default='') @click.option('--bdfdir', default='') @click.option('--scan', default=1) def read(filename, paramfile, bdfdir, scan): \"\"\" 
Simple parse", "import click, os, glob import logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s -", "help='Create html version') @click.option('--basenb', type=str, default='', help='Full path to base notebook. Default to", "@click.option('--snrmin', default=0.) @click.option('--snrmax', default=999.) @click.option('--bdfdir', default='') def mergeall(filename, snrmin, snrmax, bdfdir): \"\"\" Merge", "default='') def mergeall(filename, snrmin, snrmax, bdfdir): \"\"\" Merge cands/noise files over all scans", "type=str) @click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold') def list_cands(candsfile, threshold):", "= rt.set_pipeline(filename, scan, paramfile=paramfile, logfile=False) @cli.command() @click.argument('filename', type=str) @click.option('--scan', type=int, default=0) @click.option('--paramfile', type=str,", "scans])) logger.info('Example pipeline:') state = rt.set_pipeline(filename, scan, paramfile=paramfile, logfile=False) @cli.command() @click.argument('filename', type=str) @click.option('--scan',", "candnum, threshold): \"\"\" Run refinement search for candnum in list_cands with abs(snr) >", "abs(snr) > threshold \"\"\" reproduce.refine_cand(candsfile, candnum=candnum, threshold=threshold) @cli.command() @click.argument('candsfile', type=str) @click.option('--threshold', type=float, default=0.,", "pc import rtpipe.parsesdm as ps import rtpipe.reproduce as reproduce import click, os, glob", "as ps import rtpipe.reproduce as reproduce import click, os, glob import logging logging.basicConfig(level=logging.INFO,", "os.path.abspath(filename) bignumber = 500 if os.path.exists(filename): scans = ps.read_scans(filename, bdfdir=bdfdir) scanlist = sorted(scans.keys())", "exist. 
\"\"\" filename = os.path.abspath(filename) bignumber = 500 if os.path.exists(filename): scans = ps.read_scans(filename,", "to abs(snr) > threshold') def list_cands(candsfile, threshold): \"\"\" Print candidates above abs(snr) in", "scan != 0: d = rt.set_pipeline(filename, scan, paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) rt.pipeline(d, range(d['nsegments'])) #", "# clean up and merge files pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scans.keys()) else: logger.info('Scans,", "logger = logging.getLogger(__name__) @click.group('rtpipe') def cli(): pass @cli.command() @click.argument('filename') @click.option('--paramfile', default='') @click.option('--bdfdir', default='')", "% str([(ss, scans[ss]['source']) for ss in scans])) logger.info('Example pipeline:') state = rt.set_pipeline(filename, scans.popitem()[0],", "fileroot=os.path.basename(filename), logfile=logfile) @cli.command() @click.argument('filename') @click.option('--snrmin', default=0.) @click.option('--snrmax', default=999.) @click.option('--bdfdir', default='') def mergeall(filename, snrmin,", "scanlist: pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scanlist, snrmin=snrmin, snrmax=snrmax) @cli.command() @click.argument('filename', type=str) @click.option('--html', type=bool,", "default='', help='Full path to base notebook. Default to distribution version') @click.option('--agdir', type=str, default='',", "default=0., help='Filter candidates to abs(snr) > threshold') def refine_cand(candsfile, candnum, threshold): \"\"\" Run", "bignumber = 500 if os.path.exists(filename): scans = ps.read_scans(filename, bdfdir=bdfdir) scanlist = sorted(scans.keys()) else:", "all scans Tries to find scans from filename, but will fall back to", "parse filenames for scans. 
Looking over big range.') scanlist = range(bignumber) logger.info('Merging over", "reproduce.list_cands(candsfile, threshold) @cli.command() @click.argument('candsfile', type=str) @click.argument('candnum', type=int) @click.option('--threshold', type=float, default=0., help='Filter candidates to", "for pipeline for first scan \"\"\" filename = os.path.abspath(filename) scans = ps.read_scans(filename, bdfdir=bdfdir)", "rt.set_pipeline(filename, scan, paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) rt.pipeline(d, range(d['nsegments'])) # clean up and merge files", "= logging.getLogger(__name__) @click.group('rtpipe') def cli(): pass @cli.command() @click.argument('filename') @click.option('--paramfile', default='') @click.option('--bdfdir', default='') @click.option('--scan',", "to base notebook. Default to distribution version') @click.option('--agdir', type=str, default='', help='Activegit repo for", "@cli.command() @click.argument('filename') @click.option('--snrmin', default=0.) @click.option('--snrmax', default=999.) 
@click.option('--bdfdir', default='') def mergeall(filename, snrmin, snrmax, bdfdir):", "scan in scanlist: pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scanlist, snrmin=snrmin, snrmax=snrmax) @cli.command() @click.argument('filename', type=str)", "- %(message)s') logging.captureWarnings(True) logger = logging.getLogger(__name__) @click.group('rtpipe') def cli(): pass @cli.command() @click.argument('filename') @click.option('--paramfile',", "scanlist = sorted(set([int(fn.rstrip('.pkl').split('_sc')[1].split('seg')[0]) for fn in filelist])) except IndexError: logger.warn('Could not parse filenames", "type=bool, default=True, help='Create html version') @click.option('--basenb', type=str, default='', help='Full path to base notebook.", "help='Activegit repo for applying classifications') def nbcompile(filename, html, basenb, agdir): \"\"\" Compile the", "@click.argument('filename', type=str) @click.option('--scan', type=int, default=0) @click.option('--paramfile', type=str, default='rtpipe_cbe.conf') @click.option('--logfile', type=bool, default=False) @click.option('--bdfdir', default='')", "notebook into an analysis notebook for filename \"\"\" filename = os.path.abspath(filename) pc.nbcompile(os.path.dirname(filename), os.path.basename(filename),", "%(levelname)s - %(message)s') logging.captureWarnings(True) logger = logging.getLogger(__name__) @click.group('rtpipe') def cli(): pass @cli.command() @click.argument('filename')", "and return metadata for pipeline for first scan \"\"\" filename = os.path.abspath(filename) scans", "rt.set_pipeline(filename, scans.popitem()[0], paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) @cli.command() @click.argument('filename') @click.option('--snrmin', default=0.) @click.option('--snrmax', default=999.) 
@click.option('--bdfdir', default='')", "and merge files pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scans.keys()) else: logger.info('Scans, Target names:') logger.info('%s'", "logging.captureWarnings(True) logger = logging.getLogger(__name__) @click.group('rtpipe') def cli(): pass @cli.command() @click.argument('filename') @click.option('--paramfile', default='') @click.option('--bdfdir',", "one scan of filename filename is name of local sdm ('filename.GN' expected locally).", "!= 0: d = rt.set_pipeline(filename, scan, paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) rt.pipeline(d, range(d['nsegments'])) # clean", "filenames for scans. Looking over big range.') scanlist = range(bignumber) logger.info('Merging over scans", "pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scans.keys()) else: logger.info('Scans, Target names:') logger.info('%s' % str([(ss, scans[ss]['source']) for ss", "scan of filename filename is name of local sdm ('filename.GN' expected locally). 
scan", "range.') scanlist = range(bignumber) logger.info('Merging over scans {0}'.format(scanlist)) for scan in scanlist: pc.merge_segments(filename,", "paramfile, bdfdir, scan): \"\"\" Simple parse and return metadata for pipeline for first", "candidates to abs(snr) > threshold') def list_cands(candsfile, threshold): \"\"\" Print candidates above abs(snr)", "read(filename, paramfile, bdfdir, scan): \"\"\" Simple parse and return metadata for pipeline for", "\"\"\" Searches one scan of filename filename is name of local sdm ('filename.GN'", "for first scan \"\"\" filename = os.path.abspath(filename) scans = ps.read_scans(filename, bdfdir=bdfdir) logger.info('Scans, Target", "version') @click.option('--agdir', type=str, default='', help='Activegit repo for applying classifications') def nbcompile(filename, html, basenb,", "\"\"\" reproduce.list_cands(candsfile, threshold) @cli.command() @click.argument('candsfile', type=str) @click.argument('candnum', type=int) @click.option('--threshold', type=float, default=0., help='Filter candidates", "threshold \"\"\" reproduce.refine_cand(candsfile, candnum=candnum, threshold=threshold) @cli.command() @click.argument('candsfile', type=str) @click.option('--threshold', type=float, default=0., help='Filter candidates", "searchone(filename, scan, paramfile, logfile, bdfdir): \"\"\" Searches one scan of filename filename is", "html version') @click.option('--basenb', type=str, default='', help='Full path to base notebook. 
Default to distribution", "@click.option('--agdir', type=str, default='', help='Activegit repo for applying classifications') def nbcompile(filename, html, basenb, agdir):", "logger.info('%s' % str([(ss, scans[ss]['source']) for ss in scans])) logger.info('Example pipeline:') state = rt.set_pipeline(filename,", "scans = ps.read_scans(filename, bdfdir=bdfdir) scanlist = sorted(scans.keys()) else: logger.warn('Could not find file {0}.", "threshold') def refine_cands(candsfile, threshold): \"\"\" Run refinement search and save candidates for all", "pass @cli.command() @click.argument('filename') @click.option('--paramfile', default='') @click.option('--bdfdir', default='') @click.option('--scan', default=1) def read(filename, paramfile, bdfdir,", "to abs(snr) > threshold') def refine_cand(candsfile, candnum, threshold): \"\"\" Run refinement search for", "rt.set_pipeline(filename, scan, paramfile=paramfile, logfile=False) @cli.command() @click.argument('filename', type=str) @click.option('--scan', type=int, default=0) @click.option('--paramfile', type=str, default='rtpipe_cbe.conf')", "list_cands with abs(snr) > threshold \"\"\" reproduce.refine_cand(candsfile, candnum=candnum, threshold=threshold) @cli.command() @click.argument('candsfile', type=str) @click.option('--threshold',", "- %(name)s - %(levelname)s - %(message)s') logging.captureWarnings(True) logger = logging.getLogger(__name__) @click.group('rtpipe') def cli():", "type=bool, default=False) @click.option('--bdfdir', default='') def searchone(filename, scan, paramfile, logfile, bdfdir): \"\"\" Searches one", "range(d['nsegments'])) # clean up and merge files pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scans.keys()) else:", "ps.read_scans(filename, bdfdir=bdfdir) scanlist = sorted(scans.keys()) else: logger.warn('Could not find file {0}. 
Estimating scans", "= glob.glob(os.path.join(os.path.dirname(filename), '*{0}_sc*pkl'.format(os.path.basename(filename)))) try: scanlist = sorted(set([int(fn.rstrip('.pkl').split('_sc')[1].split('seg')[0]) for fn in filelist])) except IndexError:", "import rtpipe.parsecands as pc import rtpipe.parsesdm as ps import rtpipe.reproduce as reproduce import", "\"\"\" filename = os.path.abspath(filename) scans = ps.read_scans(filename, bdfdir=bdfdir) if scan != 0: d", "candsfile with snr > threshold \"\"\" reproduce.refine_cands(candsfile, threshold=threshold) if __name__ == '__main__': cli()", "@click.option('--bdfdir', default='') def searchone(filename, scan, paramfile, logfile, bdfdir): \"\"\" Searches one scan of", "find file {0}. Estimating scans from available files.'.format(filename)) filelist = glob.glob(os.path.join(os.path.dirname(filename), '*{0}_sc*pkl'.format(os.path.basename(filename)))) try:", "IndexError: logger.warn('Could not parse filenames for scans. Looking over big range.') scanlist =", "def list_cands(candsfile, threshold): \"\"\" Print candidates above abs(snr) in candsfile \"\"\" reproduce.list_cands(candsfile, threshold)", "notebook for filename \"\"\" filename = os.path.abspath(filename) pc.nbcompile(os.path.dirname(filename), os.path.basename(filename), html=html, basenb=basenb, agdir=agdir) @cli.command()", "scans.keys()) else: logger.info('Scans, Target names:') logger.info('%s' % str([(ss, scans[ss]['source']) for ss in scans]))", "d = rt.set_pipeline(filename, scan, paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) rt.pipeline(d, range(d['nsegments'])) # clean up and", "= sorted(scans.keys()) else: logger.warn('Could not find file {0}. Estimating scans from available files.'.format(filename))", "state = rt.set_pipeline(filename, scans.popitem()[0], paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) @cli.command() @click.argument('filename') @click.option('--snrmin', default=0.) 
@click.option('--snrmax', default=999.)", "rtpipe.parsesdm as ps import rtpipe.reproduce as reproduce import click, os, glob import logging", "default=False) @click.option('--bdfdir', default='') def searchone(filename, scan, paramfile, logfile, bdfdir): \"\"\" Searches one scan", "but will fall back to finding relevant files if it does not exist.", "type=str) @click.option('--scan', type=int, default=0) @click.option('--paramfile', type=str, default='rtpipe_cbe.conf') @click.option('--logfile', type=bool, default=False) @click.option('--bdfdir', default='') def", "available files.'.format(filename)) filelist = glob.glob(os.path.join(os.path.dirname(filename), '*{0}_sc*pkl'.format(os.path.basename(filename)))) try: scanlist = sorted(set([int(fn.rstrip('.pkl').split('_sc')[1].split('seg')[0]) for fn in", "first scan \"\"\" filename = os.path.abspath(filename) scans = ps.read_scans(filename, bdfdir=bdfdir) logger.info('Scans, Target names:')", "@click.option('--paramfile', type=str, default='rtpipe_cbe.conf') @click.option('--logfile', type=bool, default=False) @click.option('--bdfdir', default='') def searchone(filename, scan, paramfile, logfile,", "save candidates for all in candsfile with snr > threshold \"\"\" reproduce.refine_cands(candsfile, threshold=threshold)", "type=int, default=0) @click.option('--paramfile', type=str, default='rtpipe_cbe.conf') @click.option('--logfile', type=bool, default=False) @click.option('--bdfdir', default='') def searchone(filename, scan,", "to abs(snr) > threshold') def refine_cands(candsfile, threshold): \"\"\" Run refinement search and save", "default='rtpipe_cbe.conf') @click.option('--logfile', type=bool, default=False) @click.option('--bdfdir', default='') def searchone(filename, scan, paramfile, logfile, bdfdir): \"\"\"", "logger.warn('Could not parse filenames for scans. 
Looking over big range.') scanlist = range(bignumber)", "type=float, default=0., help='Filter candidates to abs(snr) > threshold') def list_cands(candsfile, threshold): \"\"\" Print", "parse and return metadata for pipeline for first scan \"\"\" filename = os.path.abspath(filename)", "= sorted(set([int(fn.rstrip('.pkl').split('_sc')[1].split('seg')[0]) for fn in filelist])) except IndexError: logger.warn('Could not parse filenames for", "os.path.abspath(filename) scans = ps.read_scans(filename, bdfdir=bdfdir) if scan != 0: d = rt.set_pipeline(filename, scan,", "not parse filenames for scans. Looking over big range.') scanlist = range(bignumber) logger.info('Merging", "@click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold') def refine_cands(candsfile, threshold): \"\"\"", "an analysis notebook for filename \"\"\" filename = os.path.abspath(filename) pc.nbcompile(os.path.dirname(filename), os.path.basename(filename), html=html, basenb=basenb,", "logfile=False) @cli.command() @click.argument('filename', type=str) @click.option('--scan', type=int, default=0) @click.option('--paramfile', type=str, default='rtpipe_cbe.conf') @click.option('--logfile', type=bool, default=False)", "basenb=basenb, agdir=agdir) @cli.command() @click.argument('candsfile', type=str) @click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) >", "candnum in list_cands with abs(snr) > threshold \"\"\" reproduce.refine_cand(candsfile, candnum=candnum, threshold=threshold) @cli.command() @click.argument('candsfile',", "> threshold') def refine_cands(candsfile, threshold): \"\"\" Run refinement search and save candidates for", "files over all scans Tries to find scans from filename, but will fall", "str([(ss, scans[ss]['source']) for ss in scans])) logger.info('Example pipeline:') state = rt.set_pipeline(filename, scans.popitem()[0], paramfile=paramfile,", "ss in scans])) logger.info('Example pipeline:') state = 
rt.set_pipeline(filename, scans.popitem()[0], paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) @cli.command()", "refine_cands(candsfile, threshold): \"\"\" Run refinement search and save candidates for all in candsfile", "in scans])) logger.info('Example pipeline:') state = rt.set_pipeline(filename, scans.popitem()[0], paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) @cli.command() @click.argument('filename')", "Merge cands/noise files over all scans Tries to find scans from filename, but", "rtpipe.RT as rt import rtpipe.parsecands as pc import rtpipe.parsesdm as ps import rtpipe.reproduce", "import rtpipe.RT as rt import rtpipe.parsecands as pc import rtpipe.parsesdm as ps import", "logger.warn('Could not find file {0}. Estimating scans from available files.'.format(filename)) filelist = glob.glob(os.path.join(os.path.dirname(filename),", "not find file {0}. Estimating scans from available files.'.format(filename)) filelist = glob.glob(os.path.join(os.path.dirname(filename), '*{0}_sc*pkl'.format(os.path.basename(filename))))", "into an analysis notebook for filename \"\"\" filename = os.path.abspath(filename) pc.nbcompile(os.path.dirname(filename), os.path.basename(filename), html=html,", "= ps.read_scans(filename, bdfdir=bdfdir) logger.info('Scans, Target names:') logger.info('%s' % str([(ss, scans[ss]['source']) for ss in", "import logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') logging.captureWarnings(True) logger =", "logfile=logfile) rt.pipeline(d, range(d['nsegments'])) # clean up and merge files pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename),", "relevant files if it does not exist. 
\"\"\" filename = os.path.abspath(filename) bignumber =", "@click.option('--scan', default=1) def read(filename, paramfile, bdfdir, scan): \"\"\" Simple parse and return metadata", "> threshold') def list_cands(candsfile, threshold): \"\"\" Print candidates above abs(snr) in candsfile \"\"\"", "def read(filename, paramfile, bdfdir, scan): \"\"\" Simple parse and return metadata for pipeline", "above abs(snr) in candsfile \"\"\" reproduce.list_cands(candsfile, threshold) @cli.command() @click.argument('candsfile', type=str) @click.argument('candnum', type=int) @click.option('--threshold',", "type=float, default=0., help='Filter candidates to abs(snr) > threshold') def refine_cands(candsfile, threshold): \"\"\" Run", "= 500 if os.path.exists(filename): scans = ps.read_scans(filename, bdfdir=bdfdir) scanlist = sorted(scans.keys()) else: logger.warn('Could", "logger.info('Scans, Target names:') logger.info('%s' % str([(ss, scans[ss]['source']) for ss in scans])) logger.info('Example pipeline:')", "logger.info('Merging over scans {0}'.format(scanlist)) for scan in scanlist: pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scanlist,", "glob import logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') logging.captureWarnings(True) logger", "default=0) @click.option('--paramfile', type=str, default='rtpipe_cbe.conf') @click.option('--logfile', type=bool, default=False) @click.option('--bdfdir', default='') def searchone(filename, scan, paramfile,", "= range(bignumber) logger.info('Merging over scans {0}'.format(scanlist)) for scan in scanlist: pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename),", "help='Filter candidates to abs(snr) > threshold') def list_cands(candsfile, threshold): \"\"\" Print candidates above", "candidates to abs(snr) > threshold') def refine_cand(candsfile, candnum, threshold): \"\"\" Run refinement search", 
"@click.option('--basenb', type=str, default='', help='Full path to base notebook. Default to distribution version') @click.option('--agdir',", "merge files pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scans.keys()) else: logger.info('Scans, Target names:') logger.info('%s' %", "to distribution version') @click.option('--agdir', type=str, default='', help='Activegit repo for applying classifications') def nbcompile(filename,", "str([(ss, scans[ss]['source']) for ss in scans])) logger.info('Example pipeline:') state = rt.set_pipeline(filename, scan, paramfile=paramfile,", "def nbcompile(filename, html, basenb, agdir): \"\"\" Compile the baseinteract.ipynb notebook into an analysis", "scans. Looking over big range.') scanlist = range(bignumber) logger.info('Merging over scans {0}'.format(scanlist)) for", "@click.argument('filename') @click.option('--paramfile', default='') @click.option('--bdfdir', default='') @click.option('--scan', default=1) def read(filename, paramfile, bdfdir, scan): \"\"\"", "is an sdm. \"\"\" filename = os.path.abspath(filename) scans = ps.read_scans(filename, bdfdir=bdfdir) if scan", "to search. if none provided, script prints all. assumes filename is an sdm.", "help='Full path to base notebook. Default to distribution version') @click.option('--agdir', type=str, default='', help='Activegit", "logfile, bdfdir): \"\"\" Searches one scan of filename filename is name of local", "provided, script prints all. assumes filename is an sdm. 
\"\"\" filename = os.path.abspath(filename)", "abs(snr) > threshold') def refine_cand(candsfile, candnum, threshold): \"\"\" Run refinement search for candnum", "files pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scans.keys()) else: logger.info('Scans, Target names:') logger.info('%s' % str([(ss,", "for applying classifications') def nbcompile(filename, html, basenb, agdir): \"\"\" Compile the baseinteract.ipynb notebook", "Run refinement search and save candidates for all in candsfile with snr >", "type=float, default=0., help='Filter candidates to abs(snr) > threshold') def refine_cand(candsfile, candnum, threshold): \"\"\"", "def searchone(filename, scan, paramfile, logfile, bdfdir): \"\"\" Searches one scan of filename filename", "basenb, agdir): \"\"\" Compile the baseinteract.ipynb notebook into an analysis notebook for filename", "\"\"\" Simple parse and return metadata for pipeline for first scan \"\"\" filename", "\"\"\" Merge cands/noise files over all scans Tries to find scans from filename,", "default='', help='Activegit repo for applying classifications') def nbcompile(filename, html, basenb, agdir): \"\"\" Compile", "filename = os.path.abspath(filename) scans = ps.read_scans(filename, bdfdir=bdfdir) logger.info('Scans, Target names:') logger.info('%s' % str([(ss,", "scan is scan number to search. if none provided, script prints all. 
assumes", "pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scans.keys()) else: logger.info('Scans, Target names:') logger.info('%s' % str([(ss, scans[ss]['source'])", "\"\"\" Run refinement search and save candidates for all in candsfile with snr", "def cli(): pass @cli.command() @click.argument('filename') @click.option('--paramfile', default='') @click.option('--bdfdir', default='') @click.option('--scan', default=1) def read(filename,", "scans from available files.'.format(filename)) filelist = glob.glob(os.path.join(os.path.dirname(filename), '*{0}_sc*pkl'.format(os.path.basename(filename)))) try: scanlist = sorted(set([int(fn.rstrip('.pkl').split('_sc')[1].split('seg')[0]) for", "@click.option('--bdfdir', default='') def mergeall(filename, snrmin, snrmax, bdfdir): \"\"\" Merge cands/noise files over all", "scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scanlist, snrmin=snrmin, snrmax=snrmax) @cli.command() @click.argument('filename', type=str) @click.option('--html', type=bool, default=True, help='Create", "prints all. assumes filename is an sdm. 
\"\"\" filename = os.path.abspath(filename) scans =", "range(bignumber) logger.info('Merging over scans {0}'.format(scanlist)) for scan in scanlist: pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename),", "Estimating scans from available files.'.format(filename)) filelist = glob.glob(os.path.join(os.path.dirname(filename), '*{0}_sc*pkl'.format(os.path.basename(filename)))) try: scanlist = sorted(set([int(fn.rstrip('.pkl').split('_sc')[1].split('seg')[0])", "threshold') def list_cands(candsfile, threshold): \"\"\" Print candidates above abs(snr) in candsfile \"\"\" reproduce.list_cands(candsfile,", "abs(snr) > threshold') def list_cands(candsfile, threshold): \"\"\" Print candidates above abs(snr) in candsfile", "from filename, but will fall back to finding relevant files if it does", "of filename filename is name of local sdm ('filename.GN' expected locally). scan is", "for all in candsfile with snr > threshold \"\"\" reproduce.refine_cands(candsfile, threshold=threshold) if __name__", "for filename \"\"\" filename = os.path.abspath(filename) pc.nbcompile(os.path.dirname(filename), os.path.basename(filename), html=html, basenb=basenb, agdir=agdir) @cli.command() @click.argument('candsfile',", "import rtpipe.parsesdm as ps import rtpipe.reproduce as reproduce import click, os, glob import", "with abs(snr) > threshold \"\"\" reproduce.refine_cand(candsfile, candnum=candnum, threshold=threshold) @cli.command() @click.argument('candsfile', type=str) @click.option('--threshold', type=float,", "0: d = rt.set_pipeline(filename, scan, paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) rt.pipeline(d, range(d['nsegments'])) # clean up", "help='Filter candidates to abs(snr) > threshold') def refine_cands(candsfile, threshold): \"\"\" Run refinement search", "over scans {0}'.format(scanlist)) for scan in scanlist: pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), 
os.path.basename(filename), scanlist, snrmin=snrmin,", "@click.argument('candnum', type=int) @click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold') def refine_cand(candsfile,", "glob.glob(os.path.join(os.path.dirname(filename), '*{0}_sc*pkl'.format(os.path.basename(filename)))) try: scanlist = sorted(set([int(fn.rstrip('.pkl').split('_sc')[1].split('seg')[0]) for fn in filelist])) except IndexError: logger.warn('Could", "logger.info('Example pipeline:') state = rt.set_pipeline(filename, scan, paramfile=paramfile, logfile=False) @cli.command() @click.argument('filename', type=str) @click.option('--scan', type=int,", "click, os, glob import logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')", "import rtpipe.reproduce as reproduce import click, os, glob import logging logging.basicConfig(level=logging.INFO, format='%(asctime)s -", "bdfdir, scan): \"\"\" Simple parse and return metadata for pipeline for first scan", "bdfdir=bdfdir) scanlist = sorted(scans.keys()) else: logger.warn('Could not find file {0}. 
Estimating scans from", "@click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold') def refine_cand(candsfile, candnum, threshold):", "pipeline:') state = rt.set_pipeline(filename, scan, paramfile=paramfile, logfile=False) @cli.command() @click.argument('filename', type=str) @click.option('--scan', type=int, default=0)", "Tries to find scans from filename, but will fall back to finding relevant", "@click.option('--logfile', type=bool, default=False) @click.option('--bdfdir', default='') def searchone(filename, scan, paramfile, logfile, bdfdir): \"\"\" Searches", "type=int) @click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold') def refine_cand(candsfile, candnum,", "scans = ps.read_scans(filename, bdfdir=bdfdir) if scan != 0: d = rt.set_pipeline(filename, scan, paramfile=paramfile,", "scans Tries to find scans from filename, but will fall back to finding", "local sdm ('filename.GN' expected locally). scan is scan number to search. if none", "ps import rtpipe.reproduce as reproduce import click, os, glob import logging logging.basicConfig(level=logging.INFO, format='%(asctime)s", "of local sdm ('filename.GN' expected locally). scan is scan number to search. if", "\"\"\" filename = os.path.abspath(filename) scans = ps.read_scans(filename, bdfdir=bdfdir) logger.info('Scans, Target names:') logger.info('%s' %", "search. if none provided, script prints all. assumes filename is an sdm. 
\"\"\"", "metadata for pipeline for first scan \"\"\" filename = os.path.abspath(filename) scans = ps.read_scans(filename,", "os.path.abspath(filename) scans = ps.read_scans(filename, bdfdir=bdfdir) logger.info('Scans, Target names:') logger.info('%s' % str([(ss, scans[ss]['source']) for", "rt.pipeline(d, range(d['nsegments'])) # clean up and merge files pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scans.keys())", "refine_cand(candsfile, candnum, threshold): \"\"\" Run refinement search for candnum in list_cands with abs(snr)", "filename \"\"\" filename = os.path.abspath(filename) pc.nbcompile(os.path.dirname(filename), os.path.basename(filename), html=html, basenb=basenb, agdir=agdir) @cli.command() @click.argument('candsfile', type=str)", "classifications') def nbcompile(filename, html, basenb, agdir): \"\"\" Compile the baseinteract.ipynb notebook into an", "snrmax, bdfdir): \"\"\" Merge cands/noise files over all scans Tries to find scans", "an sdm. \"\"\" filename = os.path.abspath(filename) scans = ps.read_scans(filename, bdfdir=bdfdir) if scan !=", "filelist])) except IndexError: logger.warn('Could not parse filenames for scans. Looking over big range.')", "for scans. Looking over big range.') scanlist = range(bignumber) logger.info('Merging over scans {0}'.format(scanlist))", "will fall back to finding relevant files if it does not exist. 
\"\"\"", "= os.path.abspath(filename) pc.nbcompile(os.path.dirname(filename), os.path.basename(filename), html=html, basenb=basenb, agdir=agdir) @cli.command() @click.argument('candsfile', type=str) @click.option('--threshold', type=float, default=0.,", "reproduce import click, os, glob import logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s", "bdfdir=bdfdir) if scan != 0: d = rt.set_pipeline(filename, scan, paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) rt.pipeline(d,", "else: logger.info('Scans, Target names:') logger.info('%s' % str([(ss, scans[ss]['source']) for ss in scans])) logger.info('Example", "@cli.command() @click.argument('filename') @click.option('--paramfile', default='') @click.option('--bdfdir', default='') @click.option('--scan', default=1) def read(filename, paramfile, bdfdir, scan):", "@cli.command() @click.argument('candsfile', type=str) @click.argument('candnum', type=int) @click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) >", "fn in filelist])) except IndexError: logger.warn('Could not parse filenames for scans. Looking over", "default=999.) @click.option('--bdfdir', default='') def mergeall(filename, snrmin, snrmax, bdfdir): \"\"\" Merge cands/noise files over", "from available files.'.format(filename)) filelist = glob.glob(os.path.join(os.path.dirname(filename), '*{0}_sc*pkl'.format(os.path.basename(filename)))) try: scanlist = sorted(set([int(fn.rstrip('.pkl').split('_sc')[1].split('seg')[0]) for fn", "scans {0}'.format(scanlist)) for scan in scanlist: pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scanlist, snrmin=snrmin, snrmax=snrmax)", "version') @click.option('--basenb', type=str, default='', help='Full path to base notebook. 
Default to distribution version')", "@click.argument('candsfile', type=str) @click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold') def list_cands(candsfile,", "rtpipe.parsecands as pc import rtpipe.parsesdm as ps import rtpipe.reproduce as reproduce import click,", "mergeall(filename, snrmin, snrmax, bdfdir): \"\"\" Merge cands/noise files over all scans Tries to", "filename is name of local sdm ('filename.GN' expected locally). scan is scan number", "@click.option('--html', type=bool, default=True, help='Create html version') @click.option('--basenb', type=str, default='', help='Full path to base", "fileroot=os.path.basename(filename), logfile=logfile) rt.pipeline(d, range(d['nsegments'])) # clean up and merge files pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename),", "@click.group('rtpipe') def cli(): pass @cli.command() @click.argument('filename') @click.option('--paramfile', default='') @click.option('--bdfdir', default='') @click.option('--scan', default=1) def", "Searches one scan of filename filename is name of local sdm ('filename.GN' expected", "names:') logger.info('%s' % str([(ss, scans[ss]['source']) for ss in scans])) logger.info('Example pipeline:') state =", "snrmin, snrmax, bdfdir): \"\"\" Merge cands/noise files over all scans Tries to find", "@click.option('--paramfile', default='') @click.option('--bdfdir', default='') @click.option('--scan', default=1) def read(filename, paramfile, bdfdir, scan): \"\"\" Simple", "scanlist = sorted(scans.keys()) else: logger.warn('Could not find file {0}. Estimating scans from available", "Compile the baseinteract.ipynb notebook into an analysis notebook for filename \"\"\" filename =", "scans from filename, but will fall back to finding relevant files if it", "in filelist])) except IndexError: logger.warn('Could not parse filenames for scans. 
Looking over big", "candidates to abs(snr) > threshold') def refine_cands(candsfile, threshold): \"\"\" Run refinement search and", "def mergeall(filename, snrmin, snrmax, bdfdir): \"\"\" Merge cands/noise files over all scans Tries", "the baseinteract.ipynb notebook into an analysis notebook for filename \"\"\" filename = os.path.abspath(filename)", "@click.option('--snrmax', default=999.) @click.option('--bdfdir', default='') def mergeall(filename, snrmin, snrmax, bdfdir): \"\"\" Merge cands/noise files", "if os.path.exists(filename): scans = ps.read_scans(filename, bdfdir=bdfdir) scanlist = sorted(scans.keys()) else: logger.warn('Could not find", "back to finding relevant files if it does not exist. \"\"\" filename =", "os, glob import logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') logging.captureWarnings(True)", "paramfile=paramfile, logfile=False) @cli.command() @click.argument('filename', type=str) @click.option('--scan', type=int, default=0) @click.option('--paramfile', type=str, default='rtpipe_cbe.conf') @click.option('--logfile', type=bool,", "type=str) @click.option('--html', type=bool, default=True, help='Create html version') @click.option('--basenb', type=str, default='', help='Full path to", "500 if os.path.exists(filename): scans = ps.read_scans(filename, bdfdir=bdfdir) scanlist = sorted(scans.keys()) else: logger.warn('Could not", "def refine_cands(candsfile, threshold): \"\"\" Run refinement search and save candidates for all in", "all in candsfile with snr > threshold \"\"\" reproduce.refine_cands(candsfile, threshold=threshold) if __name__ ==", "repo for applying classifications') def nbcompile(filename, html, basenb, agdir): \"\"\" Compile the baseinteract.ipynb", "as reproduce import click, os, glob import logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s -", "os.path.exists(filename): scans = ps.read_scans(filename, bdfdir=bdfdir) scanlist = 
sorted(scans.keys()) else: logger.warn('Could not find file", "logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') logging.captureWarnings(True) logger = logging.getLogger(__name__)", "bdfdir=bdfdir) logger.info('Scans, Target names:') logger.info('%s' % str([(ss, scans[ss]['source']) for ss in scans])) logger.info('Example", "to finding relevant files if it does not exist. \"\"\" filename = os.path.abspath(filename)", "html, basenb, agdir): \"\"\" Compile the baseinteract.ipynb notebook into an analysis notebook for", "files.'.format(filename)) filelist = glob.glob(os.path.join(os.path.dirname(filename), '*{0}_sc*pkl'.format(os.path.basename(filename)))) try: scanlist = sorted(set([int(fn.rstrip('.pkl').split('_sc')[1].split('seg')[0]) for fn in filelist]))", "file {0}. Estimating scans from available files.'.format(filename)) filelist = glob.glob(os.path.join(os.path.dirname(filename), '*{0}_sc*pkl'.format(os.path.basename(filename)))) try: scanlist", "refinement search for candnum in list_cands with abs(snr) > threshold \"\"\" reproduce.refine_cand(candsfile, candnum=candnum,", "state = rt.set_pipeline(filename, scan, paramfile=paramfile, logfile=False) @cli.command() @click.argument('filename', type=str) @click.option('--scan', type=int, default=0) @click.option('--paramfile',", "base notebook. Default to distribution version') @click.option('--agdir', type=str, default='', help='Activegit repo for applying", "scan): \"\"\" Simple parse and return metadata for pipeline for first scan \"\"\"", "'*{0}_sc*pkl'.format(os.path.basename(filename)))) try: scanlist = sorted(set([int(fn.rstrip('.pkl').split('_sc')[1].split('seg')[0]) for fn in filelist])) except IndexError: logger.warn('Could not", "threshold): \"\"\" Run refinement search for candnum in list_cands with abs(snr) > threshold", "path to base notebook. 
Default to distribution version') @click.option('--agdir', type=str, default='', help='Activegit repo", "candidates above abs(snr) in candsfile \"\"\" reproduce.list_cands(candsfile, threshold) @cli.command() @click.argument('candsfile', type=str) @click.argument('candnum', type=int)", "Looking over big range.') scanlist = range(bignumber) logger.info('Merging over scans {0}'.format(scanlist)) for scan", "@click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold') def list_cands(candsfile, threshold): \"\"\"", "os.path.abspath(filename) pc.nbcompile(os.path.dirname(filename), os.path.basename(filename), html=html, basenb=basenb, agdir=agdir) @cli.command() @click.argument('candsfile', type=str) @click.option('--threshold', type=float, default=0., help='Filter", "list_cands(candsfile, threshold): \"\"\" Print candidates above abs(snr) in candsfile \"\"\" reproduce.list_cands(candsfile, threshold) @cli.command()", "scans])) logger.info('Example pipeline:') state = rt.set_pipeline(filename, scans.popitem()[0], paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) @cli.command() @click.argument('filename') @click.option('--snrmin',", "up and merge files pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scans.keys()) else: logger.info('Scans, Target names:')", "@cli.command() @click.argument('filename', type=str) @click.option('--scan', type=int, default=0) @click.option('--paramfile', type=str, default='rtpipe_cbe.conf') @click.option('--logfile', type=bool, default=False) @click.option('--bdfdir',", "for ss in scans])) logger.info('Example pipeline:') state = rt.set_pipeline(filename, scan, paramfile=paramfile, logfile=False) @cli.command()", "filename = os.path.abspath(filename) pc.nbcompile(os.path.dirname(filename), os.path.basename(filename), html=html, basenb=basenb, agdir=agdir) @cli.command() @click.argument('candsfile', type=str) 
@click.option('--threshold', type=float,", "scans[ss]['source']) for ss in scans])) logger.info('Example pipeline:') state = rt.set_pipeline(filename, scans.popitem()[0], paramfile=paramfile, fileroot=os.path.basename(filename),", "@click.argument('filename', type=str) @click.option('--html', type=bool, default=True, help='Create html version') @click.option('--basenb', type=str, default='', help='Full path", "\"\"\" Compile the baseinteract.ipynb notebook into an analysis notebook for filename \"\"\" filename", "Default to distribution version') @click.option('--agdir', type=str, default='', help='Activegit repo for applying classifications') def", "rtpipe.reproduce as reproduce import click, os, glob import logging logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s", "threshold) @cli.command() @click.argument('candsfile', type=str) @click.argument('candnum', type=int) @click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr)", "if scan != 0: d = rt.set_pipeline(filename, scan, paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) rt.pipeline(d, range(d['nsegments']))", "search for candnum in list_cands with abs(snr) > threshold \"\"\" reproduce.refine_cand(candsfile, candnum=candnum, threshold=threshold)", "sdm ('filename.GN' expected locally). scan is scan number to search. if none provided,", "@click.argument('candsfile', type=str) @click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold') def refine_cands(candsfile,", "over big range.') scanlist = range(bignumber) logger.info('Merging over scans {0}'.format(scanlist)) for scan in", "name of local sdm ('filename.GN' expected locally). scan is scan number to search.", "for fn in filelist])) except IndexError: logger.warn('Could not parse filenames for scans. 
Looking", "scan, paramfile=paramfile, logfile=False) @cli.command() @click.argument('filename', type=str) @click.option('--scan', type=int, default=0) @click.option('--paramfile', type=str, default='rtpipe_cbe.conf') @click.option('--logfile',", "filename is an sdm. \"\"\" filename = os.path.abspath(filename) scans = ps.read_scans(filename, bdfdir=bdfdir) if", "notebook. Default to distribution version') @click.option('--agdir', type=str, default='', help='Activegit repo for applying classifications')", "type=str, default='', help='Activegit repo for applying classifications') def nbcompile(filename, html, basenb, agdir): \"\"\"", "type=str, default='', help='Full path to base notebook. Default to distribution version') @click.option('--agdir', type=str,", "html=html, basenb=basenb, agdir=agdir) @cli.command() @click.argument('candsfile', type=str) @click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr)", "= ps.read_scans(filename, bdfdir=bdfdir) if scan != 0: d = rt.set_pipeline(filename, scan, paramfile=paramfile, fileroot=os.path.basename(filename),", "= rt.set_pipeline(filename, scans.popitem()[0], paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) @cli.command() @click.argument('filename') @click.option('--snrmin', default=0.) @click.option('--snrmax', default=999.) @click.option('--bdfdir',", "logger.info('Example pipeline:') state = rt.set_pipeline(filename, scans.popitem()[0], paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) @cli.command() @click.argument('filename') @click.option('--snrmin', default=0.)", "reproduce.refine_cand(candsfile, candnum=candnum, threshold=threshold) @cli.command() @click.argument('candsfile', type=str) @click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr)", "default='') @click.option('--scan', default=1) def read(filename, paramfile, bdfdir, scan): \"\"\" Simple parse and return", "assumes filename is an sdm. 
\"\"\" filename = os.path.abspath(filename) scans = ps.read_scans(filename, bdfdir=bdfdir)", "agdir): \"\"\" Compile the baseinteract.ipynb notebook into an analysis notebook for filename \"\"\"", "@click.argument('filename') @click.option('--snrmin', default=0.) @click.option('--snrmax', default=999.) @click.option('--bdfdir', default='') def mergeall(filename, snrmin, snrmax, bdfdir): \"\"\"", "\"\"\" Print candidates above abs(snr) in candsfile \"\"\" reproduce.list_cands(candsfile, threshold) @cli.command() @click.argument('candsfile', type=str)", "> threshold \"\"\" reproduce.refine_cand(candsfile, candnum=candnum, threshold=threshold) @cli.command() @click.argument('candsfile', type=str) @click.option('--threshold', type=float, default=0., help='Filter", "default=0., help='Filter candidates to abs(snr) > threshold') def refine_cands(candsfile, threshold): \"\"\" Run refinement", "as rt import rtpipe.parsecands as pc import rtpipe.parsesdm as ps import rtpipe.reproduce as", "to find scans from filename, but will fall back to finding relevant files", "sorted(set([int(fn.rstrip('.pkl').split('_sc')[1].split('seg')[0]) for fn in filelist])) except IndexError: logger.warn('Could not parse filenames for scans.", "%(name)s - %(levelname)s - %(message)s') logging.captureWarnings(True) logger = logging.getLogger(__name__) @click.group('rtpipe') def cli(): pass", "sdm. \"\"\" filename = os.path.abspath(filename) scans = ps.read_scans(filename, bdfdir=bdfdir) if scan != 0:", "@cli.command() @click.argument('filename', type=str) @click.option('--html', type=bool, default=True, help='Create html version') @click.option('--basenb', type=str, default='', help='Full", "bdfdir): \"\"\" Searches one scan of filename filename is name of local sdm", "if none provided, script prints all. assumes filename is an sdm. 
\"\"\" filename", "pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scanlist, snrmin=snrmin, snrmax=snrmax) @cli.command() @click.argument('filename', type=str) @click.option('--html', type=bool, default=True, help='Create html", "scans.popitem()[0], paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) @cli.command() @click.argument('filename') @click.option('--snrmin', default=0.) @click.option('--snrmax', default=999.) @click.option('--bdfdir', default='') def", "{0}'.format(scanlist)) for scan in scanlist: pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scanlist, snrmin=snrmin, snrmax=snrmax) @cli.command()", "os.path.basename(filename), html=html, basenb=basenb, agdir=agdir) @cli.command() @click.argument('candsfile', type=str) @click.option('--threshold', type=float, default=0., help='Filter candidates to", "as pc import rtpipe.parsesdm as ps import rtpipe.reproduce as reproduce import click, os,", "@click.option('--bdfdir', default='') @click.option('--scan', default=1) def read(filename, paramfile, bdfdir, scan): \"\"\" Simple parse and", "= rt.set_pipeline(filename, scan, paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) rt.pipeline(d, range(d['nsegments'])) # clean up and merge", "= os.path.abspath(filename) scans = ps.read_scans(filename, bdfdir=bdfdir) logger.info('Scans, Target names:') logger.info('%s' % str([(ss, scans[ss]['source'])", "default='') def searchone(filename, scan, paramfile, logfile, bdfdir): \"\"\" Searches one scan of filename", "candsfile \"\"\" reproduce.list_cands(candsfile, threshold) @cli.command() @click.argument('candsfile', type=str) @click.argument('candnum', type=int) @click.option('--threshold', type=float, default=0., help='Filter", "% str([(ss, scans[ss]['source']) for ss in scans])) logger.info('Example pipeline:') state = rt.set_pipeline(filename, scan,", "except IndexError: logger.warn('Could not 
parse filenames for scans. Looking over big range.') scanlist", "\"\"\" filename = os.path.abspath(filename) pc.nbcompile(os.path.dirname(filename), os.path.basename(filename), html=html, basenb=basenb, agdir=agdir) @cli.command() @click.argument('candsfile', type=str) @click.option('--threshold',", "scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scans.keys()) else: logger.info('Scans, Target names:') logger.info('%s' % str([(ss, scans[ss]['source']) for", "cli(): pass @cli.command() @click.argument('filename') @click.option('--paramfile', default='') @click.option('--bdfdir', default='') @click.option('--scan', default=1) def read(filename, paramfile,", "try: scanlist = sorted(set([int(fn.rstrip('.pkl').split('_sc')[1].split('seg')[0]) for fn in filelist])) except IndexError: logger.warn('Could not parse", "default=0., help='Filter candidates to abs(snr) > threshold') def list_cands(candsfile, threshold): \"\"\" Print candidates", "none provided, script prints all. assumes filename is an sdm. \"\"\" filename =", "\"\"\" Run refinement search for candnum in list_cands with abs(snr) > threshold \"\"\"", "@click.option('--scan', type=int, default=0) @click.option('--paramfile', type=str, default='rtpipe_cbe.conf') @click.option('--logfile', type=bool, default=False) @click.option('--bdfdir', default='') def searchone(filename,", "paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) rt.pipeline(d, range(d['nsegments'])) # clean up and merge files pc.merge_segments(filename, scan)", "number to search. if none provided, script prints all. assumes filename is an", "in scans])) logger.info('Example pipeline:') state = rt.set_pipeline(filename, scan, paramfile=paramfile, logfile=False) @cli.command() @click.argument('filename', type=str)", "is scan number to search. if none provided, script prints all. 
assumes filename", "type=str) @click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold') def refine_cands(candsfile, threshold):", "it does not exist. \"\"\" filename = os.path.abspath(filename) bignumber = 500 if os.path.exists(filename):", "agdir=agdir) @cli.command() @click.argument('candsfile', type=str) @click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold')", "fall back to finding relevant files if it does not exist. \"\"\" filename", "snrmax=snrmax) @cli.command() @click.argument('filename', type=str) @click.option('--html', type=bool, default=True, help='Create html version') @click.option('--basenb', type=str, default='',", "ss in scans])) logger.info('Example pipeline:') state = rt.set_pipeline(filename, scan, paramfile=paramfile, logfile=False) @cli.command() @click.argument('filename',", "pipeline:') state = rt.set_pipeline(filename, scans.popitem()[0], paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) @cli.command() @click.argument('filename') @click.option('--snrmin', default=0.) @click.option('--snrmax',", "= ps.read_scans(filename, bdfdir=bdfdir) scanlist = sorted(scans.keys()) else: logger.warn('Could not find file {0}. Estimating", "filename filename is name of local sdm ('filename.GN' expected locally). scan is scan", "format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') logging.captureWarnings(True) logger = logging.getLogger(__name__) @click.group('rtpipe') def", "threshold') def refine_cand(candsfile, candnum, threshold): \"\"\" Run refinement search for candnum in list_cands", "nbcompile(filename, html, basenb, agdir): \"\"\" Compile the baseinteract.ipynb notebook into an analysis notebook", "for candnum in list_cands with abs(snr) > threshold \"\"\" reproduce.refine_cand(candsfile, candnum=candnum, threshold=threshold) @cli.command()", "expected locally). scan is scan number to search. 
if none provided, script prints", "find scans from filename, but will fall back to finding relevant files if", "scans[ss]['source']) for ss in scans])) logger.info('Example pipeline:') state = rt.set_pipeline(filename, scan, paramfile=paramfile, logfile=False)", "all. assumes filename is an sdm. \"\"\" filename = os.path.abspath(filename) scans = ps.read_scans(filename,", "files if it does not exist. \"\"\" filename = os.path.abspath(filename) bignumber = 500", "= os.path.abspath(filename) scans = ps.read_scans(filename, bdfdir=bdfdir) if scan != 0: d = rt.set_pipeline(filename,", "pipeline for first scan \"\"\" filename = os.path.abspath(filename) scans = ps.read_scans(filename, bdfdir=bdfdir) logger.info('Scans,", "baseinteract.ipynb notebook into an analysis notebook for filename \"\"\" filename = os.path.abspath(filename) pc.nbcompile(os.path.dirname(filename),", "Print candidates above abs(snr) in candsfile \"\"\" reproduce.list_cands(candsfile, threshold) @cli.command() @click.argument('candsfile', type=str) @click.argument('candnum',", "big range.') scanlist = range(bignumber) logger.info('Merging over scans {0}'.format(scanlist)) for scan in scanlist:", "pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scanlist, snrmin=snrmin, snrmax=snrmax) @cli.command() @click.argument('filename', type=str) @click.option('--html', type=bool, default=True,", "filename, but will fall back to finding relevant files if it does not", "scan number to search. if none provided, script prints all. 
assumes filename is", "Simple parse and return metadata for pipeline for first scan \"\"\" filename =", "filelist = glob.glob(os.path.join(os.path.dirname(filename), '*{0}_sc*pkl'.format(os.path.basename(filename)))) try: scanlist = sorted(set([int(fn.rstrip('.pkl').split('_sc')[1].split('seg')[0]) for fn in filelist])) except", "> threshold') def refine_cand(candsfile, candnum, threshold): \"\"\" Run refinement search for candnum in", "- %(levelname)s - %(message)s') logging.captureWarnings(True) logger = logging.getLogger(__name__) @click.group('rtpipe') def cli(): pass @cli.command()", "{0}. Estimating scans from available files.'.format(filename)) filelist = glob.glob(os.path.join(os.path.dirname(filename), '*{0}_sc*pkl'.format(os.path.basename(filename)))) try: scanlist =", "scanlist = range(bignumber) logger.info('Merging over scans {0}'.format(scanlist)) for scan in scanlist: pc.merge_segments(filename, scan)", "abs(snr) > threshold') def refine_cands(candsfile, threshold): \"\"\" Run refinement search and save candidates", "scan, paramfile=paramfile, fileroot=os.path.basename(filename), logfile=logfile) rt.pipeline(d, range(d['nsegments'])) # clean up and merge files pc.merge_segments(filename,", "threshold): \"\"\" Print candidates above abs(snr) in candsfile \"\"\" reproduce.list_cands(candsfile, threshold) @cli.command() @click.argument('candsfile',", "filename = os.path.abspath(filename) scans = ps.read_scans(filename, bdfdir=bdfdir) if scan != 0: d =", "bdfdir): \"\"\" Merge cands/noise files over all scans Tries to find scans from", "default=1) def read(filename, paramfile, bdfdir, scan): \"\"\" Simple parse and return metadata for", "help='Filter candidates to abs(snr) > threshold') def refine_cand(candsfile, candnum, threshold): \"\"\" Run refinement", "\"\"\" reproduce.refine_cand(candsfile, candnum=candnum, threshold=threshold) @cli.command() @click.argument('candsfile', type=str) @click.option('--threshold', type=float, default=0., help='Filter 
candidates to", "candidates for all in candsfile with snr > threshold \"\"\" reproduce.refine_cands(candsfile, threshold=threshold) if", "in scanlist: pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scanlist, snrmin=snrmin, snrmax=snrmax) @cli.command() @click.argument('filename', type=str) @click.option('--html',", "logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') logging.captureWarnings(True) logger = logging.getLogger(__name__) @click.group('rtpipe')", "filename = os.path.abspath(filename) bignumber = 500 if os.path.exists(filename): scans = ps.read_scans(filename, bdfdir=bdfdir) scanlist", "os.path.basename(filename), scans.keys()) else: logger.info('Scans, Target names:') logger.info('%s' % str([(ss, scans[ss]['source']) for ss in", "default=0.) @click.option('--snrmax', default=999.) @click.option('--bdfdir', default='') def mergeall(filename, snrmin, snrmax, bdfdir): \"\"\" Merge cands/noise", "analysis notebook for filename \"\"\" filename = os.path.abspath(filename) pc.nbcompile(os.path.dirname(filename), os.path.basename(filename), html=html, basenb=basenb, agdir=agdir)", "does not exist. 
\"\"\" filename = os.path.abspath(filename) bignumber = 500 if os.path.exists(filename): scans", "def refine_cand(candsfile, candnum, threshold): \"\"\" Run refinement search for candnum in list_cands with", "scanlist, snrmin=snrmin, snrmax=snrmax) @cli.command() @click.argument('filename', type=str) @click.option('--html', type=bool, default=True, help='Create html version') @click.option('--basenb',", "abs(snr) in candsfile \"\"\" reproduce.list_cands(candsfile, threshold) @cli.command() @click.argument('candsfile', type=str) @click.argument('candnum', type=int) @click.option('--threshold', type=float,", "logging.getLogger(__name__) @click.group('rtpipe') def cli(): pass @cli.command() @click.argument('filename') @click.option('--paramfile', default='') @click.option('--bdfdir', default='') @click.option('--scan', default=1)", "clean up and merge files pc.merge_segments(filename, scan) pc.merge_scans(os.path.dirname(filename), os.path.basename(filename), scans.keys()) else: logger.info('Scans, Target", "@cli.command() @click.argument('candsfile', type=str) @click.option('--threshold', type=float, default=0., help='Filter candidates to abs(snr) > threshold') def", "and save candidates for all in candsfile with snr > threshold \"\"\" reproduce.refine_cands(candsfile,", "scan \"\"\" filename = os.path.abspath(filename) scans = ps.read_scans(filename, bdfdir=bdfdir) logger.info('Scans, Target names:') logger.info('%s'", "\"\"\" filename = os.path.abspath(filename) bignumber = 500 if os.path.exists(filename): scans = ps.read_scans(filename, bdfdir=bdfdir)" ]
[ "SECRET KEY, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=secret key\", 'User-Agent': \"PostmanRuntime/7.15.0\", 'Accept': \"*/*\", 'Cache-Control': \"no-cache\", 'Postman-Token': \"<PASSWORD>\",", "http import requests r=requests.get(\"https://raw.githubusercontent.com/becloudready/snowflake-tutorials/master/dataset/employees01.csv\") r.status_code ##200 status ok, it work ##403 athorization denied", "coding: utf-8 -*- \"\"\" Created on Wed Jul 3 14:45:54 2019 @author: deborahedds", "'User-Agent': \"PostmanRuntime/7.15.0\", 'Accept': \"*/*\", 'Cache-Control': \"no-cache\", 'Postman-Token': \"<PASSWORD>\", 'accept-encoding': \"gzip, deflate\", 'Connection': \"keep-alive\",", "= { 'Host': \"collabera-aws-training.s3.amazonaws.com\", 'X-Amz-Content-Sha256': \"e3numbergeneratedbypostman55\", 'X-Amz-Date': \"20190703T185532Z\", 'Authorization': \"AWS SECRET KEY, SignedHeaders=host;x-amz-content-sha256;x-amz-date,", "file r.text ###Get the second from the last row r.text.split('\\n')[-2] ##write into a", "date import datetime as dt dt.datetime.today() ##print today's date using ddmmyy format ddmmyy=print(dt.datetime.now().strftime('%d-%m-%y'))", "into a file a=\"/Users/deborahedds/Downloads/testfile04\" f1=open(a, \"w\") f1.write(r.text.split('\\n')[-2]) f1.close() ##import from s3 import requests", "r.text.split('\\n')[-2] ##write into a file a=\"/Users/deborahedds/Downloads/testfile04\" f1=open(a, \"w\") f1.write(r.text.split('\\n')[-2]) f1.close() ##import from s3", "###import datetime and get today's date import datetime as dt dt.datetime.today() ##print today's", "Signature=secret key\", 'User-Agent': \"PostmanRuntime/7.15.0\", 'Accept': \"*/*\", 'Cache-Control': \"no-cache\", 'Postman-Token': \"<PASSWORD>\", 'accept-encoding': \"gzip, deflate\",", "athorization denied ##500 server ##read as a text file r.text ###Get the second", "response = requests.request(\"GET\", url, headers=headers) print(response.text) ##get the second to last row number", 
"{ 'Host': \"collabera-aws-training.s3.amazonaws.com\", 'X-Amz-Content-Sha256': \"e3numbergeneratedbypostman55\", 'X-Amz-Date': \"20190703T185532Z\", 'Authorization': \"AWS SECRET KEY, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=secret", "second from the last row r.text.split('\\n')[-2] ##write into a file a=\"/Users/deborahedds/Downloads/testfile04\" f1=open(a, \"w\")", "\"e3numbergeneratedbypostman55\", 'X-Amz-Date': \"20190703T185532Z\", 'Authorization': \"AWS SECRET KEY, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=secret key\", 'User-Agent': \"PostmanRuntime/7.15.0\", 'Accept':", "deborahedds \"\"\" ###get files from http import requests r=requests.get(\"https://raw.githubusercontent.com/becloudready/snowflake-tutorials/master/dataset/employees01.csv\") r.status_code ##200 status ok,", "'X-Amz-Date': \"20190703T185532Z\", 'Authorization': \"AWS SECRET KEY, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=secret key\", 'User-Agent': \"PostmanRuntime/7.15.0\", 'Accept': \"*/*\",", "a=\"/Users/deborahedds/Downloads/testfile06\" f1=open(a, \"w\") f1.write(str(int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2)) f1.close() ###import datetime and get today's date import", "status ok, it work ##403 athorization denied ##500 server ##read as a text", "second to last row number 177 +2 int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2 a=\"/Users/deborahedds/Downloads/testfile06\" f1=open(a, \"w\") f1.write(str(int(response.text.split('\\n')[-2].split(',')[3].split()[0])", "today's date import datetime as dt dt.datetime.today() ##print today's date using ddmmyy format", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on Wed Jul 3", "row number 177 +2 int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2 a=\"/Users/deborahedds/Downloads/testfile06\" f1=open(a, \"w\") f1.write(str(int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2)) f1.close() ###import", 
"import requests r=requests.get(\"https://raw.githubusercontent.com/becloudready/snowflake-tutorials/master/dataset/employees01.csv\") r.status_code ##200 status ok, it work ##403 athorization denied ##500", "\"20190703T185532Z\", 'Authorization': \"AWS SECRET KEY, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=secret key\", 'User-Agent': \"PostmanRuntime/7.15.0\", 'Accept': \"*/*\", 'Cache-Control':", "ok, it work ##403 athorization denied ##500 server ##read as a text file", "KEY, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=secret key\", 'User-Agent': \"PostmanRuntime/7.15.0\", 'Accept': \"*/*\", 'Cache-Control': \"no-cache\", 'Postman-Token': \"<PASSWORD>\", 'accept-encoding':", "= requests.request(\"GET\", url, headers=headers) print(response.text) ##get the second to last row number 177", "on Wed Jul 3 14:45:54 2019 @author: deborahedds \"\"\" ###get files from http", "the second from the last row r.text.split('\\n')[-2] ##write into a file a=\"/Users/deborahedds/Downloads/testfile04\" f1=open(a,", "import requests url = \"https://collabera-aws-training.s3.amazonaws.com/employees01.csv\" headers = { 'Host': \"collabera-aws-training.s3.amazonaws.com\", 'X-Amz-Content-Sha256': \"e3numbergeneratedbypostman55\", 'X-Amz-Date':", "##200 status ok, it work ##403 athorization denied ##500 server ##read as a", "server ##read as a text file r.text ###Get the second from the last", "denied ##500 server ##read as a text file r.text ###Get the second from", "\"\"\" Created on Wed Jul 3 14:45:54 2019 @author: deborahedds \"\"\" ###get files", "f1.write(str(int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2)) f1.close() ###import datetime and get today's date import datetime as dt", "deflate\", 'Connection': \"keep-alive\", 'cache-control': \"no-cache\" } response = requests.request(\"GET\", url, headers=headers) print(response.text) ##get", "f1=open(a, \"w\") 
f1.write(str(int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2)) f1.close() ###import datetime and get today's date import datetime", "r.text ###Get the second from the last row r.text.split('\\n')[-2] ##write into a file", "url, headers=headers) print(response.text) ##get the second to last row number 177 +2 int(response.text.split('\\n')[-2].split(',')[3].split()[0])", "a=\"/Users/deborahedds/Downloads/testfile04\" f1=open(a, \"w\") f1.write(r.text.split('\\n')[-2]) f1.close() ##import from s3 import requests url = \"https://collabera-aws-training.s3.amazonaws.com/employees01.csv\"", "177 +2 int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2 a=\"/Users/deborahedds/Downloads/testfile06\" f1=open(a, \"w\") f1.write(str(int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2)) f1.close() ###import datetime and", "text file r.text ###Get the second from the last row r.text.split('\\n')[-2] ##write into", "'Accept': \"*/*\", 'Cache-Control': \"no-cache\", 'Postman-Token': \"<PASSWORD>\", 'accept-encoding': \"gzip, deflate\", 'Connection': \"keep-alive\", 'cache-control': \"no-cache\"", "##get the second to last row number 177 +2 int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2 a=\"/Users/deborahedds/Downloads/testfile06\" f1=open(a,", "14:45:54 2019 @author: deborahedds \"\"\" ###get files from http import requests r=requests.get(\"https://raw.githubusercontent.com/becloudready/snowflake-tutorials/master/dataset/employees01.csv\") r.status_code", "'cache-control': \"no-cache\" } response = requests.request(\"GET\", url, headers=headers) print(response.text) ##get the second to", "##read as a text file r.text ###Get the second from the last row", "} response = requests.request(\"GET\", url, headers=headers) print(response.text) ##get the second to last row", "-*- \"\"\" Created on Wed Jul 3 14:45:54 2019 @author: deborahedds \"\"\" ###get", "f1.write(r.text.split('\\n')[-2]) f1.close() ##import from s3 import requests 
url = \"https://collabera-aws-training.s3.amazonaws.com/employees01.csv\" headers = {", "key\", 'User-Agent': \"PostmanRuntime/7.15.0\", 'Accept': \"*/*\", 'Cache-Control': \"no-cache\", 'Postman-Token': \"<PASSWORD>\", 'accept-encoding': \"gzip, deflate\", 'Connection':", "###get files from http import requests r=requests.get(\"https://raw.githubusercontent.com/becloudready/snowflake-tutorials/master/dataset/employees01.csv\") r.status_code ##200 status ok, it work", "= \"https://collabera-aws-training.s3.amazonaws.com/employees01.csv\" headers = { 'Host': \"collabera-aws-training.s3.amazonaws.com\", 'X-Amz-Content-Sha256': \"e3numbergeneratedbypostman55\", 'X-Amz-Date': \"20190703T185532Z\", 'Authorization': \"AWS", "number 177 +2 int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2 a=\"/Users/deborahedds/Downloads/testfile06\" f1=open(a, \"w\") f1.write(str(int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2)) f1.close() ###import datetime", "\"\"\" ###get files from http import requests r=requests.get(\"https://raw.githubusercontent.com/becloudready/snowflake-tutorials/master/dataset/employees01.csv\") r.status_code ##200 status ok, it", "##500 server ##read as a text file r.text ###Get the second from the", "+2)) f1.close() ###import datetime and get today's date import datetime as dt dt.datetime.today()", "file a=\"/Users/deborahedds/Downloads/testfile04\" f1=open(a, \"w\") f1.write(r.text.split('\\n')[-2]) f1.close() ##import from s3 import requests url =", "headers=headers) print(response.text) ##get the second to last row number 177 +2 int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2", "f1.close() ##import from s3 import requests url = \"https://collabera-aws-training.s3.amazonaws.com/employees01.csv\" headers = { 'Host':", "the last row r.text.split('\\n')[-2] ##write into a file a=\"/Users/deborahedds/Downloads/testfile04\" f1=open(a, \"w\") f1.write(r.text.split('\\n')[-2]) f1.close()", "datetime and get today's 
date import datetime as dt dt.datetime.today() ##print today's date", "requests url = \"https://collabera-aws-training.s3.amazonaws.com/employees01.csv\" headers = { 'Host': \"collabera-aws-training.s3.amazonaws.com\", 'X-Amz-Content-Sha256': \"e3numbergeneratedbypostman55\", 'X-Amz-Date': \"20190703T185532Z\",", "\"PostmanRuntime/7.15.0\", 'Accept': \"*/*\", 'Cache-Control': \"no-cache\", 'Postman-Token': \"<PASSWORD>\", 'accept-encoding': \"gzip, deflate\", 'Connection': \"keep-alive\", 'cache-control':", "##import from s3 import requests url = \"https://collabera-aws-training.s3.amazonaws.com/employees01.csv\" headers = { 'Host': \"collabera-aws-training.s3.amazonaws.com\",", "last row r.text.split('\\n')[-2] ##write into a file a=\"/Users/deborahedds/Downloads/testfile04\" f1=open(a, \"w\") f1.write(r.text.split('\\n')[-2]) f1.close() ##import", "Created on Wed Jul 3 14:45:54 2019 @author: deborahedds \"\"\" ###get files from", "-*- coding: utf-8 -*- \"\"\" Created on Wed Jul 3 14:45:54 2019 @author:", "and get today's date import datetime as dt dt.datetime.today() ##print today's date using", "<reponame>dedds001/snowflake-tutorials<filename>collabera_python.py<gh_stars>0 #!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on Wed Jul", "##write into a file a=\"/Users/deborahedds/Downloads/testfile04\" f1=open(a, \"w\") f1.write(r.text.split('\\n')[-2]) f1.close() ##import from s3 import", "\"https://collabera-aws-training.s3.amazonaws.com/employees01.csv\" headers = { 'Host': \"collabera-aws-training.s3.amazonaws.com\", 'X-Amz-Content-Sha256': \"e3numbergeneratedbypostman55\", 'X-Amz-Date': \"20190703T185532Z\", 'Authorization': \"AWS SECRET", "\"no-cache\" } response = requests.request(\"GET\", url, headers=headers) print(response.text) ##get the second to last", "a text file r.text ###Get the second from the last row r.text.split('\\n')[-2] ##write", "f1=open(a, \"w\") f1.write(r.text.split('\\n')[-2]) f1.close() ##import from s3 import requests url 
= \"https://collabera-aws-training.s3.amazonaws.com/employees01.csv\" headers", "# -*- coding: utf-8 -*- \"\"\" Created on Wed Jul 3 14:45:54 2019", "int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2 a=\"/Users/deborahedds/Downloads/testfile06\" f1=open(a, \"w\") f1.write(str(int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2)) f1.close() ###import datetime and get today's", "\"no-cache\", 'Postman-Token': \"<PASSWORD>\", 'accept-encoding': \"gzip, deflate\", 'Connection': \"keep-alive\", 'cache-control': \"no-cache\" } response =", "requests r=requests.get(\"https://raw.githubusercontent.com/becloudready/snowflake-tutorials/master/dataset/employees01.csv\") r.status_code ##200 status ok, it work ##403 athorization denied ##500 server", "s3 import requests url = \"https://collabera-aws-training.s3.amazonaws.com/employees01.csv\" headers = { 'Host': \"collabera-aws-training.s3.amazonaws.com\", 'X-Amz-Content-Sha256': \"e3numbergeneratedbypostman55\",", "last row number 177 +2 int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2 a=\"/Users/deborahedds/Downloads/testfile06\" f1=open(a, \"w\") f1.write(str(int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2)) f1.close()", "Jul 3 14:45:54 2019 @author: deborahedds \"\"\" ###get files from http import requests", "files from http import requests r=requests.get(\"https://raw.githubusercontent.com/becloudready/snowflake-tutorials/master/dataset/employees01.csv\") r.status_code ##200 status ok, it work ##403", "+2 int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2 a=\"/Users/deborahedds/Downloads/testfile06\" f1=open(a, \"w\") f1.write(str(int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2)) f1.close() ###import datetime and get", "\"keep-alive\", 'cache-control': \"no-cache\" } response = requests.request(\"GET\", url, headers=headers) print(response.text) ##get the second", "the second to last row number 177 +2 
int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2 a=\"/Users/deborahedds/Downloads/testfile06\" f1=open(a, \"w\")", "utf-8 -*- \"\"\" Created on Wed Jul 3 14:45:54 2019 @author: deborahedds \"\"\"", "a file a=\"/Users/deborahedds/Downloads/testfile04\" f1=open(a, \"w\") f1.write(r.text.split('\\n')[-2]) f1.close() ##import from s3 import requests url", "'Cache-Control': \"no-cache\", 'Postman-Token': \"<PASSWORD>\", 'accept-encoding': \"gzip, deflate\", 'Connection': \"keep-alive\", 'cache-control': \"no-cache\" } response", "from http import requests r=requests.get(\"https://raw.githubusercontent.com/becloudready/snowflake-tutorials/master/dataset/employees01.csv\") r.status_code ##200 status ok, it work ##403 athorization", "\"<PASSWORD>\", 'accept-encoding': \"gzip, deflate\", 'Connection': \"keep-alive\", 'cache-control': \"no-cache\" } response = requests.request(\"GET\", url,", "'Authorization': \"AWS SECRET KEY, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=secret key\", 'User-Agent': \"PostmanRuntime/7.15.0\", 'Accept': \"*/*\", 'Cache-Control': \"no-cache\",", "from the last row r.text.split('\\n')[-2] ##write into a file a=\"/Users/deborahedds/Downloads/testfile04\" f1=open(a, \"w\") f1.write(r.text.split('\\n')[-2])", "3 14:45:54 2019 @author: deborahedds \"\"\" ###get files from http import requests r=requests.get(\"https://raw.githubusercontent.com/becloudready/snowflake-tutorials/master/dataset/employees01.csv\")", "2019 @author: deborahedds \"\"\" ###get files from http import requests r=requests.get(\"https://raw.githubusercontent.com/becloudready/snowflake-tutorials/master/dataset/employees01.csv\") r.status_code ##200", "@author: deborahedds \"\"\" ###get files from http import requests r=requests.get(\"https://raw.githubusercontent.com/becloudready/snowflake-tutorials/master/dataset/employees01.csv\") r.status_code ##200 status", "\"*/*\", 'Cache-Control': \"no-cache\", 'Postman-Token': \"<PASSWORD>\", 
'accept-encoding': \"gzip, deflate\", 'Connection': \"keep-alive\", 'cache-control': \"no-cache\" }", "r=requests.get(\"https://raw.githubusercontent.com/becloudready/snowflake-tutorials/master/dataset/employees01.csv\") r.status_code ##200 status ok, it work ##403 athorization denied ##500 server ##read", "requests.request(\"GET\", url, headers=headers) print(response.text) ##get the second to last row number 177 +2", "as a text file r.text ###Get the second from the last row r.text.split('\\n')[-2]", "'accept-encoding': \"gzip, deflate\", 'Connection': \"keep-alive\", 'cache-control': \"no-cache\" } response = requests.request(\"GET\", url, headers=headers)", "\"w\") f1.write(r.text.split('\\n')[-2]) f1.close() ##import from s3 import requests url = \"https://collabera-aws-training.s3.amazonaws.com/employees01.csv\" headers =", "headers = { 'Host': \"collabera-aws-training.s3.amazonaws.com\", 'X-Amz-Content-Sha256': \"e3numbergeneratedbypostman55\", 'X-Amz-Date': \"20190703T185532Z\", 'Authorization': \"AWS SECRET KEY,", "get today's date import datetime as dt dt.datetime.today() ##print today's date using ddmmyy", "print(response.text) ##get the second to last row number 177 +2 int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2 a=\"/Users/deborahedds/Downloads/testfile06\"", "###Get the second from the last row r.text.split('\\n')[-2] ##write into a file a=\"/Users/deborahedds/Downloads/testfile04\"", "r.status_code ##200 status ok, it work ##403 athorization denied ##500 server ##read as", "'Postman-Token': \"<PASSWORD>\", 'accept-encoding': \"gzip, deflate\", 'Connection': \"keep-alive\", 'cache-control': \"no-cache\" } response = requests.request(\"GET\",", "'Connection': \"keep-alive\", 'cache-control': \"no-cache\" } response = requests.request(\"GET\", url, headers=headers) print(response.text) ##get the", "\"w\") f1.write(str(int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2)) f1.close() ###import datetime and get today's date 
import datetime as", "url = \"https://collabera-aws-training.s3.amazonaws.com/employees01.csv\" headers = { 'Host': \"collabera-aws-training.s3.amazonaws.com\", 'X-Amz-Content-Sha256': \"e3numbergeneratedbypostman55\", 'X-Amz-Date': \"20190703T185532Z\", 'Authorization':", "'X-Amz-Content-Sha256': \"e3numbergeneratedbypostman55\", 'X-Amz-Date': \"20190703T185532Z\", 'Authorization': \"AWS SECRET KEY, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=secret key\", 'User-Agent': \"PostmanRuntime/7.15.0\",", "'Host': \"collabera-aws-training.s3.amazonaws.com\", 'X-Amz-Content-Sha256': \"e3numbergeneratedbypostman55\", 'X-Amz-Date': \"20190703T185532Z\", 'Authorization': \"AWS SECRET KEY, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=secret key\",", "\"gzip, deflate\", 'Connection': \"keep-alive\", 'cache-control': \"no-cache\" } response = requests.request(\"GET\", url, headers=headers) print(response.text)", "from s3 import requests url = \"https://collabera-aws-training.s3.amazonaws.com/employees01.csv\" headers = { 'Host': \"collabera-aws-training.s3.amazonaws.com\", 'X-Amz-Content-Sha256':", "+2 a=\"/Users/deborahedds/Downloads/testfile06\" f1=open(a, \"w\") f1.write(str(int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2)) f1.close() ###import datetime and get today's date", "f1.close() ###import datetime and get today's date import datetime as dt dt.datetime.today() ##print", "\"collabera-aws-training.s3.amazonaws.com\", 'X-Amz-Content-Sha256': \"e3numbergeneratedbypostman55\", 'X-Amz-Date': \"20190703T185532Z\", 'Authorization': \"AWS SECRET KEY, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=secret key\", 'User-Agent':", "to last row number 177 +2 int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2 a=\"/Users/deborahedds/Downloads/testfile06\" f1=open(a, \"w\") f1.write(str(int(response.text.split('\\n')[-2].split(',')[3].split()[0]) +2))", "\"AWS SECRET KEY, 
SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=secret key\", 'User-Agent': \"PostmanRuntime/7.15.0\", 'Accept': \"*/*\", 'Cache-Control': \"no-cache\", 'Postman-Token':", "SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=secret key\", 'User-Agent': \"PostmanRuntime/7.15.0\", 'Accept': \"*/*\", 'Cache-Control': \"no-cache\", 'Postman-Token': \"<PASSWORD>\", 'accept-encoding': \"gzip,", "python3 # -*- coding: utf-8 -*- \"\"\" Created on Wed Jul 3 14:45:54", "##403 athorization denied ##500 server ##read as a text file r.text ###Get the", "Wed Jul 3 14:45:54 2019 @author: deborahedds \"\"\" ###get files from http import", "work ##403 athorization denied ##500 server ##read as a text file r.text ###Get", "it work ##403 athorization denied ##500 server ##read as a text file r.text", "row r.text.split('\\n')[-2] ##write into a file a=\"/Users/deborahedds/Downloads/testfile04\" f1=open(a, \"w\") f1.write(r.text.split('\\n')[-2]) f1.close() ##import from" ]
[]
[ "2018-12-16 13:01 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('vespene', '0008_auto_20181106_2233'),", "by Django 2.1.2 on 2018-12-16 13:01 from django.db import migrations class Migration(migrations.Migration): dependencies", "# Generated by Django 2.1.2 on 2018-12-16 13:01 from django.db import migrations class", "from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('vespene', '0008_auto_20181106_2233'), ] operations", "django.db import migrations class Migration(migrations.Migration): dependencies = [ ('vespene', '0008_auto_20181106_2233'), ] operations =", "2.1.2 on 2018-12-16 13:01 from django.db import migrations class Migration(migrations.Migration): dependencies = [", "migrations class Migration(migrations.Migration): dependencies = [ ('vespene', '0008_auto_20181106_2233'), ] operations = [ migrations.RemoveField(", "Generated by Django 2.1.2 on 2018-12-16 13:01 from django.db import migrations class Migration(migrations.Migration):", "class Migration(migrations.Migration): dependencies = [ ('vespene', '0008_auto_20181106_2233'), ] operations = [ migrations.RemoveField( model_name='workerpool',", "Migration(migrations.Migration): dependencies = [ ('vespene', '0008_auto_20181106_2233'), ] operations = [ migrations.RemoveField( model_name='workerpool', name='sudo_password',", "Django 2.1.2 on 2018-12-16 13:01 from django.db import migrations class Migration(migrations.Migration): dependencies =", "on 2018-12-16 13:01 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('vespene',", "dependencies = [ ('vespene', '0008_auto_20181106_2233'), ] operations = [ migrations.RemoveField( model_name='workerpool', name='sudo_password', ),", "13:01 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('vespene', '0008_auto_20181106_2233'), ]", "import migrations class Migration(migrations.Migration): dependencies = 
[ ('vespene', '0008_auto_20181106_2233'), ] operations = [", "= [ ('vespene', '0008_auto_20181106_2233'), ] operations = [ migrations.RemoveField( model_name='workerpool', name='sudo_password', ), ]" ]
[ "finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(any(x > 1 for x in ncomments.values())) def my_run_func(idx):", "= self.explored['csr_mat'][0].tocsr() self.explored['csr_mat'][1] = self.explored['csr_mat'][0].tocsr() traj.f_explore(cartesian_product(self.explored)) def explore_large(self, traj): self.explored ={'Normal.trial': [0,1]} traj.f_explore(cartesian_product(self.explored))", "below test not new, so ValueError thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_store_child('group') # group has", "self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size is %sMB'", "import tables as pt from pypet.tests.testutils.ioutils import run_suite, make_temp_dir, make_trajectory_name,\\ get_root_logger, parse_args, get_log_config,", "+= 1 finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(any(x > 1 for x in ncomments.values()))", "self.assertRaises(pex.DataNotInStorageError): self.traj.parameters.f_load_child('new.test.group', load_data=pypetconstants.LOAD_SKELETON) def test_switch_on_all_comments(self): self.explore(self.traj) self.traj.hdf5.purge_duplicate_comments=0 self.make_run() hdf5file = pt.open_file(self.filename) traj_group =", "irun in range(3): spsparse_dia = spsp.lil_matrix((111,111)) spsparse_dia[3,2+irun] = 44.5*irun matrices_dia.append(spsparse_dia.tocsc().todia()) self.explore_dict={'string':[np.array(['Uno', 'Dos', 'Tres']),", "'.hdf5' head, tail = os.path.split(env.v_traj.v_storage_service.filename) self.assertEqual(tail, the_file_name) class EnvironmentTest(TrajectoryComparator): tags = 'integration', 'hdf5',", "log_config=get_log_config(), dynamic_imports=SlowResult, display_time=0.1) traj = env.v_traj 
res=traj.f_add_result(SlowResult, 'iii', 42, 43, comment='llk') traj.f_store() service_logger", "[1], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.expanded['csr_mat'][0][1,2]=44.0 self.expanded['csr_mat'][1][2,2]=33 self.expanded['csr_mat'][0]=self.expanded['csr_mat'][0].tocsr() self.expanded['csr_mat'][1]=self.expanded['csr_mat'][1].tocsr() self.traj.f_expand(cartesian_product(self.expanded)) self.traj.f_store()", "# list_that_should_change = [42, 42, 42, 42] # # env.f_run(test_runfunc, list_that_should_change) # #", "matrices_csr = [] for irun in range(3): spsparse_csr = spsp.lil_matrix((111,111)) spsparse_csr[3,2+irun] = 44.5*irun", "name self.assertTrue(not name in overview_group, '%s in overviews but should not!' % name)", "* %s' % (str(traj.crun.z),str(traj.x),str(traj.y))) traj.v_idx=-1 def check_if_z_is_correct(self,traj): traj.v_shortcuts=False for x in range(len(traj)): traj.v_idx=x", "wrap_mode='QUEUE', continue_folder=tmp) with self.assertRaises(ValueError): Environment(use_scoop=True, wrap_mode='QUEUE') with self.assertRaises(ValueError): Environment(automatic_storing=False, continuable=True, continue_folder=tmp) with self.assertRaises(ValueError):", "expand(self): self.expanded ={'Normal.trial': [1], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.expanded['csr_mat'][0][1,2]=44.0 self.expanded['csr_mat'][1][2,2]=33 self.expanded['csr_mat'][0]=self.expanded['csr_mat'][0].tocsr()", "self.assertTrue(self.traj.TestResItem, 42) def test_store_single_group(self): self.traj.f_store() self.traj.f_add_parameter_group('new.test.group').v_annotations.f_set(42) self.traj.f_store_item('new.group') # group is below test not", "self.env.v_trajectory.v_name del self.env self.env = Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) 
self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name) self.expand(self.traj)", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct_map(traj, args1, args2, args3) for res in results: self.assertEqual(len(res), 2)", "self.use_scoop=False self.freeze_input=False self.pandas_format='fixed' self.pandas_append=False self.complib = 'zlib' self.complevel=9 self.shuffle=True self.fletcher32 = False self.encoding", "self.compare_trajectories(self.traj,newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 30.0,", "random.seed() self.trajname = make_trajectory_name(self) self.filename = make_temp_dir(os.path.join('experiments', 'tests', 'HDF5', 'test%s.hdf5' % self.trajname)) env", "if param.v_name in self.explore_dict: param.f_unlock() if param.v_explored: param._shrink() self.explore_dict={'Numpy.string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco', 'Seis',", "False self.ncores = 1 self.use_pool=True self.pandas_format='table' self.pandas_append=True self.complib = 'blosc' self.complevel=2 self.shuffle=False self.fletcher32", "> 6MB' % str(size_in_mb)) with self.assertRaises(TypeError): self.explore(self.traj) def test_run_complex(self): self.traj.f_add_parameter('TEST', 'test_run_complex') ###Explore self.explore_complex_params(self.traj)", "= spsp.lil_matrix((111,111)) spsparse_csr[3,2+irun] = 44.5*irun matrices_csr.append(spsparse_csr.tocsr()) matrices_csc = [] for irun in range(3):", "= Environment(trajectory=self.trajname, filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config(), results_per_run=5, wildcard_functions=self.wildcard_functions, derived_parameters_per_run=5, multiproc=self.multiproc, ncores=self.ncores, wrap_mode=self.mode, use_pool=self.use_pool,", "= make_temp_dir('test.hdf5') head, tail = 
os.path.split(filename) env = Environment(filename=head) the_file_name = env.v_traj.v_name +", ": matrices_csr, 'csc_mat' : matrices_csc, 'bsr_mat' : matrices_bsr, 'dia_mat' : matrices_dia, 'list' :", "1 self.traj.overview.results_summary = 0 self.make_run() self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=1 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=1 self.make_run() hdf5file = pt.open_file(self.filename,", "self.pandas_append=True self.complib = 'blosc' self.complevel=2 self.shuffle=False self.fletcher32 = False self.encoding='latin1' self.graceful_exit = True", "Environment(use_scoop=True, immediate_postproc=True) with self.assertRaises(ValueError): Environment(use_pool=True, immediate_postproc=True) with self.assertRaises(ValueError): Environment(continuable=True, wrap_mode='QUEUE', continue_folder=tmp) with self.assertRaises(ValueError):", "port=self.port, add_time=self.add_time, timeout=self.timeout, graceful_exit=self.graceful_exit) traj = env.v_trajectory traj.v_standard_parameter=Parameter ## Create some parameters self.param_dict={}", "= Environment(trajectory=self.trajname,filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config() if self.log_config else None, multiproc=self.multiproc, wrap_mode=self.mode, ncores=self.ncores, use_pool=self.use_pool,", "from pypet import pypetconstants, Result, manual_run import pypet.pypetexceptions as pex import sys import", "traj.res.runs.crun.z x = traj.par.x y = traj.par.y self.assertTrue(z==x*y,' z != x*y: %s !=", "self.traj) newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx nameset = set((x.v_name for x", "self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(any(x > 1 for x in ncomments.values())) def my_run_func(idx): return 'hello_%d'", "class DeepCopyTest(TrajectoryComparator): # # def test_deep_copy_data(self): # # self.filename = 
make_temp_dir('experiments/tests/HDF5/testcopy.hdf5') # self.logfolder", "###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) self.assertEqual(len(results), len(self.traj)) traj = self.traj self.assertTrue(len(traj) ==", "recursive is false with self.assertRaises(TypeError): self.traj.parameters.new.f_remove_child('test') self.traj.new.f_remove_child('test', recursive=True) self.assertTrue('new.group' not in self.traj) self.traj.new.f_load_child('test',", ": [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} with self.assertRaises(pex.NotUniqueNodeError): traj.f_explore(self.explore_dict) traj.f_shrink(force=True) par_dict = traj.parameters.f_to_dict() for param_name in par_dict:", "from pypet import Parameter import tables as pt from pypet.tests.testutils.ioutils import run_suite, make_temp_dir,", "int)) man_multiply(self.traj) traj = self.traj traj.f_store() self.assertTrue(len(traj), 5) self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "load_derived_parameters=how, load_results=how) return newtraj def explore(self,traj): self.explore_dict={'x':[-1,1,2,3,4],'y':[1,1,2,2,3]} traj.f_explore(self.explore_dict) def explore_cartesian(self,traj): self.explore_dict=cartesian_product({'x':[-1,1,2,3,4, 5, 6],'y':[1,1,2,2,3,4,4]})", "= 0 self.make_run() hdf5file = pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should_not =", "None: env1 = Environment(continuable=True, continue_folder=tmp, log_config=None, filename=self.filename) with self.assertRaises(ValueError): env1.f_run_map(multiply_args, [1], [2], [3])", "spsp import random from pypet import Parameter import tables as pt from pypet.tests.testutils.ioutils", "x in range(len(self.traj))] args2=[100*x for x in range(len(self.traj))] args3=list(range(len(self.traj))) results = self.env.f_run_map(multiply_args, 
args1,", "get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 2.0, 'Size is %sMB > 6MB'", "wrap_mode='LOCK') def test_run(self): self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton()", "= [] for irun in range(3): spsparse_dia = spsp.lil_matrix((111,111)) spsparse_dia[3,2+irun] = 44.5*irun matrices_dia.append(spsparse_dia.tocsc().todia())", "newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_derived_parameters=how, load_results=how) return newtraj def explore(self,traj): self.explore_dict={'x':[-1,1,2,3,4],'y':[1,1,2,2,3]} traj.f_explore(self.explore_dict) def explore_cartesian(self,traj):", "self.make_run_large_data() self.assertTrue(self.traj.f_is_completed()) # Check if printing and repr work get_root_logger().info(str(self.env)) get_root_logger().info(repr(self.env)) newtraj =", "< 6.0, 'Size is %sMB > 6MB' % str(size_in_mb)) self.compare_trajectories(mp_traj, self.traj) self.multiproc =", "= 1 self.use_pool=True self.log_stdout=False self.freeze_input=False self.use_scoop = False self.log_config = True self.port =", "= Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=self.traj.v_name, index=None, as_new=False, load_data=0) newtraj.v_auto_load = True newtraj.par.f_load_child('y', load_data=1) for", "['derived_parameters_overview', 'results_overview'] for name in should: self.assertTrue(name in overview_group, '%s not in overviews", "# env = Environment(trajectory=self.trajname,filename=self.filename, # file_title=self.trajname, log_folder=self.logfolder, # log_stdout=False, # multiproc=False, # deep_copy_data=True)", "= self.env.f_run(multiply_with_graceful_exit) self.are_results_in_order(results) self.assertFalse(self.traj.f_is_completed()) def 
test_f_iter_runs(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj", "log_folder=self.logfolder, # log_stdout=False, # multiproc=False, # deep_copy_data=False) # # traj = env.v_trajectory #", "os.path.join(path, 'LOG.txt') with open(mainfilename, mode='r') as mainf: full_text = mainf.read() self.assertTrue('nodes/s)' in full_text)", "numpy as np from pypet.trajectory import Trajectory, load_trajectory from pypet.utils.explore import cartesian_product from", "def test_if_results_are_sorted_correctly(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) self.assertEqual(len(results), len(self.traj)) traj = self.traj", "= pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should = ['derived_parameters_overview', 'results_overview'] for name", "self.traj self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton()", "str(size_in_mb)) self.assertTrue(size_in_mb < 2.0, 'Size is %sMB > 6MB' % str(size_in_mb)) with self.assertRaises(TypeError):", "self.assertTrue(name in overview_group, '%s not in overviews but it should!' 
% name) hdf5file.close()", "run self.setUp() self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments = False ###Explore self.explore(self.traj) self.make_run() # newtraj =", "is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' %", "ncomments: ncomments[comment] = 0 ncomments[comment] += 1 finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(any(x >", "file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config(), results_per_run=5, wildcard_functions=self.wildcard_functions, derived_parameters_per_run=5, multiproc=self.multiproc, ncores=self.ncores, wrap_mode=self.mode, use_pool=self.use_pool, gc_interval=self.gc_interval, freeze_input=self.freeze_input, fletcher32=self.fletcher32,", "= Environment(filename=self.filename, log_config=get_log_config()) traj2 = env2.v_trajectory traj2.f_store() self.assertTrue(os.path.exists(self.filename)) with pt.open_file(self.filename, mode='r') as file:", "make_trajectory_name(self) env = Environment(trajectory=self.trajname,filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config() if self.log_config else None, multiproc=self.multiproc, wrap_mode=self.mode,", "import time import numpy as np from pypet.trajectory import Trajectory, load_trajectory from pypet.utils.explore", "self.multiproc self.multiproc = False ### Make a new single core run self.setUp() self.traj.f_add_parameter('TEST',", "Environment(trajectory=self.trajname,filename=self.filename, # file_title=self.trajname, log_folder=self.logfolder, # log_stdout=False, # multiproc=False, # deep_copy_data=True) # # traj", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000. 
get_root_logger().info('Size is %sMB' %", "= [] for irun in range(3): spsparse_csr = spsp.lil_matrix((111,111)) spsparse_csr[3,2+irun] = 44.5*irun matrices_csr.append(spsparse_csr.tocsr())", "newtraj) def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False): ### Load The Trajectory and check if the values are", "% str(size_in_mb)) self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb)) mp_traj", "(idx+1) not in nameset) self.assertTrue('run_%08d' % idx in nameset) self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.crun.z==traj.x*traj.y,'", "self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj", "self.assertRaises(ValueError): self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem,(),{},5)) self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem)) self.traj.results.f_remove_child('TestResItem') self.assertTrue('TestResItem' not in self.traj) self.traj.results.f_load_child('TestResItem', load_data=pypetconstants.LOAD_SKELETON) self.traj.f_load_item((pypetconstants.LEAF,self.traj.TestResItem,(),{'load_only':", "env @unittest.skipIf(not hasattr(os, 'nice') and psutil is None, 'Niceness not supported under non", "make_trajectory_name,\\ get_root_logger, parse_args, get_log_config, get_log_path from pypet.tests.testutils.data import create_param_dict, add_params, multiply,\\ simple_calculations, TrajectoryComparator,", "'csc_mat' : matrices_csc, 'bsr_mat' : matrices_bsr, 'dia_mat' : matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} traj.f_explore(self.explore_dict)", "traj.v_idx) # list_that_changes[traj.v_idx] = 1000 # class DeepCopyTest(TrajectoryComparator): # # def test_deep_copy_data(self): #", "% self.trajname)) env = 
Environment(trajectory=self.trajname, filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config(), results_per_run=5, wildcard_functions=self.wildcard_functions, derived_parameters_per_run=5, multiproc=self.multiproc,", "if dill is not None: env1 = Environment(continuable=True, continue_folder=tmp, log_config=None, filename=self.filename) with self.assertRaises(ValueError):", "= make_temp_dir('full_store.hdf5') with Environment(filename=filename, log_config=get_log_config()) as env: traj = env.v_trajectory traj.par.x = Parameter('x',", "%sMB > 6MB' % str(size_in_mb)) self.compare_trajectories(mp_traj, self.traj) self.multiproc = old_multiproc def test_errors(self): tmp", "= size/1000000. get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 30.0, 'Size is %sMB", "enumerate(list_that_should_change): # self.assertTrue(list_that_should_change[irun] == 1000) if __name__ == '__main__': opt_args = parse_args() run_suite(**opt_args)", "= -13 simple_kwarg= 13.0 results = self.env.f_run(simple_calculations,simple_arg,simple_kwarg=simple_kwarg) self.are_results_in_order(results) def test_a_large_run(self): get_root_logger().info('Testing large run')", "traj_group = hdf5file.get_node(where='/',name= self.traj.v_name) for node in traj_group._f_walk_groups(): if ('/derived_parameters/' in node._v_pathname or", "traj = env.v_trajectory traj.v_standard_parameter=Parameter traj.f_add_parameter('x',99) traj.f_add_parameter('y',99) self.env=env self.traj=traj def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False, how=2): ### Load", "from pypet.tests.testutils.ioutils import run_suite, make_temp_dir, make_trajectory_name,\\ get_root_logger, parse_args, get_log_config, get_log_path from pypet.tests.testutils.data import", "mode='a') ncomments = {} try: traj_group = hdf5file.get_node(where='/',name= self.traj.v_name) for node in traj_group._f_walk_groups():", "def 
explore(self,traj): self.explore_dict={'x':[-1,1,2,3,4],'y':[1,1,2,2,3]} traj.f_explore(self.explore_dict) def explore_cartesian(self,traj): self.explore_dict=cartesian_product({'x':[-1,1,2,3,4, 5, 6],'y':[1,1,2,2,3,4,4]}) traj.f_explore(self.explore_dict) def expand(self,traj): self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12,13]}", "size_in_mb = size/1000000. get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 6.0, 'Size is", "None self.graceful_exit = True def tearDown(self): self.env.f_disable_logging() super(ResultSortTest, self).tearDown() def setUp(self): self.set_mode() self.filename", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) for idx, run_name in", "Unix.') def test_niceness(self): ###Explore self.explore(self.traj) self.env.f_run(with_niceness) self.assertTrue(self.traj.f_is_completed()) def test_file_overwriting(self): self.traj.f_store() with pt.open_file(self.filename, mode='r')", "platform import logging import time import numpy as np from pypet.trajectory import Trajectory,", "= os.path.join(path, 'LOG.txt') with open(mainfilename, mode='r') as mainf: full_text = mainf.read() self.assertTrue('nodes/s)' in", "self.traj=traj def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False, how=2): ### Load The Trajectory and check if the values", "name.split('.')[-1] # Get only the name of the table, no the full name", "test_run(self): self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", 
"self.use_scoop = False self.log_config = True self.port = None self.graceful_exit = True def", "(str(trajnice), str(osnice))) def add_large_data(traj): np_array = np.random.rand(100, 1000, 10) traj.f_add_result('l4rge', np_array) traj.f_store_item('l4rge') traj.f_remove_item('l4rge')", "env1 = Environment(continuable=True, continue_folder=tmp, log_config=None, filename=self.filename) with self.assertRaises(ValueError): env1.f_run_map(multiply_args, [1], [2], [3]) with", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb", "traj_name = self.traj.v_name self.env = Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name) self.traj.res.f_remove()", "ncomments[comment] += 1 finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(all(x == 1 for x in", "trajnice = traj.niceness osnice = psutil.Process().nice() if trajnice != osnice: if traj.use_scoop: import", "traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = Trajectory()", "class TestOtherHDF5Settings2(EnvironmentTest): tags = 'integration', 'hdf5', 'environment', 'hdf5_settings' def set_mode(self): EnvironmentTest.set_mode(self) self.mode =", "range(3): spsparse_csc = spsp.lil_matrix((111,111)) spsparse_csc[3,2+irun] = 44.5*irun matrices_csc.append(spsparse_csc.tocsc()) matrices_bsr = [] for irun", "deep_copy_data=False) # # traj = env.v_trajectory # # traj.f_add_parameter('dummy', 1) # traj.f_explore({'dummy':[12, 3,", "= traj.niceness osnice = psutil.Process().nice() if trajnice != osnice: if traj.use_scoop: 
import scoop", "= 1 self.use_pool=True self.pandas_format='table' self.pandas_append=False self.complib = 'lzo' self.complevel=2 self.shuffle=False self.fletcher32 = True", "# env.f_run(test_runfunc, list_that_should_change) # # traj.v_auto_load=True # # for irun, val in enumerate(list_that_should_change):", "for x in range(len(self.traj))] args2=[100*x for x in range(len(self.traj))] args3=list(range(len(self.traj))) results = self.env.f_run_map(multiply_args,", "in ncomments.values())) def my_run_func(idx): return 'hello_%d' % idx def my_set_func(idx): return 'huhu_%d' %", "= None self.niceness = None self.port = None self.timeout = None self.add_time=True self.graceful_exit", "43) with self.assertRaises(ValueError): self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem,(),{},5)) self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem)) self.traj.results.f_remove_child('TestResItem') self.assertTrue('TestResItem' not in self.traj) self.traj.results.f_load_child('TestResItem',", "Overview TESTS ############################# def test_switch_ON_large_tables(self): self.traj.f_add_parameter('TEST', 'test_switch_ON_LARGE_tables') ###Explore self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 1 self.env._traj.config.hdf5.overview.derived_parameters_overview", "= True def tearDown(self): self.env.f_disable_logging() super(ResultSortTest, self).tearDown() def setUp(self): self.set_mode() self.filename = make_temp_dir(os.path.join('experiments','tests','HDF5','sort_tests.hdf5'))", "should_not: name = name.split('.')[-1] # Get only the name of the table, no", "service_logger.setLevel(logging.INFO) root.setLevel(logging.INFO) traj.f_load(load_data=3) service_logger.setLevel(old_level) root.setLevel(old_level) path = get_log_path(traj) mainfilename = os.path.join(path, 'LOG.txt') with", "spsp.lil_matrix((111,111)) spsparse_csc[3,2+irun] = 44.5*irun matrices_csc.append(spsparse_csc.tocsc()) matrices_bsr = [] for irun in 
range(3): spsparse_bsr", "else: trajnice = traj.niceness osnice = psutil.Process().nice() if trajnice != osnice: if traj.use_scoop:", "traj): self.explored ={'Normal.trial': [0], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.explored['csr_mat'][0][1,2]=44.0 self.explored['csr_mat'][1][2,2]=33 self.explored['csr_mat'][0]", "self.ncores = 1 self.use_pool=True self.log_stdout=False self.freeze_input=False self.use_scoop = False self.log_config = True self.port", "self.assertTrue('run_%08d' % idx in nameset) self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.res.runs.crun.z==newtraj.par.x*newtraj.par.y,' z != x*y: %s", "irun in range(3): spsparse_bsr = spsp.lil_matrix((111,111)) spsparse_bsr[3,2+irun] = 44.5*irun matrices_bsr.append(spsparse_bsr.tocsr().tobsr()) matrices_dia = []", "= 1000 # class DeepCopyTest(TrajectoryComparator): # # def test_deep_copy_data(self): # # self.filename =", "= '<NAME>' import os import platform import logging import time import numpy as", "self.assertFalse(os.path.exists(self.filename)) env2.f_disable_logging() env3.f_disable_logging() def test_time_display_of_loading(self): filename = make_temp_dir('sloooow.hdf5') env = Environment(trajectory='traj', add_time=True, filename=filename,", "get_root_logger().info('Testing large run') self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore_large(self.traj) self.make_run_large_data() self.assertTrue(self.traj.f_is_completed()) # Check if printing", "30MB' % str(size_in_mb)) def test_two_runs(self): self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments = False ###Explore self.explore(self.traj) self.make_run()", "in overview_group, '%s in overviews but should not!' 
% name) hdf5file.close() def test_store_form_tuple(self):", "test_graceful_exit(self): ###Explore self.explore_cartesian(self.traj) results = self.env.f_run(multiply_with_graceful_exit) self.are_results_in_order(results) self.assertFalse(self.traj.f_is_completed()) def test_f_iter_runs(self): ###Explore self.explore(self.traj) results", "traj) def with_niceness(traj): if traj.multiproc: if hasattr(os, 'nice'): trajnice = traj.niceness osnice =", "if traj.multiproc: if hasattr(os, 'nice'): trajnice = traj.niceness osnice = os.nice(0) else: trajnice", "as np from pypet.trajectory import Trajectory, load_trajectory from pypet.utils.explore import cartesian_product from pypet.environment", "env.f_run(test_runfunc, list_that_should_not_change) # # traj.v_auto_load=True # # for irun, val in enumerate(list_that_should_not_change): #", "0 self.env._traj.config.hdf5.overview.derived_parameters_summary = 0 self.env._traj.config.hdf5.overview.results_summary = 0 self.env._traj.config.hdf5.purge_duplicate_comments = 0 self.env._traj.config.hdf5.overview.parameters_overview = 0", "traj.multiproc: if hasattr(os, 'nice'): trajnice = traj.niceness osnice = os.nice(0) else: trajnice =", "is self.traj) newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx nameset = set((x.v_name for", "parse_args, get_log_config, get_log_path from pypet.tests.testutils.data import create_param_dict, add_params, multiply,\\ simple_calculations, TrajectoryComparator, multiply_args, multiply_with_storing,", "Add some parameter: add_params(traj,self.param_dict) #remember the trajectory and the environment self.traj = traj", "# # self.filename = make_temp_dir('experiments/tests/HDF5/testcopy.hdf5') # self.logfolder = make_temp_dir('experiments/tests/Log') # self.trajname = make_trajectory_name(self)", "= 44.5*irun matrices_csr.append(spsparse_csr.tocsr()) matrices_csc = [] for irun in range(3): spsparse_csc = spsp.lil_matrix((111,111))", "ncomments[comment] = 0 ncomments[comment] += 1 
finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(all(x == 1", "self.explore(self.traj) self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename) size_in_mb =", "def test_switch_off_all_tables(self): ###Explore self.traj.f_add_parameter('TEST', 'test_switch_off_ALL_tables') self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_summary", "% name) hdf5file.close() self.traj.f_load(load_parameters=2, load_derived_parameters=2, load_results=2) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name) self.compare_trajectories(newtraj,self.traj) def test_switch_off_all_tables(self): ###Explore", "newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_parameters=2, load_derived_parameters=2, load_results=2, load_other_data=2) return newtraj def test_expand(self): ###Explore", "self.traj.f_add_parameter('TEST', 'test_expand_after_reload') ###Explore self.explore(self.traj) self.make_run() traj_name = self.traj.v_name self.env = Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config())", "multiproc=self.multiproc, wrap_mode=self.mode, ncores=self.ncores, use_pool=self.use_pool, use_scoop=self.use_scoop, port=self.port, freeze_input=self.freeze_input, graceful_exit=self.graceful_exit) traj = env.v_trajectory traj.v_standard_parameter=Parameter traj.f_add_parameter('x',99)", "%s' % (str(newtraj.crun.z),str(newtraj.x),str(newtraj.y))) traj = self.traj self.assertTrue(traj.v_idx == -1) self.assertTrue(traj.v_crun is None) self.assertTrue(traj.v_crun_", "' '%s 
!= %s' % (str(trajnice), str(osnice))) def add_large_data(traj): np_array = np.random.rand(100, 1000,", "self.traj.f_load_item((pypetconstants.LEAF,self.traj.TestResItem,(),{'load_only': 'TestResItem'})) self.assertTrue(self.traj.TestResItem, 42) def test_store_single_group(self): self.traj.f_store() self.traj.f_add_parameter_group('new.test.group').v_annotations.f_set(42) self.traj.f_store_item('new.group') # group is below", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def check_if_z_is_correct_map(self,traj, args1,", "= 'zlib' self.complevel=9 self.shuffle=True self.fletcher32 = False self.encoding = 'utf8' self.log_stdout=False self.wildcard_functions =", "setUp(self): self.set_mode() self.filename = make_temp_dir(os.path.join('experiments','tests','HDF5','sort_tests.hdf5')) self.trajname = make_trajectory_name(self) env = Environment(trajectory=self.trajname,filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout,", "= make_temp_dir(os.path.join('experiments', 'tests', 'Log')) random.seed() self.trajname = make_trajectory_name(self) self.filename = make_temp_dir(os.path.join('experiments', 'tests', 'HDF5',", "except ImportError: dill = None import scipy.sparse as spsp import random from pypet", "self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) self.assertTrue(len(newtraj) == 1) size=os.path.getsize(self.filename)", "range(3): spsparse_csr = spsp.lil_matrix((111,111)) spsparse_csr[3,2+irun] = 44.5*irun matrices_csr.append(spsparse_csr.tocsr()) 
matrices_csc = [] for irun", "self.env=env self.traj=traj def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False, how=2): ### Load The Trajectory and check if the", "continuable=True, continue_folder=tmp) with self.assertRaises(ValueError): Environment(port='www.nosi.de', wrap_mode='LOCK') def test_run(self): self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore(self.traj) self.make_run()", "deep_copy_data=True) # # traj = env.v_trajectory # # traj.f_add_parameter('dummy', 1) # traj.f_explore({'dummy':[12, 3,", "= self.multiproc self.multiproc = False ### Make a new single core run self.setUp()", "1000, 10) traj.f_add_result('l4rge', np_array) traj.f_store_item('l4rge') traj.f_remove_item('l4rge') array_list = [] for irun in range(111):", "return newtraj def explore(self,traj): self.explore_dict={'x':[-1,1,2,3,4],'y':[1,1,2,2,3]} traj.f_explore(self.explore_dict) def explore_cartesian(self,traj): self.explore_dict=cartesian_product({'x':[-1,1,2,3,4, 5, 6],'y':[1,1,2,2,3,4,4]}) traj.f_explore(self.explore_dict) def", "enumerate(self.traj.f_iter_runs(yields='copy')): run_name = traj.f_idx_to_run(idx) self.assertTrue(traj is not self.traj) newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx", "self.make_run() hdf5file = pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should_not = HDF5StorageService.NAME_TABLE_MAPPING.keys() for", "results_per_run=5, wildcard_functions=self.wildcard_functions, derived_parameters_per_run=5, multiproc=self.multiproc, ncores=self.ncores, wrap_mode=self.mode, use_pool=self.use_pool, gc_interval=self.gc_interval, freeze_input=self.freeze_input, fletcher32=self.fletcher32, complevel=self.complevel, complib=self.complib, shuffle=self.shuffle,", "= Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name) self.expand(self.traj) results = 
self.env.f_run(multiply) self.are_results_in_order(results)", "large run') self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore_large(self.traj) self.make_run_large_data() self.assertTrue(self.traj.f_is_completed()) # Check if printing and", "list_that_should_not_change = [42, 42, 42, 42] # # env.f_run(test_runfunc, list_that_should_not_change) # # traj.v_auto_load=True", "for x in traj.f_iter_nodes(predicate=(idx,)))) self.assertTrue('run_%08d' % (idx+1) not in nameset) self.assertTrue('run_%08d' % idx", "= load_trajectory(index=-1, filename=filename) self.assertTrue('hi' in traj) def with_niceness(traj): if traj.multiproc: if hasattr(os, 'nice'):", "node._v_attrs: self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs, 'There is no comment in node %s!' % node._v_name)", "%s != %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) for idx, traj in enumerate(self.traj.f_iter_runs(yields='self')): run_name", "!= x*y: %s != %s * %s' % (str(traj.crun.z),str(traj.x),str(traj.y))) traj.v_idx=-1 def check_if_z_is_correct(self,traj): traj.v_shortcuts=False", "mode='r') as file: nchildren = len(file.root._v_children) self.assertTrue(nchildren > 0) env2 = Environment(filename=self.filename, log_config=get_log_config())", "matrices_bsr.append(spsparse_bsr.tocsr().tobsr()) matrices_dia = [] for irun in range(3): spsparse_dia = spsp.lil_matrix((111,111)) spsparse_dia[3,2+irun] =", "test_runfunc(traj, list_that_changes): # traj.f_add_result('kkk', list_that_changes[traj.v_idx] + traj.v_idx) # list_that_changes[traj.v_idx] = 1000 # class", "# # list_that_should_change = [42, 42, 42, 42] # # env.f_run(test_runfunc, list_that_should_change) #", "self.explore_dict={'x':[-1,1,2,3,4],'y':[1,1,2,2,3]} traj.f_explore(self.explore_dict) def explore_cartesian(self,traj): self.explore_dict=cartesian_product({'x':[-1,1,2,3,4, 5, 6],'y':[1,1,2,2,3,4,4]}) traj.f_explore(self.explore_dict) def expand(self,traj): self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12,13]} with 
self.assertRaises(ValueError):", "'tests', 'HDF5', 'test%s.hdf5' % self.trajname)) env = Environment(trajectory=self.trajname, filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config(), results_per_run=5,", "self.assertTrue(newtraj.res.runs.crun.z==newtraj.par.x*newtraj.par.y,' z != x*y: %s != %s * %s' % (str(newtraj.crun.z),str(newtraj.x),str(newtraj.y))) traj =", "self.compare_trajectories(self.traj, newtraj) def test_expand_after_reload(self): self.traj.f_add_parameter('TEST', 'test_expand_after_reload') ###Explore self.explore(self.traj) self.make_run() traj_name = self.traj.v_name self.env", "so ValueError thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_store_child('group') # group has children and recursive is", "matrices_csr.append(spsparse_csr.tocsr()) matrices_csc = [] for irun in range(3): spsparse_csc = spsp.lil_matrix((111,111)) spsparse_csc[3,2+irun] =", "res[1]) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.assertEqual(len(traj), 5) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly(self): ###Explore", "size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 2.0, 'Size", "comment in node %s!' 
% node._v_name) hdf5file.close() def test_purge_duplicate_comments(self): self.explore(self.traj) with self.assertRaises(RuntimeError): self.traj.hdf5.purge_duplicate_comments", "5, 6],'y':[1,1,2,2,3,4,4]}) traj.f_explore(self.explore_dict) def expand(self,traj): self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12,13]} with self.assertRaises(ValueError): traj.f_expand(self.expand_dict) self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]} traj.f_expand(self.expand_dict) def test_if_results_are_sorted_correctly_manual_runs(self):", "self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) self.expand(self.traj) self.env.f_run(multiply) traj = self.traj self.assertTrue(len(traj)", "self.expand(self.traj) self.env.f_run(multiply) traj = self.traj self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+\\ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj)", "make_temp_dir('experiments/tests/HDF5/testcopy.hdf5') # self.logfolder = make_temp_dir('experiments/tests/Log') # self.trajname = make_trajectory_name(self) # # env =", "import Environment from pypet.storageservice import HDF5StorageService from pypet import pypetconstants, Result, manual_run import", "size/1000000. get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 2.0, 'Size is %sMB >", "self.env._traj.config.hdf5.overview.explored_parameters_overview = 0 self.make_run() hdf5file = pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should_not", "full name self.assertTrue(not name in overview_group, '%s in overviews but should not!' 
%", "= self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_expand_after_reload(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply)", "6.0, 'Size is %sMB > 6MB' % str(size_in_mb)) mp_traj = self.traj old_multiproc =", "[['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} traj.f_explore(self.explore_dict) def explore(self, traj): self.explored ={'Normal.trial': [0], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)),", "def my_set_func(idx): return 'huhu_%d' % idx class TestOtherHDF5Settings(EnvironmentTest): tags = 'integration', 'hdf5', 'environment',", "self.assertRaises(ValueError): Environment(port='www.nosi.de', wrap_mode='LOCK') def test_run(self): self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj =", "test_f_iter_runs(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))", "traj.f_store_item('l4rge') traj.f_remove_item('l4rge') array_list = [] for irun in range(111): array_list.append(np.random.rand(10)) traj.f_add_result('m4ny', *array_list) class", "= logging.getLogger('pypet') old_level = root.level service_logger.setLevel(logging.INFO) root.setLevel(logging.INFO) traj.f_load(load_data=3) service_logger.setLevel(old_level) root.setLevel(old_level) path = get_log_path(traj)", "'int':[1,2,3], 'csr_mat' : matrices_csr, 'csc_mat' : matrices_csc, 'bsr_mat' : matrices_bsr, 'dia_mat' : matrices_dia,", "explore_complex_params(self, traj): matrices_csr = [] for irun in range(3): spsparse_csr = spsp.lil_matrix((111,111)) spsparse_csr[3,2+irun]", "'Nueve', 'Diez'])], 
'Normal.int':[1,2,3], 'csr_mat' : matrices_csr, 'csc_mat' : matrices_csc, 'bsr_mat' : matrices_bsr, 'dia_mat'", "only_empties=True) self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb", "def test_errors(self): tmp = make_temp_dir('cont') if dill is not None: env1 = Environment(continuable=True,", "with self.assertRaises(ValueError): Environment(use_pool=True, immediate_postproc=True) with self.assertRaises(ValueError): Environment(continuable=True, wrap_mode='QUEUE', continue_folder=tmp) with self.assertRaises(ValueError): Environment(use_scoop=True, wrap_mode='QUEUE')", "scoop.IS_ORIGIN): return raise RuntimeError('traj niceness != os niceness; ' '%s != %s' %", "newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_parameters=2, load_derived_parameters=2, load_results=2, load_other_data=2) return newtraj def test_expand(self): ###Explore self.traj.f_add_parameter('TEST',", "my_set_func(idx): return 'huhu_%d' % idx class TestOtherHDF5Settings(EnvironmentTest): tags = 'integration', 'hdf5', 'environment', 'hdf5_settings'", "idx, traj in enumerate(self.traj.f_iter_runs(yields='copy')): run_name = traj.f_idx_to_run(idx) self.assertTrue(traj is not self.traj) newtraj.v_crun=run_name self.traj.v_idx", "= res[0] self.assertEqual(self.traj.res.runs[idx].z, res[1]) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_graceful_exit(self):", "self.explore(self.traj) self.make_run() traj_name = self.traj.v_name self.env = Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) self.traj = self.env.v_trajectory", "self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() 
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_graceful_exit(self): ###Explore self.explore_cartesian(self.traj) results = self.env.f_run(multiply_with_graceful_exit) self.are_results_in_order(results)", "0 ncomments[comment] += 1 finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(all(x == 1 for x", "super(EnvironmentTest, self).tearDown() def setUp(self): self.set_mode() self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log')) random.seed() self.trajname =", "load_parameters=2, load_derived_parameters=2, load_results=2, load_other_data=2) return newtraj def test_expand(self): ###Explore self.traj.f_add_parameter('TEST', 'test_expand') self.explore(self.traj) self.make_run()", "as_new=as_new, load_parameters=2, load_derived_parameters=2, load_results=2, load_other_data=2) return newtraj def test_expand(self): ###Explore self.traj.f_add_parameter('TEST', 'test_expand') self.explore(self.traj)", "class EnvironmentTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' def set_mode(self): self.mode = 'LOCK' self.multiproc", "the name of the table, no the full name self.assertTrue(not name in overview_group,", "traj.f_add_result('l4rge', np_array) traj.f_store_item('l4rge') traj.f_remove_item('l4rge') array_list = [] for irun in range(111): array_list.append(np.random.rand(10)) traj.f_add_result('m4ny',", "= None self.graceful_exit = True def tearDown(self): self.env.f_disable_logging() super(ResultSortTest, self).tearDown() def setUp(self): self.set_mode()", "= make_trajectory_name(self) env = Environment(trajectory=self.trajname,filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config() if self.log_config else None, multiproc=self.multiproc,", "the same newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) 
newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_parameters=2, load_derived_parameters=2, load_results=2, load_other_data=2)", "freeze_input=self.freeze_input, graceful_exit=self.graceful_exit) traj = env.v_trajectory traj.v_standard_parameter=Parameter traj.f_add_parameter('x',99) traj.f_add_parameter('y',99) self.env=env self.traj=traj def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False, how=2):", "self.assertTrue(os.path.exists(self.filename)) with pt.open_file(self.filename, mode='r') as file: nchildren = len(file.root._v_children) self.assertTrue(nchildren > 1) env3", "dill is not None: env1 = Environment(continuable=True, continue_folder=tmp, log_config=None, filename=self.filename) with self.assertRaises(ValueError): env1.f_run_map(multiply_args,", "matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} with self.assertRaises(pex.NotUniqueNodeError): traj.f_explore(self.explore_dict) traj.f_shrink(force=True) par_dict = traj.parameters.f_to_dict() for param_name", "pypet import pypetconstants, Result, manual_run import pypet.pypetexceptions as pex import sys import unittest", "pt.open_file(self.filename, mode='r') as file: nchildren = len(file.root._v_children) self.assertTrue(nchildren > 0) env2 = Environment(filename=self.filename,", "42, 42, 42] # # env.f_run(test_runfunc, list_that_should_change) # # traj.v_auto_load=True # # for", "or scoop.IS_ORIGIN): return raise RuntimeError('traj niceness != os niceness; ' '%s != %s'", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.assertEqual(len(traj), 5) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results)", "%s * %s' % (str(newtraj.crun.z),str(newtraj.x),str(newtraj.y))) traj = self.traj self.assertTrue(traj.v_idx == -1) self.assertTrue(traj.v_crun is", "manual_run 
import pypet.pypetexceptions as pex import sys import unittest try: import psutil except", "> 1 for x in ncomments.values())) def my_run_func(idx): return 'hello_%d' % idx def", "finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(all(x == 1 for x in ncomments.values())) def test_NOT_purge_duplicate_comments(self):", "except ImportError: psutil = None try: import dill except ImportError: dill = None", "# # env.f_run(test_runfunc, list_that_should_not_change) # # traj.v_auto_load=True # # for irun, val in", "'utf8' self.log_stdout=False self.wildcard_functions = None self.niceness = None self.port = None self.timeout =", "in range(3): spsparse_bsr = spsp.lil_matrix((111,111)) spsparse_bsr[3,2+irun] = 44.5*irun matrices_bsr.append(spsparse_bsr.tocsr().tobsr()) matrices_dia = [] for", "# self.assertTrue(list_that_should_not_change[irun] == 42) # x=traj.results.runs[irun].kkk # self.assertTrue(x==42+irun) # # def test_not_deep_copy_data(self): #", "for idx, run_name in enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx nameset", "False self.ncores = 1 self.use_pool=True self.log_stdout=False self.freeze_input=False self.use_scoop = False self.log_config = True", "'nice'): trajnice = traj.niceness osnice = os.nice(0) else: trajnice = traj.niceness osnice =", "43, comment='llk') traj.f_store() service_logger = traj.v_storage_service._logger root = logging.getLogger('pypet') old_level = root.level service_logger.setLevel(logging.INFO)", "name = name.split('.')[-1] # Get only the name of the table, no the", "self.pandas_format='table' self.pandas_append=True self.complib = 'blosc' self.complevel=2 self.shuffle=False self.fletcher32 = False self.encoding='latin1' self.graceful_exit =", "self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct_map(traj, 
args1, args2, args3) for res in", "0 self.make_run() hdf5file = pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should_not = HDF5StorageService.NAME_TABLE_MAPPING.keys()", "def test_time_display_of_loading(self): filename = make_temp_dir('sloooow.hdf5') env = Environment(trajectory='traj', add_time=True, filename=filename, log_stdout=False, log_config=get_log_config(), dynamic_imports=SlowResult,", "[3]) with self.assertRaises(ValueError): Environment(multiproc=True, use_pool=False, freeze_input=True, filename=self.filename, log_config=None) env3 = Environment(log_config=None, filename=self.filename) with", "some parameter: add_params(traj,self.param_dict) #remember the trajectory and the environment self.traj = traj self.env", "'Size is %sMB > 6MB' % str(size_in_mb)) mp_traj = self.traj old_multiproc = self.multiproc", "-1) self.assertTrue(traj.v_crun is None) self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx == idx) def test_f_iter_runs_auto_load(self): ###Explore", "= {('$', 'crun') : my_run_func, ('$set', 'crunset'): my_set_func} class ResultSortTest(TrajectoryComparator): tags = 'integration',", "pex import sys import unittest try: import psutil except ImportError: psutil = None", "idx in nameset) self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y: %s != %s", "'Dos', 'Tres']), np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])], 'Normal.int':[1,2,3], 'csr_mat' : matrices_csr, 'csc_mat'", "def test_f_iter_runs(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) ==", "in traj_group._f_walk_groups(): if ('/derived_parameters/' in node._v_pathname or '/results/' in node._v_pathname): if 'SRVC_LEAF' in", "make_temp_dir(os.path.join('experiments', 'tests', 'Log')) random.seed() self.trajname = 
make_trajectory_name(self) self.filename = make_temp_dir(os.path.join('experiments', 'tests', 'HDF5', 'test%s.hdf5'", "as spsp import random from pypet import Parameter import tables as pt from", "1 self.use_pool=True self.pandas_format='table' self.pandas_append=True self.complib = 'blosc' self.complevel=2 self.shuffle=False self.fletcher32 = False self.encoding='latin1'", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) for res in results: self.assertEqual(len(res), 2) self.assertTrue(isinstance(res[0], int)) self.assertTrue(isinstance(res[1],", "Environment(trajectory='traj', add_time=True, filename=filename, log_stdout=False, log_config=get_log_config(), dynamic_imports=SlowResult, display_time=0.1) traj = env.v_traj res=traj.f_add_result(SlowResult, 'iii', 42,", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000. 
get_root_logger().info('Size is %sMB' % str(size_in_mb))", "% idx in nameset) self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.res.runs.crun.z==newtraj.par.x*newtraj.par.y,' z != x*y: %s !=", "[0,1]} traj.f_explore(cartesian_product(self.explored)) def tearDown(self): self.env.f_disable_logging() super(EnvironmentTest, self).tearDown() def setUp(self): self.set_mode() self.logfolder = make_temp_dir(os.path.join('experiments',", "= env.v_traj res=traj.f_add_result(SlowResult, 'iii', 42, 43, comment='llk') traj.f_store() service_logger = traj.v_storage_service._logger root =", "self.expanded['csr_mat'][1]=self.expanded['csr_mat'][1].tocsr() self.traj.f_expand(cartesian_product(self.expanded)) self.traj.f_store() ################## Overview TESTS ############################# def test_switch_ON_large_tables(self): self.traj.f_add_parameter('TEST', 'test_switch_ON_LARGE_tables') ###Explore self.explore(self.traj)", "try: import psutil except ImportError: psutil = None try: import dill except ImportError:", "= self.traj self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)", "42) # x=traj.results.runs[irun].kkk # self.assertTrue(x==42+irun) # # def test_not_deep_copy_data(self): # self.filename = make_temp_dir('experiments/tests/HDF5/testcoyp2.hdf5')", "self.traj.results.f_load_child('TestResItem', load_data=pypetconstants.LOAD_SKELETON) self.traj.f_load_item((pypetconstants.LEAF,self.traj.TestResItem,(),{'load_only': 'TestResItem'})) self.assertTrue(self.traj.TestResItem, 42) def test_store_single_group(self): self.traj.f_store() self.traj.f_add_parameter_group('new.test.group').v_annotations.f_set(42) self.traj.f_store_item('new.group') # group", "multiproc=False, # deep_copy_data=True) # 
# traj = env.v_trajectory # # traj.f_add_parameter('dummy', 1) #", "self.traj.res.f_remove() self.traj.dpar.f_remove() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$ Second Run $$$$$$$$$$ \\n') self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)", "HDF5StorageService.NAME_TABLE_MAPPING.keys() for name in should_not: name = name.split('.')[-1] # Get only the name", "self.traj.f_store() self.traj.f_add_result('TestResItem', 42, 43) with self.assertRaises(ValueError): self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem,(),{},5)) self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem)) self.traj.results.f_remove_child('TestResItem') self.assertTrue('TestResItem' not", "Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_derived_parameters=how, load_results=how) return newtraj def explore(self,traj): self.explore_dict={'x':[-1,1,2,3,4],'y':[1,1,2,2,3]} traj.f_explore(self.explore_dict)", "param.v_explored: param._shrink() self.explore_dict={'Numpy.string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])], 'Normal.int':[1,2,3], 'csr_mat'", "self.assertTrue(traj.v_crun is None) self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx == idx) def test_expand(self): ###Explore self.explore(self.traj)", "self.explore(self.traj) self.make_run() # newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename) size_in_mb", "self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem)) self.traj.results.f_remove_child('TestResItem') self.assertTrue('TestResItem' not in self.traj) 
self.traj.results.f_load_child('TestResItem', load_data=pypetconstants.LOAD_SKELETON) self.traj.f_load_item((pypetconstants.LEAF,self.traj.TestResItem,(),{'load_only': 'TestResItem'})) self.assertTrue(self.traj.TestResItem, 42)", "self.expand(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton()", "self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000.", "simple_calculations, TrajectoryComparator, multiply_args, multiply_with_storing, \\ multiply_with_graceful_exit def add_one_particular_item(traj, store_full): traj.hi = Result('hi', 42,", "timeout=self.timeout, graceful_exit=self.graceful_exit) traj = env.v_trajectory traj.v_standard_parameter=Parameter ## Create some parameters self.param_dict={} create_param_dict(self.param_dict) ###", "load_data=2, filename=self.filename) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000. 
get_root_logger().info('Size is %sMB'", "(str(z),str(x),str(y))) traj.v_idx=-1 traj.v_shortcuts=True # def test_runfunc(traj, list_that_changes): # traj.f_add_result('kkk', list_that_changes[traj.v_idx] + traj.v_idx) #", "= 0 ncomments[comment] += 1 finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(all(x == 1 for", "6MB' % str(size_in_mb)) def test_just_one_run(self): self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", "traj.v_standard_parameter=Parameter traj.f_add_parameter('x',99) traj.f_add_parameter('y',99) self.env=env self.traj=traj def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False, how=2): ### Load The Trajectory and", "= self.explored['csr_mat'][0].tocsr() traj.f_explore(cartesian_product(self.explored)) def explore_large(self, traj): self.explored ={'Normal.trial': [0,1]} traj.f_explore(cartesian_product(self.explored)) def tearDown(self): self.env.f_disable_logging()", "traj.v_auto_load=True # # for irun, val in enumerate(list_that_should_change): # self.assertTrue(list_that_should_change[irun] == 1000) if", "= self.traj old_multiproc = self.multiproc self.multiproc = False ### Make a new single", "not None: env1 = Environment(continuable=True, continue_folder=tmp, log_config=None, filename=self.filename) with self.assertRaises(ValueError): env1.f_run_map(multiply_args, [1], [2],", "traj.v_standard_parameter=Parameter ## Create some parameters self.param_dict={} create_param_dict(self.param_dict) ### Add some parameter: add_params(traj,self.param_dict) #remember", "for node in traj_group._f_walk_groups(): if ('/derived_parameters/' in node._v_pathname or '/results/' in node._v_pathname): if", "self.traj.parameters.new.f_store_child('group') # group has children and recursive is false with 
self.assertRaises(TypeError): self.traj.parameters.new.f_remove_child('test') self.traj.new.f_remove_child('test',", "= 'utf8' self.log_stdout=False self.wildcard_functions = None self.niceness = None self.port = None self.timeout", "make_temp_dir(os.path.join('experiments', 'tests', 'HDF5', 'test%s.hdf5' % self.trajname)) env = Environment(trajectory=self.trajname, filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config(),", "###Explore self.explore(self.traj) self.env.f_run(with_niceness) self.assertTrue(self.traj.f_is_completed()) def test_file_overwriting(self): self.traj.f_store() with pt.open_file(self.filename, mode='r') as file: nchildren", "None import scipy.sparse as spsp import random from pypet import Parameter import tables", "with pt.open_file(self.filename, mode='r') as file: nchildren = len(file.root._v_children) self.assertTrue(nchildren > 1) env3 =", "not in self.traj) self.traj.results.f_load_child('TestResItem', load_data=pypetconstants.LOAD_SKELETON) self.traj.f_load_item((pypetconstants.LEAF,self.traj.TestResItem,(),{'load_only': 'TestResItem'})) self.assertTrue(self.traj.TestResItem, 42) def test_store_single_group(self): self.traj.f_store() self.traj.f_add_parameter_group('new.test.group').v_annotations.f_set(42)", "= self.env.f_run(multiply) self.are_results_in_order(results) self.assertEqual(len(results), len(self.traj)) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "psutil is None, 'Niceness not supported under non Unix.') def test_niceness(self): ###Explore self.explore(self.traj)", "is %sMB > 6MB' % str(size_in_mb)) with self.assertRaises(TypeError): self.explore(self.traj) def test_run_complex(self): self.traj.f_add_parameter('TEST', 'test_run_complex')", "np from pypet.trajectory import Trajectory, load_trajectory from pypet.utils.explore import cartesian_product 
from pypet.environment import", "x*y: %s != %s * %s' % (str(traj.crun.z),str(traj.x),str(traj.y))) traj.v_idx=-1 def check_if_z_is_correct(self,traj): traj.v_shortcuts=False for", "len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) for res in results: self.assertEqual(len(res), 2) self.assertTrue(isinstance(res[0], int))", "import HDF5StorageService from pypet import pypetconstants, Result, manual_run import pypet.pypetexceptions as pex import", "os.path.split(filename) env = Environment(filename=head) the_file_name = env.v_traj.v_name + '.hdf5' head, tail = os.path.split(env.v_traj.v_storage_service.filename)", "import pypet.pypetexceptions as pex import sys import unittest try: import psutil except ImportError:", "def test_expand_after_reload(self): self.traj.f_add_parameter('TEST', 'test_expand_after_reload') ###Explore self.explore(self.traj) self.make_run() traj_name = self.traj.v_name self.env = Environment(trajectory=self.traj,", "self.gc_interval = None self.ncores = 1 self.use_pool=True self.use_scoop=False self.freeze_input=False self.pandas_format='fixed' self.pandas_append=False self.complib =", "use_pool=False, freeze_input=True, filename=self.filename, log_config=None) env3 = Environment(log_config=None, filename=self.filename) with self.assertRaises(ValueError): env3.f_run_map(multiply_args) with self.assertRaises(ValueError):", "== run_name) self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y: %s != %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y)))", "test_errors(self): tmp = make_temp_dir('cont') if dill is not None: env1 = Environment(continuable=True, continue_folder=tmp,", "traj.niceness osnice = os.nice(0) else: trajnice = traj.niceness osnice = psutil.Process().nice() if trajnice", "= None import scipy.sparse as spsp import random from pypet import Parameter import", "for x, arg1, arg2, arg3 in 
zip(range(len(traj)), args1, args2, args3): traj.v_idx=x self.assertTrue(traj.crun.z==traj.x*traj.y+arg1+arg2+arg3,' z", "42] # # env.f_run(test_runfunc, list_that_should_not_change) # # traj.v_auto_load=True # # for irun, val", "def tearDown(self): self.env.f_disable_logging() super(EnvironmentTest, self).tearDown() def setUp(self): self.set_mode() self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log'))", "('/derived_parameters/' in node._v_pathname or '/results/' in node._v_pathname): if 'SRVC_LEAF' in node._v_attrs: if 'SRVC_INIT_COMMENT'", "self.traj.results.f_remove_child('TestResItem') self.assertTrue('TestResItem' not in self.traj) self.traj.results.f_load_child('TestResItem', load_data=pypetconstants.LOAD_SKELETON) self.traj.f_load_item((pypetconstants.LEAF,self.traj.TestResItem,(),{'load_only': 'TestResItem'})) self.assertTrue(self.traj.TestResItem, 42) def test_store_single_group(self):", "tags = 'integration', 'hdf5', 'environment', 'quick' def test_make_default_file_when_giving_directory_without_slash(self): filename = make_temp_dir('test.hdf5') head, tail", "self.env = Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name) self.traj.res.f_remove() self.traj.dpar.f_remove() self.expand() get_root_logger().info('\\n", "% str(size_in_mb)) self.assertTrue(size_in_mb < 30.0, 'Size is %sMB > 30MB' % str(size_in_mb)) def", "is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 30.0, 'Size is %sMB > 30MB' %", "matrices_dia = [] for irun in range(3): spsparse_dia = spsp.lil_matrix((111,111)) spsparse_dia[3,2+irun] = 44.5*irun", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) for res in results: self.assertEqual(len(res), 2) self.assertTrue(isinstance(res[0], int)) self.assertTrue(isinstance(res[1], int))", "log_config=get_log_config()) self.traj = self.env.v_trajectory 
self.traj.f_load(name=traj_name) self.expand(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj", "% (idx+1) not in nameset) self.assertTrue('run_%08d' % idx in nameset) self.assertTrue(traj.v_crun == run_name)", "self.fletcher32 = True self.encoding='latin1' self.wildcard_functions = {('$', 'crun') : my_run_func, ('$set', 'crunset'): my_set_func}", "Trajectory and check if the values are still the same newtraj = Trajectory()", "wrap_mode=self.mode, use_pool=self.use_pool, gc_interval=self.gc_interval, freeze_input=self.freeze_input, fletcher32=self.fletcher32, complevel=self.complevel, complib=self.complib, shuffle=self.shuffle, pandas_append=self.pandas_append, pandas_format=self.pandas_format, encoding=self.encoding, niceness=self.niceness, use_scoop=self.use_scoop,", "Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name) self.expand(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj", "%sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 30.0, 'Size is %sMB > 30MB' % str(size_in_mb))", "newtraj.par.f_load_child('y', load_data=1) for idx, run_name in enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx =", "self.traj.f_store() self.traj.f_add_parameter_group('new.test.group').v_annotations.f_set(42) self.traj.f_store_item('new.group') # group is below test not new, so ValueError thrown:", "% idx def my_set_func(idx): return 'huhu_%d' % idx class TestOtherHDF5Settings(EnvironmentTest): tags = 'integration',", "man_multiply(self.traj) traj = self.traj traj.f_store() self.assertTrue(len(traj), 5) self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", "len(list(self.expand_dict.values())[0])+ len(list(self.explore_dict.values())[0])) 
self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj)", "self.encoding = 'utf8' self.log_stdout=False self.wildcard_functions = None self.niceness = None self.port = None", "args2, args3): traj.v_idx=x self.assertTrue(traj.crun.z==traj.x*traj.y+arg1+arg2+arg3,' z != x*y: %s != %s * %s' %", "root.setLevel(logging.INFO) traj.f_load(load_data=3) service_logger.setLevel(old_level) root.setLevel(old_level) path = get_log_path(traj) mainfilename = os.path.join(path, 'LOG.txt') with open(mainfilename,", "traj.v_auto_load=True # # for irun, val in enumerate(list_that_should_not_change): # self.assertTrue(list_that_should_not_change[irun] == 42) #", "tags def test_full_store(self): filename = make_temp_dir('full_store.hdf5') with Environment(filename=filename, log_config=get_log_config()) as env: traj =", "traj.v_idx=-1 def check_if_z_is_correct(self,traj): traj.v_shortcuts=False for x in range(len(traj)): traj.v_idx=x z = traj.res.runs.crun.z x", "'hdf5_settings' def set_mode(self): EnvironmentTest.set_mode(self) self.mode = 'LOCK' self.multiproc = False self.ncores = 1", "mainfilename = os.path.join(path, 'LOG.txt') with open(mainfilename, mode='r') as mainf: full_text = mainf.read() self.assertTrue('nodes/s)'", "root.level service_logger.setLevel(logging.INFO) root.setLevel(logging.INFO) traj.f_load(load_data=3) service_logger.setLevel(old_level) root.setLevel(old_level) path = get_log_path(traj) mainfilename = os.path.join(path, 'LOG.txt')", "self.explored ={'Normal.trial': [0,1]} traj.f_explore(cartesian_product(self.explored)) def tearDown(self): self.env.f_disable_logging() super(EnvironmentTest, self).tearDown() def setUp(self): self.set_mode() self.logfolder", "def 
add_one_particular_item(traj, store_full): traj.hi = Result('hi', 42, 'hi!') traj.f_store() traj.f_remove_child('hi') class SlowResult(Result): def", "'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.explored['csr_mat'][0][1,2]=44.0 self.explored['csr_mat'][1][2,2]=33 self.explored['csr_mat'][0] = self.explored['csr_mat'][0].tocsr() self.explored['csr_mat'][1] = self.explored['csr_mat'][0].tocsr() traj.f_explore(cartesian_product(self.explored)) def", "= HDF5StorageService.NAME_TABLE_MAPPING.keys() for name in should_not: name = name.split('.')[-1] # Get only the", "str(size_in_mb)) self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb)) mp_traj =", "Result, manual_run import pypet.pypetexceptions as pex import sys import unittest try: import psutil", "filename=self.filename) with self.assertRaises(ValueError): env1.f_run_map(multiply_args, [1], [2], [3]) with self.assertRaises(ValueError): Environment(multiproc=True, use_pool=False, freeze_input=True, filename=self.filename,", "self.multiproc = False ### Make a new single core run self.setUp() self.traj.f_add_parameter('TEST', 'test_run')", "sys import unittest try: import psutil except ImportError: psutil = None try: import", "traj = load_trajectory(index=-1, filename=filename) self.assertTrue('hi' in traj) def with_niceness(traj): if traj.multiproc: if hasattr(os,", "= 0 self.env._traj.config.hdf5.overview.parameters_overview = 0 self.env._traj.config.hdf5.overview.config_overview = 0 self.env._traj.config.hdf5.overview.explored_parameters_overview = 0 self.make_run() hdf5file", "np_array) traj.f_store_item('l4rge') traj.f_remove_item('l4rge') array_list = [] for irun in range(111): array_list.append(np.random.rand(10)) traj.f_add_result('m4ny', *array_list)", "'zlib' self.complevel=9 self.shuffle=True self.fletcher32 = False self.encoding = 'utf8' self.log_stdout=False self.wildcard_functions = None", "self.traj.f_load_skeleton() 
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) traj_name = self.env.v_trajectory.v_name del self.env self.env = Environment(trajectory=self.traj, log_stdout=False,", "self.explored ={'Normal.trial': [0], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.explored['csr_mat'][0][1,2]=44.0 self.explored['csr_mat'][1][2,2]=33 self.explored['csr_mat'][0] =", "args2, args3): for x, arg1, arg2, arg3 in zip(range(len(traj)), args1, args2, args3): traj.v_idx=x", "z != x*y: %s != %s * %s' % (str(traj.crun.z),str(traj.x),str(traj.y))) traj.v_idx=-1 def check_if_z_is_correct(self,traj):", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def check_if_z_is_correct_map(self,traj, args1, args2, args3): for x, arg1, arg2, arg3", "only_empties=True) self.compare_trajectories(self.traj, newtraj) def expand(self): self.expanded ={'Normal.trial': [1], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)),", "self.traj.f_add_result('TestResItem', 42, 43) with self.assertRaises(ValueError): self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem,(),{},5)) self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem)) self.traj.results.f_remove_child('TestResItem') self.assertTrue('TestResItem' not in", "def test_f_iter_runs_auto_load(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) ==", "= None self.ncores = 1 self.use_pool=True self.use_scoop=False self.freeze_input=False self.pandas_format='fixed' self.pandas_append=False self.complib = 'zlib'", "self.explored['csr_mat'][0][1,2]=44.0 self.explored['csr_mat'][1][2,2]=33 self.explored['csr_mat'][0] = 
self.explored['csr_mat'][0].tocsr() self.explored['csr_mat'][1] = self.explored['csr_mat'][0].tocsr() traj.f_explore(cartesian_product(self.explored)) def explore_large(self, traj): self.explored", "log_config=None, filename=self.filename) with self.assertRaises(ValueError): env1.f_run_map(multiply_args, [1], [2], [3]) with self.assertRaises(ValueError): Environment(multiproc=True, use_pool=False, freeze_input=True,", "self.assertEqual(len(results), len(self.traj)) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct_map(traj, args1,", "= pt.open_file(self.filename) traj_group = hdf5file.get_node(where='/', name= self.traj.v_name) for node in traj_group._f_walk_groups(): if 'SRVC_LEAF'", "test_NOT_purge_duplicate_comments(self): self.explore(self.traj) self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=0 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=0 self.make_run() hdf5file = pt.open_file(self.filename, mode='a') ncomments =", "None self.ncores = 1 self.use_pool=True self.use_scoop=False self.freeze_input=False self.pandas_format='fixed' self.pandas_append=False self.complib = 'zlib' self.complevel=9", "thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_remove_child('group') # group is below test not new, so ValueError", "'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])], 'Normal.int':[1,2,3], 'csr_mat' : matrices_csr, 'csc_mat' : matrices_csc, 'bsr_mat' :", "file_title=self.trajname, log_folder=self.logfolder, # log_stdout=False, # multiproc=False, # deep_copy_data=False) # # traj = env.v_trajectory", "= make_temp_dir('cont') if dill is not None: env1 = Environment(continuable=True, continue_folder=tmp, log_config=None, filename=self.filename)", "Second Run $$$$$$$$$$ \\n') self.make_run() newtraj = 
self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj)", "self.env._traj.config.hdf5.overview.derived_parameters_summary = 0 self.env._traj.config.hdf5.overview.results_summary = 0 self.env._traj.config.hdf5.purge_duplicate_comments = 0 self.env._traj.config.hdf5.overview.parameters_overview = 0 self.env._traj.config.hdf5.overview.config_overview", "= traj.niceness osnice = os.nice(0) else: trajnice = traj.niceness osnice = psutil.Process().nice() if", "$$$$$$$$$$$$ Second Run $$$$$$$$$$ \\n') self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,", "self.expanded['csr_mat'][1][2,2]=33 self.expanded['csr_mat'][0]=self.expanded['csr_mat'][0].tocsr() self.expanded['csr_mat'][1]=self.expanded['csr_mat'][1].tocsr() self.traj.f_expand(cartesian_product(self.expanded)) self.traj.f_store() ################## Overview TESTS ############################# def test_switch_ON_large_tables(self): self.traj.f_add_parameter('TEST', 'test_switch_ON_LARGE_tables')", "is false with self.assertRaises(TypeError): self.traj.parameters.new.f_remove_child('test') self.traj.new.f_remove_child('test', recursive=True) self.assertTrue('new.group' not in self.traj) self.traj.new.f_load_child('test', recursive=True,", "# for irun, val in enumerate(list_that_should_change): # self.assertTrue(list_that_should_change[irun] == 1000) if __name__ ==", "def check_if_z_is_correct_map(self,traj, args1, args2, args3): for x, arg1, arg2, arg3 in zip(range(len(traj)), args1,", "self.mode = 'LOCK' self.multiproc = False self.gc_interval = None self.ncores = 1 self.use_pool=True", "super(ResultSortTest, self).tearDown() def setUp(self): self.set_mode() self.filename = 
make_temp_dir(os.path.join('experiments','tests','HDF5','sort_tests.hdf5')) self.trajname = make_trajectory_name(self) env =", "# # traj.v_auto_load=True # # for irun, val in enumerate(list_that_should_not_change): # self.assertTrue(list_that_should_not_change[irun] ==", "###Explore self.explore(self.traj) self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename) size_in_mb", "self.filename = make_temp_dir(os.path.join('experiments','tests','HDF5','sort_tests.hdf5')) self.trajname = make_trajectory_name(self) env = Environment(trajectory=self.trajname,filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config() if", "self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def check_if_z_is_correct_map(self,traj, args1, args2, args3):", "service_logger.setLevel(old_level) root.setLevel(old_level) path = get_log_path(traj) mainfilename = os.path.join(path, 'LOG.txt') with open(mainfilename, mode='r') as", "newtraj = self.load_trajectory(trajectory_name=self.traj.v_name) self.compare_trajectories(newtraj,self.traj) def test_switch_off_all_tables(self): ###Explore self.traj.f_add_parameter('TEST', 'test_switch_off_ALL_tables') self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 0", "%s!' 
% node._v_name) hdf5file.close() def test_purge_duplicate_comments(self): self.explore(self.traj) with self.assertRaises(RuntimeError): self.traj.hdf5.purge_duplicate_comments = 1 self.traj.overview.results_summary", "as env: traj = env.v_trajectory traj.par.x = Parameter('x', 3, 'jj') traj.f_explore({'x': [1,2,3]}) env.f_run(add_one_particular_item,", "int)) idx = res[0] self.assertEqual(self.traj.res.runs[idx].z, res[1]) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj)", "traj): matrices_csr = [] for irun in range(3): spsparse_csr = spsp.lil_matrix((111,111)) spsparse_csr[3,2+irun] =", "group has children and recursive is false with self.assertRaises(TypeError): self.traj.parameters.new.f_remove_child('test') self.traj.new.f_remove_child('test', recursive=True) self.assertTrue('new.group'", "Load The Trajectory and check if the values are still the same newtraj", "self.multiproc = False self.gc_interval = None self.ncores = 1 self.use_pool=True self.use_scoop=False self.freeze_input=False self.pandas_format='fixed'", "traj.f_iter_nodes(predicate=(idx,)))) self.assertTrue('run_%08d' % (idx+1) not in nameset) self.assertTrue('run_%08d' % idx in nameset) self.assertTrue(traj.v_crun", "= traj.v_storage_service._logger root = logging.getLogger('pypet') old_level = root.level service_logger.setLevel(logging.INFO) root.setLevel(logging.INFO) traj.f_load(load_data=3) service_logger.setLevel(old_level) root.setLevel(old_level)", "the values are still the same newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new,", "self.log_config else None, multiproc=self.multiproc, wrap_mode=self.mode, ncores=self.ncores, use_pool=self.use_pool, use_scoop=self.use_scoop, port=self.port, 
freeze_input=self.freeze_input, graceful_exit=self.graceful_exit) traj =", "1 finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(any(x > 1 for x in ncomments.values())) def", "self.compare_trajectories(self.traj, newtraj) def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False): ### Load The Trajectory and check if the values", "def test_runfunc(traj, list_that_changes): # traj.f_add_result('kkk', list_that_changes[traj.v_idx] + traj.v_idx) # list_that_changes[traj.v_idx] = 1000 #", "True def tearDown(self): self.env.f_disable_logging() super(ResultSortTest, self).tearDown() def setUp(self): self.set_mode() self.filename = make_temp_dir(os.path.join('experiments','tests','HDF5','sort_tests.hdf5')) self.trajname", "TESTS ############################# def test_switch_ON_large_tables(self): self.traj.f_add_parameter('TEST', 'test_switch_ON_LARGE_tables') ###Explore self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 1 self.env._traj.config.hdf5.overview.derived_parameters_overview =", "* %s' % (str(z),str(x),str(y))) traj.v_idx=-1 traj.v_shortcuts=True # def test_runfunc(traj, list_that_changes): # traj.f_add_result('kkk', list_that_changes[traj.v_idx]", "###Explore self.explore(self.traj) args1=[10*x for x in range(len(self.traj))] args2=[100*x for x in range(len(self.traj))] args3=list(range(len(self.traj)))", "def test_NOT_purge_duplicate_comments(self): self.explore(self.traj) self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=0 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=0 self.make_run() hdf5file = pt.open_file(self.filename, mode='a') ncomments", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) self.expand(self.traj) self.env.f_run(multiply) traj = self.traj self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+\\ 
len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton()", "= [] for irun in range(3): spsparse_bsr = spsp.lil_matrix((111,111)) spsparse_bsr[3,2+irun] = 44.5*irun matrices_bsr.append(spsparse_bsr.tocsr().tobsr())", "= 1 self.use_pool=True self.use_scoop=False self.freeze_input=False self.pandas_format='fixed' self.pandas_append=False self.complib = 'zlib' self.complevel=9 self.shuffle=True self.fletcher32", "env = Environment(trajectory='traj', add_time=True, filename=filename, log_stdout=False, log_config=get_log_config(), dynamic_imports=SlowResult, display_time=0.1) traj = env.v_traj res=traj.f_add_result(SlowResult,", "Environment(log_config=None, filename=self.filename) with self.assertRaises(ValueError): env3.f_run_map(multiply_args) with self.assertRaises(ValueError): Environment(use_scoop=True, immediate_postproc=True) with self.assertRaises(ValueError): Environment(use_pool=True, immediate_postproc=True)", "in nameset) self.assertTrue('run_%08d' % idx in nameset) self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.res.runs.crun.z==newtraj.par.x*newtraj.par.y,' z !=", "make_temp_dir(os.path.join('experiments','tests','HDF5','sort_tests.hdf5')) self.trajname = make_trajectory_name(self) env = Environment(trajectory=self.trajname,filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config() if self.log_config else", "'Log')) random.seed() self.trajname = make_trajectory_name(self) self.filename = make_temp_dir(os.path.join('experiments', 'tests', 'HDF5', 'test%s.hdf5' % self.trajname))", "self.traj.f_add_parameter_group('new.test.group').v_annotations.f_set(42) self.traj.f_store_item('new.group') # group is below test not new, so ValueError thrown: with", "'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.expanded['csr_mat'][0][1,2]=44.0 self.expanded['csr_mat'][1][2,2]=33 self.expanded['csr_mat'][0]=self.expanded['csr_mat'][0].tocsr() 
self.expanded['csr_mat'][1]=self.expanded['csr_mat'][1].tocsr() self.traj.f_expand(cartesian_product(self.expanded)) self.traj.f_store() ################## Overview TESTS #############################", "'test_run') ###Explore self.explore(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj)", "for x in range(len(self.traj))] args3=list(range(len(self.traj))) results = self.env.f_run_map(multiply_args, args1, arg2=args2, arg3=args3) self.assertEqual(len(results), len(self.traj))", "self.traj traj.f_store() self.assertTrue(len(traj), 5) self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj =", "make_trajectory_name(self) self.filename = make_temp_dir(os.path.join('experiments', 'tests', 'HDF5', 'test%s.hdf5' % self.trajname)) env = Environment(trajectory=self.trajname, filename=self.filename,", "_load(self, load_dict): time.sleep(3) super(SlowResult, self)._load(load_dict) class FullStorageTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' #", "ncomments[comment] += 1 finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(any(x > 1 for x in", "range(len(self.traj))] args3=list(range(len(self.traj))) results = self.env.f_run_map(multiply_args, args1, arg2=args2, arg3=args3) self.assertEqual(len(results), len(self.traj)) traj = self.traj", "matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} traj.f_explore(self.explore_dict) def explore(self, traj): self.explored ={'Normal.trial': [0], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]),", "self.compare_trajectories(self.traj,newtraj) def 
test_if_results_are_sorted_correctly_using_map(self): ###Explore self.explore(self.traj) args1=[10*x for x in range(len(self.traj))] args2=[100*x for x", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size is %sMB' % str(size_in_mb))", "% (str(newtraj.crun.z),str(traj.x),str(traj.y))) for idx, traj in enumerate(self.traj.f_iter_runs(yields='self')): run_name = traj.f_idx_to_run(idx) self.assertTrue(traj is self.traj)", "newtraj def explore(self,traj): self.explore_dict={'x':[-1,1,2,3,4],'y':[1,1,2,2,3]} traj.f_explore(self.explore_dict) def explore_cartesian(self,traj): self.explore_dict=cartesian_product({'x':[-1,1,2,3,4, 5, 6],'y':[1,1,2,2,3,4,4]}) traj.f_explore(self.explore_dict) def expand(self,traj):", "and repr work get_root_logger().info(str(self.env)) get_root_logger().info(repr(self.env)) newtraj = Trajectory() newtraj.f_load(name=self.traj.v_name, as_new=False, load_data=2, filename=self.filename) self.traj.f_load_skeleton()", "== -1) self.assertTrue(traj.v_crun is None) self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx == idx) def test_expand(self):", "None try: import dill except ImportError: dill = None import scipy.sparse as spsp", "self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) get_root_logger().info(results) traj = self.traj self.assertEqual(len(traj), len(list(list(self.explore_dict.values())[0]))) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "The Trajectory and check if the values are still the same newtraj =", "ncomments: ncomments[comment] = 0 ncomments[comment] += 1 finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(all(x ==", "x in ncomments.values())) def test_NOT_purge_duplicate_comments(self): self.explore(self.traj) 
self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=0 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=0 self.make_run() hdf5file =", "1000 # class DeepCopyTest(TrajectoryComparator): # # def test_deep_copy_data(self): # # self.filename = make_temp_dir('experiments/tests/HDF5/testcopy.hdf5')", "== 1) size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb <", "nameset) self.assertTrue('run_%08d' % idx in nameset) self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.res.runs.crun.z==newtraj.par.x*newtraj.par.y,' z != x*y:", "42, 43, comment='llk') traj.f_store() service_logger = traj.v_storage_service._logger root = logging.getLogger('pypet') old_level = root.level", "pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx == idx) def test_f_iter_runs_auto_load(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj", "self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "self.traj) self.traj.new.f_load_child('test', recursive=True, load_data=pypetconstants.LOAD_SKELETON) self.assertTrue(self.traj.new.group.v_annotations.annotation, 42) self.traj.f_delete_item('new.test.group') with self.assertRaises(pex.DataNotInStorageError): self.traj.parameters.f_load_child('new.test.group', load_data=pypetconstants.LOAD_SKELETON) def test_switch_on_all_comments(self):", "self.check_if_z_is_correct(traj) for res in results: self.assertEqual(len(res), 2) self.assertTrue(isinstance(res[0], int)) self.assertTrue(isinstance(res[1], int)) idx =", "mainf.read() self.assertTrue('nodes/s)' in full_text) env.f_disable_logging() def 
make_run_large_data(self): self.env.f_run(add_large_data) def make_run(self): ### Make a", "arg3 in zip(range(len(traj)), args1, args2, args3): traj.v_idx=x self.assertTrue(traj.crun.z==traj.x*traj.y+arg1+arg2+arg3,' z != x*y: %s !=", "get_root_logger().info(repr(self.env)) newtraj = Trajectory() newtraj.f_load(name=self.traj.v_name, as_new=False, load_data=2, filename=self.filename) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) size=os.path.getsize(self.filename)", "np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])], 'int':[1,2,3], 'csr_mat' : matrices_csr, 'csc_mat' : matrices_csc,", "log_stdout=False, log_config=get_log_config(), dynamic_imports=SlowResult, display_time=0.1) traj = env.v_traj res=traj.f_add_result(SlowResult, 'iii', 42, 43, comment='llk') traj.f_store()", "newtraj) def expand(self): self.expanded ={'Normal.trial': [1], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.expanded['csr_mat'][0][1,2]=44.0", "import Trajectory, load_trajectory from pypet.utils.explore import cartesian_product from pypet.environment import Environment from pypet.storageservice", "2.0, 'Size is %sMB > 6MB' % str(size_in_mb)) with self.assertRaises(TypeError): self.explore(self.traj) def test_run_complex(self):", "False self.encoding='latin1' self.graceful_exit = True class TestOtherHDF5Settings2(EnvironmentTest): tags = 'integration', 'hdf5', 'environment', 'hdf5_settings'", "list_that_should_change) # # traj.v_auto_load=True # # for irun, val in enumerate(list_that_should_change): # self.assertTrue(list_that_should_change[irun]", "in node %s!' 
% node._v_name) hdf5file.close() def test_purge_duplicate_comments(self): self.explore(self.traj) with self.assertRaises(RuntimeError): self.traj.hdf5.purge_duplicate_comments =", "dynamic_imports=SlowResult, display_time=0.1) traj = env.v_traj res=traj.f_add_result(SlowResult, 'iii', 42, 43, comment='llk') traj.f_store() service_logger =", "'Seis', 'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])], 'Normal.int':[1,2,3], 'csr_mat' : matrices_csr, 'csc_mat' : matrices_csc, 'bsr_mat'", "self.set_mode() self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log')) random.seed() self.trajname = make_trajectory_name(self) self.filename = make_temp_dir(os.path.join('experiments',", "= 'LOCK' self.multiproc = False self.gc_interval = None self.ncores = 1 self.use_pool=True self.use_scoop=False", "filename=self.filename) with self.assertRaises(ValueError): env3.f_run_map(multiply_args) with self.assertRaises(ValueError): Environment(use_scoop=True, immediate_postproc=True) with self.assertRaises(ValueError): Environment(use_pool=True, immediate_postproc=True) with", "self.explore(self.traj) self.traj.hdf5.purge_duplicate_comments=0 self.make_run() hdf5file = pt.open_file(self.filename) traj_group = hdf5file.get_node(where='/', name= self.traj.v_name) for node", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly_using_map(self): ###Explore self.explore(self.traj) args1=[10*x for x in range(len(self.traj))]", "old_level = root.level service_logger.setLevel(logging.INFO) root.setLevel(logging.INFO) traj.f_load(load_data=3) service_logger.setLevel(old_level) root.setLevel(old_level) path = get_log_path(traj) mainfilename =", "self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() 
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False): ###", "!= %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) traj = self.traj self.assertTrue(traj.v_idx == -1) self.assertTrue(traj.v_crun", "traj.f_add_parameter('y',99) self.env=env self.traj=traj def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False, how=2): ### Load The Trajectory and check if", "3, 3, 4]}) # # list_that_should_not_change = [42, 42, 42, 42] # #", "# # for irun, val in enumerate(list_that_should_not_change): # self.assertTrue(list_that_should_not_change[irun] == 42) # x=traj.results.runs[irun].kkk", "node._v_pathname or '/results/' in node._v_pathname): if 'SRVC_LEAF' in node._v_attrs: if 'SRVC_INIT_COMMENT' in node._v_attrs:", "unittest try: import psutil except ImportError: psutil = None try: import dill except", "self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=self.traj.v_name, index=None,", "$$$$$$$$$$$$$$$$$ Second Run $$$$$$$$$$$$$$$$$$$$$$$$') self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj)", "'bsr_mat' : matrices_bsr, 'dia_mat' : matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} traj.f_explore(self.explore_dict) def explore(self, traj):", "self.env.f_run(multiply) self.are_results_in_order(results) get_root_logger().info(results) traj = self.traj self.assertEqual(len(traj), len(list(list(self.explore_dict.values())[0]))) 
self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) traj_name", "self.explore_large(self.traj) self.make_run_large_data() self.assertTrue(self.traj.f_is_completed()) # Check if printing and repr work get_root_logger().info(str(self.env)) get_root_logger().info(repr(self.env)) newtraj", "log_stdout=self.log_stdout, log_config=get_log_config(), results_per_run=5, wildcard_functions=self.wildcard_functions, derived_parameters_per_run=5, multiproc=self.multiproc, ncores=self.ncores, wrap_mode=self.mode, use_pool=self.use_pool, gc_interval=self.gc_interval, freeze_input=self.freeze_input, fletcher32=self.fletcher32, complevel=self.complevel,", "from pypet.storageservice import HDF5StorageService from pypet import pypetconstants, Result, manual_run import pypet.pypetexceptions as", "self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) for res in results: self.assertEqual(len(res), 2)", "= False ###Explore self.explore(self.traj) self.make_run() # newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,", "'tests', 'Log')) random.seed() self.trajname = make_trajectory_name(self) self.filename = make_temp_dir(os.path.join('experiments', 'tests', 'HDF5', 'test%s.hdf5' %", "load_data=pypetconstants.LOAD_SKELETON) def test_switch_on_all_comments(self): self.explore(self.traj) self.traj.hdf5.purge_duplicate_comments=0 self.make_run() hdf5file = pt.open_file(self.filename) traj_group = hdf5file.get_node(where='/', name=", "os import platform import logging import time import numpy as np from pypet.trajectory", "ResultSortTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' def 
set_mode(self): self.mode = 'LOCK' self.multiproc =", "self.compare_trajectories(self.traj,newtraj) def check_if_z_is_correct_map(self,traj, args1, args2, args3): for x, arg1, arg2, arg3 in zip(range(len(traj)),", "-13 simple_kwarg= 13.0 results = self.env.f_run(simple_calculations,simple_arg,simple_kwarg=simple_kwarg) self.are_results_in_order(results) def test_a_large_run(self): get_root_logger().info('Testing large run') self.traj.f_add_parameter('TEST',", "newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_derived_parameters=how, load_results=how) return newtraj def explore(self,traj):", "6MB' % str(size_in_mb)) with self.assertRaises(TypeError): self.explore(self.traj) def test_run_complex(self): self.traj.f_add_parameter('TEST', 'test_run_complex') ###Explore self.explore_complex_params(self.traj) self.make_run()", "for idx, traj in enumerate(self.traj.f_iter_runs(yields='copy')): run_name = traj.f_idx_to_run(idx) self.assertTrue(traj is not self.traj) newtraj.v_crun=run_name", "node._v_pathname): if 'SRVC_LEAF' in node._v_attrs: if 'SRVC_INIT_COMMENT' in node._v_attrs: comment = node._v_attrs['SRVC_INIT_COMMENT'] if", "range(3): spsparse_bsr = spsp.lil_matrix((111,111)) spsparse_bsr[3,2+irun] = 44.5*irun matrices_bsr.append(spsparse_bsr.tocsr().tobsr()) matrices_dia = [] for irun", "self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb)) mp_traj = self.traj", "in ncomments: ncomments[comment] = 0 ncomments[comment] += 1 finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(all(x", "group is below test not new, so ValueError thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_store_child('group') #", "1 self.env._traj.config.hdf5.overview.derived_parameters_overview = 1 self.make_run() hdf5file = pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+ 
self.traj.v_name, name='overview')", "= 0 ncomments[comment] += 1 finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(any(x > 1 for", "def _load(self, load_dict): time.sleep(3) super(SlowResult, self)._load(load_dict) class FullStorageTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment'", "self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def expand(self): self.expanded ={'Normal.trial': [1], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]),", "self.explore(self.traj) self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=0 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=0 self.make_run() hdf5file = pt.open_file(self.filename, mode='a') ncomments = {}", "self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct_map(traj, args1, args2, args3) for res", "traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) for res in", "def test_not_deep_copy_data(self): # self.filename = make_temp_dir('experiments/tests/HDF5/testcoyp2.hdf5') # self.logfolder = make_temp_dir('experiments/tests/Log') # self.trajname =", "are still the same newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_derived_parameters=how, load_results=how)", "= self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() 
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_graceful_exit(self): ###Explore self.explore_cartesian(self.traj) results = self.env.f_run(multiply_with_graceful_exit)", "path = get_log_path(traj) mainfilename = os.path.join(path, 'LOG.txt') with open(mainfilename, mode='r') as mainf: full_text", "* %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) for idx, traj in enumerate(self.traj.f_iter_runs(yields='self')): run_name = traj.f_idx_to_run(idx) self.assertTrue(traj", "immediate_postproc=True) with self.assertRaises(ValueError): Environment(continuable=True, wrap_mode='QUEUE', continue_folder=tmp) with self.assertRaises(ValueError): Environment(use_scoop=True, wrap_mode='QUEUE') with self.assertRaises(ValueError): Environment(automatic_storing=False,", "def with_niceness(traj): if traj.multiproc: if hasattr(os, 'nice'): trajnice = traj.niceness osnice = os.nice(0)", "!= x*y: %s != %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) for idx, traj in", "import run_suite, make_temp_dir, make_trajectory_name,\\ get_root_logger, parse_args, get_log_config, get_log_path from pypet.tests.testutils.data import create_param_dict, add_params,", "supported under non Unix.') def test_niceness(self): ###Explore self.explore(self.traj) self.env.f_run(with_niceness) self.assertTrue(self.traj.f_is_completed()) def test_file_overwriting(self): self.traj.f_store()", "(not scoop.IS_RUNNING or scoop.IS_ORIGIN): return raise RuntimeError('traj niceness != os niceness; ' '%s", "self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) self.assertEqual(len(results), len(self.traj)) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))", "traj.f_add_parameter('dummy', 1) # traj.f_explore({'dummy':[12, 3, 3, 4]}) # # list_that_should_not_change = [42, 42,", "traj = self.traj traj.f_store() self.assertTrue(len(traj), 5) 
self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj)", "irun, val in enumerate(list_that_should_not_change): # self.assertTrue(list_that_should_not_change[irun] == 42) # x=traj.results.runs[irun].kkk # self.assertTrue(x==42+irun) #", "file_title=self.trajname, log_folder=self.logfolder, # log_stdout=False, # multiproc=False, # deep_copy_data=True) # # traj = env.v_trajectory", "mp_traj = self.traj old_multiproc = self.multiproc self.multiproc = False ### Make a new", "self.compare_trajectories(self.traj, newtraj) def expand(self): self.expanded ={'Normal.trial': [1], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]}", "under non Unix.') def test_niceness(self): ###Explore self.explore(self.traj) self.env.f_run(with_niceness) self.assertTrue(self.traj.f_is_completed()) def test_file_overwriting(self): self.traj.f_store() with", "self.ncores = 1 self.use_pool=True self.pandas_format='table' self.pandas_append=True self.complib = 'blosc' self.complevel=2 self.shuffle=False self.fletcher32 =", "self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj)", "trajnice = traj.niceness osnice = os.nice(0) else: trajnice = traj.niceness osnice = psutil.Process().nice()", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False): ### Load The Trajectory and check", "traj.v_idx=-1 traj.v_shortcuts=True # def 
test_runfunc(traj, list_that_changes): # traj.f_add_result('kkk', list_that_changes[traj.v_idx] + traj.v_idx) # list_that_changes[traj.v_idx]", "args2=[100*x for x in range(len(self.traj))] args3=list(range(len(self.traj))) results = self.env.f_run_map(multiply_args, args1, arg2=args2, arg3=args3) self.assertEqual(len(results),", "so ValueError thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_remove_child('group') # group is below test not new,", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_expand_after_reload(self): ###Explore", "with self.assertRaises(pex.NotUniqueNodeError): traj.f_explore(self.explore_dict) traj.f_shrink(force=True) par_dict = traj.parameters.f_to_dict() for param_name in par_dict: param =", "self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx == idx) def test_expand(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply)", "44.5*irun matrices_csr.append(spsparse_csr.tocsr()) matrices_csc = [] for irun in range(3): spsparse_csc = spsp.lil_matrix((111,111)) spsparse_csc[3,2+irun]", "self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename) size_in_mb =", "pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should = ['derived_parameters_overview', 'results_overview'] for name in", "self.env.f_disable_logging() super(EnvironmentTest, self).tearDown() def 
setUp(self): self.set_mode() self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log')) random.seed() self.trajname", "self.param_dict={} create_param_dict(self.param_dict) ### Add some parameter: add_params(traj,self.param_dict) #remember the trajectory and the environment", "is %sMB > 6MB' % str(size_in_mb)) def test_just_one_run(self): self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)", "Environment(use_pool=True, immediate_postproc=True) with self.assertRaises(ValueError): Environment(continuable=True, wrap_mode='QUEUE', continue_folder=tmp) with self.assertRaises(ValueError): Environment(use_scoop=True, wrap_mode='QUEUE') with self.assertRaises(ValueError):", "explore(self,traj): self.explore_dict={'x':[-1,1,2,3,4],'y':[1,1,2,2,3]} traj.f_explore(self.explore_dict) def explore_cartesian(self,traj): self.explore_dict=cartesian_product({'x':[-1,1,2,3,4, 5, 6],'y':[1,1,2,2,3,4,4]}) traj.f_explore(self.explore_dict) def expand(self,traj): self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12,13]} with", "% idx class TestOtherHDF5Settings(EnvironmentTest): tags = 'integration', 'hdf5', 'environment', 'hdf5_settings' def set_mode(self): EnvironmentTest.set_mode(self)", "= 'integration', 'hdf5', 'environment' def set_mode(self): self.mode = 'LOCK' self.multiproc = False self.gc_interval", "comment='llk') traj.f_store() service_logger = traj.v_storage_service._logger root = logging.getLogger('pypet') old_level = root.level service_logger.setLevel(logging.INFO) root.setLevel(logging.INFO)", "0 self.env._traj.config.hdf5.overview.explored_parameters_overview = 0 self.make_run() hdf5file = pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview')", "import pypetconstants, Result, manual_run import pypet.pypetexceptions as pex import sys import unittest try:", "% (str(newtraj.crun.z),str(traj.x),str(traj.y))) for idx, 
traj in enumerate(self.traj.f_iter_runs(yields='copy')): run_name = traj.f_idx_to_run(idx) self.assertTrue(traj is not", "1 for x in ncomments.values())) def test_NOT_purge_duplicate_comments(self): self.explore(self.traj) self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=0 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=0 self.make_run()", "traj.f_add_parameter('dummy', 1) # traj.f_explore({'dummy':[12, 3, 3, 4]}) # # list_that_should_change = [42, 42,", "in range(3): spsparse_csc = spsp.lil_matrix((111,111)) spsparse_csc[3,2+irun] = 44.5*irun matrices_csc.append(spsparse_csc.tocsc()) matrices_bsr = [] for", "self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb)) def test_just_one_run(self): self.make_run()", "'test%s.hdf5' % self.trajname)) env = Environment(trajectory=self.trajname, filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config(), results_per_run=5, wildcard_functions=self.wildcard_functions, derived_parameters_per_run=5,", "niceness != os niceness; ' '%s != %s' % (str(trajnice), str(osnice))) def add_large_data(traj):", "par_dict = traj.parameters.f_to_dict() for param_name in par_dict: param = par_dict[param_name] if param.v_name in", "try: traj_group = hdf5file.get_node(where='/',name= self.traj.v_name) for node in traj_group._f_walk_groups(): if ('/derived_parameters/' in node._v_pathname", "True newtraj.par.f_load_child('y', load_data=1) for idx, run_name in enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx", "traj in enumerate(self.traj.f_iter_runs(yields='copy')): run_name = traj.f_idx_to_run(idx) self.assertTrue(traj is not self.traj) newtraj.v_crun=run_name self.traj.v_idx =", "env.f_disable_logging() def make_run_large_data(self): self.env.f_run(add_large_data) def make_run(self): ### Make a test run simple_arg =", "self.env.v_trajectory 
self.traj.f_load(name=traj_name) self.traj.res.f_remove() self.traj.dpar.f_remove() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$ Second Run $$$$$$$$$$ \\n') self.make_run() newtraj", "traj = env.v_trajectory traj.par.x = Parameter('x', 3, 'jj') traj.f_explore({'x': [1,2,3]}) env.f_run(add_one_particular_item, True) traj", "self.fletcher32 = False self.encoding='latin1' self.graceful_exit = True class TestOtherHDF5Settings2(EnvironmentTest): tags = 'integration', 'hdf5',", "gc_interval=self.gc_interval, freeze_input=self.freeze_input, fletcher32=self.fletcher32, complevel=self.complevel, complib=self.complib, shuffle=self.shuffle, pandas_append=self.pandas_append, pandas_format=self.pandas_format, encoding=self.encoding, niceness=self.niceness, use_scoop=self.use_scoop, port=self.port, add_time=self.add_time,", "list_that_changes[traj.v_idx] = 1000 # class DeepCopyTest(TrajectoryComparator): # # def test_deep_copy_data(self): # # self.filename", "newtraj) def test_expand_after_reload(self): self.traj.f_add_parameter('TEST', 'test_expand_after_reload') ###Explore self.explore(self.traj) self.make_run() traj_name = self.traj.v_name self.env =", "= self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def expand(self): self.expanded ={'Normal.trial': [1], 'Numpy.double':", "matrices_csr, 'csc_mat' : matrices_csc, 'bsr_mat' : matrices_bsr, 'dia_mat' : matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]}", "self.assertTrue(newtraj.v_idx == idx) def test_f_iter_runs_auto_load(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj =", "== len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = 
Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=self.traj.v_name, index=None, as_new=False,", "in range(3): spsparse_csr = spsp.lil_matrix((111,111)) spsparse_csr[3,2+irun] = 44.5*irun matrices_csr.append(spsparse_csr.tocsr()) matrices_csc = [] for", "newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_parameters=2, load_derived_parameters=2, load_results=2, load_other_data=2) return newtraj", "Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=self.traj.v_name, index=None, as_new=False, load_data=0) newtraj.v_auto_load = True newtraj.par.f_load_child('y', load_data=1) for idx,", "+= 1 finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(all(x == 1 for x in ncomments.values()))", "newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def check_if_z_is_correct_map(self,traj, args1, args2, args3): for", "and psutil is None, 'Niceness not supported under non Unix.') def test_niceness(self): ###Explore", "self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name) self.traj.res.f_remove() self.traj.dpar.f_remove() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$ Second Run $$$$$$$$$$ \\n')", "with self.assertRaises(ValueError): Environment(continuable=True, wrap_mode='QUEUE', continue_folder=tmp) with self.assertRaises(ValueError): Environment(use_scoop=True, wrap_mode='QUEUE') with self.assertRaises(ValueError): Environment(automatic_storing=False, continuable=True,", "def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False): ### Load The Trajectory and check if the values are still", "traj = self.traj 
self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj =", "# file_title=self.trajname, log_folder=self.logfolder, # log_stdout=False, # multiproc=False, # deep_copy_data=False) # # traj =", "time import numpy as np from pypet.trajectory import Trajectory, load_trajectory from pypet.utils.explore import", "results = self.env.f_run(simple_calculations,simple_arg,simple_kwarg=simple_kwarg) self.are_results_in_order(results) def test_a_large_run(self): get_root_logger().info('Testing large run') self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore_large(self.traj)", "tail = os.path.split(env.v_traj.v_storage_service.filename) self.assertEqual(tail, the_file_name) class EnvironmentTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' def", "str(size_in_mb)) self.compare_trajectories(mp_traj, self.traj) self.multiproc = old_multiproc def test_errors(self): tmp = make_temp_dir('cont') if dill", "self.complevel=9 self.shuffle=True self.fletcher32 = False self.encoding = 'utf8' self.log_stdout=False self.wildcard_functions = None self.niceness", "newtraj.v_auto_load = True newtraj.par.f_load_child('y', load_data=1) for idx, run_name in enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name self.traj.v_idx =", "simple_arg = -13 simple_kwarg= 13.0 results = self.env.f_run(simple_calculations,simple_arg,simple_kwarg=simple_kwarg) self.are_results_in_order(results) def test_a_large_run(self): get_root_logger().info('Testing large", "parameter: add_params(traj,self.param_dict) #remember the trajectory and the environment self.traj = traj self.env =", "== -1) self.assertTrue(traj.v_crun is None) self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx == idx) def test_f_iter_runs_auto_load(self):", 
"overview_group, '%s in overviews but should not!' % name) hdf5file.close() def test_store_form_tuple(self): self.traj.f_store()", "with self.assertRaises(ValueError): self.traj.parameters.new.f_store_child('group') # group has children and recursive is false with self.assertRaises(TypeError):", "get_root_logger().info(str(self.env)) get_root_logger().info(repr(self.env)) newtraj = Trajectory() newtraj.f_load(name=self.traj.v_name, as_new=False, load_data=2, filename=self.filename) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj)", "0 self.env._traj.config.hdf5.overview.derived_parameters_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_summary = 0 self.env._traj.config.hdf5.overview.results_summary = 0 self.env._traj.config.hdf5.purge_duplicate_comments = 0", "\\n') self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def expand(self): self.expanded", "test_switch_off_all_tables(self): ###Explore self.traj.f_add_parameter('TEST', 'test_switch_off_ALL_tables') self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_summary =", "for node in traj_group._f_walk_groups(): if 'SRVC_LEAF' in node._v_attrs: self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs, 'There is", "recursive=True) self.assertTrue('new.group' not in self.traj) self.traj.new.f_load_child('test', recursive=True, load_data=pypetconstants.LOAD_SKELETON) self.assertTrue(self.traj.new.group.v_annotations.annotation, 42) self.traj.f_delete_item('new.test.group') with self.assertRaises(pex.DataNotInStorageError):", "values are still the same newtraj = Trajectory() 
newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_parameters=2,", "but should not!' % name) hdf5file.close() def test_store_form_tuple(self): self.traj.f_store() self.traj.f_add_result('TestResItem', 42, 43) with", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_expand_after_reload(self):", "None self.niceness = None self.port = None self.timeout = None self.add_time=True self.graceful_exit =", "load_results=2, load_other_data=2) return newtraj def test_expand(self): ###Explore self.traj.f_add_parameter('TEST', 'test_expand') self.explore(self.traj) self.make_run() self.expand() get_root_logger().info('\\n", "###Explore self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 1 self.env._traj.config.hdf5.overview.derived_parameters_overview = 1 self.make_run() hdf5file = pt.open_file(self.filename) overview_group", "res[0] self.assertEqual(self.traj.res.runs[idx].z, res[1]) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.assertEqual(len(traj), 5) self.compare_trajectories(self.traj,newtraj) def", "x = traj.par.x y = traj.par.y self.assertTrue(z==x*y,' z != x*y: %s != %s", "= make_trajectory_name(self) self.filename = make_temp_dir(os.path.join('experiments', 'tests', 'HDF5', 'test%s.hdf5' % self.trajname)) env = Environment(trajectory=self.trajname,", ": matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} traj.f_explore(self.explore_dict) def explore(self, traj): self.explored 
={'Normal.trial': [0], 'Numpy.double':", "from pypet.tests.testutils.data import create_param_dict, add_params, multiply,\\ simple_calculations, TrajectoryComparator, multiply_args, multiply_with_storing, \\ multiply_with_graceful_exit def", "make_run_large_data(self): self.env.f_run(add_large_data) def make_run(self): ### Make a test run simple_arg = -13 simple_kwarg=", "self.traj.parameters.f_load_child('new.test.group', load_data=pypetconstants.LOAD_SKELETON) def test_switch_on_all_comments(self): self.explore(self.traj) self.traj.hdf5.purge_duplicate_comments=0 self.make_run() hdf5file = pt.open_file(self.filename) traj_group = hdf5file.get_node(where='/',", "self.assertRaises(ValueError): self.traj.parameters.new.f_store_child('group') # group has children and recursive is false with self.assertRaises(TypeError): self.traj.parameters.new.f_remove_child('test')", "'Dos', 'Tres']), np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])], 'int':[1,2,3], 'csr_mat' : matrices_csr, 'csc_mat'", "overviews but it should!' 
% name) hdf5file.close() self.traj.f_load(load_parameters=2, load_derived_parameters=2, load_results=2) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name)", "def make_run_large_data(self): self.env.f_run(add_large_data) def make_run(self): ### Make a test run simple_arg = -13", "ncomments = {} try: traj_group = hdf5file.get_node(where='/',name= self.traj.v_name) for node in traj_group._f_walk_groups(): if", "###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton()", "< 30.0, 'Size is %sMB > 30MB' % str(size_in_mb)) def test_two_runs(self): self.traj.f_add_parameter('TEST', 'test_run')", "immediate_postproc=True) with self.assertRaises(ValueError): Environment(use_pool=True, immediate_postproc=True) with self.assertRaises(ValueError): Environment(continuable=True, wrap_mode='QUEUE', continue_folder=tmp) with self.assertRaises(ValueError): Environment(use_scoop=True,", "wrap_mode=self.mode, ncores=self.ncores, use_pool=self.use_pool, use_scoop=self.use_scoop, port=self.port, freeze_input=self.freeze_input, graceful_exit=self.graceful_exit) traj = env.v_trajectory traj.v_standard_parameter=Parameter traj.f_add_parameter('x',99) traj.f_add_parameter('y',99)", "self.explore_dict=cartesian_product({'x':[-1,1,2,3,4, 5, 6],'y':[1,1,2,2,3,4,4]}) traj.f_explore(self.explore_dict) def expand(self,traj): self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12,13]} with self.assertRaises(ValueError): traj.f_expand(self.expand_dict) self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]} traj.f_expand(self.expand_dict) def", "SlowResult(Result): def _load(self, load_dict): time.sleep(3) super(SlowResult, self)._load(load_dict) class FullStorageTest(TrajectoryComparator): tags = 'integration', 'hdf5',", "in results: self.assertEqual(len(res), 2) self.assertTrue(isinstance(res[0], int)) 
self.assertTrue(isinstance(res[1], int)) idx = res[0] self.assertEqual(self.traj.res.runs[idx].z, res[1])", "only_empties=True) for idx, run_name in enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx", "self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,", "Environment(trajectory=self.trajname,filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config() if self.log_config else None, multiproc=self.multiproc, wrap_mode=self.mode, ncores=self.ncores, use_pool=self.use_pool, use_scoop=self.use_scoop,", "= [] for irun in range(111): array_list.append(np.random.rand(10)) traj.f_add_result('m4ny', *array_list) class SimpleEnvironmentTest(TrajectoryComparator): tags =", "> 30MB' % str(size_in_mb)) def test_two_runs(self): self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments = False ###Explore self.explore(self.traj)", "node in traj_group._f_walk_groups(): if 'SRVC_LEAF' in node._v_attrs: self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs, 'There is no", "in range(len(traj)): traj.v_idx=x z = traj.res.runs.crun.z x = traj.par.x y = traj.par.y self.assertTrue(z==x*y,'", "= False self.gc_interval = None self.ncores = 1 self.use_pool=True self.use_scoop=False self.freeze_input=False self.pandas_format='fixed' self.pandas_append=False", "some parameters self.param_dict={} create_param_dict(self.param_dict) ### Add some parameter: add_params(traj,self.param_dict) #remember the trajectory and", "= traj.par.x y = traj.par.y self.assertTrue(z==x*y,' z != x*y: %s != %s *", "root.setLevel(old_level) path = get_log_path(traj) mainfilename = 
os.path.join(path, 'LOG.txt') with open(mainfilename, mode='r') as mainf:", "= {} try: traj_group = hdf5file.get_node(where='/',name= self.traj.v_name) for node in traj_group._f_walk_groups(): if ('/derived_parameters/'", "= env2.v_trajectory traj2.f_store() self.assertTrue(os.path.exists(self.filename)) with pt.open_file(self.filename, mode='r') as file: nchildren = len(file.root._v_children) self.assertTrue(nchildren", "for name in should: self.assertTrue(name in overview_group, '%s not in overviews but it", "[['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} with self.assertRaises(pex.NotUniqueNodeError): traj.f_explore(self.explore_dict) traj.f_shrink(force=True) par_dict = traj.parameters.f_to_dict() for param_name in par_dict: param", "self.pandas_append=False self.complib = 'zlib' self.complevel=9 self.shuffle=True self.fletcher32 = False self.encoding = 'utf8' self.log_stdout=False", "get_root_logger, parse_args, get_log_config, get_log_path from pypet.tests.testutils.data import create_param_dict, add_params, multiply,\\ simple_calculations, TrajectoryComparator, multiply_args,", "self.assertRaises(ValueError): Environment(use_pool=True, immediate_postproc=True) with self.assertRaises(ValueError): Environment(continuable=True, wrap_mode='QUEUE', continue_folder=tmp) with self.assertRaises(ValueError): Environment(use_scoop=True, wrap_mode='QUEUE') with", "hdf5file.close() def test_purge_duplicate_comments(self): self.explore(self.traj) with self.assertRaises(RuntimeError): self.traj.hdf5.purge_duplicate_comments = 1 self.traj.overview.results_summary = 0 self.make_run()", "self.assertRaises(ValueError): traj.f_expand(self.expand_dict) self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]} traj.f_expand(self.expand_dict) def test_if_results_are_sorted_correctly_manual_runs(self): ###Explore self.explore(self.traj) self.traj.f_store(only_init=True) man_multiply = manual_run()(multiply_with_storing) for", "= Environment(log_config=None, filename=self.filename) with 
self.assertRaises(ValueError): env3.f_run_map(multiply_args) with self.assertRaises(ValueError): Environment(use_scoop=True, immediate_postproc=True) with self.assertRaises(ValueError): Environment(use_pool=True,", "%s * %s' % (str(z),str(x),str(y))) traj.v_idx=-1 traj.v_shortcuts=True # def test_runfunc(traj, list_that_changes): # traj.f_add_result('kkk',", "test_switch_on_all_comments(self): self.explore(self.traj) self.traj.hdf5.purge_duplicate_comments=0 self.make_run() hdf5file = pt.open_file(self.filename) traj_group = hdf5file.get_node(where='/', name= self.traj.v_name) for", "traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)", "def expand(self,traj): self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12,13]} with self.assertRaises(ValueError): traj.f_expand(self.expand_dict) self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]} traj.f_expand(self.expand_dict) def test_if_results_are_sorted_correctly_manual_runs(self): ###Explore self.explore(self.traj) self.traj.f_store(only_init=True)", "self.log_stdout=False self.wildcard_functions = None self.niceness = None self.port = None self.timeout = None", "self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_expand_after_reload(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results)", "newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_expand_after_reload(self): 
###Explore self.explore(self.traj) results =", "Environment(filename=self.filename, log_config=get_log_config()) traj2 = env2.v_trajectory traj2.f_store() self.assertTrue(os.path.exists(self.filename)) with pt.open_file(self.filename, mode='r') as file: nchildren", "%s * %s' % (str(traj.crun.z),str(traj.x),str(traj.y))) traj.v_idx=-1 def check_if_z_is_correct(self,traj): traj.v_shortcuts=False for x in range(len(traj)):", "= par_dict[param_name] if param.v_name in self.explore_dict: param.f_unlock() if param.v_explored: param._shrink() self.explore_dict={'Numpy.string':[np.array(['Uno', 'Dos', 'Tres']),", "= set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,)))) self.assertTrue('run_%08d' % (idx+1) not in nameset) self.assertTrue('run_%08d'", "= Environment(filename=self.filename, overwrite_file=True, log_config=get_log_config()) self.assertFalse(os.path.exists(self.filename)) env2.f_disable_logging() env3.f_disable_logging() def test_time_display_of_loading(self): filename = make_temp_dir('sloooow.hdf5') env", "$$$$$$$$$$$$$$$$$$$$$$$$') self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def test_expand_after_reload(self): self.traj.f_add_parameter('TEST',", "logging.getLogger('pypet') old_level = root.level service_logger.setLevel(logging.INFO) root.setLevel(logging.INFO) traj.f_load(load_data=3) service_logger.setLevel(old_level) root.setLevel(old_level) path = get_log_path(traj) mainfilename", "self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) for res in results: self.assertEqual(len(res),", "self.assertTrue(any(x > 1 for x in ncomments.values())) def my_run_func(idx): return 'hello_%d' % idx", "self.filename 
= make_temp_dir(os.path.join('experiments', 'tests', 'HDF5', 'test%s.hdf5' % self.trajname)) env = Environment(trajectory=self.trajname, filename=self.filename, file_title=self.trajname,", "% (str(newtraj.crun.z),str(newtraj.x),str(newtraj.y))) traj = self.traj self.assertTrue(traj.v_idx == -1) self.assertTrue(traj.v_crun is None) self.assertTrue(traj.v_crun_ ==", "for idx in self.traj.f_iter_runs(yields='idx'): self.assertTrue(isinstance(idx, int)) man_multiply(self.traj) traj = self.traj traj.f_store() self.assertTrue(len(traj), 5)", "with self.assertRaises(TypeError): self.explore(self.traj) def test_run_complex(self): self.traj.f_add_parameter('TEST', 'test_run_complex') ###Explore self.explore_complex_params(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj =", "'%s in overviews but should not!' % name) hdf5file.close() def test_store_form_tuple(self): self.traj.f_store() self.traj.f_add_result('TestResItem',", "'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} with self.assertRaises(pex.NotUniqueNodeError): traj.f_explore(self.explore_dict) traj.f_shrink(force=True) par_dict = traj.parameters.f_to_dict() for param_name in", "pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should_not = HDF5StorageService.NAME_TABLE_MAPPING.keys() for name in should_not:", "FullStorageTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' # Test tags def test_full_store(self): filename =", "load_data=pypetconstants.LOAD_SKELETON) self.assertTrue(self.traj.new.group.v_annotations.annotation, 42) self.traj.f_delete_item('new.test.group') with self.assertRaises(pex.DataNotInStorageError): self.traj.parameters.f_load_child('new.test.group', load_data=pypetconstants.LOAD_SKELETON) def test_switch_on_all_comments(self): self.explore(self.traj) self.traj.hdf5.purge_duplicate_comments=0 self.make_run()", "self.env = Environment(trajectory=self.traj, log_stdout=False, 
log_config=get_log_config()) self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name) self.expand(self.traj) results = self.env.f_run(multiply)", "add_params(traj,self.param_dict) #remember the trajectory and the environment self.traj = traj self.env = env", "self.pandas_format='table' self.pandas_append=False self.complib = 'lzo' self.complevel=2 self.shuffle=False self.fletcher32 = True self.encoding='latin1' self.wildcard_functions =", "idx = res[0] self.assertEqual(self.traj.res.runs[idx].z, res[1]) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.assertEqual(len(traj), 5)", "44.5*irun matrices_csc.append(spsparse_csc.tocsc()) matrices_bsr = [] for irun in range(3): spsparse_bsr = spsp.lil_matrix((111,111)) spsparse_bsr[3,2+irun]", "node._v_attrs: if 'SRVC_INIT_COMMENT' in node._v_attrs: comment = node._v_attrs['SRVC_INIT_COMMENT'] if comment not in ncomments:", "% str(size_in_mb)) self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb)) def", "= 'LOCK' self.multiproc = False self.ncores = 1 self.use_pool=True self.log_stdout=False self.freeze_input=False self.use_scoop =", "scipy.sparse as spsp import random from pypet import Parameter import tables as pt", "env1.f_run_map(multiply_args, [1], [2], [3]) with self.assertRaises(ValueError): Environment(multiproc=True, use_pool=False, freeze_input=True, filename=self.filename, log_config=None) env3 =", "= env @unittest.skipIf(not hasattr(os, 'nice') and psutil is None, 'Niceness not supported under", "SimpleEnvironmentTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment', 'quick' def test_make_default_file_when_giving_directory_without_slash(self): filename = make_temp_dir('test.hdf5') head,", "create_param_dict(self.param_dict) ### Add some parameter: add_params(traj,self.param_dict) #remember the trajectory and the environment 
self.traj", "self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_summary = 0 self.env._traj.config.hdf5.overview.results_summary = 0", "make_run(self): ### Make a test run simple_arg = -13 simple_kwarg= 13.0 results =", "nameset) self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y: %s != %s * %s'", "self.make_run() hdf5file = pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should = ['derived_parameters_overview', 'results_overview']", "pt from pypet.tests.testutils.ioutils import run_suite, make_temp_dir, make_trajectory_name,\\ get_root_logger, parse_args, get_log_config, get_log_path from pypet.tests.testutils.data", "idx newtraj.v_idx = idx nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,)))) self.assertTrue('run_%08d' %", "expand(self,traj): self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12,13]} with self.assertRaises(ValueError): traj.f_expand(self.expand_dict) self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]} traj.f_expand(self.expand_dict) def test_if_results_are_sorted_correctly_manual_runs(self): ###Explore self.explore(self.traj) self.traj.f_store(only_init=True) man_multiply", "traj.hi = Result('hi', 42, 'hi!') traj.f_store() traj.f_remove_child('hi') class SlowResult(Result): def _load(self, load_dict): time.sleep(3)", "[0], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.explored['csr_mat'][0][1,2]=44.0 self.explored['csr_mat'][1][2,2]=33 self.explored['csr_mat'][0] = self.explored['csr_mat'][0].tocsr() self.explored['csr_mat'][1]", "self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() 
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000.", "self.niceness = None self.port = None self.timeout = None self.add_time=True self.graceful_exit = False", "5) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) self.assertEqual(len(results), len(self.traj)) traj", "= make_temp_dir('experiments/tests/HDF5/testcopy.hdf5') # self.logfolder = make_temp_dir('experiments/tests/Log') # self.trajname = make_trajectory_name(self) # # env", "load_trajectory from pypet.utils.explore import cartesian_product from pypet.environment import Environment from pypet.storageservice import HDF5StorageService", "= True self.port = None self.graceful_exit = True def tearDown(self): self.env.f_disable_logging() super(ResultSortTest, self).tearDown()", "self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) self.assertTrue(len(newtraj) == 1) size=os.path.getsize(self.filename) size_in_mb = size/1000000.", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) self.assertTrue(len(newtraj) == 1) size=os.path.getsize(self.filename) size_in_mb = size/1000000. 
get_root_logger().info('Size is", "for param_name in par_dict: param = par_dict[param_name] if param.v_name in self.explore_dict: param.f_unlock() if", "'Nueve', 'Diez'])], 'int':[1,2,3], 'csr_mat' : matrices_csr, 'csc_mat' : matrices_csc, 'bsr_mat' : matrices_bsr, 'dia_mat'", "array_list = [] for irun in range(111): array_list.append(np.random.rand(10)) traj.f_add_result('m4ny', *array_list) class SimpleEnvironmentTest(TrajectoryComparator): tags", "3, 3, 4]}) # # list_that_should_change = [42, 42, 42, 42] # #", "file: nchildren = len(file.root._v_children) self.assertTrue(nchildren > 1) env3 = Environment(filename=self.filename, overwrite_file=True, log_config=get_log_config()) self.assertFalse(os.path.exists(self.filename))", "x in range(len(traj)): traj.v_idx=x z = traj.res.runs.crun.z x = traj.par.x y = traj.par.y", "log_config=get_log_config()) traj2 = env2.v_trajectory traj2.f_store() self.assertTrue(os.path.exists(self.filename)) with pt.open_file(self.filename, mode='r') as file: nchildren =", "param._shrink() self.explore_dict={'Numpy.string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])], 'Normal.int':[1,2,3], 'csr_mat' :", "self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj =", "env.f_run(test_runfunc, list_that_should_change) # # traj.v_auto_load=True # # for irun, val in enumerate(list_that_should_change): #", "-1) self.assertTrue(traj.v_crun is None) self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx == idx) def test_expand(self): ###Explore", "with self.assertRaises(ValueError): Environment(use_scoop=True, immediate_postproc=True) with self.assertRaises(ValueError): Environment(use_pool=True, immediate_postproc=True) with 
self.assertRaises(ValueError): Environment(continuable=True, wrap_mode='QUEUE', continue_folder=tmp)", "def test_just_one_run(self): self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) self.assertTrue(len(newtraj)", "z != x*y: %s != %s * %s' % (str(newtraj.crun.z),str(newtraj.x),str(newtraj.y))) traj = self.traj", "len(list(self.expand_dict.values())[0])+\\ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj)", "env.v_trajectory # # traj.f_add_parameter('dummy', 1) # traj.f_explore({'dummy':[12, 3, 3, 4]}) # # list_that_should_not_change", "self.compare_trajectories(self.traj,newtraj) def test_expand_after_reload(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj)", "# traj = env.v_trajectory # # traj.f_add_parameter('dummy', 1) # traj.f_explore({'dummy':[12, 3, 3, 4]})", "'Size is %sMB > 6MB' % str(size_in_mb)) self.compare_trajectories(mp_traj, self.traj) self.multiproc = old_multiproc def", "results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "> 1) env3 = Environment(filename=self.filename, overwrite_file=True, log_config=get_log_config()) 
self.assertFalse(os.path.exists(self.filename)) env2.f_disable_logging() env3.f_disable_logging() def test_time_display_of_loading(self): filename", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=self.traj.v_name, index=None, as_new=False, load_data=0) newtraj.v_auto_load =", "test_f_iter_runs_auto_load(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))", "psutil except ImportError: psutil = None try: import dill except ImportError: dill =", "use_pool=self.use_pool, use_scoop=self.use_scoop, port=self.port, freeze_input=self.freeze_input, graceful_exit=self.graceful_exit) traj = env.v_trajectory traj.v_standard_parameter=Parameter traj.f_add_parameter('x',99) traj.f_add_parameter('y',99) self.env=env self.traj=traj", "class TestOtherHDF5Settings(EnvironmentTest): tags = 'integration', 'hdf5', 'environment', 'hdf5_settings' def set_mode(self): EnvironmentTest.set_mode(self) self.mode =", "should not!' 
% name) hdf5file.close() def test_store_form_tuple(self): self.traj.f_store() self.traj.f_add_result('TestResItem', 42, 43) with self.assertRaises(ValueError):", "args3=list(range(len(self.traj))) results = self.env.f_run_map(multiply_args, args1, arg2=args2, arg3=args3) self.assertEqual(len(results), len(self.traj)) traj = self.traj self.assertTrue(len(traj)", "self.traj.f_add_parameter('TEST', 'test_expand') self.explore(self.traj) self.make_run() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$$$$$$ Second Run $$$$$$$$$$$$$$$$$$$$$$$$') self.make_run() newtraj =", "self.complib = 'lzo' self.complevel=2 self.shuffle=False self.fletcher32 = True self.encoding='latin1' self.wildcard_functions = {('$', 'crun')", "4]}) # # list_that_should_change = [42, 42, 42, 42] # # env.f_run(test_runfunc, list_that_should_change)", "= Environment(filename=head) the_file_name = env.v_traj.v_name + '.hdf5' head, tail = os.path.split(env.v_traj.v_storage_service.filename) self.assertEqual(tail, the_file_name)", "if comment not in ncomments: ncomments[comment] = 0 ncomments[comment] += 1 finally: hdf5file.close()", "traj.v_storage_service._logger root = logging.getLogger('pypet') old_level = root.level service_logger.setLevel(logging.INFO) root.setLevel(logging.INFO) traj.f_load(load_data=3) service_logger.setLevel(old_level) root.setLevel(old_level) path", "parameters self.param_dict={} create_param_dict(self.param_dict) ### Add some parameter: add_params(traj,self.param_dict) #remember the trajectory and the", "results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", "%s != %s * %s' % (str(traj.crun.z),str(traj.x),str(traj.y))) traj.v_idx=-1 def check_if_z_is_correct(self,traj): traj.v_shortcuts=False for x", "'/results/' in node._v_pathname): if 'SRVC_LEAF' in node._v_attrs: 
if 'SRVC_INIT_COMMENT' in node._v_attrs: comment =", "create_param_dict, add_params, multiply,\\ simple_calculations, TrajectoryComparator, multiply_args, multiply_with_storing, \\ multiply_with_graceful_exit def add_one_particular_item(traj, store_full): traj.hi", "name) hdf5file.close() def test_store_form_tuple(self): self.traj.f_store() self.traj.f_add_result('TestResItem', 42, 43) with self.assertRaises(ValueError): self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem,(),{},5)) self.traj.f_store_item((pypetconstants.LEAF,", "= pt.open_file(self.filename, mode='a') ncomments = {} try: traj_group = hdf5file.get_node(where='/',name= self.traj.v_name) for node", "1) # traj.f_explore({'dummy':[12, 3, 3, 4]}) # # list_that_should_not_change = [42, 42, 42,", "with open(mainfilename, mode='r') as mainf: full_text = mainf.read() self.assertTrue('nodes/s)' in full_text) env.f_disable_logging() def", "Environment(trajectory=self.trajname,filename=self.filename, # file_title=self.trajname, log_folder=self.logfolder, # log_stdout=False, # multiproc=False, # deep_copy_data=False) # # traj", "int)) idx = res[0] self.assertEqual(self.traj.res.runs[idx].z, res[1]) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.assertEqual(len(traj),", "= False self.ncores = 1 self.use_pool=True self.pandas_format='table' self.pandas_append=True self.complib = 'blosc' self.complevel=2 self.shuffle=False", "= 'blosc' self.complevel=2 self.shuffle=False self.fletcher32 = False self.encoding='latin1' self.graceful_exit = True class TestOtherHDF5Settings2(EnvironmentTest):", "self.compare_trajectories(mp_traj, self.traj) self.multiproc = old_multiproc def test_errors(self): tmp = make_temp_dir('cont') if dill is", "self.traj.f_store(only_init=True) man_multiply = manual_run()(multiply_with_storing) for idx in self.traj.f_iter_runs(yields='idx'): 
self.assertTrue(isinstance(idx, int)) man_multiply(self.traj) traj =", "return raise RuntimeError('traj niceness != os niceness; ' '%s != %s' % (str(trajnice),", "'integration', 'hdf5', 'environment' def set_mode(self): self.mode = 'LOCK' self.multiproc = False self.gc_interval =", "str(size_in_mb)) self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb)) self.compare_trajectories(mp_traj, self.traj)", "%s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) for idx, traj in enumerate(self.traj.f_iter_runs(yields='self')): run_name = traj.f_idx_to_run(idx)", "0 self.env._traj.config.hdf5.overview.results_summary = 0 self.env._traj.config.hdf5.purge_duplicate_comments = 0 self.env._traj.config.hdf5.overview.parameters_overview = 0 self.env._traj.config.hdf5.overview.config_overview = 0", "get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB'", "= Parameter('x', 3, 'jj') traj.f_explore({'x': [1,2,3]}) env.f_run(add_one_particular_item, True) traj = load_trajectory(index=-1, filename=filename) self.assertTrue('hi'", "check if the values are still the same newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name,", "test_store_single_group(self): self.traj.f_store() self.traj.f_add_parameter_group('new.test.group').v_annotations.f_set(42) self.traj.f_store_item('new.group') # group is below test not new, so ValueError", "def set_mode(self): EnvironmentTest.set_mode(self) self.mode = 'LOCK' self.multiproc = False self.ncores = 1 self.use_pool=True", "'Seis', 'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])], 'int':[1,2,3], 'csr_mat' : matrices_csr, 'csc_mat' : matrices_csc, 'bsr_mat'", "self.explore_dict={'Numpy.string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])], 'Normal.int':[1,2,3], 'csr_mat' : matrices_csr,", "# group is below test not new, so ValueError 
thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_store_child('group')", "traj.v_idx=x z = traj.res.runs.crun.z x = traj.par.x y = traj.par.y self.assertTrue(z==x*y,' z !=", "self.traj.v_name) for node in traj_group._f_walk_groups(): if 'SRVC_LEAF' in node._v_attrs: self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs, 'There", "super(SlowResult, self)._load(load_dict) class FullStorageTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' # Test tags def", "self.env.f_run(multiply) self.are_results_in_order(results) self.assertEqual(len(results), len(self.traj)) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", "42, 43) with self.assertRaises(ValueError): self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem,(),{},5)) self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem)) self.traj.results.f_remove_child('TestResItem') self.assertTrue('TestResItem' not in self.traj)", "res in results: self.assertEqual(len(res), 2) self.assertTrue(isinstance(res[0], int)) self.assertTrue(isinstance(res[1], int)) idx = res[0] self.assertEqual(self.traj.res.runs[idx].z,", "complevel=self.complevel, complib=self.complib, shuffle=self.shuffle, pandas_append=self.pandas_append, pandas_format=self.pandas_format, encoding=self.encoding, niceness=self.niceness, use_scoop=self.use_scoop, port=self.port, add_time=self.add_time, timeout=self.timeout, graceful_exit=self.graceful_exit) traj", "test run simple_arg = -13 simple_kwarg= 13.0 results = self.env.f_run(simple_calculations,simple_arg,simple_kwarg=simple_kwarg) self.are_results_in_order(results) def test_a_large_run(self):", "0 self.env._traj.config.hdf5.overview.config_overview = 0 self.env._traj.config.hdf5.overview.explored_parameters_overview = 0 self.make_run() hdf5file = pt.open_file(self.filename) overview_group =", "= 
hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should = ['derived_parameters_overview', 'results_overview'] for name in should: self.assertTrue(name", "def test_a_large_run(self): get_root_logger().info('Testing large run') self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore_large(self.traj) self.make_run_large_data() self.assertTrue(self.traj.f_is_completed()) # Check", "= hdf5file.get_node(where='/',name= self.traj.v_name) for node in traj_group._f_walk_groups(): if ('/derived_parameters/' in node._v_pathname or '/results/'", "spsp.lil_matrix((111,111)) spsparse_csr[3,2+irun] = 44.5*irun matrices_csr.append(spsparse_csr.tocsr()) matrices_csc = [] for irun in range(3): spsparse_csc", "is below test not new, so ValueError thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_store_child('group') # group", "self.traj.hdf5.purge_duplicate_comments = False ###Explore self.explore(self.traj) self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,", "log_config=get_log_config()) as env: traj = env.v_trajectory traj.par.x = Parameter('x', 3, 'jj') traj.f_explore({'x': [1,2,3]})", "False ###Explore self.explore(self.traj) self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename)", "import numpy as np from pypet.trajectory import Trajectory, load_trajectory from pypet.utils.explore import cartesian_product", "1 self.use_pool=True self.log_stdout=False self.freeze_input=False self.use_scoop = False self.log_config = True self.port = None", "!= %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) for idx, traj in 
enumerate(self.traj.f_iter_runs(yields='self')): run_name =", "par_dict[param_name] if param.v_name in self.explore_dict: param.f_unlock() if param.v_explored: param._shrink() self.explore_dict={'Numpy.string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco',", "not!' % name) hdf5file.close() def test_store_form_tuple(self): self.traj.f_store() self.traj.f_add_result('TestResItem', 42, 43) with self.assertRaises(ValueError): self.traj.f_store_item((pypetconstants.LEAF,", "with self.assertRaises(pex.DataNotInStorageError): self.traj.parameters.f_load_child('new.test.group', load_data=pypetconstants.LOAD_SKELETON) def test_switch_on_all_comments(self): self.explore(self.traj) self.traj.hdf5.purge_duplicate_comments=0 self.make_run() hdf5file = pt.open_file(self.filename) traj_group", "log_config=get_log_config() if self.log_config else None, multiproc=self.multiproc, wrap_mode=self.mode, ncores=self.ncores, use_pool=self.use_pool, use_scoop=self.use_scoop, port=self.port, freeze_input=self.freeze_input, graceful_exit=self.graceful_exit)", "(str(newtraj.crun.z),str(traj.x),str(traj.y))) traj = self.traj self.assertTrue(traj.v_idx == -1) self.assertTrue(traj.v_crun is None) self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY)", "ncomments.values())) def my_run_func(idx): return 'hello_%d' % idx def my_set_func(idx): return 'huhu_%d' % idx", "x*y: %s != %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) for idx, traj in enumerate(self.traj.f_iter_runs(yields='self')):", "== len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) self.expand(self.traj) self.env.f_run(multiply) traj = self.traj self.assertTrue(len(traj) ==", "tearDown(self): self.env.f_disable_logging() super(EnvironmentTest, self).tearDown() def setUp(self): self.set_mode() self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log')) random.seed()", "> 
6MB' % str(size_in_mb)) self.compare_trajectories(mp_traj, self.traj) self.multiproc = old_multiproc def test_errors(self): tmp =", "filename=self.filename) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size is %sMB' %", "self.graceful_exit = True def tearDown(self): self.env.f_disable_logging() super(ResultSortTest, self).tearDown() def setUp(self): self.set_mode() self.filename =", "Environment from pypet.storageservice import HDF5StorageService from pypet import pypetconstants, Result, manual_run import pypet.pypetexceptions", "only_empties=True) self.check_if_z_is_correct_map(traj, args1, args2, args3) for res in results: self.assertEqual(len(res), 2) self.assertTrue(isinstance(res[0], int))", "in range(len(self.traj))] args3=list(range(len(self.traj))) results = self.env.f_run_map(multiply_args, args1, arg2=args2, arg3=args3) self.assertEqual(len(results), len(self.traj)) traj =", "= None self.port = None self.timeout = None self.add_time=True self.graceful_exit = False def", "log_stdout=self.log_stdout, log_config=get_log_config() if self.log_config else None, multiproc=self.multiproc, wrap_mode=self.mode, ncores=self.ncores, use_pool=self.use_pool, use_scoop=self.use_scoop, port=self.port, freeze_input=self.freeze_input,", "self.encoding='latin1' self.wildcard_functions = {('$', 'crun') : my_run_func, ('$set', 'crunset'): my_set_func} class ResultSortTest(TrajectoryComparator): tags", "== 42) # x=traj.results.runs[irun].kkk # self.assertTrue(x==42+irun) # # def test_not_deep_copy_data(self): # self.filename =", "log_config=get_log_config()) self.assertFalse(os.path.exists(self.filename)) env2.f_disable_logging() env3.f_disable_logging() def test_time_display_of_loading(self): filename = make_temp_dir('sloooow.hdf5') env = Environment(trajectory='traj', add_time=True,", "z != 
x*y: %s != %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) for idx, traj", "args1, args2, args3): traj.v_idx=x self.assertTrue(traj.crun.z==traj.x*traj.y+arg1+arg2+arg3,' z != x*y: %s != %s * %s'", "test_run_complex(self): self.traj.f_add_parameter('TEST', 'test_run_complex') ###Explore self.explore_complex_params(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", "test_if_results_are_sorted_correctly_manual_runs(self): ###Explore self.explore(self.traj) self.traj.f_store(only_init=True) man_multiply = manual_run()(multiply_with_storing) for idx in self.traj.f_iter_runs(yields='idx'): self.assertTrue(isinstance(idx, int))", "name='overview') should = ['derived_parameters_overview', 'results_overview'] for name in should: self.assertTrue(name in overview_group, '%s", "for irun, val in enumerate(list_that_should_not_change): # self.assertTrue(list_that_should_not_change[irun] == 42) # x=traj.results.runs[irun].kkk # self.assertTrue(x==42+irun)", "self.make_run() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$$$$$$ Second Run $$$$$$$$$$$$$$$$$$$$$$$$') self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "arg1, arg2, arg3 in zip(range(len(traj)), args1, args2, args3): traj.v_idx=x self.assertTrue(traj.crun.z==traj.x*traj.y+arg1+arg2+arg3,' z != x*y:", "### Make a new single core run self.setUp() self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments = False", "repr work get_root_logger().info(str(self.env)) get_root_logger().info(repr(self.env)) newtraj = Trajectory() newtraj.f_load(name=self.traj.v_name, as_new=False, load_data=2, filename=self.filename) self.traj.f_load_skeleton() 
self.traj.f_load_items(self.traj.f_to_dict().keys(),", "log_stdout=False, log_config=get_log_config()) self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name) self.expand(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj =", "newtraj def test_expand(self): ###Explore self.traj.f_add_parameter('TEST', 'test_expand') self.explore(self.traj) self.make_run() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$$$$$$ Second Run", "node._v_attrs, 'There is no comment in node %s!' % node._v_name) hdf5file.close() def test_purge_duplicate_comments(self):", "2) self.assertTrue(isinstance(res[0], int)) self.assertTrue(isinstance(res[1], int)) idx = res[0] self.assertEqual(self.traj.res.runs[idx].z, res[1]) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)", "'LOCK' self.multiproc = False self.gc_interval = None self.ncores = 1 self.use_pool=True self.use_scoop=False self.freeze_input=False", "self.assertTrue(nchildren > 1) env3 = Environment(filename=self.filename, overwrite_file=True, log_config=get_log_config()) self.assertFalse(os.path.exists(self.filename)) env2.f_disable_logging() env3.f_disable_logging() def test_time_display_of_loading(self):", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def check_if_z_is_correct_map(self,traj, args1, args2, args3): for x, arg1, arg2,", "idx = res[0] self.assertEqual(self.traj.res.runs[idx].z, res[1]) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def", "below test not new, so ValueError thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_remove_child('group') # group is", "False self.ncores = 1 self.use_pool=True self.pandas_format='table' self.pandas_append=False 
self.complib = 'lzo' self.complevel=2 self.shuffle=False self.fletcher32", "= self.env.v_trajectory self.traj.f_load(name=traj_name) self.traj.res.f_remove() self.traj.dpar.f_remove() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$ Second Run $$$$$$$$$$ \\n') self.make_run()", "self.env self.env = Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name) self.expand(self.traj) results =", "'LOCK' self.multiproc = False self.ncores = 1 self.use_pool=True self.pandas_format='table' self.pandas_append=False self.complib = 'lzo'", "for idx, traj in enumerate(self.traj.f_iter_runs(yields='self')): run_name = traj.f_idx_to_run(idx) self.assertTrue(traj is self.traj) newtraj.v_crun=run_name self.traj.v_idx", "= Environment(trajectory='traj', add_time=True, filename=filename, log_stdout=False, log_config=get_log_config(), dynamic_imports=SlowResult, display_time=0.1) traj = env.v_traj res=traj.f_add_result(SlowResult, 'iii',", "'integration', 'hdf5', 'environment', 'quick' def test_make_default_file_when_giving_directory_without_slash(self): filename = make_temp_dir('test.hdf5') head, tail = os.path.split(filename)", "self.multiproc = False self.ncores = 1 self.use_pool=True self.pandas_format='table' self.pandas_append=True self.complib = 'blosc' self.complevel=2", "self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=self.traj.v_name,", "traj): self.explored ={'Normal.trial': [0,1]} traj.f_explore(cartesian_product(self.explored)) def tearDown(self): self.env.f_disable_logging() super(EnvironmentTest, self).tearDown() def setUp(self): self.set_mode()", "self.traj.hdf5.purge_duplicate_comments=1 
self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=1 self.make_run() hdf5file = pt.open_file(self.filename, mode='a') ncomments = {} try: traj_group", "= self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj)", "in traj_group._f_walk_groups(): if 'SRVC_LEAF' in node._v_attrs: self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs, 'There is no comment", "not in nameset) self.assertTrue('run_%08d' % idx in nameset) self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.res.runs.crun.z==newtraj.par.x*newtraj.par.y,' z", "Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_parameters=2, load_derived_parameters=2, load_results=2, load_other_data=2) return newtraj def test_expand(self):", "= 44.5*irun matrices_csc.append(spsparse_csc.tocsc()) matrices_bsr = [] for irun in range(3): spsparse_bsr = spsp.lil_matrix((111,111))", "EnvironmentTest.set_mode(self) self.mode = 'LOCK' self.multiproc = False self.ncores = 1 self.use_pool=True self.pandas_format='table' self.pandas_append=True", "in node._v_attrs, 'There is no comment in node %s!' 
% node._v_name) hdf5file.close() def", "def tearDown(self): self.env.f_disable_logging() super(ResultSortTest, self).tearDown() def setUp(self): self.set_mode() self.filename = make_temp_dir(os.path.join('experiments','tests','HDF5','sort_tests.hdf5')) self.trajname =", "False self.gc_interval = None self.ncores = 1 self.use_pool=True self.use_scoop=False self.freeze_input=False self.pandas_format='fixed' self.pandas_append=False self.complib", "= self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", "my_set_func} class ResultSortTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' def set_mode(self): self.mode = 'LOCK'", ":[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.explored['csr_mat'][0][1,2]=44.0 self.explored['csr_mat'][1][2,2]=33 self.explored['csr_mat'][0] = self.explored['csr_mat'][0].tocsr() self.explored['csr_mat'][1] = self.explored['csr_mat'][0].tocsr() traj.f_explore(cartesian_product(self.explored)) def explore_large(self,", "name) hdf5file.close() self.traj.f_load(load_parameters=2, load_derived_parameters=2, load_results=2) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name) self.compare_trajectories(newtraj,self.traj) def test_switch_off_all_tables(self): ###Explore self.traj.f_add_parameter('TEST',", "set_mode(self): self.mode = 'LOCK' self.multiproc = False self.ncores = 1 self.use_pool=True self.log_stdout=False self.freeze_input=False", "newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def expand(self): self.expanded ={'Normal.trial': [1],", "args3): for x, arg1, arg2, arg3 in 
zip(range(len(traj)), args1, args2, args3): traj.v_idx=x self.assertTrue(traj.crun.z==traj.x*traj.y+arg1+arg2+arg3,'", "self.traj.v_idx = idx newtraj.v_idx = idx nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,))))", "as mainf: full_text = mainf.read() self.assertTrue('nodes/s)' in full_text) env.f_disable_logging() def make_run_large_data(self): self.env.f_run(add_large_data) def", "self.explore(self.traj) args1=[10*x for x in range(len(self.traj))] args2=[100*x for x in range(len(self.traj))] args3=list(range(len(self.traj))) results", "test_make_default_file_when_giving_directory_without_slash(self): filename = make_temp_dir('test.hdf5') head, tail = os.path.split(filename) env = Environment(filename=head) the_file_name =", "< 6.0, 'Size is %sMB > 6MB' % str(size_in_mb)) mp_traj = self.traj old_multiproc", "old_multiproc def test_errors(self): tmp = make_temp_dir('cont') if dill is not None: env1 =", "'csr_mat' : matrices_csr, 'csc_mat' : matrices_csc, 'bsr_mat' : matrices_bsr, 'dia_mat' : matrices_dia, 'list'", "newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000. 
get_root_logger().info('Size", "file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config() if self.log_config else None, multiproc=self.multiproc, wrap_mode=self.mode, ncores=self.ncores, use_pool=self.use_pool, use_scoop=self.use_scoop, port=self.port,", "def test_make_default_file_when_giving_directory_without_slash(self): filename = make_temp_dir('test.hdf5') head, tail = os.path.split(filename) env = Environment(filename=head) the_file_name", "in nameset) self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y: %s != %s *", "make_temp_dir, make_trajectory_name,\\ get_root_logger, parse_args, get_log_config, get_log_path from pypet.tests.testutils.data import create_param_dict, add_params, multiply,\\ simple_calculations,", "self.assertTrue(traj.crun.z==traj.x*traj.y+arg1+arg2+arg3,' z != x*y: %s != %s * %s' % (str(traj.crun.z),str(traj.x),str(traj.y))) traj.v_idx=-1 def", "def test_two_runs(self): self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments = False ###Explore self.explore(self.traj) self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)", "check_if_z_is_correct(self,traj): traj.v_shortcuts=False for x in range(len(traj)): traj.v_idx=x z = traj.res.runs.crun.z x = traj.par.x", "def test_run(self): self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "= make_temp_dir('sloooow.hdf5') env = Environment(trajectory='traj', add_time=True, filename=filename, log_stdout=False, log_config=get_log_config(), dynamic_imports=SlowResult, display_time=0.1) traj =", "mainf: full_text = mainf.read() self.assertTrue('nodes/s)' in full_text) env.f_disable_logging() def 
make_run_large_data(self): self.env.f_run(add_large_data) def make_run(self):", "run_name = traj.f_idx_to_run(idx) self.assertTrue(traj is not self.traj) newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx =", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly_using_map(self):", "###Explore self.traj.f_add_parameter('TEST', 'test_switch_off_ALL_tables') self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_summary = 0", "len(list(list(self.explore_dict.values())[0]))) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) traj_name = self.env.v_trajectory.v_name del self.env self.env = Environment(trajectory=self.traj,", "should: self.assertTrue(name in overview_group, '%s not in overviews but it should!' 
% name)", "self.logfolder = make_temp_dir('experiments/tests/Log') # self.trajname = make_trajectory_name(self) # # env = Environment(trajectory=self.trajname,filename=self.filename, #", "self.assertTrue(self.traj.f_is_completed()) def test_file_overwriting(self): self.traj.f_store() with pt.open_file(self.filename, mode='r') as file: nchildren = len(file.root._v_children) self.assertTrue(nchildren", "newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) self.assertTrue(len(newtraj) == 1) size=os.path.getsize(self.filename) size_in_mb", "pypetconstants, Result, manual_run import pypet.pypetexceptions as pex import sys import unittest try: import", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_graceful_exit(self): ###Explore self.explore_cartesian(self.traj) results = self.env.f_run(multiply_with_graceful_exit) self.are_results_in_order(results) self.assertFalse(self.traj.f_is_completed()) def", "# Test tags def test_full_store(self): filename = make_temp_dir('full_store.hdf5') with Environment(filename=filename, log_config=get_log_config()) as env:", "spsparse_csr = spsp.lil_matrix((111,111)) spsparse_csr[3,2+irun] = 44.5*irun matrices_csr.append(spsparse_csr.tocsr()) matrices_csc = [] for irun in", "= self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def test_expand_after_reload(self): self.traj.f_add_parameter('TEST', 'test_expand_after_reload') ###Explore self.explore(self.traj)", "# traj.v_auto_load=True # # for irun, val in enumerate(list_that_should_not_change): # self.assertTrue(list_that_should_not_change[irun] == 42)", "env = 
Environment(trajectory=self.trajname,filename=self.filename, # file_title=self.trajname, log_folder=self.logfolder, # log_stdout=False, # multiproc=False, # deep_copy_data=False) #", "!= x*y: %s != %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) traj = self.traj self.assertTrue(traj.v_idx", "list_that_changes[traj.v_idx] + traj.v_idx) # list_that_changes[traj.v_idx] = 1000 # class DeepCopyTest(TrajectoryComparator): # # def", "'lzo' self.complevel=2 self.shuffle=False self.fletcher32 = True self.encoding='latin1' self.wildcard_functions = {('$', 'crun') : my_run_func,", "ncomments.values())) def test_NOT_purge_duplicate_comments(self): self.explore(self.traj) self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=0 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=0 self.make_run() hdf5file = pt.open_file(self.filename, mode='a')", "'crun') : my_run_func, ('$set', 'crunset'): my_set_func} class ResultSortTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment'", "Create some parameters self.param_dict={} create_param_dict(self.param_dict) ### Add some parameter: add_params(traj,self.param_dict) #remember the trajectory", "pypet.utils.explore import cartesian_product from pypet.environment import Environment from pypet.storageservice import HDF5StorageService from pypet", "self.are_results_in_order(results) def test_a_large_run(self): get_root_logger().info('Testing large run') self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore_large(self.traj) self.make_run_large_data() self.assertTrue(self.traj.f_is_completed()) #", "pypet.trajectory import Trajectory, load_trajectory from pypet.utils.explore import cartesian_product from pypet.environment import Environment from", "self.assertRaises(ValueError): Environment(use_scoop=True, immediate_postproc=True) with self.assertRaises(ValueError): Environment(use_pool=True, immediate_postproc=True) with 
self.assertRaises(ValueError): Environment(continuable=True, wrap_mode='QUEUE', continue_folder=tmp) with", "%sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 2.0, 'Size is %sMB > 6MB' % str(size_in_mb))", "is None, 'Niceness not supported under non Unix.') def test_niceness(self): ###Explore self.explore(self.traj) self.env.f_run(with_niceness)", "in enumerate(self.traj.f_iter_runs(yields='self')): run_name = traj.f_idx_to_run(idx) self.assertTrue(traj is self.traj) newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx", "if the values are still the same newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index,", "= len(file.root._v_children) self.assertTrue(nchildren > 1) env3 = Environment(filename=self.filename, overwrite_file=True, log_config=get_log_config()) self.assertFalse(os.path.exists(self.filename)) env2.f_disable_logging() env3.f_disable_logging()", "self.compare_trajectories(self.traj, newtraj) self.assertTrue(len(newtraj) == 1) size=os.path.getsize(self.filename) size_in_mb = size/1000000. 
get_root_logger().info('Size is %sMB' %", "[] for irun in range(3): spsparse_bsr = spsp.lil_matrix((111,111)) spsparse_bsr[3,2+irun] = 44.5*irun matrices_bsr.append(spsparse_bsr.tocsr().tobsr()) matrices_dia", "spsparse_dia = spsp.lil_matrix((111,111)) spsparse_dia[3,2+irun] = 44.5*irun matrices_dia.append(spsparse_dia.tocsc().todia()) self.explore_dict={'string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco', 'Seis', 'Siette']),", "42, 42] # # env.f_run(test_runfunc, list_that_should_not_change) # # traj.v_auto_load=True # # for irun,", "continue_folder=tmp) with self.assertRaises(ValueError): Environment(port='www.nosi.de', wrap_mode='LOCK') def test_run(self): self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed())", "self.compare_trajectories(self.traj,newtraj) def test_graceful_exit(self): ###Explore self.explore_cartesian(self.traj) results = self.env.f_run(multiply_with_graceful_exit) self.are_results_in_order(results) self.assertFalse(self.traj.f_is_completed()) def test_f_iter_runs(self): ###Explore", "# x=traj.results.runs[irun].kkk # self.assertTrue(x==42+irun) # # def test_not_deep_copy_data(self): # self.filename = make_temp_dir('experiments/tests/HDF5/testcoyp2.hdf5') #", "osnice: if traj.use_scoop: import scoop if (not scoop.IS_RUNNING or scoop.IS_ORIGIN): return raise RuntimeError('traj", "the table, no the full name self.assertTrue(not name in overview_group, '%s in overviews", "in self.traj) self.traj.results.f_load_child('TestResItem', load_data=pypetconstants.LOAD_SKELETON) self.traj.f_load_item((pypetconstants.LEAF,self.traj.TestResItem,(),{'load_only': 'TestResItem'})) self.assertTrue(self.traj.TestResItem, 42) def test_store_single_group(self): self.traj.f_store() self.traj.f_add_parameter_group('new.test.group').v_annotations.f_set(42) self.traj.f_store_item('new.group')", "import unittest try: import psutil except ImportError: psutil = None try: 
import dill", "self.shuffle=False self.fletcher32 = True self.encoding='latin1' self.wildcard_functions = {('$', 'crun') : my_run_func, ('$set', 'crunset'):", "= 1 self.traj.overview.results_summary = 0 self.make_run() self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=1 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=1 self.make_run() hdf5file =", "only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_expand_after_reload(self): ###Explore self.explore(self.traj)", "'Size is %sMB > 6MB' % str(size_in_mb)) with self.assertRaises(TypeError): self.explore(self.traj) def test_run_complex(self): self.traj.f_add_parameter('TEST',", "44.5*irun matrices_bsr.append(spsparse_bsr.tocsr().tobsr()) matrices_dia = [] for irun in range(3): spsparse_dia = spsp.lil_matrix((111,111)) spsparse_dia[3,2+irun]", "def explore_large(self, traj): self.explored ={'Normal.trial': [0,1]} traj.f_explore(cartesian_product(self.explored)) def tearDown(self): self.env.f_disable_logging() super(EnvironmentTest, self).tearDown() def", "self.wildcard_functions = None self.niceness = None self.port = None self.timeout = None self.add_time=True", "with self.assertRaises(ValueError): Environment(multiproc=True, use_pool=False, freeze_input=True, filename=self.filename, log_config=None) env3 = Environment(log_config=None, filename=self.filename) with self.assertRaises(ValueError):", "Environment(filename=head) the_file_name = env.v_traj.v_name + '.hdf5' head, tail = os.path.split(env.v_traj.v_storage_service.filename) self.assertEqual(tail, the_file_name) class", "Environment(multiproc=True, use_pool=False, freeze_input=True, filename=self.filename, log_config=None) env3 = Environment(log_config=None, 
filename=self.filename) with self.assertRaises(ValueError): env3.f_run_map(multiply_args) with", "traj.f_shrink(force=True) par_dict = traj.parameters.f_to_dict() for param_name in par_dict: param = par_dict[param_name] if param.v_name", "test_expand(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) get_root_logger().info(results) traj = self.traj self.assertEqual(len(traj), len(list(list(self.explore_dict.values())[0])))", "in par_dict: param = par_dict[param_name] if param.v_name in self.explore_dict: param.f_unlock() if param.v_explored: param._shrink()", "group is below test not new, so ValueError thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_remove_child('group') #", "= self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.assertEqual(len(traj), 5) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly(self): ###Explore self.explore(self.traj) results", "!= %s * %s' % (str(traj.crun.z),str(traj.x),str(traj.y))) traj.v_idx=-1 def check_if_z_is_correct(self,traj): traj.v_shortcuts=False for x in", "in self.traj) self.traj.new.f_load_child('test', recursive=True, load_data=pypetconstants.LOAD_SKELETON) self.assertTrue(self.traj.new.group.v_annotations.annotation, 42) self.traj.f_delete_item('new.test.group') with self.assertRaises(pex.DataNotInStorageError): self.traj.parameters.f_load_child('new.test.group', load_data=pypetconstants.LOAD_SKELETON) def", "traj_group = hdf5file.get_node(where='/', name= self.traj.v_name) for node in traj_group._f_walk_groups(): if 'SRVC_LEAF' in node._v_attrs:", "== len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = 
self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) for", "len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) self.expand(self.traj) self.env.f_run(multiply) traj = self.traj self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+\\", "args1=[10*x for x in range(len(self.traj))] args2=[100*x for x in range(len(self.traj))] args3=list(range(len(self.traj))) results =", "traj.v_shortcuts=True # def test_runfunc(traj, list_that_changes): # traj.f_add_result('kkk', list_that_changes[traj.v_idx] + traj.v_idx) # list_that_changes[traj.v_idx] =", "traj self.env = env @unittest.skipIf(not hasattr(os, 'nice') and psutil is None, 'Niceness not", "node._v_attrs['SRVC_INIT_COMMENT'] if comment not in ncomments: ncomments[comment] = 0 ncomments[comment] += 1 finally:", "= os.path.split(env.v_traj.v_storage_service.filename) self.assertEqual(tail, the_file_name) class EnvironmentTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' def set_mode(self):", "= pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should_not = HDF5StorageService.NAME_TABLE_MAPPING.keys() for name in", "'TestResItem'})) self.assertTrue(self.traj.TestResItem, 42) def test_store_single_group(self): self.traj.f_store() self.traj.f_add_parameter_group('new.test.group').v_annotations.f_set(42) self.traj.f_store_item('new.group') # group is below test", "return 'huhu_%d' % idx class TestOtherHDF5Settings(EnvironmentTest): tags = 'integration', 'hdf5', 'environment', 'hdf5_settings' def", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) self.expand(self.traj) self.env.f_run(multiply) traj = self.traj 
self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+\\ len(list(self.explore_dict.values())[0]))", "self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y: %s != %s * %s' %", "self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb)) self.compare_trajectories(mp_traj, self.traj) self.multiproc", "< 2.0, 'Size is %sMB > 6MB' % str(size_in_mb)) with self.assertRaises(TypeError): self.explore(self.traj) def", "self.traj.hdf5.purge_duplicate_comments = 1 self.traj.overview.results_summary = 0 self.make_run() self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=1 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=1 self.make_run() hdf5file", "= spsp.lil_matrix((111,111)) spsparse_dia[3,2+irun] = 44.5*irun matrices_dia.append(spsparse_dia.tocsc().todia()) self.explore_dict={'string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho',", "False self.encoding = 'utf8' self.log_stdout=False self.wildcard_functions = None self.niceness = None self.port =", "matrices_dia.append(spsparse_dia.tocsc().todia()) self.explore_dict={'string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])], 'int':[1,2,3], 'csr_mat' :", "len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) for idx,", "self.assertTrue(len(traj), 5) self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = 
self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton()", "non Unix.') def test_niceness(self): ###Explore self.explore(self.traj) self.env.f_run(with_niceness) self.assertTrue(self.traj.f_is_completed()) def test_file_overwriting(self): self.traj.f_store() with pt.open_file(self.filename,", "= False self.log_config = True self.port = None self.graceful_exit = True def tearDown(self):", "self.assertEqual(len(res), 2) self.assertTrue(isinstance(res[0], int)) self.assertTrue(isinstance(res[1], int)) idx = res[0] self.assertEqual(self.traj.res.runs[idx].z, res[1]) newtraj =", "# list_that_should_not_change = [42, 42, 42, 42] # # env.f_run(test_runfunc, list_that_should_not_change) # #", "self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) self.assertEqual(len(results), len(self.traj)) traj =", "self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) for idx, run_name in enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name", "env: traj = env.v_trajectory traj.par.x = Parameter('x', 3, 'jj') traj.f_explore({'x': [1,2,3]}) env.f_run(add_one_particular_item, True)", ":[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.expanded['csr_mat'][0][1,2]=44.0 self.expanded['csr_mat'][1][2,2]=33 self.expanded['csr_mat'][0]=self.expanded['csr_mat'][0].tocsr() self.expanded['csr_mat'][1]=self.expanded['csr_mat'][1].tocsr() self.traj.f_expand(cartesian_product(self.expanded)) self.traj.f_store() ################## Overview TESTS ############################# def", "is %sMB > 6MB' % str(size_in_mb)) mp_traj = self.traj old_multiproc = self.multiproc self.multiproc", "self.assertTrue(len(traj) == 
len(list(self.expand_dict.values())[0])+\\ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "self.are_results_in_order(results) self.assertFalse(self.traj.f_is_completed()) def test_f_iter_runs(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj", "not new, so ValueError thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_store_child('group') # group has children and", "filename=filename, log_stdout=False, log_config=get_log_config(), dynamic_imports=SlowResult, display_time=0.1) traj = env.v_traj res=traj.f_add_result(SlowResult, 'iii', 42, 43, comment='llk')", "self.trajname)) env = Environment(trajectory=self.trajname, filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config(), results_per_run=5, wildcard_functions=self.wildcard_functions, derived_parameters_per_run=5, multiproc=self.multiproc, ncores=self.ncores,", "= os.path.split(filename) env = Environment(filename=head) the_file_name = env.v_traj.v_name + '.hdf5' head, tail =", "'test_expand') self.explore(self.traj) self.make_run() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$$$$$$ Second Run $$$$$$$$$$$$$$$$$$$$$$$$') self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)", "in zip(range(len(traj)), args1, args2, args3): traj.v_idx=x self.assertTrue(traj.crun.z==traj.x*traj.y+arg1+arg2+arg3,' z != x*y: %s != %s", "30.0, 'Size is %sMB > 30MB' % str(size_in_mb)) def test_two_runs(self): self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments", "Environment(filename=self.filename, overwrite_file=True, 
log_config=get_log_config()) self.assertFalse(os.path.exists(self.filename)) env2.f_disable_logging() env3.f_disable_logging() def test_time_display_of_loading(self): filename = make_temp_dir('sloooow.hdf5') env =", "= res[0] self.assertEqual(self.traj.res.runs[idx].z, res[1]) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.assertEqual(len(traj), 5) self.compare_trajectories(self.traj,newtraj)", "newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly_using_map(self): ###Explore self.explore(self.traj) args1=[10*x for", "is below test not new, so ValueError thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_remove_child('group') # group", "x*y: %s != %s * %s' % (str(z),str(x),str(y))) traj.v_idx=-1 traj.v_shortcuts=True # def test_runfunc(traj,", "= False self.encoding = 'utf8' self.log_stdout=False self.wildcard_functions = None self.niceness = None self.port", "self.assertEqual(len(results), len(self.traj)) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) for", "self.assertTrue(newtraj.v_idx == idx) def test_expand(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) get_root_logger().info(results) traj", "* %s' % (str(newtraj.crun.z),str(newtraj.x),str(newtraj.y))) traj = self.traj self.assertTrue(traj.v_idx == -1) self.assertTrue(traj.v_crun is None)", "traj.v_shortcuts=False for x in range(len(traj)): traj.v_idx=x z = traj.res.runs.crun.z x = traj.par.x y", "only_empties=True) 
self.check_if_z_is_correct(traj) for res in results: self.assertEqual(len(res), 2) self.assertTrue(isinstance(res[0], int)) self.assertTrue(isinstance(res[1], int)) idx", "os.path.split(env.v_traj.v_storage_service.filename) self.assertEqual(tail, the_file_name) class EnvironmentTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' def set_mode(self): self.mode", "'csc_mat' : matrices_csc, 'bsr_mat' : matrices_bsr, 'dia_mat' : matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} with", "self.check_if_z_is_correct(traj) newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=self.traj.v_name, index=None, as_new=False, load_data=0) newtraj.v_auto_load = True newtraj.par.f_load_child('y',", "mode='r') as file: nchildren = len(file.root._v_children) self.assertTrue(nchildren > 1) env3 = Environment(filename=self.filename, overwrite_file=True,", "1) # traj.f_explore({'dummy':[12, 3, 3, 4]}) # # list_that_should_change = [42, 42, 42,", "setUp(self): self.set_mode() self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log')) random.seed() self.trajname = make_trajectory_name(self) self.filename =", "multiply_args, multiply_with_storing, \\ multiply_with_graceful_exit def add_one_particular_item(traj, store_full): traj.hi = Result('hi', 42, 'hi!') traj.f_store()", "ImportError: psutil = None try: import dill except ImportError: dill = None import", "port=self.port, freeze_input=self.freeze_input, graceful_exit=self.graceful_exit) traj = env.v_trajectory traj.v_standard_parameter=Parameter traj.f_add_parameter('x',99) traj.f_add_parameter('y',99) self.env=env self.traj=traj def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False,", "in range(3): spsparse_dia = spsp.lil_matrix((111,111)) spsparse_dia[3,2+irun] = 44.5*irun matrices_dia.append(spsparse_dia.tocsc().todia()) self.explore_dict={'string':[np.array(['Uno', 'Dos', 'Tres']), 
np.array(['Cinco',", "import create_param_dict, add_params, multiply,\\ simple_calculations, TrajectoryComparator, multiply_args, multiply_with_storing, \\ multiply_with_graceful_exit def add_one_particular_item(traj, store_full):", "import platform import logging import time import numpy as np from pypet.trajectory import", "self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def check_if_z_is_correct_map(self,traj, args1, args2, args3): for x, arg1,", "'environment', 'hdf5_settings' def set_mode(self): EnvironmentTest.set_mode(self) self.mode = 'LOCK' self.multiproc = False self.ncores =", "self.expanded ={'Normal.trial': [1], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.expanded['csr_mat'][0][1,2]=44.0 self.expanded['csr_mat'][1][2,2]=33 self.expanded['csr_mat'][0]=self.expanded['csr_mat'][0].tocsr() self.expanded['csr_mat'][1]=self.expanded['csr_mat'][1].tocsr()", "42) self.traj.f_delete_item('new.test.group') with self.assertRaises(pex.DataNotInStorageError): self.traj.parameters.f_load_child('new.test.group', load_data=pypetconstants.LOAD_SKELETON) def test_switch_on_all_comments(self): self.explore(self.traj) self.traj.hdf5.purge_duplicate_comments=0 self.make_run() hdf5file =", "len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct_map(traj, args1, args2, args3) for res in results: self.assertEqual(len(res),", "self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem,(),{},5)) self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem)) self.traj.results.f_remove_child('TestResItem') self.assertTrue('TestResItem' not in self.traj) 
self.traj.results.f_load_child('TestResItem', load_data=pypetconstants.LOAD_SKELETON) self.traj.f_load_item((pypetconstants.LEAF,self.traj.TestResItem,(),{'load_only': 'TestResItem'}))", "size_in_mb = size/1000000. get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 2.0, 'Size is", "= old_multiproc def test_errors(self): tmp = make_temp_dir('cont') if dill is not None: env1", "ImportError: dill = None import scipy.sparse as spsp import random from pypet import", "filename=filename) self.assertTrue('hi' in traj) def with_niceness(traj): if traj.multiproc: if hasattr(os, 'nice'): trajnice =", "fletcher32=self.fletcher32, complevel=self.complevel, complib=self.complib, shuffle=self.shuffle, pandas_append=self.pandas_append, pandas_format=self.pandas_format, encoding=self.encoding, niceness=self.niceness, use_scoop=self.use_scoop, port=self.port, add_time=self.add_time, timeout=self.timeout, graceful_exit=self.graceful_exit)", "== len(list(self.expand_dict.values())[0])+ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", "= spsp.lil_matrix((111,111)) spsparse_csc[3,2+irun] = 44.5*irun matrices_csc.append(spsparse_csc.tocsc()) matrices_bsr = [] for irun in range(3):", "test_two_runs(self): self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments = False ###Explore self.explore(self.traj) self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton()", "if self.log_config else None, multiproc=self.multiproc, wrap_mode=self.mode, ncores=self.ncores, use_pool=self.use_pool, use_scoop=self.use_scoop, port=self.port, freeze_input=self.freeze_input, 
graceful_exit=self.graceful_exit) traj", "10) traj.f_add_result('l4rge', np_array) traj.f_store_item('l4rge') traj.f_remove_item('l4rge') array_list = [] for irun in range(111): array_list.append(np.random.rand(10))", "only the name of the table, no the full name self.assertTrue(not name in", "get_log_path(traj) mainfilename = os.path.join(path, 'LOG.txt') with open(mainfilename, mode='r') as mainf: full_text = mainf.read()", "test_purge_duplicate_comments(self): self.explore(self.traj) with self.assertRaises(RuntimeError): self.traj.hdf5.purge_duplicate_comments = 1 self.traj.overview.results_summary = 0 self.make_run() self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=1", "tags = 'integration', 'hdf5', 'environment', 'hdf5_settings' def set_mode(self): EnvironmentTest.set_mode(self) self.mode = 'LOCK' self.multiproc", "run_name in enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx nameset = set((x.v_name", "self.explore_cartesian(self.traj) results = self.env.f_run(multiply_with_graceful_exit) self.are_results_in_order(results) self.assertFalse(self.traj.f_is_completed()) def test_f_iter_runs(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply)", "arg2, arg3 in zip(range(len(traj)), args1, args2, args3): traj.v_idx=x self.assertTrue(traj.crun.z==traj.x*traj.y+arg1+arg2+arg3,' z != x*y: %s", "traj in enumerate(self.traj.f_iter_runs(yields='self')): run_name = traj.f_idx_to_run(idx) self.assertTrue(traj is self.traj) newtraj.v_crun=run_name self.traj.v_idx = idx", "traj.f_add_result('kkk', list_that_changes[traj.v_idx] + traj.v_idx) # list_that_changes[traj.v_idx] = 1000 # class DeepCopyTest(TrajectoryComparator): # #", "range(len(traj)): traj.v_idx=x z = traj.res.runs.crun.z x = traj.par.x y = traj.par.y self.assertTrue(z==x*y,' z", "traj.f_add_result('m4ny', *array_list) class SimpleEnvironmentTest(TrajectoryComparator): tags = 'integration', 
'hdf5', 'environment', 'quick' def test_make_default_file_when_giving_directory_without_slash(self): filename", "# # for irun, val in enumerate(list_that_should_change): # self.assertTrue(list_that_should_change[irun] == 1000) if __name__", "= Environment(trajectory=self.trajname,filename=self.filename, # file_title=self.trajname, log_folder=self.logfolder, # log_stdout=False, # multiproc=False, # deep_copy_data=False) # #", "matrices_csc.append(spsparse_csc.tocsc()) matrices_bsr = [] for irun in range(3): spsparse_bsr = spsp.lil_matrix((111,111)) spsparse_bsr[3,2+irun] =", "= 44.5*irun matrices_dia.append(spsparse_dia.tocsc().todia()) self.explore_dict={'string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])], 'int':[1,2,3],", "'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.explored['csr_mat'][0][1,2]=44.0 self.explored['csr_mat'][1][2,2]=33 self.explored['csr_mat'][0] = self.explored['csr_mat'][0].tocsr() self.explored['csr_mat'][1] =", "a new single core run self.setUp() self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments = False ###Explore self.explore(self.traj)", "self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments = False ###Explore self.explore(self.traj) self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 2.0, 'Size is %sMB > 6MB' %", "= Environment(continuable=True, continue_folder=tmp, log_config=None, filename=self.filename) with self.assertRaises(ValueError): env1.f_run_map(multiply_args, [1], [2], [3]) with self.assertRaises(ValueError):", "is no comment in node %s!' 
% node._v_name) hdf5file.close() def test_purge_duplicate_comments(self): self.explore(self.traj) with", "13.0 results = self.env.f_run(simple_calculations,simple_arg,simple_kwarg=simple_kwarg) self.are_results_in_order(results) def test_a_large_run(self): get_root_logger().info('Testing large run') self.traj.f_add_parameter('TEST', 'test_run') ###Explore", "x*y: %s != %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) traj = self.traj self.assertTrue(traj.v_idx ==", "'hi!') traj.f_store() traj.f_remove_child('hi') class SlowResult(Result): def _load(self, load_dict): time.sleep(3) super(SlowResult, self)._load(load_dict) class FullStorageTest(TrajectoryComparator):", "set_mode(self): EnvironmentTest.set_mode(self) self.mode = 'LOCK' self.multiproc = False self.ncores = 1 self.use_pool=True self.pandas_format='table'", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) self.assertTrue(len(newtraj) == 1) size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size", "def setUp(self): self.set_mode() self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log')) random.seed() self.trajname = make_trajectory_name(self) self.filename", "in overview_group, '%s not in overviews but it should!' 
% name) hdf5file.close() self.traj.f_load(load_parameters=2,", "# Check if printing and repr work get_root_logger().info(str(self.env)) get_root_logger().info(repr(self.env)) newtraj = Trajectory() newtraj.f_load(name=self.traj.v_name,", "= True newtraj.par.f_load_child('y', load_data=1) for idx, run_name in enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name self.traj.v_idx = idx", "run_name) self.assertTrue(newtraj.res.runs.crun.z==newtraj.par.x*newtraj.par.y,' z != x*y: %s != %s * %s' % (str(newtraj.crun.z),str(newtraj.x),str(newtraj.y))) traj", "'test_switch_ON_LARGE_tables') ###Explore self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 1 self.env._traj.config.hdf5.overview.derived_parameters_overview = 1 self.make_run() hdf5file = pt.open_file(self.filename)", "traj.v_idx=x self.assertTrue(traj.crun.z==traj.x*traj.y+arg1+arg2+arg3,' z != x*y: %s != %s * %s' % (str(traj.crun.z),str(traj.x),str(traj.y))) traj.v_idx=-1", "'dia_mat' : matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} traj.f_explore(self.explore_dict) def explore(self, traj): self.explored ={'Normal.trial': [0],", "import psutil except ImportError: psutil = None try: import dill except ImportError: dill", "return 'hello_%d' % idx def my_set_func(idx): return 'huhu_%d' % idx class TestOtherHDF5Settings(EnvironmentTest): tags", "self.expanded['csr_mat'][0][1,2]=44.0 self.expanded['csr_mat'][1][2,2]=33 self.expanded['csr_mat'][0]=self.expanded['csr_mat'][0].tocsr() self.expanded['csr_mat'][1]=self.expanded['csr_mat'][1].tocsr() self.traj.f_expand(cartesian_product(self.expanded)) self.traj.f_store() ################## Overview TESTS ############################# def test_switch_ON_large_tables(self): self.traj.f_add_parameter('TEST',", "Parameter import tables as pt from pypet.tests.testutils.ioutils import run_suite, make_temp_dir, make_trajectory_name,\\ get_root_logger, parse_args,", "val in enumerate(list_that_should_not_change): # 
self.assertTrue(list_that_should_not_change[irun] == 42) # x=traj.results.runs[irun].kkk # self.assertTrue(x==42+irun) # #", "%sMB > 30MB' % str(size_in_mb)) def test_two_runs(self): self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments = False ###Explore", "spsparse_bsr = spsp.lil_matrix((111,111)) spsparse_bsr[3,2+irun] = 44.5*irun matrices_bsr.append(spsparse_bsr.tocsr().tobsr()) matrices_dia = [] for irun in", "'test_run') self.traj.hdf5.purge_duplicate_comments = False ###Explore self.explore(self.traj) self.make_run() # newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "self.make_run() self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=1 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=1 self.make_run() hdf5file = pt.open_file(self.filename, mode='a') ncomments = {}", "matrices_csc, 'bsr_mat' : matrices_bsr, 'dia_mat' : matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} traj.f_explore(self.explore_dict) def explore(self,", "self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.assertEqual(len(traj), 5) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly(self): ###Explore self.explore(self.traj) results =", "42, 42, 42] # # env.f_run(test_runfunc, list_that_should_not_change) # # traj.v_auto_load=True # # for", "self.assertTrue(self.traj.f_is_completed()) # Check if printing and repr work get_root_logger().info(str(self.env)) get_root_logger().info(repr(self.env)) newtraj = Trajectory()", "Check if printing and repr work get_root_logger().info(str(self.env)) get_root_logger().info(repr(self.env)) newtraj = Trajectory() newtraj.f_load(name=self.traj.v_name, as_new=False,", 
"run_name) self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y: %s != %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) for", "{} try: traj_group = hdf5file.get_node(where='/',name= self.traj.v_name) for node in traj_group._f_walk_groups(): if ('/derived_parameters/' in", "self.assertEqual(len(traj), 5) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) self.assertEqual(len(results), len(self.traj))", "self.load_trajectory(trajectory_name=self.traj.v_name) self.compare_trajectories(newtraj,self.traj) def test_switch_off_all_tables(self): ###Explore self.traj.f_add_parameter('TEST', 'test_switch_off_ALL_tables') self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_overview =", "overviews but should not!' % name) hdf5file.close() def test_store_form_tuple(self): self.traj.f_store() self.traj.f_add_result('TestResItem', 42, 43)", "% str(size_in_mb)) with self.assertRaises(TypeError): self.explore(self.traj) def test_run_complex(self): self.traj.f_add_parameter('TEST', 'test_run_complex') ###Explore self.explore_complex_params(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed())", "only_empties=True) self.compare_trajectories(self.traj, newtraj) def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False): ### Load The Trajectory and check if the", "of the table, no the full name self.assertTrue(not name in overview_group, '%s in", "def test_purge_duplicate_comments(self): self.explore(self.traj) with self.assertRaises(RuntimeError): self.traj.hdf5.purge_duplicate_comments = 1 self.traj.overview.results_summary = 0 self.make_run() self.traj.f_get('purge_duplicate_comments').f_unlock()", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) 
self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly_using_map(self): ###Explore self.explore(self.traj) args1=[10*x for x in range(len(self.traj))] args2=[100*x", "self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", "newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) for idx, run_name in enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name self.traj.v_idx", "EnvironmentTest.set_mode(self) self.mode = 'LOCK' self.multiproc = False self.ncores = 1 self.use_pool=True self.pandas_format='table' self.pandas_append=False", "# env.f_run(test_runfunc, list_that_should_not_change) # # traj.v_auto_load=True # # for irun, val in enumerate(list_that_should_not_change):", "= traj.f_idx_to_run(idx) self.assertTrue(traj is not self.traj) newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx", "only_empties=True) self.compare_trajectories(self.traj, newtraj) def test_expand_after_reload(self): self.traj.f_add_parameter('TEST', 'test_expand_after_reload') ###Explore self.explore(self.traj) self.make_run() traj_name = self.traj.v_name", "= 'lzo' self.complevel=2 self.shuffle=False self.fletcher32 = True self.encoding='latin1' self.wildcard_functions = {('$', 'crun') :", "ValueError thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_store_child('group') # group has children and recursive is false", "self.env._traj.config.hdf5.overview.parameters_overview = 0 self.env._traj.config.hdf5.overview.config_overview = 0 
self.env._traj.config.hdf5.overview.explored_parameters_overview = 0 self.make_run() hdf5file = pt.open_file(self.filename)", "6MB' % str(size_in_mb)) mp_traj = self.traj old_multiproc = self.multiproc self.multiproc = False ###", "only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_graceful_exit(self): ###Explore self.explore_cartesian(self.traj) results = self.env.f_run(multiply_with_graceful_exit) self.are_results_in_order(results) self.assertFalse(self.traj.f_is_completed()) def test_f_iter_runs(self):", "self.env.f_run(add_large_data) def make_run(self): ### Make a test run simple_arg = -13 simple_kwarg= 13.0", "in enumerate(list_that_should_not_change): # self.assertTrue(list_that_should_not_change[irun] == 42) # x=traj.results.runs[irun].kkk # self.assertTrue(x==42+irun) # # def", "hdf5file = pt.open_file(self.filename, mode='a') ncomments = {} try: traj_group = hdf5file.get_node(where='/',name= self.traj.v_name) for", "= self.env.v_trajectory.v_name del self.env self.env = Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name)", "= manual_run()(multiply_with_storing) for idx in self.traj.f_iter_runs(yields='idx'): self.assertTrue(isinstance(idx, int)) man_multiply(self.traj) traj = self.traj traj.f_store()", "self.traj.overview.results_summary=1 self.make_run() hdf5file = pt.open_file(self.filename, mode='a') ncomments = {} try: traj_group = hdf5file.get_node(where='/',name=", "'quick' def test_make_default_file_when_giving_directory_without_slash(self): filename = make_temp_dir('test.hdf5') head, tail = os.path.split(filename) env = Environment(filename=head)", "self.assertTrue(list_that_should_not_change[irun] == 42) # x=traj.results.runs[irun].kkk # self.assertTrue(x==42+irun) # # def test_not_deep_copy_data(self): # self.filename", "import Parameter import tables as pt from pypet.tests.testutils.ioutils import run_suite, make_temp_dir, 
make_trajectory_name,\\ get_root_logger,", "test_expand_after_reload(self): self.traj.f_add_parameter('TEST', 'test_expand_after_reload') ###Explore self.explore(self.traj) self.make_run() traj_name = self.traj.v_name self.env = Environment(trajectory=self.traj, log_stdout=False,", "def make_run(self): ### Make a test run simple_arg = -13 simple_kwarg= 13.0 results", "in range(111): array_list.append(np.random.rand(10)) traj.f_add_result('m4ny', *array_list) class SimpleEnvironmentTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment', 'quick'", "def test_switch_ON_large_tables(self): self.traj.f_add_parameter('TEST', 'test_switch_ON_LARGE_tables') ###Explore self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 1 self.env._traj.config.hdf5.overview.derived_parameters_overview = 1 self.make_run()", "self.compare_trajectories(newtraj,self.traj) def test_switch_off_all_tables(self): ###Explore self.traj.f_add_parameter('TEST', 'test_switch_off_ALL_tables') self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_overview = 0", "self.use_pool=True self.pandas_format='table' self.pandas_append=False self.complib = 'lzo' self.complevel=2 self.shuffle=False self.fletcher32 = True self.encoding='latin1' self.wildcard_functions", "# # traj.f_add_parameter('dummy', 1) # traj.f_explore({'dummy':[12, 3, 3, 4]}) # # list_that_should_change =", "with self.assertRaises(ValueError): Environment(port='www.nosi.de', wrap_mode='LOCK') def test_run(self): self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj", "freeze_input=self.freeze_input, fletcher32=self.fletcher32, complevel=self.complevel, complib=self.complib, shuffle=self.shuffle, pandas_append=self.pandas_append, pandas_format=self.pandas_format, encoding=self.encoding, niceness=self.niceness, 
use_scoop=self.use_scoop, port=self.port, add_time=self.add_time, timeout=self.timeout,", "= hdf5file.get_node(where='/', name= self.traj.v_name) for node in traj_group._f_walk_groups(): if 'SRVC_LEAF' in node._v_attrs: self.assertTrue('SRVC_INIT_COMMENT'", "= self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) for idx, run_name in enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name self.traj.v_idx =", "env.v_trajectory # # traj.f_add_parameter('dummy', 1) # traj.f_explore({'dummy':[12, 3, 3, 4]}) # # list_that_should_change", "True self.encoding='latin1' self.wildcard_functions = {('$', 'crun') : my_run_func, ('$set', 'crunset'): my_set_func} class ResultSortTest(TrajectoryComparator):", "'bsr_mat' : matrices_bsr, 'dia_mat' : matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} with self.assertRaises(pex.NotUniqueNodeError): traj.f_explore(self.explore_dict) traj.f_shrink(force=True)", "1) self.assertTrue(all(x == 1 for x in ncomments.values())) def test_NOT_purge_duplicate_comments(self): self.explore(self.traj) self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=0", "if hasattr(os, 'nice'): trajnice = traj.niceness osnice = os.nice(0) else: trajnice = traj.niceness", "test not new, so ValueError thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_remove_child('group') # group is below", "args3) for res in results: self.assertEqual(len(res), 2) self.assertTrue(isinstance(res[0], int)) self.assertTrue(isinstance(res[1], int)) idx =", "import random from pypet import Parameter import tables as pt from pypet.tests.testutils.ioutils import", "* %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) traj = self.traj self.assertTrue(traj.v_idx == -1) self.assertTrue(traj.v_crun is None)", "traj.f_explore(cartesian_product(self.explored)) def explore_large(self, traj): 
self.explored ={'Normal.trial': [0,1]} traj.f_explore(cartesian_product(self.explored)) def tearDown(self): self.env.f_disable_logging() super(EnvironmentTest, self).tearDown()", "env3 = Environment(log_config=None, filename=self.filename) with self.assertRaises(ValueError): env3.f_run_map(multiply_args) with self.assertRaises(ValueError): Environment(use_scoop=True, immediate_postproc=True) with self.assertRaises(ValueError):", "hdf5file.get_node(where='/', name= self.traj.v_name) for node in traj_group._f_walk_groups(): if 'SRVC_LEAF' in node._v_attrs: self.assertTrue('SRVC_INIT_COMMENT' in", "### Load The Trajectory and check if the values are still the same", "0 self.env._traj.config.hdf5.purge_duplicate_comments = 0 self.env._traj.config.hdf5.overview.parameters_overview = 0 self.env._traj.config.hdf5.overview.config_overview = 0 self.env._traj.config.hdf5.overview.explored_parameters_overview = 0", "traj.f_explore(self.explore_dict) def explore(self, traj): self.explored ={'Normal.trial': [0], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]}", "env2.f_disable_logging() env3.f_disable_logging() def test_time_display_of_loading(self): filename = make_temp_dir('sloooow.hdf5') env = Environment(trajectory='traj', add_time=True, filename=filename, log_stdout=False,", "self.explored['csr_mat'][1] = self.explored['csr_mat'][0].tocsr() traj.f_explore(cartesian_product(self.explored)) def explore_large(self, traj): self.explored ={'Normal.trial': [0,1]} traj.f_explore(cartesian_product(self.explored)) def tearDown(self):", "None) self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx == idx) def test_f_iter_runs_auto_load(self): ###Explore self.explore(self.traj) results =", "# def test_not_deep_copy_data(self): # self.filename = make_temp_dir('experiments/tests/HDF5/testcoyp2.hdf5') # self.logfolder = 
make_temp_dir('experiments/tests/Log') # self.trajname", "= 1 self.make_run() hdf5file = pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should =", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_expand_after_reload(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj", "Make a test run simple_arg = -13 simple_kwarg= 13.0 results = self.env.f_run(simple_calculations,simple_arg,simple_kwarg=simple_kwarg) self.are_results_in_order(results)", "as_new=False, load_data=2, filename=self.filename) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size is", "= 0 self.make_run() self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=1 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=1 self.make_run() hdf5file = pt.open_file(self.filename, mode='a') ncomments", "= Environment(trajectory=self.trajname,filename=self.filename, # file_title=self.trajname, log_folder=self.logfolder, # log_stdout=False, # multiproc=False, # deep_copy_data=True) # #", "env = Environment(filename=head) the_file_name = env.v_traj.v_name + '.hdf5' head, tail = os.path.split(env.v_traj.v_storage_service.filename) self.assertEqual(tail,", "= spsp.lil_matrix((111,111)) spsparse_bsr[3,2+irun] = 44.5*irun matrices_bsr.append(spsparse_bsr.tocsr().tobsr()) matrices_dia = [] for irun in range(3):", "load_derived_parameters=2, load_results=2, load_other_data=2) return newtraj def test_expand(self): ###Explore self.traj.f_add_parameter('TEST', 'test_expand') self.explore(self.traj) self.make_run() self.expand()", "self.timeout = None 
self.add_time=True self.graceful_exit = False def explore_complex_params(self, traj): matrices_csr = []", "newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.assertEqual(len(traj), 5) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly(self): ###Explore self.explore(self.traj)", "traj_group._f_walk_groups(): if ('/derived_parameters/' in node._v_pathname or '/results/' in node._v_pathname): if 'SRVC_LEAF' in node._v_attrs:", "newtraj.f_load(name=self.traj.v_name, as_new=False, load_data=2, filename=self.filename) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size", "trajnice != osnice: if traj.use_scoop: import scoop if (not scoop.IS_RUNNING or scoop.IS_ORIGIN): return", "results = self.env.f_run(multiply) self.are_results_in_order(results) get_root_logger().info(results) traj = self.traj self.assertEqual(len(traj), len(list(list(self.explore_dict.values())[0]))) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", "raise RuntimeError('traj niceness != os niceness; ' '%s != %s' % (str(trajnice), str(osnice)))", "== 1 for x in ncomments.values())) def test_NOT_purge_duplicate_comments(self): self.explore(self.traj) self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=0 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=0", "self.ncores = 1 self.use_pool=True self.pandas_format='table' self.pandas_append=False self.complib = 'lzo' self.complevel=2 self.shuffle=False self.fletcher32 =", "self.env.f_disable_logging() super(ResultSortTest, self).tearDown() def setUp(self): self.set_mode() self.filename = 
make_temp_dir(os.path.join('experiments','tests','HDF5','sort_tests.hdf5')) self.trajname = make_trajectory_name(self) env", "run_name) self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y: %s != %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) traj", "# traj.f_explore({'dummy':[12, 3, 3, 4]}) # # list_that_should_change = [42, 42, 42, 42]", "'Diez'])], 'Normal.int':[1,2,3], 'csr_mat' : matrices_csr, 'csc_mat' : matrices_csc, 'bsr_mat' : matrices_bsr, 'dia_mat' :", "traj.f_explore(self.explore_dict) traj.f_shrink(force=True) par_dict = traj.parameters.f_to_dict() for param_name in par_dict: param = par_dict[param_name] if", "if traj.use_scoop: import scoop if (not scoop.IS_RUNNING or scoop.IS_ORIGIN): return raise RuntimeError('traj niceness", "self.expand() get_root_logger().info('\\n $$$$$$$$$$$$ Second Run $$$$$$$$$$ \\n') self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "import sys import unittest try: import psutil except ImportError: psutil = None try:", "#remember the trajectory and the environment self.traj = traj self.env = env @unittest.skipIf(not", "self.check_if_z_is_correct_map(traj, args1, args2, args3) for res in results: self.assertEqual(len(res), 2) self.assertTrue(isinstance(res[0], int)) self.assertTrue(isinstance(res[1],", "env3.f_disable_logging() def test_time_display_of_loading(self): filename = make_temp_dir('sloooow.hdf5') env = Environment(trajectory='traj', add_time=True, filename=filename, log_stdout=False, log_config=get_log_config(),", "self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) self.expand(self.traj) self.env.f_run(multiply)", "env2 = Environment(filename=self.filename, 
log_config=get_log_config()) traj2 = env2.v_trajectory traj2.f_store() self.assertTrue(os.path.exists(self.filename)) with pt.open_file(self.filename, mode='r') as", "for res in results: self.assertEqual(len(res), 2) self.assertTrue(isinstance(res[0], int)) self.assertTrue(isinstance(res[1], int)) idx = res[0]", "in overviews but should not!' % name) hdf5file.close() def test_store_form_tuple(self): self.traj.f_store() self.traj.f_add_result('TestResItem', 42,", "in ncomments.values())) def test_NOT_purge_duplicate_comments(self): self.explore(self.traj) self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=0 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=0 self.make_run() hdf5file = pt.open_file(self.filename,", "env.v_traj res=traj.f_add_result(SlowResult, 'iii', 42, 43, comment='llk') traj.f_store() service_logger = traj.v_storage_service._logger root = logging.getLogger('pypet')", "self.explored['csr_mat'][1][2,2]=33 self.explored['csr_mat'][0] = self.explored['csr_mat'][0].tocsr() self.explored['csr_mat'][1] = self.explored['csr_mat'][0].tocsr() traj.f_explore(cartesian_product(self.explored)) def explore_large(self, traj): self.explored ={'Normal.trial':", "x in range(len(self.traj))] args3=list(range(len(self.traj))) results = self.env.f_run_map(multiply_args, args1, arg2=args2, arg3=args3) self.assertEqual(len(results), len(self.traj)) traj", "pypet.storageservice import HDF5StorageService from pypet import pypetconstants, Result, manual_run import pypet.pypetexceptions as pex", "use_scoop=self.use_scoop, port=self.port, freeze_input=self.freeze_input, graceful_exit=self.graceful_exit) traj = env.v_trajectory traj.v_standard_parameter=Parameter traj.f_add_parameter('x',99) traj.f_add_parameter('y',99) self.env=env self.traj=traj def", "6MB' % str(size_in_mb)) self.compare_trajectories(mp_traj, self.traj) self.multiproc = old_multiproc def test_errors(self): tmp = 
make_temp_dir('cont')", "None self.timeout = None self.add_time=True self.graceful_exit = False def explore_complex_params(self, traj): matrices_csr =", "service_logger = traj.v_storage_service._logger root = logging.getLogger('pypet') old_level = root.level service_logger.setLevel(logging.INFO) root.setLevel(logging.INFO) traj.f_load(load_data=3) service_logger.setLevel(old_level)", "TrajectoryComparator, multiply_args, multiply_with_storing, \\ multiply_with_graceful_exit def add_one_particular_item(traj, store_full): traj.hi = Result('hi', 42, 'hi!')", "%s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) for idx, traj in enumerate(self.traj.f_iter_runs(yields='self')): run_name = traj.f_idx_to_run(idx) self.assertTrue(traj is", "irun, val in enumerate(list_that_should_change): # self.assertTrue(list_that_should_change[irun] == 1000) if __name__ == '__main__': opt_args", "'hdf5', 'environment', 'quick' def test_make_default_file_when_giving_directory_without_slash(self): filename = make_temp_dir('test.hdf5') head, tail = os.path.split(filename) env", "and recursive is false with self.assertRaises(TypeError): self.traj.parameters.new.f_remove_child('test') self.traj.new.f_remove_child('test', recursive=True) self.assertTrue('new.group' not in self.traj)", "self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def test_expand_after_reload(self): self.traj.f_add_parameter('TEST', 'test_expand_after_reload') ###Explore self.explore(self.traj) self.make_run()", "hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(all(x == 1 for x in ncomments.values())) def test_NOT_purge_duplicate_comments(self): self.explore(self.traj)", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def expand(self): 
self.expanded ={'Normal.trial': [1], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])],", "results = self.env.f_run(multiply) self.are_results_in_order(results) self.assertEqual(len(results), len(self.traj)) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton()", "only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_expand_after_reload(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj", "self.make_run() # newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename) size_in_mb =", "42] # # env.f_run(test_runfunc, list_that_should_change) # # traj.v_auto_load=True # # for irun, val", "Run $$$$$$$$$$$$$$$$$$$$$$$$') self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def test_expand_after_reload(self):", "# newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000.", "and check if the values are still the same newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename)", "in self.traj.f_iter_runs(yields='idx'): self.assertTrue(isinstance(idx, int)) man_multiply(self.traj) traj = self.traj traj.f_store() self.assertTrue(len(traj), 5) self.assertTrue(len(traj) ==", "% str(size_in_mb)) def test_two_runs(self): self.traj.f_add_parameter('TEST', 
'test_run') self.traj.hdf5.purge_duplicate_comments = False ###Explore self.explore(self.traj) self.make_run() newtraj", "self.port = None self.graceful_exit = True def tearDown(self): self.env.f_disable_logging() super(ResultSortTest, self).tearDown() def setUp(self):", "args1, args2, args3): for x, arg1, arg2, arg3 in zip(range(len(traj)), args1, args2, args3):", "self.env._traj.config.hdf5.overview.results_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_summary = 0 self.env._traj.config.hdf5.overview.results_summary = 0 self.env._traj.config.hdf5.purge_duplicate_comments", "self.env._traj.config.hdf5.overview.derived_parameters_overview = 1 self.make_run() hdf5file = pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should", "nchildren = len(file.root._v_children) self.assertTrue(nchildren > 0) env2 = Environment(filename=self.filename, log_config=get_log_config()) traj2 = env2.v_trajectory", "44.5*irun matrices_dia.append(spsparse_dia.tocsc().todia()) self.explore_dict={'string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])], 'int':[1,2,3], 'csr_mat'", "'integration', 'hdf5', 'environment' def set_mode(self): self.mode = 'LOCK' self.multiproc = False self.ncores =", "+ traj.v_idx) # list_that_changes[traj.v_idx] = 1000 # class DeepCopyTest(TrajectoryComparator): # # def test_deep_copy_data(self):", "# # traj.v_auto_load=True # # for irun, val in enumerate(list_that_should_change): # self.assertTrue(list_that_should_change[irun] ==", "for irun in range(3): spsparse_csr = spsp.lil_matrix((111,111)) spsparse_csr[3,2+irun] = 44.5*irun matrices_csr.append(spsparse_csr.tocsr()) matrices_csc =", "self).tearDown() def setUp(self): self.set_mode() self.filename = make_temp_dir(os.path.join('experiments','tests','HDF5','sort_tests.hdf5')) self.trajname = 
make_trajectory_name(self) env = Environment(trajectory=self.trajname,filename=self.filename,", "import scipy.sparse as spsp import random from pypet import Parameter import tables as", "self.traj.f_load(name=traj_name) self.traj.res.f_remove() self.traj.dpar.f_remove() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$ Second Run $$$$$$$$$$ \\n') self.make_run() newtraj =", "log_config=get_log_config(), results_per_run=5, wildcard_functions=self.wildcard_functions, derived_parameters_per_run=5, multiproc=self.multiproc, ncores=self.ncores, wrap_mode=self.mode, use_pool=self.use_pool, gc_interval=self.gc_interval, freeze_input=self.freeze_input, fletcher32=self.fletcher32, complevel=self.complevel, complib=self.complib,", "log_stdout=False, # multiproc=False, # deep_copy_data=False) # # traj = env.v_trajectory # # traj.f_add_parameter('dummy',", "Result('hi', 42, 'hi!') traj.f_store() traj.f_remove_child('hi') class SlowResult(Result): def _load(self, load_dict): time.sleep(3) super(SlowResult, self)._load(load_dict)", "= traj.f_idx_to_run(idx) self.assertTrue(traj is self.traj) newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx nameset", "= np.random.rand(100, 1000, 10) traj.f_add_result('l4rge', np_array) traj.f_store_item('l4rge') traj.f_remove_item('l4rge') array_list = [] for irun", "spsparse_bsr[3,2+irun] = 44.5*irun matrices_bsr.append(spsparse_bsr.tocsr().tobsr()) matrices_dia = [] for irun in range(3): spsparse_dia =", "arg2=args2, arg3=args3) self.assertEqual(len(results), len(self.traj)) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", "%s != %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) traj = self.traj self.assertTrue(traj.v_idx == -1)", "self.traj.dpar.f_remove() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$ Second Run $$$$$$$$$$ \\n') self.make_run() newtraj = 
self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton()", "# Get only the name of the table, no the full name self.assertTrue(not", "self.traj) self.multiproc = old_multiproc def test_errors(self): tmp = make_temp_dir('cont') if dill is not", "hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should = ['derived_parameters_overview', 'results_overview'] for name in should: self.assertTrue(name in", "(str(newtraj.crun.z),str(traj.x),str(traj.y))) for idx, traj in enumerate(self.traj.f_iter_runs(yields='self')): run_name = traj.f_idx_to_run(idx) self.assertTrue(traj is self.traj) newtraj.v_crun=run_name", "hdf5file = pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should_not = HDF5StorageService.NAME_TABLE_MAPPING.keys() for name", "[42, 42, 42, 42] # # env.f_run(test_runfunc, list_that_should_change) # # traj.v_auto_load=True # #", "= env.v_trajectory traj.v_standard_parameter=Parameter traj.f_add_parameter('x',99) traj.f_add_parameter('y',99) self.env=env self.traj=traj def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False, how=2): ### Load The", "else None, multiproc=self.multiproc, wrap_mode=self.mode, ncores=self.ncores, use_pool=self.use_pool, use_scoop=self.use_scoop, port=self.port, freeze_input=self.freeze_input, graceful_exit=self.graceful_exit) traj = env.v_trajectory", "log_folder=self.logfolder, # log_stdout=False, # multiproc=False, # deep_copy_data=True) # # traj = env.v_trajectory #", "%sMB > 6MB' % str(size_in_mb)) with self.assertRaises(TypeError): self.explore(self.traj) def test_run_complex(self): self.traj.f_add_parameter('TEST', 'test_run_complex') ###Explore", "self.assertTrue(nchildren > 0) env2 = Environment(filename=self.filename, log_config=get_log_config()) traj2 = env2.v_trajectory traj2.f_store() self.assertTrue(os.path.exists(self.filename)) with", "###Explore self.explore_large(self.traj) 
self.make_run_large_data() self.assertTrue(self.traj.f_is_completed()) # Check if printing and repr work get_root_logger().info(str(self.env)) get_root_logger().info(repr(self.env))", "self.explore(self.traj) def test_run_complex(self): self.traj.f_add_parameter('TEST', 'test_run_complex') ###Explore self.explore_complex_params(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton()", "load_results=how) return newtraj def explore(self,traj): self.explore_dict={'x':[-1,1,2,3,4],'y':[1,1,2,2,3]} traj.f_explore(self.explore_dict) def explore_cartesian(self,traj): self.explore_dict=cartesian_product({'x':[-1,1,2,3,4, 5, 6],'y':[1,1,2,2,3,4,4]}) traj.f_explore(self.explore_dict)", "random from pypet import Parameter import tables as pt from pypet.tests.testutils.ioutils import run_suite,", "traj.f_remove_item('l4rge') array_list = [] for irun in range(111): array_list.append(np.random.rand(10)) traj.f_add_result('m4ny', *array_list) class SimpleEnvironmentTest(TrajectoryComparator):", "= self.traj.v_name self.env = Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name) self.traj.res.f_remove() self.traj.dpar.f_remove()", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) for idx, run_name", "% name) hdf5file.close() def test_store_form_tuple(self): self.traj.f_store() self.traj.f_add_result('TestResItem', 42, 43) with self.assertRaises(ValueError): self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem,(),{},5))", "only_empties=True) self.compare_trajectories(self.traj,newtraj) def 
check_if_z_is_correct_map(self,traj, args1, args2, args3): for x, arg1, arg2, arg3 in", "str(size_in_mb)) def test_two_runs(self): self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments = False ###Explore self.explore(self.traj) self.make_run() newtraj =", "traj_group._f_walk_groups(): if 'SRVC_LEAF' in node._v_attrs: self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs, 'There is no comment in", "% (str(newtraj.crun.z),str(traj.x),str(traj.y))) traj = self.traj self.assertTrue(traj.v_idx == -1) self.assertTrue(traj.v_crun is None) self.assertTrue(traj.v_crun_ ==", "1) env3 = Environment(filename=self.filename, overwrite_file=True, log_config=get_log_config()) self.assertFalse(os.path.exists(self.filename)) env2.f_disable_logging() env3.f_disable_logging() def test_time_display_of_loading(self): filename =", "del self.env self.env = Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name) self.expand(self.traj) results", "self.assertTrue(traj is not self.traj) newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx nameset =", "'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} traj.f_explore(self.explore_dict) def explore(self, traj): self.explored ={'Normal.trial': [0], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])],", "= self.traj traj.f_store() self.assertTrue(len(traj), 5) self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj", "HDF5StorageService from pypet import pypetconstants, Result, manual_run import pypet.pypetexceptions as pex import sys", "self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(all(x == 1 for x in ncomments.values())) def test_NOT_purge_duplicate_comments(self): self.explore(self.traj) 
self.traj.f_get('purge_duplicate_comments').f_unlock()", "self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name) self.expand(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj)", "self.env.f_run(with_niceness) self.assertTrue(self.traj.f_is_completed()) def test_file_overwriting(self): self.traj.f_store() with pt.open_file(self.filename, mode='r') as file: nchildren = len(file.root._v_children)", "newtraj = Trajectory() newtraj.f_load(name=self.traj.v_name, as_new=False, load_data=2, filename=self.filename) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) size=os.path.getsize(self.filename) size_in_mb", "import os import platform import logging import time import numpy as np from", "= 0 self.env._traj.config.hdf5.overview.explored_parameters_overview = 0 self.make_run() hdf5file = pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+ self.traj.v_name,", "osnice = psutil.Process().nice() if trajnice != osnice: if traj.use_scoop: import scoop if (not", "np_array = np.random.rand(100, 1000, 10) traj.f_add_result('l4rge', np_array) traj.f_store_item('l4rge') traj.f_remove_item('l4rge') array_list = [] for", "self.shuffle=True self.fletcher32 = False self.encoding = 'utf8' self.log_stdout=False self.wildcard_functions = None self.niceness =", "'hdf5', 'environment' def set_mode(self): self.mode = 'LOCK' self.multiproc = False self.gc_interval = None", "time.sleep(3) super(SlowResult, self)._load(load_dict) class FullStorageTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' # Test tags", "spsparse_csr[3,2+irun] = 44.5*irun matrices_csr.append(spsparse_csr.tocsr()) matrices_csc = [] for irun in range(3): spsparse_csc =", "traj = env.v_trajectory traj.v_standard_parameter=Parameter ## Create some parameters self.param_dict={} 
create_param_dict(self.param_dict) ### Add some", "traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct_map(traj, args1, args2, args3)", "multiproc=False, # deep_copy_data=False) # # traj = env.v_trajectory # # traj.f_add_parameter('dummy', 1) #", "self.assertEqual(self.traj.res.runs[idx].z, res[1]) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_graceful_exit(self): ###Explore self.explore_cartesian(self.traj)", "hdf5file = pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should = ['derived_parameters_overview', 'results_overview'] for", "'crunset'): my_set_func} class ResultSortTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' def set_mode(self): self.mode =", "traj.f_explore(cartesian_product(self.explored)) def tearDown(self): self.env.f_disable_logging() super(EnvironmentTest, self).tearDown() def setUp(self): self.set_mode() self.logfolder = make_temp_dir(os.path.join('experiments', 'tests',", "def test_expand(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) get_root_logger().info(results) traj = self.traj self.assertEqual(len(traj),", "self.check_if_z_is_correct(traj) traj_name = self.env.v_trajectory.v_name del self.env self.env = Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) self.traj =", "self.assertTrue('hi' in traj) def with_niceness(traj): if traj.multiproc: if hasattr(os, 'nice'): trajnice = traj.niceness", "root = logging.getLogger('pypet') old_level = root.level service_logger.setLevel(logging.INFO) root.setLevel(logging.INFO) 
traj.f_load(load_data=3) service_logger.setLevel(old_level) root.setLevel(old_level) path =", "args2, args3) for res in results: self.assertEqual(len(res), 2) self.assertTrue(isinstance(res[0], int)) self.assertTrue(isinstance(res[1], int)) idx", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_graceful_exit(self): ###Explore self.explore_cartesian(self.traj) results = self.env.f_run(multiply_with_graceful_exit) self.are_results_in_order(results) self.assertFalse(self.traj.f_is_completed())", "spsp.lil_matrix((2222,22))]} self.expanded['csr_mat'][0][1,2]=44.0 self.expanded['csr_mat'][1][2,2]=33 self.expanded['csr_mat'][0]=self.expanded['csr_mat'][0].tocsr() self.expanded['csr_mat'][1]=self.expanded['csr_mat'][1].tocsr() self.traj.f_expand(cartesian_product(self.expanded)) self.traj.f_store() ################## Overview TESTS ############################# def test_switch_ON_large_tables(self):", "# traj.f_add_result('kkk', list_that_changes[traj.v_idx] + traj.v_idx) # list_that_changes[traj.v_idx] = 1000 # class DeepCopyTest(TrajectoryComparator): #", "[] for irun in range(111): array_list.append(np.random.rand(10)) traj.f_add_result('m4ny', *array_list) class SimpleEnvironmentTest(TrajectoryComparator): tags = 'integration',", "# list_that_changes[traj.v_idx] = 1000 # class DeepCopyTest(TrajectoryComparator): # # def test_deep_copy_data(self): # #", "self.env.f_run(multiply) traj = self.traj self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+\\ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj", "newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_derived_parameters=how, load_results=how) return newtraj def 
explore(self,traj): self.explore_dict={'x':[-1,1,2,3,4],'y':[1,1,2,2,3]} traj.f_explore(self.explore_dict) def", "full_text) env.f_disable_logging() def make_run_large_data(self): self.env.f_run(add_large_data) def make_run(self): ### Make a test run simple_arg", "self.assertFalse(self.traj.f_is_completed()) def test_f_iter_runs(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj)", "test_expand(self): ###Explore self.traj.f_add_parameter('TEST', 'test_expand') self.explore(self.traj) self.make_run() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$$$$$$ Second Run $$$$$$$$$$$$$$$$$$$$$$$$') self.make_run()", "self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]} traj.f_expand(self.expand_dict) def test_if_results_are_sorted_correctly_manual_runs(self): ###Explore self.explore(self.traj) self.traj.f_store(only_init=True) man_multiply = manual_run()(multiply_with_storing) for idx in", "env.v_trajectory traj.v_standard_parameter=Parameter traj.f_add_parameter('x',99) traj.f_add_parameter('y',99) self.env=env self.traj=traj def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False, how=2): ### Load The Trajectory", "self.multiproc = False self.ncores = 1 self.use_pool=True self.log_stdout=False self.freeze_input=False self.use_scoop = False self.log_config", "self.traj old_multiproc = self.multiproc self.multiproc = False ### Make a new single core", "'SRVC_LEAF' in node._v_attrs: if 'SRVC_INIT_COMMENT' in node._v_attrs: comment = node._v_attrs['SRVC_INIT_COMMENT'] if comment not", "idx nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,)))) self.assertTrue('run_%08d' % (idx+1) not in", "continue_folder=tmp, log_config=None, filename=self.filename) with self.assertRaises(ValueError): env1.f_run_map(multiply_args, [1], [2], [3]) with self.assertRaises(ValueError): Environment(multiproc=True, use_pool=False,", "{('$', 'crun') 
: my_run_func, ('$set', 'crunset'): my_set_func} class ResultSortTest(TrajectoryComparator): tags = 'integration', 'hdf5',", "*array_list) class SimpleEnvironmentTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment', 'quick' def test_make_default_file_when_giving_directory_without_slash(self): filename =", "x*y: %s != %s * %s' % (str(newtraj.crun.z),str(newtraj.x),str(newtraj.y))) traj = self.traj self.assertTrue(traj.v_idx ==", "def set_mode(self): self.mode = 'LOCK' self.multiproc = False self.gc_interval = None self.ncores =", "in node._v_attrs: comment = node._v_attrs['SRVC_INIT_COMMENT'] if comment not in ncomments: ncomments[comment] = 0", "hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(any(x > 1 for x in ncomments.values())) def my_run_func(idx): return", "traj.f_expand(self.expand_dict) self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]} traj.f_expand(self.expand_dict) def test_if_results_are_sorted_correctly_manual_runs(self): ###Explore self.explore(self.traj) self.traj.f_store(only_init=True) man_multiply = manual_run()(multiply_with_storing) for idx", "idx class TestOtherHDF5Settings(EnvironmentTest): tags = 'integration', 'hdf5', 'environment', 'hdf5_settings' def set_mode(self): EnvironmentTest.set_mode(self) self.mode", "# deep_copy_data=True) # # traj = env.v_trajectory # # traj.f_add_parameter('dummy', 1) # traj.f_explore({'dummy':[12,", "42, 'hi!') traj.f_store() traj.f_remove_child('hi') class SlowResult(Result): def _load(self, load_dict): time.sleep(3) super(SlowResult, self)._load(load_dict) class", "env.f_run(add_one_particular_item, True) traj = load_trajectory(index=-1, filename=filename) self.assertTrue('hi' in traj) def with_niceness(traj): if traj.multiproc:", "4]}) # # list_that_should_not_change = [42, 42, 42, 42] # # env.f_run(test_runfunc, list_that_should_not_change)", "newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx nameset = set((x.v_name for x in", "newtraj) 
size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 6.0,", "tags = 'integration', 'hdf5', 'environment' # Test tags def test_full_store(self): filename = make_temp_dir('full_store.hdf5')", "idx) def test_f_iter_runs_auto_load(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj)", "or '/results/' in node._v_pathname): if 'SRVC_LEAF' in node._v_attrs: if 'SRVC_INIT_COMMENT' in node._v_attrs: comment", "'environment', 'quick' def test_make_default_file_when_giving_directory_without_slash(self): filename = make_temp_dir('test.hdf5') head, tail = os.path.split(filename) env =", "self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False): ### Load", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct_map(traj, args1, args2, args3) for res in results: self.assertEqual(len(res), 2) self.assertTrue(isinstance(res[0],", "self.env = env @unittest.skipIf(not hasattr(os, 'nice') and psutil is None, 'Niceness not supported", "overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should = ['derived_parameters_overview', 'results_overview'] for name in should:", "self.explore(self.traj) self.make_run() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$$$$$$ Second Run $$$$$$$$$$$$$$$$$$$$$$$$') self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton()", "= False self.ncores = 1 self.use_pool=True self.pandas_format='table' self.pandas_append=False self.complib 
= 'lzo' self.complevel=2 self.shuffle=False", "'test_switch_off_ALL_tables') self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_summary = 0 self.env._traj.config.hdf5.overview.results_summary =", "traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) self.expand(self.traj) self.env.f_run(multiply) traj", "with self.assertRaises(ValueError): self.traj.parameters.new.f_remove_child('group') # group is below test not new, so ValueError thrown:", "self.assertRaises(ValueError): env3.f_run_map(multiply_args) with self.assertRaises(ValueError): Environment(use_scoop=True, immediate_postproc=True) with self.assertRaises(ValueError): Environment(use_pool=True, immediate_postproc=True) with self.assertRaises(ValueError): Environment(continuable=True,", "cartesian_product from pypet.environment import Environment from pypet.storageservice import HDF5StorageService from pypet import pypetconstants,", "%s' % (str(trajnice), str(osnice))) def add_large_data(traj): np_array = np.random.rand(100, 1000, 10) traj.f_add_result('l4rge', np_array)", "== pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx == idx) def test_f_iter_runs_auto_load(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results)", "traj = env.v_trajectory # # traj.f_add_parameter('dummy', 1) # traj.f_explore({'dummy':[12, 3, 3, 4]}) #", "<gh_stars>10-100 __author__ = '<NAME>' import os import platform import logging import time import", "if 'SRVC_INIT_COMMENT' in node._v_attrs: comment = node._v_attrs['SRVC_INIT_COMMENT'] if comment not in ncomments: ncomments[comment]", "self.assertRaises(ValueError): Environment(multiproc=True, 
use_pool=False, freeze_input=True, filename=self.filename, log_config=None) env3 = Environment(log_config=None, filename=self.filename) with self.assertRaises(ValueError): env3.f_run_map(multiply_args)", "np.array(['Ocho', 'Nueve', 'Diez'])], 'Normal.int':[1,2,3], 'csr_mat' : matrices_csr, 'csc_mat' : matrices_csc, 'bsr_mat' : matrices_bsr,", "self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12,13]} with self.assertRaises(ValueError): traj.f_expand(self.expand_dict) self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]} traj.f_expand(self.expand_dict) def test_if_results_are_sorted_correctly_manual_runs(self): ###Explore self.explore(self.traj) self.traj.f_store(only_init=True) man_multiply =", "env.v_trajectory traj.v_standard_parameter=Parameter ## Create some parameters self.param_dict={} create_param_dict(self.param_dict) ### Add some parameter: add_params(traj,self.param_dict)", "= Trajectory() newtraj.f_load(name=self.traj.v_name, as_new=False, load_data=2, filename=self.filename) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) size=os.path.getsize(self.filename) size_in_mb =", "traj.parameters.f_to_dict() for param_name in par_dict: param = par_dict[param_name] if param.v_name in self.explore_dict: param.f_unlock()", "True class TestOtherHDF5Settings2(EnvironmentTest): tags = 'integration', 'hdf5', 'environment', 'hdf5_settings' def set_mode(self): EnvironmentTest.set_mode(self) self.mode", "y = traj.par.y self.assertTrue(z==x*y,' z != x*y: %s != %s * %s' %", "name in overview_group, '%s in overviews but should not!' 
% name) hdf5file.close() def", "z = traj.res.runs.crun.z x = traj.par.x y = traj.par.y self.assertTrue(z==x*y,' z != x*y:", "1 for x in ncomments.values())) def my_run_func(idx): return 'hello_%d' % idx def my_set_func(idx):", "True self.port = None self.graceful_exit = True def tearDown(self): self.env.f_disable_logging() super(ResultSortTest, self).tearDown() def", "not self.traj) newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx nameset = set((x.v_name for", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def expand(self): self.expanded ={'Normal.trial': [1], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat'", "= [] for irun in range(3): spsparse_csc = spsp.lil_matrix((111,111)) spsparse_csc[3,2+irun] = 44.5*irun matrices_csc.append(spsparse_csc.tocsc())", "False self.log_config = True self.port = None self.graceful_exit = True def tearDown(self): self.env.f_disable_logging()", "only_empties=True) self.check_if_z_is_correct(traj) newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=self.traj.v_name, index=None, as_new=False, load_data=0) newtraj.v_auto_load = True", "explore_cartesian(self,traj): self.explore_dict=cartesian_product({'x':[-1,1,2,3,4, 5, 6],'y':[1,1,2,2,3,4,4]}) traj.f_explore(self.explore_dict) def expand(self,traj): self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12,13]} with self.assertRaises(ValueError): traj.f_expand(self.expand_dict) self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]} traj.f_expand(self.expand_dict)", "self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) self.expand(self.traj) self.env.f_run(multiply) traj = self.traj", "spsparse_dia[3,2+irun] = 44.5*irun 
matrices_dia.append(spsparse_dia.tocsc().todia()) self.explore_dict={'string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])],", "tags = 'integration', 'hdf5', 'environment' def set_mode(self): self.mode = 'LOCK' self.multiproc = False", "self.env.f_run_map(multiply_args, args1, arg2=args2, arg3=args3) self.assertEqual(len(results), len(self.traj)) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton()", "get_root_logger().info(results) traj = self.traj self.assertEqual(len(traj), len(list(list(self.explore_dict.values())[0]))) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) traj_name = self.env.v_trajectory.v_name", "make_trajectory_name(self) # # env = Environment(trajectory=self.trajname,filename=self.filename, # file_title=self.trajname, log_folder=self.logfolder, # log_stdout=False, # multiproc=False,", "= 0 self.env._traj.config.hdf5.overview.derived_parameters_summary = 0 self.env._traj.config.hdf5.overview.results_summary = 0 self.env._traj.config.hdf5.purge_duplicate_comments = 0 self.env._traj.config.hdf5.overview.parameters_overview =", "%s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) traj = self.traj self.assertTrue(traj.v_idx == -1) self.assertTrue(traj.v_crun is", "env.v_traj.v_name + '.hdf5' head, tail = os.path.split(env.v_traj.v_storage_service.filename) self.assertEqual(tail, the_file_name) class EnvironmentTest(TrajectoryComparator): tags =", "# traj.f_explore({'dummy':[12, 3, 3, 4]}) # # list_that_should_not_change = [42, 42, 42, 42]", "it should!' 
% name) hdf5file.close() self.traj.f_load(load_parameters=2, load_derived_parameters=2, load_results=2) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name) self.compare_trajectories(newtraj,self.traj) def", "self.mode = 'LOCK' self.multiproc = False self.ncores = 1 self.use_pool=True self.pandas_format='table' self.pandas_append=False self.complib", "= 0 self.env._traj.config.hdf5.overview.derived_parameters_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_summary = 0 self.env._traj.config.hdf5.overview.results_summary = 0 self.env._traj.config.hdf5.purge_duplicate_comments =", "self.env._traj.config.hdf5.overview.results_summary = 0 self.env._traj.config.hdf5.purge_duplicate_comments = 0 self.env._traj.config.hdf5.overview.parameters_overview = 0 self.env._traj.config.hdf5.overview.config_overview = 0 self.env._traj.config.hdf5.overview.explored_parameters_overview", "hdf5file.close() self.traj.f_load(load_parameters=2, load_derived_parameters=2, load_results=2) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name) self.compare_trajectories(newtraj,self.traj) def test_switch_off_all_tables(self): ###Explore self.traj.f_add_parameter('TEST', 'test_switch_off_ALL_tables')", "head, tail = os.path.split(env.v_traj.v_storage_service.filename) self.assertEqual(tail, the_file_name) class EnvironmentTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment'", "in traj) def with_niceness(traj): if traj.multiproc: if hasattr(os, 'nice'): trajnice = traj.niceness osnice", "is %sMB > 6MB' % str(size_in_mb)) self.compare_trajectories(mp_traj, self.traj) self.multiproc = old_multiproc def test_errors(self):", "% str(size_in_mb)) self.assertTrue(size_in_mb < 2.0, 'Size is %sMB > 6MB' % str(size_in_mb)) with", "the_file_name = env.v_traj.v_name + '.hdf5' head, tail = os.path.split(env.v_traj.v_storage_service.filename) self.assertEqual(tail, the_file_name) class EnvironmentTest(TrajectoryComparator):", 
"check_if_z_is_correct_map(self,traj, args1, args2, args3): for x, arg1, arg2, arg3 in zip(range(len(traj)), args1, args2,", "self.traj.hdf5.purge_duplicate_comments=0 self.make_run() hdf5file = pt.open_file(self.filename) traj_group = hdf5file.get_node(where='/', name= self.traj.v_name) for node in", "self.assertRaises(TypeError): self.traj.parameters.new.f_remove_child('test') self.traj.new.f_remove_child('test', recursive=True) self.assertTrue('new.group' not in self.traj) self.traj.new.f_load_child('test', recursive=True, load_data=pypetconstants.LOAD_SKELETON) self.assertTrue(self.traj.new.group.v_annotations.annotation, 42)", "self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj", "only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly_using_map(self): ###Explore self.explore(self.traj)", "self.traj.f_store() with pt.open_file(self.filename, mode='r') as file: nchildren = len(file.root._v_children) self.assertTrue(nchildren > 0) env2", "# # def test_deep_copy_data(self): # # self.filename = make_temp_dir('experiments/tests/HDF5/testcopy.hdf5') # self.logfolder = make_temp_dir('experiments/tests/Log')", "as_new=as_new, load_derived_parameters=how, load_results=how) return newtraj def explore(self,traj): self.explore_dict={'x':[-1,1,2,3,4],'y':[1,1,2,2,3]} traj.f_explore(self.explore_dict) def explore_cartesian(self,traj): self.explore_dict=cartesian_product({'x':[-1,1,2,3,4, 5,", "name='overview') should_not = HDF5StorageService.NAME_TABLE_MAPPING.keys() 
for name in should_not: name = name.split('.')[-1] # Get", "as pex import sys import unittest try: import psutil except ImportError: psutil =", "self.traj.f_add_parameter('TEST', 'test_switch_off_ALL_tables') self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_summary = 0 self.env._traj.config.hdf5.overview.results_summary", "traj = env.v_traj res=traj.f_add_result(SlowResult, 'iii', 42, 43, comment='llk') traj.f_store() service_logger = traj.v_storage_service._logger root", "my_run_func, ('$set', 'crunset'): my_set_func} class ResultSortTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' def set_mode(self):", "load_data=pypetconstants.LOAD_SKELETON) self.traj.f_load_item((pypetconstants.LEAF,self.traj.TestResItem,(),{'load_only': 'TestResItem'})) self.assertTrue(self.traj.TestResItem, 42) def test_store_single_group(self): self.traj.f_store() self.traj.f_add_parameter_group('new.test.group').v_annotations.f_set(42) self.traj.f_store_item('new.group') # group is", "def add_large_data(traj): np_array = np.random.rand(100, 1000, 10) traj.f_add_result('l4rge', np_array) traj.f_store_item('l4rge') traj.f_remove_item('l4rge') array_list =", "DeepCopyTest(TrajectoryComparator): # # def test_deep_copy_data(self): # # self.filename = make_temp_dir('experiments/tests/HDF5/testcopy.hdf5') # self.logfolder =", "= ['derived_parameters_overview', 'results_overview'] for name in should: self.assertTrue(name in overview_group, '%s not in", "size_in_mb = size/1000000. 
get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 30.0, 'Size is", "self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "= self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size is", "[42, 42, 42, 42] # # env.f_run(test_runfunc, list_that_should_not_change) # # traj.v_auto_load=True # #", "log_stdout=False, # multiproc=False, # deep_copy_data=True) # # traj = env.v_trajectory # # traj.f_add_parameter('dummy',", ": matrices_bsr, 'dia_mat' : matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} with self.assertRaises(pex.NotUniqueNodeError): traj.f_explore(self.explore_dict) traj.f_shrink(force=True) par_dict", "def test_full_store(self): filename = make_temp_dir('full_store.hdf5') with Environment(filename=filename, log_config=get_log_config()) as env: traj = env.v_trajectory", "# # traj.f_add_parameter('dummy', 1) # traj.f_explore({'dummy':[12, 3, 3, 4]}) # # list_that_should_not_change =", "self.traj.f_add_parameter('TEST', 'test_run_complex') ###Explore self.explore_complex_params(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,", "res=traj.f_add_result(SlowResult, 'iii', 42, 43, 
comment='llk') traj.f_store() service_logger = traj.v_storage_service._logger root = logging.getLogger('pypet') old_level", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=self.traj.v_name, index=None, as_new=False, load_data=0) newtraj.v_auto_load", "for irun in range(3): spsparse_bsr = spsp.lil_matrix((111,111)) spsparse_bsr[3,2+irun] = 44.5*irun matrices_bsr.append(spsparse_bsr.tocsr().tobsr()) matrices_dia =", "matrices_bsr, 'dia_mat' : matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} with self.assertRaises(pex.NotUniqueNodeError): traj.f_explore(self.explore_dict) traj.f_shrink(force=True) par_dict =", "psutil = None try: import dill except ImportError: dill = None import scipy.sparse", "self.assertTrue(isinstance(res[0], int)) self.assertTrue(isinstance(res[1], int)) idx = res[0] self.assertEqual(self.traj.res.runs[idx].z, res[1]) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton()", "core run self.setUp() self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments = False ###Explore self.explore(self.traj) self.make_run() # newtraj", "Environment(use_scoop=True, wrap_mode='QUEUE') with self.assertRaises(ValueError): Environment(automatic_storing=False, continuable=True, continue_folder=tmp) with self.assertRaises(ValueError): Environment(port='www.nosi.de', wrap_mode='LOCK') def test_run(self):", "same newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_derived_parameters=how, load_results=how) return newtraj def", "traj2 = env2.v_trajectory traj2.f_store() self.assertTrue(os.path.exists(self.filename)) with pt.open_file(self.filename, mode='r') as 
file: nchildren = len(file.root._v_children)", "env3 = Environment(filename=self.filename, overwrite_file=True, log_config=get_log_config()) self.assertFalse(os.path.exists(self.filename)) env2.f_disable_logging() env3.f_disable_logging() def test_time_display_of_loading(self): filename = make_temp_dir('sloooow.hdf5')", "= node._v_attrs['SRVC_INIT_COMMENT'] if comment not in ncomments: ncomments[comment] = 0 ncomments[comment] += 1", "%sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb))", "self.freeze_input=False self.use_scoop = False self.log_config = True self.port = None self.graceful_exit = True", "= idx newtraj.v_idx = idx nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,)))) self.assertTrue('run_%08d'", "enumerate(self.traj.f_iter_runs(yields='self')): run_name = traj.f_idx_to_run(idx) self.assertTrue(traj is self.traj) newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx =", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def test_expand_after_reload(self): self.traj.f_add_parameter('TEST', 'test_expand_after_reload') ###Explore self.explore(self.traj) self.make_run() traj_name =", "1 finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(all(x == 1 for x in ncomments.values())) def", "self.explore(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename) size_in_mb", "tail = os.path.split(filename) env = Environment(filename=head) the_file_name = env.v_traj.v_name + '.hdf5' head, tail", "in ncomments: ncomments[comment] = 0 ncomments[comment] += 1 finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) 
self.assertTrue(any(x", "TestOtherHDF5Settings(EnvironmentTest): tags = 'integration', 'hdf5', 'environment', 'hdf5_settings' def set_mode(self): EnvironmentTest.set_mode(self) self.mode = 'LOCK'", "def test_if_results_are_sorted_correctly_using_map(self): ###Explore self.explore(self.traj) args1=[10*x for x in range(len(self.traj))] args2=[100*x for x in", "'LOCK' self.multiproc = False self.ncores = 1 self.use_pool=True self.log_stdout=False self.freeze_input=False self.use_scoop = False", "'huhu_%d' % idx class TestOtherHDF5Settings(EnvironmentTest): tags = 'integration', 'hdf5', 'environment', 'hdf5_settings' def set_mode(self):", "traj.par.x y = traj.par.y self.assertTrue(z==x*y,' z != x*y: %s != %s * %s'", "self.expanded['csr_mat'][0]=self.expanded['csr_mat'][0].tocsr() self.expanded['csr_mat'][1]=self.expanded['csr_mat'][1].tocsr() self.traj.f_expand(cartesian_product(self.expanded)) self.traj.f_store() ################## Overview TESTS ############################# def test_switch_ON_large_tables(self): self.traj.f_add_parameter('TEST', 'test_switch_ON_LARGE_tables') ###Explore", "load_results=2) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name) self.compare_trajectories(newtraj,self.traj) def test_switch_off_all_tables(self): ###Explore self.traj.f_add_parameter('TEST', 'test_switch_off_ALL_tables') self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview =", "self.env.f_run(simple_calculations,simple_arg,simple_kwarg=simple_kwarg) self.are_results_in_order(results) def test_a_large_run(self): get_root_logger().info('Testing large run') self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore_large(self.traj) self.make_run_large_data() self.assertTrue(self.traj.f_is_completed())", "a test run simple_arg = -13 simple_kwarg= 13.0 results = self.env.f_run(simple_calculations,simple_arg,simple_kwarg=simple_kwarg) self.are_results_in_order(results) def", "self.traj.v_name, name='overview') should = 
['derived_parameters_overview', 'results_overview'] for name in should: self.assertTrue(name in overview_group,", "import logging import time import numpy as np from pypet.trajectory import Trajectory, load_trajectory", "def test_store_single_group(self): self.traj.f_store() self.traj.f_add_parameter_group('new.test.group').v_annotations.f_set(42) self.traj.f_store_item('new.group') # group is below test not new, so", "(idx+1) not in nameset) self.assertTrue('run_%08d' % idx in nameset) self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.res.runs.crun.z==newtraj.par.x*newtraj.par.y,'", "res[1]) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_graceful_exit(self): ###Explore self.explore_cartesian(self.traj) results", "traj.niceness osnice = psutil.Process().nice() if trajnice != osnice: if traj.use_scoop: import scoop if", "True) traj = load_trajectory(index=-1, filename=filename) self.assertTrue('hi' in traj) def with_niceness(traj): if traj.multiproc: if", "niceness; ' '%s != %s' % (str(trajnice), str(osnice))) def add_large_data(traj): np_array = np.random.rand(100,", "self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=1 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=1 self.make_run() hdf5file = pt.open_file(self.filename, mode='a') ncomments = {} try:", "= self.traj self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+\\ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)", "self.mode = 'LOCK' self.multiproc = False self.ncores = 1 self.use_pool=True self.log_stdout=False 
self.freeze_input=False self.use_scoop", "in enumerate(list_that_should_change): # self.assertTrue(list_that_should_change[irun] == 1000) if __name__ == '__main__': opt_args = parse_args()", "'test_run_complex') ###Explore self.explore_complex_params(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj)", "not in ncomments: ncomments[comment] = 0 ncomments[comment] += 1 finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1)", "results: self.assertEqual(len(res), 2) self.assertTrue(isinstance(res[0], int)) self.assertTrue(isinstance(res[1], int)) idx = res[0] self.assertEqual(self.traj.res.runs[idx].z, res[1]) newtraj", "self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments = False ###Explore self.explore(self.traj) self.make_run() # newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton()", "self.assertRaises(ValueError): Environment(continuable=True, wrap_mode='QUEUE', continue_folder=tmp) with self.assertRaises(ValueError): Environment(use_scoop=True, wrap_mode='QUEUE') with self.assertRaises(ValueError): Environment(automatic_storing=False, continuable=True, continue_folder=tmp)", "has children and recursive is false with self.assertRaises(TypeError): self.traj.parameters.new.f_remove_child('test') self.traj.new.f_remove_child('test', recursive=True) self.assertTrue('new.group' not", "values are still the same newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_derived_parameters=how,", "%s != %s * %s' % (str(z),str(x),str(y))) traj.v_idx=-1 traj.v_shortcuts=True # def test_runfunc(traj, list_that_changes):", "### 
Add some parameter: add_params(traj,self.param_dict) #remember the trajectory and the environment self.traj =", "self.expand() get_root_logger().info('\\n $$$$$$$$$$$$$$$$$ Second Run $$$$$$$$$$$$$$$$$$$$$$$$') self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", "for irun in range(111): array_list.append(np.random.rand(10)) traj.f_add_result('m4ny', *array_list) class SimpleEnvironmentTest(TrajectoryComparator): tags = 'integration', 'hdf5',", "__author__ = '<NAME>' import os import platform import logging import time import numpy", "index=trajectory_index, as_new=as_new, load_derived_parameters=how, load_results=how) return newtraj def explore(self,traj): self.explore_dict={'x':[-1,1,2,3,4],'y':[1,1,2,2,3]} traj.f_explore(self.explore_dict) def explore_cartesian(self,traj): self.explore_dict=cartesian_product({'x':[-1,1,2,3,4,", "= 'integration', 'hdf5', 'environment' def set_mode(self): self.mode = 'LOCK' self.multiproc = False self.ncores", "# log_stdout=False, # multiproc=False, # deep_copy_data=False) # # traj = env.v_trajectory # #", "range(len(self.traj))] args2=[100*x for x in range(len(self.traj))] args3=list(range(len(self.traj))) results = self.env.f_run_map(multiply_args, args1, arg2=args2, arg3=args3)", "self.use_pool=True self.use_scoop=False self.freeze_input=False self.pandas_format='fixed' self.pandas_append=False self.complib = 'zlib' self.complevel=9 self.shuffle=True self.fletcher32 = False", "'Niceness not supported under non Unix.') def test_niceness(self): ###Explore self.explore(self.traj) self.env.f_run(with_niceness) self.assertTrue(self.traj.f_is_completed()) def", "# def test_runfunc(traj, list_that_changes): # traj.f_add_result('kkk', list_that_changes[traj.v_idx] + traj.v_idx) # list_that_changes[traj.v_idx] = 1000", "3, 4]}) # # list_that_should_not_change = [42, 42, 42, 42] # # 
env.f_run(test_runfunc,", "# # env = Environment(trajectory=self.trajname,filename=self.filename, # file_title=self.trajname, log_folder=self.logfolder, # log_stdout=False, # multiproc=False, #", "list_that_should_not_change) # # traj.v_auto_load=True # # for irun, val in enumerate(list_that_should_not_change): # self.assertTrue(list_that_should_not_change[irun]", "self.traj.TestResItem)) self.traj.results.f_remove_child('TestResItem') self.assertTrue('TestResItem' not in self.traj) self.traj.results.f_load_child('TestResItem', load_data=pypetconstants.LOAD_SKELETON) self.traj.f_load_item((pypetconstants.LEAF,self.traj.TestResItem,(),{'load_only': 'TestResItem'})) self.assertTrue(self.traj.TestResItem, 42) def", "self.use_pool=True self.pandas_format='table' self.pandas_append=True self.complib = 'blosc' self.complevel=2 self.shuffle=False self.fletcher32 = False self.encoding='latin1' self.graceful_exit", "+ '.hdf5' head, tail = os.path.split(env.v_traj.v_storage_service.filename) self.assertEqual(tail, the_file_name) class EnvironmentTest(TrajectoryComparator): tags = 'integration',", "dill except ImportError: dill = None import scipy.sparse as spsp import random from", "% str(size_in_mb)) def test_just_one_run(self): self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,", "spsparse_csc = spsp.lil_matrix((111,111)) spsparse_csc[3,2+irun] = 44.5*irun matrices_csc.append(spsparse_csc.tocsc()) matrices_bsr = [] for irun in", "self.traj.parameters.new.f_remove_child('test') self.traj.new.f_remove_child('test', recursive=True) self.assertTrue('new.group' not in self.traj) self.traj.new.f_load_child('test', recursive=True, load_data=pypetconstants.LOAD_SKELETON) self.assertTrue(self.traj.new.group.v_annotations.annotation, 42) 
self.traj.f_delete_item('new.test.group')", "# # env.f_run(test_runfunc, list_that_should_change) # # traj.v_auto_load=True # # for irun, val in", "encoding=self.encoding, niceness=self.niceness, use_scoop=self.use_scoop, port=self.port, add_time=self.add_time, timeout=self.timeout, graceful_exit=self.graceful_exit) traj = env.v_trajectory traj.v_standard_parameter=Parameter ## Create", "load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False): ### Load The Trajectory and check if the values are still the", "args1, arg2=args2, arg3=args3) self.assertEqual(len(results), len(self.traj)) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "'HDF5', 'test%s.hdf5' % self.trajname)) env = Environment(trajectory=self.trajname, filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config(), results_per_run=5, wildcard_functions=self.wildcard_functions,", "test_just_one_run(self): self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) self.assertTrue(len(newtraj) ==", "!= %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) for idx, traj in enumerate(self.traj.f_iter_runs(yields='copy')): run_name =", "self.graceful_exit = False def explore_complex_params(self, traj): matrices_csr = [] for irun in range(3):", "with self.assertRaises(ValueError): env1.f_run_map(multiply_args, [1], [2], [3]) with self.assertRaises(ValueError): Environment(multiproc=True, use_pool=False, freeze_input=True, filename=self.filename, log_config=None)", "should!' 
% name) hdf5file.close() self.traj.f_load(load_parameters=2, load_derived_parameters=2, load_results=2) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name) self.compare_trajectories(newtraj,self.traj) def test_switch_off_all_tables(self):", "newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_graceful_exit(self): ###Explore self.explore_cartesian(self.traj) results =", "% node._v_name) hdf5file.close() def test_purge_duplicate_comments(self): self.explore(self.traj) with self.assertRaises(RuntimeError): self.traj.hdf5.purge_duplicate_comments = 1 self.traj.overview.results_summary =", "self.traj self.assertEqual(len(traj), len(list(list(self.explore_dict.values())[0]))) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) traj_name = self.env.v_trajectory.v_name del self.env self.env", "new, so ValueError thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_remove_child('group') # group is below test not", "traj.f_explore(self.explore_dict) def explore_cartesian(self,traj): self.explore_dict=cartesian_product({'x':[-1,1,2,3,4, 5, 6],'y':[1,1,2,2,3,4,4]}) traj.f_explore(self.explore_dict) def expand(self,traj): self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12,13]} with self.assertRaises(ValueError): traj.f_expand(self.expand_dict)", "self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def expand(self): self.expanded ={'Normal.trial':", "[1,2,3]}) env.f_run(add_one_particular_item, True) traj = load_trajectory(index=-1, filename=filename) self.assertTrue('hi' in traj) def with_niceness(traj): 
if", "self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=0 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=0 self.make_run() hdf5file = pt.open_file(self.filename, mode='a') ncomments = {} try:", "= 44.5*irun matrices_bsr.append(spsparse_bsr.tocsr().tobsr()) matrices_dia = [] for irun in range(3): spsparse_dia = spsp.lil_matrix((111,111))", "# # traj = env.v_trajectory # # traj.f_add_parameter('dummy', 1) # traj.f_explore({'dummy':[12, 3, 3,", "self.freeze_input=False self.pandas_format='fixed' self.pandas_append=False self.complib = 'zlib' self.complevel=9 self.shuffle=True self.fletcher32 = False self.encoding =", "with self.assertRaises(RuntimeError): self.traj.hdf5.purge_duplicate_comments = 1 self.traj.overview.results_summary = 0 self.make_run() self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=1 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=1", "hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should_not = HDF5StorageService.NAME_TABLE_MAPPING.keys() for name in should_not: name = name.split('.')[-1]", "Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name) self.traj.res.f_remove() self.traj.dpar.f_remove() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$ Second", "load_data=0) newtraj.v_auto_load = True newtraj.par.f_load_child('y', load_data=1) for idx, run_name in enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name self.traj.v_idx", "if 'SRVC_LEAF' in node._v_attrs: self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs, 'There is no comment in node", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_expand_after_reload(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) 
self.are_results_in_order(results) traj =", "str(size_in_mb)) def test_just_one_run(self): self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj)", "Second Run $$$$$$$$$$$$$$$$$$$$$$$$') self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def", "self.traj self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+\\ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton()", "if param.v_explored: param._shrink() self.explore_dict={'Numpy.string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])], 'Normal.int':[1,2,3],", "with self.assertRaises(ValueError): env3.f_run_map(multiply_args) with self.assertRaises(ValueError): Environment(use_scoop=True, immediate_postproc=True) with self.assertRaises(ValueError): Environment(use_pool=True, immediate_postproc=True) with self.assertRaises(ValueError):", "self.traj self.assertTrue(traj.v_idx == -1) self.assertTrue(traj.v_crun is None) self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx == idx)", "def test_expand_after_reload(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) ==", "pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx == idx) def test_expand(self): ###Explore 
self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) get_root_logger().info(results)", "false with self.assertRaises(TypeError): self.traj.parameters.new.f_remove_child('test') self.traj.new.f_remove_child('test', recursive=True) self.assertTrue('new.group' not in self.traj) self.traj.new.f_load_child('test', recursive=True, load_data=pypetconstants.LOAD_SKELETON)", "= 'LOCK' self.multiproc = False self.ncores = 1 self.use_pool=True self.pandas_format='table' self.pandas_append=False self.complib =", "= True class TestOtherHDF5Settings2(EnvironmentTest): tags = 'integration', 'hdf5', 'environment', 'hdf5_settings' def set_mode(self): EnvironmentTest.set_mode(self)", "range(3): spsparse_dia = spsp.lil_matrix((111,111)) spsparse_dia[3,2+irun] = 44.5*irun matrices_dia.append(spsparse_dia.tocsc().todia()) self.explore_dict={'string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco', 'Seis',", "comment = node._v_attrs['SRVC_INIT_COMMENT'] if comment not in ncomments: ncomments[comment] = 0 ncomments[comment] +=", "with self.assertRaises(ValueError): traj.f_expand(self.expand_dict) self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]} traj.f_expand(self.expand_dict) def test_if_results_are_sorted_correctly_manual_runs(self): ###Explore self.explore(self.traj) self.traj.f_store(only_init=True) man_multiply = manual_run()(multiply_with_storing)", ": my_run_func, ('$set', 'crunset'): my_set_func} class ResultSortTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' def", "irun in range(111): array_list.append(np.random.rand(10)) traj.f_add_result('m4ny', *array_list) class SimpleEnvironmentTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment',", "test_deep_copy_data(self): # # self.filename = make_temp_dir('experiments/tests/HDF5/testcopy.hdf5') # self.logfolder = make_temp_dir('experiments/tests/Log') # self.trajname =", "'There is no comment in node %s!' 
% node._v_name) hdf5file.close() def test_purge_duplicate_comments(self): self.explore(self.traj)", "self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb <", "osnice = os.nice(0) else: trajnice = traj.niceness osnice = psutil.Process().nice() if trajnice !=", "'Normal.int':[1,2,3], 'csr_mat' : matrices_csr, 'csc_mat' : matrices_csc, 'bsr_mat' : matrices_bsr, 'dia_mat' : matrices_dia,", "size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 6.0, 'Size", "as file: nchildren = len(file.root._v_children) self.assertTrue(nchildren > 0) env2 = Environment(filename=self.filename, log_config=get_log_config()) traj2", "my_run_func(idx): return 'hello_%d' % idx def my_set_func(idx): return 'huhu_%d' % idx class TestOtherHDF5Settings(EnvironmentTest):", "for irun, val in enumerate(list_that_should_change): # self.assertTrue(list_that_should_change[irun] == 1000) if __name__ == '__main__':", "get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 30.0, 'Size is %sMB > 30MB'", "% (str(traj.crun.z),str(traj.x),str(traj.y))) traj.v_idx=-1 def check_if_z_is_correct(self,traj): traj.v_shortcuts=False for x in range(len(traj)): traj.v_idx=x z =", "table, no the full name self.assertTrue(not name in overview_group, '%s in overviews but", "node %s!' 
% node._v_name) hdf5file.close() def test_purge_duplicate_comments(self): self.explore(self.traj) with self.assertRaises(RuntimeError): self.traj.hdf5.purge_duplicate_comments = 1", "={'Normal.trial': [0], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.explored['csr_mat'][0][1,2]=44.0 self.explored['csr_mat'][1][2,2]=33 self.explored['csr_mat'][0] = self.explored['csr_mat'][0].tocsr()", "# # def test_not_deep_copy_data(self): # self.filename = make_temp_dir('experiments/tests/HDF5/testcoyp2.hdf5') # self.logfolder = make_temp_dir('experiments/tests/Log') #", "open(mainfilename, mode='r') as mainf: full_text = mainf.read() self.assertTrue('nodes/s)' in full_text) env.f_disable_logging() def make_run_large_data(self):", "self.graceful_exit = True class TestOtherHDF5Settings2(EnvironmentTest): tags = 'integration', 'hdf5', 'environment', 'hdf5_settings' def set_mode(self):", "enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx nameset = set((x.v_name for x", "args3): traj.v_idx=x self.assertTrue(traj.crun.z==traj.x*traj.y+arg1+arg2+arg3,' z != x*y: %s != %s * %s' % (str(traj.crun.z),str(traj.x),str(traj.y)))", "None) self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx == idx) def test_expand(self): ###Explore self.explore(self.traj) results =", "0) env2 = Environment(filename=self.filename, log_config=get_log_config()) traj2 = env2.v_trajectory traj2.f_store() self.assertTrue(os.path.exists(self.filename)) with pt.open_file(self.filename, mode='r')", "'LOCK' self.multiproc = False self.ncores = 1 self.use_pool=True self.pandas_format='table' self.pandas_append=True self.complib = 'blosc'", "= make_temp_dir(os.path.join('experiments', 'tests', 'HDF5', 'test%s.hdf5' % self.trajname)) env = Environment(trajectory=self.trajname, filename=self.filename, file_title=self.trajname, 
log_stdout=self.log_stdout,", "new single core run self.setUp() self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments = False ###Explore self.explore(self.traj) self.make_run()", "= make_temp_dir('experiments/tests/HDF5/testcoyp2.hdf5') # self.logfolder = make_temp_dir('experiments/tests/Log') # self.trajname = make_trajectory_name(self) # # env", "only_empties=True) self.compare_trajectories(self.traj, newtraj) self.assertTrue(len(newtraj) == 1) size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size is %sMB'", "1 self.use_pool=True self.use_scoop=False self.freeze_input=False self.pandas_format='fixed' self.pandas_append=False self.complib = 'zlib' self.complevel=9 self.shuffle=True self.fletcher32 =", "freeze_input=True, filename=self.filename, log_config=None) env3 = Environment(log_config=None, filename=self.filename) with self.assertRaises(ValueError): env3.f_run_map(multiply_args) with self.assertRaises(ValueError): Environment(use_scoop=True,", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.assertEqual(len(traj), 5) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply)", "# traj.v_auto_load=True # # for irun, val in enumerate(list_that_should_change): # self.assertTrue(list_that_should_change[irun] == 1000)", "self.make_run() hdf5file = pt.open_file(self.filename, mode='a') ncomments = {} try: traj_group = hdf5file.get_node(where='/',name= self.traj.v_name)", "self.assertRaises(ValueError): env1.f_run_map(multiply_args, [1], [2], [3]) with self.assertRaises(ValueError): Environment(multiproc=True, use_pool=False, freeze_input=True, filename=self.filename, log_config=None) env3", "%s != %s * %s' % (str(newtraj.crun.z),str(newtraj.x),str(newtraj.y))) traj = self.traj self.assertTrue(traj.v_idx == -1)", "name in should: 
self.assertTrue(name in overview_group, '%s not in overviews but it should!'", "= mainf.read() self.assertTrue('nodes/s)' in full_text) env.f_disable_logging() def make_run_large_data(self): self.env.f_run(add_large_data) def make_run(self): ### Make", "self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=1 self.make_run() hdf5file = pt.open_file(self.filename, mode='a') ncomments = {} try: traj_group =", "ncores=self.ncores, wrap_mode=self.mode, use_pool=self.use_pool, gc_interval=self.gc_interval, freeze_input=self.freeze_input, fletcher32=self.fletcher32, complevel=self.complevel, complib=self.complib, shuffle=self.shuffle, pandas_append=self.pandas_append, pandas_format=self.pandas_format, encoding=self.encoding, niceness=self.niceness,", "self.assertTrue(isinstance(res[1], int)) idx = res[0] self.assertEqual(self.traj.res.runs[idx].z, res[1]) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", "'results_overview'] for name in should: self.assertTrue(name in overview_group, '%s not in overviews but", "'hdf5', 'environment', 'hdf5_settings' def set_mode(self): EnvironmentTest.set_mode(self) self.mode = 'LOCK' self.multiproc = False self.ncores", "index=trajectory_index, as_new=as_new, load_parameters=2, load_derived_parameters=2, load_results=2, load_other_data=2) return newtraj def test_expand(self): ###Explore self.traj.f_add_parameter('TEST', 'test_expand')", "### Make a test run simple_arg = -13 simple_kwarg= 13.0 results = self.env.f_run(simple_calculations,simple_arg,simple_kwarg=simple_kwarg)", "thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_store_child('group') # group has children and recursive is false with", "def test_niceness(self): ###Explore self.explore(self.traj) self.env.f_run(with_niceness) self.assertTrue(self.traj.f_is_completed()) def test_file_overwriting(self): 
self.traj.f_store() with pt.open_file(self.filename, mode='r') as", "!= %s * %s' % (str(newtraj.crun.z),str(newtraj.x),str(newtraj.y))) traj = self.traj self.assertTrue(traj.v_idx == -1) self.assertTrue(traj.v_crun", "'LOG.txt') with open(mainfilename, mode='r') as mainf: full_text = mainf.read() self.assertTrue('nodes/s)' in full_text) env.f_disable_logging()", "3, 'jj') traj.f_explore({'x': [1,2,3]}) env.f_run(add_one_particular_item, True) traj = load_trajectory(index=-1, filename=filename) self.assertTrue('hi' in traj)", "={'Normal.trial': [1], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.expanded['csr_mat'][0][1,2]=44.0 self.expanded['csr_mat'][1][2,2]=33 self.expanded['csr_mat'][0]=self.expanded['csr_mat'][0].tocsr() self.expanded['csr_mat'][1]=self.expanded['csr_mat'][1].tocsr() self.traj.f_expand(cartesian_product(self.expanded))", "self.assertTrue(all(x == 1 for x in ncomments.values())) def test_NOT_purge_duplicate_comments(self): self.explore(self.traj) self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=0 self.traj.f_get('results_summary').f_unlock()", "logging import time import numpy as np from pypet.trajectory import Trajectory, load_trajectory from", "# self.filename = make_temp_dir('experiments/tests/HDF5/testcopy.hdf5') # self.logfolder = make_temp_dir('experiments/tests/Log') # self.trajname = make_trajectory_name(self) #", "% (str(trajnice), str(osnice))) def add_large_data(traj): np_array = np.random.rand(100, 1000, 10) traj.f_add_result('l4rge', np_array) traj.f_store_item('l4rge')", "< 6.0, 'Size is %sMB > 6MB' % str(size_in_mb)) def test_just_one_run(self): self.make_run() self.assertTrue(self.traj.f_is_completed())", "idx) def test_expand(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) get_root_logger().info(results) traj = 
self.traj", "z != x*y: %s != %s * %s' % (str(z),str(x),str(y))) traj.v_idx=-1 traj.v_shortcuts=True #", "###Explore self.explore_cartesian(self.traj) results = self.env.f_run(multiply_with_graceful_exit) self.are_results_in_order(results) self.assertFalse(self.traj.f_is_completed()) def test_f_iter_runs(self): ###Explore self.explore(self.traj) results =", "get_root_logger().info('\\n $$$$$$$$$$$$ Second Run $$$$$$$$$$ \\n') self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", "param.v_name in self.explore_dict: param.f_unlock() if param.v_explored: param._shrink() self.explore_dict={'Numpy.string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco', 'Seis', 'Siette']),", "size/1000000. get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 30.0, 'Size is %sMB >", "!= %s * %s' % (str(z),str(x),str(y))) traj.v_idx=-1 traj.v_shortcuts=True # def test_runfunc(traj, list_that_changes): #", "self.traj.f_load(name=traj_name) self.expand(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+ len(list(self.explore_dict.values())[0]))", "self.assertEqual(tail, the_file_name) class EnvironmentTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' def set_mode(self): self.mode =", "self.mode = 'LOCK' self.multiproc = False self.ncores = 1 self.use_pool=True self.pandas_format='table' self.pandas_append=True self.complib", "self.explore(self.traj) self.traj.f_store(only_init=True) man_multiply = manual_run()(multiply_with_storing) for idx in self.traj.f_iter_runs(yields='idx'): self.assertTrue(isinstance(idx, int)) man_multiply(self.traj) traj", "np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} 
self.explored['csr_mat'][0][1,2]=44.0 self.explored['csr_mat'][1][2,2]=33 self.explored['csr_mat'][0] = self.explored['csr_mat'][0].tocsr() self.explored['csr_mat'][1] = self.explored['csr_mat'][0].tocsr() traj.f_explore(cartesian_product(self.explored))", "self.assertTrue(traj.v_idx == -1) self.assertTrue(traj.v_crun is None) self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx == idx) def", "traj = self.traj self.assertEqual(len(traj), len(list(list(self.explore_dict.values())[0]))) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) traj_name = self.env.v_trajectory.v_name del", "still the same newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_parameters=2, load_derived_parameters=2, load_results=2,", "################## Overview TESTS ############################# def test_switch_ON_large_tables(self): self.traj.f_add_parameter('TEST', 'test_switch_ON_LARGE_tables') ###Explore self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 1", "= self.env.f_run(multiply) self.are_results_in_order(results) get_root_logger().info(results) traj = self.traj self.assertEqual(len(traj), len(list(list(self.explore_dict.values())[0]))) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj)", "'SRVC_LEAF' in node._v_attrs: self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs, 'There is no comment in node %s!'", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) for idx, run_name in enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx =", "[np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} 
self.explored['csr_mat'][0][1,2]=44.0 self.explored['csr_mat'][1][2,2]=33 self.explored['csr_mat'][0] = self.explored['csr_mat'][0].tocsr() self.explored['csr_mat'][1] = self.explored['csr_mat'][0].tocsr()", "Make a new single core run self.setUp() self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments = False ###Explore", "TestOtherHDF5Settings2(EnvironmentTest): tags = 'integration', 'hdf5', 'environment', 'hdf5_settings' def set_mode(self): EnvironmentTest.set_mode(self) self.mode = 'LOCK'", ": matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} with self.assertRaises(pex.NotUniqueNodeError): traj.f_explore(self.explore_dict) traj.f_shrink(force=True) par_dict = traj.parameters.f_to_dict() for", "only_empties=True) self.check_if_z_is_correct(traj) traj_name = self.env.v_trajectory.v_name del self.env self.env = Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) self.traj", "!= os niceness; ' '%s != %s' % (str(trajnice), str(osnice))) def add_large_data(traj): np_array", "self.complevel=2 self.shuffle=False self.fletcher32 = False self.encoding='latin1' self.graceful_exit = True class TestOtherHDF5Settings2(EnvironmentTest): tags =", "par_dict: param = par_dict[param_name] if param.v_name in self.explore_dict: param.f_unlock() if param.v_explored: param._shrink() self.explore_dict={'Numpy.string':[np.array(['Uno',", "def expand(self): self.expanded ={'Normal.trial': [1], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.expanded['csr_mat'][0][1,2]=44.0 self.expanded['csr_mat'][1][2,2]=33", ": matrices_csc, 'bsr_mat' : matrices_bsr, 'dia_mat' : matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} with self.assertRaises(pex.NotUniqueNodeError):", "# file_title=self.trajname, log_folder=self.logfolder, # log_stdout=False, # multiproc=False, # deep_copy_data=True) # # traj =", "= 
self.env.v_trajectory self.traj.f_load(name=traj_name) self.expand(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) ==", "= name.split('.')[-1] # Get only the name of the table, no the full", "explore_large(self, traj): self.explored ={'Normal.trial': [0,1]} traj.f_explore(cartesian_product(self.explored)) def tearDown(self): self.env.f_disable_logging() super(EnvironmentTest, self).tearDown() def setUp(self):", "self.explored['csr_mat'][0] = self.explored['csr_mat'][0].tocsr() self.explored['csr_mat'][1] = self.explored['csr_mat'][0].tocsr() traj.f_explore(cartesian_product(self.explored)) def explore_large(self, traj): self.explored ={'Normal.trial': [0,1]}", "self.make_run() hdf5file = pt.open_file(self.filename) traj_group = hdf5file.get_node(where='/', name= self.traj.v_name) for node in traj_group._f_walk_groups():", "!= %s' % (str(trajnice), str(osnice))) def add_large_data(traj): np_array = np.random.rand(100, 1000, 10) traj.f_add_result('l4rge',", "ncomments[comment] = 0 ncomments[comment] += 1 finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(any(x > 1", "pandas_append=self.pandas_append, pandas_format=self.pandas_format, encoding=self.encoding, niceness=self.niceness, use_scoop=self.use_scoop, port=self.port, add_time=self.add_time, timeout=self.timeout, graceful_exit=self.graceful_exit) traj = env.v_trajectory traj.v_standard_parameter=Parameter", "= self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def check_if_z_is_correct_map(self,traj, args1, args2, args3): for x,", "node in traj_group._f_walk_groups(): if ('/derived_parameters/' in node._v_pathname or '/results/' in node._v_pathname): if 'SRVC_LEAF'", "matrices_csc = [] for irun in range(3): spsparse_csc = 
spsp.lil_matrix((111,111)) spsparse_csc[3,2+irun] = 44.5*irun", "'jj') traj.f_explore({'x': [1,2,3]}) env.f_run(add_one_particular_item, True) traj = load_trajectory(index=-1, filename=filename) self.assertTrue('hi' in traj) def", "mode='r') as mainf: full_text = mainf.read() self.assertTrue('nodes/s)' in full_text) env.f_disable_logging() def make_run_large_data(self): self.env.f_run(add_large_data)", "no the full name self.assertTrue(not name in overview_group, '%s in overviews but should", "== run_name) self.assertTrue(newtraj.res.runs.crun.z==newtraj.par.x*newtraj.par.y,' z != x*y: %s != %s * %s' % (str(newtraj.crun.z),str(newtraj.x),str(newtraj.y)))", "x in ncomments.values())) def my_run_func(idx): return 'hello_%d' % idx def my_set_func(idx): return 'huhu_%d'", "traj.f_store() self.assertTrue(len(traj), 5) self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)", "nchildren = len(file.root._v_children) self.assertTrue(nchildren > 1) env3 = Environment(filename=self.filename, overwrite_file=True, log_config=get_log_config()) self.assertFalse(os.path.exists(self.filename)) env2.f_disable_logging()", "self.traj.f_iter_runs(yields='idx'): self.assertTrue(isinstance(idx, int)) man_multiply(self.traj) traj = self.traj traj.f_store() self.assertTrue(len(traj), 5) self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))", "results = self.env.f_run(multiply_with_graceful_exit) self.are_results_in_order(results) self.assertFalse(self.traj.f_is_completed()) def test_f_iter_runs(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results)", "= self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() 
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False): ### Load The Trajectory", "self.assertTrue('new.group' not in self.traj) self.traj.new.f_load_child('test', recursive=True, load_data=pypetconstants.LOAD_SKELETON) self.assertTrue(self.traj.new.group.v_annotations.annotation, 42) self.traj.f_delete_item('new.test.group') with self.assertRaises(pex.DataNotInStorageError): self.traj.parameters.f_load_child('new.test.group',", "self.trajname = make_trajectory_name(self) env = Environment(trajectory=self.trajname,filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config() if self.log_config else None,", "wrap_mode='QUEUE') with self.assertRaises(ValueError): Environment(automatic_storing=False, continuable=True, continue_folder=tmp) with self.assertRaises(ValueError): Environment(port='www.nosi.de', wrap_mode='LOCK') def test_run(self): self.traj.f_add_parameter('TEST',", "'hello_%d' % idx def my_set_func(idx): return 'huhu_%d' % idx class TestOtherHDF5Settings(EnvironmentTest): tags =", "self.assertTrue('nodes/s)' in full_text) env.f_disable_logging() def make_run_large_data(self): self.env.f_run(add_large_data) def make_run(self): ### Make a test", "= size/1000000. 
get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 6.0, 'Size is %sMB", "self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) for idx, run_name in enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name self.traj.v_idx = idx", "make_temp_dir('experiments/tests/Log') # self.trajname = make_trajectory_name(self) # # env = Environment(trajectory=self.trajname,filename=self.filename, # file_title=self.trajname, log_folder=self.logfolder,", "in node._v_attrs: if 'SRVC_INIT_COMMENT' in node._v_attrs: comment = node._v_attrs['SRVC_INIT_COMMENT'] if comment not in", "def check_if_z_is_correct(self,traj): traj.v_shortcuts=False for x in range(len(traj)): traj.v_idx=x z = traj.res.runs.crun.z x =", "traj.f_store() service_logger = traj.v_storage_service._logger root = logging.getLogger('pypet') old_level = root.level service_logger.setLevel(logging.INFO) root.setLevel(logging.INFO) traj.f_load(load_data=3)", "'test_run') ###Explore self.explore_large(self.traj) self.make_run_large_data() self.assertTrue(self.traj.f_is_completed()) # Check if printing and repr work get_root_logger().info(str(self.env))", "self.explore_dict={'string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])], 'int':[1,2,3], 'csr_mat' : matrices_csr,", "self.add_time=True self.graceful_exit = False def explore_complex_params(self, traj): matrices_csr = [] for irun in", "for x in ncomments.values())) def test_NOT_purge_duplicate_comments(self): self.explore(self.traj) self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=0 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=0 self.make_run() hdf5file", "pypet.tests.testutils.ioutils import run_suite, make_temp_dir, make_trajectory_name,\\ get_root_logger, parse_args, 
get_log_config, get_log_path from pypet.tests.testutils.data import create_param_dict,", "manual_run()(multiply_with_storing) for idx in self.traj.f_iter_runs(yields='idx'): self.assertTrue(isinstance(idx, int)) man_multiply(self.traj) traj = self.traj traj.f_store() self.assertTrue(len(traj),", "list_that_changes): # traj.f_add_result('kkk', list_that_changes[traj.v_idx] + traj.v_idx) # list_that_changes[traj.v_idx] = 1000 # class DeepCopyTest(TrajectoryComparator):", "self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) self.assertTrue(len(newtraj) == 1)", "traj.f_explore(self.explore_dict) def expand(self,traj): self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12,13]} with self.assertRaises(ValueError): traj.f_expand(self.expand_dict) self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]} traj.f_expand(self.expand_dict) def test_if_results_are_sorted_correctly_manual_runs(self): ###Explore self.explore(self.traj)", "= traj.parameters.f_to_dict() for param_name in par_dict: param = par_dict[param_name] if param.v_name in self.explore_dict:", "%s' % (str(z),str(x),str(y))) traj.v_idx=-1 traj.v_shortcuts=True # def test_runfunc(traj, list_that_changes): # traj.f_add_result('kkk', list_that_changes[traj.v_idx] +", "# log_stdout=False, # multiproc=False, # deep_copy_data=True) # # traj = env.v_trajectory # #", "[1], [2], [3]) with self.assertRaises(ValueError): Environment(multiproc=True, use_pool=False, freeze_input=True, filename=self.filename, log_config=None) env3 = Environment(log_config=None,", "test_file_overwriting(self): self.traj.f_store() with pt.open_file(self.filename, mode='r') as file: nchildren = len(file.root._v_children) self.assertTrue(nchildren > 0)", "name in should_not: name = name.split('.')[-1] # Get only the name 
of the", "self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly_using_map(self): ###Explore self.explore(self.traj) args1=[10*x", "self.explored['csr_mat'][0].tocsr() traj.f_explore(cartesian_product(self.explored)) def explore_large(self, traj): self.explored ={'Normal.trial': [0,1]} traj.f_explore(cartesian_product(self.explored)) def tearDown(self): self.env.f_disable_logging() super(EnvironmentTest,", "self.assertTrue(isinstance(idx, int)) man_multiply(self.traj) traj = self.traj traj.f_store() self.assertTrue(len(traj), 5) self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton()", "= self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton()", "self.explored['csr_mat'][0].tocsr() self.explored['csr_mat'][1] = self.explored['csr_mat'][0].tocsr() traj.f_explore(cartesian_product(self.explored)) def explore_large(self, traj): self.explored ={'Normal.trial': [0,1]} traj.f_explore(cartesian_product(self.explored)) def", "> 0) env2 = Environment(filename=self.filename, log_config=get_log_config()) traj2 = env2.v_trajectory traj2.f_store() self.assertTrue(os.path.exists(self.filename)) with pt.open_file(self.filename,", "pt.open_file(self.filename, mode='a') ncomments = {} try: traj_group = hdf5file.get_node(where='/',name= self.traj.v_name) for node in", "x in traj.f_iter_nodes(predicate=(idx,)))) self.assertTrue('run_%08d' % (idx+1) not in nameset) self.assertTrue('run_%08d' % idx in", "Trajectory, load_trajectory from 
pypet.utils.explore import cartesian_product from pypet.environment import Environment from pypet.storageservice import", "from pypet.trajectory import Trajectory, load_trajectory from pypet.utils.explore import cartesian_product from pypet.environment import Environment", "old_multiproc = self.multiproc self.multiproc = False ### Make a new single core run", "self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=0 self.make_run() hdf5file = pt.open_file(self.filename, mode='a') ncomments = {} try: traj_group =", "self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False): ### Load The Trajectory and", "self.filename = make_temp_dir('experiments/tests/HDF5/testcoyp2.hdf5') # self.logfolder = make_temp_dir('experiments/tests/Log') # self.trajname = make_trajectory_name(self) # #", "for irun in range(3): spsparse_csc = spsp.lil_matrix((111,111)) spsparse_csc[3,2+irun] = 44.5*irun matrices_csc.append(spsparse_csc.tocsc()) matrices_bsr =", "with self.assertRaises(TypeError): self.traj.parameters.new.f_remove_child('test') self.traj.new.f_remove_child('test', recursive=True) self.assertTrue('new.group' not in self.traj) self.traj.new.f_load_child('test', recursive=True, load_data=pypetconstants.LOAD_SKELETON) self.assertTrue(self.traj.new.group.v_annotations.annotation,", "# def test_deep_copy_data(self): # # self.filename = make_temp_dir('experiments/tests/HDF5/testcopy.hdf5') # self.logfolder = make_temp_dir('experiments/tests/Log') #", "add_time=self.add_time, timeout=self.timeout, graceful_exit=self.graceful_exit) traj = env.v_trajectory traj.v_standard_parameter=Parameter ## Create some parameters self.param_dict={} create_param_dict(self.param_dict)", "hdf5file.close() def test_store_form_tuple(self): 
self.traj.f_store() self.traj.f_add_result('TestResItem', 42, 43) with self.assertRaises(ValueError): self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem,(),{},5)) self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem))", "filename=self.filename, log_config=None) env3 = Environment(log_config=None, filename=self.filename) with self.assertRaises(ValueError): env3.f_run_map(multiply_args) with self.assertRaises(ValueError): Environment(use_scoop=True, immediate_postproc=True)", "'Tres']), np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])], 'Normal.int':[1,2,3], 'csr_mat' : matrices_csr, 'csc_mat' :", "self.traj.f_store_item('new.group') # group is below test not new, so ValueError thrown: with self.assertRaises(ValueError):", "spsparse_csc[3,2+irun] = 44.5*irun matrices_csc.append(spsparse_csc.tocsc()) matrices_bsr = [] for irun in range(3): spsparse_bsr =", "add_params, multiply,\\ simple_calculations, TrajectoryComparator, multiply_args, multiply_with_storing, \\ multiply_with_graceful_exit def add_one_particular_item(traj, store_full): traj.hi =", "[] for irun in range(3): spsparse_csc = spsp.lil_matrix((111,111)) spsparse_csc[3,2+irun] = 44.5*irun matrices_csc.append(spsparse_csc.tocsc()) matrices_bsr", "work get_root_logger().info(str(self.env)) get_root_logger().info(repr(self.env)) newtraj = Trajectory() newtraj.f_load(name=self.traj.v_name, as_new=False, load_data=2, filename=self.filename) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", "param = par_dict[param_name] if param.v_name in self.explore_dict: param.f_unlock() if param.v_explored: param._shrink() self.explore_dict={'Numpy.string':[np.array(['Uno', 'Dos',", "scoop.IS_RUNNING or scoop.IS_ORIGIN): return raise RuntimeError('traj niceness != os niceness; ' '%s !=", "= 0 self.env._traj.config.hdf5.overview.config_overview = 0 self.env._traj.config.hdf5.overview.explored_parameters_overview = 0 
self.make_run() hdf5file = pt.open_file(self.filename) overview_group", "= None self.add_time=True self.graceful_exit = False def explore_complex_params(self, traj): matrices_csr = [] for", "= os.nice(0) else: trajnice = traj.niceness osnice = psutil.Process().nice() if trajnice != osnice:", "x=traj.results.runs[irun].kkk # self.assertTrue(x==42+irun) # # def test_not_deep_copy_data(self): # self.filename = make_temp_dir('experiments/tests/HDF5/testcoyp2.hdf5') # self.logfolder", "= make_temp_dir(os.path.join('experiments','tests','HDF5','sort_tests.hdf5')) self.trajname = make_trajectory_name(self) env = Environment(trajectory=self.trajname,filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config() if self.log_config", "add_time=True, filename=filename, log_stdout=False, log_config=get_log_config(), dynamic_imports=SlowResult, display_time=0.1) traj = env.v_traj res=traj.f_add_result(SlowResult, 'iii', 42, 43,", "children and recursive is false with self.assertRaises(TypeError): self.traj.parameters.new.f_remove_child('test') self.traj.new.f_remove_child('test', recursive=True) self.assertTrue('new.group' not in", "traj.f_add_parameter('x',99) traj.f_add_parameter('y',99) self.env=env self.traj=traj def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False, how=2): ### Load The Trajectory and check", "!= osnice: if traj.use_scoop: import scoop if (not scoop.IS_RUNNING or scoop.IS_ORIGIN): return raise", "###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) get_root_logger().info(results) traj = self.traj self.assertEqual(len(traj), len(list(list(self.explore_dict.values())[0]))) self.traj.f_load_skeleton()", "newtraj) self.assertTrue(len(newtraj) == 1) size=os.path.getsize(self.filename) size_in_mb = size/1000000. 
get_root_logger().info('Size is %sMB' % str(size_in_mb))", "int)) self.assertTrue(isinstance(res[1], int)) idx = res[0] self.assertEqual(self.traj.res.runs[idx].z, res[1]) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "self.traj.hdf5.purge_duplicate_comments=0 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=0 self.make_run() hdf5file = pt.open_file(self.filename, mode='a') ncomments = {} try: traj_group", "wildcard_functions=self.wildcard_functions, derived_parameters_per_run=5, multiproc=self.multiproc, ncores=self.ncores, wrap_mode=self.mode, use_pool=self.use_pool, gc_interval=self.gc_interval, freeze_input=self.freeze_input, fletcher32=self.fletcher32, complevel=self.complevel, complib=self.complib, shuffle=self.shuffle, pandas_append=self.pandas_append,", "% (str(z),str(x),str(y))) traj.v_idx=-1 traj.v_shortcuts=True # def test_runfunc(traj, list_that_changes): # traj.f_add_result('kkk', list_that_changes[traj.v_idx] + traj.v_idx)", "###Explore self.explore(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename)", "= Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_derived_parameters=how, load_results=how) return newtraj def explore(self,traj): self.explore_dict={'x':[-1,1,2,3,4],'y':[1,1,2,2,3]}", "spsp.lil_matrix((111,111)) spsparse_bsr[3,2+irun] = 44.5*irun matrices_bsr.append(spsparse_bsr.tocsr().tobsr()) matrices_dia = [] for irun in range(3): spsparse_dia", "self.assertRaises(ValueError): Environment(automatic_storing=False, 
continuable=True, continue_folder=tmp) with self.assertRaises(ValueError): Environment(port='www.nosi.de', wrap_mode='LOCK') def test_run(self): self.traj.f_add_parameter('TEST', 'test_run') ###Explore", "self.encoding='latin1' self.graceful_exit = True class TestOtherHDF5Settings2(EnvironmentTest): tags = 'integration', 'hdf5', 'environment', 'hdf5_settings' def", "traj_name = self.env.v_trajectory.v_name del self.env self.env = Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) self.traj = self.env.v_trajectory", "traj.use_scoop: import scoop if (not scoop.IS_RUNNING or scoop.IS_ORIGIN): return raise RuntimeError('traj niceness !=", "(str(newtraj.crun.z),str(newtraj.x),str(newtraj.y))) traj = self.traj self.assertTrue(traj.v_idx == -1) self.assertTrue(traj.v_crun is None) self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY)", "in should_not: name = name.split('.')[-1] # Get only the name of the table,", "self.assertTrue(x==42+irun) # # def test_not_deep_copy_data(self): # self.filename = make_temp_dir('experiments/tests/HDF5/testcoyp2.hdf5') # self.logfolder = make_temp_dir('experiments/tests/Log')", "## Create some parameters self.param_dict={} create_param_dict(self.param_dict) ### Add some parameter: add_params(traj,self.param_dict) #remember the", "# env = Environment(trajectory=self.trajname,filename=self.filename, # file_title=self.trajname, log_folder=self.logfolder, # log_stdout=False, # multiproc=False, # deep_copy_data=False)", "test_a_large_run(self): get_root_logger().info('Testing large run') self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore_large(self.traj) self.make_run_large_data() self.assertTrue(self.traj.f_is_completed()) # Check if", "self.assertTrue(self.traj.new.group.v_annotations.annotation, 42) self.traj.f_delete_item('new.test.group') with self.assertRaises(pex.DataNotInStorageError): self.traj.parameters.f_load_child('new.test.group', load_data=pypetconstants.LOAD_SKELETON) def 
test_switch_on_all_comments(self): self.explore(self.traj) self.traj.hdf5.purge_duplicate_comments=0 self.make_run() hdf5file", "nameset) self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.res.runs.crun.z==newtraj.par.x*newtraj.par.y,' z != x*y: %s != %s * %s'", "def test_switch_on_all_comments(self): self.explore(self.traj) self.traj.hdf5.purge_duplicate_comments=0 self.make_run() hdf5file = pt.open_file(self.filename) traj_group = hdf5file.get_node(where='/', name= self.traj.v_name)", "self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.res.runs.crun.z==newtraj.par.x*newtraj.par.y,' z != x*y: %s != %s * %s' %", "Parameter('x', 3, 'jj') traj.f_explore({'x': [1,2,3]}) env.f_run(add_one_particular_item, True) traj = load_trajectory(index=-1, filename=filename) self.assertTrue('hi' in", "only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def check_if_z_is_correct_map(self,traj, args1, args2,", "self.traj.new.f_load_child('test', recursive=True, load_data=pypetconstants.LOAD_SKELETON) self.assertTrue(self.traj.new.group.v_annotations.annotation, 42) self.traj.f_delete_item('new.test.group') with self.assertRaises(pex.DataNotInStorageError): self.traj.parameters.f_load_child('new.test.group', load_data=pypetconstants.LOAD_SKELETON) def test_switch_on_all_comments(self): self.explore(self.traj)", "= None self.timeout = None self.add_time=True self.graceful_exit = False def explore_complex_params(self, traj): matrices_csr", "== idx) def test_expand(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) get_root_logger().info(results) traj =", "env.v_trajectory traj.par.x = Parameter('x', 3, 'jj') traj.f_explore({'x': [1,2,3]}) env.f_run(add_one_particular_item, True) 
traj = load_trajectory(index=-1,", "for irun in range(3): spsparse_dia = spsp.lil_matrix((111,111)) spsparse_dia[3,2+irun] = 44.5*irun matrices_dia.append(spsparse_dia.tocsc().todia()) self.explore_dict={'string':[np.array(['Uno', 'Dos',", "np.array(['Ocho', 'Nueve', 'Diez'])], 'int':[1,2,3], 'csr_mat' : matrices_csr, 'csc_mat' : matrices_csc, 'bsr_mat' : matrices_bsr,", "only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) for idx, run_name in enumerate(self.traj.f_iter_runs()):", "filename = make_temp_dir('full_store.hdf5') with Environment(filename=filename, log_config=get_log_config()) as env: traj = env.v_trajectory traj.par.x =", "self.explore(self.traj) self.env.f_run(with_niceness) self.assertTrue(self.traj.f_is_completed()) def test_file_overwriting(self): self.traj.f_store() with pt.open_file(self.filename, mode='r') as file: nchildren =", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def check_if_z_is_correct_map(self,traj,", "args1, args2, args3) for res in results: self.assertEqual(len(res), 2) self.assertTrue(isinstance(res[0], int)) self.assertTrue(isinstance(res[1], int))", "= 'integration', 'hdf5', 'environment' # Test tags def test_full_store(self): filename = make_temp_dir('full_store.hdf5') with", "# group is below test not new, so ValueError thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_remove_child('group')", "traj.f_explore({'dummy':[12, 3, 3, 4]}) # # list_that_should_not_change = [42, 42, 42, 42] #", "self.complevel=2 self.shuffle=False 
self.fletcher32 = True self.encoding='latin1' self.wildcard_functions = {('$', 'crun') : my_run_func, ('$set',", "overview_group, '%s not in overviews but it should!' % name) hdf5file.close() self.traj.f_load(load_parameters=2, load_derived_parameters=2,", "param_name in par_dict: param = par_dict[param_name] if param.v_name in self.explore_dict: param.f_unlock() if param.v_explored:", "= False self.encoding='latin1' self.graceful_exit = True class TestOtherHDF5Settings2(EnvironmentTest): tags = 'integration', 'hdf5', 'environment',", "in enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx nameset = set((x.v_name for", "Environment(port='www.nosi.de', wrap_mode='LOCK') def test_run(self): self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)", "= self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly_using_map(self): ###Explore self.explore(self.traj) args1=[10*x for x", "self.assertRaises(ValueError): self.traj.parameters.new.f_remove_child('group') # group is below test not new, so ValueError thrown: with", "= env.v_trajectory traj.par.x = Parameter('x', 3, 'jj') traj.f_explore({'x': [1,2,3]}) env.f_run(add_one_particular_item, True) traj =", "= False ###Explore self.explore(self.traj) self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj)", "get_log_config, get_log_path from pypet.tests.testutils.data import create_param_dict, add_params, multiply,\\ simple_calculations, 
TrajectoryComparator, multiply_args, multiply_with_storing, \\", "self.env.v_trajectory self.traj.f_load(name=traj_name) self.expand(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+", "def test_run_complex(self): self.traj.f_add_parameter('TEST', 'test_run_complex') ###Explore self.explore_complex_params(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "hasattr(os, 'nice'): trajnice = traj.niceness osnice = os.nice(0) else: trajnice = traj.niceness osnice", "EnvironmentTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' def set_mode(self): self.mode = 'LOCK' self.multiproc =", "shuffle=self.shuffle, pandas_append=self.pandas_append, pandas_format=self.pandas_format, encoding=self.encoding, niceness=self.niceness, use_scoop=self.use_scoop, port=self.port, add_time=self.add_time, timeout=self.timeout, graceful_exit=self.graceful_exit) traj = env.v_trajectory", "run_suite, make_temp_dir, make_trajectory_name,\\ get_root_logger, parse_args, get_log_config, get_log_path from pypet.tests.testutils.data import create_param_dict, add_params, multiply,\\", "head, tail = os.path.split(filename) env = Environment(filename=head) the_file_name = env.v_traj.v_name + '.hdf5' head,", "= 'LOCK' self.multiproc = False self.ncores = 1 self.use_pool=True self.pandas_format='table' self.pandas_append=True self.complib =", "display_time=0.1) traj = env.v_traj res=traj.f_add_result(SlowResult, 'iii', 42, 43, comment='llk') traj.f_store() service_logger = traj.v_storage_service._logger", "not new, so ValueError thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_remove_child('group') # group is below test", "!= x*y: %s != %s * %s' % 
(str(z),str(x),str(y))) traj.v_idx=-1 traj.v_shortcuts=True # def", "self.traj.f_expand(cartesian_product(self.expanded)) self.traj.f_store() ################## Overview TESTS ############################# def test_switch_ON_large_tables(self): self.traj.f_add_parameter('TEST', 'test_switch_ON_LARGE_tables') ###Explore self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview", "None, 'Niceness not supported under non Unix.') def test_niceness(self): ###Explore self.explore(self.traj) self.env.f_run(with_niceness) self.assertTrue(self.traj.f_is_completed())", "def test_deep_copy_data(self): # # self.filename = make_temp_dir('experiments/tests/HDF5/testcopy.hdf5') # self.logfolder = make_temp_dir('experiments/tests/Log') # self.trajname", "False def explore_complex_params(self, traj): matrices_csr = [] for irun in range(3): spsparse_csr =", "traj.f_explore({'x': [1,2,3]}) env.f_run(add_one_particular_item, True) traj = load_trajectory(index=-1, filename=filename) self.assertTrue('hi' in traj) def with_niceness(traj):", "test_not_deep_copy_data(self): # self.filename = make_temp_dir('experiments/tests/HDF5/testcoyp2.hdf5') # self.logfolder = make_temp_dir('experiments/tests/Log') # self.trajname = make_trajectory_name(self)", "== pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx == idx) def test_expand(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results)", "'<NAME>' import os import platform import logging import time import numpy as np", "load_other_data=2) return newtraj def test_expand(self): ###Explore self.traj.f_add_parameter('TEST', 'test_expand') self.explore(self.traj) self.make_run() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$$$$$$", "traj2.f_store() self.assertTrue(os.path.exists(self.filename)) with pt.open_file(self.filename, mode='r') as file: nchildren = len(file.root._v_children) self.assertTrue(nchildren > 1)", "with_niceness(traj): if traj.multiproc: 
if hasattr(os, 'nice'): trajnice = traj.niceness osnice = os.nice(0) else:", "== len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj)", "test_expand_after_reload(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))", "# self.logfolder = make_temp_dir('experiments/tests/Log') # self.trajname = make_trajectory_name(self) # # env = Environment(trajectory=self.trajname,filename=self.filename,", "= self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) self.expand(self.traj) self.env.f_run(multiply) traj =", "self.assertEqual(self.traj.res.runs[idx].z, res[1]) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.assertEqual(len(traj), 5) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly(self):", "for x in range(len(traj)): traj.v_idx=x z = traj.res.runs.crun.z x = traj.par.x y =", "self.filename = make_temp_dir('experiments/tests/HDF5/testcopy.hdf5') # self.logfolder = make_temp_dir('experiments/tests/Log') # self.trajname = make_trajectory_name(self) # #", "traj = self.traj self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+\\ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), 
only_empties=True) self.check_if_z_is_correct(traj) newtraj =", "42) def test_store_single_group(self): self.traj.f_store() self.traj.f_add_parameter_group('new.test.group').v_annotations.f_set(42) self.traj.f_store_item('new.group') # group is below test not new,", "only_empties=True) self.compare_trajectories(self.traj,newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb <", "'Size is %sMB > 30MB' % str(size_in_mb)) def test_two_runs(self): self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments =", "%s' % (str(traj.crun.z),str(traj.x),str(traj.y))) traj.v_idx=-1 def check_if_z_is_correct(self,traj): traj.v_shortcuts=False for x in range(len(traj)): traj.v_idx=x z", "os.nice(0) else: trajnice = traj.niceness osnice = psutil.Process().nice() if trajnice != osnice: if", "= self.load_trajectory(trajectory_name=self.traj.v_name) self.compare_trajectories(newtraj,self.traj) def test_switch_off_all_tables(self): ###Explore self.traj.f_add_parameter('TEST', 'test_switch_off_ALL_tables') self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_overview", "range(111): array_list.append(np.random.rand(10)) traj.f_add_result('m4ny', *array_list) class SimpleEnvironmentTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment', 'quick' def", "from pypet.environment import Environment from pypet.storageservice import HDF5StorageService from pypet import pypetconstants, Result,", "results = self.env.f_run_map(multiply_args, args1, arg2=args2, arg3=args3) self.assertEqual(len(results), len(self.traj)) traj = self.traj self.assertTrue(len(traj) ==", "self.traj.f_load(load_parameters=2, load_derived_parameters=2, load_results=2) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name) self.compare_trajectories(newtraj,self.traj) def 
test_switch_off_all_tables(self): ###Explore self.traj.f_add_parameter('TEST', 'test_switch_off_ALL_tables') self.explore(self.traj)", "derived_parameters_per_run=5, multiproc=self.multiproc, ncores=self.ncores, wrap_mode=self.mode, use_pool=self.use_pool, gc_interval=self.gc_interval, freeze_input=self.freeze_input, fletcher32=self.fletcher32, complevel=self.complevel, complib=self.complib, shuffle=self.shuffle, pandas_append=self.pandas_append, pandas_format=self.pandas_format,", "tables as pt from pypet.tests.testutils.ioutils import run_suite, make_temp_dir, make_trajectory_name,\\ get_root_logger, parse_args, get_log_config, get_log_path", "= hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should_not = HDF5StorageService.NAME_TABLE_MAPPING.keys() for name in should_not: name =", "= get_log_path(traj) mainfilename = os.path.join(path, 'LOG.txt') with open(mainfilename, mode='r') as mainf: full_text =", "is not self.traj) newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx nameset = set((x.v_name", "None, multiproc=self.multiproc, wrap_mode=self.mode, ncores=self.ncores, use_pool=self.use_pool, use_scoop=self.use_scoop, port=self.port, freeze_input=self.freeze_input, graceful_exit=self.graceful_exit) traj = env.v_trajectory traj.v_standard_parameter=Parameter", "try: import dill except ImportError: dill = None import scipy.sparse as spsp import", "1 self.make_run() hdf5file = pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should = ['derived_parameters_overview',", "idx in self.traj.f_iter_runs(yields='idx'): self.assertTrue(isinstance(idx, int)) man_multiply(self.traj) traj = self.traj traj.f_store() self.assertTrue(len(traj), 5) self.assertTrue(len(traj)", "= traj.par.y self.assertTrue(z==x*y,' z != x*y: %s != %s * %s' % (str(z),str(x),str(y)))", "ncores=self.ncores, use_pool=self.use_pool, use_scoop=self.use_scoop, port=self.port, freeze_input=self.freeze_input, 
graceful_exit=self.graceful_exit) traj = env.v_trajectory traj.v_standard_parameter=Parameter traj.f_add_parameter('x',99) traj.f_add_parameter('y',99) self.env=env", "# # list_that_should_not_change = [42, 42, 42, 42] # # env.f_run(test_runfunc, list_that_should_not_change) #", "Environment(trajectory=self.trajname, filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config(), results_per_run=5, wildcard_functions=self.wildcard_functions, derived_parameters_per_run=5, multiproc=self.multiproc, ncores=self.ncores, wrap_mode=self.mode, use_pool=self.use_pool, gc_interval=self.gc_interval,", "the trajectory and the environment self.traj = traj self.env = env @unittest.skipIf(not hasattr(os,", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False): ### Load The Trajectory and check if", "test_full_store(self): filename = make_temp_dir('full_store.hdf5') with Environment(filename=filename, log_config=get_log_config()) as env: traj = env.v_trajectory traj.par.x", ": matrices_csc, 'bsr_mat' : matrices_bsr, 'dia_mat' : matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} traj.f_explore(self.explore_dict) def", "'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.expanded['csr_mat'][0][1,2]=44.0 self.expanded['csr_mat'][1][2,2]=33 self.expanded['csr_mat'][0]=self.expanded['csr_mat'][0].tocsr() self.expanded['csr_mat'][1]=self.expanded['csr_mat'][1].tocsr() self.traj.f_expand(cartesian_product(self.expanded)) self.traj.f_store() ##################", "%sMB > 6MB' % str(size_in_mb)) mp_traj = self.traj old_multiproc = self.multiproc self.multiproc =", "is None) self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx == idx) def 
test_f_iter_runs_auto_load(self): ###Explore self.explore(self.traj) results", "scoop if (not scoop.IS_RUNNING or scoop.IS_ORIGIN): return raise RuntimeError('traj niceness != os niceness;", "irun in range(3): spsparse_csr = spsp.lil_matrix((111,111)) spsparse_csr[3,2+irun] = 44.5*irun matrices_csr.append(spsparse_csr.tocsr()) matrices_csc = []", "6.0, 'Size is %sMB > 6MB' % str(size_in_mb)) self.compare_trajectories(mp_traj, self.traj) self.multiproc = old_multiproc", "self.env._traj.config.hdf5.overview.config_overview = 0 self.env._traj.config.hdf5.overview.explored_parameters_overview = 0 self.make_run() hdf5file = pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+", "len(self.traj)) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct_map(traj, args1, args2,", "name of the table, no the full name self.assertTrue(not name in overview_group, '%s", "in overviews but it should!' 
% name) hdf5file.close() self.traj.f_load(load_parameters=2, load_derived_parameters=2, load_results=2) newtraj =", "= 'integration', 'hdf5', 'environment', 'hdf5_settings' def set_mode(self): EnvironmentTest.set_mode(self) self.mode = 'LOCK' self.multiproc =", "pt.open_file(self.filename, mode='r') as file: nchildren = len(file.root._v_children) self.assertTrue(nchildren > 1) env3 = Environment(filename=self.filename,", "in nameset) self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.res.runs.crun.z==newtraj.par.x*newtraj.par.y,' z != x*y: %s != %s *", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) traj_name = self.env.v_trajectory.v_name del self.env self.env = Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config())", "= env.v_traj.v_name + '.hdf5' head, tail = os.path.split(env.v_traj.v_storage_service.filename) self.assertEqual(tail, the_file_name) class EnvironmentTest(TrajectoryComparator): tags", "6.0, 'Size is %sMB > 6MB' % str(size_in_mb)) def test_just_one_run(self): self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj", "param.f_unlock() if param.v_explored: param._shrink() self.explore_dict={'Numpy.string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])],", "* %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) for idx, traj in enumerate(self.traj.f_iter_runs(yields='copy')): run_name = traj.f_idx_to_run(idx) self.assertTrue(traj", "ValueError thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_remove_child('group') # group is below test not new, so", "make_temp_dir('test.hdf5') head, tail = os.path.split(filename) env = Environment(filename=head) the_file_name = env.v_traj.v_name + '.hdf5'", "self.fletcher32 = False self.encoding = 'utf8' self.log_stdout=False self.wildcard_functions = None self.niceness = None", "'integration', 'hdf5', 'environment', 
'hdf5_settings' def set_mode(self): EnvironmentTest.set_mode(self) self.mode = 'LOCK' self.multiproc = False", "with Environment(filename=filename, log_config=get_log_config()) as env: traj = env.v_trajectory traj.par.x = Parameter('x', 3, 'jj')", "test_time_display_of_loading(self): filename = make_temp_dir('sloooow.hdf5') env = Environment(trajectory='traj', add_time=True, filename=filename, log_stdout=False, log_config=get_log_config(), dynamic_imports=SlowResult, display_time=0.1)", "Environment(filename=filename, log_config=get_log_config()) as env: traj = env.v_trajectory traj.par.x = Parameter('x', 3, 'jj') traj.f_explore({'x':", "trajectory and the environment self.traj = traj self.env = env @unittest.skipIf(not hasattr(os, 'nice')", "def explore_cartesian(self,traj): self.explore_dict=cartesian_product({'x':[-1,1,2,3,4, 5, 6],'y':[1,1,2,2,3,4,4]}) traj.f_explore(self.explore_dict) def expand(self,traj): self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12,13]} with self.assertRaises(ValueError): traj.f_expand(self.expand_dict) self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]}", "% idx in nameset) self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y: %s !=", "spsp.lil_matrix((111,111)) spsparse_dia[3,2+irun] = 44.5*irun matrices_dia.append(spsparse_dia.tocsc().todia()) self.explore_dict={'string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho', 'Nueve',", "str(size_in_mb)) self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb)) def test_just_one_run(self):", "as pt from pypet.tests.testutils.ioutils import run_suite, make_temp_dir, make_trajectory_name,\\ get_root_logger, parse_args, get_log_config, get_log_path from", "x, arg1, arg2, arg3 in zip(range(len(traj)), args1, args2, args3): traj.v_idx=x self.assertTrue(traj.crun.z==traj.x*traj.y+arg1+arg2+arg3,' z !=", "= env.v_trajectory # # traj.f_add_parameter('dummy', 1) # 
traj.f_explore({'dummy':[12, 3, 3, 4]}) # #", "= False def explore_complex_params(self, traj): matrices_csr = [] for irun in range(3): spsparse_csr", "'environment' # Test tags def test_full_store(self): filename = make_temp_dir('full_store.hdf5') with Environment(filename=filename, log_config=get_log_config()) as", "only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly_using_map(self): ###Explore self.explore(self.traj) args1=[10*x for x in range(len(self.traj))] args2=[100*x for", "dill = None import scipy.sparse as spsp import random from pypet import Parameter", "= None try: import dill except ImportError: dill = None import scipy.sparse as", "traj.par.y self.assertTrue(z==x*y,' z != x*y: %s != %s * %s' % (str(z),str(x),str(y))) traj.v_idx=-1", "'environment' def set_mode(self): self.mode = 'LOCK' self.multiproc = False self.gc_interval = None self.ncores", "'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])], 'int':[1,2,3], 'csr_mat' : matrices_csr, 'csc_mat' : matrices_csc, 'bsr_mat' :", "self.pandas_append=False self.complib = 'lzo' self.complevel=2 self.shuffle=False self.fletcher32 = True self.encoding='latin1' self.wildcard_functions = {('$',", "'Size is %sMB > 6MB' % str(size_in_mb)) def test_just_one_run(self): self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj =", "is %sMB > 30MB' % str(size_in_mb)) def test_two_runs(self): self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments = False", "should_not = HDF5StorageService.NAME_TABLE_MAPPING.keys() for name in should_not: name = name.split('.')[-1] # Get only", "self.assertTrue(size_in_mb < 2.0, 'Size is %sMB > 6MB' % str(size_in_mb)) with self.assertRaises(TypeError): self.explore(self.traj)", "get_root_logger().info('\\n $$$$$$$$$$$$$$$$$ Second Run $$$$$$$$$$$$$$$$$$$$$$$$') self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() 
self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,", "%s != %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) for idx, traj in enumerate(self.traj.f_iter_runs(yields='copy')): run_name", "is None) self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx == idx) def test_expand(self): ###Explore self.explore(self.traj) results", "test_if_results_are_sorted_correctly_using_map(self): ###Explore self.explore(self.traj) args1=[10*x for x in range(len(self.traj))] args2=[100*x for x in range(len(self.traj))]", "make_temp_dir('cont') if dill is not None: env1 = Environment(continuable=True, continue_folder=tmp, log_config=None, filename=self.filename) with", "str(size_in_mb)) self.assertTrue(size_in_mb < 30.0, 'Size is %sMB > 30MB' % str(size_in_mb)) def test_two_runs(self):", "class SimpleEnvironmentTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment', 'quick' def test_make_default_file_when_giving_directory_without_slash(self): filename = make_temp_dir('test.hdf5')", "idx def my_set_func(idx): return 'huhu_%d' % idx class TestOtherHDF5Settings(EnvironmentTest): tags = 'integration', 'hdf5',", "load_dict): time.sleep(3) super(SlowResult, self)._load(load_dict) class FullStorageTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' # Test", "self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly_using_map(self): ###Explore", "= env.v_trajectory traj.v_standard_parameter=Parameter ## Create some parameters self.param_dict={} create_param_dict(self.param_dict) ### Add some parameter:", "!= x*y: %s != %s * %s' % 
(str(newtraj.crun.z),str(newtraj.x),str(newtraj.y))) traj = self.traj self.assertTrue(traj.v_idx", "%s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) for idx, traj in enumerate(self.traj.f_iter_runs(yields='copy')): run_name = traj.f_idx_to_run(idx)", "comment not in ncomments: ncomments[comment] = 0 ncomments[comment] += 1 finally: hdf5file.close() self.assertGreaterEqual(len(ncomments),", "idx, traj in enumerate(self.traj.f_iter_runs(yields='self')): run_name = traj.f_idx_to_run(idx) self.assertTrue(traj is self.traj) newtraj.v_crun=run_name self.traj.v_idx =", "matrices_bsr, 'dia_mat' : matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} traj.f_explore(self.explore_dict) def explore(self, traj): self.explored ={'Normal.trial':", "if ('/derived_parameters/' in node._v_pathname or '/results/' in node._v_pathname): if 'SRVC_LEAF' in node._v_attrs: if", "list_that_should_change = [42, 42, 42, 42] # # env.f_run(test_runfunc, list_that_should_change) # # traj.v_auto_load=True", "= idx nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,)))) self.assertTrue('run_%08d' % (idx+1) not", "np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.expanded['csr_mat'][0][1,2]=44.0 self.expanded['csr_mat'][1][2,2]=33 self.expanded['csr_mat'][0]=self.expanded['csr_mat'][0].tocsr() self.expanded['csr_mat'][1]=self.expanded['csr_mat'][1].tocsr() self.traj.f_expand(cartesian_product(self.expanded)) self.traj.f_store() ################## Overview TESTS", "###Explore self.explore_complex_params(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def", "node._v_attrs: comment = node._v_attrs['SRVC_INIT_COMMENT'] if comment not in ncomments: ncomments[comment] = 0 
ncomments[comment]", "1 self.use_pool=True self.pandas_format='table' self.pandas_append=False self.complib = 'lzo' self.complevel=2 self.shuffle=False self.fletcher32 = True self.encoding='latin1'", "not in nameset) self.assertTrue('run_%08d' % idx in nameset) self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z", "(str(newtraj.crun.z),str(traj.x),str(traj.y))) for idx, traj in enumerate(self.traj.f_iter_runs(yields='copy')): run_name = traj.f_idx_to_run(idx) self.assertTrue(traj is not self.traj)", "spsp.lil_matrix((2222,22))]} self.explored['csr_mat'][0][1,2]=44.0 self.explored['csr_mat'][1][2,2]=33 self.explored['csr_mat'][0] = self.explored['csr_mat'][0].tocsr() self.explored['csr_mat'][1] = self.explored['csr_mat'][0].tocsr() traj.f_explore(cartesian_product(self.explored)) def explore_large(self, traj):", "env2.v_trajectory traj2.f_store() self.assertTrue(os.path.exists(self.filename)) with pt.open_file(self.filename, mode='r') as file: nchildren = len(file.root._v_children) self.assertTrue(nchildren >", "len(file.root._v_children) self.assertTrue(nchildren > 1) env3 = Environment(filename=self.filename, overwrite_file=True, log_config=get_log_config()) self.assertFalse(os.path.exists(self.filename)) env2.f_disable_logging() env3.f_disable_logging() def", "np.random.rand(100, 1000, 10) traj.f_add_result('l4rge', np_array) traj.f_store_item('l4rge') traj.f_remove_item('l4rge') array_list = [] for irun in", "= Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_parameters=2, load_derived_parameters=2, load_results=2, load_other_data=2) return newtraj def", "log_config=get_log_config()) self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name) self.traj.res.f_remove() self.traj.dpar.f_remove() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$ Second Run $$$$$$$$$$", "= 'integration', 'hdf5', 
'environment', 'quick' def test_make_default_file_when_giving_directory_without_slash(self): filename = make_temp_dir('test.hdf5') head, tail =", "self.env._traj.config.hdf5.purge_duplicate_comments = 0 self.env._traj.config.hdf5.overview.parameters_overview = 0 self.env._traj.config.hdf5.overview.config_overview = 0 self.env._traj.config.hdf5.overview.explored_parameters_overview = 0 self.make_run()", "self.log_config = True self.port = None self.graceful_exit = True def tearDown(self): self.env.f_disable_logging() super(ResultSortTest,", "self).tearDown() def setUp(self): self.set_mode() self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log')) random.seed() self.trajname = make_trajectory_name(self)", "only_empties=True) self.assertEqual(len(traj), 5) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) self.assertEqual(len(results),", "zip(range(len(traj)), args1, args2, args3): traj.v_idx=x self.assertTrue(traj.crun.z==traj.x*traj.y+arg1+arg2+arg3,' z != x*y: %s != %s *", "still the same newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_derived_parameters=how, load_results=how) return", "'SRVC_INIT_COMMENT' in node._v_attrs: comment = node._v_attrs['SRVC_INIT_COMMENT'] if comment not in ncomments: ncomments[comment] =", "self.ncores = 1 self.use_pool=True self.use_scoop=False self.freeze_input=False self.pandas_format='fixed' self.pandas_append=False self.complib = 'zlib' self.complevel=9 self.shuffle=True", "traj.f_explore({'dummy':[12, 3, 3, 4]}) # # list_that_should_change = [42, 42, 42, 42] #", "size=os.path.getsize(self.filename) size_in_mb = size/1000000. 
get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 30.0, 'Size", "self)._load(load_dict) class FullStorageTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' # Test tags def test_full_store(self):", "as_new=False, load_data=0) newtraj.v_auto_load = True newtraj.par.f_load_child('y', load_data=1) for idx, run_name in enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name", "return newtraj def test_expand(self): ###Explore self.traj.f_add_parameter('TEST', 'test_expand') self.explore(self.traj) self.make_run() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$$$$$$ Second", "= psutil.Process().nice() if trajnice != osnice: if traj.use_scoop: import scoop if (not scoop.IS_RUNNING", "self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore_large(self.traj) self.make_run_large_data() self.assertTrue(self.traj.f_is_completed()) # Check if printing and repr work", "nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,)))) self.assertTrue('run_%08d' % (idx+1) not in nameset)", "no comment in node %s!' 
% node._v_name) hdf5file.close() def test_purge_duplicate_comments(self): self.explore(self.traj) with self.assertRaises(RuntimeError):", "self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y: %s != %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) traj =", "if trajnice != osnice: if traj.use_scoop: import scoop if (not scoop.IS_RUNNING or scoop.IS_ORIGIN):", "self.logfolder = make_temp_dir(os.path.join('experiments', 'tests', 'Log')) random.seed() self.trajname = make_trajectory_name(self) self.filename = make_temp_dir(os.path.join('experiments', 'tests',", "pandas_format=self.pandas_format, encoding=self.encoding, niceness=self.niceness, use_scoop=self.use_scoop, port=self.port, add_time=self.add_time, timeout=self.timeout, graceful_exit=self.graceful_exit) traj = env.v_trajectory traj.v_standard_parameter=Parameter ##", "filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config(), results_per_run=5, wildcard_functions=self.wildcard_functions, derived_parameters_per_run=5, multiproc=self.multiproc, ncores=self.ncores, wrap_mode=self.mode, use_pool=self.use_pool, gc_interval=self.gc_interval, freeze_input=self.freeze_input,", "env3.f_run_map(multiply_args) with self.assertRaises(ValueError): Environment(use_scoop=True, immediate_postproc=True) with self.assertRaises(ValueError): Environment(use_pool=True, immediate_postproc=True) with self.assertRaises(ValueError): Environment(continuable=True, wrap_mode='QUEUE',", "self.assertTrue(traj.v_crun is None) self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx == idx) def test_f_iter_runs_auto_load(self): ###Explore self.explore(self.traj)", "np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])], 'Normal.int':[1,2,3], 'csr_mat' : matrices_csr, 'csc_mat' : matrices_csc,", "'iii', 42, 43, comment='llk') traj.f_store() service_logger = traj.v_storage_service._logger root = logging.getLogger('pypet') 
old_level =", "== idx) def test_f_iter_runs_auto_load(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj", "self.are_results_in_order(results) get_root_logger().info(results) traj = self.traj self.assertEqual(len(traj), len(list(list(self.explore_dict.values())[0]))) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) traj_name =", "self.traj.overview.results_summary = 0 self.make_run() self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=1 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=1 self.make_run() hdf5file = pt.open_file(self.filename, mode='a')", "self.assertTrue(size_in_mb < 30.0, 'Size is %sMB > 30MB' % str(size_in_mb)) def test_two_runs(self): self.traj.f_add_parameter('TEST',", "name= self.traj.v_name) for node in traj_group._f_walk_groups(): if 'SRVC_LEAF' in node._v_attrs: self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs,", "5) self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "in enumerate(self.traj.f_iter_runs(yields='copy')): run_name = traj.f_idx_to_run(idx) self.assertTrue(traj is not self.traj) newtraj.v_crun=run_name self.traj.v_idx = idx", "= True self.encoding='latin1' self.wildcard_functions = {('$', 'crun') : my_run_func, ('$set', 'crunset'): my_set_func} class", "self.env.f_run(multiply_with_graceful_exit) self.are_results_in_order(results) self.assertFalse(self.traj.f_is_completed()) def test_f_iter_runs(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) 
self.are_results_in_order(results) traj =", "> 6MB' % str(size_in_mb)) def test_just_one_run(self): self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "log_stdout=False, log_config=get_log_config()) self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name) self.traj.res.f_remove() self.traj.dpar.f_remove() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$ Second Run", "use_pool=self.use_pool, gc_interval=self.gc_interval, freeze_input=self.freeze_input, fletcher32=self.fletcher32, complevel=self.complevel, complib=self.complib, shuffle=self.shuffle, pandas_append=self.pandas_append, pandas_format=self.pandas_format, encoding=self.encoding, niceness=self.niceness, use_scoop=self.use_scoop, port=self.port,", "###Explore self.explore(self.traj) self.make_run() # newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) size=os.path.getsize(self.filename)", "self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 1 self.env._traj.config.hdf5.overview.derived_parameters_overview = 1 self.make_run() hdf5file = pt.open_file(self.filename) overview_group =", "len(file.root._v_children) self.assertTrue(nchildren > 0) env2 = Environment(filename=self.filename, log_config=get_log_config()) traj2 = env2.v_trajectory traj2.f_store() self.assertTrue(os.path.exists(self.filename))", "= self.traj self.assertTrue(traj.v_idx == -1) self.assertTrue(traj.v_crun is None) self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx ==", "class ResultSortTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' def set_mode(self): self.mode = 'LOCK' self.multiproc", "def 
setUp(self): self.set_mode() self.filename = make_temp_dir(os.path.join('experiments','tests','HDF5','sort_tests.hdf5')) self.trajname = make_trajectory_name(self) env = Environment(trajectory=self.trajname,filename=self.filename, file_title=self.trajname,", "'integration', 'hdf5', 'environment' # Test tags def test_full_store(self): filename = make_temp_dir('full_store.hdf5') with Environment(filename=filename,", "= traj.res.runs.crun.z x = traj.par.x y = traj.par.y self.assertTrue(z==x*y,' z != x*y: %s", "= 0 self.env._traj.config.hdf5.purge_duplicate_comments = 0 self.env._traj.config.hdf5.overview.parameters_overview = 0 self.env._traj.config.hdf5.overview.config_overview = 0 self.env._traj.config.hdf5.overview.explored_parameters_overview =", "self.multiproc = False self.ncores = 1 self.use_pool=True self.pandas_format='table' self.pandas_append=False self.complib = 'lzo' self.complevel=2", "in nameset) self.assertTrue('run_%08d' % idx in nameset) self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z !=", "self.port = None self.timeout = None self.add_time=True self.graceful_exit = False def explore_complex_params(self, traj):", "self.set_mode() self.filename = make_temp_dir(os.path.join('experiments','tests','HDF5','sort_tests.hdf5')) self.trajname = make_trajectory_name(self) env = Environment(trajectory=self.trajname,filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config()", "str(size_in_mb)) with self.assertRaises(TypeError): self.explore(self.traj) def test_run_complex(self): self.traj.f_add_parameter('TEST', 'test_run_complex') ###Explore self.explore_complex_params(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj", "0 ncomments[comment] += 1 finally: hdf5file.close() self.assertGreaterEqual(len(ncomments), 1) self.assertTrue(any(x > 1 for x", "###Explore self.explore(self.traj) self.traj.f_store(only_init=True) man_multiply = 
manual_run()(multiply_with_storing) for idx in self.traj.f_iter_runs(yields='idx'): self.assertTrue(isinstance(idx, int)) man_multiply(self.traj)", "self.traj.f_store() ################## Overview TESTS ############################# def test_switch_ON_large_tables(self): self.traj.f_add_parameter('TEST', 'test_switch_ON_LARGE_tables') ###Explore self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview =", "def test_graceful_exit(self): ###Explore self.explore_cartesian(self.traj) results = self.env.f_run(multiply_with_graceful_exit) self.are_results_in_order(results) self.assertFalse(self.traj.f_is_completed()) def test_f_iter_runs(self): ###Explore self.explore(self.traj)", "= make_temp_dir('experiments/tests/Log') # self.trajname = make_trajectory_name(self) # # env = Environment(trajectory=self.trajname,filename=self.filename, # file_title=self.trajname,", "def test_store_form_tuple(self): self.traj.f_store() self.traj.f_add_result('TestResItem', 42, 43) with self.assertRaises(ValueError): self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem,(),{},5)) self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem)) self.traj.results.f_remove_child('TestResItem')", "test not new, so ValueError thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_store_child('group') # group has children", "test_niceness(self): ###Explore self.explore(self.traj) self.env.f_run(with_niceness) self.assertTrue(self.traj.f_is_completed()) def test_file_overwriting(self): self.traj.f_store() with pt.open_file(self.filename, mode='r') as file:", "overview_group = hdf5file.get_node(where='/'+ self.traj.v_name, name='overview') should_not = HDF5StorageService.NAME_TABLE_MAPPING.keys() for name in should_not: name", "% str(size_in_mb)) self.compare_trajectories(mp_traj, self.traj) self.multiproc = old_multiproc def test_errors(self): tmp = make_temp_dir('cont') if", "self.setUp() self.traj.f_add_parameter('TEST', 'test_run') 
self.traj.hdf5.purge_duplicate_comments = False ###Explore self.explore(self.traj) self.make_run() # newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)", "irun in range(3): spsparse_csc = spsp.lil_matrix((111,111)) spsparse_csc[3,2+irun] = 44.5*irun matrices_csc.append(spsparse_csc.tocsc()) matrices_bsr = []", "in should: self.assertTrue(name in overview_group, '%s not in overviews but it should!' %", "new, so ValueError thrown: with self.assertRaises(ValueError): self.traj.parameters.new.f_store_child('group') # group has children and recursive", "# class DeepCopyTest(TrajectoryComparator): # # def test_deep_copy_data(self): # # self.filename = make_temp_dir('experiments/tests/HDF5/testcopy.hdf5') #", "% str(size_in_mb)) mp_traj = self.traj old_multiproc = self.multiproc self.multiproc = False ### Make", "self.env._traj.config.hdf5.overview.derived_parameters_overview = 0 self.env._traj.config.hdf5.overview.derived_parameters_summary = 0 self.env._traj.config.hdf5.overview.results_summary = 0 self.env._traj.config.hdf5.purge_duplicate_comments = 0 self.env._traj.config.hdf5.overview.parameters_overview", "= self.traj self.assertEqual(len(traj), len(list(list(self.explore_dict.values())[0]))) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) traj_name = self.env.v_trajectory.v_name del self.env", "self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "RuntimeError('traj niceness != os niceness; ' '%s != %s' % (str(trajnice), str(osnice))) def", "= False ### Make a new single core run self.setUp() self.traj.f_add_parameter('TEST', 'test_run') 
self.traj.hdf5.purge_duplicate_comments", "tmp = make_temp_dir('cont') if dill is not None: env1 = Environment(continuable=True, continue_folder=tmp, log_config=None,", "self.explore(self.traj) with self.assertRaises(RuntimeError): self.traj.hdf5.purge_duplicate_comments = 1 self.traj.overview.results_summary = 0 self.make_run() self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=1 self.traj.f_get('results_summary').f_unlock()", "self.traj.new.f_remove_child('test', recursive=True) self.assertTrue('new.group' not in self.traj) self.traj.new.f_load_child('test', recursive=True, load_data=pypetconstants.LOAD_SKELETON) self.assertTrue(self.traj.new.group.v_annotations.annotation, 42) self.traj.f_delete_item('new.test.group') with", "'Tres']), np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho', 'Nueve', 'Diez'])], 'int':[1,2,3], 'csr_mat' : matrices_csr, 'csc_mat' :", "from pypet.utils.explore import cartesian_product from pypet.environment import Environment from pypet.storageservice import HDF5StorageService from", "as file: nchildren = len(file.root._v_children) self.assertTrue(nchildren > 1) env3 = Environment(filename=self.filename, overwrite_file=True, log_config=get_log_config())", "store_full): traj.hi = Result('hi', 42, 'hi!') traj.f_store() traj.f_remove_child('hi') class SlowResult(Result): def _load(self, load_dict):", "test_switch_ON_large_tables(self): self.traj.f_add_parameter('TEST', 'test_switch_ON_LARGE_tables') ###Explore self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 1 self.env._traj.config.hdf5.overview.derived_parameters_overview = 1 self.make_run() hdf5file", "the same newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_derived_parameters=how, load_results=how) return newtraj", "x*y: %s != %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) 
for idx, traj in enumerate(self.traj.f_iter_runs(yields='copy')):", "self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs, 'There is no comment in node %s!' % node._v_name) hdf5file.close()", "self.traj.hdf5.purge_duplicate_comments = False ###Explore self.explore(self.traj) self.make_run() # newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", "pypet.tests.testutils.data import create_param_dict, add_params, multiply,\\ simple_calculations, TrajectoryComparator, multiply_args, multiply_with_storing, \\ multiply_with_graceful_exit def add_one_particular_item(traj,", "recursive=True, load_data=pypetconstants.LOAD_SKELETON) self.assertTrue(self.traj.new.group.v_annotations.annotation, 42) self.traj.f_delete_item('new.test.group') with self.assertRaises(pex.DataNotInStorageError): self.traj.parameters.f_load_child('new.test.group', load_data=pypetconstants.LOAD_SKELETON) def test_switch_on_all_comments(self): self.explore(self.traj) self.traj.hdf5.purge_duplicate_comments=0", "# multiproc=False, # deep_copy_data=False) # # traj = env.v_trajectory # # traj.f_add_parameter('dummy', 1)", "# group has children and recursive is false with self.assertRaises(TypeError): self.traj.parameters.new.f_remove_child('test') self.traj.new.f_remove_child('test', recursive=True)", "('$set', 'crunset'): my_set_func} class ResultSortTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' def set_mode(self): self.mode", "are still the same newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_parameters=2, load_derived_parameters=2,", "> 6MB' % str(size_in_mb)) mp_traj = self.traj old_multiproc = self.multiproc self.multiproc = False", "explore(self, traj): self.explored ={'Normal.trial': [0], 'Numpy.double': 
[np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.explored['csr_mat'][0][1,2]=44.0 self.explored['csr_mat'][1][2,2]=33", "make_temp_dir('experiments/tests/HDF5/testcoyp2.hdf5') # self.logfolder = make_temp_dir('experiments/tests/Log') # self.trajname = make_trajectory_name(self) # # env =", "add_large_data(traj): np_array = np.random.rand(100, 1000, 10) traj.f_add_result('l4rge', np_array) traj.f_store_item('l4rge') traj.f_remove_item('l4rge') array_list = []", "'nice') and psutil is None, 'Niceness not supported under non Unix.') def test_niceness(self):", "self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_expand_after_reload(self): ###Explore self.explore(self.traj) results", "printing and repr work get_root_logger().info(str(self.env)) get_root_logger().info(repr(self.env)) newtraj = Trajectory() newtraj.f_load(name=self.traj.v_name, as_new=False, load_data=2, filename=self.filename)", "Environment(continuable=True, wrap_mode='QUEUE', continue_folder=tmp) with self.assertRaises(ValueError): Environment(use_scoop=True, wrap_mode='QUEUE') with self.assertRaises(ValueError): Environment(automatic_storing=False, continuable=True, continue_folder=tmp) with", "load_trajectory(index=-1, filename=filename) self.assertTrue('hi' in traj) def with_niceness(traj): if traj.multiproc: if hasattr(os, 'nice'): trajnice", "traj.f_load(load_data=3) service_logger.setLevel(old_level) root.setLevel(old_level) path = get_log_path(traj) mainfilename = os.path.join(path, 'LOG.txt') with open(mainfilename, mode='r')", "run_name = traj.f_idx_to_run(idx) self.assertTrue(traj is self.traj) newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx", "False ###Explore 
self.explore(self.traj) self.make_run() # newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj)", "def test_file_overwriting(self): self.traj.f_store() with pt.open_file(self.filename, mode='r') as file: nchildren = len(file.root._v_children) self.assertTrue(nchildren >", "self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_if_results_are_sorted_correctly_using_map(self): ###Explore self.explore(self.traj) args1=[10*x for x in", "set_mode(self): self.mode = 'LOCK' self.multiproc = False self.gc_interval = None self.ncores = 1", "import cartesian_product from pypet.environment import Environment from pypet.storageservice import HDF5StorageService from pypet import", "load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False, how=2): ### Load The Trajectory and check if the values are still", "test_store_form_tuple(self): self.traj.f_store() self.traj.f_add_result('TestResItem', 42, 43) with self.assertRaises(ValueError): self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem,(),{},5)) self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem)) self.traj.results.f_remove_child('TestResItem') self.assertTrue('TestResItem'", "if printing and repr work get_root_logger().info(str(self.env)) get_root_logger().info(repr(self.env)) newtraj = Trajectory() newtraj.f_load(name=self.traj.v_name, as_new=False, load_data=2,", "###Explore self.explore(self.traj) self.make_run() traj_name = self.traj.v_name self.env = Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) self.traj =", "simple_kwarg= 13.0 results = 
self.env.f_run(simple_calculations,simple_arg,simple_kwarg=simple_kwarg) self.are_results_in_order(results) def test_a_large_run(self): get_root_logger().info('Testing large run') self.traj.f_add_parameter('TEST', 'test_run')", "test_if_results_are_sorted_correctly(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply) self.are_results_in_order(results) self.assertEqual(len(results), len(self.traj)) traj = self.traj self.assertTrue(len(traj)", "man_multiply = manual_run()(multiply_with_storing) for idx in self.traj.f_iter_runs(yields='idx'): self.assertTrue(isinstance(idx, int)) man_multiply(self.traj) traj = self.traj", "= Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name) self.traj.res.f_remove() self.traj.dpar.f_remove() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$", "the full name self.assertTrue(not name in overview_group, '%s in overviews but should not!'", "= False self.ncores = 1 self.use_pool=True self.log_stdout=False self.freeze_input=False self.use_scoop = False self.log_config =", "how=2): ### Load The Trajectory and check if the values are still the", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) for idx, run_name in enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx", "load_data=1) for idx, run_name in enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx", "Get only the name of the table, no the full name self.assertTrue(not name", "graceful_exit=self.graceful_exit) traj = env.v_trajectory traj.v_standard_parameter=Parameter traj.f_add_parameter('x',99) traj.f_add_parameter('y',99) self.env=env self.traj=traj def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False, how=2): ###", "newtraj.f_load(name=self.traj.v_name, index=None, as_new=False, 
load_data=0) newtraj.v_auto_load = True newtraj.par.f_load_child('y', load_data=1) for idx, run_name in", "self.env.f_run(multiply) self.are_results_in_order(results) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) self.expand(self.traj)", "self.assertTrue('run_%08d' % (idx+1) not in nameset) self.assertTrue('run_%08d' % idx in nameset) self.assertTrue(traj.v_crun ==", "$$$$$$$$$$ \\n') self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def expand(self):", "traj.f_remove_child('hi') class SlowResult(Result): def _load(self, load_dict): time.sleep(3) super(SlowResult, self)._load(load_dict) class FullStorageTest(TrajectoryComparator): tags =", "= [42, 42, 42, 42] # # env.f_run(test_runfunc, list_that_should_change) # # traj.v_auto_load=True #", "self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def test_expand_after_reload(self): self.traj.f_add_parameter('TEST', 'test_expand_after_reload') ###Explore self.explore(self.traj) self.make_run() traj_name", "full_text = mainf.read() self.assertTrue('nodes/s)' in full_text) env.f_disable_logging() def make_run_large_data(self): self.env.f_run(add_large_data) def make_run(self): ###", "z != x*y: %s != %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) traj = self.traj", "'test_expand_after_reload') ###Explore self.explore(self.traj) self.make_run() traj_name = self.traj.v_name self.env = Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) self.traj", "use_scoop=self.use_scoop, port=self.port, add_time=self.add_time, timeout=self.timeout, 
graceful_exit=self.graceful_exit) traj = env.v_trajectory traj.v_standard_parameter=Parameter ## Create some parameters", "the environment self.traj = traj self.env = env @unittest.skipIf(not hasattr(os, 'nice') and psutil", "newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=self.traj.v_name, index=None, as_new=False, load_data=0) newtraj.v_auto_load = True newtraj.par.f_load_child('y', load_data=1) for idx, run_name", "= 1 self.env._traj.config.hdf5.overview.derived_parameters_overview = 1 self.make_run() hdf5file = pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+ self.traj.v_name,", "= make_trajectory_name(self) # # env = Environment(trajectory=self.trajname,filename=self.filename, # file_title=self.trajname, log_folder=self.logfolder, # log_stdout=False, #", "# self.filename = make_temp_dir('experiments/tests/HDF5/testcoyp2.hdf5') # self.logfolder = make_temp_dir('experiments/tests/Log') # self.trajname = make_trajectory_name(self) #", "graceful_exit=self.graceful_exit) traj = env.v_trajectory traj.v_standard_parameter=Parameter ## Create some parameters self.param_dict={} create_param_dict(self.param_dict) ### Add", "get_log_path from pypet.tests.testutils.data import create_param_dict, add_params, multiply,\\ simple_calculations, TrajectoryComparator, multiply_args, multiply_with_storing, \\ multiply_with_graceful_exit", "arg3=args3) self.assertEqual(len(results), len(self.traj)) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct_map(traj,", "= len(file.root._v_children) self.assertTrue(nchildren > 0) env2 = Environment(filename=self.filename, log_config=get_log_config()) traj2 = env2.v_trajectory traj2.f_store()", "self.multiproc = old_multiproc def test_errors(self): tmp = make_temp_dir('cont') if dill is not None:", "should = 
['derived_parameters_overview', 'results_overview'] for name in should: self.assertTrue(name in overview_group, '%s not", "file: nchildren = len(file.root._v_children) self.assertTrue(nchildren > 0) env2 = Environment(filename=self.filename, log_config=get_log_config()) traj2 =", "pypet import Parameter import tables as pt from pypet.tests.testutils.ioutils import run_suite, make_temp_dir, make_trajectory_name,\\", "traj.f_expand(self.expand_dict) def test_if_results_are_sorted_correctly_manual_runs(self): ###Explore self.explore(self.traj) self.traj.f_store(only_init=True) man_multiply = manual_run()(multiply_with_storing) for idx in self.traj.f_iter_runs(yields='idx'):", "# traj.f_add_parameter('dummy', 1) # traj.f_explore({'dummy':[12, 3, 3, 4]}) # # list_that_should_change = [42,", "res[0] self.assertEqual(self.traj.res.runs[idx].z, res[1]) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def test_graceful_exit(self): ###Explore", "self.complib = 'zlib' self.complevel=9 self.shuffle=True self.fletcher32 = False self.encoding = 'utf8' self.log_stdout=False self.wildcard_functions", "1) self.assertTrue(any(x > 1 for x in ncomments.values())) def my_run_func(idx): return 'hello_%d' %", "= traj self.env = env @unittest.skipIf(not hasattr(os, 'nice') and psutil is None, 'Niceness", "hdf5file = pt.open_file(self.filename) traj_group = hdf5file.get_node(where='/', name= self.traj.v_name) for node in traj_group._f_walk_groups(): if", "self.traj = traj self.env = env @unittest.skipIf(not hasattr(os, 'nice') and psutil is None,", "self.traj.overview.results_summary=0 self.make_run() hdf5file = pt.open_file(self.filename, mode='a') ncomments = {} try: traj_group = hdf5file.get_node(where='/',name=", "run simple_arg = -13 simple_kwarg= 13.0 results = 
self.env.f_run(simple_calculations,simple_arg,simple_kwarg=simple_kwarg) self.are_results_in_order(results) def test_a_large_run(self): get_root_logger().info('Testing", "def test_expand(self): ###Explore self.traj.f_add_parameter('TEST', 'test_expand') self.explore(self.traj) self.make_run() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$$$$$$ Second Run $$$$$$$$$$$$$$$$$$$$$$$$')", "'dia_mat' : matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} with self.assertRaises(pex.NotUniqueNodeError): traj.f_explore(self.explore_dict) traj.f_shrink(force=True) par_dict = traj.parameters.f_to_dict()", "[np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.expanded['csr_mat'][0][1,2]=44.0 self.expanded['csr_mat'][1][2,2]=33 self.expanded['csr_mat'][0]=self.expanded['csr_mat'][0].tocsr() self.expanded['csr_mat'][1]=self.expanded['csr_mat'][1].tocsr() self.traj.f_expand(cartesian_product(self.expanded)) self.traj.f_store() ################## Overview", "@unittest.skipIf(not hasattr(os, 'nice') and psutil is None, 'Niceness not supported under non Unix.')", "env = Environment(trajectory=self.trajname, filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config(), results_per_run=5, wildcard_functions=self.wildcard_functions, derived_parameters_per_run=5, multiproc=self.multiproc, ncores=self.ncores, wrap_mode=self.mode,", "in node._v_attrs: self.assertTrue('SRVC_INIT_COMMENT' in node._v_attrs, 'There is no comment in node %s!' 
%", "# for irun, val in enumerate(list_that_should_not_change): # self.assertTrue(list_that_should_not_change[irun] == 42) # x=traj.results.runs[irun].kkk #", "same newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=trajectory_name, index=trajectory_index, as_new=as_new, load_parameters=2, load_derived_parameters=2, load_results=2, load_other_data=2) return", "in node._v_pathname or '/results/' in node._v_pathname): if 'SRVC_LEAF' in node._v_attrs: if 'SRVC_INIT_COMMENT' in", "os niceness; ' '%s != %s' % (str(trajnice), str(osnice))) def add_large_data(traj): np_array =", "'test_run') self.traj.hdf5.purge_duplicate_comments = False ###Explore self.explore(self.traj) self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", "index=None, as_new=False, load_data=0) newtraj.v_auto_load = True newtraj.par.f_load_child('y', load_data=1) for idx, run_name in enumerate(self.traj.f_iter_runs()):", "= size/1000000. 
get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 2.0, 'Size is %sMB", "enumerate(list_that_should_not_change): # self.assertTrue(list_that_should_not_change[irun] == 42) # x=traj.results.runs[irun].kkk # self.assertTrue(x==42+irun) # # def test_not_deep_copy_data(self):", "self.complib = 'blosc' self.complevel=2 self.shuffle=False self.fletcher32 = False self.encoding='latin1' self.graceful_exit = True class", "if 'SRVC_LEAF' in node._v_attrs: if 'SRVC_INIT_COMMENT' in node._v_attrs: comment = node._v_attrs['SRVC_INIT_COMMENT'] if comment", "self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y: %s != %s * %s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) for idx,", "with self.assertRaises(ValueError): Environment(use_scoop=True, wrap_mode='QUEUE') with self.assertRaises(ValueError): Environment(automatic_storing=False, continuable=True, continue_folder=tmp) with self.assertRaises(ValueError): Environment(port='www.nosi.de', wrap_mode='LOCK')", "# traj.f_add_parameter('dummy', 1) # traj.f_explore({'dummy':[12, 3, 3, 4]}) # # list_that_should_not_change = [42,", "def explore_complex_params(self, traj): matrices_csr = [] for irun in range(3): spsparse_csr = spsp.lil_matrix((111,111))", "self.pandas_format='fixed' self.pandas_append=False self.complib = 'zlib' self.complevel=9 self.shuffle=True self.fletcher32 = False self.encoding = 'utf8'", "self.traj.v_name, name='overview') should_not = HDF5StorageService.NAME_TABLE_MAPPING.keys() for name in should_not: name = name.split('.')[-1] #", "############################# def test_switch_ON_large_tables(self): self.traj.f_add_parameter('TEST', 'test_switch_ON_LARGE_tables') ###Explore self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 1 self.env._traj.config.hdf5.overview.derived_parameters_overview = 1", "self.assertRaises(pex.NotUniqueNodeError): traj.f_explore(self.explore_dict) traj.f_shrink(force=True) par_dict = traj.parameters.f_to_dict() for 
param_name in par_dict: param = par_dict[param_name]", "in node._v_pathname): if 'SRVC_LEAF' in node._v_attrs: if 'SRVC_INIT_COMMENT' in node._v_attrs: comment = node._v_attrs['SRVC_INIT_COMMENT']", "not supported under non Unix.') def test_niceness(self): ###Explore self.explore(self.traj) self.env.f_run(with_niceness) self.assertTrue(self.traj.f_is_completed()) def test_file_overwriting(self):", "single core run self.setUp() self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments = False ###Explore self.explore(self.traj) self.make_run() #", "def test_if_results_are_sorted_correctly_manual_runs(self): ###Explore self.explore(self.traj) self.traj.f_store(only_init=True) man_multiply = manual_run()(multiply_with_storing) for idx in self.traj.f_iter_runs(yields='idx'): self.assertTrue(isinstance(idx,", "%sMB > 6MB' % str(size_in_mb)) def test_just_one_run(self): self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton()", "self.trajname = make_trajectory_name(self) # # env = Environment(trajectory=self.trajname,filename=self.filename, # file_title=self.trajname, log_folder=self.logfolder, # log_stdout=False,", "log_config=None) env3 = Environment(log_config=None, filename=self.filename) with self.assertRaises(ValueError): env3.f_run_map(multiply_args) with self.assertRaises(ValueError): Environment(use_scoop=True, immediate_postproc=True) with", "self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def test_expand_after_reload(self): self.traj.f_add_parameter('TEST', 'test_expand_after_reload')", "make_temp_dir('sloooow.hdf5') env = Environment(trajectory='traj', add_time=True, filename=filename, log_stdout=False, log_config=get_log_config(), 
dynamic_imports=SlowResult, display_time=0.1) traj = env.v_traj", "self.assertRaises(RuntimeError): self.traj.hdf5.purge_duplicate_comments = 1 self.traj.overview.results_summary = 0 self.make_run() self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=1 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=1 self.make_run()", "self.traj.TestResItem,(),{},5)) self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem)) self.traj.results.f_remove_child('TestResItem') self.assertTrue('TestResItem' not in self.traj) self.traj.results.f_load_child('TestResItem', load_data=pypetconstants.LOAD_SKELETON) self.traj.f_load_item((pypetconstants.LEAF,self.traj.TestResItem,(),{'load_only': 'TestResItem'})) self.assertTrue(self.traj.TestResItem,", "def set_mode(self): self.mode = 'LOCK' self.multiproc = False self.ncores = 1 self.use_pool=True self.log_stdout=False", "env = Environment(trajectory=self.trajname,filename=self.filename, # file_title=self.trajname, log_folder=self.logfolder, # log_stdout=False, # multiproc=False, # deep_copy_data=True) #", "load_derived_parameters=2, load_results=2) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name) self.compare_trajectories(newtraj,self.traj) def test_switch_off_all_tables(self): ###Explore self.traj.f_add_parameter('TEST', 'test_switch_off_ALL_tables') self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview", "\\ multiply_with_graceful_exit def add_one_particular_item(traj, store_full): traj.hi = Result('hi', 42, 'hi!') traj.f_store() traj.f_remove_child('hi') class", "%s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) traj = self.traj self.assertTrue(traj.v_idx == -1) self.assertTrue(traj.v_crun is None) self.assertTrue(traj.v_crun_", "self.explore_dict: param.f_unlock() if param.v_explored: param._shrink() self.explore_dict={'Numpy.string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco', 'Seis', 'Siette']), 
np.array(['Ocho', 'Nueve',", "hasattr(os, 'nice') and psutil is None, 'Niceness not supported under non Unix.') def", "traj.par.x = Parameter('x', 3, 'jj') traj.f_explore({'x': [1,2,3]}) env.f_run(add_one_particular_item, True) traj = load_trajectory(index=-1, filename=filename)", "class SlowResult(Result): def _load(self, load_dict): time.sleep(3) super(SlowResult, self)._load(load_dict) class FullStorageTest(TrajectoryComparator): tags = 'integration',", "filename = make_temp_dir('sloooow.hdf5') env = Environment(trajectory='traj', add_time=True, filename=filename, log_stdout=False, log_config=get_log_config(), dynamic_imports=SlowResult, display_time=0.1) traj", "= Result('hi', 42, 'hi!') traj.f_store() traj.f_remove_child('hi') class SlowResult(Result): def _load(self, load_dict): time.sleep(3) super(SlowResult,", "Trajectory() newtraj.f_load(name=self.traj.v_name, as_new=False, load_data=2, filename=self.filename) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) size=os.path.getsize(self.filename) size_in_mb = size/1000000.", "# self.assertTrue(x==42+irun) # # def test_not_deep_copy_data(self): # self.filename = make_temp_dir('experiments/tests/HDF5/testcoyp2.hdf5') # self.logfolder =", "idx in nameset) self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.res.runs.crun.z==newtraj.par.x*newtraj.par.y,' z != x*y: %s != %s", "only_empties=True) self.check_if_z_is_correct(traj) self.expand(self.traj) self.env.f_run(multiply) traj = self.traj self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+\\ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(),", "= self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) 
self.assertTrue(len(newtraj) == 1) size=os.path.getsize(self.filename) size_in_mb =", "in full_text) env.f_disable_logging() def make_run_large_data(self): self.env.f_run(add_large_data) def make_run(self): ### Make a test run", "# multiproc=False, # deep_copy_data=True) # # traj = env.v_trajectory # # traj.f_add_parameter('dummy', 1)", "self.shuffle=False self.fletcher32 = False self.encoding='latin1' self.graceful_exit = True class TestOtherHDF5Settings2(EnvironmentTest): tags = 'integration',", "array_list.append(np.random.rand(10)) traj.f_add_result('m4ny', *array_list) class SimpleEnvironmentTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment', 'quick' def test_make_default_file_when_giving_directory_without_slash(self):", "matrices_bsr = [] for irun in range(3): spsparse_bsr = spsp.lil_matrix((111,111)) spsparse_bsr[3,2+irun] = 44.5*irun", "the_file_name) class EnvironmentTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' def set_mode(self): self.mode = 'LOCK'", "traj = self.traj self.assertTrue(traj.v_idx == -1) self.assertTrue(traj.v_crun is None) self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx", "multiply_with_graceful_exit def add_one_particular_item(traj, store_full): traj.hi = Result('hi', 42, 'hi!') traj.f_store() traj.f_remove_child('hi') class SlowResult(Result):", "matrices_csc, 'bsr_mat' : matrices_bsr, 'dia_mat' : matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} with self.assertRaises(pex.NotUniqueNodeError): traj.f_explore(self.explore_dict)", "set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,)))) self.assertTrue('run_%08d' % (idx+1) not in nameset) self.assertTrue('run_%08d' %", "self.are_results_in_order(results) self.assertEqual(len(results), len(self.traj)) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), 
only_empties=True) self.check_if_z_is_correct(traj)", "def my_run_func(idx): return 'hello_%d' % idx def my_set_func(idx): return 'huhu_%d' % idx class", "newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def test_expand_after_reload(self): self.traj.f_add_parameter('TEST', 'test_expand_after_reload') ###Explore", "self.assertRaises(TypeError): self.explore(self.traj) def test_run_complex(self): self.traj.f_add_parameter('TEST', 'test_run_complex') ###Explore self.explore_complex_params(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False)", "overwrite_file=True, log_config=get_log_config()) self.assertFalse(os.path.exists(self.filename)) env2.f_disable_logging() env3.f_disable_logging() def test_time_display_of_loading(self): filename = make_temp_dir('sloooow.hdf5') env = Environment(trajectory='traj',", "[] for irun in range(3): spsparse_csr = spsp.lil_matrix((111,111)) spsparse_csr[3,2+irun] = 44.5*irun matrices_csr.append(spsparse_csr.tocsr()) matrices_csc", "complib=self.complib, shuffle=self.shuffle, pandas_append=self.pandas_append, pandas_format=self.pandas_format, encoding=self.encoding, niceness=self.niceness, use_scoop=self.use_scoop, port=self.port, add_time=self.add_time, timeout=self.timeout, graceful_exit=self.graceful_exit) traj =", "pypet.environment import Environment from pypet.storageservice import HDF5StorageService from pypet import pypetconstants, Result, manual_run", "self.trajname = make_trajectory_name(self) self.filename = make_temp_dir(os.path.join('experiments', 'tests', 'HDF5', 'test%s.hdf5' % self.trajname)) env =", "self.traj.v_name) for node in traj_group._f_walk_groups(): if ('/derived_parameters/' in node._v_pathname or '/results/' in node._v_pathname):", "for x in 
ncomments.values())) def my_run_func(idx): return 'hello_%d' % idx def my_set_func(idx): return", "self.assertTrue(z==x*y,' z != x*y: %s != %s * %s' % (str(z),str(x),str(y))) traj.v_idx=-1 traj.v_shortcuts=True", "make_temp_dir('full_store.hdf5') with Environment(filename=filename, log_config=get_log_config()) as env: traj = env.v_trajectory traj.par.x = Parameter('x', 3,", "self.assertTrue(not name in overview_group, '%s in overviews but should not!' % name) hdf5file.close()", "={'Normal.trial': [0,1]} traj.f_explore(cartesian_product(self.explored)) def tearDown(self): self.env.f_disable_logging() super(EnvironmentTest, self).tearDown() def setUp(self): self.set_mode() self.logfolder =", "self.traj.f_delete_item('new.test.group') with self.assertRaises(pex.DataNotInStorageError): self.traj.parameters.f_load_child('new.test.group', load_data=pypetconstants.LOAD_SKELETON) def test_switch_on_all_comments(self): self.explore(self.traj) self.traj.hdf5.purge_duplicate_comments=0 self.make_run() hdf5file = pt.open_file(self.filename)", "self.assertTrue(traj.v_crun_ == pypetconstants.RUN_NAME_DUMMY) self.assertTrue(newtraj.v_idx == idx) def test_f_iter_runs_auto_load(self): ###Explore self.explore(self.traj) results = self.env.f_run(multiply)", "if (not scoop.IS_RUNNING or scoop.IS_ORIGIN): return raise RuntimeError('traj niceness != os niceness; '", "'blosc' self.complevel=2 self.shuffle=False self.fletcher32 = False self.encoding='latin1' self.graceful_exit = True class TestOtherHDF5Settings2(EnvironmentTest): tags", "newtraj.v_idx = idx nameset = set((x.v_name for x in traj.f_iter_nodes(predicate=(idx,)))) self.assertTrue('run_%08d' % (idx+1)", "# self.trajname = make_trajectory_name(self) # # env = Environment(trajectory=self.trajname,filename=self.filename, # file_title=self.trajname, log_folder=self.logfolder, #", "tearDown(self): self.env.f_disable_logging() super(ResultSortTest, self).tearDown() def setUp(self): self.set_mode() self.filename = 
make_temp_dir(os.path.join('experiments','tests','HDF5','sort_tests.hdf5')) self.trajname = make_trajectory_name(self)", "= self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename)", "self.use_pool=True self.log_stdout=False self.freeze_input=False self.use_scoop = False self.log_config = True self.port = None self.graceful_exit", "(str(traj.crun.z),str(traj.x),str(traj.y))) traj.v_idx=-1 def check_if_z_is_correct(self,traj): traj.v_shortcuts=False for x in range(len(traj)): traj.v_idx=x z = traj.res.runs.crun.z", "newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=self.traj.v_name, index=None, as_new=False, load_data=0) newtraj.v_auto_load = True newtraj.par.f_load_child('y', load_data=1)", "%s' % (str(newtraj.crun.z),str(traj.x),str(traj.y))) for idx, traj in enumerate(self.traj.f_iter_runs(yields='copy')): run_name = traj.f_idx_to_run(idx) self.assertTrue(traj is", "'environment' def set_mode(self): self.mode = 'LOCK' self.multiproc = False self.ncores = 1 self.use_pool=True", "# deep_copy_data=False) # # traj = env.v_trajectory # # traj.f_add_parameter('dummy', 1) # traj.f_explore({'dummy':[12,", "multiply,\\ simple_calculations, TrajectoryComparator, multiply_args, multiply_with_storing, \\ multiply_with_graceful_exit def add_one_particular_item(traj, store_full): traj.hi = Result('hi',", "None self.add_time=True self.graceful_exit = False def explore_complex_params(self, traj): matrices_csr = [] for irun", "= root.level service_logger.setLevel(logging.INFO) root.setLevel(logging.INFO) traj.f_load(load_data=3) service_logger.setLevel(old_level) root.setLevel(old_level) path = get_log_path(traj) mainfilename = os.path.join(path,", "'%s not in overviews but 
it should!' % name) hdf5file.close() self.traj.f_load(load_parameters=2, load_derived_parameters=2, load_results=2)", "continue_folder=tmp) with self.assertRaises(ValueError): Environment(use_scoop=True, wrap_mode='QUEUE') with self.assertRaises(ValueError): Environment(automatic_storing=False, continuable=True, continue_folder=tmp) with self.assertRaises(ValueError): Environment(port='www.nosi.de',", "niceness=self.niceness, use_scoop=self.use_scoop, port=self.port, add_time=self.add_time, timeout=self.timeout, graceful_exit=self.graceful_exit) traj = env.v_trajectory traj.v_standard_parameter=Parameter ## Create some", "environment self.traj = traj self.env = env @unittest.skipIf(not hasattr(os, 'nice') and psutil is", "= self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) for res in results:", "len(self.traj)) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) for res", "hdf5file.get_node(where='/',name= self.traj.v_name) for node in traj_group._f_walk_groups(): if ('/derived_parameters/' in node._v_pathname or '/results/' in", "= self.env.f_run_map(multiply_args, args1, arg2=args2, arg3=args3) self.assertEqual(len(results), len(self.traj)) traj = self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0]))", "self.traj.f_add_parameter('TEST', 'test_switch_ON_LARGE_tables') ###Explore self.explore(self.traj) self.env._traj.config.hdf5.overview.results_overview = 1 self.env._traj.config.hdf5.overview.derived_parameters_overview = 1 self.make_run() hdf5file =", "in traj.f_iter_nodes(predicate=(idx,)))) self.assertTrue('run_%08d' % (idx+1) not in nameset) self.assertTrue('run_%08d' % idx in nameset)", "== 
len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct_map(traj, args1, args2, args3) for res in results:", "self.traj.v_name self.env = Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name) self.traj.res.f_remove() self.traj.dpar.f_remove() self.expand()", "self.assertTrue('run_%08d' % idx in nameset) self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y: %s", "traj.f_idx_to_run(idx) self.assertTrue(traj is self.traj) newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx nameset =", ": matrices_bsr, 'dia_mat' : matrices_dia, 'list' : [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} traj.f_explore(self.explore_dict) def explore(self, traj): self.explored", "self.explore_complex_params(self.traj) self.make_run() self.assertTrue(self.traj.f_is_completed()) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False):", ": [['fff'],[444444,444,44,4,4,4],[1,2,3,42]]} traj.f_explore(self.explore_dict) def explore(self, traj): self.explored ={'Normal.trial': [0], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat'", "import scoop if (not scoop.IS_RUNNING or scoop.IS_ORIGIN): return raise RuntimeError('traj niceness != os", "self.traj) self.traj.results.f_load_child('TestResItem', load_data=pypetconstants.LOAD_SKELETON) self.traj.f_load_item((pypetconstants.LEAF,self.traj.TestResItem,(),{'load_only': 'TestResItem'})) self.assertTrue(self.traj.TestResItem, 42) def test_store_single_group(self): self.traj.f_store() 
self.traj.f_add_parameter_group('new.test.group').v_annotations.f_set(42) self.traj.f_store_item('new.group') #", "'Diez'])], 'int':[1,2,3], 'csr_mat' : matrices_csr, 'csc_mat' : matrices_csc, 'bsr_mat' : matrices_bsr, 'dia_mat' :", "== len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) for res in results: self.assertEqual(len(res), 2) self.assertTrue(isinstance(res[0],", "1) size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 2.0,", "val in enumerate(list_that_should_change): # self.assertTrue(list_that_should_change[irun] == 1000) if __name__ == '__main__': opt_args =", "None self.port = None self.timeout = None self.add_time=True self.graceful_exit = False def explore_complex_params(self,", "self.assertRaises(ValueError): Environment(use_scoop=True, wrap_mode='QUEUE') with self.assertRaises(ValueError): Environment(automatic_storing=False, continuable=True, continue_folder=tmp) with self.assertRaises(ValueError): Environment(port='www.nosi.de', wrap_mode='LOCK') def", "0 self.make_run() self.traj.f_get('purge_duplicate_comments').f_unlock() self.traj.hdf5.purge_duplicate_comments=1 self.traj.f_get('results_summary').f_unlock() self.traj.overview.results_summary=1 self.make_run() hdf5file = pt.open_file(self.filename, mode='a') ncomments =", "self.log_stdout=False self.freeze_input=False self.use_scoop = False self.log_config = True self.port = None self.graceful_exit =", "Run $$$$$$$$$$ \\n') self.make_run() newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def", "Environment(automatic_storing=False, continuable=True, continue_folder=tmp) with self.assertRaises(ValueError): 
Environment(port='www.nosi.de', wrap_mode='LOCK') def test_run(self): self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore(self.traj)", "== len(list(self.expand_dict.values())[0])+\\ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", "class FullStorageTest(TrajectoryComparator): tags = 'integration', 'hdf5', 'environment' # Test tags def test_full_store(self): filename", "filename = make_temp_dir('test.hdf5') head, tail = os.path.split(filename) env = Environment(filename=head) the_file_name = env.v_traj.v_name", "self.traj.parameters.new.f_remove_child('group') # group is below test not new, so ValueError thrown: with self.assertRaises(ValueError):", "with self.assertRaises(ValueError): Environment(automatic_storing=False, continuable=True, continue_folder=tmp) with self.assertRaises(ValueError): Environment(port='www.nosi.de', wrap_mode='LOCK') def test_run(self): self.traj.f_add_parameter('TEST', 'test_run')", "def explore(self, traj): self.explored ={'Normal.trial': [0], 'Numpy.double': [np.array([1.0,2.0,3.0,4.0]), np.array([-1.0,3.0,5.0,7.0])], 'csr_mat' :[spsp.lil_matrix((2222,22)), spsp.lil_matrix((2222,22))]} self.explored['csr_mat'][0][1,2]=44.0", "= self.env.f_run(simple_calculations,simple_arg,simple_kwarg=simple_kwarg) self.are_results_in_order(results) def test_a_large_run(self): get_root_logger().info('Testing large run') self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore_large(self.traj) self.make_run_large_data()", "6],'y':[1,1,2,2,3,4,4]}) traj.f_explore(self.explore_dict) def expand(self,traj): self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12,13]} with self.assertRaises(ValueError): 
traj.f_expand(self.expand_dict) self.expand_dict={'x':[10,11,12,13],'y':[11,11,12,12]} traj.f_expand(self.expand_dict) def test_if_results_are_sorted_correctly_manual_runs(self): ###Explore", "import dill except ImportError: dill = None import scipy.sparse as spsp import random", "not in self.traj) self.traj.new.f_load_child('test', recursive=True, load_data=pypetconstants.LOAD_SKELETON) self.assertTrue(self.traj.new.group.v_annotations.annotation, 42) self.traj.f_delete_item('new.test.group') with self.assertRaises(pex.DataNotInStorageError): self.traj.parameters.f_load_child('new.test.group', load_data=pypetconstants.LOAD_SKELETON)", "env = Environment(trajectory=self.trajname,filename=self.filename, file_title=self.trajname, log_stdout=self.log_stdout, log_config=get_log_config() if self.log_config else None, multiproc=self.multiproc, wrap_mode=self.mode, ncores=self.ncores,", "run') self.traj.f_add_parameter('TEST', 'test_run') ###Explore self.explore_large(self.traj) self.make_run_large_data() self.assertTrue(self.traj.f_is_completed()) # Check if printing and repr", "= [42, 42, 42, 42] # # env.f_run(test_runfunc, list_that_should_not_change) # # traj.v_auto_load=True #", "psutil.Process().nice() if trajnice != osnice: if traj.use_scoop: import scoop if (not scoop.IS_RUNNING or", "self.assertTrue(traj is self.traj) newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx nameset = set((x.v_name", "with pt.open_file(self.filename, mode='r') as file: nchildren = len(file.root._v_children) self.assertTrue(nchildren > 0) env2 =", "in range(len(self.traj))] args2=[100*x for x in range(len(self.traj))] args3=list(range(len(self.traj))) results = self.env.f_run_map(multiply_args, args1, arg2=args2,", "multiproc=self.multiproc, ncores=self.ncores, wrap_mode=self.mode, use_pool=self.use_pool, gc_interval=self.gc_interval, freeze_input=self.freeze_input, fletcher32=self.fletcher32, complevel=self.complevel, complib=self.complib, shuffle=self.shuffle, 
pandas_append=self.pandas_append, pandas_format=self.pandas_format, encoding=self.encoding,", "0 self.env._traj.config.hdf5.overview.parameters_overview = 0 self.env._traj.config.hdf5.overview.config_overview = 0 self.env._traj.config.hdf5.overview.explored_parameters_overview = 0 self.make_run() hdf5file =", "= 0 self.env._traj.config.hdf5.overview.results_summary = 0 self.env._traj.config.hdf5.purge_duplicate_comments = 0 self.env._traj.config.hdf5.overview.parameters_overview = 0 self.env._traj.config.hdf5.overview.config_overview =", "self.assertEqual(len(traj), len(list(list(self.explore_dict.values())[0]))) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) traj_name = self.env.v_trajectory.v_name del self.env self.env =", "self.assertTrue('TestResItem' not in self.traj) self.traj.results.f_load_child('TestResItem', load_data=pypetconstants.LOAD_SKELETON) self.traj.f_load_item((pypetconstants.LEAF,self.traj.TestResItem,(),{'load_only': 'TestResItem'})) self.assertTrue(self.traj.TestResItem, 42) def test_store_single_group(self): self.traj.f_store()", "pt.open_file(self.filename) traj_group = hdf5file.get_node(where='/', name= self.traj.v_name) for node in traj_group._f_walk_groups(): if 'SRVC_LEAF' in", "idx, run_name in enumerate(self.traj.f_iter_runs()): newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx nameset =", "node._v_name) hdf5file.close() def test_purge_duplicate_comments(self): self.explore(self.traj) with self.assertRaises(RuntimeError): self.traj.hdf5.purge_duplicate_comments = 1 self.traj.overview.results_summary = 0", "def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False, how=2): ### Load The Trajectory and check if the values are", "= 1 self.use_pool=True self.pandas_format='table' self.pandas_append=True self.complib = 'blosc' self.complevel=2 self.shuffle=False self.fletcher32 = False", "'hdf5', 'environment' # Test 
tags def test_full_store(self): filename = make_temp_dir('full_store.hdf5') with Environment(filename=filename, log_config=get_log_config())", "self.env._traj.config.hdf5.overview.results_overview = 1 self.env._traj.config.hdf5.overview.derived_parameters_overview = 1 self.make_run() hdf5file = pt.open_file(self.filename) overview_group = hdf5file.get_node(where='/'+", "traj.f_store() traj.f_remove_child('hi') class SlowResult(Result): def _load(self, load_dict): time.sleep(3) super(SlowResult, self)._load(load_dict) class FullStorageTest(TrajectoryComparator): tags", "and the environment self.traj = traj self.env = env @unittest.skipIf(not hasattr(os, 'nice') and", "but it should!' % name) hdf5file.close() self.traj.f_load(load_parameters=2, load_derived_parameters=2, load_results=2) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name) self.compare_trajectories(newtraj,self.traj)", "str(osnice))) def add_large_data(traj): np_array = np.random.rand(100, 1000, 10) traj.f_add_result('l4rge', np_array) traj.f_store_item('l4rge') traj.f_remove_item('l4rge') array_list", "len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj,newtraj) def", "self.check_if_z_is_correct(traj) self.expand(self.traj) self.env.f_run(multiply) traj = self.traj self.assertTrue(len(traj) == len(list(self.expand_dict.values())[0])+\\ len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True)", "nameset) self.assertTrue('run_%08d' % idx in nameset) self.assertTrue(traj.v_crun == run_name) self.assertTrue(newtraj.crun.z==traj.x*traj.y,' z != x*y:", "3, 4]}) # # 
list_that_should_change = [42, 42, 42, 42] # # env.f_run(test_runfunc,", "42, 42] # # env.f_run(test_runfunc, list_that_should_change) # # traj.v_auto_load=True # # for irun,", "Test tags def test_full_store(self): filename = make_temp_dir('full_store.hdf5') with Environment(filename=filename, log_config=get_log_config()) as env: traj", "'hdf5', 'environment' def set_mode(self): self.mode = 'LOCK' self.multiproc = False self.ncores = 1", "multiply_with_storing, \\ multiply_with_graceful_exit def add_one_particular_item(traj, store_full): traj.hi = Result('hi', 42, 'hi!') traj.f_store() traj.f_remove_child('hi')", "str(size_in_mb)) mp_traj = self.traj old_multiproc = self.multiproc self.multiproc = False ### Make a", "not in overviews but it should!' % name) hdf5file.close() self.traj.f_load(load_parameters=2, load_derived_parameters=2, load_results=2) newtraj", "add_one_particular_item(traj, store_full): traj.hi = Result('hi', 42, 'hi!') traj.f_store() traj.f_remove_child('hi') class SlowResult(Result): def _load(self,", "is not None: env1 = Environment(continuable=True, continue_folder=tmp, log_config=None, filename=self.filename) with self.assertRaises(ValueError): env1.f_run_map(multiply_args, [1],", "[] for irun in range(3): spsparse_dia = spsp.lil_matrix((111,111)) spsparse_dia[3,2+irun] = 44.5*irun matrices_dia.append(spsparse_dia.tocsc().todia()) self.explore_dict={'string':[np.array(['Uno',", "in self.explore_dict: param.f_unlock() if param.v_explored: param._shrink() self.explore_dict={'Numpy.string':[np.array(['Uno', 'Dos', 'Tres']), np.array(['Cinco', 'Seis', 'Siette']), np.array(['Ocho',", "= self.traj self.assertTrue(len(traj) == len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct_map(traj, args1, args2, args3) for", "self.wildcard_functions = {('$', 'crun') : my_run_func, ('$set', 'crunset'): my_set_func} class 
ResultSortTest(TrajectoryComparator): tags =", "Environment(continuable=True, continue_folder=tmp, log_config=None, filename=self.filename) with self.assertRaises(ValueError): env1.f_run_map(multiply_args, [1], [2], [3]) with self.assertRaises(ValueError): Environment(multiproc=True,", "[2], [3]) with self.assertRaises(ValueError): Environment(multiproc=True, use_pool=False, freeze_input=True, filename=self.filename, log_config=None) env3 = Environment(log_config=None, filename=self.filename)", "traj.f_idx_to_run(idx) self.assertTrue(traj is not self.traj) newtraj.v_crun=run_name self.traj.v_idx = idx newtraj.v_idx = idx nameset", "% str(size_in_mb)) self.assertTrue(size_in_mb < 6.0, 'Size is %sMB > 6MB' % str(size_in_mb)) self.compare_trajectories(mp_traj,", "len(list(self.explore_dict.values())[0])) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.check_if_z_is_correct(traj) newtraj = Trajectory() newtraj.v_storage_service=HDF5StorageService(filename=self.filename) newtraj.f_load(name=self.traj.v_name, index=None, as_new=False, load_data=0)", "pypet.pypetexceptions as pex import sys import unittest try: import psutil except ImportError: psutil", "self.make_run() traj_name = self.traj.v_name self.env = Environment(trajectory=self.traj, log_stdout=False, log_config=get_log_config()) self.traj = self.env.v_trajectory self.traj.f_load(name=traj_name)", "newtraj = self.load_trajectory(trajectory_name=self.traj.v_name,as_new=False) self.traj.f_load_skeleton() self.traj.f_load_items(self.traj.f_to_dict().keys(), only_empties=True) self.compare_trajectories(self.traj, newtraj) def load_trajectory(self,trajectory_index=None,trajectory_name=None,as_new=False): ### Load The", "###Explore self.traj.f_add_parameter('TEST', 'test_expand') self.explore(self.traj) self.make_run() self.expand() get_root_logger().info('\\n $$$$$$$$$$$$$$$$$ Second Run $$$$$$$$$$$$$$$$$$$$$$$$') self.make_run() newtraj", "size/1000000. 
get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb < 6.0, 'Size is %sMB >", "self.assertTrue(len(newtraj) == 1) size=os.path.getsize(self.filename) size_in_mb = size/1000000. get_root_logger().info('Size is %sMB' % str(size_in_mb)) self.assertTrue(size_in_mb", "for name in should_not: name = name.split('.')[-1] # Get only the name of", "'%s != %s' % (str(trajnice), str(osnice))) def add_large_data(traj): np_array = np.random.rand(100, 1000, 10)", "with self.assertRaises(ValueError): self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem,(),{},5)) self.traj.f_store_item((pypetconstants.LEAF, self.traj.TestResItem)) self.traj.results.f_remove_child('TestResItem') self.assertTrue('TestResItem' not in self.traj) self.traj.results.f_load_child('TestResItem', load_data=pypetconstants.LOAD_SKELETON)", "False ### Make a new single core run self.setUp() self.traj.f_add_parameter('TEST', 'test_run') self.traj.hdf5.purge_duplicate_comments =" ]
[ "license terms in the LICENSE file found in the top-level # directory of", "_bytes = bytes else: _bytes = lambda x: bytes(bytearray(x)) # Core definitions COIN", "part of python-ravencoinlib, including this file, may be copied, modified, # propagated, or", "COIN nIssueUniqueAssetBurnAmount = 5 * COIN # Burn Addresses strIssueAssetBurnAddress = \"RXissueAssetXXXXXXXXXXXXXXXXXhhZGt\" strReissueAssetBurnAddress", "strReissueAssetBurnAddress = \"RXReissueAssetXXXXXXXXXXXXXXVEFAWu\" strIssueSubAssetBurnAddress = \"RXissueSubAssetXXXXXXXXXXXXXWcwhwL\" strIssueUniqueAssetBurnAddress = \"RXissueUniqueAssetXXXXXXXXXXWEAe58\" # Global Burn Address", "python-ravencoinlib. # # It is subject to the license terms in the LICENSE", "sys.version > '3': _bytes = bytes else: _bytes = lambda x: bytes(bytearray(x)) #", "file, may be copied, modified, # propagated, or distributed except according to the", "assets deployed MAX_BLOCK_WEIGHT = 8000000 MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE/50 # 25? WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = _bytes([OP_RETURN,", "SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT = 2**256-1 >> 20 # Burn Amounts nIssueAssetBurnAmount =", "* from bitcoin.core.script import OP_RETURN if sys.version > '3': _bytes = bytes else:", "5 * COIN; # Burn Addresses strIssueAssetBurnAddress = \"n1issueAssetXXXXXXXXXXXXXXXXWdnemQ\" strReissueAssetBurnAddress = \"n1ReissueAssetXXXXXXXXXXXXXXWG9NLd\" strIssueSubAssetBurnAddress", "import sys import bitcoin.core from bitcoin.core import * from bitcoin.core.script import OP_RETURN if", "2000000 # after assets deployed MAX_BLOCK_WEIGHT = 8000000 MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE/50 # 25?", "= 'mainnet' GENESIS_BLOCK = 
CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28c60e4d5affff001ee0d47d010101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT = 2**256-1 >> 20", "0x24, 0xaa, 0x21, 0xa9, 0xed]) class CoreMainParams(bitcoin.core.CoreChainParams): MAX_MONEY = 21000000000 * COIN NAME", "# Burn Amounts nIssueAssetBurnAmount = 500 * COIN nReissueAssetBurnAmount = 100 * COIN", "COIN; nReissueAssetBurnAmount = 100 * COIN; nIssueSubAssetBurnAmount = 100 * COIN; nIssueUniqueAssetBurnAmount =", "100 * COIN nIssueUniqueAssetBurnAmount = 5 * COIN # Burn Addresses strIssueAssetBurnAddress =", "\"n1issueUniqueAssetXXXXXXXXXXS4695i\" # Global Burn Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" class CoreRegTestParams(bitcoin.core.CoreTestNetParams): MAX_MONEY = 21000000000", "COIN nReissueAssetBurnAmount = 100 * COIN nIssueSubAssetBurnAmount = 100 * COIN nIssueUniqueAssetBurnAmount =", "= 100 * COIN nIssueSubAssetBurnAmount = 100 * COIN nIssueUniqueAssetBurnAmount = 5 *", "distribution. # # No part of python-ravencoinlib, including this file, may be copied,", "bytes(bytearray(x)) # Core definitions COIN = 100000000 MAX_BLOCK_SIZE = 2000000 # after assets", "nIssueAssetBurnAmount = 500 * COIN nReissueAssetBurnAmount = 100 * COIN nIssueSubAssetBurnAmount = 100", "* COIN nReissueAssetBurnAmount = 100 * COIN nIssueSubAssetBurnAmount = 100 * COIN nIssueUniqueAssetBurnAmount", "of this distribution. 
# # No part of python-ravencoinlib, including this file, may", "= \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" # monkey patching bitcoin.core.COIN = COIN bitcoin.core.MAX_BLOCK_SIZE = MAX_BLOCK_SIZE bitcoin.core.MAX_BLOCK_WEIGHT =", "from bitcoin.core.script import OP_RETURN if sys.version > '3': _bytes = bytes else: _bytes", "if sys.version > '3': _bytes = bytes else: _bytes = lambda x: bytes(bytearray(x))", "strIssueAssetBurnAddress = \"n1issueAssetXXXXXXXXXXXXXXXXWdnemQ\" strReissueAssetBurnAddress = \"n1ReissueAssetXXXXXXXXXXXXXXWG9NLd\" strIssueSubAssetBurnAddress = \"n1issueSubAssetXXXXXXXXXXXXXbNiH6v\" strIssueUniqueAssetBurnAddress = \"n1issueUniqueAssetXXXXXXXXXXS4695i\" #", "bitcoin.core import * from bitcoin.core.script import OP_RETURN if sys.version > '3': _bytes =", "class CoreMainParams(bitcoin.core.CoreChainParams): MAX_MONEY = 21000000000 * COIN NAME = 'mainnet' GENESIS_BLOCK = CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28c60e4d5affff001ee0d47d010101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000'))", "Burn Amounts nIssueAssetBurnAmount = 500 * COIN; nReissueAssetBurnAmount = 100 * COIN; nIssueSubAssetBurnAmount", "found in the top-level # directory of this distribution. 
# # No part", "nIssueSubAssetBurnAmount = 100 * COIN nIssueUniqueAssetBurnAmount = 5 * COIN # Burn Addresses", "to the license terms in the LICENSE file found in the top-level #", "MAX_MONEY = 21000000000 * COIN NAME = 'testnet' GENESIS_BLOCK = CBlock.deserialize(x('02000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff2820e0a35bffff001e8847ee000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL =", "definitions COIN = 100000000 MAX_BLOCK_SIZE = 2000000 # after assets deployed MAX_BLOCK_WEIGHT =", "COIN NAME = 'testnet' GENESIS_BLOCK = CBlock.deserialize(x('02000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff2820e0a35bffff001e8847ee000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT = 2**256-1", "nIssueSubAssetBurnAmount = 100 * COIN; nIssueUniqueAssetBurnAmount = 5 * COIN; # Burn Addresses", "21000000000 * COIN NAME = 'regtest' GENESIS_BLOCK = 
CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28a621d95affff7f20010000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 150 PROOF_OF_WORK_LIMIT", "according to the terms contained in the # LICENSE file. import sys import", "import bitcoin.core from bitcoin.core import * from bitcoin.core.script import OP_RETURN if sys.version >", "= \"RXissueAssetXXXXXXXXXXXXXXXXXhhZGt\" strReissueAssetBurnAddress = \"RXReissueAssetXXXXXXXXXXXXXXVEFAWu\" strIssueSubAssetBurnAddress = \"RXissueSubAssetXXXXXXXXXXXXXWcwhwL\" strIssueUniqueAssetBurnAddress = \"RXissueUniqueAssetXXXXXXXXXXWEAe58\" # Global", "= \"n1ReissueAssetXXXXXXXXXXXXXXWG9NLd\" strIssueSubAssetBurnAddress = \"n1issueSubAssetXXXXXXXXXXXXXbNiH6v\" strIssueUniqueAssetBurnAddress = \"n1issueUniqueAssetXXXXXXXXXXS4695i\" # Global Burn Address strGlobalBurnAddress", "= WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC bitcoin.core.CoreMainParams = CoreMainParams bitcoin.core.CoreTestNetParams = CoreTestNetParams bitcoin.core.CoreRegTestParams = CoreRegTestParams def GetParams():", "strReissueAssetBurnAddress = \"n1ReissueAssetXXXXXXXXXXXXXXWG9NLd\" strIssueSubAssetBurnAddress = \"n1issueSubAssetXXXXXXXXXXXXXbNiH6v\" strIssueUniqueAssetBurnAddress = \"n1issueUniqueAssetXXXXXXXXXXS4695i\" # Global Burn Address", "= 8000000 MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE/50 # 25? 
WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = _bytes([OP_RETURN, 0x24, 0xaa, 0x21,", "nReissueAssetBurnAmount = 100 * COIN; nIssueSubAssetBurnAmount = 100 * COIN; nIssueUniqueAssetBurnAmount = 5", "= 2**256-1 >> 20 # Burn Amounts nIssueAssetBurnAmount = 500 * COIN; nReissueAssetBurnAmount", "copied, modified, # propagated, or distributed except according to the terms contained in", "\"n1issueAssetXXXXXXXXXXXXXXXXWdnemQ\" strReissueAssetBurnAddress = \"n1ReissueAssetXXXXXXXXXXXXXXWG9NLd\" strIssueSubAssetBurnAddress = \"n1issueSubAssetXXXXXXXXXXXXXbNiH6v\" strIssueUniqueAssetBurnAddress = \"n1issueUniqueAssetXXXXXXXXXXS4695i\" # Global Burn", "Burn Address strGlobalBurnAddress = \"RXBurnXXXXXXXXXXXXXXXXXXXXXXWUo9FV\" class CoreTestNetParams(bitcoin.core.CoreMainParams): MAX_MONEY = 21000000000 * COIN NAME", "NAME = 'mainnet' GENESIS_BLOCK = CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28c60e4d5affff001ee0d47d010101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT = 2**256-1 >>", "It is subject to the license terms in the LICENSE file found in", "WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC bitcoin.core.CoreMainParams = CoreMainParams bitcoin.core.CoreTestNetParams = CoreTestNetParams bitcoin.core.CoreRegTestParams = CoreRegTestParams def GetParams(): return", "2**256-1 >> 20 # Burn Amounts nIssueAssetBurnAmount = 500 * COIN nReissueAssetBurnAmount =", "> '3': _bytes = bytes else: _bytes = lambda x: bytes(bytearray(x)) # Core", "= 
\"n1issueSubAssetXXXXXXXXXXXXXbNiH6v\" strIssueUniqueAssetBurnAddress = \"n1issueUniqueAssetXXXXXXXXXXS4695i\" # Global Burn Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" #", "Burn Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" # monkey patching bitcoin.core.COIN = COIN bitcoin.core.MAX_BLOCK_SIZE =", "21000000000 * COIN NAME = 'mainnet' GENESIS_BLOCK = CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28c60e4d5affff001ee0d47d010101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT", "= COIN bitcoin.core.MAX_BLOCK_SIZE = MAX_BLOCK_SIZE bitcoin.core.MAX_BLOCK_WEIGHT = MAX_BLOCK_WEIGHT bitcoin.core.MAX_BLOCK_SIGOPS = MAX_BLOCK_SIGOPS bitcoin.core.WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC =", "COIN # Burn Addresses strIssueAssetBurnAddress = \"RXissueAssetXXXXXXXXXXXXXXXXXhhZGt\" strReissueAssetBurnAddress = \"RXReissueAssetXXXXXXXXXXXXXXVEFAWu\" strIssueSubAssetBurnAddress = \"RXissueSubAssetXXXXXXXXXXXXXWcwhwL\"", "<gh_stars>0 # Copyright (C) 2018 The python-ravencoinlib developers # # This file is", ">> 1 # Burn Amounts nIssueAssetBurnAmount = 500 * COIN; nReissueAssetBurnAmount = 100", "# propagated, or distributed except according to the terms contained in the #", "or distributed except according to the terms contained in the # LICENSE file.", "directory of this distribution. 
# # No part of python-ravencoinlib, including this file,", "# Burn Amounts nIssueAssetBurnAmount = 500 * COIN; nReissueAssetBurnAmount = 100 * COIN;", "= 2000000 # after assets deployed MAX_BLOCK_WEIGHT = 8000000 MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE/50 #", "= MAX_BLOCK_SIZE bitcoin.core.MAX_BLOCK_WEIGHT = MAX_BLOCK_WEIGHT bitcoin.core.MAX_BLOCK_SIGOPS = MAX_BLOCK_SIGOPS bitcoin.core.WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC bitcoin.core.CoreMainParams =", "COIN; nIssueSubAssetBurnAmount = 100 * COIN; nIssueUniqueAssetBurnAmount = 5 * COIN; # Burn", "to the terms contained in the # LICENSE file. import sys import bitcoin.core", "this file, may be copied, modified, # propagated, or distributed except according to", "CBlock.deserialize(x('02000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff2820e0a35bffff001e8847ee000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT = 2**256-1 >> 20 # Burn Amounts nIssueAssetBurnAmount", "\"RXissueUniqueAssetXXXXXXXXXXWEAe58\" # Global Burn Address strGlobalBurnAddress = \"RXBurnXXXXXXXXXXXXXXXXXXXXXXWUo9FV\" class CoreTestNetParams(bitcoin.core.CoreMainParams): MAX_MONEY = 21000000000", "SUBSIDY_HALVING_INTERVAL = 150 PROOF_OF_WORK_LIMIT = 2**256-1 >> 1 # Burn Amounts nIssueAssetBurnAmount =", "= \"n1issueSubAssetXXXXXXXXXXXXXbNiH6v\" strIssueUniqueAssetBurnAddress = \"n1issueUniqueAssetXXXXXXXXXXS4695i\" # Global Burn Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" class", 
"# Global Burn Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" class CoreRegTestParams(bitcoin.core.CoreTestNetParams): MAX_MONEY = 21000000000 *", "bitcoin.core.MAX_BLOCK_WEIGHT = MAX_BLOCK_WEIGHT bitcoin.core.MAX_BLOCK_SIGOPS = MAX_BLOCK_SIGOPS bitcoin.core.WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC bitcoin.core.CoreMainParams = CoreMainParams bitcoin.core.CoreTestNetParams", "file found in the top-level # directory of this distribution. # # No", "Addresses strIssueAssetBurnAddress = \"n1issueAssetXXXXXXXXXXXXXXXXWdnemQ\" strReissueAssetBurnAddress = \"n1ReissueAssetXXXXXXXXXXXXXXWG9NLd\" strIssueSubAssetBurnAddress = \"n1issueSubAssetXXXXXXXXXXXXXbNiH6v\" strIssueUniqueAssetBurnAddress = \"n1issueUniqueAssetXXXXXXXXXXS4695i\"", "bitcoin.core.WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC bitcoin.core.CoreMainParams = CoreMainParams bitcoin.core.CoreTestNetParams = CoreTestNetParams bitcoin.core.CoreRegTestParams = CoreRegTestParams def", "* COIN NAME = 'testnet' GENESIS_BLOCK = CBlock.deserialize(x('02000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff2820e0a35bffff001e8847ee000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT =", "Burn Amounts nIssueAssetBurnAmount = 500 * COIN nReissueAssetBurnAmount = 100 * COIN nIssueSubAssetBurnAmount", "LICENSE file. 
import sys import bitcoin.core from bitcoin.core import * from bitcoin.core.script import", "contained in the # LICENSE file. import sys import bitcoin.core from bitcoin.core import", "= 500 * COIN nReissueAssetBurnAmount = 100 * COIN nIssueSubAssetBurnAmount = 100 *", "* COIN nIssueSubAssetBurnAmount = 100 * COIN nIssueUniqueAssetBurnAmount = 5 * COIN #", "'3': _bytes = bytes else: _bytes = lambda x: bytes(bytearray(x)) # Core definitions", "= \"RXBurnXXXXXXXXXXXXXXXXXXXXXXWUo9FV\" class CoreTestNetParams(bitcoin.core.CoreMainParams): MAX_MONEY = 21000000000 * COIN NAME = 'testnet' GENESIS_BLOCK", "\"RXissueAssetXXXXXXXXXXXXXXXXXhhZGt\" strReissueAssetBurnAddress = \"RXReissueAssetXXXXXXXXXXXXXXVEFAWu\" strIssueSubAssetBurnAddress = \"RXissueSubAssetXXXXXXXXXXXXXWcwhwL\" strIssueUniqueAssetBurnAddress = \"RXissueUniqueAssetXXXXXXXXXXWEAe58\" # Global Burn", "= 'testnet' GENESIS_BLOCK = CBlock.deserialize(x('02000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff2820e0a35bffff001e8847ee000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT = 2**256-1 >> 20", "COIN; nIssueUniqueAssetBurnAmount = 5 * COIN; # Burn Addresses strIssueAssetBurnAddress = \"n1issueAssetXXXXXXXXXXXXXXXXWdnemQ\" strReissueAssetBurnAddress", "class CoreRegTestParams(bitcoin.core.CoreTestNetParams): MAX_MONEY = 21000000000 * COIN NAME = 'regtest' GENESIS_BLOCK = 
CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28a621d95affff7f20010000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000'))", "MAX_BLOCK_WEIGHT = 8000000 MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE/50 # 25? WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = _bytes([OP_RETURN, 0x24, 0xaa,", "nIssueAssetBurnAmount = 500 * COIN; nReissueAssetBurnAmount = 100 * COIN; nIssueSubAssetBurnAmount = 100", "\"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" class CoreRegTestParams(bitcoin.core.CoreTestNetParams): MAX_MONEY = 21000000000 * COIN NAME = 'regtest' GENESIS_BLOCK =", "'testnet' GENESIS_BLOCK = CBlock.deserialize(x('02000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff2820e0a35bffff001e8847ee000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT = 2**256-1 >> 20 #", "bitcoin.core.CoreMainParams = CoreMainParams bitcoin.core.CoreTestNetParams = CoreTestNetParams bitcoin.core.CoreRegTestParams = CoreRegTestParams def GetParams(): return bitcoin.core.coreparams", "# It is subject to the license terms in the 
LICENSE file found", "= \"RXissueUniqueAssetXXXXXXXXXXWEAe58\" # Global Burn Address strGlobalBurnAddress = \"RXBurnXXXXXXXXXXXXXXXXXXXXXXWUo9FV\" class CoreTestNetParams(bitcoin.core.CoreMainParams): MAX_MONEY =", "0xed]) class CoreMainParams(bitcoin.core.CoreChainParams): MAX_MONEY = 21000000000 * COIN NAME = 'mainnet' GENESIS_BLOCK =", "# Copyright (C) 2018 The python-ravencoinlib developers # # This file is part", "python-ravencoinlib, including this file, may be copied, modified, # propagated, or distributed except", "bitcoin.core from bitcoin.core import * from bitcoin.core.script import OP_RETURN if sys.version > '3':", "lambda x: bytes(bytearray(x)) # Core definitions COIN = 100000000 MAX_BLOCK_SIZE = 2000000 #", "'mainnet' GENESIS_BLOCK = CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28c60e4d5affff001ee0d47d010101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT = 2**256-1 >> 20 #", "= 
CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28c60e4d5affff001ee0d47d010101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT = 2**256-1 >> 20 # Burn Amounts", "Copyright (C) 2018 The python-ravencoinlib developers # # This file is part of", "Global Burn Address strGlobalBurnAddress = \"RXBurnXXXXXXXXXXXXXXXXXXXXXXWUo9FV\" class CoreTestNetParams(bitcoin.core.CoreMainParams): MAX_MONEY = 21000000000 * COIN", "NAME = 'testnet' GENESIS_BLOCK = CBlock.deserialize(x('02000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff2820e0a35bffff001e8847ee000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT = 2**256-1 >>", "CoreRegTestParams(bitcoin.core.CoreTestNetParams): MAX_MONEY = 21000000000 * COIN NAME = 'regtest' GENESIS_BLOCK = 
CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28a621d95affff7f20010000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL", "MAX_BLOCK_SIZE bitcoin.core.MAX_BLOCK_WEIGHT = MAX_BLOCK_WEIGHT bitcoin.core.MAX_BLOCK_SIGOPS = MAX_BLOCK_SIGOPS bitcoin.core.WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC bitcoin.core.CoreMainParams = CoreMainParams", "PROOF_OF_WORK_LIMIT = 2**256-1 >> 20 # Burn Amounts nIssueAssetBurnAmount = 500 * COIN;", "class CoreTestNetParams(bitcoin.core.CoreMainParams): MAX_MONEY = 21000000000 * COIN NAME = 'testnet' GENESIS_BLOCK = CBlock.deserialize(x('02000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff2820e0a35bffff001e8847ee000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000'))", "distributed except according to the terms contained in the # LICENSE file. 
import", "CoreTestNetParams(bitcoin.core.CoreMainParams): MAX_MONEY = 21000000000 * COIN NAME = 'testnet' GENESIS_BLOCK = CBlock.deserialize(x('02000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff2820e0a35bffff001e8847ee000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL", "= 21000000000 * COIN NAME = 'testnet' GENESIS_BLOCK = CBlock.deserialize(x('02000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff2820e0a35bffff001e8847ee000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000", "import * from bitcoin.core.script import OP_RETURN if sys.version > '3': _bytes = bytes", "This file is part of python-ravencoinlib. 
# # It is subject to the", "* COIN; nIssueUniqueAssetBurnAmount = 5 * COIN; # Burn Addresses strIssueAssetBurnAddress = \"n1issueAssetXXXXXXXXXXXXXXXXWdnemQ\"", "GENESIS_BLOCK = CBlock.deserialize(x('02000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff2820e0a35bffff001e8847ee000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT = 2**256-1 >> 20 # Burn", "PROOF_OF_WORK_LIMIT = 2**256-1 >> 1 # Burn Amounts nIssueAssetBurnAmount = 500 * COIN;", "= 150 PROOF_OF_WORK_LIMIT = 2**256-1 >> 1 # Burn Amounts nIssueAssetBurnAmount = 500", "20 # Burn Amounts nIssueAssetBurnAmount = 500 * COIN nReissueAssetBurnAmount = 100 *", "CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28c60e4d5affff001ee0d47d010101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT = 2**256-1 >> 20 # Burn Amounts nIssueAssetBurnAmount", "\"n1ReissueAssetXXXXXXXXXXXXXXWG9NLd\" strIssueSubAssetBurnAddress = \"n1issueSubAssetXXXXXXXXXXXXXbNiH6v\" 
strIssueUniqueAssetBurnAddress = \"n1issueUniqueAssetXXXXXXXXXXS4695i\" # Global Burn Address strGlobalBurnAddress =", "Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" # monkey patching bitcoin.core.COIN = COIN bitcoin.core.MAX_BLOCK_SIZE = MAX_BLOCK_SIZE", "= bytes else: _bytes = lambda x: bytes(bytearray(x)) # Core definitions COIN =", "python-ravencoinlib developers # # This file is part of python-ravencoinlib. # # It", "strIssueSubAssetBurnAddress = \"RXissueSubAssetXXXXXXXXXXXXXWcwhwL\" strIssueUniqueAssetBurnAddress = \"RXissueUniqueAssetXXXXXXXXXXWEAe58\" # Global Burn Address strGlobalBurnAddress = \"RXBurnXXXXXXXXXXXXXXXXXXXXXXWUo9FV\"", "# # It is subject to the license terms in the LICENSE file", "Burn Addresses strIssueAssetBurnAddress = \"RXissueAssetXXXXXXXXXXXXXXXXXhhZGt\" strReissueAssetBurnAddress = \"RXReissueAssetXXXXXXXXXXXXXXVEFAWu\" strIssueSubAssetBurnAddress = \"RXissueSubAssetXXXXXXXXXXXXXWcwhwL\" strIssueUniqueAssetBurnAddress =", "= 100 * COIN; nIssueUniqueAssetBurnAmount = 5 * COIN; # Burn Addresses strIssueAssetBurnAddress", "x: bytes(bytearray(x)) # Core definitions COIN = 100000000 MAX_BLOCK_SIZE = 2000000 # after", "0xa9, 0xed]) class CoreMainParams(bitcoin.core.CoreChainParams): MAX_MONEY = 21000000000 * COIN NAME = 'mainnet' GENESIS_BLOCK", "strIssueUniqueAssetBurnAddress = \"n1issueUniqueAssetXXXXXXXXXXS4695i\" # Global Burn Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" class CoreRegTestParams(bitcoin.core.CoreTestNetParams): MAX_MONEY", "= \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" class CoreRegTestParams(bitcoin.core.CoreTestNetParams): MAX_MONEY = 21000000000 * COIN NAME = 'regtest' GENESIS_BLOCK", "No part of python-ravencoinlib, including this file, may be copied, modified, # propagated,", "file. 
import sys import bitcoin.core from bitcoin.core import * from bitcoin.core.script import OP_RETURN", "COIN NAME = 'regtest' GENESIS_BLOCK = CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28a621d95affff7f20010000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 150 PROOF_OF_WORK_LIMIT = 2**256-1", "'regtest' GENESIS_BLOCK = CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28a621d95affff7f20010000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 150 PROOF_OF_WORK_LIMIT = 2**256-1 >> 1 #", "WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = _bytes([OP_RETURN, 0x24, 0xaa, 0x21, 0xa9, 0xed]) class CoreMainParams(bitcoin.core.CoreChainParams): MAX_MONEY = 21000000000", "= \"n1issueAssetXXXXXXXXXXXXXXXXWdnemQ\" strReissueAssetBurnAddress = \"n1ReissueAssetXXXXXXXXXXXXXXWG9NLd\" strIssueSubAssetBurnAddress = \"n1issueSubAssetXXXXXXXXXXXXXbNiH6v\" strIssueUniqueAssetBurnAddress = \"n1issueUniqueAssetXXXXXXXXXXS4695i\" # Global", "= MAX_BLOCK_SIZE/50 # 25? 
WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = _bytes([OP_RETURN, 0x24, 0xaa, 0x21, 0xa9, 0xed]) class", "after assets deployed MAX_BLOCK_WEIGHT = 8000000 MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE/50 # 25? WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC =", "(C) 2018 The python-ravencoinlib developers # # This file is part of python-ravencoinlib.", "bytes else: _bytes = lambda x: bytes(bytearray(x)) # Core definitions COIN = 100000000", "= 100 * COIN; nIssueSubAssetBurnAmount = 100 * COIN; nIssueUniqueAssetBurnAmount = 5 *", "in the LICENSE file found in the top-level # directory of this distribution.", "bitcoin.core.MAX_BLOCK_SIGOPS = MAX_BLOCK_SIGOPS bitcoin.core.WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC bitcoin.core.CoreMainParams = CoreMainParams bitcoin.core.CoreTestNetParams = CoreTestNetParams bitcoin.core.CoreRegTestParams", "COIN nIssueSubAssetBurnAmount = 100 * COIN nIssueUniqueAssetBurnAmount = 5 * COIN # Burn", "Amounts nIssueAssetBurnAmount = 500 * COIN nReissueAssetBurnAmount = 100 * COIN nIssueSubAssetBurnAmount =", "The python-ravencoinlib developers # # This file is part of python-ravencoinlib. # #", "modified, # propagated, or distributed except according to the terms contained in the", "of python-ravencoinlib. 
# # It is subject to the license terms in the", "of python-ravencoinlib, including this file, may be copied, modified, # propagated, or distributed", "= 21000000000 * COIN NAME = 'regtest' GENESIS_BLOCK = CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28a621d95affff7f20010000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 150", "= CBlock.deserialize(x('02000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff2820e0a35bffff001e8847ee000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT = 2**256-1 >> 20 # Burn Amounts", "= 
CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28a621d95affff7f20010000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 150 PROOF_OF_WORK_LIMIT = 2**256-1 >> 1 # Burn Amounts", "= 21000000000 * COIN NAME = 'mainnet' GENESIS_BLOCK = CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28c60e4d5affff001ee0d47d010101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000", "in the top-level # directory of this distribution. # # No part of", "MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE/50 # 25? 
WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = _bytes([OP_RETURN, 0x24, 0xaa, 0x21, 0xa9, 0xed])", "nReissueAssetBurnAmount = 100 * COIN nIssueSubAssetBurnAmount = 100 * COIN nIssueUniqueAssetBurnAmount = 5", "OP_RETURN if sys.version > '3': _bytes = bytes else: _bytes = lambda x:", "subject to the license terms in the LICENSE file found in the top-level", "_bytes = lambda x: bytes(bytearray(x)) # Core definitions COIN = 100000000 MAX_BLOCK_SIZE =", "file is part of python-ravencoinlib. # # It is subject to the license", "# No part of python-ravencoinlib, including this file, may be copied, modified, #", "# LICENSE file. import sys import bitcoin.core from bitcoin.core import * from bitcoin.core.script", "developers # # This file is part of python-ravencoinlib. # # It is", "= _bytes([OP_RETURN, 0x24, 0xaa, 0x21, 0xa9, 0xed]) class CoreMainParams(bitcoin.core.CoreChainParams): MAX_MONEY = 21000000000 *", "bitcoin.core.COIN = COIN bitcoin.core.MAX_BLOCK_SIZE = MAX_BLOCK_SIZE bitcoin.core.MAX_BLOCK_WEIGHT = MAX_BLOCK_WEIGHT bitcoin.core.MAX_BLOCK_SIGOPS = MAX_BLOCK_SIGOPS bitcoin.core.WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC", "deployed MAX_BLOCK_WEIGHT = 8000000 MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE/50 # 25? WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = _bytes([OP_RETURN, 0x24,", "LICENSE file found in the top-level # directory of this distribution. # #", "2**256-1 >> 1 # Burn Amounts nIssueAssetBurnAmount = 500 * COIN; nReissueAssetBurnAmount =", "is part of python-ravencoinlib. # # It is subject to the license terms", "terms in the LICENSE file found in the top-level # directory of this", "PROOF_OF_WORK_LIMIT = 2**256-1 >> 20 # Burn Amounts nIssueAssetBurnAmount = 500 * COIN", "part of python-ravencoinlib. 
# # It is subject to the license terms in", "else: _bytes = lambda x: bytes(bytearray(x)) # Core definitions COIN = 100000000 MAX_BLOCK_SIZE", "propagated, or distributed except according to the terms contained in the # LICENSE", "= 100 * COIN nIssueUniqueAssetBurnAmount = 5 * COIN # Burn Addresses strIssueAssetBurnAddress", "be copied, modified, # propagated, or distributed except according to the terms contained", "* COIN NAME = 'mainnet' GENESIS_BLOCK = CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28c60e4d5affff001ee0d47d010101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT =", "COIN = 100000000 MAX_BLOCK_SIZE = 2000000 # after assets deployed MAX_BLOCK_WEIGHT = 8000000", "= 2100000 PROOF_OF_WORK_LIMIT = 2**256-1 >> 20 # Burn Amounts nIssueAssetBurnAmount = 500", "the # LICENSE file. 
import sys import bitcoin.core from bitcoin.core import * from", "* COIN # Burn Addresses strIssueAssetBurnAddress = \"RXissueAssetXXXXXXXXXXXXXXXXXhhZGt\" strReissueAssetBurnAddress = \"RXReissueAssetXXXXXXXXXXXXXXVEFAWu\" strIssueSubAssetBurnAddress =", "21000000000 * COIN NAME = 'testnet' GENESIS_BLOCK = CBlock.deserialize(x('02000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff2820e0a35bffff001e8847ee000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT", "\"RXissueSubAssetXXXXXXXXXXXXXWcwhwL\" strIssueUniqueAssetBurnAddress = \"RXissueUniqueAssetXXXXXXXXXXWEAe58\" # Global Burn Address strGlobalBurnAddress = \"RXBurnXXXXXXXXXXXXXXXXXXXXXXWUo9FV\" class CoreTestNetParams(bitcoin.core.CoreMainParams):", "Global Burn Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" # monkey patching bitcoin.core.COIN = COIN bitcoin.core.MAX_BLOCK_SIZE", "the terms contained in the # LICENSE file. import sys import bitcoin.core from", "# This file is part of python-ravencoinlib. 
# # It is subject to", "is subject to the license terms in the LICENSE file found in the", "strGlobalBurnAddress = \"RXBurnXXXXXXXXXXXXXXXXXXXXXXWUo9FV\" class CoreTestNetParams(bitcoin.core.CoreMainParams): MAX_MONEY = 21000000000 * COIN NAME = 'testnet'", "strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" class CoreRegTestParams(bitcoin.core.CoreTestNetParams): MAX_MONEY = 21000000000 * COIN NAME = 'regtest'", "MAX_BLOCK_WEIGHT bitcoin.core.MAX_BLOCK_SIGOPS = MAX_BLOCK_SIGOPS bitcoin.core.WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC bitcoin.core.CoreMainParams = CoreMainParams bitcoin.core.CoreTestNetParams = CoreTestNetParams", "# after assets deployed MAX_BLOCK_WEIGHT = 8000000 MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE/50 # 25? WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC", "the top-level # directory of this distribution. # # No part of python-ravencoinlib,", "NAME = 'regtest' GENESIS_BLOCK = CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28a621d95affff7f20010000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 150 PROOF_OF_WORK_LIMIT = 2**256-1 >>", "bitcoin.core.script import OP_RETURN if sys.version > '3': _bytes = bytes else: _bytes =", "500 * COIN nReissueAssetBurnAmount = 100 * COIN nIssueSubAssetBurnAmount = 100 * COIN", "* COIN nIssueUniqueAssetBurnAmount = 5 * COIN # Burn Addresses strIssueAssetBurnAddress = \"RXissueAssetXXXXXXXXXXXXXXXXXhhZGt\"", "= 2**256-1 >> 20 # Burn Amounts nIssueAssetBurnAmount = 500 * COIN 
nReissueAssetBurnAmount", "_bytes([OP_RETURN, 0x24, 0xaa, 0x21, 0xa9, 0xed]) class CoreMainParams(bitcoin.core.CoreChainParams): MAX_MONEY = 21000000000 * COIN", "5 * COIN # Burn Addresses strIssueAssetBurnAddress = \"RXissueAssetXXXXXXXXXXXXXXXXXhhZGt\" strReissueAssetBurnAddress = \"RXReissueAssetXXXXXXXXXXXXXXVEFAWu\" strIssueSubAssetBurnAddress", "20 # Burn Amounts nIssueAssetBurnAmount = 500 * COIN; nReissueAssetBurnAmount = 100 *", ">> 20 # Burn Amounts nIssueAssetBurnAmount = 500 * COIN; nReissueAssetBurnAmount = 100", "MAX_MONEY = 21000000000 * COIN NAME = 'regtest' GENESIS_BLOCK = CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28a621d95affff7f20010000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL =", "GENESIS_BLOCK = CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28a621d95affff7f20010000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 150 PROOF_OF_WORK_LIMIT = 2**256-1 >> 1 # Burn", "bitcoin.core.MAX_BLOCK_SIZE = MAX_BLOCK_SIZE 
bitcoin.core.MAX_BLOCK_WEIGHT = MAX_BLOCK_WEIGHT bitcoin.core.MAX_BLOCK_SIGOPS = MAX_BLOCK_SIGOPS bitcoin.core.WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC bitcoin.core.CoreMainParams", "Core definitions COIN = 100000000 MAX_BLOCK_SIZE = 2000000 # after assets deployed MAX_BLOCK_WEIGHT", "GENESIS_BLOCK = CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28c60e4d5affff001ee0d47d010101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT = 2**256-1 >> 20 # Burn", "2**256-1 >> 20 # Burn Amounts nIssueAssetBurnAmount = 500 * COIN; nReissueAssetBurnAmount =", "0xaa, 0x21, 0xa9, 0xed]) class CoreMainParams(bitcoin.core.CoreChainParams): MAX_MONEY = 21000000000 * COIN NAME =", "= 100000000 MAX_BLOCK_SIZE = 2000000 # after assets deployed MAX_BLOCK_WEIGHT = 8000000 MAX_BLOCK_SIGOPS", "# 25? 
WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = _bytes([OP_RETURN, 0x24, 0xaa, 0x21, 0xa9, 0xed]) class CoreMainParams(bitcoin.core.CoreChainParams): MAX_MONEY", "including this file, may be copied, modified, # propagated, or distributed except according", "# # No part of python-ravencoinlib, including this file, may be copied, modified,", "= 'regtest' GENESIS_BLOCK = CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28a621d95affff7f20010000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 150 PROOF_OF_WORK_LIMIT = 2**256-1 >> 1", "top-level # directory of this distribution. 
# # No part of python-ravencoinlib, including", "Burn Addresses strIssueAssetBurnAddress = \"n1issueAssetXXXXXXXXXXXXXXXXWdnemQ\" strReissueAssetBurnAddress = \"n1ReissueAssetXXXXXXXXXXXXXXWG9NLd\" strIssueSubAssetBurnAddress = \"n1issueSubAssetXXXXXXXXXXXXXbNiH6v\" strIssueUniqueAssetBurnAddress =", "= MAX_BLOCK_WEIGHT bitcoin.core.MAX_BLOCK_SIGOPS = MAX_BLOCK_SIGOPS bitcoin.core.WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC bitcoin.core.CoreMainParams = CoreMainParams bitcoin.core.CoreTestNetParams =", "CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28a621d95affff7f20010000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 150 PROOF_OF_WORK_LIMIT = 2**256-1 >> 1 # Burn Amounts nIssueAssetBurnAmount", "Burn Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" class CoreRegTestParams(bitcoin.core.CoreTestNetParams): MAX_MONEY = 21000000000 * COIN NAME", "100000000 MAX_BLOCK_SIZE = 2000000 # after assets deployed MAX_BLOCK_WEIGHT = 8000000 MAX_BLOCK_SIGOPS =", "* COIN; # Burn Addresses strIssueAssetBurnAddress = \"n1issueAssetXXXXXXXXXXXXXXXXWdnemQ\" strReissueAssetBurnAddress = \"n1ReissueAssetXXXXXXXXXXXXXXWG9NLd\" strIssueSubAssetBurnAddress =", "strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" # monkey patching bitcoin.core.COIN = COIN bitcoin.core.MAX_BLOCK_SIZE = MAX_BLOCK_SIZE bitcoin.core.MAX_BLOCK_WEIGHT", "terms contained in the # LICENSE file. 
import sys import bitcoin.core from bitcoin.core", "= 500 * COIN; nReissueAssetBurnAmount = 100 * COIN; nIssueSubAssetBurnAmount = 100 *", "# directory of this distribution. # # No part of python-ravencoinlib, including this", "COIN; # Burn Addresses strIssueAssetBurnAddress = \"n1issueAssetXXXXXXXXXXXXXXXXWdnemQ\" strReissueAssetBurnAddress = \"n1ReissueAssetXXXXXXXXXXXXXXWG9NLd\" strIssueSubAssetBurnAddress = \"n1issueSubAssetXXXXXXXXXXXXXbNiH6v\"", "# monkey patching bitcoin.core.COIN = COIN bitcoin.core.MAX_BLOCK_SIZE = MAX_BLOCK_SIZE bitcoin.core.MAX_BLOCK_WEIGHT = MAX_BLOCK_WEIGHT bitcoin.core.MAX_BLOCK_SIGOPS", "COIN NAME = 'mainnet' GENESIS_BLOCK = CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28c60e4d5affff001ee0d47d010101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 2100000 PROOF_OF_WORK_LIMIT = 2**256-1", "\"n1issueSubAssetXXXXXXXXXXXXXbNiH6v\" strIssueUniqueAssetBurnAddress = \"n1issueUniqueAssetXXXXXXXXXXS4695i\" # Global Burn Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" class CoreRegTestParams(bitcoin.core.CoreTestNetParams):", "# Global Burn Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" # monkey patching bitcoin.core.COIN = COIN", "MAX_BLOCK_SIZE/50 # 25? WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = _bytes([OP_RETURN, 0x24, 0xaa, 0x21, 0xa9, 0xed]) class CoreMainParams(bitcoin.core.CoreChainParams):", "in the # LICENSE file. 
import sys import bitcoin.core from bitcoin.core import *", "Addresses strIssueAssetBurnAddress = \"RXissueAssetXXXXXXXXXXXXXXXXXhhZGt\" strReissueAssetBurnAddress = \"RXReissueAssetXXXXXXXXXXXXXXVEFAWu\" strIssueSubAssetBurnAddress = \"RXissueSubAssetXXXXXXXXXXXXXWcwhwL\" strIssueUniqueAssetBurnAddress = \"RXissueUniqueAssetXXXXXXXXXXWEAe58\"", "\"RXBurnXXXXXXXXXXXXXXXXXXXXXXWUo9FV\" class CoreTestNetParams(bitcoin.core.CoreMainParams): MAX_MONEY = 21000000000 * COIN NAME = 'testnet' GENESIS_BLOCK =", "1 # Burn Amounts nIssueAssetBurnAmount = 500 * COIN; nReissueAssetBurnAmount = 100 *", "500 * COIN; nReissueAssetBurnAmount = 100 * COIN; nIssueSubAssetBurnAmount = 100 * COIN;", "= \"n1issueUniqueAssetXXXXXXXXXXS4695i\" # Global Burn Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" class CoreRegTestParams(bitcoin.core.CoreTestNetParams): MAX_MONEY =", "nIssueUniqueAssetBurnAmount = 5 * COIN; # Burn Addresses strIssueAssetBurnAddress = \"n1issueAssetXXXXXXXXXXXXXXXXWdnemQ\" strReissueAssetBurnAddress =", ">> 20 # Burn Amounts nIssueAssetBurnAmount = 500 * COIN nReissueAssetBurnAmount = 100", "Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" class CoreRegTestParams(bitcoin.core.CoreTestNetParams): MAX_MONEY = 21000000000 * COIN NAME =", "strIssueUniqueAssetBurnAddress = \"n1issueUniqueAssetXXXXXXXXXXS4695i\" # Global Burn Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" # monkey patching", "Amounts nIssueAssetBurnAmount = 500 * COIN; nReissueAssetBurnAmount = 100 * COIN; nIssueSubAssetBurnAmount =", "\"n1issueSubAssetXXXXXXXXXXXXXbNiH6v\" strIssueUniqueAssetBurnAddress = \"n1issueUniqueAssetXXXXXXXXXXS4695i\" # Global Burn Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" # monkey", "# # This file is part of python-ravencoinlib. 
# # It is subject", "strIssueAssetBurnAddress = \"RXissueAssetXXXXXXXXXXXXXXXXXhhZGt\" strReissueAssetBurnAddress = \"RXReissueAssetXXXXXXXXXXXXXXVEFAWu\" strIssueSubAssetBurnAddress = \"RXissueSubAssetXXXXXXXXXXXXXWcwhwL\" strIssueUniqueAssetBurnAddress = \"RXissueUniqueAssetXXXXXXXXXXWEAe58\" #", "0x21, 0xa9, 0xed]) class CoreMainParams(bitcoin.core.CoreChainParams): MAX_MONEY = 21000000000 * COIN NAME = 'mainnet'", "2100000 PROOF_OF_WORK_LIMIT = 2**256-1 >> 20 # Burn Amounts nIssueAssetBurnAmount = 500 *", "sys import bitcoin.core from bitcoin.core import * from bitcoin.core.script import OP_RETURN if sys.version", "monkey patching bitcoin.core.COIN = COIN bitcoin.core.MAX_BLOCK_SIZE = MAX_BLOCK_SIZE bitcoin.core.MAX_BLOCK_WEIGHT = MAX_BLOCK_WEIGHT bitcoin.core.MAX_BLOCK_SIGOPS =", "patching bitcoin.core.COIN = COIN bitcoin.core.MAX_BLOCK_SIZE = MAX_BLOCK_SIZE bitcoin.core.MAX_BLOCK_WEIGHT = MAX_BLOCK_WEIGHT bitcoin.core.MAX_BLOCK_SIGOPS = MAX_BLOCK_SIGOPS", "= \"n1issueUniqueAssetXXXXXXXXXXS4695i\" # Global Burn Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" # monkey patching bitcoin.core.COIN", "the license terms in the LICENSE file found in the top-level # directory", "this distribution. 
# # No part of python-ravencoinlib, including this file, may be", "strIssueSubAssetBurnAddress = \"n1issueSubAssetXXXXXXXXXXXXXbNiH6v\" strIssueUniqueAssetBurnAddress = \"n1issueUniqueAssetXXXXXXXXXXS4695i\" # Global Burn Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\"", "Address strGlobalBurnAddress = \"RXBurnXXXXXXXXXXXXXXXXXXXXXXWUo9FV\" class CoreTestNetParams(bitcoin.core.CoreMainParams): MAX_MONEY = 21000000000 * COIN NAME =", "# Burn Addresses strIssueAssetBurnAddress = \"n1issueAssetXXXXXXXXXXXXXXXXWdnemQ\" strReissueAssetBurnAddress = \"n1ReissueAssetXXXXXXXXXXXXXXWG9NLd\" strIssueSubAssetBurnAddress = \"n1issueSubAssetXXXXXXXXXXXXXbNiH6v\" strIssueUniqueAssetBurnAddress", "2018 The python-ravencoinlib developers # # This file is part of python-ravencoinlib. #", "nIssueUniqueAssetBurnAmount = 5 * COIN # Burn Addresses strIssueAssetBurnAddress = \"RXissueAssetXXXXXXXXXXXXXXXXXhhZGt\" strReissueAssetBurnAddress =", "8000000 MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE/50 # 25? 
WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = _bytes([OP_RETURN, 0x24, 0xaa, 0x21, 0xa9,", "MAX_BLOCK_SIZE = 2000000 # after assets deployed MAX_BLOCK_WEIGHT = 8000000 MAX_BLOCK_SIGOPS = MAX_BLOCK_SIZE/50", "= 5 * COIN; # Burn Addresses strIssueAssetBurnAddress = \"n1issueAssetXXXXXXXXXXXXXXXXWdnemQ\" strReissueAssetBurnAddress = \"n1ReissueAssetXXXXXXXXXXXXXXWG9NLd\"", "CoreMainParams(bitcoin.core.CoreChainParams): MAX_MONEY = 21000000000 * COIN NAME = 'mainnet' GENESIS_BLOCK = CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28c60e4d5affff001ee0d47d010101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL", "* COIN; nReissueAssetBurnAmount = 100 * COIN; nIssueSubAssetBurnAmount = 100 * COIN; nIssueUniqueAssetBurnAmount", "# Global Burn Address strGlobalBurnAddress = \"RXBurnXXXXXXXXXXXXXXXXXXXXXXWUo9FV\" class CoreTestNetParams(bitcoin.core.CoreMainParams): MAX_MONEY = 21000000000 *", "* COIN; nIssueSubAssetBurnAmount = 100 * COIN; nIssueUniqueAssetBurnAmount = 5 * COIN; #", "strIssueUniqueAssetBurnAddress = \"RXissueUniqueAssetXXXXXXXXXXWEAe58\" # Global Burn Address strGlobalBurnAddress = \"RXBurnXXXXXXXXXXXXXXXXXXXXXXWUo9FV\" class CoreTestNetParams(bitcoin.core.CoreMainParams): MAX_MONEY", "MAX_MONEY = 21000000000 * COIN NAME = 'mainnet' GENESIS_BLOCK = 
CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28c60e4d5affff001ee0d47d010101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL =", "Global Burn Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" class CoreRegTestParams(bitcoin.core.CoreTestNetParams): MAX_MONEY = 21000000000 * COIN", "= 2**256-1 >> 1 # Burn Amounts nIssueAssetBurnAmount = 500 * COIN; nReissueAssetBurnAmount", "= 5 * COIN # Burn Addresses strIssueAssetBurnAddress = \"RXissueAssetXXXXXXXXXXXXXXXXXhhZGt\" strReissueAssetBurnAddress = \"RXReissueAssetXXXXXXXXXXXXXXVEFAWu\"", "\"n1issueUniqueAssetXXXXXXXXXXS4695i\" # Global Burn Address strGlobalBurnAddress = \"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" # monkey patching bitcoin.core.COIN =", "= MAX_BLOCK_SIGOPS bitcoin.core.WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC bitcoin.core.CoreMainParams = CoreMainParams bitcoin.core.CoreTestNetParams = CoreTestNetParams bitcoin.core.CoreRegTestParams =", "COIN bitcoin.core.MAX_BLOCK_SIZE = MAX_BLOCK_SIZE bitcoin.core.MAX_BLOCK_WEIGHT = MAX_BLOCK_WEIGHT bitcoin.core.MAX_BLOCK_SIGOPS = MAX_BLOCK_SIGOPS bitcoin.core.WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC", "* COIN NAME = 'regtest' GENESIS_BLOCK = 
CBlock.deserialize(x('04000000000000000000000000000000000000000000000000000000000000000000000016355fae8b6a26f2fa708d39997654c44b501f308d802325359a7367a800ff28a621d95affff7f20010000000101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff570004ffff001d01044c4d5468652054696d65732030332f4a616e2f3230313820426974636f696e206973206e616d65206f66207468652067616d6520666f72206e65772067656e65726174696f6e206f66206669726d73ffffffff010088526a74000000434104678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5fac00000000')) SUBSIDY_HALVING_INTERVAL = 150 PROOF_OF_WORK_LIMIT =", "= \"RXReissueAssetXXXXXXXXXXXXXXVEFAWu\" strIssueSubAssetBurnAddress = \"RXissueSubAssetXXXXXXXXXXXXXWcwhwL\" strIssueUniqueAssetBurnAddress = \"RXissueUniqueAssetXXXXXXXXXXWEAe58\" # Global Burn Address strGlobalBurnAddress", "# Core definitions COIN = 100000000 MAX_BLOCK_SIZE = 2000000 # after assets deployed", "100 * COIN nIssueSubAssetBurnAmount = 100 * COIN nIssueUniqueAssetBurnAmount = 5 * COIN", "MAX_BLOCK_SIGOPS bitcoin.core.WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC bitcoin.core.CoreMainParams = CoreMainParams bitcoin.core.CoreTestNetParams = CoreTestNetParams bitcoin.core.CoreRegTestParams = CoreRegTestParams", "25? 
WITNESS_COINBASE_SCRIPTPUBKEY_MAGIC = _bytes([OP_RETURN, 0x24, 0xaa, 0x21, 0xa9, 0xed]) class CoreMainParams(bitcoin.core.CoreChainParams): MAX_MONEY =", "import OP_RETURN if sys.version > '3': _bytes = bytes else: _bytes = lambda", "# Burn Addresses strIssueAssetBurnAddress = \"RXissueAssetXXXXXXXXXXXXXXXXXhhZGt\" strReissueAssetBurnAddress = \"RXReissueAssetXXXXXXXXXXXXXXVEFAWu\" strIssueSubAssetBurnAddress = \"RXissueSubAssetXXXXXXXXXXXXXWcwhwL\" strIssueUniqueAssetBurnAddress", "\"RXReissueAssetXXXXXXXXXXXXXXVEFAWu\" strIssueSubAssetBurnAddress = \"RXissueSubAssetXXXXXXXXXXXXXWcwhwL\" strIssueUniqueAssetBurnAddress = \"RXissueUniqueAssetXXXXXXXXXXWEAe58\" # Global Burn Address strGlobalBurnAddress =", "\"n1BurnXXXXXXXXXXXXXXXXXXXXXXU1qejP\" # monkey patching bitcoin.core.COIN = COIN bitcoin.core.MAX_BLOCK_SIZE = MAX_BLOCK_SIZE bitcoin.core.MAX_BLOCK_WEIGHT = MAX_BLOCK_WEIGHT", "150 PROOF_OF_WORK_LIMIT = 2**256-1 >> 1 # Burn Amounts nIssueAssetBurnAmount = 500 *", "the LICENSE file found in the top-level # directory of this distribution. #", "= \"RXissueSubAssetXXXXXXXXXXXXXWcwhwL\" strIssueUniqueAssetBurnAddress = \"RXissueUniqueAssetXXXXXXXXXXWEAe58\" # Global Burn Address strGlobalBurnAddress = \"RXBurnXXXXXXXXXXXXXXXXXXXXXXWUo9FV\" class", "may be copied, modified, # propagated, or distributed except according to the terms", "100 * COIN; nIssueSubAssetBurnAmount = 100 * COIN; nIssueUniqueAssetBurnAmount = 5 * COIN;", "100 * COIN; nIssueUniqueAssetBurnAmount = 5 * COIN; # Burn Addresses strIssueAssetBurnAddress =", "from bitcoin.core import * from bitcoin.core.script import OP_RETURN if sys.version > '3': _bytes", "except according to the terms contained in the # LICENSE file. import sys", "= lambda x: bytes(bytearray(x)) # Core definitions COIN = 100000000 MAX_BLOCK_SIZE = 2000000" ]
[ "float(input()) puzzle_count = int(input()) dolls_count = int(input()) teddy_bears_count = int(input()) minions_count = int(input())", "= int(input()) dolls_count = int(input()) teddy_bears_count = int(input()) minions_count = int(input()) trucks_count =", "puzzle_count * 2.6 total_price_teddy_bears = teddy_bears_count * 4.10 total_price_minions = minions_count * 8.20", "{needed_money:.2f} lv left.\") else: rent = total_price * 0.1 earning_after_rent = total_price -", "* 2 total_price = total_price_puzzles + total_price_dolls + \\ total_price_teddy_bears + total_price_minions +", "= int(input()) # total_price_dolls = dolls_count * 3 total_price_puzzles = puzzle_count * 2.6", "total_price_trucks = trucks_count * 2 total_price = total_price_puzzles + total_price_dolls + \\ total_price_teddy_bears", "4.10 total_price_minions = minions_count * 8.20 total_price_trucks = trucks_count * 2 total_price =", "if earning_after_rent >= holiday_price: earning_left = earning_after_rent - holiday_price print(f\"Yes! {earning_left:.2f} lv left.\")", "left.\") else: needed_money = holiday_price - earning_after_rent print(f\"Not enough money! {needed_money:.2f} lv needed.\")", "total_price_dolls + \\ total_price_teddy_bears + total_price_minions + \\ total_price_trucks total_amount_toys = puzzle_count +", "{earning_left:.2f} lv left.\") else: needed_money = holiday_price - earning_after_rent print(f\"Not enough money! {needed_money:.2f}", "+ total_price_dolls + \\ total_price_teddy_bears + total_price_minions + \\ total_price_trucks total_amount_toys = puzzle_count", "earning_after_rent - holiday_price print(f\"Yes! {earning_left:.2f} lv left.\") else: needed_money = holiday_price - earning_after_rent", "else: rent = total_price * 0.1 earning_after_rent = total_price - rent if earning_after_rent", "else: needed_money = holiday_price - earning_after_rent print(f\"Not enough money! 
{needed_money:.2f} lv left.\") else:", "+ \\ total_price_trucks total_amount_toys = puzzle_count + dolls_count + teddy_bears_count + minions_count +", "print(f\"Not enough money! {needed_money:.2f} lv left.\") else: rent = total_price * 0.1 earning_after_rent", "left.\") else: needed_money = holiday_price - earning_after_rent print(f\"Not enough money! {needed_money:.2f} lv left.\")", "dolls_count * 3 total_price_puzzles = puzzle_count * 2.6 total_price_teddy_bears = teddy_bears_count * 4.10", "total_price = total_price_puzzles + total_price_dolls + \\ total_price_teddy_bears + total_price_minions + \\ total_price_trucks", "* 0.25 total_price = total_price - discount rent = total_price * 0.1 earning_after_rent", "minions_count = int(input()) trucks_count = int(input()) # total_price_dolls = dolls_count * 3 total_price_puzzles", "int(input()) trucks_count = int(input()) # total_price_dolls = dolls_count * 3 total_price_puzzles = puzzle_count", "total_price_trucks total_amount_toys = puzzle_count + dolls_count + teddy_bears_count + minions_count + trucks_count if", "print(f\"Yes! {earning_left:.2f} lv left.\") else: needed_money = holiday_price - earning_after_rent print(f\"Not enough money!", "dolls_count + teddy_bears_count + minions_count + trucks_count if total_amount_toys >= 50: discount =", "* 3 total_price_puzzles = puzzle_count * 2.6 total_price_teddy_bears = teddy_bears_count * 4.10 total_price_minions", "= puzzle_count + dolls_count + teddy_bears_count + minions_count + trucks_count if total_amount_toys >=", "discount = total_price * 0.25 total_price = total_price - discount rent = total_price", "0.25 total_price = total_price - discount rent = total_price * 0.1 earning_after_rent =", "rent if earning_after_rent >= holiday_price: earning_left = earning_after_rent - holiday_price print(f\"Yes! 
{earning_left:.2f} lv", "= total_price - discount rent = total_price * 0.1 earning_after_rent = total_price -", "total_price * 0.1 earning_after_rent = total_price - rent if earning_after_rent >= holiday_price: earning_left", "enough money! {needed_money:.2f} lv left.\") else: rent = total_price * 0.1 earning_after_rent =", "- discount rent = total_price * 0.1 earning_after_rent = total_price - rent if", "= int(input()) trucks_count = int(input()) # total_price_dolls = dolls_count * 3 total_price_puzzles =", "holiday_price - earning_after_rent print(f\"Not enough money! {needed_money:.2f} lv left.\") else: rent = total_price", "lv left.\") else: rent = total_price * 0.1 earning_after_rent = total_price - rent", "rent = total_price * 0.1 earning_after_rent = total_price - rent if earning_after_rent >=", "trucks_count if total_amount_toys >= 50: discount = total_price * 0.25 total_price = total_price", "total_price = total_price - discount rent = total_price * 0.1 earning_after_rent = total_price", "= total_price - rent if earning_after_rent >= holiday_price: earning_left = earning_after_rent - holiday_price", "if total_amount_toys >= 50: discount = total_price * 0.25 total_price = total_price -", "<reponame>MaggieIllustrations/softuni-github-programming holiday_price = float(input()) puzzle_count = int(input()) dolls_count = int(input()) teddy_bears_count = int(input())", "int(input()) minions_count = int(input()) trucks_count = int(input()) # total_price_dolls = dolls_count * 3", "needed_money = holiday_price - earning_after_rent print(f\"Not enough money! {needed_money:.2f} lv left.\") else: rent", "puzzle_count + dolls_count + teddy_bears_count + minions_count + trucks_count if total_amount_toys >= 50:", "= holiday_price - earning_after_rent print(f\"Not enough money! 
{needed_money:.2f} lv left.\") else: rent =", "total_price_minions = minions_count * 8.20 total_price_trucks = trucks_count * 2 total_price = total_price_puzzles", "+ minions_count + trucks_count if total_amount_toys >= 50: discount = total_price * 0.25", "int(input()) teddy_bears_count = int(input()) minions_count = int(input()) trucks_count = int(input()) # total_price_dolls =", "\\ total_price_trucks total_amount_toys = puzzle_count + dolls_count + teddy_bears_count + minions_count + trucks_count", "+ trucks_count if total_amount_toys >= 50: discount = total_price * 0.25 total_price =", ">= holiday_price: earning_left = earning_after_rent - holiday_price print(f\"Yes! {earning_left:.2f} lv left.\") else: needed_money", "+ \\ total_price_teddy_bears + total_price_minions + \\ total_price_trucks total_amount_toys = puzzle_count + dolls_count", "2 total_price = total_price_puzzles + total_price_dolls + \\ total_price_teddy_bears + total_price_minions + \\", "= puzzle_count * 2.6 total_price_teddy_bears = teddy_bears_count * 4.10 total_price_minions = minions_count *", "total_price_minions + \\ total_price_trucks total_amount_toys = puzzle_count + dolls_count + teddy_bears_count + minions_count", "holiday_price = float(input()) puzzle_count = int(input()) dolls_count = int(input()) teddy_bears_count = int(input()) minions_count", "= total_price_puzzles + total_price_dolls + \\ total_price_teddy_bears + total_price_minions + \\ total_price_trucks total_amount_toys", "total_price_puzzles = puzzle_count * 2.6 total_price_teddy_bears = teddy_bears_count * 4.10 total_price_minions = minions_count", "\\ total_price_teddy_bears + total_price_minions + \\ total_price_trucks total_amount_toys = puzzle_count + dolls_count +", "lv left.\") else: needed_money = holiday_price - earning_after_rent print(f\"Not enough money! {needed_money:.2f} lv", "0.1 earning_after_rent = total_price - rent if earning_after_rent >= holiday_price: earning_left = earning_after_rent", "money! 
{needed_money:.2f} lv left.\") else: rent = total_price * 0.1 earning_after_rent = total_price", ">= 50: discount = total_price * 0.25 total_price = total_price - discount rent", "left.\") else: rent = total_price * 0.1 earning_after_rent = total_price - rent if", "+ dolls_count + teddy_bears_count + minions_count + trucks_count if total_amount_toys >= 50: discount", "teddy_bears_count * 4.10 total_price_minions = minions_count * 8.20 total_price_trucks = trucks_count * 2", "minions_count * 8.20 total_price_trucks = trucks_count * 2 total_price = total_price_puzzles + total_price_dolls", "= int(input()) minions_count = int(input()) trucks_count = int(input()) # total_price_dolls = dolls_count *", "earning_after_rent >= holiday_price: earning_left = earning_after_rent - holiday_price print(f\"Yes! {earning_left:.2f} lv left.\") else:", "2.6 total_price_teddy_bears = teddy_bears_count * 4.10 total_price_minions = minions_count * 8.20 total_price_trucks =", "discount rent = total_price * 0.1 earning_after_rent = total_price - rent if earning_after_rent", "= dolls_count * 3 total_price_puzzles = puzzle_count * 2.6 total_price_teddy_bears = teddy_bears_count *", "= minions_count * 8.20 total_price_trucks = trucks_count * 2 total_price = total_price_puzzles +", "teddy_bears_count + minions_count + trucks_count if total_amount_toys >= 50: discount = total_price *", "- holiday_price print(f\"Yes! 
{earning_left:.2f} lv left.\") else: needed_money = holiday_price - earning_after_rent print(f\"Not", "+ teddy_bears_count + minions_count + trucks_count if total_amount_toys >= 50: discount = total_price", "minions_count + trucks_count if total_amount_toys >= 50: discount = total_price * 0.25 total_price", "50: discount = total_price * 0.25 total_price = total_price - discount rent =", "# total_price_dolls = dolls_count * 3 total_price_puzzles = puzzle_count * 2.6 total_price_teddy_bears =", "trucks_count * 2 total_price = total_price_puzzles + total_price_dolls + \\ total_price_teddy_bears + total_price_minions", "trucks_count = int(input()) # total_price_dolls = dolls_count * 3 total_price_puzzles = puzzle_count *", "= int(input()) teddy_bears_count = int(input()) minions_count = int(input()) trucks_count = int(input()) # total_price_dolls", "holiday_price: earning_left = earning_after_rent - holiday_price print(f\"Yes! {earning_left:.2f} lv left.\") else: needed_money =", "* 8.20 total_price_trucks = trucks_count * 2 total_price = total_price_puzzles + total_price_dolls +", "holiday_price print(f\"Yes! {earning_left:.2f} lv left.\") else: needed_money = holiday_price - earning_after_rent print(f\"Not enough", "total_price_teddy_bears = teddy_bears_count * 4.10 total_price_minions = minions_count * 8.20 total_price_trucks = trucks_count", "earning_after_rent print(f\"Not enough money! 
{needed_money:.2f} lv left.\") else: rent = total_price * 0.1", "* 4.10 total_price_minions = minions_count * 8.20 total_price_trucks = trucks_count * 2 total_price", "8.20 total_price_trucks = trucks_count * 2 total_price = total_price_puzzles + total_price_dolls + \\", "3 total_price_puzzles = puzzle_count * 2.6 total_price_teddy_bears = teddy_bears_count * 4.10 total_price_minions =", "earning_after_rent = total_price - rent if earning_after_rent >= holiday_price: earning_left = earning_after_rent -", "total_amount_toys >= 50: discount = total_price * 0.25 total_price = total_price - discount", "- earning_after_rent print(f\"Not enough money! {needed_money:.2f} lv left.\") else: rent = total_price *", "+ total_price_minions + \\ total_price_trucks total_amount_toys = puzzle_count + dolls_count + teddy_bears_count +", "puzzle_count = int(input()) dolls_count = int(input()) teddy_bears_count = int(input()) minions_count = int(input()) trucks_count", "total_price_dolls = dolls_count * 3 total_price_puzzles = puzzle_count * 2.6 total_price_teddy_bears = teddy_bears_count", "dolls_count = int(input()) teddy_bears_count = int(input()) minions_count = int(input()) trucks_count = int(input()) #", "total_amount_toys = puzzle_count + dolls_count + teddy_bears_count + minions_count + trucks_count if total_amount_toys", "* 0.1 earning_after_rent = total_price - rent if earning_after_rent >= holiday_price: earning_left =", "total_price_puzzles + total_price_dolls + \\ total_price_teddy_bears + total_price_minions + \\ total_price_trucks total_amount_toys =", "= total_price * 0.25 total_price = total_price - discount rent = total_price *", "total_price_teddy_bears + total_price_minions + \\ total_price_trucks total_amount_toys = puzzle_count + dolls_count + teddy_bears_count", "= total_price * 0.1 earning_after_rent = total_price - rent if earning_after_rent >= holiday_price:", "= float(input()) puzzle_count = int(input()) dolls_count = int(input()) teddy_bears_count = 
int(input()) minions_count =", "total_price - discount rent = total_price * 0.1 earning_after_rent = total_price - rent", "= earning_after_rent - holiday_price print(f\"Yes! {earning_left:.2f} lv left.\") else: needed_money = holiday_price -", "- rent if earning_after_rent >= holiday_price: earning_left = earning_after_rent - holiday_price print(f\"Yes! {earning_left:.2f}", "= trucks_count * 2 total_price = total_price_puzzles + total_price_dolls + \\ total_price_teddy_bears +", "= teddy_bears_count * 4.10 total_price_minions = minions_count * 8.20 total_price_trucks = trucks_count *", "teddy_bears_count = int(input()) minions_count = int(input()) trucks_count = int(input()) # total_price_dolls = dolls_count", "* 2.6 total_price_teddy_bears = teddy_bears_count * 4.10 total_price_minions = minions_count * 8.20 total_price_trucks", "int(input()) dolls_count = int(input()) teddy_bears_count = int(input()) minions_count = int(input()) trucks_count = int(input())", "total_price * 0.25 total_price = total_price - discount rent = total_price * 0.1", "total_price - rent if earning_after_rent >= holiday_price: earning_left = earning_after_rent - holiday_price print(f\"Yes!", "int(input()) # total_price_dolls = dolls_count * 3 total_price_puzzles = puzzle_count * 2.6 total_price_teddy_bears", "earning_left = earning_after_rent - holiday_price print(f\"Yes! {earning_left:.2f} lv left.\") else: needed_money = holiday_price" ]
[ "@patch(\"profile_generator.util.file.read_file\") @patch( \"profile_generator.util.file.get_full_path\", lambda *xs: \"/root/\" + \"/\".join(xs) ) def test_get_profile_template_returns_template_file_content( self, read_file:", "marshall = lambda x: x content = generator.create_profile_content(template, cfg, marshall) self.assertEqual(content, \"1\") @classmethod", "object_of, type_of class ProfileGeneratorTest(TestCase): @patch(\"sys.argv\", [\"app.py\", \"one.json\", \"two.json\"]) def test_get_config_files_returns_config_files(self) -> None: self.assertEqual([\"one.json\",", "object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) def test_create_profile_content_should_create_profile_content(self) -> None: template", "= generator.load_configuration_file(\"config.json\", schema) self.assertEqual({\"a\": 2}, config) read_file.assert_called_once_with(\"config.json\") @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_cannot_be_read( self, read_file: Mock", "read_file: Mock ) -> None: read_file.return_value = '{\"a\": \"$a\"}' schema = object_of({\"a\": type_of(str)})", "generator from profile_generator.generator import ( ConfigFileReadError, InvalidConfigFileError, NoConfigFileError, OutputDirCreationFailure, ProfileWriteError, TemplateFileReadError, ) from", "generator.create_output_dir()) @patch(\"profile_generator.util.file.create_dir\") def test_create_output_dir_raises_error_when_cannot_create_dir( self, create_dir: Mock ) -> None: create_dir.side_effect = OSError", "None: self.assertEqual(\"/root/profiles\", generator.create_output_dir()) @patch(\"profile_generator.util.file.create_dir\") def test_create_output_dir_raises_error_when_cannot_create_dir( self, create_dir: Mock ) -> None: create_dir.side_effect", "None: name = \"profile_name\" content = \"1\" output_dir = \"dir\" write_file.side_effect = OSError", ") 
def test_get_profile_template_returns_template_file_content( self, read_file: Mock ) -> None: read_file.return_value = \"file content\"", "ProfileWriteError, TemplateFileReadError, ) from profile_generator.schema import object_of, type_of class ProfileGeneratorTest(TestCase): @patch(\"sys.argv\", [\"app.py\", \"one.json\",", "content\", generator.get_profile_template()) read_file.assert_called_once_with(\"/root/templates/raw_therapee.pp3\") @patch(\"profile_generator.util.file.read_file\") def test_get_profile_template_raises_error_when_cannot_read_template_file( self, read_file: Mock ) -> None: read_file.side_effect", "None: schema = object_of({\"a\": type_of(int)}) read_file.side_effect = OSError self.assertRaises( ConfigFileReadError, generator.load_configuration_file, \"config.json\", schema,", "\"$a\"}' schema = object_of({\"a\": type_of(str)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def", "self, write_file: Mock ) -> None: name = \"profile_name\" content = \"1\" output_dir", "create_dir.side_effect = OSError self.assertRaises(OutputDirCreationFailure, generator.create_output_dir) @patch(\"profile_generator.util.file.read_file\") @patch( \"profile_generator.util.file.get_full_path\", lambda *xs: \"/root/\" + \"/\".join(xs)", ") from profile_generator.schema import object_of, type_of class ProfileGeneratorTest(TestCase): @patch(\"sys.argv\", [\"app.py\", \"one.json\", \"two.json\"]) def", "read_file.return_value = '{\"a\": false' schema = object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema,", "Mock ) -> None: read_file.return_value = '{\"a\": 2}' schema = object_of({\"a\": type_of(int)}) config", "def test_create_output_dir_raises_returns_created_dir_path(self) -> None: self.assertEqual(\"/root/profiles\", generator.create_output_dir()) 
@patch(\"profile_generator.util.file.create_dir\") def test_create_output_dir_raises_error_when_cannot_create_dir( self, create_dir: Mock )", "@patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid( self, read_file: Mock ) -> None: read_file.return_value = '{\"a\": false}'", "Mock) -> None: name = \"profile_name\" content = \"1\" output_dir = \"dir\" generator.persist_profile(name,", "@patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_cannot_be_read( self, read_file: Mock ) -> None: schema = object_of({\"a\": type_of(int)})", "= generator.create_profile_content(template, cfg, marshall) self.assertEqual(content, \"1\") @classmethod @patch(\"profile_generator.util.file.write_file\") def test_presist_profile_should_persist_profile(cls, write_file: Mock) ->", "= '{\"a\": \"$a\"}' schema = object_of({\"a\": type_of(str)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, )", "None: self.assertRaises(NoConfigFileError, generator.get_config_files) @patch( \"profile_generator.util.file.create_dir\", lambda *xs: \"/root/\" + \"/\".join(xs) ) def test_create_output_dir_raises_returns_created_dir_path(self)", "None: name = \"profile_name\" content = \"1\" output_dir = \"dir\" generator.persist_profile(name, content, output_dir)", "None: create_dir.side_effect = OSError self.assertRaises(OutputDirCreationFailure, generator.create_output_dir) @patch(\"profile_generator.util.file.read_file\") @patch( \"profile_generator.util.file.get_full_path\", lambda *xs: \"/root/\" +", "+ \"/\".join(xs) ) def test_create_output_dir_raises_returns_created_dir_path(self) -> None: self.assertEqual(\"/root/profiles\", generator.create_output_dir()) @patch(\"profile_generator.util.file.create_dir\") def test_create_output_dir_raises_error_when_cannot_create_dir( self,", 
"test_get_profile_template_returns_template_file_content( self, read_file: Mock ) -> None: read_file.return_value = \"file content\" self.assertEqual(\"file content\",", "self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) def test_create_profile_content_should_create_profile_content(self) -> None: template = \"{a}\"", "false' schema = object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) def test_create_profile_content_should_create_profile_content(self)", "InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) def test_create_profile_content_should_create_profile_content(self) -> None: template = \"{a}\" cfg", "None: template = \"{a}\" cfg = {\"a\": \"1\"} marshall = lambda x: x", "generator.persist_profile(name, content, output_dir) write_file.assert_called_once_with(content, output_dir, f\"{name}.pp3\") @patch(\"profile_generator.util.file.write_file\") def test_persist_profile_should_raise_error_when_writing_file_failed( self, write_file: Mock )", "import object_of, type_of class ProfileGeneratorTest(TestCase): @patch(\"sys.argv\", [\"app.py\", \"one.json\", \"two.json\"]) def test_get_config_files_returns_config_files(self) -> None:", "= object_of({\"a\": type_of(int)}) config = generator.load_configuration_file(\"config.json\", schema) self.assertEqual({\"a\": 2}, config) read_file.assert_called_once_with(\"config.json\") @patch(\"profile_generator.util.file.read_file\") def", "= '{\"a\": false' schema = object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, )", "= OSError self.assertRaises(OutputDirCreationFailure, generator.create_output_dir) @patch(\"profile_generator.util.file.read_file\") @patch( \"profile_generator.util.file.get_full_path\", lambda *xs: \"/root/\" + \"/\".join(xs) )", 
"test_create_output_dir_raises_returns_created_dir_path(self) -> None: self.assertEqual(\"/root/profiles\", generator.create_output_dir()) @patch(\"profile_generator.util.file.create_dir\") def test_create_output_dir_raises_error_when_cannot_create_dir( self, create_dir: Mock ) ->", "schema = object_of({\"a\": type_of(int)}) read_file.side_effect = OSError self.assertRaises( ConfigFileReadError, generator.load_configuration_file, \"config.json\", schema, )", "test_get_config_files_raises_error_when_arguments_are_missing(self) -> None: self.assertRaises(NoConfigFileError, generator.get_config_files) @patch( \"profile_generator.util.file.create_dir\", lambda *xs: \"/root/\" + \"/\".join(xs) )", "= lambda x: x content = generator.create_profile_content(template, cfg, marshall) self.assertEqual(content, \"1\") @classmethod @patch(\"profile_generator.util.file.write_file\")", "\"1\" output_dir = \"dir\" write_file.side_effect = OSError self.assertRaises( ProfileWriteError, generator.persist_profile, name, content, output_dir,", "schema = object_of({\"a\": type_of(int)}) config = generator.load_configuration_file(\"config.json\", schema) self.assertEqual({\"a\": 2}, config) read_file.assert_called_once_with(\"config.json\") @patch(\"profile_generator.util.file.read_file\")", "self.assertEqual(\"file content\", generator.get_profile_template()) read_file.assert_called_once_with(\"/root/templates/raw_therapee.pp3\") @patch(\"profile_generator.util.file.read_file\") def test_get_profile_template_raises_error_when_cannot_read_template_file( self, read_file: Mock ) -> None:", "type_of(str)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid( self, read_file: Mock", "None: self.assertEqual([\"one.json\", \"two.json\"], generator.get_config_files()) @patch(\"sys.argv\", [\"app.py\"]) def 
test_get_config_files_raises_error_when_arguments_are_missing(self) -> None: self.assertRaises(NoConfigFileError, generator.get_config_files) @patch(", "read_file.return_value = '{\"a\": 2}' schema = object_of({\"a\": type_of(int)}) config = generator.load_configuration_file(\"config.json\", schema) self.assertEqual({\"a\":", "generator.get_config_files()) @patch(\"sys.argv\", [\"app.py\"]) def test_get_config_files_raises_error_when_arguments_are_missing(self) -> None: self.assertRaises(NoConfigFileError, generator.get_config_files) @patch( \"profile_generator.util.file.create_dir\", lambda *xs:", ") @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_contains_variable_error( self, read_file: Mock ) -> None: read_file.return_value = '{\"a\":", ") -> None: read_file.side_effect = OSError self.assertRaises(TemplateFileReadError, generator.get_profile_template) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_loads_configuration_files( self, read_file:", "def test_get_config_files_raises_error_when_arguments_are_missing(self) -> None: self.assertRaises(NoConfigFileError, generator.get_config_files) @patch( \"profile_generator.util.file.create_dir\", lambda *xs: \"/root/\" + \"/\".join(xs)", "@patch( \"profile_generator.util.file.create_dir\", lambda *xs: \"/root/\" + \"/\".join(xs) ) def test_create_output_dir_raises_returns_created_dir_path(self) -> None: self.assertEqual(\"/root/profiles\",", "@patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_loads_configuration_files( self, read_file: Mock ) -> None: read_file.return_value = '{\"a\": 2}'", "\"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid( self, read_file: Mock ) -> None: read_file.return_value", "self.assertRaises(OutputDirCreationFailure, generator.create_output_dir) 
@patch(\"profile_generator.util.file.read_file\") @patch( \"profile_generator.util.file.get_full_path\", lambda *xs: \"/root/\" + \"/\".join(xs) ) def test_get_profile_template_returns_template_file_content(", "Mock, patch from profile_generator import generator from profile_generator.generator import ( ConfigFileReadError, InvalidConfigFileError, NoConfigFileError,", "@patch(\"profile_generator.util.file.write_file\") def test_persist_profile_should_raise_error_when_writing_file_failed( self, write_file: Mock ) -> None: name = \"profile_name\" content", "Mock ) -> None: name = \"profile_name\" content = \"1\" output_dir = \"dir\"", "TemplateFileReadError, ) from profile_generator.schema import object_of, type_of class ProfileGeneratorTest(TestCase): @patch(\"sys.argv\", [\"app.py\", \"one.json\", \"two.json\"])", "-> None: read_file.return_value = '{\"a\": 2}' schema = object_of({\"a\": type_of(int)}) config = generator.load_configuration_file(\"config.json\",", "config) read_file.assert_called_once_with(\"config.json\") @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_cannot_be_read( self, read_file: Mock ) -> None: schema =", "read_file: Mock ) -> None: read_file.return_value = '{\"a\": 2}' schema = object_of({\"a\": type_of(int)})", "test_presist_profile_should_persist_profile(cls, write_file: Mock) -> None: name = \"profile_name\" content = \"1\" output_dir =", "= \"profile_name\" content = \"1\" output_dir = \"dir\" write_file.side_effect = OSError self.assertRaises( ProfileWriteError,", ") @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid( self, read_file: Mock ) -> None: read_file.return_value = '{\"a\":", "self, read_file: Mock ) -> None: read_file.return_value = '{\"a\": false' schema = object_of({\"a\":", "test_get_config_files_returns_config_files(self) -> None: self.assertEqual([\"one.json\", \"two.json\"], 
generator.get_config_files()) @patch(\"sys.argv\", [\"app.py\"]) def test_get_config_files_raises_error_when_arguments_are_missing(self) -> None: self.assertRaises(NoConfigFileError,", "None: read_file.return_value = '{\"a\": false' schema = object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\",", "test_create_output_dir_raises_error_when_cannot_create_dir( self, create_dir: Mock ) -> None: create_dir.side_effect = OSError self.assertRaises(OutputDirCreationFailure, generator.create_output_dir) @patch(\"profile_generator.util.file.read_file\")", "type_of class ProfileGeneratorTest(TestCase): @patch(\"sys.argv\", [\"app.py\", \"one.json\", \"two.json\"]) def test_get_config_files_returns_config_files(self) -> None: self.assertEqual([\"one.json\", \"two.json\"],", "@patch(\"profile_generator.util.file.write_file\") def test_presist_profile_should_persist_profile(cls, write_file: Mock) -> None: name = \"profile_name\" content = \"1\"", ") -> None: name = \"profile_name\" content = \"1\" output_dir = \"dir\" write_file.side_effect", "type_of(int)}) read_file.side_effect = OSError self.assertRaises( ConfigFileReadError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_contains_variable_error(", "write_file: Mock) -> None: name = \"profile_name\" content = \"1\" output_dir = \"dir\"", "Mock ) -> None: read_file.side_effect = OSError self.assertRaises(TemplateFileReadError, generator.get_profile_template) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_loads_configuration_files( self,", "class ProfileGeneratorTest(TestCase): @patch(\"sys.argv\", [\"app.py\", \"one.json\", \"two.json\"]) def test_get_config_files_returns_config_files(self) -> None: self.assertEqual([\"one.json\", \"two.json\"], generator.get_config_files())", "self, read_file: Mock ) -> 
None: read_file.return_value = '{\"a\": false}' schema = object_of({\"a\":", "self.assertEqual(content, \"1\") @classmethod @patch(\"profile_generator.util.file.write_file\") def test_presist_profile_should_persist_profile(cls, write_file: Mock) -> None: name = \"profile_name\"", "f\"{name}.pp3\") @patch(\"profile_generator.util.file.write_file\") def test_persist_profile_should_raise_error_when_writing_file_failed( self, write_file: Mock ) -> None: name = \"profile_name\"", "lambda x: x content = generator.create_profile_content(template, cfg, marshall) self.assertEqual(content, \"1\") @classmethod @patch(\"profile_generator.util.file.write_file\") def", "patch from profile_generator import generator from profile_generator.generator import ( ConfigFileReadError, InvalidConfigFileError, NoConfigFileError, OutputDirCreationFailure,", "self, read_file: Mock ) -> None: read_file.side_effect = OSError self.assertRaises(TemplateFileReadError, generator.get_profile_template) @patch(\"profile_generator.util.file.read_file\") def", "ProfileGeneratorTest(TestCase): @patch(\"sys.argv\", [\"app.py\", \"one.json\", \"two.json\"]) def test_get_config_files_returns_config_files(self) -> None: self.assertEqual([\"one.json\", \"two.json\"], generator.get_config_files()) @patch(\"sys.argv\",", ") -> None: read_file.return_value = \"file content\" self.assertEqual(\"file content\", generator.get_profile_template()) read_file.assert_called_once_with(\"/root/templates/raw_therapee.pp3\") @patch(\"profile_generator.util.file.read_file\") def", "= object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) def test_create_profile_content_should_create_profile_content(self) -> None:", "profile_generator import generator from profile_generator.generator import ( ConfigFileReadError, InvalidConfigFileError, NoConfigFileError, OutputDirCreationFailure, ProfileWriteError, TemplateFileReadError,", "\"two.json\"], 
generator.get_config_files()) @patch(\"sys.argv\", [\"app.py\"]) def test_get_config_files_raises_error_when_arguments_are_missing(self) -> None: self.assertRaises(NoConfigFileError, generator.get_config_files) @patch( \"profile_generator.util.file.create_dir\", lambda", ") -> None: create_dir.side_effect = OSError self.assertRaises(OutputDirCreationFailure, generator.create_output_dir) @patch(\"profile_generator.util.file.read_file\") @patch( \"profile_generator.util.file.get_full_path\", lambda *xs:", "lambda *xs: \"/root/\" + \"/\".join(xs) ) def test_get_profile_template_returns_template_file_content( self, read_file: Mock ) ->", "object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid_json( self, read_file:", "None: read_file.side_effect = OSError self.assertRaises(TemplateFileReadError, generator.get_profile_template) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_loads_configuration_files( self, read_file: Mock )", "create_dir: Mock ) -> None: create_dir.side_effect = OSError self.assertRaises(OutputDirCreationFailure, generator.create_output_dir) @patch(\"profile_generator.util.file.read_file\") @patch( \"profile_generator.util.file.get_full_path\",", "read_file.return_value = '{\"a\": false}' schema = object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema,", "content\" self.assertEqual(\"file content\", generator.get_profile_template()) read_file.assert_called_once_with(\"/root/templates/raw_therapee.pp3\") @patch(\"profile_generator.util.file.read_file\") def test_get_profile_template_raises_error_when_cannot_read_template_file( self, read_file: Mock ) ->", "def test_presist_profile_should_persist_profile(cls, write_file: Mock) -> None: 
name = \"profile_name\" content = \"1\" output_dir", "cfg = {\"a\": \"1\"} marshall = lambda x: x content = generator.create_profile_content(template, cfg,", "def test_load_configuration_file_raises_error_when_contains_variable_error( self, read_file: Mock ) -> None: read_file.return_value = '{\"a\": \"$a\"}' schema", "None: read_file.return_value = '{\"a\": \"$a\"}' schema = object_of({\"a\": type_of(str)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\",", "self, read_file: Mock ) -> None: read_file.return_value = '{\"a\": \"$a\"}' schema = object_of({\"a\":", "read_file: Mock ) -> None: read_file.side_effect = OSError self.assertRaises(TemplateFileReadError, generator.get_profile_template) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_loads_configuration_files(", "-> None: schema = object_of({\"a\": type_of(int)}) read_file.side_effect = OSError self.assertRaises( ConfigFileReadError, generator.load_configuration_file, \"config.json\",", "( ConfigFileReadError, InvalidConfigFileError, NoConfigFileError, OutputDirCreationFailure, ProfileWriteError, TemplateFileReadError, ) from profile_generator.schema import object_of, type_of", ") def test_create_output_dir_raises_returns_created_dir_path(self) -> None: self.assertEqual(\"/root/profiles\", generator.create_output_dir()) @patch(\"profile_generator.util.file.create_dir\") def test_create_output_dir_raises_error_when_cannot_create_dir( self, create_dir: Mock", "-> None: self.assertEqual(\"/root/profiles\", generator.create_output_dir()) @patch(\"profile_generator.util.file.create_dir\") def test_create_output_dir_raises_error_when_cannot_create_dir( self, create_dir: Mock ) -> None:", "schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_contains_variable_error( self, read_file: Mock ) -> None: read_file.return_value =", "ConfigFileReadError, InvalidConfigFileError, 
NoConfigFileError, OutputDirCreationFailure, ProfileWriteError, TemplateFileReadError, ) from profile_generator.schema import object_of, type_of class", "import generator from profile_generator.generator import ( ConfigFileReadError, InvalidConfigFileError, NoConfigFileError, OutputDirCreationFailure, ProfileWriteError, TemplateFileReadError, )", "2}' schema = object_of({\"a\": type_of(int)}) config = generator.load_configuration_file(\"config.json\", schema) self.assertEqual({\"a\": 2}, config) read_file.assert_called_once_with(\"config.json\")", "test_load_configuration_file_raises_error_when_config_file_cannot_be_read( self, read_file: Mock ) -> None: schema = object_of({\"a\": type_of(int)}) read_file.side_effect =", "generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid( self, read_file: Mock ) -> None:", "-> None: template = \"{a}\" cfg = {\"a\": \"1\"} marshall = lambda x:", "read_file.return_value = \"file content\" self.assertEqual(\"file content\", generator.get_profile_template()) read_file.assert_called_once_with(\"/root/templates/raw_therapee.pp3\") @patch(\"profile_generator.util.file.read_file\") def test_get_profile_template_raises_error_when_cannot_read_template_file( self, read_file:", "{\"a\": \"1\"} marshall = lambda x: x content = generator.create_profile_content(template, cfg, marshall) self.assertEqual(content,", "= OSError self.assertRaises( ConfigFileReadError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_contains_variable_error( self, read_file:", "test_load_configuration_file_loads_configuration_files( self, read_file: Mock ) -> None: read_file.return_value = '{\"a\": 2}' schema =", "ConfigFileReadError, generator.load_configuration_file, \"config.json\", schema, ) 
@patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_contains_variable_error( self, read_file: Mock ) ->", "read_file.side_effect = OSError self.assertRaises( ConfigFileReadError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_contains_variable_error( self,", "object_of({\"a\": type_of(int)}) config = generator.load_configuration_file(\"config.json\", schema) self.assertEqual({\"a\": 2}, config) read_file.assert_called_once_with(\"config.json\") @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_cannot_be_read(", "def test_load_configuration_file_raises_error_when_config_file_is_invalid_json( self, read_file: Mock ) -> None: read_file.return_value = '{\"a\": false' schema", "= \"dir\" generator.persist_profile(name, content, output_dir) write_file.assert_called_once_with(content, output_dir, f\"{name}.pp3\") @patch(\"profile_generator.util.file.write_file\") def test_persist_profile_should_raise_error_when_writing_file_failed( self, write_file:", "schema) self.assertEqual({\"a\": 2}, config) read_file.assert_called_once_with(\"config.json\") @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_cannot_be_read( self, read_file: Mock ) ->", "false}' schema = object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def", "\"/root/\" + \"/\".join(xs) ) def test_create_output_dir_raises_returns_created_dir_path(self) -> None: self.assertEqual(\"/root/profiles\", generator.create_output_dir()) @patch(\"profile_generator.util.file.create_dir\") def test_create_output_dir_raises_error_when_cannot_create_dir(", "self, create_dir: Mock ) -> None: 
create_dir.side_effect = OSError self.assertRaises(OutputDirCreationFailure, generator.create_output_dir) @patch(\"profile_generator.util.file.read_file\") @patch(", "self.assertRaises(NoConfigFileError, generator.get_config_files) @patch( \"profile_generator.util.file.create_dir\", lambda *xs: \"/root/\" + \"/\".join(xs) ) def test_create_output_dir_raises_returns_created_dir_path(self) ->", "schema = object_of({\"a\": type_of(str)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid(", "output_dir = \"dir\" generator.persist_profile(name, content, output_dir) write_file.assert_called_once_with(content, output_dir, f\"{name}.pp3\") @patch(\"profile_generator.util.file.write_file\") def test_persist_profile_should_raise_error_when_writing_file_failed( self,", "generator.get_profile_template()) read_file.assert_called_once_with(\"/root/templates/raw_therapee.pp3\") @patch(\"profile_generator.util.file.read_file\") def test_get_profile_template_raises_error_when_cannot_read_template_file( self, read_file: Mock ) -> None: read_file.side_effect =", "cfg, marshall) self.assertEqual(content, \"1\") @classmethod @patch(\"profile_generator.util.file.write_file\") def test_presist_profile_should_persist_profile(cls, write_file: Mock) -> None: name", "def test_create_output_dir_raises_error_when_cannot_create_dir( self, create_dir: Mock ) -> None: create_dir.side_effect = OSError self.assertRaises(OutputDirCreationFailure, generator.create_output_dir)", "\"/root/\" + \"/\".join(xs) ) def test_get_profile_template_returns_template_file_content( self, read_file: Mock ) -> None: read_file.return_value", "\"1\"} marshall = lambda x: x content = generator.create_profile_content(template, cfg, marshall) self.assertEqual(content, \"1\")", "generator.create_profile_content(template, cfg, marshall) 
self.assertEqual(content, \"1\") @classmethod @patch(\"profile_generator.util.file.write_file\") def test_presist_profile_should_persist_profile(cls, write_file: Mock) -> None:", "self, read_file: Mock ) -> None: read_file.return_value = \"file content\" self.assertEqual(\"file content\", generator.get_profile_template())", "\"/\".join(xs) ) def test_get_profile_template_returns_template_file_content( self, read_file: Mock ) -> None: read_file.return_value = \"file", "self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid_json( self, read_file: Mock )", "InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid_json( self, read_file: Mock ) ->", "= '{\"a\": 2}' schema = object_of({\"a\": type_of(int)}) config = generator.load_configuration_file(\"config.json\", schema) self.assertEqual({\"a\": 2},", "def test_load_configuration_file_raises_error_when_config_file_cannot_be_read( self, read_file: Mock ) -> None: schema = object_of({\"a\": type_of(int)}) read_file.side_effect", "schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid_json( self, read_file: Mock ) -> None: read_file.return_value =", "= \"{a}\" cfg = {\"a\": \"1\"} marshall = lambda x: x content =", "def test_get_profile_template_returns_template_file_content( self, read_file: Mock ) -> None: read_file.return_value = \"file content\" self.assertEqual(\"file", "test_load_configuration_file_raises_error_when_config_file_is_invalid( self, read_file: Mock ) -> None: read_file.return_value = '{\"a\": false}' schema =", "read_file.side_effect = OSError self.assertRaises(TemplateFileReadError, 
generator.get_profile_template) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_loads_configuration_files( self, read_file: Mock ) ->", "write_file.assert_called_once_with(content, output_dir, f\"{name}.pp3\") @patch(\"profile_generator.util.file.write_file\") def test_persist_profile_should_raise_error_when_writing_file_failed( self, write_file: Mock ) -> None: name", "NoConfigFileError, OutputDirCreationFailure, ProfileWriteError, TemplateFileReadError, ) from profile_generator.schema import object_of, type_of class ProfileGeneratorTest(TestCase): @patch(\"sys.argv\",", "None: read_file.return_value = '{\"a\": 2}' schema = object_of({\"a\": type_of(int)}) config = generator.load_configuration_file(\"config.json\", schema)", "\"one.json\", \"two.json\"]) def test_get_config_files_returns_config_files(self) -> None: self.assertEqual([\"one.json\", \"two.json\"], generator.get_config_files()) @patch(\"sys.argv\", [\"app.py\"]) def test_get_config_files_raises_error_when_arguments_are_missing(self)", "Mock ) -> None: read_file.return_value = \"file content\" self.assertEqual(\"file content\", generator.get_profile_template()) read_file.assert_called_once_with(\"/root/templates/raw_therapee.pp3\") @patch(\"profile_generator.util.file.read_file\")", "@patch( \"profile_generator.util.file.get_full_path\", lambda *xs: \"/root/\" + \"/\".join(xs) ) def test_get_profile_template_returns_template_file_content( self, read_file: Mock", "generator.load_configuration_file(\"config.json\", schema) self.assertEqual({\"a\": 2}, config) read_file.assert_called_once_with(\"config.json\") @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_cannot_be_read( self, read_file: Mock )", "from unittest.mock import Mock, patch from profile_generator import generator from profile_generator.generator import (", "\"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def 
test_load_configuration_file_raises_error_when_contains_variable_error( self, read_file: Mock ) -> None: read_file.return_value", "def test_create_profile_content_should_create_profile_content(self) -> None: template = \"{a}\" cfg = {\"a\": \"1\"} marshall =", "\"1\") @classmethod @patch(\"profile_generator.util.file.write_file\") def test_presist_profile_should_persist_profile(cls, write_file: Mock) -> None: name = \"profile_name\" content", "write_file: Mock ) -> None: name = \"profile_name\" content = \"1\" output_dir =", "object_of({\"a\": type_of(int)}) read_file.side_effect = OSError self.assertRaises( ConfigFileReadError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def", "'{\"a\": \"$a\"}' schema = object_of({\"a\": type_of(str)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\")", "profile_generator.schema import object_of, type_of class ProfileGeneratorTest(TestCase): @patch(\"sys.argv\", [\"app.py\", \"one.json\", \"two.json\"]) def test_get_config_files_returns_config_files(self) ->", "@patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_contains_variable_error( self, read_file: Mock ) -> None: read_file.return_value = '{\"a\": \"$a\"}'", "InvalidConfigFileError, NoConfigFileError, OutputDirCreationFailure, ProfileWriteError, TemplateFileReadError, ) from profile_generator.schema import object_of, type_of class ProfileGeneratorTest(TestCase):", "profile_generator.generator import ( ConfigFileReadError, InvalidConfigFileError, NoConfigFileError, OutputDirCreationFailure, ProfileWriteError, TemplateFileReadError, ) from profile_generator.schema import", "def test_get_profile_template_raises_error_when_cannot_read_template_file( self, read_file: Mock ) -> None: read_file.side_effect = OSError self.assertRaises(TemplateFileReadError, 
generator.get_profile_template)", "read_file: Mock ) -> None: read_file.return_value = '{\"a\": false' schema = object_of({\"a\": type_of(int)})", "generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_contains_variable_error( self, read_file: Mock ) -> None:", "\"config.json\", schema, ) def test_create_profile_content_should_create_profile_content(self) -> None: template = \"{a}\" cfg = {\"a\":", "-> None: create_dir.side_effect = OSError self.assertRaises(OutputDirCreationFailure, generator.create_output_dir) @patch(\"profile_generator.util.file.read_file\") @patch( \"profile_generator.util.file.get_full_path\", lambda *xs: \"/root/\"", "+ \"/\".join(xs) ) def test_get_profile_template_returns_template_file_content( self, read_file: Mock ) -> None: read_file.return_value =", "test_get_profile_template_raises_error_when_cannot_read_template_file( self, read_file: Mock ) -> None: read_file.side_effect = OSError self.assertRaises(TemplateFileReadError, generator.get_profile_template) @patch(\"profile_generator.util.file.read_file\")", "Mock ) -> None: read_file.return_value = '{\"a\": false}' schema = object_of({\"a\": type_of(int)}) self.assertRaises(", "read_file: Mock ) -> None: read_file.return_value = \"file content\" self.assertEqual(\"file content\", generator.get_profile_template()) read_file.assert_called_once_with(\"/root/templates/raw_therapee.pp3\")", "content = \"1\" output_dir = \"dir\" write_file.side_effect = OSError self.assertRaises( ProfileWriteError, generator.persist_profile, name,", "-> None: self.assertRaises(NoConfigFileError, generator.get_config_files) @patch( \"profile_generator.util.file.create_dir\", lambda *xs: \"/root/\" + \"/\".join(xs) ) def", "OSError self.assertRaises( ConfigFileReadError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def 
test_load_configuration_file_raises_error_when_contains_variable_error( self, read_file: Mock", "None: read_file.return_value = '{\"a\": false}' schema = object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\",", "@patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid_json( self, read_file: Mock ) -> None: read_file.return_value = '{\"a\": false'", "from unittest import TestCase from unittest.mock import Mock, patch from profile_generator import generator", "marshall) self.assertEqual(content, \"1\") @classmethod @patch(\"profile_generator.util.file.write_file\") def test_presist_profile_should_persist_profile(cls, write_file: Mock) -> None: name =", "object_of({\"a\": type_of(str)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid( self, read_file:", "self.assertRaises( ConfigFileReadError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_contains_variable_error( self, read_file: Mock )", "\"profile_name\" content = \"1\" output_dir = \"dir\" generator.persist_profile(name, content, output_dir) write_file.assert_called_once_with(content, output_dir, f\"{name}.pp3\")", ") def test_create_profile_content_should_create_profile_content(self) -> None: template = \"{a}\" cfg = {\"a\": \"1\"} marshall", "name = \"profile_name\" content = \"1\" output_dir = \"dir\" write_file.side_effect = OSError self.assertRaises(", "output_dir, f\"{name}.pp3\") @patch(\"profile_generator.util.file.write_file\") def test_persist_profile_should_raise_error_when_writing_file_failed( self, write_file: Mock ) -> None: name =", "self, read_file: Mock ) -> None: 
schema = object_of({\"a\": type_of(int)}) read_file.side_effect = OSError", "\"dir\" generator.persist_profile(name, content, output_dir) write_file.assert_called_once_with(content, output_dir, f\"{name}.pp3\") @patch(\"profile_generator.util.file.write_file\") def test_persist_profile_should_raise_error_when_writing_file_failed( self, write_file: Mock", "read_file: Mock ) -> None: schema = object_of({\"a\": type_of(int)}) read_file.side_effect = OSError self.assertRaises(", "from profile_generator.generator import ( ConfigFileReadError, InvalidConfigFileError, NoConfigFileError, OutputDirCreationFailure, ProfileWriteError, TemplateFileReadError, ) from profile_generator.schema", "@patch(\"profile_generator.util.file.create_dir\") def test_create_output_dir_raises_error_when_cannot_create_dir( self, create_dir: Mock ) -> None: create_dir.side_effect = OSError self.assertRaises(OutputDirCreationFailure,", "InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid( self, read_file: Mock ) ->", "= object_of({\"a\": type_of(str)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid( self,", "\"profile_generator.util.file.create_dir\", lambda *xs: \"/root/\" + \"/\".join(xs) ) def test_create_output_dir_raises_returns_created_dir_path(self) -> None: self.assertEqual(\"/root/profiles\", generator.create_output_dir())", "generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid_json( self, read_file: Mock ) -> None:", "self, read_file: Mock ) -> None: read_file.return_value = '{\"a\": 2}' schema = object_of({\"a\":", 
"@patch(\"sys.argv\", [\"app.py\"]) def test_get_config_files_raises_error_when_arguments_are_missing(self) -> None: self.assertRaises(NoConfigFileError, generator.get_config_files) @patch( \"profile_generator.util.file.create_dir\", lambda *xs: \"/root/\"", "read_file.assert_called_once_with(\"config.json\") @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_cannot_be_read( self, read_file: Mock ) -> None: schema = object_of({\"a\":", "OSError self.assertRaises(TemplateFileReadError, generator.get_profile_template) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_loads_configuration_files( self, read_file: Mock ) -> None: read_file.return_value", "= object_of({\"a\": type_of(int)}) read_file.side_effect = OSError self.assertRaises( ConfigFileReadError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\")", "test_load_configuration_file_raises_error_when_config_file_is_invalid_json( self, read_file: Mock ) -> None: read_file.return_value = '{\"a\": false' schema =", "x content = generator.create_profile_content(template, cfg, marshall) self.assertEqual(content, \"1\") @classmethod @patch(\"profile_generator.util.file.write_file\") def test_presist_profile_should_persist_profile(cls, write_file:", "read_file.assert_called_once_with(\"/root/templates/raw_therapee.pp3\") @patch(\"profile_generator.util.file.read_file\") def test_get_profile_template_raises_error_when_cannot_read_template_file( self, read_file: Mock ) -> None: read_file.side_effect = OSError", "def test_load_configuration_file_loads_configuration_files( self, read_file: Mock ) -> None: read_file.return_value = '{\"a\": 2}' schema", "-> None: read_file.return_value = '{\"a\": false}' schema = object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file,", "generator.get_profile_template) 
@patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_loads_configuration_files( self, read_file: Mock ) -> None: read_file.return_value = '{\"a\":", "lambda *xs: \"/root/\" + \"/\".join(xs) ) def test_create_output_dir_raises_returns_created_dir_path(self) -> None: self.assertEqual(\"/root/profiles\", generator.create_output_dir()) @patch(\"profile_generator.util.file.create_dir\")", "name = \"profile_name\" content = \"1\" output_dir = \"dir\" generator.persist_profile(name, content, output_dir) write_file.assert_called_once_with(content,", "\"file content\" self.assertEqual(\"file content\", generator.get_profile_template()) read_file.assert_called_once_with(\"/root/templates/raw_therapee.pp3\") @patch(\"profile_generator.util.file.read_file\") def test_get_profile_template_raises_error_when_cannot_read_template_file( self, read_file: Mock )", "import ( ConfigFileReadError, InvalidConfigFileError, NoConfigFileError, OutputDirCreationFailure, ProfileWriteError, TemplateFileReadError, ) from profile_generator.schema import object_of,", "content = generator.create_profile_content(template, cfg, marshall) self.assertEqual(content, \"1\") @classmethod @patch(\"profile_generator.util.file.write_file\") def test_presist_profile_should_persist_profile(cls, write_file: Mock)", "content, output_dir) write_file.assert_called_once_with(content, output_dir, f\"{name}.pp3\") @patch(\"profile_generator.util.file.write_file\") def test_persist_profile_should_raise_error_when_writing_file_failed( self, write_file: Mock ) ->", "2}, config) read_file.assert_called_once_with(\"config.json\") @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_cannot_be_read( self, read_file: Mock ) -> None: schema", "def test_load_configuration_file_raises_error_when_config_file_is_invalid( self, read_file: Mock ) -> None: read_file.return_value = '{\"a\": false}' schema", ") -> None: read_file.return_value = 
'{\"a\": false}' schema = object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError,", "\"profile_name\" content = \"1\" output_dir = \"dir\" write_file.side_effect = OSError self.assertRaises( ProfileWriteError, generator.persist_profile,", "= '{\"a\": false}' schema = object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, )", "Mock ) -> None: schema = object_of({\"a\": type_of(int)}) read_file.side_effect = OSError self.assertRaises( ConfigFileReadError,", "test_persist_profile_should_raise_error_when_writing_file_failed( self, write_file: Mock ) -> None: name = \"profile_name\" content = \"1\"", "-> None: name = \"profile_name\" content = \"1\" output_dir = \"dir\" write_file.side_effect =", "\"{a}\" cfg = {\"a\": \"1\"} marshall = lambda x: x content = generator.create_profile_content(template,", "generator.create_output_dir) @patch(\"profile_generator.util.file.read_file\") @patch( \"profile_generator.util.file.get_full_path\", lambda *xs: \"/root/\" + \"/\".join(xs) ) def test_get_profile_template_returns_template_file_content( self,", "import Mock, patch from profile_generator import generator from profile_generator.generator import ( ConfigFileReadError, InvalidConfigFileError,", "*xs: \"/root/\" + \"/\".join(xs) ) def test_get_profile_template_returns_template_file_content( self, read_file: Mock ) -> None:", "read_file.return_value = '{\"a\": \"$a\"}' schema = object_of({\"a\": type_of(str)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema,", "self.assertEqual([\"one.json\", \"two.json\"], generator.get_config_files()) @patch(\"sys.argv\", [\"app.py\"]) def test_get_config_files_raises_error_when_arguments_are_missing(self) -> None: self.assertRaises(NoConfigFileError, generator.get_config_files) @patch( \"profile_generator.util.file.create_dir\",", "\"config.json\", schema, ) 
@patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid_json( self, read_file: Mock ) -> None: read_file.return_value", "= \"profile_name\" content = \"1\" output_dir = \"dir\" generator.persist_profile(name, content, output_dir) write_file.assert_called_once_with(content, output_dir,", "self.assertEqual(\"/root/profiles\", generator.create_output_dir()) @patch(\"profile_generator.util.file.create_dir\") def test_create_output_dir_raises_error_when_cannot_create_dir( self, create_dir: Mock ) -> None: create_dir.side_effect =", "= object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid_json( self,", "= \"1\" output_dir = \"dir\" generator.persist_profile(name, content, output_dir) write_file.assert_called_once_with(content, output_dir, f\"{name}.pp3\") @patch(\"profile_generator.util.file.write_file\") def", "\"/\".join(xs) ) def test_create_output_dir_raises_returns_created_dir_path(self) -> None: self.assertEqual(\"/root/profiles\", generator.create_output_dir()) @patch(\"profile_generator.util.file.create_dir\") def test_create_output_dir_raises_error_when_cannot_create_dir( self, create_dir:", "[\"app.py\", \"one.json\", \"two.json\"]) def test_get_config_files_returns_config_files(self) -> None: self.assertEqual([\"one.json\", \"two.json\"], generator.get_config_files()) @patch(\"sys.argv\", [\"app.py\"]) def", "OutputDirCreationFailure, ProfileWriteError, TemplateFileReadError, ) from profile_generator.schema import object_of, type_of class ProfileGeneratorTest(TestCase): @patch(\"sys.argv\", [\"app.py\",", "= \"file content\" self.assertEqual(\"file content\", generator.get_profile_template()) read_file.assert_called_once_with(\"/root/templates/raw_therapee.pp3\") 
@patch(\"profile_generator.util.file.read_file\") def test_get_profile_template_raises_error_when_cannot_read_template_file( self, read_file: Mock", "-> None: read_file.return_value = '{\"a\": false' schema = object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file,", "@classmethod @patch(\"profile_generator.util.file.write_file\") def test_presist_profile_should_persist_profile(cls, write_file: Mock) -> None: name = \"profile_name\" content =", "type_of(int)}) config = generator.load_configuration_file(\"config.json\", schema) self.assertEqual({\"a\": 2}, config) read_file.assert_called_once_with(\"config.json\") @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_cannot_be_read( self,", "-> None: self.assertEqual([\"one.json\", \"two.json\"], generator.get_config_files()) @patch(\"sys.argv\", [\"app.py\"]) def test_get_config_files_raises_error_when_arguments_are_missing(self) -> None: self.assertRaises(NoConfigFileError, generator.get_config_files)", "from profile_generator.schema import object_of, type_of class ProfileGeneratorTest(TestCase): @patch(\"sys.argv\", [\"app.py\", \"one.json\", \"two.json\"]) def test_get_config_files_returns_config_files(self)", "test_load_configuration_file_raises_error_when_contains_variable_error( self, read_file: Mock ) -> None: read_file.return_value = '{\"a\": \"$a\"}' schema =", "Mock ) -> None: read_file.return_value = '{\"a\": false' schema = object_of({\"a\": type_of(int)}) self.assertRaises(", "output_dir = \"dir\" write_file.side_effect = OSError self.assertRaises( ProfileWriteError, generator.persist_profile, name, content, output_dir, )", "= OSError self.assertRaises(TemplateFileReadError, generator.get_profile_template) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_loads_configuration_files( self, read_file: Mock ) -> None:", "self.assertRaises(TemplateFileReadError, 
generator.get_profile_template) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_loads_configuration_files( self, read_file: Mock ) -> None: read_file.return_value =", ") -> None: read_file.return_value = '{\"a\": false' schema = object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError,", "Mock ) -> None: create_dir.side_effect = OSError self.assertRaises(OutputDirCreationFailure, generator.create_output_dir) @patch(\"profile_generator.util.file.read_file\") @patch( \"profile_generator.util.file.get_full_path\", lambda", "-> None: read_file.side_effect = OSError self.assertRaises(TemplateFileReadError, generator.get_profile_template) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_loads_configuration_files( self, read_file: Mock", "OSError self.assertRaises(OutputDirCreationFailure, generator.create_output_dir) @patch(\"profile_generator.util.file.read_file\") @patch( \"profile_generator.util.file.get_full_path\", lambda *xs: \"/root/\" + \"/\".join(xs) ) def", "\"1\" output_dir = \"dir\" generator.persist_profile(name, content, output_dir) write_file.assert_called_once_with(content, output_dir, f\"{name}.pp3\") @patch(\"profile_generator.util.file.write_file\") def test_persist_profile_should_raise_error_when_writing_file_failed(", "-> None: read_file.return_value = \"file content\" self.assertEqual(\"file content\", generator.get_profile_template()) read_file.assert_called_once_with(\"/root/templates/raw_therapee.pp3\") @patch(\"profile_generator.util.file.read_file\") def test_get_profile_template_raises_error_when_cannot_read_template_file(", ") -> None: read_file.return_value = '{\"a\": 2}' schema = object_of({\"a\": type_of(int)}) config =", ") -> None: schema = object_of({\"a\": type_of(int)}) read_file.side_effect = OSError self.assertRaises( ConfigFileReadError, generator.load_configuration_file,", "schema, ) def 
test_create_profile_content_should_create_profile_content(self) -> None: template = \"{a}\" cfg = {\"a\": \"1\"}", ") @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid_json( self, read_file: Mock ) -> None: read_file.return_value = '{\"a\":", "def test_persist_profile_should_raise_error_when_writing_file_failed( self, write_file: Mock ) -> None: name = \"profile_name\" content =", "schema = object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) def test_create_profile_content_should_create_profile_content(self) ->", "test_create_profile_content_should_create_profile_content(self) -> None: template = \"{a}\" cfg = {\"a\": \"1\"} marshall = lambda", "*xs: \"/root/\" + \"/\".join(xs) ) def test_create_output_dir_raises_returns_created_dir_path(self) -> None: self.assertEqual(\"/root/profiles\", generator.create_output_dir()) @patch(\"profile_generator.util.file.create_dir\") def", "'{\"a\": 2}' schema = object_of({\"a\": type_of(int)}) config = generator.load_configuration_file(\"config.json\", schema) self.assertEqual({\"a\": 2}, config)", "unittest import TestCase from unittest.mock import Mock, patch from profile_generator import generator from", "def test_get_config_files_returns_config_files(self) -> None: self.assertEqual([\"one.json\", \"two.json\"], generator.get_config_files()) @patch(\"sys.argv\", [\"app.py\"]) def test_get_config_files_raises_error_when_arguments_are_missing(self) -> None:", "generator.load_configuration_file, \"config.json\", schema, ) def test_create_profile_content_should_create_profile_content(self) -> None: template = \"{a}\" cfg =", "-> None: name = \"profile_name\" content = \"1\" output_dir = \"dir\" generator.persist_profile(name, content,", "= {\"a\": \"1\"} marshall = lambda x: x content = generator.create_profile_content(template, cfg, marshall)", "schema, ) 
@patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid( self, read_file: Mock ) -> None: read_file.return_value =", "TestCase from unittest.mock import Mock, patch from profile_generator import generator from profile_generator.generator import", "schema = object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid_json(", "@patch(\"profile_generator.util.file.read_file\") def test_get_profile_template_raises_error_when_cannot_read_template_file( self, read_file: Mock ) -> None: read_file.side_effect = OSError self.assertRaises(TemplateFileReadError,", "type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid_json( self, read_file: Mock", "unittest.mock import Mock, patch from profile_generator import generator from profile_generator.generator import ( ConfigFileReadError,", "\"profile_generator.util.file.get_full_path\", lambda *xs: \"/root/\" + \"/\".join(xs) ) def test_get_profile_template_returns_template_file_content( self, read_file: Mock )", "type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) def test_create_profile_content_should_create_profile_content(self) -> None: template =", "content = \"1\" output_dir = \"dir\" generator.persist_profile(name, content, output_dir) write_file.assert_called_once_with(content, output_dir, f\"{name}.pp3\") @patch(\"profile_generator.util.file.write_file\")", "@patch(\"sys.argv\", [\"app.py\", \"one.json\", \"two.json\"]) def test_get_config_files_returns_config_files(self) -> None: 
self.assertEqual([\"one.json\", \"two.json\"], generator.get_config_files()) @patch(\"sys.argv\", [\"app.py\"])", "None: read_file.return_value = \"file content\" self.assertEqual(\"file content\", generator.get_profile_template()) read_file.assert_called_once_with(\"/root/templates/raw_therapee.pp3\") @patch(\"profile_generator.util.file.read_file\") def test_get_profile_template_raises_error_when_cannot_read_template_file( self,", "= \"1\" output_dir = \"dir\" write_file.side_effect = OSError self.assertRaises( ProfileWriteError, generator.persist_profile, name, content,", ") -> None: read_file.return_value = '{\"a\": \"$a\"}' schema = object_of({\"a\": type_of(str)}) self.assertRaises( InvalidConfigFileError,", "'{\"a\": false}' schema = object_of({\"a\": type_of(int)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\")", "\"two.json\"]) def test_get_config_files_returns_config_files(self) -> None: self.assertEqual([\"one.json\", \"two.json\"], generator.get_config_files()) @patch(\"sys.argv\", [\"app.py\"]) def test_get_config_files_raises_error_when_arguments_are_missing(self) ->", "Mock ) -> None: read_file.return_value = '{\"a\": \"$a\"}' schema = object_of({\"a\": type_of(str)}) self.assertRaises(", "x: x content = generator.create_profile_content(template, cfg, marshall) self.assertEqual(content, \"1\") @classmethod @patch(\"profile_generator.util.file.write_file\") def test_presist_profile_should_persist_profile(cls,", "self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_is_invalid( self, read_file: Mock )", "read_file: Mock ) -> None: read_file.return_value = '{\"a\": false}' schema = object_of({\"a\": type_of(int)})", "'{\"a\": false' schema = object_of({\"a\": type_of(int)}) 
self.assertRaises( InvalidConfigFileError, generator.load_configuration_file, \"config.json\", schema, ) def", "from profile_generator import generator from profile_generator.generator import ( ConfigFileReadError, InvalidConfigFileError, NoConfigFileError, OutputDirCreationFailure, ProfileWriteError,", "config = generator.load_configuration_file(\"config.json\", schema) self.assertEqual({\"a\": 2}, config) read_file.assert_called_once_with(\"config.json\") @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_cannot_be_read( self, read_file:", "self.assertEqual({\"a\": 2}, config) read_file.assert_called_once_with(\"config.json\") @patch(\"profile_generator.util.file.read_file\") def test_load_configuration_file_raises_error_when_config_file_cannot_be_read( self, read_file: Mock ) -> None:", "generator.get_config_files) @patch( \"profile_generator.util.file.create_dir\", lambda *xs: \"/root/\" + \"/\".join(xs) ) def test_create_output_dir_raises_returns_created_dir_path(self) -> None:", "output_dir) write_file.assert_called_once_with(content, output_dir, f\"{name}.pp3\") @patch(\"profile_generator.util.file.write_file\") def test_persist_profile_should_raise_error_when_writing_file_failed( self, write_file: Mock ) -> None:", "template = \"{a}\" cfg = {\"a\": \"1\"} marshall = lambda x: x content", "import TestCase from unittest.mock import Mock, patch from profile_generator import generator from profile_generator.generator", "[\"app.py\"]) def test_get_config_files_raises_error_when_arguments_are_missing(self) -> None: self.assertRaises(NoConfigFileError, generator.get_config_files) @patch( \"profile_generator.util.file.create_dir\", lambda *xs: \"/root/\" +", "-> None: read_file.return_value = '{\"a\": \"$a\"}' schema = object_of({\"a\": type_of(str)}) self.assertRaises( InvalidConfigFileError, generator.load_configuration_file," ]
[ "<reponame>rdmolony/scaffold<filename>lib/conda.py def run_in_env(c, command, env): commands = ['eval \"$(conda shell.bash hook)\"', f'conda activate", "env): commands = ['eval \"$(conda shell.bash hook)\"', f'conda activate {env}'] commands.append(command) c.run(' &&", "run_in_env(c, command, env): commands = ['eval \"$(conda shell.bash hook)\"', f'conda activate {env}'] commands.append(command)", "def run_in_env(c, command, env): commands = ['eval \"$(conda shell.bash hook)\"', f'conda activate {env}']", "command, env): commands = ['eval \"$(conda shell.bash hook)\"', f'conda activate {env}'] commands.append(command) c.run('", "commands = ['eval \"$(conda shell.bash hook)\"', f'conda activate {env}'] commands.append(command) c.run(' && '.join(commands))" ]
[ "formatter = logging.formatter(format_string) file_handler.setFormatter(formatter) logger = logging.getLogger() logger.addHandler(file_handler) logging.StreamHandler() try: run() except: logging.exception('Got", "anything above log level logging DEBUG which is everything. logging.basicConfig(filename=\"filename.log\",encoding=\"utf-8\",level = logging.DEBUG) logging.debug()", "loggging file in ~/filename.log with encoding utf-8 and anything above log level logging", "\"filename.log\") file_handler.setLevel(logging.DEBUG) file_handler.set_name() format_string = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\" formatter", "%(message)s\" formatter = logging.formatter(format_string) file_handler.setFormatter(formatter) logger = logging.getLogger() logger.addHandler(file_handler) logging.StreamHandler() try: run() except:", "DEBUG which is everything. logging.basicConfig(filename=\"filename.log\",encoding=\"utf-8\",level = logging.DEBUG) logging.debug() logging.info() logging,warning() logging.error() logging.critical() #", "handler') raise #logging level level:numeric_value CRITICAL : 50 ERROR : 40 WARNING :", "above log level logging DEBUG which is everything. logging.basicConfig(filename=\"filename.log\",encoding=\"utf-8\",level = logging.DEBUG) logging.debug() logging.info()", "exception on main handler') raise #logging level level:numeric_value CRITICAL : 50 ERROR :", "~/filename.log with encoding utf-8 and anything above log level logging DEBUG which is", "logging.info() logging,warning() logging.error() logging.critical() # One logger go to one file_handler, one logger", "one file_handler, one logger go to different file_handle with different log level. 
file_handler", "= logging.formatter(format_string) file_handler.setFormatter(formatter) logger = logging.getLogger() logger.addHandler(file_handler) logging.StreamHandler() try: run() except: logging.exception('Got exception", "format_string = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\" formatter = logging.formatter(format_string) file_handler.setFormatter(formatter)", "40 WARNING : 30 INFO : 20 DEBUG : 10 NOTSET : 0", "file_handler.setFormatter(formatter) logger = logging.getLogger() logger.addHandler(file_handler) logging.StreamHandler() try: run() except: logging.exception('Got exception on main", "one logger go to different file_handle with different log level. file_handler = logging.FileHandler(filename", "logging DEBUG which is everything. logging.basicConfig(filename=\"filename.log\",encoding=\"utf-8\",level = logging.DEBUG) logging.debug() logging.info() logging,warning() logging.error() logging.critical()", "logging.formatter(format_string) file_handler.setFormatter(formatter) logger = logging.getLogger() logger.addHandler(file_handler) logging.StreamHandler() try: run() except: logging.exception('Got exception on", "log level. file_handler = logging.FileHandler(filename = \"filename.log\") file_handler.setLevel(logging.DEBUG) file_handler.set_name() format_string = \"%(asctime)s -", "file_handler.set_name() format_string = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\" formatter = logging.formatter(format_string)", ": 50 ERROR : 40 WARNING : 30 INFO : 20 DEBUG :", "level level:numeric_value CRITICAL : 50 ERROR : 40 WARNING : 30 INFO :", "run() except: logging.exception('Got exception on main handler') raise #logging level level:numeric_value CRITICAL :", "= \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\" formatter = logging.formatter(format_string) file_handler.setFormatter(formatter) logger", "logger go to different file_handle with different log level. 
file_handler = logging.FileHandler(filename =", "level:numeric_value CRITICAL : 50 ERROR : 40 WARNING : 30 INFO : 20", "import logging #store loggging file in ~/filename.log with encoding utf-8 and anything above", "level logging DEBUG which is everything. logging.basicConfig(filename=\"filename.log\",encoding=\"utf-8\",level = logging.DEBUG) logging.debug() logging.info() logging,warning() logging.error()", "with encoding utf-8 and anything above log level logging DEBUG which is everything.", "go to different file_handle with different log level. file_handler = logging.FileHandler(filename = \"filename.log\")", "ERROR : 40 WARNING : 30 INFO : 20 DEBUG : 10 NOTSET", "= logging.getLogger() logger.addHandler(file_handler) logging.StreamHandler() try: run() except: logging.exception('Got exception on main handler') raise", "logging.DEBUG) logging.debug() logging.info() logging,warning() logging.error() logging.critical() # One logger go to one file_handler,", "and anything above log level logging DEBUG which is everything. logging.basicConfig(filename=\"filename.log\",encoding=\"utf-8\",level = logging.DEBUG)", "to one file_handler, one logger go to different file_handle with different log level.", "which is everything. logging.basicConfig(filename=\"filename.log\",encoding=\"utf-8\",level = logging.DEBUG) logging.debug() logging.info() logging,warning() logging.error() logging.critical() # One", "on main handler') raise #logging level level:numeric_value CRITICAL : 50 ERROR : 40", "- %(levelname)s - %(message)s\" formatter = logging.formatter(format_string) file_handler.setFormatter(formatter) logger = logging.getLogger() logger.addHandler(file_handler) logging.StreamHandler()", "#logging level level:numeric_value CRITICAL : 50 ERROR : 40 WARNING : 30 INFO", "except: logging.exception('Got exception on main handler') raise #logging level level:numeric_value CRITICAL : 50", "different file_handle with different log level. 
file_handler = logging.FileHandler(filename = \"filename.log\") file_handler.setLevel(logging.DEBUG) file_handler.set_name()", "logging #store loggging file in ~/filename.log with encoding utf-8 and anything above log", "One logger go to one file_handler, one logger go to different file_handle with", "logging,warning() logging.error() logging.critical() # One logger go to one file_handler, one logger go", "#store loggging file in ~/filename.log with encoding utf-8 and anything above log level", "%(name)s - %(levelname)s - %(message)s\" formatter = logging.formatter(format_string) file_handler.setFormatter(formatter) logger = logging.getLogger() logger.addHandler(file_handler)", "encoding utf-8 and anything above log level logging DEBUG which is everything. logging.basicConfig(filename=\"filename.log\",encoding=\"utf-8\",level", "file_handle with different log level. file_handler = logging.FileHandler(filename = \"filename.log\") file_handler.setLevel(logging.DEBUG) file_handler.set_name() format_string", "logging.getLogger() logger.addHandler(file_handler) logging.StreamHandler() try: run() except: logging.exception('Got exception on main handler') raise #logging", "logger.addHandler(file_handler) logging.StreamHandler() try: run() except: logging.exception('Got exception on main handler') raise #logging level", "log level logging DEBUG which is everything. logging.basicConfig(filename=\"filename.log\",encoding=\"utf-8\",level = logging.DEBUG) logging.debug() logging.info() logging,warning()", "everything. logging.basicConfig(filename=\"filename.log\",encoding=\"utf-8\",level = logging.DEBUG) logging.debug() logging.info() logging,warning() logging.error() logging.critical() # One logger go", ": 40 WARNING : 30 INFO : 20 DEBUG : 10 NOTSET :", "file in ~/filename.log with encoding utf-8 and anything above log level logging DEBUG", "level. 
file_handler = logging.FileHandler(filename = \"filename.log\") file_handler.setLevel(logging.DEBUG) file_handler.set_name() format_string = \"%(asctime)s - %(name)s", "in ~/filename.log with encoding utf-8 and anything above log level logging DEBUG which", "= logging.FileHandler(filename = \"filename.log\") file_handler.setLevel(logging.DEBUG) file_handler.set_name() format_string = \"%(asctime)s - %(name)s - %(levelname)s", "- %(message)s\" formatter = logging.formatter(format_string) file_handler.setFormatter(formatter) logger = logging.getLogger() logger.addHandler(file_handler) logging.StreamHandler() try: run()", "# One logger go to one file_handler, one logger go to different file_handle", "logging.exception('Got exception on main handler') raise #logging level level:numeric_value CRITICAL : 50 ERROR", "CRITICAL : 50 ERROR : 40 WARNING : 30 INFO : 20 DEBUG", "logging.critical() # One logger go to one file_handler, one logger go to different", "logging.basicConfig(filename=\"filename.log\",encoding=\"utf-8\",level = logging.DEBUG) logging.debug() logging.info() logging,warning() logging.error() logging.critical() # One logger go to", "with different log level. file_handler = logging.FileHandler(filename = \"filename.log\") file_handler.setLevel(logging.DEBUG) file_handler.set_name() format_string =", "= logging.DEBUG) logging.debug() logging.info() logging,warning() logging.error() logging.critical() # One logger go to one", "logging.StreamHandler() try: run() except: logging.exception('Got exception on main handler') raise #logging level level:numeric_value", "different log level. 
file_handler = logging.FileHandler(filename = \"filename.log\") file_handler.setLevel(logging.DEBUG) file_handler.set_name() format_string = \"%(asctime)s", "raise #logging level level:numeric_value CRITICAL : 50 ERROR : 40 WARNING : 30", "logging.debug() logging.info() logging,warning() logging.error() logging.critical() # One logger go to one file_handler, one", "\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\" formatter = logging.formatter(format_string) file_handler.setFormatter(formatter) logger =", "logger go to one file_handler, one logger go to different file_handle with different", "50 ERROR : 40 WARNING : 30 INFO : 20 DEBUG : 10", "logger = logging.getLogger() logger.addHandler(file_handler) logging.StreamHandler() try: run() except: logging.exception('Got exception on main handler')", "= \"filename.log\") file_handler.setLevel(logging.DEBUG) file_handler.set_name() format_string = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"", "to different file_handle with different log level. file_handler = logging.FileHandler(filename = \"filename.log\") file_handler.setLevel(logging.DEBUG)", "go to one file_handler, one logger go to different file_handle with different log", "is everything. logging.basicConfig(filename=\"filename.log\",encoding=\"utf-8\",level = logging.DEBUG) logging.debug() logging.info() logging,warning() logging.error() logging.critical() # One logger", "utf-8 and anything above log level logging DEBUG which is everything. 
logging.basicConfig(filename=\"filename.log\",encoding=\"utf-8\",level =", "%(levelname)s - %(message)s\" formatter = logging.formatter(format_string) file_handler.setFormatter(formatter) logger = logging.getLogger() logger.addHandler(file_handler) logging.StreamHandler() try:", "file_handler = logging.FileHandler(filename = \"filename.log\") file_handler.setLevel(logging.DEBUG) file_handler.set_name() format_string = \"%(asctime)s - %(name)s -", "logging.FileHandler(filename = \"filename.log\") file_handler.setLevel(logging.DEBUG) file_handler.set_name() format_string = \"%(asctime)s - %(name)s - %(levelname)s -", "try: run() except: logging.exception('Got exception on main handler') raise #logging level level:numeric_value CRITICAL", "logging.error() logging.critical() # One logger go to one file_handler, one logger go to", "main handler') raise #logging level level:numeric_value CRITICAL : 50 ERROR : 40 WARNING", "file_handler, one logger go to different file_handle with different log level. file_handler =", "file_handler.setLevel(logging.DEBUG) file_handler.set_name() format_string = \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\" formatter =", "- %(name)s - %(levelname)s - %(message)s\" formatter = logging.formatter(format_string) file_handler.setFormatter(formatter) logger = logging.getLogger()" ]
[ "min_length=5, write_only=True) username = serializers.CharField(max_length=150, min_length=5, write_only=True) def validate(self, attrs): \"\"\"Validate credentials and", "password = serializers.CharField(max_length=150, min_length=6, write_only=True) def create(self, validated_data): \"\"\"Create a new user.\"\"\" return", "username = serializers.CharField(max_length=150, min_length=5, write_only=True) def validate(self, attrs): \"\"\"Validate credentials and get user", "username = attrs.get('username', '') password = attrs.get('password', '') user = authenticate(username=username, password=password) if", "for login user.\"\"\" password = serializers.CharField(max_length=150, min_length=5, write_only=True) username = serializers.CharField(max_length=150, min_length=5, write_only=True)", "authenticate from django.contrib.auth import get_user_model from django.utils.translation import gettext_lazy as _ from rest_framework", "'username': { 'required': True }, 'role': { 'required': True }, 'password': { 'required':", "from django.contrib.auth import authenticate from django.contrib.auth import get_user_model from django.utils.translation import gettext_lazy as", "and get user tokens.\"\"\" username = attrs.get('username', '') password = attrs.get('password', '') user", "User.objects.create_user(**validated_data) class Meta: \"\"\"Meta information for signup serializer.\"\"\" model = User fields =", "raise AuthenticationFailed(_('Invalid credentials')) refresh = RefreshToken.for_user(user) return {'access': str(refresh.access_token), 'refresh': str(refresh)} class Meta:", "up credentials' class LoginSerializer(serializers.Serializer): \"\"\"Serializer for login user.\"\"\" password = serializers.CharField(max_length=150, min_length=5, write_only=True)", "rest_framework_simplejwt.tokens import RefreshToken User = get_user_model() class SignupSerializer(serializers.ModelSerializer): \"\"\"Serializer for signup user.\"\"\" password", "user.\"\"\" return 
User.objects.create_user(**validated_data) class Meta: \"\"\"Meta information for signup serializer.\"\"\" model = User", "'') password = attrs.get('password', '') user = authenticate(username=username, password=password) if not user: raise", "{ 'required': True }, 'password': { 'required': True } } ref_name = 'Sign", "ref_name = 'Sign up credentials' class LoginSerializer(serializers.Serializer): \"\"\"Serializer for login user.\"\"\" password =", "\"\"\"Serializer for login user.\"\"\" password = serializers.CharField(max_length=150, min_length=5, write_only=True) username = serializers.CharField(max_length=150, min_length=5,", "import gettext_lazy as _ from rest_framework import serializers from rest_framework.exceptions import AuthenticationFailed from", "login user.\"\"\" password = serializers.CharField(max_length=150, min_length=5, write_only=True) username = serializers.CharField(max_length=150, min_length=5, write_only=True) def", "AuthenticationFailed(_('Invalid credentials')) refresh = RefreshToken.for_user(user) return {'access': str(refresh.access_token), 'refresh': str(refresh)} class Meta: \"\"\"Meta", "RefreshToken.for_user(user) return {'access': str(refresh.access_token), 'refresh': str(refresh)} class Meta: \"\"\"Meta information for login serializer.\"\"\"", "User = get_user_model() class SignupSerializer(serializers.ModelSerializer): \"\"\"Serializer for signup user.\"\"\" password = serializers.CharField(max_length=150, min_length=6,", "extra_kwargs = { 'username': { 'required': True }, 'role': { 'required': True },", "password=password) if not user: raise AuthenticationFailed(_('Invalid credentials')) refresh = RefreshToken.for_user(user) return {'access': str(refresh.access_token),", "user = authenticate(username=username, password=password) if not user: raise AuthenticationFailed(_('Invalid credentials')) refresh = RefreshToken.for_user(user)", "= User fields = ['username', 'name', 'role', 'password'] extra_kwargs = { 'username': {", "'Sign 
up credentials' class LoginSerializer(serializers.Serializer): \"\"\"Serializer for login user.\"\"\" password = serializers.CharField(max_length=150, min_length=5,", "return {'access': str(refresh.access_token), 'refresh': str(refresh)} class Meta: \"\"\"Meta information for login serializer.\"\"\" ref_name", "authenticate(username=username, password=password) if not user: raise AuthenticationFailed(_('Invalid credentials')) refresh = RefreshToken.for_user(user) return {'access':", "= RefreshToken.for_user(user) return {'access': str(refresh.access_token), 'refresh': str(refresh)} class Meta: \"\"\"Meta information for login", "tokens.\"\"\" username = attrs.get('username', '') password = attrs.get('password', '') user = authenticate(username=username, password=password)", "password = attrs.get('password', '') user = authenticate(username=username, password=password) if not user: raise AuthenticationFailed(_('Invalid", "'name', 'role', 'password'] extra_kwargs = { 'username': { 'required': True }, 'role': {", "for signup serializer.\"\"\" model = User fields = ['username', 'name', 'role', 'password'] extra_kwargs", "True }, 'password': { 'required': True } } ref_name = 'Sign up credentials'", "} ref_name = 'Sign up credentials' class LoginSerializer(serializers.Serializer): \"\"\"Serializer for login user.\"\"\" password", "= { 'username': { 'required': True }, 'role': { 'required': True }, 'password':", "not user: raise AuthenticationFailed(_('Invalid credentials')) refresh = RefreshToken.for_user(user) return {'access': str(refresh.access_token), 'refresh': str(refresh)}", "django.contrib.auth import authenticate from django.contrib.auth import get_user_model from django.utils.translation import gettext_lazy as _", "create(self, validated_data): \"\"\"Create a new user.\"\"\" return User.objects.create_user(**validated_data) class Meta: \"\"\"Meta information for", "Meta: \"\"\"Meta information for signup serializer.\"\"\" model = User fields = ['username', 
'name',", "def validate(self, attrs): \"\"\"Validate credentials and get user tokens.\"\"\" username = attrs.get('username', '')", "rest_framework import serializers from rest_framework.exceptions import AuthenticationFailed from rest_framework_simplejwt.tokens import RefreshToken User =", "}, 'password': { 'required': True } } ref_name = 'Sign up credentials' class", "user.\"\"\" password = serializers.CharField(max_length=150, min_length=5, write_only=True) username = serializers.CharField(max_length=150, min_length=5, write_only=True) def validate(self,", "_ from rest_framework import serializers from rest_framework.exceptions import AuthenticationFailed from rest_framework_simplejwt.tokens import RefreshToken", "\"\"\"Serializer for signup user.\"\"\" password = serializers.CharField(max_length=150, min_length=6, write_only=True) def create(self, validated_data): \"\"\"Create", "attrs): \"\"\"Validate credentials and get user tokens.\"\"\" username = attrs.get('username', '') password =", "gettext_lazy as _ from rest_framework import serializers from rest_framework.exceptions import AuthenticationFailed from rest_framework_simplejwt.tokens", "import get_user_model from django.utils.translation import gettext_lazy as _ from rest_framework import serializers from", "class SignupSerializer(serializers.ModelSerializer): \"\"\"Serializer for signup user.\"\"\" password = serializers.CharField(max_length=150, min_length=6, write_only=True) def create(self,", "= 'Sign up credentials' class LoginSerializer(serializers.Serializer): \"\"\"Serializer for login user.\"\"\" password = serializers.CharField(max_length=150,", "import authenticate from django.contrib.auth import get_user_model from django.utils.translation import gettext_lazy as _ from", "serializer.\"\"\" model = User fields = ['username', 'name', 'role', 'password'] extra_kwargs = {", "information for signup serializer.\"\"\" model = User fields = ['username', 'name', 'role', 'password']", "['username', 
'name', 'role', 'password'] extra_kwargs = { 'username': { 'required': True }, 'role':", "if not user: raise AuthenticationFailed(_('Invalid credentials')) refresh = RefreshToken.for_user(user) return {'access': str(refresh.access_token), 'refresh':", "serializers from rest_framework.exceptions import AuthenticationFailed from rest_framework_simplejwt.tokens import RefreshToken User = get_user_model() class", "serializers.CharField(max_length=150, min_length=5, write_only=True) username = serializers.CharField(max_length=150, min_length=5, write_only=True) def validate(self, attrs): \"\"\"Validate credentials", "} } ref_name = 'Sign up credentials' class LoginSerializer(serializers.Serializer): \"\"\"Serializer for login user.\"\"\"", "\"\"\"Create a new user.\"\"\" return User.objects.create_user(**validated_data) class Meta: \"\"\"Meta information for signup serializer.\"\"\"", "signup user.\"\"\" password = serializers.CharField(max_length=150, min_length=6, write_only=True) def create(self, validated_data): \"\"\"Create a new", "= ['username', 'name', 'role', 'password'] extra_kwargs = { 'username': { 'required': True },", "SignupSerializer(serializers.ModelSerializer): \"\"\"Serializer for signup user.\"\"\" password = serializers.CharField(max_length=150, min_length=6, write_only=True) def create(self, validated_data):", "'password': { 'required': True } } ref_name = 'Sign up credentials' class LoginSerializer(serializers.Serializer):", "new user.\"\"\" return User.objects.create_user(**validated_data) class Meta: \"\"\"Meta information for signup serializer.\"\"\" model =", "def create(self, validated_data): \"\"\"Create a new user.\"\"\" return User.objects.create_user(**validated_data) class Meta: \"\"\"Meta information", "validate(self, attrs): \"\"\"Validate credentials and get user tokens.\"\"\" username = attrs.get('username', '') password", "True } } ref_name = 'Sign up credentials' class LoginSerializer(serializers.Serializer): \"\"\"Serializer for 
login", "= attrs.get('username', '') password = attrs.get('password', '') user = authenticate(username=username, password=password) if not", "{ 'required': True }, 'role': { 'required': True }, 'password': { 'required': True", "str(refresh.access_token), 'refresh': str(refresh)} class Meta: \"\"\"Meta information for login serializer.\"\"\" ref_name = 'Login", "for signup user.\"\"\" password = serializers.CharField(max_length=150, min_length=6, write_only=True) def create(self, validated_data): \"\"\"Create a", "\"\"\"Meta information for signup serializer.\"\"\" model = User fields = ['username', 'name', 'role',", "return User.objects.create_user(**validated_data) class Meta: \"\"\"Meta information for signup serializer.\"\"\" model = User fields", "credentials and get user tokens.\"\"\" username = attrs.get('username', '') password = attrs.get('password', '')", "True }, 'role': { 'required': True }, 'password': { 'required': True } }", "attrs.get('username', '') password = attrs.get('password', '') user = authenticate(username=username, password=password) if not user:", "write_only=True) def validate(self, attrs): \"\"\"Validate credentials and get user tokens.\"\"\" username = attrs.get('username',", "credentials' class LoginSerializer(serializers.Serializer): \"\"\"Serializer for login user.\"\"\" password = serializers.CharField(max_length=150, min_length=5, write_only=True) username", "'required': True }, 'role': { 'required': True }, 'password': { 'required': True }", "'required': True } } ref_name = 'Sign up credentials' class LoginSerializer(serializers.Serializer): \"\"\"Serializer for", "from rest_framework import serializers from rest_framework.exceptions import AuthenticationFailed from rest_framework_simplejwt.tokens import RefreshToken User", "'password'] extra_kwargs = { 'username': { 'required': True }, 'role': { 'required': True", "signup serializer.\"\"\" model = User fields = ['username', 'name', 'role', 'password'] extra_kwargs =", 
"AuthenticationFailed from rest_framework_simplejwt.tokens import RefreshToken User = get_user_model() class SignupSerializer(serializers.ModelSerializer): \"\"\"Serializer for signup", "= serializers.CharField(max_length=150, min_length=5, write_only=True) username = serializers.CharField(max_length=150, min_length=5, write_only=True) def validate(self, attrs): \"\"\"Validate", "a new user.\"\"\" return User.objects.create_user(**validated_data) class Meta: \"\"\"Meta information for signup serializer.\"\"\" model", "= serializers.CharField(max_length=150, min_length=5, write_only=True) def validate(self, attrs): \"\"\"Validate credentials and get user tokens.\"\"\"", "'required': True }, 'password': { 'required': True } } ref_name = 'Sign up", "'refresh': str(refresh)} class Meta: \"\"\"Meta information for login serializer.\"\"\" ref_name = 'Login credentials'", "get_user_model() class SignupSerializer(serializers.ModelSerializer): \"\"\"Serializer for signup user.\"\"\" password = serializers.CharField(max_length=150, min_length=6, write_only=True) def", "import serializers from rest_framework.exceptions import AuthenticationFailed from rest_framework_simplejwt.tokens import RefreshToken User = get_user_model()", "model = User fields = ['username', 'name', 'role', 'password'] extra_kwargs = { 'username':", "credentials')) refresh = RefreshToken.for_user(user) return {'access': str(refresh.access_token), 'refresh': str(refresh)} class Meta: \"\"\"Meta information", "class LoginSerializer(serializers.Serializer): \"\"\"Serializer for login user.\"\"\" password = serializers.CharField(max_length=150, min_length=5, write_only=True) username =", "as _ from rest_framework import serializers from rest_framework.exceptions import AuthenticationFailed from rest_framework_simplejwt.tokens import", "password = serializers.CharField(max_length=150, min_length=5, write_only=True) username = serializers.CharField(max_length=150, min_length=5, write_only=True) def 
validate(self, attrs):", "from django.contrib.auth import get_user_model from django.utils.translation import gettext_lazy as _ from rest_framework import", "import RefreshToken User = get_user_model() class SignupSerializer(serializers.ModelSerializer): \"\"\"Serializer for signup user.\"\"\" password =", "write_only=True) username = serializers.CharField(max_length=150, min_length=5, write_only=True) def validate(self, attrs): \"\"\"Validate credentials and get", "= serializers.CharField(max_length=150, min_length=6, write_only=True) def create(self, validated_data): \"\"\"Create a new user.\"\"\" return User.objects.create_user(**validated_data)", "get_user_model from django.utils.translation import gettext_lazy as _ from rest_framework import serializers from rest_framework.exceptions", "import AuthenticationFailed from rest_framework_simplejwt.tokens import RefreshToken User = get_user_model() class SignupSerializer(serializers.ModelSerializer): \"\"\"Serializer for", "fields = ['username', 'name', 'role', 'password'] extra_kwargs = { 'username': { 'required': True", "serializers.CharField(max_length=150, min_length=5, write_only=True) def validate(self, attrs): \"\"\"Validate credentials and get user tokens.\"\"\" username", "write_only=True) def create(self, validated_data): \"\"\"Create a new user.\"\"\" return User.objects.create_user(**validated_data) class Meta: \"\"\"Meta", "{'access': str(refresh.access_token), 'refresh': str(refresh)} class Meta: \"\"\"Meta information for login serializer.\"\"\" ref_name =", "LoginSerializer(serializers.Serializer): \"\"\"Serializer for login user.\"\"\" password = serializers.CharField(max_length=150, min_length=5, write_only=True) username = serializers.CharField(max_length=150,", "\"\"\"Validate credentials and get user tokens.\"\"\" username = attrs.get('username', '') password = attrs.get('password',", "user tokens.\"\"\" username = attrs.get('username', '') password = attrs.get('password', '') user = 
authenticate(username=username,", "User fields = ['username', 'name', 'role', 'password'] extra_kwargs = { 'username': { 'required':", "{ 'required': True } } ref_name = 'Sign up credentials' class LoginSerializer(serializers.Serializer): \"\"\"Serializer", "user: raise AuthenticationFailed(_('Invalid credentials')) refresh = RefreshToken.for_user(user) return {'access': str(refresh.access_token), 'refresh': str(refresh)} class", "validated_data): \"\"\"Create a new user.\"\"\" return User.objects.create_user(**validated_data) class Meta: \"\"\"Meta information for signup", "{ 'username': { 'required': True }, 'role': { 'required': True }, 'password': {", "django.utils.translation import gettext_lazy as _ from rest_framework import serializers from rest_framework.exceptions import AuthenticationFailed", "from rest_framework_simplejwt.tokens import RefreshToken User = get_user_model() class SignupSerializer(serializers.ModelSerializer): \"\"\"Serializer for signup user.\"\"\"", "django.contrib.auth import get_user_model from django.utils.translation import gettext_lazy as _ from rest_framework import serializers", "= get_user_model() class SignupSerializer(serializers.ModelSerializer): \"\"\"Serializer for signup user.\"\"\" password = serializers.CharField(max_length=150, min_length=6, write_only=True)", "from rest_framework.exceptions import AuthenticationFailed from rest_framework_simplejwt.tokens import RefreshToken User = get_user_model() class SignupSerializer(serializers.ModelSerializer):", "user.\"\"\" password = serializers.CharField(max_length=150, min_length=6, write_only=True) def create(self, validated_data): \"\"\"Create a new user.\"\"\"", "class Meta: \"\"\"Meta information for signup serializer.\"\"\" model = User fields = ['username',", "serializers.CharField(max_length=150, min_length=6, write_only=True) def create(self, validated_data): \"\"\"Create a new user.\"\"\" return User.objects.create_user(**validated_data) class", "min_length=6, 
write_only=True) def create(self, validated_data): \"\"\"Create a new user.\"\"\" return User.objects.create_user(**validated_data) class Meta:", "get user tokens.\"\"\" username = attrs.get('username', '') password = attrs.get('password', '') user =", "rest_framework.exceptions import AuthenticationFailed from rest_framework_simplejwt.tokens import RefreshToken User = get_user_model() class SignupSerializer(serializers.ModelSerializer): \"\"\"Serializer", "min_length=5, write_only=True) def validate(self, attrs): \"\"\"Validate credentials and get user tokens.\"\"\" username =", "'role', 'password'] extra_kwargs = { 'username': { 'required': True }, 'role': { 'required':", "RefreshToken User = get_user_model() class SignupSerializer(serializers.ModelSerializer): \"\"\"Serializer for signup user.\"\"\" password = serializers.CharField(max_length=150,", "}, 'role': { 'required': True }, 'password': { 'required': True } } ref_name", "= authenticate(username=username, password=password) if not user: raise AuthenticationFailed(_('Invalid credentials')) refresh = RefreshToken.for_user(user) return", "from django.utils.translation import gettext_lazy as _ from rest_framework import serializers from rest_framework.exceptions import", "= attrs.get('password', '') user = authenticate(username=username, password=password) if not user: raise AuthenticationFailed(_('Invalid credentials'))", "'') user = authenticate(username=username, password=password) if not user: raise AuthenticationFailed(_('Invalid credentials')) refresh =", "refresh = RefreshToken.for_user(user) return {'access': str(refresh.access_token), 'refresh': str(refresh)} class Meta: \"\"\"Meta information for", "'role': { 'required': True }, 'password': { 'required': True } } ref_name =", "attrs.get('password', '') user = authenticate(username=username, password=password) if not user: raise AuthenticationFailed(_('Invalid credentials')) refresh" ]
[ "m = sum([abs(e) for e in particles[0][0]]) min_n = 0 for i, d", "d[1][0] d[0][1] += d[1][1] d[0][2] += d[1][2] def part1(data): particles = [parse(d) for", "open(\"input\",\"r\").read() def parse(particle): return [list(map(int, p[3:-1].split(\",\"))) for p in particle.split(\", \")] def step(d):", "d[1][2] def part1(data): particles = [parse(d) for d in data.split('\\n')] while True: for", "sum([abs(e) for e in d[0]]) < m: min_n = i m = sum([abs(e)", "in d[0]]) < m: min_n = i m = sum([abs(e) for e in", "def part1(data): particles = [parse(d) for d in data.split('\\n')] while True: for d", "particles: step(d) m = sum([abs(e) for e in particles[0][0]]) min_n = 0 for", "i, d in enumerate(particles): if sum([abs(e) for e in d[0]]) < m: min_n", "= open(\"input\",\"r\").read() def parse(particle): return [list(map(int, p[3:-1].split(\",\"))) for p in particle.split(\", \")] def", "def part2(data): particles = [parse(d) for d in data.split('\\n')] while True: positions =", "d in data.split('\\n')] while True: for d in particles: step(d) m = sum([abs(e)", "min_n = i m = sum([abs(e) for e in d[0]]) print(min_n) def part2(data):", "data = open(\"input\",\"r\").read() def parse(particle): return [list(map(int, p[3:-1].split(\",\"))) for p in particle.split(\", \")]", "= i m = sum([abs(e) for e in d[0]]) print(min_n) def part2(data): particles", "d[1][0] += d[2][0] d[1][1] += d[2][1] d[1][2] += d[2][2] d[0][0] += d[1][0] d[0][1]", "min_n = 0 for i, d in enumerate(particles): if sum([abs(e) for e in", "in particle.split(\", \")] def step(d): d[1][0] += d[2][0] d[1][1] += d[2][1] d[1][2] +=", "= [parse(d) for d in data.split('\\n')] while True: for d in particles: step(d)", "p[3:-1].split(\",\"))) for p in particle.split(\", \")] def step(d): d[1][0] += d[2][0] d[1][1] +=", "positions = {} delete = [] for i, d in enumerate(particles): step(d) if", "particles = [d for i, d in enumerate(particles) if i not in delete]", "particles = [parse(d) for d in data.split('\\n')] while 
True: for d in particles:", "{} delete = [] for i, d in enumerate(particles): step(d) if tuple(d[0]) in", "= [d for i, d in enumerate(particles) if i not in delete] print(len(particles))", "for i, d in enumerate(particles): if sum([abs(e) for e in d[0]]) < m:", "tuple(d[0]) in positions: delete += [i, positions[tuple(d[0])]] else: positions[tuple(d[0])] = i particles =", "d in enumerate(particles): if sum([abs(e) for e in d[0]]) < m: min_n =", "+= d[2][0] d[1][1] += d[2][1] d[1][2] += d[2][2] d[0][0] += d[1][0] d[0][1] +=", "print(min_n) def part2(data): particles = [parse(d) for d in data.split('\\n')] while True: positions", "i particles = [d for i, d in enumerate(particles) if i not in", "particle.split(\", \")] def step(d): d[1][0] += d[2][0] d[1][1] += d[2][1] d[1][2] += d[2][2]", "[list(map(int, p[3:-1].split(\",\"))) for p in particle.split(\", \")] def step(d): d[1][0] += d[2][0] d[1][1]", "positions[tuple(d[0])]] else: positions[tuple(d[0])] = i particles = [d for i, d in enumerate(particles)", "True: positions = {} delete = [] for i, d in enumerate(particles): step(d)", "0 for i, d in enumerate(particles): if sum([abs(e) for e in d[0]]) <", "particles = [parse(d) for d in data.split('\\n')] while True: positions = {} delete", "d[0]]) print(min_n) def part2(data): particles = [parse(d) for d in data.split('\\n')] while True:", "= [parse(d) for d in data.split('\\n')] while True: positions = {} delete =", "for p in particle.split(\", \")] def step(d): d[1][0] += d[2][0] d[1][1] += d[2][1]", "d[1][2] += d[2][2] d[0][0] += d[1][0] d[0][1] += d[1][1] d[0][2] += d[1][2] def", "sum([abs(e) for e in particles[0][0]]) min_n = 0 for i, d in enumerate(particles):", "True: for d in particles: step(d) m = sum([abs(e) for e in particles[0][0]])", "= [] for i, d in enumerate(particles): step(d) if tuple(d[0]) in positions: delete", "d[0][2] += d[1][2] def part1(data): particles = [parse(d) for d in data.split('\\n')] while", "< m: min_n = i m = sum([abs(e) for e in 
d[0]]) print(min_n)", "= 0 for i, d in enumerate(particles): if sum([abs(e) for e in d[0]])", "i, d in enumerate(particles): step(d) if tuple(d[0]) in positions: delete += [i, positions[tuple(d[0])]]", "[i, positions[tuple(d[0])]] else: positions[tuple(d[0])] = i particles = [d for i, d in", "e in particles[0][0]]) min_n = 0 for i, d in enumerate(particles): if sum([abs(e)", "= sum([abs(e) for e in particles[0][0]]) min_n = 0 for i, d in", "e in d[0]]) print(min_n) def part2(data): particles = [parse(d) for d in data.split('\\n')]", "part2(data): particles = [parse(d) for d in data.split('\\n')] while True: positions = {}", "in particles[0][0]]) min_n = 0 for i, d in enumerate(particles): if sum([abs(e) for", "d in particles: step(d) m = sum([abs(e) for e in particles[0][0]]) min_n =", "particles[0][0]]) min_n = 0 for i, d in enumerate(particles): if sum([abs(e) for e", "+= d[2][2] d[0][0] += d[1][0] d[0][1] += d[1][1] d[0][2] += d[1][2] def part1(data):", "d[2][1] d[1][2] += d[2][2] d[0][0] += d[1][0] d[0][1] += d[1][1] d[0][2] += d[1][2]", "else: positions[tuple(d[0])] = i particles = [d for i, d in enumerate(particles) if", "in enumerate(particles): step(d) if tuple(d[0]) in positions: delete += [i, positions[tuple(d[0])]] else: positions[tuple(d[0])]", "d[1][1] d[0][2] += d[1][2] def part1(data): particles = [parse(d) for d in data.split('\\n')]", "d[2][2] d[0][0] += d[1][0] d[0][1] += d[1][1] d[0][2] += d[1][2] def part1(data): particles", "positions[tuple(d[0])] = i particles = [d for i, d in enumerate(particles) if i", "if tuple(d[0]) in positions: delete += [i, positions[tuple(d[0])]] else: positions[tuple(d[0])] = i particles", "d[0]]) < m: min_n = i m = sum([abs(e) for e in d[0]])", "step(d): d[1][0] += d[2][0] d[1][1] += d[2][1] d[1][2] += d[2][2] d[0][0] += d[1][0]", "for i, d in enumerate(particles) if i not in delete] print(len(particles)) #part1(data) part2(data)", "+= d[1][0] d[0][1] += d[1][1] d[0][2] += d[1][2] def part1(data): particles = 
[parse(d)", "while True: positions = {} delete = [] for i, d in enumerate(particles):", "d in data.split('\\n')] while True: positions = {} delete = [] for i,", "data.split('\\n')] while True: positions = {} delete = [] for i, d in", "d[0][0] += d[1][0] d[0][1] += d[1][1] d[0][2] += d[1][2] def part1(data): particles =", "[] for i, d in enumerate(particles): step(d) if tuple(d[0]) in positions: delete +=", "delete += [i, positions[tuple(d[0])]] else: positions[tuple(d[0])] = i particles = [d for i,", "while True: for d in particles: step(d) m = sum([abs(e) for e in", "\")] def step(d): d[1][0] += d[2][0] d[1][1] += d[2][1] d[1][2] += d[2][2] d[0][0]", "for e in d[0]]) < m: min_n = i m = sum([abs(e) for", "e in d[0]]) < m: min_n = i m = sum([abs(e) for e", "enumerate(particles): if sum([abs(e) for e in d[0]]) < m: min_n = i m", "for e in d[0]]) print(min_n) def part2(data): particles = [parse(d) for d in", "in d[0]]) print(min_n) def part2(data): particles = [parse(d) for d in data.split('\\n')] while", "for d in data.split('\\n')] while True: for d in particles: step(d) m =", "+= [i, positions[tuple(d[0])]] else: positions[tuple(d[0])] = i particles = [d for i, d", "d[2][0] d[1][1] += d[2][1] d[1][2] += d[2][2] d[0][0] += d[1][0] d[0][1] += d[1][1]", "in particles: step(d) m = sum([abs(e) for e in particles[0][0]]) min_n = 0", "[d for i, d in enumerate(particles) if i not in delete] print(len(particles)) #part1(data)", "part1(data): particles = [parse(d) for d in data.split('\\n')] while True: for d in", "def parse(particle): return [list(map(int, p[3:-1].split(\",\"))) for p in particle.split(\", \")] def step(d): d[1][0]", "+= d[2][1] d[1][2] += d[2][2] d[0][0] += d[1][0] d[0][1] += d[1][1] d[0][2] +=", "parse(particle): return [list(map(int, p[3:-1].split(\",\"))) for p in particle.split(\", \")] def step(d): d[1][0] +=", "[parse(d) for d in data.split('\\n')] while True: positions = {} delete = []", "p in particle.split(\", \")] def step(d): d[1][0] += d[2][0] 
d[1][1] += d[2][1] d[1][2]", "return [list(map(int, p[3:-1].split(\",\"))) for p in particle.split(\", \")] def step(d): d[1][0] += d[2][0]", "= {} delete = [] for i, d in enumerate(particles): step(d) if tuple(d[0])", "for d in particles: step(d) m = sum([abs(e) for e in particles[0][0]]) min_n", "i m = sum([abs(e) for e in d[0]]) print(min_n) def part2(data): particles =", "positions: delete += [i, positions[tuple(d[0])]] else: positions[tuple(d[0])] = i particles = [d for", "d in enumerate(particles): step(d) if tuple(d[0]) in positions: delete += [i, positions[tuple(d[0])]] else:", "enumerate(particles): step(d) if tuple(d[0]) in positions: delete += [i, positions[tuple(d[0])]] else: positions[tuple(d[0])] =", "m: min_n = i m = sum([abs(e) for e in d[0]]) print(min_n) def", "def step(d): d[1][0] += d[2][0] d[1][1] += d[2][1] d[1][2] += d[2][2] d[0][0] +=", "data.split('\\n')] while True: for d in particles: step(d) m = sum([abs(e) for e", "delete = [] for i, d in enumerate(particles): step(d) if tuple(d[0]) in positions:", "in positions: delete += [i, positions[tuple(d[0])]] else: positions[tuple(d[0])] = i particles = [d", "step(d) if tuple(d[0]) in positions: delete += [i, positions[tuple(d[0])]] else: positions[tuple(d[0])] = i", "if sum([abs(e) for e in d[0]]) < m: min_n = i m =", "= sum([abs(e) for e in d[0]]) print(min_n) def part2(data): particles = [parse(d) for", "for d in data.split('\\n')] while True: positions = {} delete = [] for", "[parse(d) for d in data.split('\\n')] while True: for d in particles: step(d) m", "+= d[1][2] def part1(data): particles = [parse(d) for d in data.split('\\n')] while True:", "in data.split('\\n')] while True: for d in particles: step(d) m = sum([abs(e) for", "in enumerate(particles): if sum([abs(e) for e in d[0]]) < m: min_n = i", "sum([abs(e) for e in d[0]]) print(min_n) def part2(data): particles = [parse(d) for d", "m = sum([abs(e) for e in d[0]]) print(min_n) def part2(data): particles = [parse(d)", "= i particles 
= [d for i, d in enumerate(particles) if i not", "d[0][1] += d[1][1] d[0][2] += d[1][2] def part1(data): particles = [parse(d) for d", "d[1][1] += d[2][1] d[1][2] += d[2][2] d[0][0] += d[1][0] d[0][1] += d[1][1] d[0][2]", "step(d) m = sum([abs(e) for e in particles[0][0]]) min_n = 0 for i,", "+= d[1][1] d[0][2] += d[1][2] def part1(data): particles = [parse(d) for d in", "for e in particles[0][0]]) min_n = 0 for i, d in enumerate(particles): if", "for i, d in enumerate(particles): step(d) if tuple(d[0]) in positions: delete += [i,", "in data.split('\\n')] while True: positions = {} delete = [] for i, d" ]
[ "database_host_and_port if __name__ == \"__main__\": client = pymongo.MongoClient(database_host_and_port) db = client[\"jidata\"] db[\"issues\"].create_index('projectname') db[\"users\"].create_index('projectname')", "from properties import database_host_and_port if __name__ == \"__main__\": client = pymongo.MongoClient(database_host_and_port) db =", "pymongo from properties import database_host_and_port if __name__ == \"__main__\": client = pymongo.MongoClient(database_host_and_port) db", "== \"__main__\": client = pymongo.MongoClient(database_host_and_port) db = client[\"jidata\"] db[\"issues\"].create_index('projectname') db[\"users\"].create_index('projectname') db[\"comments\"].create_index('issue') db[\"comments\"].create_index('projectname') db[\"events\"].create_index('issue')", "<reponame>AuthEceSoftEng/jira-apache-downloader<gh_stars>0 import pymongo from properties import database_host_and_port if __name__ == \"__main__\": client =", "properties import database_host_and_port if __name__ == \"__main__\": client = pymongo.MongoClient(database_host_and_port) db = client[\"jidata\"]", "client = pymongo.MongoClient(database_host_and_port) db = client[\"jidata\"] db[\"issues\"].create_index('projectname') db[\"users\"].create_index('projectname') db[\"comments\"].create_index('issue') db[\"comments\"].create_index('projectname') db[\"events\"].create_index('issue') db[\"events\"].create_index('projectname') db[\"worklogs\"].create_index('issue')", "__name__ == \"__main__\": client = pymongo.MongoClient(database_host_and_port) db = client[\"jidata\"] db[\"issues\"].create_index('projectname') db[\"users\"].create_index('projectname') db[\"comments\"].create_index('issue') db[\"comments\"].create_index('projectname')", "= pymongo.MongoClient(database_host_and_port) db = client[\"jidata\"] db[\"issues\"].create_index('projectname') db[\"users\"].create_index('projectname') db[\"comments\"].create_index('issue') db[\"comments\"].create_index('projectname') 
db[\"events\"].create_index('issue') db[\"events\"].create_index('projectname') db[\"worklogs\"].create_index('issue') db[\"worklogs\"].create_index('projectname')", "if __name__ == \"__main__\": client = pymongo.MongoClient(database_host_and_port) db = client[\"jidata\"] db[\"issues\"].create_index('projectname') db[\"users\"].create_index('projectname') db[\"comments\"].create_index('issue')", "\"__main__\": client = pymongo.MongoClient(database_host_and_port) db = client[\"jidata\"] db[\"issues\"].create_index('projectname') db[\"users\"].create_index('projectname') db[\"comments\"].create_index('issue') db[\"comments\"].create_index('projectname') db[\"events\"].create_index('issue') db[\"events\"].create_index('projectname')", "import pymongo from properties import database_host_and_port if __name__ == \"__main__\": client = pymongo.MongoClient(database_host_and_port)", "import database_host_and_port if __name__ == \"__main__\": client = pymongo.MongoClient(database_host_and_port) db = client[\"jidata\"] db[\"issues\"].create_index('projectname')" ]
[ "to directory of fast5 files\") parser.add_argument('--analysis', required=False, action='store_true', dest='analysis', default=False, help=\"Remove all analysis", "fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh = fh.repack() fh.close() return counter def remove_basecall_analyses(fast5):", "signalalign analyses from a fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5 path does not exist\".format(fast5)", "return counter def remove_basecall_analyses(fast5): \"\"\"Remove basecall analyses from a fast5 file\"\"\" assert os.path.exists(fast5),", "[x for x in list(fh[\"Analyses\"].keys())]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh.delete(\"Analyses\") fh =", "1 fh.delete(\"Analyses\") fh = fh.repack() fh.close() return counter def main(): args = parse_args()", "else: if args.signalalign or not args.basecall: function_to_run = remove_sa_analyses elif args.basecall: function_to_run =", "os from py3helpers.utils import list_dir from py3helpers.multiprocess import * from argparse import ArgumentParser", "x]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh = fh.repack() fh.close() return counter def", "all signalalign files\") parser.add_argument('--threads', required=False, action='store', dest='threads', default=1, type=int, help=\"number of threads to", "Fast5(fast5, read='r+') counter = 0 for analyses in [x for x in list(fh[\"Analyses\"].keys())", "action='store_true', dest='analysis', default=False, help=\"Remove all analysis files\") parser.add_argument('--basecall', required=False, action='store_true', dest='basecall', default=False, help=\"Remove", "analyses in [x for x in list(fh[\"Analyses\"].keys()) if \"SignalAlign\" in x]: fh.delete(os.path.join(\"Analyses\", analyses))", "if args.signalalign or not args.basecall: function_to_run = remove_sa_analyses elif args.basecall: function_to_run = remove_basecall_analyses", "list(fh[\"Analyses\"].keys()) if \"Basecall\" in x]: 
fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh = fh.repack()", "BasicService(function_to_run, service_name=\"forward_multiprocess_aggregate_all_variantcalls\") files = list_dir(args.dir, ext=\"fast5\") total, failure, messages, output = run_service(service.run, files,", "+= 1 fh.delete(\"Analyses\") fh = fh.repack() fh.close() return counter def main(): args =", "parse_args() function_to_run = None if args.analysis: function_to_run = remove_analyses else: if args.signalalign or", "main(): args = parse_args() function_to_run = None if args.analysis: function_to_run = remove_analyses else:", "basecall analyses from a fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5 path does not exist\".format(fast5)", "0 for analyses in [x for x in list(fh[\"Analyses\"].keys()) if \"SignalAlign\" in x]:", "\"Must select --analysis, --signalalign or --basecall.\" service = BasicService(function_to_run, service_name=\"forward_multiprocess_aggregate_all_variantcalls\") files = list_dir(args.dir,", "directory of fast5 files\") parser.add_argument('--analysis', required=False, action='store_true', dest='analysis', default=False, help=\"Remove all analysis files\")", "remove_basecall_analyses(fast5): \"\"\"Remove basecall analyses from a fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5 path does", "parser.add_argument('--analysis', required=False, action='store_true', dest='analysis', default=False, help=\"Remove all analysis files\") parser.add_argument('--basecall', required=False, action='store_true', dest='basecall',", "fh.repack() fh.close() return counter def main(): args = parse_args() function_to_run = None if", "messages, output = run_service(service.run, files, {}, [\"fast5\"], worker_count=args.threads) print(\"Deleted {} analysis datasets deleted", "from a fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5 path does not exist\".format(fast5) fh =", "counter = 0 for analyses in [x for x in list(fh[\"Analyses\"].keys()) if \"Basecall\"", 
"args.signalalign or not args.basecall: function_to_run = remove_sa_analyses elif args.basecall: function_to_run = remove_basecall_analyses assert", "action='store', dest='threads', default=1, type=int, help=\"number of threads to run\") args = parser.parse_args() return", "list(fh[\"Analyses\"].keys()) if \"SignalAlign\" in x]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh = fh.repack()", "# Author: <NAME> # History: 02/06/19 Created ######################################################################## import os from py3helpers.utils import", "def remove_basecall_analyses(fast5): \"\"\"Remove basecall analyses from a fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5 path", "\"Fast5 path does not exist\".format(fast5) fh = Fast5(fast5, read='r+') counter = 0 for", "help=\"Path to directory of fast5 files\") parser.add_argument('--analysis', required=False, action='store_true', dest='analysis', default=False, help=\"Remove all", "= remove_sa_analyses elif args.basecall: function_to_run = remove_basecall_analyses assert function_to_run is not None, \"Must", "args.basecall: function_to_run = remove_basecall_analyses assert function_to_run is not None, \"Must select --analysis, --signalalign", "counter def main(): args = parse_args() function_to_run = None if args.analysis: function_to_run =", "\"SignalAlign\" in x]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh = fh.repack() fh.close() return", "to run\") args = parser.parse_args() return args def remove_sa_analyses(fast5): \"\"\"Remove signalalign analyses from", "python \"\"\"Remove embedded signalalign analyses from files\"\"\" ######################################################################## # File: remove_sa_analyses.py # executable:", "\"\"\"Remove signalalign analyses from a fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5 path does not", "in [x for x in list(fh[\"Analyses\"].keys()) if \"SignalAlign\" in x]: fh.delete(os.path.join(\"Analyses\", 
analyses)) counter", "args = parser.parse_args() return args def remove_sa_analyses(fast5): \"\"\"Remove signalalign analyses from a fast5", "in [x for x in list(fh[\"Analyses\"].keys()) if \"Basecall\" in x]: fh.delete(os.path.join(\"Analyses\", analyses)) counter", "parser.add_argument('--basecall', required=False, action='store_true', dest='basecall', default=False, help=\"Remove all basecall files\") parser.add_argument('--signalalign', required=False, action='store_true', dest='signalalign',", "assert function_to_run is not None, \"Must select --analysis, --signalalign or --basecall.\" service =", "default=False, help=\"Remove all analysis files\") parser.add_argument('--basecall', required=False, action='store_true', dest='basecall', default=False, help=\"Remove all basecall", "counter = 0 for analyses in [x for x in list(fh[\"Analyses\"].keys())]: fh.delete(os.path.join(\"Analyses\", analyses))", "= 0 for analyses in [x for x in list(fh[\"Analyses\"].keys())]: fh.delete(os.path.join(\"Analyses\", analyses)) counter", "= BasicService(function_to_run, service_name=\"forward_multiprocess_aggregate_all_variantcalls\") files = list_dir(args.dir, ext=\"fast5\") total, failure, messages, output = run_service(service.run,", "for analyses in [x for x in list(fh[\"Analyses\"].keys())]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1", "does not exist\".format(fast5) fh = Fast5(fast5, read='r+') counter = 0 for analyses in", "files\") parser.add_argument('--basecall', required=False, action='store_true', dest='basecall', default=False, help=\"Remove all basecall files\") parser.add_argument('--signalalign', required=False, action='store_true',", "not None, \"Must select --analysis, --signalalign or --basecall.\" service = BasicService(function_to_run, service_name=\"forward_multiprocess_aggregate_all_variantcalls\") files", "service = BasicService(function_to_run, service_name=\"forward_multiprocess_aggregate_all_variantcalls\") files = list_dir(args.dir, 
ext=\"fast5\") total, failure, messages, output =", "help=\"Remove all signalalign files\") parser.add_argument('--threads', required=False, action='store', dest='threads', default=1, type=int, help=\"number of threads", "None if args.analysis: function_to_run = remove_analyses else: if args.signalalign or not args.basecall: function_to_run", "in list(fh[\"Analyses\"].keys()) if \"Basecall\" in x]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh =", "dest='dir', type=str, default=None, help=\"Path to directory of fast5 files\") parser.add_argument('--analysis', required=False, action='store_true', dest='analysis',", "from py3helpers.utils import list_dir from py3helpers.multiprocess import * from argparse import ArgumentParser from", "def parse_args(): parser = ArgumentParser(description=__doc__) # required arguments parser.add_argument('--directory', '-d', required=True, action='store', dest='dir',", "counter += 1 fh.delete(\"Analyses\") fh = fh.repack() fh.close() return counter def main(): args", "\"Basecall\" in x]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh = fh.repack() fh.close() return", "file\"\"\" assert os.path.exists(fast5), \"Fast5 path does not exist\".format(fast5) fh = Fast5(fast5, read='r+') counter", "signalalign analyses from files\"\"\" ######################################################################## # File: remove_sa_analyses.py # executable: remove_sa_analyses.py # #", "as np def parse_args(): parser = ArgumentParser(description=__doc__) # required arguments parser.add_argument('--directory', '-d', required=True,", "for x in list(fh[\"Analyses\"].keys()) if \"Basecall\" in x]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1", "analyses in [x for x in list(fh[\"Analyses\"].keys())]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh.delete(\"Analyses\")", "path does not exist\".format(fast5) fh = Fast5(fast5, read='r+') counter = 0 for analyses", "return counter def 
remove_analyses(fast5): \"\"\"Remove analyses from a fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5", "fh.close() return counter def main(): args = parse_args() function_to_run = None if args.analysis:", "parser.add_argument('--threads', required=False, action='store', dest='threads', default=1, type=int, help=\"number of threads to run\") args =", "dest='threads', default=1, type=int, help=\"number of threads to run\") args = parser.parse_args() return args", "args.analysis: function_to_run = remove_analyses else: if args.signalalign or not args.basecall: function_to_run = remove_sa_analyses", "import list_dir from py3helpers.multiprocess import * from argparse import ArgumentParser from signalalign.fast5 import", "threads to run\") args = parser.parse_args() return args def remove_sa_analyses(fast5): \"\"\"Remove signalalign analyses", "counter = 0 for analyses in [x for x in list(fh[\"Analyses\"].keys()) if \"SignalAlign\"", "print(\"Deleted {} analysis datasets deleted from {} files\".format(np.asarray(output).sum(), len(files))) if __name__ == '__main__':", "of threads to run\") args = parser.parse_args() return args def remove_sa_analyses(fast5): \"\"\"Remove signalalign", "analyses in [x for x in list(fh[\"Analyses\"].keys()) if \"Basecall\" in x]: fh.delete(os.path.join(\"Analyses\", analyses))", "# executable: remove_sa_analyses.py # # Author: <NAME> # History: 02/06/19 Created ######################################################################## import", "remove_analyses else: if args.signalalign or not args.basecall: function_to_run = remove_sa_analyses elif args.basecall: function_to_run", "= 0 for analyses in [x for x in list(fh[\"Analyses\"].keys()) if \"Basecall\" in", "total, failure, messages, output = run_service(service.run, files, {}, [\"fast5\"], worker_count=args.threads) print(\"Deleted {} analysis", "not args.basecall: function_to_run = remove_sa_analyses elif args.basecall: function_to_run = remove_basecall_analyses assert 
function_to_run is", "action='store_true', dest='signalalign', default=False, help=\"Remove all signalalign files\") parser.add_argument('--threads', required=False, action='store', dest='threads', default=1, type=int,", "type=str, default=None, help=\"Path to directory of fast5 files\") parser.add_argument('--analysis', required=False, action='store_true', dest='analysis', default=False,", "counter def remove_analyses(fast5): \"\"\"Remove analyses from a fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5 path", "ArgumentParser from signalalign.fast5 import Fast5 import numpy as np def parse_args(): parser =", "fh.repack() fh.close() return counter def remove_analyses(fast5): \"\"\"Remove analyses from a fast5 file\"\"\" assert", "action='store', dest='dir', type=str, default=None, help=\"Path to directory of fast5 files\") parser.add_argument('--analysis', required=False, action='store_true',", "required=False, action='store', dest='threads', default=1, type=int, help=\"number of threads to run\") args = parser.parse_args()", "if \"Basecall\" in x]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh = fh.repack() fh.close()", "for x in list(fh[\"Analyses\"].keys())]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh.delete(\"Analyses\") fh = fh.repack()", "run_service(service.run, files, {}, [\"fast5\"], worker_count=args.threads) print(\"Deleted {} analysis datasets deleted from {} files\".format(np.asarray(output).sum(),", "help=\"Remove all basecall files\") parser.add_argument('--signalalign', required=False, action='store_true', dest='signalalign', default=False, help=\"Remove all signalalign files\")", "fh = Fast5(fast5, read='r+') counter = 0 for analyses in [x for x", "function_to_run = remove_sa_analyses elif args.basecall: function_to_run = remove_basecall_analyses assert function_to_run is not None,", "import ArgumentParser from signalalign.fast5 import Fast5 import numpy as np def parse_args(): parser", 
"os.path.exists(fast5), \"Fast5 path does not exist\".format(fast5) fh = Fast5(fast5, read='r+') counter = 0", "counter def remove_basecall_analyses(fast5): \"\"\"Remove basecall analyses from a fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5", "\"\"\"Remove basecall analyses from a fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5 path does not", "counter += 1 fh = fh.repack() fh.close() return counter def remove_analyses(fast5): \"\"\"Remove analyses", "#!/usr/bin/env python \"\"\"Remove embedded signalalign analyses from files\"\"\" ######################################################################## # File: remove_sa_analyses.py #", "[x for x in list(fh[\"Analyses\"].keys()) if \"SignalAlign\" in x]: fh.delete(os.path.join(\"Analyses\", analyses)) counter +=", "list_dir(args.dir, ext=\"fast5\") total, failure, messages, output = run_service(service.run, files, {}, [\"fast5\"], worker_count=args.threads) print(\"Deleted", "for analyses in [x for x in list(fh[\"Analyses\"].keys()) if \"Basecall\" in x]: fh.delete(os.path.join(\"Analyses\",", "<NAME> # History: 02/06/19 Created ######################################################################## import os from py3helpers.utils import list_dir from", "parser = ArgumentParser(description=__doc__) # required arguments parser.add_argument('--directory', '-d', required=True, action='store', dest='dir', type=str, default=None,", "ArgumentParser(description=__doc__) # required arguments parser.add_argument('--directory', '-d', required=True, action='store', dest='dir', type=str, default=None, help=\"Path to", "def main(): args = parse_args() function_to_run = None if args.analysis: function_to_run = remove_analyses", "+= 1 fh = fh.repack() fh.close() return counter def remove_analyses(fast5): \"\"\"Remove analyses from", "= parser.parse_args() return args def remove_sa_analyses(fast5): \"\"\"Remove signalalign analyses from a fast5 file\"\"\"", "embedded signalalign analyses from files\"\"\" 
######################################################################## # File: remove_sa_analyses.py # executable: remove_sa_analyses.py #", "all basecall files\") parser.add_argument('--signalalign', required=False, action='store_true', dest='signalalign', default=False, help=\"Remove all signalalign files\") parser.add_argument('--threads',", "fast5 files\") parser.add_argument('--analysis', required=False, action='store_true', dest='analysis', default=False, help=\"Remove all analysis files\") parser.add_argument('--basecall', required=False,", "default=1, type=int, help=\"number of threads to run\") args = parser.parse_args() return args def", "parser.add_argument('--directory', '-d', required=True, action='store', dest='dir', type=str, default=None, help=\"Path to directory of fast5 files\")", "elif args.basecall: function_to_run = remove_basecall_analyses assert function_to_run is not None, \"Must select --analysis,", "of fast5 files\") parser.add_argument('--analysis', required=False, action='store_true', dest='analysis', default=False, help=\"Remove all analysis files\") parser.add_argument('--basecall',", "remove_sa_analyses.py # executable: remove_sa_analyses.py # # Author: <NAME> # History: 02/06/19 Created ########################################################################", "# # Author: <NAME> # History: 02/06/19 Created ######################################################################## import os from py3helpers.utils", "= parse_args() function_to_run = None if args.analysis: function_to_run = remove_analyses else: if args.signalalign", "######################################################################## import os from py3helpers.utils import list_dir from py3helpers.multiprocess import * from argparse", "files = list_dir(args.dir, ext=\"fast5\") total, failure, messages, output = run_service(service.run, files, {}, [\"fast5\"],", "02/06/19 Created ######################################################################## import os from 
py3helpers.utils import list_dir from py3helpers.multiprocess import *", "= ArgumentParser(description=__doc__) # required arguments parser.add_argument('--directory', '-d', required=True, action='store', dest='dir', type=str, default=None, help=\"Path", "fh.close() return counter def remove_analyses(fast5): \"\"\"Remove analyses from a fast5 file\"\"\" assert os.path.exists(fast5),", "= remove_analyses else: if args.signalalign or not args.basecall: function_to_run = remove_sa_analyses elif args.basecall:", "None, \"Must select --analysis, --signalalign or --basecall.\" service = BasicService(function_to_run, service_name=\"forward_multiprocess_aggregate_all_variantcalls\") files =", "not exist\".format(fast5) fh = Fast5(fast5, read='r+') counter = 0 for analyses in [x", "required=True, action='store', dest='dir', type=str, default=None, help=\"Path to directory of fast5 files\") parser.add_argument('--analysis', required=False,", "fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh = fh.repack() fh.close() return counter def remove_analyses(fast5):", "required arguments parser.add_argument('--directory', '-d', required=True, action='store', dest='dir', type=str, default=None, help=\"Path to directory of", "1 fh = fh.repack() fh.close() return counter def remove_analyses(fast5): \"\"\"Remove analyses from a", "is not None, \"Must select --analysis, --signalalign or --basecall.\" service = BasicService(function_to_run, service_name=\"forward_multiprocess_aggregate_all_variantcalls\")", "= list_dir(args.dir, ext=\"fast5\") total, failure, messages, output = run_service(service.run, files, {}, [\"fast5\"], worker_count=args.threads)", "analyses)) counter += 1 fh = fh.repack() fh.close() return counter def remove_basecall_analyses(fast5): \"\"\"Remove", "in list(fh[\"Analyses\"].keys())]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh.delete(\"Analyses\") fh = fh.repack() fh.close() return", "required=False, action='store_true', 
dest='signalalign', default=False, help=\"Remove all signalalign files\") parser.add_argument('--threads', required=False, action='store', dest='threads', default=1,", "from argparse import ArgumentParser from signalalign.fast5 import Fast5 import numpy as np def", "remove_sa_analyses.py # # Author: <NAME> # History: 02/06/19 Created ######################################################################## import os from", "counter += 1 fh = fh.repack() fh.close() return counter def remove_basecall_analyses(fast5): \"\"\"Remove basecall", "= run_service(service.run, files, {}, [\"fast5\"], worker_count=args.threads) print(\"Deleted {} analysis datasets deleted from {}", "help=\"Remove all analysis files\") parser.add_argument('--basecall', required=False, action='store_true', dest='basecall', default=False, help=\"Remove all basecall files\")", "analysis files\") parser.add_argument('--basecall', required=False, action='store_true', dest='basecall', default=False, help=\"Remove all basecall files\") parser.add_argument('--signalalign', required=False,", "args.basecall: function_to_run = remove_sa_analyses elif args.basecall: function_to_run = remove_basecall_analyses assert function_to_run is not", "read='r+') counter = 0 for analyses in [x for x in list(fh[\"Analyses\"].keys()) if", "failure, messages, output = run_service(service.run, files, {}, [\"fast5\"], worker_count=args.threads) print(\"Deleted {} analysis datasets", "fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5 path does not exist\".format(fast5) fh = Fast5(fast5, read='r+')", "args = parse_args() function_to_run = None if args.analysis: function_to_run = remove_analyses else: if", "exist\".format(fast5) fh = Fast5(fast5, read='r+') counter = 0 for analyses in [x for", "Fast5 import numpy as np def parse_args(): parser = ArgumentParser(description=__doc__) # required arguments", "files\") parser.add_argument('--threads', required=False, action='store', dest='threads', default=1, type=int, 
help=\"number of threads to run\") args", "fh.close() return counter def remove_basecall_analyses(fast5): \"\"\"Remove basecall analyses from a fast5 file\"\"\" assert", "default=None, help=\"Path to directory of fast5 files\") parser.add_argument('--analysis', required=False, action='store_true', dest='analysis', default=False, help=\"Remove", "basecall files\") parser.add_argument('--signalalign', required=False, action='store_true', dest='signalalign', default=False, help=\"Remove all signalalign files\") parser.add_argument('--threads', required=False,", "# History: 02/06/19 Created ######################################################################## import os from py3helpers.utils import list_dir from py3helpers.multiprocess", "= fh.repack() fh.close() return counter def remove_basecall_analyses(fast5): \"\"\"Remove basecall analyses from a fast5", "0 for analyses in [x for x in list(fh[\"Analyses\"].keys()) if \"Basecall\" in x]:", "= remove_basecall_analyses assert function_to_run is not None, \"Must select --analysis, --signalalign or --basecall.\"", "fh = fh.repack() fh.close() return counter def main(): args = parse_args() function_to_run =", "py3helpers.utils import list_dir from py3helpers.multiprocess import * from argparse import ArgumentParser from signalalign.fast5", "[\"fast5\"], worker_count=args.threads) print(\"Deleted {} analysis datasets deleted from {} files\".format(np.asarray(output).sum(), len(files))) if __name__", "= None if args.analysis: function_to_run = remove_analyses else: if args.signalalign or not args.basecall:", "fh = fh.repack() fh.close() return counter def remove_basecall_analyses(fast5): \"\"\"Remove basecall analyses from a", "fh.delete(\"Analyses\") fh = fh.repack() fh.close() return counter def main(): args = parse_args() function_to_run", "run\") args = parser.parse_args() return args def remove_sa_analyses(fast5): \"\"\"Remove signalalign analyses from a", "{}, [\"fast5\"], worker_count=args.threads) 
print(\"Deleted {} analysis datasets deleted from {} files\".format(np.asarray(output).sum(), len(files))) if", "all analysis files\") parser.add_argument('--basecall', required=False, action='store_true', dest='basecall', default=False, help=\"Remove all basecall files\") parser.add_argument('--signalalign',", "files\") parser.add_argument('--analysis', required=False, action='store_true', dest='analysis', default=False, help=\"Remove all analysis files\") parser.add_argument('--basecall', required=False, action='store_true',", "fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh.delete(\"Analyses\") fh = fh.repack() fh.close() return counter def", "default=False, help=\"Remove all signalalign files\") parser.add_argument('--threads', required=False, action='store', dest='threads', default=1, type=int, help=\"number of", "default=False, help=\"Remove all basecall files\") parser.add_argument('--signalalign', required=False, action='store_true', dest='signalalign', default=False, help=\"Remove all signalalign", "argparse import ArgumentParser from signalalign.fast5 import Fast5 import numpy as np def parse_args():", "x in list(fh[\"Analyses\"].keys())]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh.delete(\"Analyses\") fh = fh.repack() fh.close()", "--basecall.\" service = BasicService(function_to_run, service_name=\"forward_multiprocess_aggregate_all_variantcalls\") files = list_dir(args.dir, ext=\"fast5\") total, failure, messages, output", "in x]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh = fh.repack() fh.close() return counter", "ext=\"fast5\") total, failure, messages, output = run_service(service.run, files, {}, [\"fast5\"], worker_count=args.threads) print(\"Deleted {}", "# File: remove_sa_analyses.py # executable: remove_sa_analyses.py # # Author: <NAME> # History: 02/06/19", "files, {}, [\"fast5\"], worker_count=args.threads) print(\"Deleted {} analysis datasets deleted from {} 
files\".format(np.asarray(output).sum(), len(files)))", "fh = fh.repack() fh.close() return counter def remove_analyses(fast5): \"\"\"Remove analyses from a fast5", "or --basecall.\" service = BasicService(function_to_run, service_name=\"forward_multiprocess_aggregate_all_variantcalls\") files = list_dir(args.dir, ext=\"fast5\") total, failure, messages,", "arguments parser.add_argument('--directory', '-d', required=True, action='store', dest='dir', type=str, default=None, help=\"Path to directory of fast5", "from py3helpers.multiprocess import * from argparse import ArgumentParser from signalalign.fast5 import Fast5 import", "parser.parse_args() return args def remove_sa_analyses(fast5): \"\"\"Remove signalalign analyses from a fast5 file\"\"\" assert", "'-d', required=True, action='store', dest='dir', type=str, default=None, help=\"Path to directory of fast5 files\") parser.add_argument('--analysis',", "for x in list(fh[\"Analyses\"].keys()) if \"SignalAlign\" in x]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1", "files\") parser.add_argument('--signalalign', required=False, action='store_true', dest='signalalign', default=False, help=\"Remove all signalalign files\") parser.add_argument('--threads', required=False, action='store',", "if \"SignalAlign\" in x]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh = fh.repack() fh.close()", "worker_count=args.threads) print(\"Deleted {} analysis datasets deleted from {} files\".format(np.asarray(output).sum(), len(files))) if __name__ ==", "numpy as np def parse_args(): parser = ArgumentParser(description=__doc__) # required arguments parser.add_argument('--directory', '-d',", "function_to_run = remove_basecall_analyses assert function_to_run is not None, \"Must select --analysis, --signalalign or", "* from argparse import ArgumentParser from signalalign.fast5 import Fast5 import numpy as np", "a fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5 path does not exist\".format(fast5) fh 
= Fast5(fast5,", "= fh.repack() fh.close() return counter def remove_analyses(fast5): \"\"\"Remove analyses from a fast5 file\"\"\"", "files\"\"\" ######################################################################## # File: remove_sa_analyses.py # executable: remove_sa_analyses.py # # Author: <NAME> #", "list_dir from py3helpers.multiprocess import * from argparse import ArgumentParser from signalalign.fast5 import Fast5", "executable: remove_sa_analyses.py # # Author: <NAME> # History: 02/06/19 Created ######################################################################## import os", "signalalign.fast5 import Fast5 import numpy as np def parse_args(): parser = ArgumentParser(description=__doc__) #", "help=\"number of threads to run\") args = parser.parse_args() return args def remove_sa_analyses(fast5): \"\"\"Remove", "remove_basecall_analyses assert function_to_run is not None, \"Must select --analysis, --signalalign or --basecall.\" service", "parser.add_argument('--signalalign', required=False, action='store_true', dest='signalalign', default=False, help=\"Remove all signalalign files\") parser.add_argument('--threads', required=False, action='store', dest='threads',", "[x for x in list(fh[\"Analyses\"].keys()) if \"Basecall\" in x]: fh.delete(os.path.join(\"Analyses\", analyses)) counter +=", "History: 02/06/19 Created ######################################################################## import os from py3helpers.utils import list_dir from py3helpers.multiprocess import", "required=False, action='store_true', dest='basecall', default=False, help=\"Remove all basecall files\") parser.add_argument('--signalalign', required=False, action='store_true', dest='signalalign', default=False,", "return args def remove_sa_analyses(fast5): \"\"\"Remove signalalign analyses from a fast5 file\"\"\" assert os.path.exists(fast5),", "= Fast5(fast5, read='r+') counter = 0 for analyses in [x for x in", "# required arguments parser.add_argument('--directory', '-d', 
required=True, action='store', dest='dir', type=str, default=None, help=\"Path to directory", "remove_sa_analyses elif args.basecall: function_to_run = remove_basecall_analyses assert function_to_run is not None, \"Must select", "Created ######################################################################## import os from py3helpers.utils import list_dir from py3helpers.multiprocess import * from", "= fh.repack() fh.close() return counter def main(): args = parse_args() function_to_run = None", "analyses from files\"\"\" ######################################################################## # File: remove_sa_analyses.py # executable: remove_sa_analyses.py # # Author:", "= 0 for analyses in [x for x in list(fh[\"Analyses\"].keys()) if \"SignalAlign\" in", "analyses)) counter += 1 fh = fh.repack() fh.close() return counter def remove_analyses(fast5): \"\"\"Remove", "from files\"\"\" ######################################################################## # File: remove_sa_analyses.py # executable: remove_sa_analyses.py # # Author: <NAME>", "def remove_analyses(fast5): \"\"\"Remove analyses from a fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5 path does", "dest='analysis', default=False, help=\"Remove all analysis files\") parser.add_argument('--basecall', required=False, action='store_true', dest='basecall', default=False, help=\"Remove all", "\"\"\"Remove embedded signalalign analyses from files\"\"\" ######################################################################## # File: remove_sa_analyses.py # executable: remove_sa_analyses.py", "py3helpers.multiprocess import * from argparse import ArgumentParser from signalalign.fast5 import Fast5 import numpy", "--analysis, --signalalign or --basecall.\" service = BasicService(function_to_run, service_name=\"forward_multiprocess_aggregate_all_variantcalls\") files = list_dir(args.dir, ext=\"fast5\") total,", "action='store_true', dest='basecall', default=False, help=\"Remove all basecall files\") 
parser.add_argument('--signalalign', required=False, action='store_true', dest='signalalign', default=False, help=\"Remove", "x in list(fh[\"Analyses\"].keys()) if \"SignalAlign\" in x]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh", "assert os.path.exists(fast5), \"Fast5 path does not exist\".format(fast5) fh = Fast5(fast5, read='r+') counter =", "fh.repack() fh.close() return counter def remove_basecall_analyses(fast5): \"\"\"Remove basecall analyses from a fast5 file\"\"\"", "list(fh[\"Analyses\"].keys())]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh.delete(\"Analyses\") fh = fh.repack() fh.close() return counter", "service_name=\"forward_multiprocess_aggregate_all_variantcalls\") files = list_dir(args.dir, ext=\"fast5\") total, failure, messages, output = run_service(service.run, files, {},", "signalalign files\") parser.add_argument('--threads', required=False, action='store', dest='threads', default=1, type=int, help=\"number of threads to run\")", "for analyses in [x for x in list(fh[\"Analyses\"].keys()) if \"SignalAlign\" in x]: fh.delete(os.path.join(\"Analyses\",", "args def remove_sa_analyses(fast5): \"\"\"Remove signalalign analyses from a fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5", "analyses)) counter += 1 fh.delete(\"Analyses\") fh = fh.repack() fh.close() return counter def main():", "--signalalign or --basecall.\" service = BasicService(function_to_run, service_name=\"forward_multiprocess_aggregate_all_variantcalls\") files = list_dir(args.dir, ext=\"fast5\") total, failure,", "output = run_service(service.run, files, {}, [\"fast5\"], worker_count=args.threads) print(\"Deleted {} analysis datasets deleted from", "\"\"\"Remove analyses from a fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5 path does not exist\".format(fast5)", "select --analysis, --signalalign or --basecall.\" service = BasicService(function_to_run, service_name=\"forward_multiprocess_aggregate_all_variantcalls\") files = 
list_dir(args.dir, ext=\"fast5\")", "in [x for x in list(fh[\"Analyses\"].keys())]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh.delete(\"Analyses\") fh", "dest='signalalign', default=False, help=\"Remove all signalalign files\") parser.add_argument('--threads', required=False, action='store', dest='threads', default=1, type=int, help=\"number", "######################################################################## # File: remove_sa_analyses.py # executable: remove_sa_analyses.py # # Author: <NAME> # History:", "dest='basecall', default=False, help=\"Remove all basecall files\") parser.add_argument('--signalalign', required=False, action='store_true', dest='signalalign', default=False, help=\"Remove all", "0 for analyses in [x for x in list(fh[\"Analyses\"].keys())]: fh.delete(os.path.join(\"Analyses\", analyses)) counter +=", "return counter def main(): args = parse_args() function_to_run = None if args.analysis: function_to_run", "import * from argparse import ArgumentParser from signalalign.fast5 import Fast5 import numpy as", "required=False, action='store_true', dest='analysis', default=False, help=\"Remove all analysis files\") parser.add_argument('--basecall', required=False, action='store_true', dest='basecall', default=False,", "in list(fh[\"Analyses\"].keys()) if \"SignalAlign\" in x]: fh.delete(os.path.join(\"Analyses\", analyses)) counter += 1 fh =", "+= 1 fh = fh.repack() fh.close() return counter def remove_basecall_analyses(fast5): \"\"\"Remove basecall analyses", "or not args.basecall: function_to_run = remove_sa_analyses elif args.basecall: function_to_run = remove_basecall_analyses assert function_to_run", "Fast5(fast5, read='r+') counter = 0 for analyses in [x for x in list(fh[\"Analyses\"].keys())]:", "function_to_run is not None, \"Must select --analysis, --signalalign or --basecall.\" service = BasicService(function_to_run,", "x in list(fh[\"Analyses\"].keys()) if \"Basecall\" in x]: fh.delete(os.path.join(\"Analyses\", 
analyses)) counter += 1 fh", "function_to_run = remove_analyses else: if args.signalalign or not args.basecall: function_to_run = remove_sa_analyses elif", "File: remove_sa_analyses.py # executable: remove_sa_analyses.py # # Author: <NAME> # History: 02/06/19 Created", "Author: <NAME> # History: 02/06/19 Created ######################################################################## import os from py3helpers.utils import list_dir", "import os from py3helpers.utils import list_dir from py3helpers.multiprocess import * from argparse import", "1 fh = fh.repack() fh.close() return counter def remove_basecall_analyses(fast5): \"\"\"Remove basecall analyses from", "read='r+') counter = 0 for analyses in [x for x in list(fh[\"Analyses\"].keys())]: fh.delete(os.path.join(\"Analyses\",", "np def parse_args(): parser = ArgumentParser(description=__doc__) # required arguments parser.add_argument('--directory', '-d', required=True, action='store',", "def remove_sa_analyses(fast5): \"\"\"Remove signalalign analyses from a fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5 path", "analyses from a fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5 path does not exist\".format(fast5) fh", "function_to_run = None if args.analysis: function_to_run = remove_analyses else: if args.signalalign or not", "import Fast5 import numpy as np def parse_args(): parser = ArgumentParser(description=__doc__) # required", "type=int, help=\"number of threads to run\") args = parser.parse_args() return args def remove_sa_analyses(fast5):", "remove_sa_analyses(fast5): \"\"\"Remove signalalign analyses from a fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5 path does", "remove_analyses(fast5): \"\"\"Remove analyses from a fast5 file\"\"\" assert os.path.exists(fast5), \"Fast5 path does not", "if args.analysis: function_to_run = remove_analyses else: if args.signalalign or not args.basecall: function_to_run =", "{} analysis datasets deleted from {} files\".format(np.asarray(output).sum(), 
len(files))) if __name__ == '__main__': main()", "import numpy as np def parse_args(): parser = ArgumentParser(description=__doc__) # required arguments parser.add_argument('--directory',", "from signalalign.fast5 import Fast5 import numpy as np def parse_args(): parser = ArgumentParser(description=__doc__)", "parse_args(): parser = ArgumentParser(description=__doc__) # required arguments parser.add_argument('--directory', '-d', required=True, action='store', dest='dir', type=str," ]
[ "# return 7 # 2 list1 = [10,12,3,14,20,7,6,5] list1.sort() print(\"#2 :\",list1[-1]) # 3", "for elem in a: if elem > max0: max0 = elem return max0", "= [2,3,4,5,6,7,1,2,3] result = max_elem(list0) print(\"#1 :\",result) # return 7 # 2 list1", "= [10,12,3,14,20,7,6,5] list1.sort() print(\"#2 :\",list1[-1]) # 3 list2 = [3,5,9,7,1,5,8,8,7,5,6] max_num = max(list2)", "print(\"#2 :\",list1[-1]) # 3 list2 = [3,5,9,7,1,5,8,8,7,5,6] max_num = max(list2) print(\"#3 :\", max_num)", "7 # 2 list1 = [10,12,3,14,20,7,6,5] list1.sort() print(\"#2 :\",list1[-1]) # 3 list2 =", "[2,3,4,5,6,7,1,2,3] result = max_elem(list0) print(\"#1 :\",result) # return 7 # 2 list1 =", "elem return max0 list0 = [2,3,4,5,6,7,1,2,3] result = max_elem(list0) print(\"#1 :\",result) # return", "# 3 list2 = [3,5,9,7,1,5,8,8,7,5,6] max_num = max(list2) print(\"#3 :\", max_num) #4 from", "= max(list2) print(\"#3 :\", max_num) #4 from functools import reduce list3 = [-5,-6,-7,-99,-67,-3,-4,-9]", "if elem > max0: max0 = elem return max0 list0 = [2,3,4,5,6,7,1,2,3] result", "= [3,5,9,7,1,5,8,8,7,5,6] max_num = max(list2) print(\"#3 :\", max_num) #4 from functools import reduce", "3 list2 = [3,5,9,7,1,5,8,8,7,5,6] max_num = max(list2) print(\"#3 :\", max_num) #4 from functools", "[10,12,3,14,20,7,6,5] list1.sort() print(\"#2 :\",list1[-1]) # 3 list2 = [3,5,9,7,1,5,8,8,7,5,6] max_num = max(list2) print(\"#3", "max0: max0 = elem return max0 list0 = [2,3,4,5,6,7,1,2,3] result = max_elem(list0) print(\"#1", "print(\"#1 :\",result) # return 7 # 2 list1 = [10,12,3,14,20,7,6,5] list1.sort() print(\"#2 :\",list1[-1])", "1 def max_elem(a): max0 = a[0] for elem in a: if elem >", "> max0: max0 = elem return max0 list0 = [2,3,4,5,6,7,1,2,3] result = max_elem(list0)", ":\",list1[-1]) # 3 list2 = [3,5,9,7,1,5,8,8,7,5,6] max_num = max(list2) print(\"#3 :\", max_num) #4", "list0 = [2,3,4,5,6,7,1,2,3] result = max_elem(list0) print(\"#1 :\",result) # return 7 # 2", "list1.sort() print(\"#2 :\",list1[-1]) # 3 list2 = 
[3,5,9,7,1,5,8,8,7,5,6] max_num = max(list2) print(\"#3 :\",", "return 7 # 2 list1 = [10,12,3,14,20,7,6,5] list1.sort() print(\"#2 :\",list1[-1]) # 3 list2", "list2 = [3,5,9,7,1,5,8,8,7,5,6] max_num = max(list2) print(\"#3 :\", max_num) #4 from functools import", "[3,5,9,7,1,5,8,8,7,5,6] max_num = max(list2) print(\"#3 :\", max_num) #4 from functools import reduce list3", "max(list2) print(\"#3 :\", max_num) #4 from functools import reduce list3 = [-5,-6,-7,-99,-67,-3,-4,-9] print(\"#4", "elem in a: if elem > max0: max0 = elem return max0 list0", "# 2 list1 = [10,12,3,14,20,7,6,5] list1.sort() print(\"#2 :\",list1[-1]) # 3 list2 = [3,5,9,7,1,5,8,8,7,5,6]", "max_elem(list0) print(\"#1 :\",result) # return 7 # 2 list1 = [10,12,3,14,20,7,6,5] list1.sort() print(\"#2", "in a: if elem > max0: max0 = elem return max0 list0 =", "# 1 def max_elem(a): max0 = a[0] for elem in a: if elem", "elem > max0: max0 = elem return max0 list0 = [2,3,4,5,6,7,1,2,3] result =", ":\",result) # return 7 # 2 list1 = [10,12,3,14,20,7,6,5] list1.sort() print(\"#2 :\",list1[-1]) #", "return max0 list0 = [2,3,4,5,6,7,1,2,3] result = max_elem(list0) print(\"#1 :\",result) # return 7", "max_num = max(list2) print(\"#3 :\", max_num) #4 from functools import reduce list3 =", "= a[0] for elem in a: if elem > max0: max0 = elem", "def max_elem(a): max0 = a[0] for elem in a: if elem > max0:", "max0 = a[0] for elem in a: if elem > max0: max0 =", "a[0] for elem in a: if elem > max0: max0 = elem return", "max0 list0 = [2,3,4,5,6,7,1,2,3] result = max_elem(list0) print(\"#1 :\",result) # return 7 #", "= elem return max0 list0 = [2,3,4,5,6,7,1,2,3] result = max_elem(list0) print(\"#1 :\",result) #", "print(\"#3 :\", max_num) #4 from functools import reduce list3 = [-5,-6,-7,-99,-67,-3,-4,-9] print(\"#4 :\",reduce(max,", "2 list1 = [10,12,3,14,20,7,6,5] list1.sort() print(\"#2 :\",list1[-1]) # 3 list2 = [3,5,9,7,1,5,8,8,7,5,6] max_num", ":\", max_num) #4 from functools import reduce list3 = 
[-5,-6,-7,-99,-67,-3,-4,-9] print(\"#4 :\",reduce(max, list3))", "max_elem(a): max0 = a[0] for elem in a: if elem > max0: max0", "a: if elem > max0: max0 = elem return max0 list0 = [2,3,4,5,6,7,1,2,3]", "list1 = [10,12,3,14,20,7,6,5] list1.sort() print(\"#2 :\",list1[-1]) # 3 list2 = [3,5,9,7,1,5,8,8,7,5,6] max_num =", "= max_elem(list0) print(\"#1 :\",result) # return 7 # 2 list1 = [10,12,3,14,20,7,6,5] list1.sort()", "result = max_elem(list0) print(\"#1 :\",result) # return 7 # 2 list1 = [10,12,3,14,20,7,6,5]", "max0 = elem return max0 list0 = [2,3,4,5,6,7,1,2,3] result = max_elem(list0) print(\"#1 :\",result)" ]
[ "import wsgi # custom JSON handler JSONHandler = media.JSONHandler(dumps=orjson.dumps, loads=orjson.loads) extra_handlers = {", "custom JSON handler JSONHandler = media.JSONHandler(dumps=orjson.dumps, loads=orjson.loads) extra_handlers = { \"application/json\": JSONHandler, \"application/json;", "falcon import media from app import wsgi # custom JSON handler JSONHandler =", "JSONHandler = media.JSONHandler(dumps=orjson.dumps, loads=orjson.loads) extra_handlers = { \"application/json\": JSONHandler, \"application/json; charset=UTF-8\": JSONHandler }", "python import orjson from falcon import media from app import wsgi # custom", "media from app import wsgi # custom JSON handler JSONHandler = media.JSONHandler(dumps=orjson.dumps, loads=orjson.loads)", "wsgi # custom JSON handler JSONHandler = media.JSONHandler(dumps=orjson.dumps, loads=orjson.loads) extra_handlers = { \"application/json\":", "# custom JSON handler JSONHandler = media.JSONHandler(dumps=orjson.dumps, loads=orjson.loads) extra_handlers = { \"application/json\": JSONHandler,", "import orjson from falcon import media from app import wsgi # custom JSON", "JSON handler JSONHandler = media.JSONHandler(dumps=orjson.dumps, loads=orjson.loads) extra_handlers = { \"application/json\": JSONHandler, \"application/json; charset=UTF-8\":", "= media.JSONHandler(dumps=orjson.dumps, loads=orjson.loads) extra_handlers = { \"application/json\": JSONHandler, \"application/json; charset=UTF-8\": JSONHandler } wsgi.req_options.media_handlers.update(extra_handlers)", "import media from app import wsgi # custom JSON handler JSONHandler = media.JSONHandler(dumps=orjson.dumps,", "from falcon import media from app import wsgi # custom JSON handler JSONHandler", "#!/usr/bin/env python import orjson from falcon import media from app import wsgi #", "handler JSONHandler = media.JSONHandler(dumps=orjson.dumps, loads=orjson.loads) extra_handlers = { \"application/json\": JSONHandler, \"application/json; charset=UTF-8\": 
JSONHandler", "from app import wsgi # custom JSON handler JSONHandler = media.JSONHandler(dumps=orjson.dumps, loads=orjson.loads) extra_handlers", "media.JSONHandler(dumps=orjson.dumps, loads=orjson.loads) extra_handlers = { \"application/json\": JSONHandler, \"application/json; charset=UTF-8\": JSONHandler } wsgi.req_options.media_handlers.update(extra_handlers) wsgi.resp_options.media_handlers.update(extra_handlers)", "app import wsgi # custom JSON handler JSONHandler = media.JSONHandler(dumps=orjson.dumps, loads=orjson.loads) extra_handlers =", "orjson from falcon import media from app import wsgi # custom JSON handler" ]
[ "'vfj'], package_dir={\"\": folderLib}, packages=[packageName], include_package_data=True, setup_requires=pytest_runner + wheel, tests_require=[ 'pytest>=2.8', ], python_requires='>=2.7', install_requires=get_requirements('requirements.txt'),", "create the appropriate form of executable for the target platform. #entry_points={ # 'console_scripts':", "setup( name=packageName, version=get_version(), description='Low-level reader and writer for FontLab JSON (VFJ) font source", "'test'}.intersection(sys.argv) pytest_runner = ['pytest_runner'] if needs_pytest else [] needs_wheel = {'bdist_wheel'}.intersection(sys.argv) wheel =", "lines if line and not line.isspace(): requirements.add(re.sub(r'\\s+', '', line)) return sorted(requirements) needs_pytest =", "Software Development :: Libraries :: Python Modules', 'License :: OSI Approved :: BSD", "'Topic :: Multimedia :: Graphics', 'Topic :: Multimedia :: Graphics :: Graphics Conversion',", "'Topic :: Multimedia :: Graphics :: Editors :: Vector-Based', 'Topic :: Software Development", "Development :: Libraries :: Python Modules', 'License :: OSI Approved :: BSD License',", "executable for the target platform. #entry_points={ # 'console_scripts': [ # 'vfj=vfjLib:main', # ],", "# Strip comments. line = re.sub(r'^#.*|\\s#.*', '', line) # Ignore empty lines if", "entry points in preference to the # \"scripts\" keyword. Entry points provide cross-platform", "return path.join(directory, *args) def get_description(*args): readmepath = get_absolute_path('README.md') if path.exists(readmepath): long_description = open(readmepath,", "long_description_content_type='text/markdown', url='https://github.com/kateliev/vfjLib', download_url='https://github.com/kateliev/vfjLib/archive/master.zip', author='<NAME>', author_email='<EMAIL>', license='LICENSE', classifiers=[ 'Environment :: MacOS X', 'Environment ::", "the # \"scripts\" keyword. 
Entry points provide cross-platform support and allow # pip", "['wheel'] if needs_wheel else [] setup( name=packageName, version=get_version(), description='Low-level reader and writer for", "Graphics :: Editors :: Vector-Based', 'Topic :: Software Development :: Libraries :: Python", "requirement files.\"\"\" requirements = set() with open(get_absolute_path(*args)) as handle: for line in handle:", "3.7', ], keywords=['opentype', 'font', 'fontlab', 'vfj'], package_dir={\"\": folderLib}, packages=[packageName], include_package_data=True, setup_requires=pytest_runner + wheel,", "*args) def get_description(*args): readmepath = get_absolute_path('README.md') if path.exists(readmepath): long_description = open(readmepath, encoding='utf-8').read() else:", "(VFJ) font source files', long_description=get_description(), long_description_content_type='text/markdown', url='https://github.com/kateliev/vfjLib', download_url='https://github.com/kateliev/vfjLib/archive/master.zip', author='<NAME>', author_email='<EMAIL>', license='LICENSE', classifiers=[ 'Environment", "Microsoft :: Windows', 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers',", "else [] needs_wheel = {'bdist_wheel'}.intersection(sys.argv) wheel = ['wheel'] if needs_wheel else [] setup(", "\"\"\"Get requirements from pip requirement files.\"\"\" requirements = set() with open(get_absolute_path(*args)) as handle:", "package_dir={\"\": folderLib}, packages=[packageName], include_package_data=True, setup_requires=pytest_runner + wheel, tests_require=[ 'pytest>=2.8', ], python_requires='>=2.7', install_requires=get_requirements('requirements.txt'), #", "the appropriate form of executable for the target platform. 
#entry_points={ # 'console_scripts': [", "X', 'Operating System :: Microsoft :: Windows', 'Development Status :: 3 - Alpha',", "'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Topic :: Multimedia", "line = re.sub(r'^#.*|\\s#.*', '', line) # Ignore empty lines if line and not", "path.join(directory, *args) def get_description(*args): readmepath = get_absolute_path('README.md') if path.exists(readmepath): long_description = open(readmepath, encoding='utf-8').read()", "long_description = open(readmepath, encoding='utf-8').read() else: long_description = '' return long_description def get_requirements(*args): \"\"\"Get", "Modules', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python ::", "= set() with open(get_absolute_path(*args)) as handle: for line in handle: # Strip comments.", "line and not line.isspace(): requirements.add(re.sub(r'\\s+', '', line)) return sorted(requirements) needs_pytest = {'pytest', 'test'}.intersection(sys.argv)", "needs_pytest else [] needs_wheel = {'bdist_wheel'}.intersection(sys.argv) wheel = ['wheel'] if needs_wheel else []", "folderLib = 'Lib' packageName = find_packages(folderLib)[0] def get_version(*args): verpath = (folderLib, packageName, '__init__.py')", "relative pathnames into absolute pathnames.\"\"\" directory = path.dirname(path.abspath(__file__)) return path.join(directory, *args) def get_description(*args):", "long_description=get_description(), long_description_content_type='text/markdown', url='https://github.com/kateliev/vfjLib', download_url='https://github.com/kateliev/vfjLib/archive/master.zip', author='<NAME>', author_email='<EMAIL>', license='LICENSE', classifiers=[ 'Environment :: MacOS X', 'Environment", "sorted(requirements) needs_pytest = {'pytest', 'test'}.intersection(sys.argv) pytest_runner = ['pytest_runner'] if needs_pytest else [] needs_wheel", "cross-platform support and allow # pip to create the appropriate form of executable", "['pytest_runner'] if needs_pytest 
else [] needs_wheel = {'bdist_wheel'}.intersection(sys.argv) wheel = ['wheel'] if needs_wheel", "wheel, tests_require=[ 'pytest>=2.8', ], python_requires='>=2.7', install_requires=get_requirements('requirements.txt'), # To provide executable scripts, use entry", "python_requires='>=2.7', install_requires=get_requirements('requirements.txt'), # To provide executable scripts, use entry points in preference to", "needs_wheel = {'bdist_wheel'}.intersection(sys.argv) wheel = ['wheel'] if needs_wheel else [] setup( name=packageName, version=get_version(),", "'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows',", ":: Python Modules', 'License :: OSI Approved :: BSD License', 'Programming Language ::", "with open(get_absolute_path(*args)) as handle: for line in handle: # Strip comments. line =", "source files', long_description=get_description(), long_description_content_type='text/markdown', url='https://github.com/kateliev/vfjLib', download_url='https://github.com/kateliev/vfjLib/archive/master.zip', author='<NAME>', author_email='<EMAIL>', license='LICENSE', classifiers=[ 'Environment :: MacOS", "if needs_wheel else [] setup( name=packageName, version=get_version(), description='Low-level reader and writer for FontLab", "(folderLib, packageName, '__init__.py') verstrline = open(path.join(*verpath), \"rt\").read() VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo", ":: 3 - Alpha', 'Intended Audience :: Developers', 'Topic :: Multimedia :: Graphics',", "return sorted(requirements) needs_pytest = {'pytest', 'test'}.intersection(sys.argv) pytest_runner = ['pytest_runner'] if needs_pytest else []", "font source files', long_description=get_description(), long_description_content_type='text/markdown', url='https://github.com/kateliev/vfjLib', download_url='https://github.com/kateliev/vfjLib/archive/master.zip', author='<NAME>', author_email='<EMAIL>', license='LICENSE', classifiers=[ 'Environment ::", "from pip requirement files.\"\"\" 
requirements = set() with open(get_absolute_path(*args)) as handle: for line", "'Environment :: MacOS X', 'Environment :: Win32 (MS Windows)', 'Operating System :: MacOS", "else: return \"undefined\" def get_absolute_path(*args): \"\"\"Transform relative pathnames into absolute pathnames.\"\"\" directory =", "long_description def get_requirements(*args): \"\"\"Get requirements from pip requirement files.\"\"\" requirements = set() with", ":: Microsoft :: Windows', 'Development Status :: 3 - Alpha', 'Intended Audience ::", "BSD License', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python ::", "'pytest>=2.8', ], python_requires='>=2.7', install_requires=get_requirements('requirements.txt'), # To provide executable scripts, use entry points in", "if mo: return mo.group(1) else: return \"undefined\" def get_absolute_path(*args): \"\"\"Transform relative pathnames into", "\"undefined\" def get_absolute_path(*args): \"\"\"Transform relative pathnames into absolute pathnames.\"\"\" directory = path.dirname(path.abspath(__file__)) return", "import open from os import path from setuptools import find_packages, setup folderLib =", "verstrline = open(path.join(*verpath), \"rt\").read() VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE, verstrline,", "re.sub(r'^#.*|\\s#.*', '', line) # Ignore empty lines if line and not line.isspace(): requirements.add(re.sub(r'\\s+',", "Strip comments. 
line = re.sub(r'^#.*|\\s#.*', '', line) # Ignore empty lines if line", "Editors :: Vector-Based', 'Topic :: Software Development :: Libraries :: Python Modules', 'License", "License', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.7',", "Ignore empty lines if line and not line.isspace(): requirements.add(re.sub(r'\\s+', '', line)) return sorted(requirements)", "for FontLab JSON (VFJ) font source files', long_description=get_description(), long_description_content_type='text/markdown', url='https://github.com/kateliev/vfjLib', download_url='https://github.com/kateliev/vfjLib/archive/master.zip', author='<NAME>', author_email='<EMAIL>',", "license='LICENSE', classifiers=[ 'Environment :: MacOS X', 'Environment :: Win32 (MS Windows)', 'Operating System", "# \"scripts\" keyword. Entry points provide cross-platform support and allow # pip to", "], python_requires='>=2.7', install_requires=get_requirements('requirements.txt'), # To provide executable scripts, use entry points in preference", "reader and writer for FontLab JSON (VFJ) font source files', long_description=get_description(), long_description_content_type='text/markdown', url='https://github.com/kateliev/vfjLib',", "import find_packages, setup folderLib = 'Lib' packageName = find_packages(folderLib)[0] def get_version(*args): verpath =", "return long_description def get_requirements(*args): \"\"\"Get requirements from pip requirement files.\"\"\" requirements = set()", "= re.sub(r'^#.*|\\s#.*', '', line) # Ignore empty lines if line and not line.isspace():", "if needs_pytest else [] needs_wheel = {'bdist_wheel'}.intersection(sys.argv) wheel = ['wheel'] if needs_wheel else", "provide cross-platform support and allow # pip to create the appropriate form of", "get_description(*args): readmepath = get_absolute_path('README.md') if path.exists(readmepath): long_description = open(readmepath, encoding='utf-8').read() else: long_description =", "Graphics', 'Topic :: Multimedia :: 
Graphics :: Graphics Conversion', 'Topic :: Multimedia ::", "'Topic :: Multimedia :: Graphics :: Graphics Conversion', 'Topic :: Multimedia :: Graphics", "to create the appropriate form of executable for the target platform. #entry_points={ #", "re.search(VSRE, verstrline, re.M) if mo: return mo.group(1) else: return \"undefined\" def get_absolute_path(*args): \"\"\"Transform", "mo.group(1) else: return \"undefined\" def get_absolute_path(*args): \"\"\"Transform relative pathnames into absolute pathnames.\"\"\" directory", "= {'bdist_wheel'}.intersection(sys.argv) wheel = ['wheel'] if needs_wheel else [] setup( name=packageName, version=get_version(), description='Low-level", "[] setup( name=packageName, version=get_version(), description='Low-level reader and writer for FontLab JSON (VFJ) font", "path from setuptools import find_packages, setup folderLib = 'Lib' packageName = find_packages(folderLib)[0] def", "files.\"\"\" requirements = set() with open(get_absolute_path(*args)) as handle: for line in handle: #", "(MS Windows)', 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft", "'font', 'fontlab', 'vfj'], package_dir={\"\": folderLib}, packages=[packageName], include_package_data=True, setup_requires=pytest_runner + wheel, tests_require=[ 'pytest>=2.8', ],", "- Alpha', 'Intended Audience :: Developers', 'Topic :: Multimedia :: Graphics', 'Topic ::", ":: OSI Approved :: BSD License', 'Programming Language :: Python :: 2.7', 'Programming", "# To provide executable scripts, use entry points in preference to the #", "handle: # Strip comments. line = re.sub(r'^#.*|\\s#.*', '', line) # Ignore empty lines", "for the target platform. 
#entry_points={ # 'console_scripts': [ # 'vfj=vfjLib:main', # ], #},", "open(path.join(*verpath), \"rt\").read() VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE, verstrline, re.M) if", ":: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Development Status", "import re import sys from codecs import open from os import path from", "<filename>setup.py from __future__ import absolute_import, division, print_function import re import sys from codecs", "'Programming Language :: Python :: 3.7', ], keywords=['opentype', 'font', 'fontlab', 'vfj'], package_dir={\"\": folderLib},", "re import sys from codecs import open from os import path from setuptools", "VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE, verstrline, re.M) if mo: return", "Vector-Based', 'Topic :: Software Development :: Libraries :: Python Modules', 'License :: OSI", "folderLib}, packages=[packageName], include_package_data=True, setup_requires=pytest_runner + wheel, tests_require=[ 'pytest>=2.8', ], python_requires='>=2.7', install_requires=get_requirements('requirements.txt'), # To", "Libraries :: Python Modules', 'License :: OSI Approved :: BSD License', 'Programming Language", "# pip to create the appropriate form of executable for the target platform.", "from setuptools import find_packages, setup folderLib = 'Lib' packageName = find_packages(folderLib)[0] def get_version(*args):", "setuptools import find_packages, setup folderLib = 'Lib' packageName = find_packages(folderLib)[0] def get_version(*args): verpath", "= ['pytest_runner'] if needs_pytest else [] needs_wheel = {'bdist_wheel'}.intersection(sys.argv) wheel = ['wheel'] if", "= path.dirname(path.abspath(__file__)) return path.join(directory, *args) def get_description(*args): readmepath = get_absolute_path('README.md') if path.exists(readmepath): long_description", "def get_version(*args): verpath = (folderLib, packageName, '__init__.py') verstrline = open(path.join(*verpath), 
\"rt\").read() VSRE =", "open(readmepath, encoding='utf-8').read() else: long_description = '' return long_description def get_requirements(*args): \"\"\"Get requirements from", "in handle: # Strip comments. line = re.sub(r'^#.*|\\s#.*', '', line) # Ignore empty", "files', long_description=get_description(), long_description_content_type='text/markdown', url='https://github.com/kateliev/vfjLib', download_url='https://github.com/kateliev/vfjLib/archive/master.zip', author='<NAME>', author_email='<EMAIL>', license='LICENSE', classifiers=[ 'Environment :: MacOS X',", "Alpha', 'Intended Audience :: Developers', 'Topic :: Multimedia :: Graphics', 'Topic :: Multimedia", "Approved :: BSD License', 'Programming Language :: Python :: 2.7', 'Programming Language ::", "from __future__ import absolute_import, division, print_function import re import sys from codecs import", "find_packages, setup folderLib = 'Lib' packageName = find_packages(folderLib)[0] def get_version(*args): verpath = (folderLib,", "readmepath = get_absolute_path('README.md') if path.exists(readmepath): long_description = open(readmepath, encoding='utf-8').read() else: long_description = ''", "Audience :: Developers', 'Topic :: Multimedia :: Graphics', 'Topic :: Multimedia :: Graphics", "{'bdist_wheel'}.intersection(sys.argv) wheel = ['wheel'] if needs_wheel else [] setup( name=packageName, version=get_version(), description='Low-level reader", ":: Libraries :: Python Modules', 'License :: OSI Approved :: BSD License', 'Programming", ":: Developers', 'Topic :: Multimedia :: Graphics', 'Topic :: Multimedia :: Graphics ::", "keyword. Entry points provide cross-platform support and allow # pip to create the", "], keywords=['opentype', 'font', 'fontlab', 'vfj'], package_dir={\"\": folderLib}, packages=[packageName], include_package_data=True, setup_requires=pytest_runner + wheel, tests_require=[", "line in handle: # Strip comments. 
line = re.sub(r'^#.*|\\s#.*', '', line) # Ignore", "3 - Alpha', 'Intended Audience :: Developers', 'Topic :: Multimedia :: Graphics', 'Topic", "provide executable scripts, use entry points in preference to the # \"scripts\" keyword.", "{'pytest', 'test'}.intersection(sys.argv) pytest_runner = ['pytest_runner'] if needs_pytest else [] needs_wheel = {'bdist_wheel'}.intersection(sys.argv) wheel", "version=get_version(), description='Low-level reader and writer for FontLab JSON (VFJ) font source files', long_description=get_description(),", "needs_pytest = {'pytest', 'test'}.intersection(sys.argv) pytest_runner = ['pytest_runner'] if needs_pytest else [] needs_wheel =", "if path.exists(readmepath): long_description = open(readmepath, encoding='utf-8').read() else: long_description = '' return long_description def", "comments. line = re.sub(r'^#.*|\\s#.*', '', line) # Ignore empty lines if line and", "= 'Lib' packageName = find_packages(folderLib)[0] def get_version(*args): verpath = (folderLib, packageName, '__init__.py') verstrline", "import absolute_import, division, print_function import re import sys from codecs import open from", "Multimedia :: Graphics :: Editors :: Vector-Based', 'Topic :: Software Development :: Libraries", "Developers', 'Topic :: Multimedia :: Graphics', 'Topic :: Multimedia :: Graphics :: Graphics", ":: Graphics :: Editors :: Vector-Based', 'Topic :: Software Development :: Libraries ::", ":: MacOS X', 'Operating System :: Microsoft :: Windows', 'Development Status :: 3", "points in preference to the # \"scripts\" keyword. 
Entry points provide cross-platform support", "= re.search(VSRE, verstrline, re.M) if mo: return mo.group(1) else: return \"undefined\" def get_absolute_path(*args):", "pip requirement files.\"\"\" requirements = set() with open(get_absolute_path(*args)) as handle: for line in", "from os import path from setuptools import find_packages, setup folderLib = 'Lib' packageName", "import path from setuptools import find_packages, setup folderLib = 'Lib' packageName = find_packages(folderLib)[0]", "set() with open(get_absolute_path(*args)) as handle: for line in handle: # Strip comments. line", "points provide cross-platform support and allow # pip to create the appropriate form", "Graphics Conversion', 'Topic :: Multimedia :: Graphics :: Editors :: Vector-Based', 'Topic ::", "open from os import path from setuptools import find_packages, setup folderLib = 'Lib'", "else [] setup( name=packageName, version=get_version(), description='Low-level reader and writer for FontLab JSON (VFJ)", "'License :: OSI Approved :: BSD License', 'Programming Language :: Python :: 2.7',", "Conversion', 'Topic :: Multimedia :: Graphics :: Editors :: Vector-Based', 'Topic :: Software", "get_version(*args): verpath = (folderLib, packageName, '__init__.py') verstrline = open(path.join(*verpath), \"rt\").read() VSRE = r\"^__version__", ":: Editors :: Vector-Based', 'Topic :: Software Development :: Libraries :: Python Modules',", "packages=[packageName], include_package_data=True, setup_requires=pytest_runner + wheel, tests_require=[ 'pytest>=2.8', ], python_requires='>=2.7', install_requires=get_requirements('requirements.txt'), # To provide", "support and allow # pip to create the appropriate form of executable for", "os import path from setuptools import find_packages, setup folderLib = 'Lib' packageName =", "Graphics :: Graphics Conversion', 'Topic :: Multimedia :: Graphics :: Editors :: Vector-Based',", "Windows)', 'Operating System :: MacOS :: MacOS X', 'Operating System :: Microsoft ::", 
"install_requires=get_requirements('requirements.txt'), # To provide executable scripts, use entry points in preference to the", "MacOS X', 'Environment :: Win32 (MS Windows)', 'Operating System :: MacOS :: MacOS", "System :: Microsoft :: Windows', 'Development Status :: 3 - Alpha', 'Intended Audience", "['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE, verstrline, re.M) if mo: return mo.group(1) else: return \"undefined\"", "find_packages(folderLib)[0] def get_version(*args): verpath = (folderLib, packageName, '__init__.py') verstrline = open(path.join(*verpath), \"rt\").read() VSRE", "= open(path.join(*verpath), \"rt\").read() VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE, verstrline, re.M)", "empty lines if line and not line.isspace(): requirements.add(re.sub(r'\\s+', '', line)) return sorted(requirements) needs_pytest", "line.isspace(): requirements.add(re.sub(r'\\s+', '', line)) return sorted(requirements) needs_pytest = {'pytest', 'test'}.intersection(sys.argv) pytest_runner = ['pytest_runner']", "pytest_runner = ['pytest_runner'] if needs_pytest else [] needs_wheel = {'bdist_wheel'}.intersection(sys.argv) wheel = ['wheel']", "and allow # pip to create the appropriate form of executable for the", ":: 2.7', 'Programming Language :: Python :: 3.7', ], keywords=['opentype', 'font', 'fontlab', 'vfj'],", "form of executable for the target platform. #entry_points={ # 'console_scripts': [ # 'vfj=vfjLib:main',", "in preference to the # \"scripts\" keyword. 
Entry points provide cross-platform support and", "2.7', 'Programming Language :: Python :: 3.7', ], keywords=['opentype', 'font', 'fontlab', 'vfj'], package_dir={\"\":", ":: Python :: 2.7', 'Programming Language :: Python :: 3.7', ], keywords=['opentype', 'font',", "def get_description(*args): readmepath = get_absolute_path('README.md') if path.exists(readmepath): long_description = open(readmepath, encoding='utf-8').read() else: long_description", "'Lib' packageName = find_packages(folderLib)[0] def get_version(*args): verpath = (folderLib, packageName, '__init__.py') verstrline =", "from codecs import open from os import path from setuptools import find_packages, setup", "name=packageName, version=get_version(), description='Low-level reader and writer for FontLab JSON (VFJ) font source files',", "path.exists(readmepath): long_description = open(readmepath, encoding='utf-8').read() else: long_description = '' return long_description def get_requirements(*args):", "JSON (VFJ) font source files', long_description=get_description(), long_description_content_type='text/markdown', url='https://github.com/kateliev/vfjLib', download_url='https://github.com/kateliev/vfjLib/archive/master.zip', author='<NAME>', author_email='<EMAIL>', license='LICENSE', classifiers=[", ":: MacOS X', 'Environment :: Win32 (MS Windows)', 'Operating System :: MacOS ::", "To provide executable scripts, use entry points in preference to the # \"scripts\"", "allow # pip to create the appropriate form of executable for the target", "'Operating System :: Microsoft :: Windows', 'Development Status :: 3 - Alpha', 'Intended", "mo: return mo.group(1) else: return \"undefined\" def get_absolute_path(*args): \"\"\"Transform relative pathnames into absolute", "requirements from pip requirement files.\"\"\" requirements = set() with open(get_absolute_path(*args)) as handle: for", "author='<NAME>', author_email='<EMAIL>', license='LICENSE', classifiers=[ 'Environment :: MacOS X', 'Environment :: Win32 
(MS Windows)',", "OSI Approved :: BSD License', 'Programming Language :: Python :: 2.7', 'Programming Language", "'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3.7', ],", "preference to the # \"scripts\" keyword. Entry points provide cross-platform support and allow", "line)) return sorted(requirements) needs_pytest = {'pytest', 'test'}.intersection(sys.argv) pytest_runner = ['pytest_runner'] if needs_pytest else", "absolute_import, division, print_function import re import sys from codecs import open from os", "if line and not line.isspace(): requirements.add(re.sub(r'\\s+', '', line)) return sorted(requirements) needs_pytest = {'pytest',", ":: Win32 (MS Windows)', 'Operating System :: MacOS :: MacOS X', 'Operating System", ":: Vector-Based', 'Topic :: Software Development :: Libraries :: Python Modules', 'License ::", "absolute pathnames.\"\"\" directory = path.dirname(path.abspath(__file__)) return path.join(directory, *args) def get_description(*args): readmepath = get_absolute_path('README.md')", "Entry points provide cross-platform support and allow # pip to create the appropriate", "'', line)) return sorted(requirements) needs_pytest = {'pytest', 'test'}.intersection(sys.argv) pytest_runner = ['pytest_runner'] if needs_pytest", "Windows', 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Topic ::", "get_absolute_path(*args): \"\"\"Transform relative pathnames into absolute pathnames.\"\"\" directory = path.dirname(path.abspath(__file__)) return path.join(directory, *args)", "[] needs_wheel = {'bdist_wheel'}.intersection(sys.argv) wheel = ['wheel'] if needs_wheel else [] setup( name=packageName,", "System :: MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Development", "= r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE, verstrline, re.M) if mo: return mo.group(1)", "Python :: 3.7', ], keywords=['opentype', 'font', 'fontlab', 'vfj'], package_dir={\"\": folderLib}, 
packages=[packageName], include_package_data=True, setup_requires=pytest_runner", "Language :: Python :: 2.7', 'Programming Language :: Python :: 3.7', ], keywords=['opentype',", "and not line.isspace(): requirements.add(re.sub(r'\\s+', '', line)) return sorted(requirements) needs_pytest = {'pytest', 'test'}.intersection(sys.argv) pytest_runner", "import sys from codecs import open from os import path from setuptools import", "return \"undefined\" def get_absolute_path(*args): \"\"\"Transform relative pathnames into absolute pathnames.\"\"\" directory = path.dirname(path.abspath(__file__))", "re.M) if mo: return mo.group(1) else: return \"undefined\" def get_absolute_path(*args): \"\"\"Transform relative pathnames", "not line.isspace(): requirements.add(re.sub(r'\\s+', '', line)) return sorted(requirements) needs_pytest = {'pytest', 'test'}.intersection(sys.argv) pytest_runner =", "print_function import re import sys from codecs import open from os import path", "as handle: for line in handle: # Strip comments. line = re.sub(r'^#.*|\\s#.*', '',", "open(get_absolute_path(*args)) as handle: for line in handle: # Strip comments. line = re.sub(r'^#.*|\\s#.*',", ":: Multimedia :: Graphics :: Editors :: Vector-Based', 'Topic :: Software Development ::", "keywords=['opentype', 'font', 'fontlab', 'vfj'], package_dir={\"\": folderLib}, packages=[packageName], include_package_data=True, setup_requires=pytest_runner + wheel, tests_require=[ 'pytest>=2.8',", ":: Python :: 3.7', ], keywords=['opentype', 'font', 'fontlab', 'vfj'], package_dir={\"\": folderLib}, packages=[packageName], include_package_data=True,", "the target platform. #entry_points={ # 'console_scripts': [ # 'vfj=vfjLib:main', # ], #}, )", "= ['wheel'] if needs_wheel else [] setup( name=packageName, version=get_version(), description='Low-level reader and writer", "handle: for line in handle: # Strip comments. 
line = re.sub(r'^#.*|\\s#.*', '', line)", "= open(readmepath, encoding='utf-8').read() else: long_description = '' return long_description def get_requirements(*args): \"\"\"Get requirements", ":: Graphics', 'Topic :: Multimedia :: Graphics :: Graphics Conversion', 'Topic :: Multimedia", "get_requirements(*args): \"\"\"Get requirements from pip requirement files.\"\"\" requirements = set() with open(get_absolute_path(*args)) as", "tests_require=[ 'pytest>=2.8', ], python_requires='>=2.7', install_requires=get_requirements('requirements.txt'), # To provide executable scripts, use entry points", "MacOS :: MacOS X', 'Operating System :: Microsoft :: Windows', 'Development Status ::", "r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE, verstrline, re.M) if mo: return mo.group(1) else:", "'' return long_description def get_requirements(*args): \"\"\"Get requirements from pip requirement files.\"\"\" requirements =", "= find_packages(folderLib)[0] def get_version(*args): verpath = (folderLib, packageName, '__init__.py') verstrline = open(path.join(*verpath), \"rt\").read()", "def get_requirements(*args): \"\"\"Get requirements from pip requirement files.\"\"\" requirements = set() with open(get_absolute_path(*args))", "line) # Ignore empty lines if line and not line.isspace(): requirements.add(re.sub(r'\\s+', '', line))", "division, print_function import re import sys from codecs import open from os import", "download_url='https://github.com/kateliev/vfjLib/archive/master.zip', author='<NAME>', author_email='<EMAIL>', license='LICENSE', classifiers=[ 'Environment :: MacOS X', 'Environment :: Win32 (MS", "classifiers=[ 'Environment :: MacOS X', 'Environment :: Win32 (MS Windows)', 'Operating System ::", "Multimedia :: Graphics :: Graphics Conversion', 'Topic :: Multimedia :: Graphics :: Editors", "include_package_data=True, setup_requires=pytest_runner + wheel, tests_require=[ 'pytest>=2.8', ], python_requires='>=2.7', 
install_requires=get_requirements('requirements.txt'), # To provide executable", ":: Windows', 'Development Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Topic", "packageName = find_packages(folderLib)[0] def get_version(*args): verpath = (folderLib, packageName, '__init__.py') verstrline = open(path.join(*verpath),", "author_email='<EMAIL>', license='LICENSE', classifiers=[ 'Environment :: MacOS X', 'Environment :: Win32 (MS Windows)', 'Operating", "MacOS X', 'Operating System :: Microsoft :: Windows', 'Development Status :: 3 -", "codecs import open from os import path from setuptools import find_packages, setup folderLib", "of executable for the target platform. #entry_points={ # 'console_scripts': [ # 'vfj=vfjLib:main', #", "writer for FontLab JSON (VFJ) font source files', long_description=get_description(), long_description_content_type='text/markdown', url='https://github.com/kateliev/vfjLib', download_url='https://github.com/kateliev/vfjLib/archive/master.zip', author='<NAME>',", "setup folderLib = 'Lib' packageName = find_packages(folderLib)[0] def get_version(*args): verpath = (folderLib, packageName,", "verpath = (folderLib, packageName, '__init__.py') verstrline = open(path.join(*verpath), \"rt\").read() VSRE = r\"^__version__ =", "sys from codecs import open from os import path from setuptools import find_packages,", "return mo.group(1) else: return \"undefined\" def get_absolute_path(*args): \"\"\"Transform relative pathnames into absolute pathnames.\"\"\"", "pathnames into absolute pathnames.\"\"\" directory = path.dirname(path.abspath(__file__)) return path.join(directory, *args) def get_description(*args): readmepath", ":: Graphics Conversion', 'Topic :: Multimedia :: Graphics :: Editors :: Vector-Based', 'Topic", "def get_absolute_path(*args): \"\"\"Transform relative pathnames into absolute pathnames.\"\"\" directory = path.dirname(path.abspath(__file__)) return path.join(directory,", "Multimedia :: Graphics', 'Topic :: Multimedia :: 
Graphics :: Graphics Conversion', 'Topic ::", "setup_requires=pytest_runner + wheel, tests_require=[ 'pytest>=2.8', ], python_requires='>=2.7', install_requires=get_requirements('requirements.txt'), # To provide executable scripts,", "Python :: 2.7', 'Programming Language :: Python :: 3.7', ], keywords=['opentype', 'font', 'fontlab',", "requirements = set() with open(get_absolute_path(*args)) as handle: for line in handle: # Strip", "Python Modules', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python", ":: Multimedia :: Graphics :: Graphics Conversion', 'Topic :: Multimedia :: Graphics ::", "verstrline, re.M) if mo: return mo.group(1) else: return \"undefined\" def get_absolute_path(*args): \"\"\"Transform relative", "Win32 (MS Windows)', 'Operating System :: MacOS :: MacOS X', 'Operating System ::", "+ wheel, tests_require=[ 'pytest>=2.8', ], python_requires='>=2.7', install_requires=get_requirements('requirements.txt'), # To provide executable scripts, use", "= {'pytest', 'test'}.intersection(sys.argv) pytest_runner = ['pytest_runner'] if needs_pytest else [] needs_wheel = {'bdist_wheel'}.intersection(sys.argv)", ":: Graphics :: Graphics Conversion', 'Topic :: Multimedia :: Graphics :: Editors ::", ":: BSD License', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python", "directory = path.dirname(path.abspath(__file__)) return path.join(directory, *args) def get_description(*args): readmepath = get_absolute_path('README.md') if path.exists(readmepath):", "= (folderLib, packageName, '__init__.py') verstrline = open(path.join(*verpath), \"rt\").read() VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\"", "= get_absolute_path('README.md') if path.exists(readmepath): long_description = open(readmepath, encoding='utf-8').read() else: long_description = '' return", "\"scripts\" keyword. 
Entry points provide cross-platform support and allow # pip to create", "pip to create the appropriate form of executable for the target platform. #entry_points={", "scripts, use entry points in preference to the # \"scripts\" keyword. Entry points", "appropriate form of executable for the target platform. #entry_points={ # 'console_scripts': [ #", "__future__ import absolute_import, division, print_function import re import sys from codecs import open", "executable scripts, use entry points in preference to the # \"scripts\" keyword. Entry", "for line in handle: # Strip comments. line = re.sub(r'^#.*|\\s#.*', '', line) #", "X', 'Environment :: Win32 (MS Windows)', 'Operating System :: MacOS :: MacOS X',", "wheel = ['wheel'] if needs_wheel else [] setup( name=packageName, version=get_version(), description='Low-level reader and", "Language :: Python :: 3.7', ], keywords=['opentype', 'font', 'fontlab', 'vfj'], package_dir={\"\": folderLib}, packages=[packageName],", "packageName, '__init__.py') verstrline = open(path.join(*verpath), \"rt\").read() VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo =", "else: long_description = '' return long_description def get_requirements(*args): \"\"\"Get requirements from pip requirement", "'Environment :: Win32 (MS Windows)', 'Operating System :: MacOS :: MacOS X', 'Operating", "pathnames.\"\"\" directory = path.dirname(path.abspath(__file__)) return path.join(directory, *args) def get_description(*args): readmepath = get_absolute_path('README.md') if", "get_absolute_path('README.md') if path.exists(readmepath): long_description = open(readmepath, encoding='utf-8').read() else: long_description = '' return long_description", "= '' return long_description def get_requirements(*args): \"\"\"Get requirements from pip requirement files.\"\"\" requirements", "'fontlab', 'vfj'], package_dir={\"\": folderLib}, packages=[packageName], include_package_data=True, setup_requires=pytest_runner + wheel, tests_require=[ 'pytest>=2.8', 
], python_requires='>=2.7',", "\"rt\").read() VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE, verstrline, re.M) if mo:", "mo = re.search(VSRE, verstrline, re.M) if mo: return mo.group(1) else: return \"undefined\" def", "'__init__.py') verstrline = open(path.join(*verpath), \"rt\").read() VSRE = r\"^__version__ = ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE,", ":: Multimedia :: Graphics', 'Topic :: Multimedia :: Graphics :: Graphics Conversion', 'Topic", "into absolute pathnames.\"\"\" directory = path.dirname(path.abspath(__file__)) return path.join(directory, *args) def get_description(*args): readmepath =", "description='Low-level reader and writer for FontLab JSON (VFJ) font source files', long_description=get_description(), long_description_content_type='text/markdown',", "and writer for FontLab JSON (VFJ) font source files', long_description=get_description(), long_description_content_type='text/markdown', url='https://github.com/kateliev/vfjLib', download_url='https://github.com/kateliev/vfjLib/archive/master.zip',", "long_description = '' return long_description def get_requirements(*args): \"\"\"Get requirements from pip requirement files.\"\"\"", "path.dirname(path.abspath(__file__)) return path.join(directory, *args) def get_description(*args): readmepath = get_absolute_path('README.md') if path.exists(readmepath): long_description =", "url='https://github.com/kateliev/vfjLib', download_url='https://github.com/kateliev/vfjLib/archive/master.zip', author='<NAME>', author_email='<EMAIL>', license='LICENSE', classifiers=[ 'Environment :: MacOS X', 'Environment :: Win32", "'Topic :: Software Development :: Libraries :: Python Modules', 'License :: OSI Approved", "requirements.add(re.sub(r'\\s+', '', line)) return sorted(requirements) needs_pytest = {'pytest', 'test'}.intersection(sys.argv) pytest_runner = ['pytest_runner'] if", "encoding='utf-8').read() else: long_description = '' return long_description def 
get_requirements(*args): \"\"\"Get requirements from pip", "FontLab JSON (VFJ) font source files', long_description=get_description(), long_description_content_type='text/markdown', url='https://github.com/kateliev/vfjLib', download_url='https://github.com/kateliev/vfjLib/archive/master.zip', author='<NAME>', author_email='<EMAIL>', license='LICENSE',", "'', line) # Ignore empty lines if line and not line.isspace(): requirements.add(re.sub(r'\\s+', '',", "Status :: 3 - Alpha', 'Intended Audience :: Developers', 'Topic :: Multimedia ::", "use entry points in preference to the # \"scripts\" keyword. Entry points provide", ":: Software Development :: Libraries :: Python Modules', 'License :: OSI Approved ::", "= ['\\\"]([^'\\\"]*)['\\\"]\" mo = re.search(VSRE, verstrline, re.M) if mo: return mo.group(1) else: return", "\"\"\"Transform relative pathnames into absolute pathnames.\"\"\" directory = path.dirname(path.abspath(__file__)) return path.join(directory, *args) def", "needs_wheel else [] setup( name=packageName, version=get_version(), description='Low-level reader and writer for FontLab JSON", "to the # \"scripts\" keyword. Entry points provide cross-platform support and allow #", "# Ignore empty lines if line and not line.isspace(): requirements.add(re.sub(r'\\s+', '', line)) return", "'Intended Audience :: Developers', 'Topic :: Multimedia :: Graphics', 'Topic :: Multimedia ::", ":: 3.7', ], keywords=['opentype', 'font', 'fontlab', 'vfj'], package_dir={\"\": folderLib}, packages=[packageName], include_package_data=True, setup_requires=pytest_runner +" ]
[ "Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html Property Document: - ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace - ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname - ``p_ResourceName``:", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc:", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value\"\"\" @attr.s class ApplicationVersionSourceBundle(Property): \"\"\" AWS Object", "``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname - ``p_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename - ``p_Tier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier - 
``p_VersionLabel``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel - ``p_Tags``:", "``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3 - ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled - ``p_MaxCount``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" p_DeleteSourceFromS3:", "Object Type = \"AWS::ElasticBeanstalk::Environment\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname -", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCount\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount\"\"\" @attr.s class ApplicationApplicationVersionLifecycleConfig(Property): \"\"\" AWS", "typing.Union['ApplicationApplicationVersionLifecycleConfig', dict] = attr.ib( default=None, converter=ApplicationApplicationVersionLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationVersionLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: 
\"VersionLifecycleConfig\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig\"\"\" #---", "@attr.s class ConfigurationTemplateConfigurationOptionSetting(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html Property", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Name\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name\"\"\" p_Type: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace - ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename - ``p_Value``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value \"\"\" AWS_OBJECT_TYPE =", "Type = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html Property Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3 - ``p_Enabled``:", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name\"\"\" p_OperationsRole: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname - ``rp_SourceBundle``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-description \"\"\" AWS_OBJECT_TYPE =", "\"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" p_ServiceRole: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ServiceRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole\"\"\" p_VersionLifecycleConfig:", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str =", "``p_OperationsRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings - ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn - ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname - ``p_TemplateName``:", "- ``p_MaxCount``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" p_DeleteSourceFromS3: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)),", "rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname\"\"\" p_CNAMEPrefix: TypeHint.intrinsic_str", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description\"\"\" p_ResourceLifecycleConfig: typing.Union['ApplicationApplicationResourceLifecycleConfig', dict]", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role\"\"\" p_OptionSettings: typing.List[typing.Union['EnvironmentOptionSetting', dict]] = attr.ib( default=None, converter=EnvironmentOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(EnvironmentOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"},", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-description\"\"\"", "p_TemplateName: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename\"\"\" p_Tier: typing.Union['EnvironmentTier',", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier\"\"\" p_VersionLabel: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLabel\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel\"\"\" p_Tags:", "attr.ib( default=None, converter=Tag.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"Tags\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags\"\"\" @property def rv_EndpointURL(self)", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description\"\"\" p_EnvironmentId: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentId\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid\"\"\" p_OptionSettings:", 
"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname\"\"\" p_ResourceName:", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description\"\"\"", "= \"AWS::ElasticBeanstalk::Environment\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname\"\"\"", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ServiceRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole\"\"\" p_VersionLifecycleConfig: typing.Union['ApplicationApplicationVersionLifecycleConfig', dict] = attr.ib( default=None,", 
"converter=ConfigurationTemplateConfigurationOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ConfigurationTemplateConfigurationOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str = attr.ib( default=None,", "validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeInDays\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays\"\"\" @attr.s class ConfigurationTemplateSourceConfiguration(Property): \"\"\" AWS Object Type", "from ..core.model import ( Property, Resource, Tag, GetAtt, TypeHint, TypeCheck, ) from ..core.constant", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname - ``p_CNAMEPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description -", "p_ResourceName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str", "= attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname\"\"\" rp_TemplateName: TypeHint.intrinsic_str = attr.ib(", "attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str = attr.ib( default=None,", "\"MaxCount\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount\"\"\" @attr.s class ApplicationApplicationVersionLifecycleConfig(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\"", "default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname\"\"\" p_CNAMEPrefix: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname - ``rp_SourceBundle``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle - ``p_Description``:", "( Property, Resource, Tag, GetAtt, TypeHint, TypeCheck, ) from ..core.constant import AttrMeta #---", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" rp_Namespace: TypeHint.intrinsic_str", "attr.ib( default=None, converter=ApplicationApplicationResourceLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationResourceLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceLifecycleConfig\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig\"\"\" @attr.s class Environment(Resource): \"\"\"", "Property Document: - ``p_ServiceRole``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole - ``p_VersionLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" p_ServiceRole:", "- ``p_OperationsRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings - ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn - ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname -", "``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename - ``p_Value``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" rp_Namespace:", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentId\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid\"\"\" p_OptionSettings: typing.List[typing.Union['ConfigurationTemplateConfigurationOptionSetting', dict]] =", "= attr.ib( default=None, converter=ConfigurationTemplateConfigurationOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ConfigurationTemplateConfigurationOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str", "AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" p_MaxAgeRule: typing.Union['ApplicationMaxAgeRule', dict] = attr.ib( default=None, converter=ApplicationMaxAgeRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxAgeRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeRule\"},", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle - ``p_Description``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-description \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None,", "EnvironmentTier(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment.Tier\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html Property Document: -", "``p_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description - ``p_ResourceLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application\" p_ApplicationName:", "Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname -", "metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3\"\"\" p_Enabled: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"},", "Application(Resource): \"\"\" AWS Object Type = 
\"AWS::ElasticBeanstalk::Application\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html Property Document: -", "Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname - ``p_CNAMEPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description - ``p_EnvironmentName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name", "rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname\"\"\" p_Description: TypeHint.intrinsic_str", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type\"\"\" p_Version: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Version\"}, ) \"\"\"Doc:", "Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname - ``rp_SourceBundle``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-description \"\"\" AWS_OBJECT_TYPE", "import ( Property, Resource, Tag, GetAtt, TypeHint, TypeCheck, ) from ..core.constant import AttrMeta", "Document: - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name - ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type - ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version \"\"\" AWS_OBJECT_TYPE =", "Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname - ``rp_TemplateName``:", "\"MaxCountRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule\"\"\" @attr.s class ApplicationApplicationResourceLifecycleConfig(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\"", "metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"},", "validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxAgeRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule\"\"\" p_MaxCountRule: typing.Union['ApplicationMaxCountRule', dict] = attr.ib( default=None, converter=ApplicationMaxCountRule.from_dict,", "p_MaxAgeRule: typing.Union['ApplicationMaxAgeRule', dict] = attr.ib( default=None, converter=ApplicationMaxAgeRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxAgeRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule\"\"\"", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description\"\"\" p_EnvironmentName: TypeHint.intrinsic_str = attr.ib(", "- ``p_MaxAgeRule``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule - ``p_MaxCountRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" p_MaxAgeRule: typing.Union['ApplicationMaxAgeRule', dict]", "- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description - ``p_EnvironmentName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name - ``p_OperationsRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings -", "Property Document: - ``rp_S3Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket - ``rp_S3Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" rp_S3Bucket:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname - ``p_Description``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description - ``p_EnvironmentId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings - ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3\"\"\" p_Enabled: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled\"\"\"", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled\"\"\" p_MaxCount: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCount\"}, ) \"\"\"Doc:", "= attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename\"\"\" @attr.s class EnvironmentTier(Property): \"\"\"", "= \"AWS::ElasticBeanstalk::Application\" p_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name\"\"\"", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name\"\"\" p_OperationsRole: TypeHint.intrinsic_str = attr.ib( default=None,", "``p_MaxAgeRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule - ``p_MaxCountRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" p_MaxAgeRule: typing.Union['ApplicationMaxAgeRule', dict] =", "Type = \"AWS::ElasticBeanstalk::Application\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html Property Document: - ``p_ApplicationName``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name - ``p_Description``:", "p_Value: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value\"\"\" @attr.s class", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname\"\"\" p_SourceConfiguration: typing.Union['ConfigurationTemplateSourceConfiguration', dict] =", "\"EnvironmentId\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid\"\"\" p_OptionSettings: typing.List[typing.Union['ConfigurationTemplateConfigurationOptionSetting', dict]] = attr.ib( default=None, converter=ConfigurationTemplateConfigurationOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ConfigurationTemplateConfigurationOptionSetting), iterable_validator=attr.validators.instance_of(list))),", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Bucket\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket\"\"\" rp_S3Key: TypeHint.intrinsic_str =", "\"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname\"\"\" rp_TemplateName:", "\"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname\"\"\" p_CNAMEPrefix: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"CNAMEPrefix\"}, )", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description\"\"\" p_EnvironmentName: TypeHint.intrinsic_str = attr.ib( default=None,", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename\"\"\" p_Tier: typing.Union['EnvironmentTier', dict] = attr.ib( default=None, converter=EnvironmentTier.from_dict, validator=attr.validators.optional(attr.validators.instance_of(EnvironmentTier)), metadata={AttrMeta.PROPERTY_NAME: \"Tier\"}, )", 
"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname - ``rp_SourceBundle``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-description \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion\" rp_ApplicationName: TypeHint.intrinsic_str", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str =", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname\"\"\" p_TemplateName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc:", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description\"\"\" p_EnvironmentId: TypeHint.intrinsic_str = attr.ib( default=None,", "Property Document: - ``p_DeleteSourceFromS3``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3 - ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled - ``p_MaxAgeInDays``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays \"\"\" AWS_OBJECT_TYPE", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ServiceRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole\"\"\" p_VersionLifecycleConfig: typing.Union['ApplicationApplicationVersionLifecycleConfig', dict] = attr.ib( default=None, converter=ApplicationApplicationVersionLifecycleConfig.from_dict,", "= attr.ib( default=None, converter=ApplicationApplicationVersionLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationVersionLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLifecycleConfig\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig\"\"\" #--- Resource declaration", "typing.Union['ApplicationApplicationResourceLifecycleConfig', dict] = attr.ib( default=None, converter=ApplicationApplicationResourceLifecycleConfig.from_dict, 
validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationResourceLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceLifecycleConfig\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig\"\"\" @attr.s", "attr.ib( default=None, converter=ConfigurationTemplateSourceConfiguration.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ConfigurationTemplateSourceConfiguration)), metadata={AttrMeta.PROPERTY_NAME: \"SourceConfiguration\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration\"\"\" @attr.s class Application(Resource): \"\"\"", "AWS Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html Property Document: - ``p_MaxAgeRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule", "ApplicationApplicationResourceLifecycleConfig(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html Property Document: -", "\"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled\"\"\" p_MaxAgeInDays: int = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeInDays\"}, )", "default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "= \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html Property Document: - ``p_MaxAgeRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule - ``p_MaxCountRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule", "- ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn - ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname - ``p_SourceConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration \"\"\" AWS_OBJECT_TYPE = 
\"AWS::ElasticBeanstalk::ConfigurationTemplate\"", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname\"\"\" p_TemplateName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3\"\"\" p_Enabled: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)),", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname\"\"\" p_SourceConfiguration: typing.Union['ConfigurationTemplateSourceConfiguration', dict] = attr.ib( default=None,", "p_Value: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value\"\"\" @attr.s class", 
"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname\"\"\" p_CNAMEPrefix: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"CNAMEPrefix\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix\"\"\" p_Description:", "GetAtt: \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#aws-properties-beanstalk-environment-return-values\"\"\" return GetAtt(resource=self, attr_name=\"EndpointURL\") @attr.s class ApplicationVersion(Resource): \"\"\" AWS Object Type", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled\"\"\" p_MaxCount: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCount\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount\"\"\" @attr.s", "= attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str = attr.ib(", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled\"\"\" p_MaxCount: int = attr.ib( 
default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCount\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount\"\"\"", "#--- Property declaration --- @attr.s class EnvironmentOptionSetting(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment.OptionSetting\"", "validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(EnvironmentOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname - ``rp_SourceBundle``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-description \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion\" rp_ApplicationName:", "metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace\"\"\" 
rp_OptionName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"},", "\"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, )", "Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname - ``p_CNAMEPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description - ``p_EnvironmentName``:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-description \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"},", "\"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.Tier\" p_Name: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Name\"}, )", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description - 
``p_EnvironmentName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name - ``p_OperationsRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings - ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn", "Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html Property Document: - ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace -", "bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled\"\"\" p_MaxAgeInDays: int =", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays\"\"\" @attr.s class ConfigurationTemplateSourceConfiguration(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" Resource", 
"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html Property Document: - ``p_ServiceRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole - ``p_VersionLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\"", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description\"\"\"", "from ..core.constant import AttrMeta #--- Property declaration --- @attr.s class EnvironmentOptionSetting(Property): \"\"\" AWS", "\"\"\" import attr import typing from ..core.model import ( Property, Resource, Tag, GetAtt,", "metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled\"\"\" p_MaxAgeInDays: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeInDays\"},", "default=None, 
converter=ApplicationMaxAgeRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxAgeRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule\"\"\" p_MaxCountRule: typing.Union['ApplicationMaxCountRule', dict] = attr.ib(", "validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ConfigurationTemplateConfigurationOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "typing.Union['EnvironmentTier', dict] = attr.ib( default=None, converter=EnvironmentTier.from_dict, validator=attr.validators.optional(attr.validators.instance_of(EnvironmentTier)), metadata={AttrMeta.PROPERTY_NAME: \"Tier\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier\"\"\" p_VersionLabel:", "p_OptionSettings: typing.List[typing.Union['EnvironmentOptionSetting', dict]] = attr.ib( default=None, converter=EnvironmentOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(EnvironmentOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc:", "p_DeleteSourceFromS3: bool = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3\"\"\" p_Enabled: bool", "- ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" rp_Namespace: TypeHint.intrinsic_str =", "dict] = attr.ib( default=None, converter=ApplicationApplicationVersionLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationVersionLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLifecycleConfig\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig\"\"\" #--- Resource", "- ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname - ``rp_TemplateName``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" rp_ApplicationName: TypeHint.intrinsic_str =", "- ``rp_SourceBundle``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-description \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion\" rp_ApplicationName: TypeHint.intrinsic_str =", "``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" rp_Namespace: TypeHint.intrinsic_str = attr.ib(", "p_Enabled: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled\"\"\" p_MaxAgeInDays: int", "\"EnvironmentName\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name\"\"\" p_OperationsRole: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"OperationsRole\"}, )", "AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" p_ServiceRole: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ServiceRole\"}, ) \"\"\"Doc:", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname\"\"\" p_SourceConfiguration: typing.Union['ConfigurationTemplateSourceConfiguration', dict] = attr.ib(", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str = attr.ib(", "\"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled\"\"\" p_MaxCount: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCount\"}, )", "metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename\"\"\" p_Tier: typing.Union['EnvironmentTier', dict] = attr.ib( default=None, converter=EnvironmentTier.from_dict, validator=attr.validators.optional(attr.validators.instance_of(EnvironmentTier)),", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled - ``p_MaxCount``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" p_DeleteSourceFromS3: bool = attr.ib( default=None,", "= \"AWS::ElasticBeanstalk::Application\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html Property Document: - ``p_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" rp_Namespace: TypeHint.intrinsic_str = attr.ib( default=None,", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Version\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version\"\"\" @attr.s class ConfigurationTemplateConfigurationOptionSetting(Property): \"\"\" AWS", "\"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, )", "``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings - ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn - ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname - ``p_SourceConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration \"\"\" AWS_OBJECT_TYPE", "Type = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html Property Document: - ``rp_S3Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket - ``rp_S3Key``:", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLabel\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel\"\"\" p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib( default=None, converter=Tag.from_list,", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname - ``rp_SourceBundle``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-description \"\"\"", "\"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html Property Document: - ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace - ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname -", "attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname\"\"\" 
rp_SourceBundle: typing.Union['ApplicationVersionSourceBundle', dict] = attr.ib(", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole\"\"\" p_VersionLifecycleConfig: typing.Union['ApplicationApplicationVersionLifecycleConfig', dict] = attr.ib( default=None, converter=ApplicationApplicationVersionLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationVersionLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLifecycleConfig\"},", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description\"\"\" p_EnvironmentName: TypeHint.intrinsic_str =", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description - ``p_EnvironmentId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid -", ") \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname\"\"\" rp_SourceBundle: typing.Union['ApplicationVersionSourceBundle', dict] = attr.ib( default=None, converter=ApplicationVersionSourceBundle.from_dict, validator=attr.validators.instance_of(ApplicationVersionSourceBundle), metadata={AttrMeta.PROPERTY_NAME: \"SourceBundle\"},", "typing.List[typing.Union['ConfigurationTemplateConfigurationOptionSetting', dict]] = attr.ib( default=None, converter=ConfigurationTemplateConfigurationOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ConfigurationTemplateConfigurationOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings\"\"\"", "attr.ib( default=None, converter=ApplicationVersionSourceBundle.from_dict, validator=attr.validators.instance_of(ApplicationVersionSourceBundle), metadata={AttrMeta.PROPERTY_NAME: \"SourceBundle\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib(", "= \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" rp_S3Bucket: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Bucket\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket\"\"\"", "p_MaxCountRule: typing.Union['ApplicationMaxCountRule', dict] = attr.ib( default=None, 
converter=ApplicationMaxCountRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxCountRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCountRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule\"\"\"", "validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "dict] = attr.ib( default=None, converter=ApplicationApplicationResourceLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationResourceLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceLifecycleConfig\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig\"\"\" @attr.s class", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3\"\"\" p_Enabled: bool = attr.ib( default=None,", ") \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc:", "typing from ..core.model import ( Property, Resource, Tag, GetAtt, TypeHint, TypeCheck, ) from", "attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname\"\"\" p_CNAMEPrefix: TypeHint.intrinsic_str = attr.ib( default=None,", "metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname\"\"\" rp_TemplateName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"},", "\"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" p_MaxAgeRule: typing.Union['ApplicationMaxAgeRule', dict] = attr.ib( default=None, converter=ApplicationMaxAgeRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxAgeRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeRule\"}, ) \"\"\"Doc:", "def rv_EndpointURL(self) -> GetAtt: \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#aws-properties-beanstalk-environment-return-values\"\"\" return 
GetAtt(resource=self, attr_name=\"EndpointURL\") @attr.s class ApplicationVersion(Resource): \"\"\"", "@attr.s class ConfigurationTemplate(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html Property", "- ``rp_S3Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket - ``rp_S3Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" rp_S3Bucket: TypeHint.intrinsic_str =", "p_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name\"\"\" p_Description: TypeHint.intrinsic_str", "typing.Union['ApplicationVersionSourceBundle', dict] = attr.ib( default=None, converter=ApplicationVersionSourceBundle.from_dict, validator=attr.validators.instance_of(ApplicationVersionSourceBundle), metadata={AttrMeta.PROPERTY_NAME: \"SourceBundle\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle\"\"\" p_Description:", "- ``p_VersionLabel``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags \"\"\" 
AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment\" rp_ApplicationName: TypeHint.intrinsic_str =", "Property Document: - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name - ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type - ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version \"\"\" AWS_OBJECT_TYPE", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc:", "module \"\"\" import attr import typing from ..core.model import ( Property, Resource, Tag,", "\"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html Property Document: - ``rp_Namespace``:", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket\"\"\" rp_S3Key: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Key\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key\"\"\"", "validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), 
metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value\"\"\" @attr.s class ApplicationMaxCountRule(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" Resource Document:", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ServiceRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole\"\"\" p_VersionLifecycleConfig: typing.Union['ApplicationApplicationVersionLifecycleConfig', dict] = attr.ib(", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount\"\"\" @attr.s class ApplicationApplicationVersionLifecycleConfig(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html", ") \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig\"\"\" @attr.s class Environment(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment\" Resource", "dict]] = attr.ib( default=None, converter=Tag.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"Tags\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags\"\"\" @property", "@attr.s class EnvironmentOptionSetting(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html Property", "validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"Tags\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags\"\"\" @property def rv_EndpointURL(self) -> GetAtt: \"\"\"Doc:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name\"\"\" p_OperationsRole: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"OperationsRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role\"\"\" p_OptionSettings:", "TypeHint.intrinsic_str = attr.ib( 
default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str =", "\"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html Property Document: - ``p_ServiceRole``:", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ServiceRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole\"\"\" p_VersionLifecycleConfig: typing.Union['ApplicationApplicationVersionLifecycleConfig', dict]", "p_PlatformArn: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str", "``p_EnvironmentName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name - ``p_OperationsRole``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings - ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn - ``p_SolutionStackName``:", "metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description\"\"\" p_EnvironmentName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentName\"},", "Type = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html Property Document: - ``p_MaxAgeRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule - ``p_MaxCountRule``:", "= \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html Property Document: - ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace - 
``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key\"\"\" @attr.s class ApplicationMaxAgeRule(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" Resource", "ConfigurationTemplateSourceConfiguration(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html Property Document: -", "class EnvironmentTier(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment.Tier\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html Property Document:", "metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled\"\"\" p_MaxCount: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCount\"},", "\"OptionSettings\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, )", "p_Tier: typing.Union['EnvironmentTier', dict] = attr.ib( 
default=None, converter=EnvironmentTier.from_dict, validator=attr.validators.optional(attr.validators.instance_of(EnvironmentTier)), metadata={AttrMeta.PROPERTY_NAME: \"Tier\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier\"\"\"", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule - ``p_MaxCountRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" p_MaxAgeRule: typing.Union['ApplicationMaxAgeRule', dict] = attr.ib(", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3\"\"\" p_Enabled: bool = attr.ib(", "``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description - ``p_EnvironmentName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name - ``p_OperationsRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role - ``p_OptionSettings``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings - ``p_PlatformArn``:", "Object Type = \"AWS::ElasticBeanstalk::Application\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html Property Document: - ``p_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name -", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Name\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name\"\"\" p_Type: TypeHint.intrinsic_str = attr.ib( default=None,", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value\"\"\"", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str =", 
"validator=attr.validators.optional(attr.validators.instance_of(ConfigurationTemplateSourceConfiguration)), metadata={AttrMeta.PROPERTY_NAME: \"SourceConfiguration\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration\"\"\" @attr.s class Application(Resource): \"\"\" AWS Object Type", "@attr.s class ConfigurationTemplateSourceConfiguration(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html Property", "- ``rp_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),", "``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn - ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname - ``p_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename - ``p_Tier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier - ``p_VersionLabel``:", 
"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" p_DeleteSourceFromS3: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"},", "attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str = attr.ib( default=None,", "\"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html Property Document: - ``p_MaxAgeRule``:", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description\"\"\" p_ResourceLifecycleConfig: typing.Union['ApplicationApplicationResourceLifecycleConfig', dict] =", "``p_Tier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier - ``p_VersionLabel``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel - ``p_Tags``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment\" rp_ApplicationName:", "``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-description \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME:", "default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays\"\"\" @attr.s class ConfigurationTemplateSourceConfiguration(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" Resource Document:", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name\"\"\" p_Type: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Type\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type\"\"\"", "= \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname - ``rp_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename\"\"\" p_Tier: typing.Union['EnvironmentTier', dict] = attr.ib( default=None,", "validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Key\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key\"\"\" @attr.s class ApplicationMaxAgeRule(Property): \"\"\" AWS Object Type", "Type = \"AWS::ElasticBeanstalk::Environment.Tier\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html Property Document: - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name - ``p_Type``:", 
"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name - ``p_OperationsRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings - ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn - ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname", "dict] = attr.ib( default=None, converter=ApplicationMaxAgeRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxAgeRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule\"\"\" p_MaxCountRule: typing.Union['ApplicationMaxCountRule',", "GetAtt(resource=self, attr_name=\"EndpointURL\") @attr.s class ApplicationVersion(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion\" Resource Document:", "= attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str = attr.ib(", "TypeHint.intrinsic_str = attr.ib( 
default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename\"\"\" p_Tier: typing.Union['EnvironmentTier', dict]", "metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"},", "default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled\"\"\" p_MaxAgeInDays: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)),", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc:", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname\"\"\" rp_TemplateName: TypeHint.intrinsic_str =", "``rp_S3Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket - ``rp_S3Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" rp_S3Bucket: TypeHint.intrinsic_str = attr.ib(", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename\"\"\" p_Value:", "``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace - ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value \"\"\" AWS_OBJECT_TYPE", 
"metadata={AttrMeta.PROPERTY_NAME: \"S3Bucket\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket\"\"\" rp_S3Key: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Key\"},", "Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname - ``rp_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" rp_ApplicationName:", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount\"\"\" @attr.s class ApplicationApplicationVersionLifecycleConfig(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" Resource", "dict] = attr.ib( default=None, converter=ApplicationVersionSourceBundle.from_dict, validator=attr.validators.instance_of(ApplicationVersionSourceBundle), metadata={AttrMeta.PROPERTY_NAME: \"SourceBundle\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle\"\"\" p_Description: TypeHint.intrinsic_str", 
"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value\"\"\" @attr.s", "utf-8 -*- \"\"\" This module \"\"\" import attr import typing from ..core.model import", "Resource declaration --- @attr.s class ConfigurationTemplate(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" Resource", "\"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" rp_Namespace: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace\"\"\" rp_OptionName:", "Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html Property Document: - ``p_MaxAgeRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule -", 
"\"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html Property Document: - ``p_MaxAgeRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule - ``p_MaxCountRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule \"\"\"", "- ``p_VersionLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" p_ServiceRole: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description - ``p_EnvironmentId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid - ``p_OptionSettings``:", 
"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version\"\"\" @attr.s class ConfigurationTemplateConfigurationOptionSetting(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html", "metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname\"\"\" p_TemplateName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"},", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str = attr.ib( default=None,", "class ApplicationApplicationResourceLifecycleConfig(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html Property Document:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket\"\"\" rp_S3Key: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Key\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key\"\"\" @attr.s", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname\"\"\" p_SourceConfiguration:", "Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html Property Document: - ``rp_S3Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket - ``rp_S3Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key \"\"\" AWS_OBJECT_TYPE", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount\"\"\" @attr.s class ApplicationApplicationVersionLifecycleConfig(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" Resource Document:", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description\"\"\" p_EnvironmentId: TypeHint.intrinsic_str =", "``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" rp_Namespace: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME:", "Document: - ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace - ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value", "rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname\"\"\" rp_SourceBundle: typing.Union['ApplicationVersionSourceBundle',", "- ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname - ``rp_SourceBundle``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-description \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion\"", "@attr.s class ApplicationMaxAgeRule(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html Property", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3 - ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled - ``p_MaxCount``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" p_DeleteSourceFromS3: bool", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeInDays\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays\"\"\" @attr.s class ConfigurationTemplateSourceConfiguration(Property): \"\"\" AWS", "default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3\"\"\" p_Enabled: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)),", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description\"\"\" p_EnvironmentId: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentId\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid\"\"\"", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid\"\"\" p_OptionSettings: typing.List[typing.Union['ConfigurationTemplateConfigurationOptionSetting', dict]] = attr.ib( default=None, converter=ConfigurationTemplateConfigurationOptionSetting.from_list, 
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ConfigurationTemplateConfigurationOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME:", "``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings - ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn - ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname - ``p_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename - ``p_Tier``:", "rp_S3Key: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Key\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key\"\"\" @attr.s class", "validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCount\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount\"\"\" @attr.s class ApplicationApplicationVersionLifecycleConfig(Property): \"\"\" AWS Object Type", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule\"\"\" @attr.s class 
ApplicationApplicationResourceLifecycleConfig(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" Resource", "Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html Property Document: - ``p_ServiceRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole - ``p_VersionLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig \"\"\" AWS_OBJECT_TYPE", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" rp_Namespace: TypeHint.intrinsic_str", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname\"\"\" p_Description: TypeHint.intrinsic_str = 
attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc:", "validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled\"\"\" p_MaxCount: int = attr.ib(", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname\"\"\" p_CNAMEPrefix: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"CNAMEPrefix\"}, ) \"\"\"Doc:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings - ``p_PlatformArn``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn - ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname - ``p_SourceConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration", "metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeInDays\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays\"\"\" @attr.s class ConfigurationTemplateSourceConfiguration(Property): \"\"\" AWS Object Type =", "class EnvironmentOptionSetting(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html Property Document:", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLabel\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel\"\"\" p_Tags: typing.List[typing.Union[Tag, dict]]", "@attr.s class Application(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html Property", "Object Type = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" Resource Document: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html Property Document: - ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace -", "Resource, Tag, GetAtt, TypeHint, TypeCheck, ) from ..core.constant import AttrMeta #--- Property declaration", "``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn - ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname - ``p_SourceConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" rp_ApplicationName:", "metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentId\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid\"\"\" p_OptionSettings: typing.List[typing.Union['ConfigurationTemplateConfigurationOptionSetting', dict]] = attr.ib( default=None, converter=ConfigurationTemplateConfigurationOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ConfigurationTemplateConfigurationOptionSetting),", "default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCount\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount\"\"\" @attr.s class ApplicationApplicationVersionLifecycleConfig(Property): \"\"\" AWS Object", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname\"\"\" rp_SourceBundle: typing.Union['ApplicationVersionSourceBundle', dict] = attr.ib( default=None, converter=ApplicationVersionSourceBundle.from_dict, validator=attr.validators.instance_of(ApplicationVersionSourceBundle), metadata={AttrMeta.PROPERTY_NAME: \"SourceBundle\"}, ) \"\"\"Doc:", "``p_MaxCount``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" p_DeleteSourceFromS3: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig\"\"\" @attr.s class Environment(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags\"\"\" @property def rv_EndpointURL(self) -> GetAtt: \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#aws-properties-beanstalk-environment-return-values\"\"\" return GetAtt(resource=self, attr_name=\"EndpointURL\") @attr.s", "\"\"\" 
AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, )", "Type = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html Property Document: - ``p_ServiceRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole - ``p_VersionLifecycleConfig``:", "Property Document: - ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace - ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename - ``p_Value``:", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"OperationsRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role\"\"\" p_OptionSettings: typing.List[typing.Union['EnvironmentOptionSetting', dict]] = attr.ib(", "typing.Union['ApplicationMaxCountRule', dict] = attr.ib( default=None, converter=ApplicationMaxCountRule.from_dict, 
validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxCountRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCountRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule\"\"\" @attr.s", "- ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname - ``p_SourceConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" rp_ApplicationName: TypeHint.intrinsic_str =", "\"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" p_DeleteSourceFromS3: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, )", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname\"\"\" rp_TemplateName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename\"\"\" @attr.s", "class ConfigurationTemplate(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" Resource Document: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html Property Document:", "- ``p_SourceConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),", "default=None, converter=ConfigurationTemplateConfigurationOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ConfigurationTemplateConfigurationOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str = attr.ib(", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description\"\"\" p_ResourceLifecycleConfig: typing.Union['ApplicationApplicationResourceLifecycleConfig', dict] = attr.ib(", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description\"\"\" p_ResourceLifecycleConfig: typing.Union['ApplicationApplicationResourceLifecycleConfig', dict] = attr.ib( default=None, converter=ApplicationApplicationResourceLifecycleConfig.from_dict, 
validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationResourceLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceLifecycleConfig\"},", "converter=EnvironmentTier.from_dict, validator=attr.validators.optional(attr.validators.instance_of(EnvironmentTier)), metadata={AttrMeta.PROPERTY_NAME: \"Tier\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier\"\"\" p_VersionLabel: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname\"\"\" p_SourceConfiguration: typing.Union['ConfigurationTemplateSourceConfiguration', dict] = attr.ib( default=None, converter=ConfigurationTemplateSourceConfiguration.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ConfigurationTemplateSourceConfiguration)), metadata={AttrMeta.PROPERTY_NAME: \"SourceConfiguration\"},", "\"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, )", "- ``p_CNAMEPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix - ``p_Description``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description - ``p_EnvironmentName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name - ``p_OperationsRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role -", "converter=ApplicationVersionSourceBundle.from_dict, validator=attr.validators.instance_of(ApplicationVersionSourceBundle), metadata={AttrMeta.PROPERTY_NAME: \"SourceBundle\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig\"\"\" #--- Resource declaration --- @attr.s class ConfigurationTemplate(Resource): \"\"\" AWS Object Type", "\"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname\"\"\" rp_TemplateName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, )", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings\"\"\" 
p_PlatformArn: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn\"\"\"", "= attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Bucket\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket\"\"\" rp_S3Key: TypeHint.intrinsic_str = attr.ib(", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"CNAMEPrefix\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib(", "AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.Tier\" p_Name: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Name\"}, ) \"\"\"Doc:", "AWS Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname", "p_EnvironmentName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: 
\"EnvironmentName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name\"\"\" p_OperationsRole: TypeHint.intrinsic_str", "int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeInDays\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays\"\"\" @attr.s class ConfigurationTemplateSourceConfiguration(Property):", "p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib( default=None, converter=Tag.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"Tags\"}, ) \"\"\"Doc:", "validator=attr.validators.instance_of(ApplicationVersionSourceBundle), metadata={AttrMeta.PROPERTY_NAME: \"SourceBundle\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "Type = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html Property Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3 - ``p_Enabled``:", "= attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3\"\"\" p_Enabled: bool = attr.ib(", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled\"\"\" p_MaxAgeInDays: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeInDays\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays\"\"\"", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application\" p_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"},", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket\"\"\" rp_S3Key: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Key\"}, ) \"\"\"Doc:", "AWS Object Type = \"AWS::ElasticBeanstalk::Environment\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html Property Document: - ``rp_ApplicationName``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname", "Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname -", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname\"\"\"", "AWS Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html Property Document: - ``rp_S3Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole\"\"\" p_VersionLifecycleConfig: typing.Union['ApplicationApplicationVersionLifecycleConfig', dict] = attr.ib( default=None, converter=ApplicationApplicationVersionLifecycleConfig.from_dict, 
validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationVersionLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLifecycleConfig\"}, ) \"\"\"Doc:", "metadata={AttrMeta.PROPERTY_NAME: \"ResourceLifecycleConfig\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig\"\"\" @attr.s class Environment(Resource): \"\"\" AWS Object Type =", "#--- Resource declaration --- @attr.s class ConfigurationTemplate(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate\"", "default=None, converter=EnvironmentTier.from_dict, validator=attr.validators.optional(attr.validators.instance_of(EnvironmentTier)), metadata={AttrMeta.PROPERTY_NAME: \"Tier\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier\"\"\" p_VersionLabel: TypeHint.intrinsic_str = attr.ib( default=None,", "p_VersionLabel: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLabel\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel\"\"\" p_Tags: typing.List[typing.Union[Tag,", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname - ``rp_TemplateName``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\"", "p_SolutionStackName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname\"\"\" p_TemplateName: TypeHint.intrinsic_str", "attr.ib( default=None, converter=ApplicationMaxCountRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxCountRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCountRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule\"\"\" @attr.s class ApplicationApplicationResourceLifecycleConfig(Property): \"\"\"", "\"AWS::ElasticBeanstalk::ConfigurationTemplate\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname\"\"\" p_Description:", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentId\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid\"\"\" p_OptionSettings: typing.List[typing.Union['ConfigurationTemplateConfigurationOptionSetting', dict]] = attr.ib(", "p_MaxCount: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCount\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount\"\"\" @attr.s class", "= attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str = attr.ib(", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled - ``p_MaxAgeInDays``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" p_DeleteSourceFromS3: bool = attr.ib( default=None,", "\"OptionSettings\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, )", "\"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name\"\"\" p_OperationsRole: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"OperationsRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role\"\"\"", "dict] = attr.ib( default=None, converter=EnvironmentTier.from_dict, validator=attr.validators.optional(attr.validators.instance_of(EnvironmentTier)), metadata={AttrMeta.PROPERTY_NAME: \"Tier\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier\"\"\" p_VersionLabel: TypeHint.intrinsic_str", "Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html Property Document: - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name - ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type - ``p_Version``:", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename\"\"\"", "\"Tags\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags\"\"\" @property def rv_EndpointURL(self) -> GetAtt: \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#aws-properties-beanstalk-environment-return-values\"\"\" return GetAtt(resource=self,", "AWS Object Type = \"AWS::ElasticBeanstalk::Application\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html Property Document: - ``p_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name", "Object Type = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html Property Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3 -", "\"DeleteSourceFromS3\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3\"\"\" p_Enabled: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, )", "metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"},", "AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" p_DeleteSourceFromS3: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname\"\"\" p_SourceConfiguration: typing.Union['ConfigurationTemplateSourceConfiguration', dict] = attr.ib( default=None, converter=ConfigurationTemplateSourceConfiguration.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ConfigurationTemplateSourceConfiguration)), metadata={AttrMeta.PROPERTY_NAME: \"SourceConfiguration\"}, ) \"\"\"Doc:", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc:", "metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname\"\"\" rp_SourceBundle: typing.Union['ApplicationVersionSourceBundle', dict] = attr.ib( default=None, 
converter=ApplicationVersionSourceBundle.from_dict, validator=attr.validators.instance_of(ApplicationVersionSourceBundle),", "\"S3Key\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key\"\"\" @attr.s class ApplicationMaxAgeRule(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\"", "\"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, )", "\"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html Property Document: - ``p_DeleteSourceFromS3``:", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value\"\"\" @attr.s class ApplicationMaxCountRule(Property): \"\"\" AWS", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value\"\"\" @attr.s class 
ApplicationMaxCountRule(Property): \"\"\" AWS Object", "metadata={AttrMeta.PROPERTY_NAME: \"MaxCountRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule\"\"\" @attr.s class ApplicationApplicationResourceLifecycleConfig(Property): \"\"\" AWS Object Type =", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname\"\"\" p_TemplateName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename\"\"\" p_Tier:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" p_ServiceRole: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ServiceRole\"},", "= \"AWS::ElasticBeanstalk::ConfigurationTemplate\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname - ``p_Description``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentId\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid\"\"\" p_OptionSettings: typing.List[typing.Union['ConfigurationTemplateConfigurationOptionSetting', dict]]", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule\"\"\" @attr.s class ApplicationApplicationResourceLifecycleConfig(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" Resource Document:", "\"ResourceLifecycleConfig\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig\"\"\" @attr.s class Environment(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment\"", "converter=EnvironmentOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(EnvironmentOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str = attr.ib( default=None,", "= attr.ib( default=None, 
converter=ApplicationMaxCountRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxCountRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCountRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule\"\"\" @attr.s class ApplicationApplicationResourceLifecycleConfig(Property):", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname\"\"\" p_TemplateName: TypeHint.intrinsic_str = attr.ib(", "bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3\"\"\" p_Enabled: bool =", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib(", "metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"},", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc:", "converter=ConfigurationTemplateSourceConfiguration.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ConfigurationTemplateSourceConfiguration)), metadata={AttrMeta.PROPERTY_NAME: \"SourceConfiguration\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration\"\"\" @attr.s class Application(Resource): \"\"\" AWS Object", "AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application\" p_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc:", "default=None, converter=ApplicationApplicationResourceLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationResourceLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceLifecycleConfig\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig\"\"\" @attr.s class Environment(Resource): \"\"\" AWS", "\"AWS::ElasticBeanstalk::Environment.OptionSetting\" rp_Namespace: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace\"\"\" rp_OptionName:", "\"AWS::ElasticBeanstalk::Application.MaxAgeRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html Property Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3 - ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled -", "= \"AWS::ElasticBeanstalk::Environment.OptionSetting\" rp_Namespace: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace\"\"\"", "ConfigurationTemplateConfigurationOptionSetting(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html Property Document: -", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str =", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc:", "\"MaxAgeInDays\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays\"\"\" @attr.s class ConfigurationTemplateSourceConfiguration(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\"", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"},", "rp_OptionName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname\"\"\" 
p_ResourceName: TypeHint.intrinsic_str", "\"S3Bucket\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket\"\"\" rp_S3Key: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Key\"}, )", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays\"\"\" @attr.s class ConfigurationTemplateSourceConfiguration(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename\"\"\" @attr.s class EnvironmentTier(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment.Tier\" Resource", "= \"AWS::ElasticBeanstalk::ConfigurationTemplate\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname\"\"\"", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str = attr.ib(", "metadata={AttrMeta.PROPERTY_NAME: \"Name\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name\"\"\" p_Type: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Type\"},", "\"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname\"\"\" p_TemplateName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, )", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type\"\"\" p_Version: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Version\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version\"\"\"", "metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname\"\"\" p_SourceConfiguration: typing.Union['ConfigurationTemplateSourceConfiguration', dict] = attr.ib( default=None, converter=ConfigurationTemplateSourceConfiguration.from_dict, 
validator=attr.validators.optional(attr.validators.instance_of(ConfigurationTemplateSourceConfiguration)),", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description\"\"\" p_ResourceLifecycleConfig: typing.Union['ApplicationApplicationResourceLifecycleConfig', dict] = attr.ib( default=None, converter=ApplicationApplicationResourceLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationResourceLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceLifecycleConfig\"}, ) \"\"\"Doc:", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeInDays\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays\"\"\" @attr.s class ConfigurationTemplateSourceConfiguration(Property): \"\"\"", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str = attr.ib( default=None,", "Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html Property Document: - ``p_MaxAgeRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule - ``p_MaxCountRule``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule \"\"\" AWS_OBJECT_TYPE", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLabel\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel\"\"\" p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib(", "metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value\"\"\" @attr.s class ApplicationVersionSourceBundle(Property): \"\"\" AWS Object Type =", "class ApplicationVersionSourceBundle(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html Property Document:", "metadata={AttrMeta.PROPERTY_NAME: \"S3Key\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key\"\"\" @attr.s class ApplicationMaxAgeRule(Property): \"\"\" AWS Object Type =", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.Tier\" p_Name: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Name\"},", "\"Description\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description\"\"\" p_EnvironmentName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentName\"}, )", "metadata={AttrMeta.PROPERTY_NAME: \"Version\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version\"\"\" @attr.s class ConfigurationTemplateConfigurationOptionSetting(Property): \"\"\" AWS Object Type =", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled\"\"\" p_MaxCount: int = attr.ib( default=None,", "Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html Property Document: - ``p_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description - ``p_ResourceLifecycleConfig``:", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value\"\"\" @attr.s class ApplicationVersionSourceBundle(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" Resource", "Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html Property Document: - 
``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace - ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename", "``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" rp_Namespace: TypeHint.intrinsic_str = attr.ib(", "attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Bucket\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket\"\"\" rp_S3Key: TypeHint.intrinsic_str = attr.ib( default=None,", "- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),", "metadata={AttrMeta.PROPERTY_NAME: \"OperationsRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role\"\"\" p_OptionSettings: 
typing.List[typing.Union['EnvironmentOptionSetting', dict]] = attr.ib( default=None, converter=EnvironmentOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(EnvironmentOptionSetting),", "``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace - ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value \"\"\" AWS_OBJECT_TYPE", "attr.ib( default=None, converter=EnvironmentOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(EnvironmentOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str =", "iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "p_PlatformArn: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str = attr.ib(", "\"VersionLabel\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel\"\"\" p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib( default=None, converter=Tag.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))),", "``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename - ``p_Value``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" rp_Namespace:", "AWS Object Type = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html Property Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role\"\"\" p_OptionSettings: typing.List[typing.Union['EnvironmentOptionSetting', dict]] = attr.ib( default=None, converter=EnvironmentOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(EnvironmentOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME:", "= \"AWS::ElasticBeanstalk::ApplicationVersion\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname\"\"\"", "\"CNAMEPrefix\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), 
metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, )", "TypeCheck, ) from ..core.constant import AttrMeta #--- Property declaration --- @attr.s class EnvironmentOptionSetting(Property):", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"OperationsRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role\"\"\" p_OptionSettings: typing.List[typing.Union['EnvironmentOptionSetting', dict]] = attr.ib( default=None, converter=EnvironmentOptionSetting.from_list,", "\"Name\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name\"\"\" p_Type: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Type\"}, )", "p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description\"\"\" p_ResourceLifecycleConfig: typing.Union['ApplicationApplicationResourceLifecycleConfig',", "metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"},", ") \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version\"\"\" @attr.s class ConfigurationTemplateConfigurationOptionSetting(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" Resource", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname\"\"\"", "p_OperationsRole: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"OperationsRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role\"\"\" p_OptionSettings: typing.List[typing.Union['EnvironmentOptionSetting',", "default=None, converter=Tag.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"Tags\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags\"\"\" @property def rv_EndpointURL(self) ->", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname\"\"\" p_SourceConfiguration: typing.Union['ConfigurationTemplateSourceConfiguration', dict] = attr.ib( default=None, converter=ConfigurationTemplateSourceConfiguration.from_dict,", "typing.List[typing.Union[Tag, dict]] = attr.ib( default=None, converter=Tag.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"Tags\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags\"\"\"", "Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname -", "\"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application\" p_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, )", "- ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace - ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname - ``p_ResourceName``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value \"\"\"", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3\"\"\" p_Enabled: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled\"\"\"", "= \"AWS::ElasticBeanstalk::Application.MaxCountRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html Property Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3 - ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled", "default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", ") from ..core.constant import AttrMeta #--- Property declaration --- @attr.s class EnvironmentOptionSetting(Property): \"\"\"", "default=None, converter=ApplicationApplicationVersionLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationVersionLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLifecycleConfig\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig\"\"\" #--- Resource declaration --- @attr.s", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname\"\"\" p_TemplateName:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel\"\"\" p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib( default=None, converter=Tag.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"Tags\"}, )", "``rp_ApplicationName``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname - ``rp_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib(", "bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3\"\"\" p_Enabled: bool =", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value\"\"\" @attr.s class ApplicationMaxCountRule(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel\"\"\" p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib( default=None, converter=Tag.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME:", "dict] = attr.ib( default=None, 
converter=ConfigurationTemplateSourceConfiguration.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ConfigurationTemplateSourceConfiguration)), metadata={AttrMeta.PROPERTY_NAME: \"SourceConfiguration\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration\"\"\" @attr.s class", "Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname - ``rp_SourceBundle``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-description", "AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type - ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.Tier\" p_Name: TypeHint.intrinsic_str = attr.ib( default=None,", "metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname\"\"\" p_CNAMEPrefix: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"CNAMEPrefix\"},", "converter=Tag.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"Tags\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags\"\"\" @property def rv_EndpointURL(self) -> GetAtt:", "\"ResourceName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, )", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname\"\"\" p_TemplateName: TypeHint.intrinsic_str =", "Object Type = \"AWS::ElasticBeanstalk::Environment.Tier\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html Property Document: - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name -", 
"= attr.ib( default=None, converter=ConfigurationTemplateSourceConfiguration.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ConfigurationTemplateSourceConfiguration)), metadata={AttrMeta.PROPERTY_NAME: \"SourceConfiguration\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration\"\"\" @attr.s class Application(Resource):", "metadata={AttrMeta.PROPERTY_NAME: \"SourceBundle\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"},", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname\"\"\" p_CNAMEPrefix: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"CNAMEPrefix\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix\"\"\"", "``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname - ``p_CNAMEPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description - ``p_EnvironmentName``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name - ``p_OperationsRole``:", "rp_TemplateName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename\"\"\" @attr.s class", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname - ``p_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename - ``p_Tier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier - ``p_VersionLabel``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags", "= \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" p_DeleteSourceFromS3: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3\"\"\"", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "\"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html Property Document: - ``p_ServiceRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole - ``p_VersionLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig \"\"\"", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket - ``rp_S3Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" rp_S3Bucket: TypeHint.intrinsic_str = attr.ib( default=None,", "converter=ApplicationApplicationResourceLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationResourceLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceLifecycleConfig\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig\"\"\" @attr.s class Environment(Resource): \"\"\" AWS Object", "p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description\"\"\" p_EnvironmentName: TypeHint.intrinsic_str", "= attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib(", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Version\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version\"\"\" @attr.s class ConfigurationTemplateConfigurationOptionSetting(Property): \"\"\" AWS Object", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig\"\"\" #--- Resource declaration --- @attr.s class ConfigurationTemplate(Resource): \"\"\" AWS Object", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value\"\"\" @attr.s class ApplicationVersionSourceBundle(Property): \"\"\"", "``p_CNAMEPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description - ``p_EnvironmentName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name - ``p_OperationsRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role - ``p_OptionSettings``:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn - ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname - ``p_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename - ``p_Tier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier - ``p_VersionLabel``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel", "Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3 - ``p_Enabled``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled - ``p_MaxAgeInDays``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays \"\"\" AWS_OBJECT_TYPE =", "\"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html Property Document: - ``rp_S3Bucket``:", "iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3\"\"\" p_Enabled: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc:", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description\"\"\" p_EnvironmentName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentName\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name\"\"\"", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Version\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version\"\"\" @attr.s class ConfigurationTemplateConfigurationOptionSetting(Property):", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name\"\"\" p_OperationsRole: TypeHint.intrinsic_str = attr.ib(", "- ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3 - ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled - ``p_MaxCount``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxCountRule\"", "Document: - ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace - ``rp_OptionName``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value", "validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationResourceLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceLifecycleConfig\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig\"\"\" @attr.s class Environment(Resource): \"\"\" AWS Object Type", "p_ServiceRole: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ServiceRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole\"\"\" p_VersionLifecycleConfig: typing.Union['ApplicationApplicationVersionLifecycleConfig',", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description\"\"\" p_EnvironmentId: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "metadata={AttrMeta.PROPERTY_NAME: \"MaxCount\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount\"\"\" @attr.s class ApplicationApplicationVersionLifecycleConfig(Property): \"\"\" AWS Object Type =", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentId\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid\"\"\" p_OptionSettings: typing.List[typing.Union['ConfigurationTemplateConfigurationOptionSetting', dict]] = attr.ib( default=None, converter=ConfigurationTemplateConfigurationOptionSetting.from_list,", "dict]] = attr.ib( default=None, converter=ConfigurationTemplateConfigurationOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ConfigurationTemplateConfigurationOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings\"\"\" p_PlatformArn:", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: 
\"CNAMEPrefix\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration\"\"\" @attr.s class Application(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application\" Resource Document:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description\"\"\" p_ResourceLifecycleConfig:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename\"\"\" @attr.s class EnvironmentTier(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment.Tier\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html", "p_EnvironmentId: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentId\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid\"\"\" p_OptionSettings: typing.List[typing.Union['ConfigurationTemplateConfigurationOptionSetting',", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html Property Document: - ``rp_S3Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket - ``rp_S3Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\"", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename\"\"\" @attr.s class EnvironmentTier(Property):", "AWS Object Type = \"AWS::ElasticBeanstalk::Environment.Tier\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html Property Document: - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html Property Document: - ``p_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name - ``p_Description``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description - ``p_ResourceLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig \"\"\"", "rp_OptionName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename\"\"\" p_Tier: typing.Union['EnvironmentTier', dict] = attr.ib( default=None, converter=EnvironmentTier.from_dict, validator=attr.validators.optional(attr.validators.instance_of(EnvironmentTier)), metadata={AttrMeta.PROPERTY_NAME: \"Tier\"},", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key\"\"\" @attr.s class ApplicationMaxAgeRule(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html Property Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3 - ``p_Enabled``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled - ``p_MaxAgeInDays``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays \"\"\"", "p_ResourceLifecycleConfig: typing.Union['ApplicationApplicationResourceLifecycleConfig', dict] = attr.ib( default=None, converter=ApplicationApplicationResourceLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationResourceLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceLifecycleConfig\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig\"\"\"", "metadata={AttrMeta.PROPERTY_NAME: \"Tags\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags\"\"\" @property def rv_EndpointURL(self) -> GetAtt: \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#aws-properties-beanstalk-environment-return-values\"\"\" return", "\"OperationsRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role\"\"\" p_OptionSettings: typing.List[typing.Union['EnvironmentOptionSetting', dict]] = attr.ib( default=None, converter=EnvironmentOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(EnvironmentOptionSetting), iterable_validator=attr.validators.instance_of(list))),", "import typing from ..core.model import ( Property, Resource, Tag, GetAtt, TypeHint, TypeCheck, )", "attr import 
typing from ..core.model import ( Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,", "\"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html Property Document: - ``p_DeleteSourceFromS3``:", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str =", "default=None, converter=ConfigurationTemplateSourceConfiguration.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ConfigurationTemplateSourceConfiguration)), metadata={AttrMeta.PROPERTY_NAME: \"SourceConfiguration\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration\"\"\" @attr.s class Application(Resource): \"\"\" AWS", "\"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, )", "- ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3 - ``p_Enabled``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled - ``p_MaxAgeInDays``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\"", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname - ``rp_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None,", "\"AWS::ElasticBeanstalk::Application\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html Property Document: - ``p_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description -", "# -*- coding: utf-8 -*- \"\"\" This 
module \"\"\" import attr import typing", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"},", "converter=ApplicationMaxAgeRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxAgeRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule\"\"\" p_MaxCountRule: typing.Union['ApplicationMaxCountRule', dict] = attr.ib( default=None,", "AWS Object Type = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html Property Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3", "\"Value\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value\"\"\" @attr.s class ApplicationVersionSourceBundle(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\"", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel\"\"\" p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib( default=None, converter=Tag.from_list, 
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"Tags\"},", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description\"\"\"", "- ``p_MaxCountRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" p_MaxAgeRule: typing.Union['ApplicationMaxAgeRule', dict] = attr.ib( default=None,", "p_VersionLifecycleConfig: typing.Union['ApplicationApplicationVersionLifecycleConfig', dict] = attr.ib( default=None, converter=ApplicationApplicationVersionLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationVersionLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLifecycleConfig\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig\"\"\"", "metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeRule\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule\"\"\" p_MaxCountRule: typing.Union['ApplicationMaxCountRule', dict] = attr.ib( default=None, converter=ApplicationMaxCountRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxCountRule)),", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description\"\"\" p_EnvironmentName:", "validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationVersionLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLifecycleConfig\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig\"\"\" #--- Resource declaration --- @attr.s class ConfigurationTemplate(Resource):", "\"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" p_ServiceRole: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ServiceRole\"}, )", ") \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration\"\"\" @attr.s class Application(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application\" Resource", "default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Bucket\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket\"\"\" rp_S3Key: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Type\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type\"\"\" p_Version: TypeHint.intrinsic_str = attr.ib(", "\"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, )", "Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3 - ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled - ``p_MaxCount``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount \"\"\" AWS_OBJECT_TYPE =", "``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3 - ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled - ``p_MaxAgeInDays``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" p_DeleteSourceFromS3:", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags\"\"\" @property def rv_EndpointURL(self) -> GetAtt: \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#aws-properties-beanstalk-environment-return-values\"\"\" return GetAtt(resource=self, attr_name=\"EndpointURL\")", "metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"},", "``rp_S3Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key \"\"\" AWS_OBJECT_TYPE = 
\"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" rp_S3Bucket: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME:", "\"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description\"\"\" p_ResourceLifecycleConfig: typing.Union['ApplicationApplicationResourceLifecycleConfig', dict] = attr.ib( default=None, converter=ApplicationApplicationResourceLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationResourceLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME:", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname\"\"\" rp_TemplateName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename\"\"\"", "class ConfigurationTemplateConfigurationOptionSetting(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html Property Document:", "typing.Union['ConfigurationTemplateSourceConfiguration', dict] = attr.ib( default=None, converter=ConfigurationTemplateSourceConfiguration.from_dict, 
validator=attr.validators.optional(attr.validators.instance_of(ConfigurationTemplateSourceConfiguration)), metadata={AttrMeta.PROPERTY_NAME: \"SourceConfiguration\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration\"\"\" @attr.s", "Property Document: - ``p_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description - ``p_ResourceLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig \"\"\" AWS_OBJECT_TYPE", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename\"\"\" p_Tier: typing.Union['EnvironmentTier', dict] =", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc:", "\"TemplateName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename\"\"\" @attr.s class 
EnvironmentTier(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment.Tier\"", "class ApplicationVersion(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html Property Document:", "Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname - ``p_CNAMEPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description", "= \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html Property Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3 - ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled", "- ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" rp_Namespace: TypeHint.intrinsic_str = attr.ib( 
default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),", "Property Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3 - ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled - ``p_MaxCount``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount \"\"\" AWS_OBJECT_TYPE", "..core.model import ( Property, Resource, Tag, GetAtt, TypeHint, TypeCheck, ) from ..core.constant import", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule\"\"\" p_MaxCountRule: typing.Union['ApplicationMaxCountRule', dict] = attr.ib( default=None, converter=ApplicationMaxCountRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxCountRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCountRule\"}, ) \"\"\"Doc:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename - ``p_Tier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier - ``p_VersionLabel``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags \"\"\" 
AWS_OBJECT_TYPE =", "= attr.ib( default=None, converter=EnvironmentTier.from_dict, validator=attr.validators.optional(attr.validators.instance_of(EnvironmentTier)), metadata={AttrMeta.PROPERTY_NAME: \"Tier\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier\"\"\" p_VersionLabel: TypeHint.intrinsic_str =", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier\"\"\" p_VersionLabel: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLabel\"}, ) \"\"\"Doc:", "AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" p_DeleteSourceFromS3: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc:", "- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description - ``p_EnvironmentId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings - ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn -", "``p_Value``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" rp_Namespace: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role\"\"\" p_OptionSettings: typing.List[typing.Union['EnvironmentOptionSetting', dict]] = attr.ib( default=None, converter=EnvironmentOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(EnvironmentOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, )", "metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name\"\"\" p_OperationsRole: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"OperationsRole\"},", "\"SourceConfiguration\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration\"\"\" @attr.s class Application(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application\"", "metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"},", "Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html Property Document: - ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace - ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "- ``p_ServiceRole``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole - ``p_VersionLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" p_ServiceRole: TypeHint.intrinsic_str =", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier - ``p_VersionLabel``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment\" rp_ApplicationName: TypeHint.intrinsic_str", "AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key\"\"\" @attr.s class ApplicationMaxAgeRule(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" Resource Document:", "\"ApplicationName\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, )", "ApplicationApplicationVersionLifecycleConfig(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html Property Document: -", "\"TemplateName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename\"\"\" p_Tier: typing.Union['EnvironmentTier', dict] = attr.ib( default=None, converter=EnvironmentTier.from_dict, validator=attr.validators.optional(attr.validators.instance_of(EnvironmentTier)), metadata={AttrMeta.PROPERTY_NAME:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings - ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn - ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname - ``p_SourceConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration \"\"\" AWS_OBJECT_TYPE =", 
"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags\"\"\" @property def rv_EndpointURL(self) -> GetAtt: \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#aws-properties-beanstalk-environment-return-values\"\"\" return GetAtt(resource=self, attr_name=\"EndpointURL\") @attr.s class", "= \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname\"\"\"", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3\"\"\" p_Enabled: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled\"\"\" p_MaxCount:", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename\"\"\" @attr.s class EnvironmentTier(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment.Tier\" Resource Document:", "\"PlatformArn\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, )", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Type\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type\"\"\" p_Version: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeInDays\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays\"\"\" @attr.s class ConfigurationTemplateSourceConfiguration(Property): \"\"\" AWS Object", "EnvironmentOptionSetting(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html Property Document: -", "-*- coding: utf-8 -*- \"\"\" This module \"\"\" import attr import typing from", "AWS Object Type = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html Property Document: - ``rp_Namespace``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value\"\"\" @attr.s", "- ``rp_S3Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" rp_S3Bucket: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),", "\"AWS::ElasticBeanstalk::Environment.Tier\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html Property Document: - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name - ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type -", "p_Version: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Version\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version\"\"\" @attr.s class", 
"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html Property Document: - ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace - ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename -", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn - ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname - ``p_SourceConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" rp_ApplicationName: TypeHint.intrinsic_str", "attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname\"\"\" p_Description: 
TypeHint.intrinsic_str = attr.ib( default=None,", "attr.ib( default=None, converter=ConfigurationTemplateConfigurationOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ConfigurationTemplateConfigurationOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str =", "metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description\"\"\" p_ResourceLifecycleConfig: typing.Union['ApplicationApplicationResourceLifecycleConfig', dict] = attr.ib( default=None, converter=ApplicationApplicationResourceLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationResourceLifecycleConfig)),", "Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description - ``p_EnvironmentId``:", "default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname\"\"\" rp_SourceBundle: typing.Union['ApplicationVersionSourceBundle', dict] = attr.ib( default=None,", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule\"\"\" p_MaxCountRule: typing.Union['ApplicationMaxCountRule', dict] = attr.ib( default=None, converter=ApplicationMaxCountRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxCountRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCountRule\"}, )", "p_Name: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Name\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name\"\"\" p_Type: TypeHint.intrinsic_str", "= attr.ib( default=None, converter=ApplicationVersionSourceBundle.from_dict, validator=attr.validators.instance_of(ApplicationVersionSourceBundle), metadata={AttrMeta.PROPERTY_NAME: \"SourceBundle\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle\"\"\" p_Description: TypeHint.intrinsic_str =", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname\"\"\" p_CNAMEPrefix: TypeHint.intrinsic_str =", "class 
ApplicationApplicationVersionLifecycleConfig(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html Property Document:", "attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str = attr.ib( default=None,", "ApplicationVersion(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html Property Document: -", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname - ``p_SourceConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None,", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description\"\"\" p_ResourceLifecycleConfig: typing.Union['ApplicationApplicationResourceLifecycleConfig', dict] = attr.ib( default=None,", "default=None, 
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description\"\"\" p_EnvironmentName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Version\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version\"\"\" @attr.s class ConfigurationTemplateConfigurationOptionSetting(Property): \"\"\" AWS Object Type", "- ``rp_OptionName``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\"", "= \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" rp_Namespace: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace\"\"\"", "= \"AWS::ElasticBeanstalk::Environment.OptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html Property Document: - ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace - ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname", "\"VersionLifecycleConfig\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig\"\"\" #--- Resource declaration --- @attr.s class ConfigurationTemplate(Resource): \"\"\" AWS", "\"AWS::ElasticBeanstalk::Application.MaxCountRule\" p_DeleteSourceFromS3: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3\"\"\" p_Enabled:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn\"\"\" p_SolutionStackName:", "Document: - ``p_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description - ``p_ResourceLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig \"\"\" AWS_OBJECT_TYPE =", "\"SourceBundle\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, )", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Key\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key\"\"\" @attr.s class ApplicationMaxAgeRule(Property):", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname\"\"\"", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3\"\"\" p_Enabled: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc:", "p_SourceConfiguration: typing.Union['ConfigurationTemplateSourceConfiguration', dict] = attr.ib( default=None, converter=ConfigurationTemplateSourceConfiguration.from_dict, 
validator=attr.validators.optional(attr.validators.instance_of(ConfigurationTemplateSourceConfiguration)), metadata={AttrMeta.PROPERTY_NAME: \"SourceConfiguration\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration\"\"\"", "ApplicationVersionSourceBundle(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html Property Document: -", "Document: - ``p_ServiceRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole - ``p_VersionLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" p_ServiceRole: TypeHint.intrinsic_str", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid\"\"\" p_OptionSettings: typing.List[typing.Union['ConfigurationTemplateConfigurationOptionSetting', dict]] = attr.ib( default=None, converter=ConfigurationTemplateConfigurationOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ConfigurationTemplateConfigurationOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, )", "= attr.ib( default=None, 
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Key\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key\"\"\" @attr.s class ApplicationMaxAgeRule(Property): \"\"\"", "TypeHint, TypeCheck, ) from ..core.constant import AttrMeta #--- Property declaration --- @attr.s class", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "metadata={AttrMeta.PROPERTY_NAME: \"VersionLifecycleConfig\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig\"\"\" #--- Resource declaration --- @attr.s class ConfigurationTemplate(Resource): \"\"\"", "``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname - ``p_SourceConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib(", "= \"AWS::ElasticBeanstalk::Environment\" 
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname - ``p_CNAMEPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value\"\"\" @attr.s class ApplicationVersionSourceBundle(Property):", "metadata={AttrMeta.PROPERTY_NAME: \"CNAMEPrefix\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"},", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentId\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid\"\"\" p_OptionSettings: typing.List[typing.Union['ConfigurationTemplateConfigurationOptionSetting', dict]] = attr.ib( default=None,", "``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags \"\"\" AWS_OBJECT_TYPE = 
\"AWS::ElasticBeanstalk::Environment\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME:", "Type = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html Property Document: - ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace - ``rp_OptionName``:", "AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3\"\"\" p_Enabled: bool = attr.ib( default=None,", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description\"\"\" p_ResourceLifecycleConfig: typing.Union['ApplicationApplicationResourceLifecycleConfig', dict] = attr.ib( default=None, 
converter=ApplicationApplicationResourceLifecycleConfig.from_dict,", "This module \"\"\" import attr import typing from ..core.model import ( Property, Resource,", "- ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace - ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value \"\"\"", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig\"\"\" @attr.s class Environment(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment\" Resource Document:", "Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html Property Document: - ``rp_S3Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket -", "metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3\"\"\" p_Enabled: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"},", "\"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html Property Document: - ``rp_ApplicationName``:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value\"\"\" @attr.s class ApplicationVersionSourceBundle(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html", "= \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" p_ServiceRole: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ServiceRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole\"\"\"", "Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html Property Document: - ``p_MaxAgeRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule - ``p_MaxCountRule``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule \"\"\" AWS_OBJECT_TYPE =", "\"ServiceRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole\"\"\" p_VersionLifecycleConfig: typing.Union['ApplicationApplicationVersionLifecycleConfig', dict] = attr.ib( default=None, converter=ApplicationApplicationVersionLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationVersionLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME:", "Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html Property Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3 - ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled - ``p_MaxCount``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Type\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type\"\"\" p_Version: TypeHint.intrinsic_str = attr.ib( default=None,", "= 
\"AWS::ElasticBeanstalk::Application.MaxCountRule\" p_DeleteSourceFromS3: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3\"\"\"", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description\"\"\" p_EnvironmentName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentName\"}, ) \"\"\"Doc:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description - ``p_ResourceLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application\" p_ApplicationName: TypeHint.intrinsic_str", "class Environment(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html Property Document:", "bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled\"\"\" p_MaxCount: int =", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description - ``p_EnvironmentId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings - ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn - ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value\"\"\" @attr.s class ApplicationVersionSourceBundle(Property): \"\"\" AWS", "- ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings - ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn - 
``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname - ``p_SourceConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration \"\"\"", "= \"AWS::ElasticBeanstalk::Environment.Tier\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html Property Document: - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name - ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc:", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole\"\"\" p_VersionLifecycleConfig: typing.Union['ApplicationApplicationVersionLifecycleConfig', dict] = attr.ib( default=None, converter=ApplicationApplicationVersionLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationVersionLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLifecycleConfig\"}, )", "rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname\"\"\" rp_TemplateName: TypeHint.intrinsic_str", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None,", "Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname - ``p_CNAMEPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix - ``p_Description``:", "- ``p_EnvironmentName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name - ``p_OperationsRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings - ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn -", "metadata={AttrMeta.PROPERTY_NAME: 
\"VersionLabel\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel\"\"\" p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib( default=None, converter=Tag.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag),", "default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Key\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key\"\"\" @attr.s class ApplicationMaxAgeRule(Property): \"\"\" AWS Object", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name - ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type - ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.Tier\" p_Name: TypeHint.intrinsic_str", "- ``p_Tier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier - ``p_VersionLabel``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment\"", "\"AWS::ElasticBeanstalk::Environment.Tier\" p_Name: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Name\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name\"\"\" p_Type:", "attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Key\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key\"\"\" @attr.s class ApplicationMaxAgeRule(Property): \"\"\" AWS", "Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html Property Document: - ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace - ``rp_OptionName``:", "--- @attr.s class ConfigurationTemplate(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html", "validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3\"\"\" p_Enabled: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME:", "class Application(Resource): \"\"\" AWS Object 
Type = \"AWS::ElasticBeanstalk::Application\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html Property Document:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html Property Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3 - ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled - ``p_MaxCount``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount \"\"\"", "- ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled - ``p_MaxAgeInDays``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" p_DeleteSourceFromS3: bool =", "p_OptionSettings: typing.List[typing.Union['ConfigurationTemplateConfigurationOptionSetting', dict]] = attr.ib( default=None, converter=ConfigurationTemplateConfigurationOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ConfigurationTemplateConfigurationOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc:", "Document: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html Property Document: - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name - ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type - ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version", "\"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname - ``rp_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename \"\"\"", "- ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type - ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.Tier\" p_Name: TypeHint.intrinsic_str =", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule\"\"\" @attr.s class 
ApplicationApplicationResourceLifecycleConfig(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html", "``p_ServiceRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole - ``p_VersionLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" p_ServiceRole: TypeHint.intrinsic_str = attr.ib(", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings - ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn - ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname - ``p_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename - ``p_Tier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier", "AttrMeta #--- Property declaration --- @attr.s class EnvironmentOptionSetting(Property): \"\"\" AWS Object Type =", 
"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration\"\"\" @attr.s class Application(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html", "= attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname\"\"\" rp_SourceBundle: typing.Union['ApplicationVersionSourceBundle', dict] =", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description\"\"\" p_EnvironmentName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name\"\"\" p_OperationsRole:", "= attr.ib( default=None, converter=Tag.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"Tags\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags\"\"\" @property def", "metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"},", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description\"\"\" p_EnvironmentName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description - ``p_EnvironmentId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename\"\"\" 
p_Tier: typing.Union['EnvironmentTier', dict] = attr.ib( default=None, converter=EnvironmentTier.from_dict,", "ApplicationMaxCountRule(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html Property Document: -", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn\"\"\"", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname\"\"\" p_SourceConfiguration: typing.Union['ConfigurationTemplateSourceConfiguration', dict]", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings - ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn - ``p_SolutionStackName``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname - ``p_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc:", "p_ResourceName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname\"\"\" p_TemplateName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"CNAMEPrefix\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix\"\"\" 
p_Description: TypeHint.intrinsic_str = attr.ib( default=None,", "validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled\"\"\" p_MaxAgeInDays: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME:", "class ConfigurationTemplateSourceConfiguration(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html Property Document:", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid\"\"\" p_OptionSettings: typing.List[typing.Union['ConfigurationTemplateConfigurationOptionSetting', dict]] = attr.ib( default=None, converter=ConfigurationTemplateConfigurationOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(ConfigurationTemplateConfigurationOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"},", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"OperationsRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role\"\"\" p_OptionSettings: typing.List[typing.Union['EnvironmentOptionSetting', dict]]", "import AttrMeta #--- Property 
declaration --- @attr.s class EnvironmentOptionSetting(Property): \"\"\" AWS Object Type", "- ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.OptionSetting\"", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value\"\"\" @attr.s class ApplicationVersionSourceBundle(Property): \"\"\" AWS Object Type", "rp_S3Bucket: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Bucket\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket\"\"\" rp_S3Key: TypeHint.intrinsic_str", "``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.Tier\" p_Name: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "p_DeleteSourceFromS3: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), 
metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3\"\"\" p_Enabled: bool", "metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"},", "metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"},", "attr.ib( default=None, converter=ApplicationApplicationVersionLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationVersionLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLifecycleConfig\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig\"\"\" #--- Resource declaration ---", "= \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" p_MaxAgeRule: typing.Union['ApplicationMaxAgeRule', dict] = attr.ib( default=None, converter=ApplicationMaxAgeRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxAgeRule)), 
metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeRule\"}, )", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "p_MaxAgeInDays: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeInDays\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays\"\"\" @attr.s class", "-> GetAtt: \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#aws-properties-beanstalk-environment-return-values\"\"\" return GetAtt(resource=self, attr_name=\"EndpointURL\") @attr.s class ApplicationVersion(Resource): \"\"\" AWS Object", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "\"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, )", "Type = \"AWS::ElasticBeanstalk::ApplicationVersion\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname - ``rp_SourceBundle``:", "default=None, converter=ApplicationMaxCountRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxCountRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCountRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule\"\"\" @attr.s class ApplicationApplicationResourceLifecycleConfig(Property): \"\"\" AWS", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name\"\"\" p_OperationsRole: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname - ``rp_TemplateName``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename \"\"\" AWS_OBJECT_TYPE", "Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html Property Document: - ``p_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description - ``p_ResourceLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname\"\"\" rp_TemplateName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc:", "\"Version\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version\"\"\" @attr.s class ConfigurationTemplateConfigurationOptionSetting(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\"", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace\"\"\" 
rp_OptionName: TypeHint.intrinsic_str =", "rp_Namespace: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str", "- ``p_EnvironmentId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings - ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn - ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname -", "\"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html Property Document: - ``rp_ApplicationName``:", "= attr.ib( default=None, converter=EnvironmentOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(EnvironmentOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str", "Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html Property Document: - ``p_ServiceRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole - ``p_VersionLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig \"\"\" AWS_OBJECT_TYPE =", "\"PlatformArn\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, )", "Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html Property Document: - ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace - ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname - ``p_ResourceName``:", ") \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled\"\"\" p_MaxAgeInDays: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeInDays\"}, ) \"\"\"Doc:", "validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled\"\"\" p_MaxCount: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME:", "\"AWS::ElasticBeanstalk::Environment\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname\"\"\" p_CNAMEPrefix:", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLabel\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel\"\"\" p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib( default=None,", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value\"\"\" @attr.s class ApplicationMaxCountRule(Property): \"\"\"", "\"AWS::ElasticBeanstalk::ApplicationVersion\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname\"\"\" rp_SourceBundle:", "\"Type\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type\"\"\" p_Version: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Version\"}, )", "Property declaration --- @attr.s class EnvironmentOptionSetting(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" Resource", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html Property Document: - ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace - ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename -", "= attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"OperationsRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role\"\"\" p_OptionSettings: typing.List[typing.Union['EnvironmentOptionSetting', dict]] =", "Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html Property Document: - ``rp_S3Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket - ``rp_S3Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key \"\"\" AWS_OBJECT_TYPE =", "class ApplicationMaxCountRule(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html Property Document:", "p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description\"\"\" p_EnvironmentId: TypeHint.intrinsic_str", "= \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html Property Document: - ``rp_S3Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket - ``rp_S3Key``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename\"\"\" p_Tier: typing.Union['EnvironmentTier', dict] = attr.ib(", "\"AWS::ElasticBeanstalk::ApplicationVersion\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname - ``rp_SourceBundle``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle -", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description - ``p_ResourceLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application\" p_ApplicationName: 
TypeHint.intrinsic_str = attr.ib( default=None,", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled\"\"\" p_MaxAgeInDays: int = attr.ib(", "metadata={AttrMeta.PROPERTY_NAME: \"Tier\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier\"\"\" p_VersionLabel: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLabel\"},", "iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"Tags\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags\"\"\" @property def rv_EndpointURL(self) -> GetAtt: \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#aws-properties-beanstalk-environment-return-values\"\"\"", "return GetAtt(resource=self, attr_name=\"EndpointURL\") @attr.s class ApplicationVersion(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion\" Resource", "- ``p_MaxAgeInDays``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" p_DeleteSourceFromS3: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)),", 
"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" rp_Namespace: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"},", "AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc:", "attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename\"\"\" @attr.s class EnvironmentTier(Property): \"\"\" AWS", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description\"\"\" p_ResourceLifecycleConfig: typing.Union['ApplicationApplicationResourceLifecycleConfig', dict] = attr.ib( default=None, converter=ApplicationApplicationResourceLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationResourceLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceLifecycleConfig\"}, )", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname\"\"\"", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#aws-properties-beanstalk-environment-return-values\"\"\" return GetAtt(resource=self, attr_name=\"EndpointURL\") @attr.s class ApplicationVersion(Resource): \"\"\" AWS Object Type =", "class ApplicationMaxAgeRule(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html Property Document:", "= \"AWS::ElasticBeanstalk::Environment.Tier\" p_Name: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Name\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name\"\"\"", "``rp_SourceBundle``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-description \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib(", "validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename\"\"\" @attr.s class EnvironmentTier(Property): \"\"\" AWS Object Type", "\"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment.Tier\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html Property Document: - ``p_Name``:", "Property Document: - ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace - ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename - ``p_Value``:", "= attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str = attr.ib(", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLabel\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel\"\"\" p_Tags: typing.List[typing.Union[Tag, dict]] =", "``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description - ``p_ResourceLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application\" p_ApplicationName: TypeHint.intrinsic_str = attr.ib(", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname - ``p_CNAMEPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description - ``p_EnvironmentName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name - ``p_OperationsRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role", "- ``p_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description - ``p_ResourceLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig \"\"\" AWS_OBJECT_TYPE = 
\"AWS::ElasticBeanstalk::Application\"", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name\"\"\" p_OperationsRole: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"OperationsRole\"}, ) \"\"\"Doc:", "\"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html Property Document: - ``rp_ApplicationName``:", "p_Enabled: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled\"\"\" p_MaxCount: int", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" rp_S3Bucket: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Bucket\"},", "Document: - ``p_MaxAgeRule``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule - ``p_MaxCountRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" p_MaxAgeRule: typing.Union['ApplicationMaxAgeRule',", "- ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings - ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn - ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname - ``p_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename -", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" rp_Namespace: TypeHint.intrinsic_str = attr.ib( default=None,", "Resource Document: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html Property Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3 - ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled - ``p_MaxAgeInDays``:", "coding: utf-8 -*- \"\"\" This module \"\"\" import attr import typing from ..core.model", "- ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn - ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname - ``p_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename - ``p_Tier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier -", "typing.Union['ApplicationMaxAgeRule', dict] = attr.ib( default=None, converter=ApplicationMaxAgeRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxAgeRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule\"\"\" p_MaxCountRule:", "GetAtt, TypeHint, TypeCheck, ) from ..core.constant import AttrMeta #--- Property declaration --- @attr.s", "\"AWS::ElasticBeanstalk::Application.MaxAgeRule\" 
p_DeleteSourceFromS3: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3\"\"\" p_Enabled:", "AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" rp_Namespace: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc:", "validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname\"\"\" rp_TemplateName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME:", "Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html Property Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3 - ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled - ``p_MaxAgeInDays``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays", "\"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value\"\"\"", "Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html Property Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3 - ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled - ``p_MaxCount``:", "``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description - ``p_EnvironmentId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings - ``p_PlatformArn``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn - ``p_SolutionStackName``:", "@attr.s class Environment(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html Property", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Name\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name\"\"\" p_Type: TypeHint.intrinsic_str =", "= attr.ib( default=None, converter=ApplicationApplicationResourceLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationResourceLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceLifecycleConfig\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig\"\"\" @attr.s class Environment(Resource):", "attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname\"\"\" rp_TemplateName: TypeHint.intrinsic_str = attr.ib( default=None,", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCount\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount\"\"\" @attr.s class ApplicationApplicationVersionLifecycleConfig(Property): \"\"\"", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier\"\"\" p_VersionLabel: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLabel\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel\"\"\"", "metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description\"\"\" p_EnvironmentId: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentId\"},", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Name\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name\"\"\" p_Type: TypeHint.intrinsic_str = attr.ib(", "default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname\"\"\" 
rp_TemplateName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),", "\"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description\"\"\" p_EnvironmentId: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentId\"}, )", "\"AWS::ElasticBeanstalk::Application\" p_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name\"\"\" p_Description:", "\"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html Property Document: - ``rp_Namespace``:", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled\"\"\" p_MaxAgeInDays: int = attr.ib( default=None,", "metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"},", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname\"\"\" rp_SourceBundle: typing.Union['ApplicationVersionSourceBundle', dict] = attr.ib( default=None, converter=ApplicationVersionSourceBundle.from_dict, validator=attr.validators.instance_of(ApplicationVersionSourceBundle), metadata={AttrMeta.PROPERTY_NAME: \"SourceBundle\"}, )", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Type\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type\"\"\" p_Version: TypeHint.intrinsic_str =", "default=None, converter=ApplicationVersionSourceBundle.from_dict, validator=attr.validators.instance_of(ApplicationVersionSourceBundle), metadata={AttrMeta.PROPERTY_NAME: \"SourceBundle\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None,", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname\"\"\" p_SourceConfiguration: typing.Union['ConfigurationTemplateSourceConfiguration', dict] = attr.ib( default=None, converter=ConfigurationTemplateSourceConfiguration.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ConfigurationTemplateSourceConfiguration)), metadata={AttrMeta.PROPERTY_NAME: \"SourceConfiguration\"}, )", 
"converter=ApplicationApplicationVersionLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationVersionLifecycleConfig)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLifecycleConfig\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig\"\"\" #--- Resource declaration --- @attr.s class", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole - ``p_VersionLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" p_ServiceRole: TypeHint.intrinsic_str = attr.ib( default=None,", "default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "``p_VersionLifecycleConfig``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" p_ServiceRole: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "``p_EnvironmentId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings - ``p_PlatformArn``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn - ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname - ``p_SourceConfiguration``:", "``p_ResourceLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application\" p_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "-*- \"\"\" This module \"\"\" import attr import typing from ..core.model import (", 
"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"},", "validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME:", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value\"\"\" @attr.s class ApplicationMaxCountRule(Property): \"\"\" AWS Object Type", "``rp_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME:", "@attr.s class EnvironmentTier(Property): 
\"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment.Tier\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html Property", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" p_MaxAgeRule: typing.Union['ApplicationMaxAgeRule', dict] = attr.ib( default=None, converter=ApplicationMaxAgeRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxAgeRule)),", "default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled\"\"\" p_MaxCount: int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)),", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname\"\"\" p_Description: TypeHint.intrinsic_str =", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename\"\"\" p_Value: 
TypeHint.intrinsic_str =", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None,", "``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled - ``p_MaxAgeInDays``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" p_DeleteSourceFromS3: bool = attr.ib(", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name\"\"\" p_Type: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Type\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type\"\"\" p_Version:", "Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname - ``p_Description``:", "\"\"\" AWS_OBJECT_TYPE = 
\"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" p_MaxAgeRule: typing.Union['ApplicationMaxAgeRule', dict] = attr.ib( default=None, converter=ApplicationMaxAgeRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxAgeRule)), metadata={AttrMeta.PROPERTY_NAME:", "metadata={AttrMeta.PROPERTY_NAME: \"Type\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type\"\"\" p_Version: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Version\"},", "\"ResourceName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, )", "- ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled - ``p_MaxCount``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" p_DeleteSourceFromS3: bool =", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description\"\"\" p_EnvironmentId: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentId\"}, ) \"\"\"Doc:", "- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-description \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),", "``p_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename - ``p_Tier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier - ``p_VersionLabel``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags \"\"\" AWS_OBJECT_TYPE", "converter=ApplicationMaxCountRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxCountRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCountRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule\"\"\" @attr.s class ApplicationApplicationResourceLifecycleConfig(Property): \"\"\" AWS Object", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" rp_Namespace: TypeHint.intrinsic_str = 
attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"},", "= \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html Property Document: - ``p_ServiceRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole - ``p_VersionLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename\"\"\" p_Tier: typing.Union['EnvironmentTier', dict] = attr.ib( default=None, converter=EnvironmentTier.from_dict, validator=attr.validators.optional(attr.validators.instance_of(EnvironmentTier)), metadata={AttrMeta.PROPERTY_NAME: \"Tier\"}, ) \"\"\"Doc:", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name\"\"\" p_Type: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Type\"}, ) \"\"\"Doc:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename\"\"\" p_Value:", "Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html Property Document: - ``p_ServiceRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole -", "- ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname - ``p_CNAMEPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description - ``p_EnvironmentName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name -", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Type\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type\"\"\" p_Version: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME:", "Type = \"AWS::ElasticBeanstalk::Environment\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname - ``p_CNAMEPrefix``:", "- ``p_ResourceLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application\" p_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname\"\"\" p_ResourceName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename\"\"\"", "default=None, 
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename\"\"\" @attr.s class EnvironmentTier(Property): \"\"\" AWS Object", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description\"\"\" p_EnvironmentId: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "\"AWS::ElasticBeanstalk::Environment\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname - ``p_CNAMEPrefix``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix -", "rp_Namespace: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str", 
"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-optionname\"\"\" p_ResourceName:", "= \"AWS::ElasticBeanstalk::ApplicationVersion\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname - ``rp_SourceBundle``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle", "AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" rp_S3Bucket: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Bucket\"}, ) \"\"\"Doc:", "Object Type = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html Property Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3 -", "default=None, 
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"OperationsRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role\"\"\" p_OptionSettings: typing.List[typing.Union['EnvironmentOptionSetting', dict]] = attr.ib( default=None,", "int = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCount\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount\"\"\" @attr.s class ApplicationApplicationVersionLifecycleConfig(Property):", "\"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname\"\"\" p_SourceConfiguration: typing.Union['ConfigurationTemplateSourceConfiguration', dict] = attr.ib( default=None, converter=ConfigurationTemplateSourceConfiguration.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ConfigurationTemplateSourceConfiguration)), metadata={AttrMeta.PROPERTY_NAME:", "``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name - ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type - ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.Tier\" p_Name:", "\"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" 
rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, )", "--- @attr.s class EnvironmentOptionSetting(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html", "\"AWS::ElasticBeanstalk::Environment.OptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html Property Document: - ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace - ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname -", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value\"\"\" @attr.s class ApplicationVersionSourceBundle(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" Resource Document:", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str = attr.ib( default=None,", "rp_SourceBundle: typing.Union['ApplicationVersionSourceBundle', dict] = attr.ib( default=None, converter=ApplicationVersionSourceBundle.from_dict, validator=attr.validators.instance_of(ApplicationVersionSourceBundle), metadata={AttrMeta.PROPERTY_NAME: \"SourceBundle\"}, ) 
\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-sourcebundle\"\"\"", "\"Value\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value\"\"\" @attr.s class ApplicationMaxCountRule(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.MaxCountRule\"", "metadata={AttrMeta.PROPERTY_NAME: \"SourceConfiguration\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration\"\"\" @attr.s class Application(Resource): \"\"\" AWS Object Type =", "- ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description - ``p_EnvironmentId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings -", "Tag, GetAtt, TypeHint, TypeCheck, ) from ..core.constant import AttrMeta #--- Property declaration ---", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3 - ``p_Enabled``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled - ``p_MaxAgeInDays``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" p_DeleteSourceFromS3: bool", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str = attr.ib( default=None,", "Document: - ``rp_S3Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket - ``rp_S3Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" rp_S3Bucket: TypeHint.intrinsic_str", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn\"\"\" p_SolutionStackName:", "attr_name=\"EndpointURL\") @attr.s class ApplicationVersion(Resource): \"\"\" AWS Object 
Type = \"AWS::ElasticBeanstalk::ApplicationVersion\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html", "- ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" rp_Namespace: TypeHint.intrinsic_str =", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Name\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name\"\"\" p_Type: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description\"\"\" p_EnvironmentId:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled\"\"\" p_MaxAgeInDays: int = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(int)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeInDays\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays\"\"\" @attr.s", "\"DeleteSourceFromS3\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3\"\"\" p_Enabled: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, )", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" p_DeleteSourceFromS3: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"},", "rv_EndpointURL(self) -> GetAtt: \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#aws-properties-beanstalk-environment-return-values\"\"\" return GetAtt(resource=self, attr_name=\"EndpointURL\") @attr.s class ApplicationVersion(Resource): \"\"\" AWS", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value\"\"\" @attr.s class ApplicationMaxCountRule(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" Resource", "..core.constant import AttrMeta #--- Property declaration --- @attr.s class EnvironmentOptionSetting(Property): \"\"\" AWS Object", 
"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html Property Document: - ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name - ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type - ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version \"\"\"", "p_Type: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Type\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type\"\"\" p_Version: TypeHint.intrinsic_str", "p_CNAMEPrefix: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"CNAMEPrefix\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix\"\"\" p_Description: TypeHint.intrinsic_str", "AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" rp_Namespace: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-description - ``p_EnvironmentName``: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name - ``p_OperationsRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-operations-role - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings", "AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc:", "``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled - ``p_MaxCount``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-maxcount \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" p_DeleteSourceFromS3: bool = attr.ib(", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace - ``rp_OptionName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-optionname - ``p_ResourceName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-elasticbeanstalk-environment-optionsetting-resourcename - ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value \"\"\" AWS_OBJECT_TYPE =", "attr.ib( default=None, 
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str = attr.ib( default=None,", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname\"\"\" rp_SourceBundle: typing.Union['ApplicationVersionSourceBundle', dict]", "metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename\"\"\" @attr.s class EnvironmentTier(Property): \"\"\" AWS Object Type =", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description\"\"\" p_EnvironmentId: TypeHint.intrinsic_str = attr.ib(", "\"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" rp_Namespace: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, )", "@attr.s class ApplicationMaxCountRule(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.MaxCountRule\" Resource Document: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html Property", "\"MaxAgeRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule\"\"\" p_MaxCountRule: typing.Union['ApplicationMaxCountRule', dict] = attr.ib( default=None, converter=ApplicationMaxCountRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxCountRule)), metadata={AttrMeta.PROPERTY_NAME:", "- ``p_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename - ``p_Tier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier - ``p_VersionLabel``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags \"\"\"", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Version\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version\"\"\" @attr.s class ConfigurationTemplateConfigurationOptionSetting(Property): \"\"\"", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ServiceRole\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole\"\"\" p_VersionLifecycleConfig: typing.Union['ApplicationApplicationVersionLifecycleConfig', dict] =", "validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxCountRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCountRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule\"\"\" @attr.s class ApplicationApplicationResourceLifecycleConfig(Property): \"\"\" AWS Object Type", "attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname\"\"\" p_TemplateName: TypeHint.intrinsic_str = attr.ib( default=None,", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-deletesourcefroms3\"\"\" p_Enabled: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"Enabled\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-enabled\"\"\" p_MaxAgeInDays:", "\"AWS::ElasticBeanstalk::Application.MaxCountRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html Property 
Document: - ``p_DeleteSourceFromS3``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3 - ``p_Enabled``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-enabled -", "- ``p_Value``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-option-settings.html#cfn-beanstalk-optionsettings-value \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" rp_Namespace: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),", "validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxcountrule.html#cfn-elasticbeanstalk-application-maxcountrule-deletesourcefroms3\"\"\" p_Enabled: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME:", "@attr.s class ApplicationVersionSourceBundle(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html Property", "\"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html Property Document: - ``p_ApplicationName``:", "AWS Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" Resource Document: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html Property Document: - ``p_ServiceRole``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole", "\"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html Property Document: - ``rp_ApplicationName``:", "\"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html Property Document: - ``rp_S3Bucket``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket - ``rp_S3Key``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3key \"\"\"", "``p_VersionLabel``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel - ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-elasticbeanstalk-environment-tags \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib(", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version\"\"\" @attr.s class ConfigurationTemplateConfigurationOptionSetting(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" Resource Document:", "dict] 
= attr.ib( default=None, converter=ApplicationMaxCountRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxCountRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCountRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule\"\"\" @attr.s class", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#aws-properties-beanstalk-environment-return-values\"\"\" return GetAtt(resource=self, attr_name=\"EndpointURL\") @attr.s class ApplicationVersion(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion\"", "metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value\"\"\" @attr.s class ApplicationMaxCountRule(Property): \"\"\" AWS Object Type =", "\"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" rp_S3Bucket: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Bucket\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket\"\"\" rp_S3Key:", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type\"\"\" p_Version: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Version\"}, ) \"\"\"Doc: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version\"\"\" @attr.s", "metadata={AttrMeta.PROPERTY_NAME: \"ServiceRole\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-servicerole\"\"\" p_VersionLifecycleConfig: typing.Union['ApplicationApplicationVersionLifecycleConfig', dict] = attr.ib( default=None, converter=ApplicationApplicationVersionLifecycleConfig.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationApplicationVersionLifecycleConfig)),", "validator=attr.validators.optional(attr.validators.instance_of(EnvironmentTier)), metadata={AttrMeta.PROPERTY_NAME: \"Tier\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier\"\"\" p_VersionLabel: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Description\"}, ) \"\"\"Doc:", "@property def rv_EndpointURL(self) -> GetAtt: \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#aws-properties-beanstalk-environment-return-values\"\"\" return GetAtt(resource=self, attr_name=\"EndpointURL\") @attr.s class ApplicationVersion(Resource):", "default=None, converter=EnvironmentOptionSetting.from_list, 
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(EnvironmentOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings\"\"\" p_PlatformArn: TypeHint.intrinsic_str = attr.ib(", "Property Document: - ``p_MaxAgeRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule - ``p_MaxCountRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" p_MaxAgeRule:", "@attr.s class ApplicationApplicationResourceLifecycleConfig(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationResourceLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html Property", "declaration --- @attr.s class EnvironmentOptionSetting(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" Resource Document:", "``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type - ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.Tier\" p_Name: 
TypeHint.intrinsic_str = attr.ib(", "Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description - ``p_EnvironmentId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-name\"\"\" p_Description: TypeHint.intrinsic_str =", "Environment(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Environment\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html Property Document: -", "default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"PlatformArn\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-platformarn\"\"\" p_SolutionStackName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "Property, Resource, Tag, GetAtt, TypeHint, TypeCheck, ) from ..core.constant import AttrMeta #--- 
Property", "\"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname\"\"\" p_TemplateName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"TemplateName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename\"\"\"", "\"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ApplicationVersion.SourceBundle\" rp_S3Bucket: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Bucket\"}, )", "- ``p_SolutionStackName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-solutionstackname - ``p_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-templatename - ``p_Tier``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier - ``p_VersionLabel``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-versionlabel -", "ApplicationMaxAgeRule(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html Property Document: -", "\"Tier\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier\"\"\" p_VersionLabel: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"VersionLabel\"}, )", "- ``p_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-name - ``p_Type``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-type - ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.Tier\"", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"EnvironmentName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-name\"\"\" p_OperationsRole: TypeHint.intrinsic_str =", "- ``p_Version``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment-tier.html#cfn-beanstalk-env-tier-version \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.Tier\" p_Name: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)),", "validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname\"\"\" rp_SourceBundle: typing.Union['ApplicationVersionSourceBundle', dict] = attr.ib( default=None, converter=ApplicationVersionSourceBundle.from_dict,", 
"http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationresourcelifecycleconfig.html#cfn-elasticbeanstalk-application-applicationresourcelifecycleconfig-versionlifecycleconfig\"\"\" #--- Resource declaration --- @attr.s class ConfigurationTemplate(Resource): \"\"\" AWS Object Type =", "AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc:", "Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname - ``rp_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename \"\"\" AWS_OBJECT_TYPE =", "\"AWS::ElasticBeanstalk::ConfigurationTemplate\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html Property Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description -", "TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"Value\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-value\"\"\" @attr.s class ApplicationMaxCountRule(Property):", "@attr.s class ApplicationApplicationVersionLifecycleConfig(Property): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html Property", "``p_MaxCountRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\" p_MaxAgeRule: typing.Union['ApplicationMaxAgeRule', dict] = attr.ib( default=None, converter=ApplicationMaxAgeRule.from_dict,", "= attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname\"\"\" p_CNAMEPrefix: TypeHint.intrinsic_str = attr.ib(", "ConfigurationTemplate(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html Property Document: -", "typing.List[typing.Union['EnvironmentOptionSetting', dict]] = attr.ib( default=None, 
converter=EnvironmentOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(EnvironmentOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings\"\"\"", "\"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html#cfn-elasticbeanstalk-applicationversion-applicationname\"\"\" rp_SourceBundle: typing.Union['ApplicationVersionSourceBundle', dict] = attr.ib( default=None, converter=ApplicationVersionSourceBundle.from_dict, validator=attr.validators.instance_of(ApplicationVersionSourceBundle), metadata={AttrMeta.PROPERTY_NAME:", "validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"S3Bucket\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-sourcebundle.html#cfn-beanstalk-sourcebundle-s3bucket\"\"\" rp_S3Key: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME:", "Document: - ``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-applicationname - ``rp_TemplateName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-sourceconfiguration.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration-templatename \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate.SourceConfiguration\" rp_ApplicationName: TypeHint.intrinsic_str", 
"``rp_ApplicationName``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-applicationname - ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-description - ``p_EnvironmentId``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-environmentid - ``p_OptionSettings``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-optionsettings - ``p_PlatformArn``:", "``p_SourceConfiguration``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-sourceconfiguration \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" rp_ApplicationName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME:", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule\"\"\" p_MaxCountRule: typing.Union['ApplicationMaxCountRule', dict] = attr.ib( default=None, converter=ApplicationMaxCountRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxCountRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxCountRule\"},", "\"\"\" This module \"\"\" import attr import typing from ..core.model import ( Property,", "= attr.ib( default=None, converter=ApplicationMaxAgeRule.from_dict, 
validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxAgeRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule\"\"\" p_MaxCountRule: typing.Union['ApplicationMaxCountRule', dict]", "p_SolutionStackName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"SolutionStackName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticbeanstalk-configurationtemplate.html#cfn-elasticbeanstalk-configurationtemplate-solutionstackname\"\"\" p_SourceConfiguration: typing.Union['ConfigurationTemplateSourceConfiguration',", "attr.ib( default=None, converter=ApplicationMaxAgeRule.from_dict, validator=attr.validators.optional(attr.validators.instance_of(ApplicationMaxAgeRule)), metadata={AttrMeta.PROPERTY_NAME: \"MaxAgeRule\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule\"\"\" p_MaxCountRule: typing.Union['ApplicationMaxCountRule', dict] =", "declaration --- @attr.s class ConfigurationTemplate(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate\" Resource Document:", "validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"ApplicationName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-applicationname\"\"\" p_CNAMEPrefix: TypeHint.intrinsic_str = attr.ib( default=None, 
validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:", "attr.ib( default=None, converter=EnvironmentTier.from_dict, validator=attr.validators.optional(attr.validators.instance_of(EnvironmentTier)), metadata={AttrMeta.PROPERTY_NAME: \"Tier\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-tier\"\"\" p_VersionLabel: TypeHint.intrinsic_str = attr.ib(", "\"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Environment.OptionSetting\" rp_Namespace: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"Namespace\"}, )", "``p_MaxAgeInDays``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-maxagerule.html#cfn-elasticbeanstalk-application-maxagerule-maxageindays \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" p_DeleteSourceFromS3: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME:", "import attr import typing from ..core.model import ( Property, Resource, Tag, GetAtt, TypeHint,", "AWS Object Type = \"AWS::ElasticBeanstalk::ConfigurationTemplate.ConfigurationOptionSetting\" Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html Property Document: - ``rp_Namespace``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace", "http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html 
Property Document: - ``p_MaxAgeRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxagerule - ``p_MaxCountRule``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-application-applicationversionlifecycleconfig.html#cfn-elasticbeanstalk-application-applicationversionlifecycleconfig-maxcountrule \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.ApplicationVersionLifecycleConfig\"", "\"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application.MaxAgeRule\" p_DeleteSourceFromS3: bool = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(bool)), metadata={AttrMeta.PROPERTY_NAME: \"DeleteSourceFromS3\"}, )", "dict]] = attr.ib( default=None, converter=EnvironmentOptionSetting.from_list, validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(EnvironmentOptionSetting), iterable_validator=attr.validators.instance_of(list))), metadata={AttrMeta.PROPERTY_NAME: \"OptionSettings\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-optionsettings\"\"\" p_PlatformArn:", "= attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"ResourceName\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-resourcename\"\"\" p_Value: TypeHint.intrinsic_str = attr.ib(", "@attr.s class ApplicationVersion(Resource): \"\"\" AWS Object Type = \"AWS::ElasticBeanstalk::ApplicationVersion\" Resource Document: 
http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-version.html Property", "- ``p_Description``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-description - ``p_ResourceLifecycleConfig``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk.html#cfn-elasticbeanstalk-application-resourcelifecycleconfig \"\"\" AWS_OBJECT_TYPE = \"AWS::ElasticBeanstalk::Application\" p_ApplicationName: TypeHint.intrinsic_str =", ") \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticbeanstalk-configurationtemplate-configurationoptionsetting.html#cfn-elasticbeanstalk-configurationtemplate-configurationoptionsetting-namespace\"\"\" rp_OptionName: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), metadata={AttrMeta.PROPERTY_NAME: \"OptionName\"}, ) \"\"\"Doc:", "TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"CNAMEPrefix\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix\"\"\" p_Description: TypeHint.intrinsic_str =", "validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME: \"CNAMEPrefix\"}, ) \"\"\"Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-beanstalk-environment.html#cfn-beanstalk-environment-cnameprefix\"\"\" p_Description: TypeHint.intrinsic_str = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(TypeCheck.intrinsic_str_type)), metadata={AttrMeta.PROPERTY_NAME:" ]
[ "only needed when the array to deserialize isn't a native cupy array. \"\"\"", "if not x.flags.c_contiguous: x = cupy.array(x, copy=True) header = x.__cuda_array_interface__.copy() return header, [x]", "cupy array. \"\"\" def __init__(self, ary): cai = ary.__cuda_array_interface__ cai_cupy_vsn = cupy.ndarray(0).__cuda_array_interface__[\"version\"] if", "PatchedCudaArrayInterface: \"\"\"This class do two things: 1) Makes sure that __cuda_array_interface__['strides'] behaves as", "del self.base @cuda_serialize.register(cupy.ndarray) def serialize_cupy_ndarray(x): # Making sure `x` is behaving if not", "ary): cai = ary.__cuda_array_interface__ cai_cupy_vsn = cupy.ndarray(0).__cuda_array_interface__[\"version\"] if cai.get(\"strides\") is None and cai_cupy_vsn", "self.base = ary def __del__(self): # Making sure that the cuda context is", "None) self.__cuda_array_interface__ = cai # Save a ref to ary so it won't", "@cuda_serialize.register(cupy.ndarray) def serialize_cupy_ndarray(x): # Making sure `x` is behaving if not x.flags.c_contiguous: x", "cupy.ndarray(0).__cuda_array_interface__[\"version\"] if cai.get(\"strides\") is None and cai_cupy_vsn < 2: cai.pop(\"strides\", None) self.__cuda_array_interface__ =", "active when deallocating the base cuda array. Notice, this is only needed when", "serialization GPU arrays. \"\"\" import cupy from .cuda import cuda_serialize, cuda_deserialize class PatchedCudaArrayInterface:", "the base cuda array. Notice, this is only needed when the array to", "array to deserialize isn't a native cupy array. 
\"\"\" def __init__(self, ary): cai", "header, [x] @cuda_deserialize.register(cupy.ndarray) def deserialize_cupy_array(header, frames): (frame,) = frames if not isinstance(frame, cupy.ndarray):", "(frame,) = frames if not isinstance(frame, cupy.ndarray): frame = PatchedCudaArrayInterface(frame) arr = cupy.ndarray(", "import cuda_serialize, cuda_deserialize class PatchedCudaArrayInterface: \"\"\"This class do two things: 1) Makes sure", "if cai.get(\"strides\") is None and cai_cupy_vsn < 2: cai.pop(\"strides\", None) self.__cuda_array_interface__ = cai", "Notice, this is only needed when the array to deserialize isn't a native", "class PatchedCudaArrayInterface: \"\"\"This class do two things: 1) Makes sure that __cuda_array_interface__['strides'] behaves", "the array to deserialize isn't a native cupy array. \"\"\" def __init__(self, ary):", "import numba.cuda numba.cuda.current_context() except ImportError: pass del self.base @cuda_serialize.register(cupy.ndarray) def serialize_cupy_ndarray(x): # Making", "of scope self.base = ary def __del__(self): # Making sure that the cuda", "\"\"\"This class do two things: 1) Makes sure that __cuda_array_interface__['strides'] behaves as specified", "context is active # when deallocating the base cuda array try: import numba.cuda", "= ary def __del__(self): # Making sure that the cuda context is active", "cupy.ndarray): frame = PatchedCudaArrayInterface(frame) arr = cupy.ndarray( header[\"shape\"], dtype=header[\"typestr\"], memptr=cupy.asarray(frame).data ) return arr", "cai # Save a ref to ary so it won't go out of", "Makes sure that __cuda_array_interface__['strides'] behaves as specified in the protocol. 
2) Makes sure", "def __del__(self): # Making sure that the cuda context is active # when", "= cupy.array(x, copy=True) header = x.__cuda_array_interface__.copy() return header, [x] @cuda_deserialize.register(cupy.ndarray) def deserialize_cupy_array(header, frames):", "def serialize_cupy_ndarray(x): # Making sure `x` is behaving if not x.flags.c_contiguous: x =", "try: import numba.cuda numba.cuda.current_context() except ImportError: pass del self.base @cuda_serialize.register(cupy.ndarray) def serialize_cupy_ndarray(x): #", "= ary.__cuda_array_interface__ cai_cupy_vsn = cupy.ndarray(0).__cuda_array_interface__[\"version\"] if cai.get(\"strides\") is None and cai_cupy_vsn < 2:", "x = cupy.array(x, copy=True) header = x.__cuda_array_interface__.copy() return header, [x] @cuda_deserialize.register(cupy.ndarray) def deserialize_cupy_array(header,", "Making sure that the cuda context is active # when deallocating the base", "sure that __cuda_array_interface__['strides'] behaves as specified in the protocol. 2) Makes sure that", "cupy.array(x, copy=True) header = x.__cuda_array_interface__.copy() return header, [x] @cuda_deserialize.register(cupy.ndarray) def deserialize_cupy_array(header, frames): (frame,)", "behaving if not x.flags.c_contiguous: x = cupy.array(x, copy=True) header = x.__cuda_array_interface__.copy() return header,", "cuda_serialize, cuda_deserialize class PatchedCudaArrayInterface: \"\"\"This class do two things: 1) Makes sure that", "# when deallocating the base cuda array try: import numba.cuda numba.cuda.current_context() except ImportError:", "is active # when deallocating the base cuda array try: import numba.cuda numba.cuda.current_context()", "so it won't go out of scope self.base = ary def __del__(self): #", "if not isinstance(frame, cupy.ndarray): frame = PatchedCudaArrayInterface(frame) arr = cupy.ndarray( header[\"shape\"], dtype=header[\"typestr\"], memptr=cupy.asarray(frame).data", "array. 
\"\"\" def __init__(self, ary): cai = ary.__cuda_array_interface__ cai_cupy_vsn = cupy.ndarray(0).__cuda_array_interface__[\"version\"] if cai.get(\"strides\")", "[x] @cuda_deserialize.register(cupy.ndarray) def deserialize_cupy_array(header, frames): (frame,) = frames if not isinstance(frame, cupy.ndarray): frame", "context is active when deallocating the base cuda array. Notice, this is only", "sure that the cuda context is active # when deallocating the base cuda", "isn't a native cupy array. \"\"\" def __init__(self, ary): cai = ary.__cuda_array_interface__ cai_cupy_vsn", "cuda context is active # when deallocating the base cuda array try: import", "pass del self.base @cuda_serialize.register(cupy.ndarray) def serialize_cupy_ndarray(x): # Making sure `x` is behaving if", "isinstance(frame, cupy.ndarray): frame = PatchedCudaArrayInterface(frame) arr = cupy.ndarray( header[\"shape\"], dtype=header[\"typestr\"], memptr=cupy.asarray(frame).data ) return", "it won't go out of scope self.base = ary def __del__(self): # Making", "cai.get(\"strides\") is None and cai_cupy_vsn < 2: cai.pop(\"strides\", None) self.__cuda_array_interface__ = cai #", "def __init__(self, ary): cai = ary.__cuda_array_interface__ cai_cupy_vsn = cupy.ndarray(0).__cuda_array_interface__[\"version\"] if cai.get(\"strides\") is None", "sure `x` is behaving if not x.flags.c_contiguous: x = cupy.array(x, copy=True) header =", "scope self.base = ary def __del__(self): # Making sure that the cuda context", "native cupy array. \"\"\" def __init__(self, ary): cai = ary.__cuda_array_interface__ cai_cupy_vsn = cupy.ndarray(0).__cuda_array_interface__[\"version\"]", "cuda array try: import numba.cuda numba.cuda.current_context() except ImportError: pass del self.base @cuda_serialize.register(cupy.ndarray) def", "protocol. 
2) Makes sure that the cuda context is active when deallocating the", "def deserialize_cupy_array(header, frames): (frame,) = frames if not isinstance(frame, cupy.ndarray): frame = PatchedCudaArrayInterface(frame)", "the base cuda array try: import numba.cuda numba.cuda.current_context() except ImportError: pass del self.base", "1) Makes sure that __cuda_array_interface__['strides'] behaves as specified in the protocol. 2) Makes", "needed when the array to deserialize isn't a native cupy array. \"\"\" def", "ary def __del__(self): # Making sure that the cuda context is active #", "out of scope self.base = ary def __del__(self): # Making sure that the", "the cuda context is active # when deallocating the base cuda array try:", "not isinstance(frame, cupy.ndarray): frame = PatchedCudaArrayInterface(frame) arr = cupy.ndarray( header[\"shape\"], dtype=header[\"typestr\"], memptr=cupy.asarray(frame).data )", "that the cuda context is active when deallocating the base cuda array. Notice,", "2) Makes sure that the cuda context is active when deallocating the base", "cupy from .cuda import cuda_serialize, cuda_deserialize class PatchedCudaArrayInterface: \"\"\"This class do two things:", "Save a ref to ary so it won't go out of scope self.base", "__cuda_array_interface__['strides'] behaves as specified in the protocol. 
2) Makes sure that the cuda", "= frames if not isinstance(frame, cupy.ndarray): frame = PatchedCudaArrayInterface(frame) arr = cupy.ndarray( header[\"shape\"],", "# Save a ref to ary so it won't go out of scope", "__init__(self, ary): cai = ary.__cuda_array_interface__ cai_cupy_vsn = cupy.ndarray(0).__cuda_array_interface__[\"version\"] if cai.get(\"strides\") is None and", "ImportError: pass del self.base @cuda_serialize.register(cupy.ndarray) def serialize_cupy_ndarray(x): # Making sure `x` is behaving", "numba.cuda.current_context() except ImportError: pass del self.base @cuda_serialize.register(cupy.ndarray) def serialize_cupy_ndarray(x): # Making sure `x`", "when deallocating the base cuda array. Notice, this is only needed when the", "cai.pop(\"strides\", None) self.__cuda_array_interface__ = cai # Save a ref to ary so it", "return header, [x] @cuda_deserialize.register(cupy.ndarray) def deserialize_cupy_array(header, frames): (frame,) = frames if not isinstance(frame,", "that __cuda_array_interface__['strides'] behaves as specified in the protocol. 2) Makes sure that the", "frames if not isinstance(frame, cupy.ndarray): frame = PatchedCudaArrayInterface(frame) arr = cupy.ndarray( header[\"shape\"], dtype=header[\"typestr\"],", "__del__(self): # Making sure that the cuda context is active # when deallocating", "ary so it won't go out of scope self.base = ary def __del__(self):", "the cuda context is active when deallocating the base cuda array. 
Notice, this", "< 2: cai.pop(\"strides\", None) self.__cuda_array_interface__ = cai # Save a ref to ary", "cuda_deserialize class PatchedCudaArrayInterface: \"\"\"This class do two things: 1) Makes sure that __cuda_array_interface__['strides']", "from .cuda import cuda_serialize, cuda_deserialize class PatchedCudaArrayInterface: \"\"\"This class do two things: 1)", "base cuda array try: import numba.cuda numba.cuda.current_context() except ImportError: pass del self.base @cuda_serialize.register(cupy.ndarray)", "when the array to deserialize isn't a native cupy array. \"\"\" def __init__(self,", "cai_cupy_vsn = cupy.ndarray(0).__cuda_array_interface__[\"version\"] if cai.get(\"strides\") is None and cai_cupy_vsn < 2: cai.pop(\"strides\", None)", "None and cai_cupy_vsn < 2: cai.pop(\"strides\", None) self.__cuda_array_interface__ = cai # Save a", "array. Notice, this is only needed when the array to deserialize isn't a", "x.__cuda_array_interface__.copy() return header, [x] @cuda_deserialize.register(cupy.ndarray) def deserialize_cupy_array(header, frames): (frame,) = frames if not", "class do two things: 1) Makes sure that __cuda_array_interface__['strides'] behaves as specified in", "Efficient serialization GPU arrays. \"\"\" import cupy from .cuda import cuda_serialize, cuda_deserialize class", "\"\"\" import cupy from .cuda import cuda_serialize, cuda_deserialize class PatchedCudaArrayInterface: \"\"\"This class do", "cuda context is active when deallocating the base cuda array. Notice, this is", "array try: import numba.cuda numba.cuda.current_context() except ImportError: pass del self.base @cuda_serialize.register(cupy.ndarray) def serialize_cupy_ndarray(x):", "two things: 1) Makes sure that __cuda_array_interface__['strides'] behaves as specified in the protocol.", "specified in the protocol. 
2) Makes sure that the cuda context is active", "numba.cuda numba.cuda.current_context() except ImportError: pass del self.base @cuda_serialize.register(cupy.ndarray) def serialize_cupy_ndarray(x): # Making sure", "copy=True) header = x.__cuda_array_interface__.copy() return header, [x] @cuda_deserialize.register(cupy.ndarray) def deserialize_cupy_array(header, frames): (frame,) =", ".cuda import cuda_serialize, cuda_deserialize class PatchedCudaArrayInterface: \"\"\"This class do two things: 1) Makes", "is behaving if not x.flags.c_contiguous: x = cupy.array(x, copy=True) header = x.__cuda_array_interface__.copy() return", "header = x.__cuda_array_interface__.copy() return header, [x] @cuda_deserialize.register(cupy.ndarray) def deserialize_cupy_array(header, frames): (frame,) = frames", "as specified in the protocol. 2) Makes sure that the cuda context is", "a ref to ary so it won't go out of scope self.base =", "Making sure `x` is behaving if not x.flags.c_contiguous: x = cupy.array(x, copy=True) header", "things: 1) Makes sure that __cuda_array_interface__['strides'] behaves as specified in the protocol. 2)", "Makes sure that the cuda context is active when deallocating the base cuda", "the protocol. 2) Makes sure that the cuda context is active when deallocating", "cuda array. Notice, this is only needed when the array to deserialize isn't", "# Making sure that the cuda context is active # when deallocating the", "in the protocol. 2) Makes sure that the cuda context is active when", "is active when deallocating the base cuda array. 
Notice, this is only needed", "is only needed when the array to deserialize isn't a native cupy array.", "is None and cai_cupy_vsn < 2: cai.pop(\"strides\", None) self.__cuda_array_interface__ = cai # Save", "self.base @cuda_serialize.register(cupy.ndarray) def serialize_cupy_ndarray(x): # Making sure `x` is behaving if not x.flags.c_contiguous:", "active # when deallocating the base cuda array try: import numba.cuda numba.cuda.current_context() except", "x.flags.c_contiguous: x = cupy.array(x, copy=True) header = x.__cuda_array_interface__.copy() return header, [x] @cuda_deserialize.register(cupy.ndarray) def", "except ImportError: pass del self.base @cuda_serialize.register(cupy.ndarray) def serialize_cupy_ndarray(x): # Making sure `x` is", "to ary so it won't go out of scope self.base = ary def", "serialize_cupy_ndarray(x): # Making sure `x` is behaving if not x.flags.c_contiguous: x = cupy.array(x,", "not x.flags.c_contiguous: x = cupy.array(x, copy=True) header = x.__cuda_array_interface__.copy() return header, [x] @cuda_deserialize.register(cupy.ndarray)", "do two things: 1) Makes sure that __cuda_array_interface__['strides'] behaves as specified in the", "\"\"\" def __init__(self, ary): cai = ary.__cuda_array_interface__ cai_cupy_vsn = cupy.ndarray(0).__cuda_array_interface__[\"version\"] if cai.get(\"strides\") is", "2: cai.pop(\"strides\", None) self.__cuda_array_interface__ = cai # Save a ref to ary so", "\"\"\" Efficient serialization GPU arrays. 
\"\"\" import cupy from .cuda import cuda_serialize, cuda_deserialize", "and cai_cupy_vsn < 2: cai.pop(\"strides\", None) self.__cuda_array_interface__ = cai # Save a ref", "cai_cupy_vsn < 2: cai.pop(\"strides\", None) self.__cuda_array_interface__ = cai # Save a ref to", "cai = ary.__cuda_array_interface__ cai_cupy_vsn = cupy.ndarray(0).__cuda_array_interface__[\"version\"] if cai.get(\"strides\") is None and cai_cupy_vsn <", "this is only needed when the array to deserialize isn't a native cupy", "deserialize isn't a native cupy array. \"\"\" def __init__(self, ary): cai = ary.__cuda_array_interface__", "to deserialize isn't a native cupy array. \"\"\" def __init__(self, ary): cai =", "a native cupy array. \"\"\" def __init__(self, ary): cai = ary.__cuda_array_interface__ cai_cupy_vsn =", "GPU arrays. \"\"\" import cupy from .cuda import cuda_serialize, cuda_deserialize class PatchedCudaArrayInterface: \"\"\"This", "= cupy.ndarray(0).__cuda_array_interface__[\"version\"] if cai.get(\"strides\") is None and cai_cupy_vsn < 2: cai.pop(\"strides\", None) self.__cuda_array_interface__", "`x` is behaving if not x.flags.c_contiguous: x = cupy.array(x, copy=True) header = x.__cuda_array_interface__.copy()", "ref to ary so it won't go out of scope self.base = ary", "= cai # Save a ref to ary so it won't go out", "@cuda_deserialize.register(cupy.ndarray) def deserialize_cupy_array(header, frames): (frame,) = frames if not isinstance(frame, cupy.ndarray): frame =", "go out of scope self.base = ary def __del__(self): # Making sure that", "self.__cuda_array_interface__ = cai # Save a ref to ary so it won't go", "deallocating the base cuda array. 
Notice, this is only needed when the array", "ary.__cuda_array_interface__ cai_cupy_vsn = cupy.ndarray(0).__cuda_array_interface__[\"version\"] if cai.get(\"strides\") is None and cai_cupy_vsn < 2: cai.pop(\"strides\",", "when deallocating the base cuda array try: import numba.cuda numba.cuda.current_context() except ImportError: pass", "deallocating the base cuda array try: import numba.cuda numba.cuda.current_context() except ImportError: pass del", "deserialize_cupy_array(header, frames): (frame,) = frames if not isinstance(frame, cupy.ndarray): frame = PatchedCudaArrayInterface(frame) arr", "won't go out of scope self.base = ary def __del__(self): # Making sure", "behaves as specified in the protocol. 2) Makes sure that the cuda context", "that the cuda context is active # when deallocating the base cuda array", "= x.__cuda_array_interface__.copy() return header, [x] @cuda_deserialize.register(cupy.ndarray) def deserialize_cupy_array(header, frames): (frame,) = frames if", "frames): (frame,) = frames if not isinstance(frame, cupy.ndarray): frame = PatchedCudaArrayInterface(frame) arr =", "arrays. \"\"\" import cupy from .cuda import cuda_serialize, cuda_deserialize class PatchedCudaArrayInterface: \"\"\"This class", "# Making sure `x` is behaving if not x.flags.c_contiguous: x = cupy.array(x, copy=True)", "import cupy from .cuda import cuda_serialize, cuda_deserialize class PatchedCudaArrayInterface: \"\"\"This class do two", "sure that the cuda context is active when deallocating the base cuda array.", "base cuda array. Notice, this is only needed when the array to deserialize" ]
[]
[ "in mapping scheme for {obj.__class__}\" ) continue mapped_key = mapping[key] if isinstance(mapped_key, str):", ") mapping = cls.__mappings__[object_type] result: Dict[str, Any] = {} for key, attribute in", "value in kwargs.items(): self.__setattr__(key, value) def __getattr__(self, attribute_name): if attribute_name not in self.__schema__.properties:", "value: Any) -> None: if attribute_name not in self.__schema__.properties: raise AttributeError( f\"Attribute `{attribute_name}`", "validate(value, property_meta) self.__data__[attribute_name] = value def to_dict(self) -> dict: result = {} for", "ValueError( f\"Property `{key}` is not nullable, \" f\"and must be defined in mapping", "Any) -> None: if attribute_name not in self.__schema__.properties: raise AttributeError( f\"Attribute `{attribute_name}` is", "return cls(**obj) object_type: Type = type(obj) if object_type not in cls.__mappings__: raise ValueError(", "required=required) klass.__schema__ = schema_definition return klass class Schema(metaclass=SchemaMeta): __data__: Dict[str, Any] __schema__: Object", "not specified in {self}.\" ) property_meta = self.__schema__.properties[attribute_name] if isinstance(property_meta, String): try: value", "__setattr__(self, attribute_name: str, value: Any) -> None: if attribute_name not in self.__schema__.properties: raise", "not be mapped to {cls.__name__}. 
\" f\"Have you forgot to define mapping for", "property_meta = self.__schema__.properties[attribute_name] if isinstance(property_meta, String): try: value = property_meta.format_value(value) except ValueError as", "cls.__schema__.properties.items(): if key not in mapping: if attribute.nullable: result[key] = None else: raise", "-> None: if attribute_name not in self.__schema__.properties: raise AttributeError( f\"Attribute `{attribute_name}` is not", "isinstance(obj, dict): return cls(**obj) object_type: Type = type(obj) if object_type not in cls.__mappings__:", "in cls.__mappings__: raise ValueError( f\"Object of type {object_type} could not be mapped to", "create_from(cls, obj: object) -> \"Schema\": if isinstance(obj, dict): return cls(**obj) object_type: Type =", "f\"Property `{key}` is not nullable, \" f\"and must be defined in mapping scheme", "= {} for key, attribute in cls.__schema__.properties.items(): if key not in mapping: if", "dict): return cls(**obj) object_type: Type = type(obj) if object_type not in cls.__mappings__: raise", "{} for key, value in self.__data__.items(): if self.__schema__[key].write_only: continue result[key] = value return", "as e: raise ValidationError(str(e)) else: validate(value, property_meta) self.__data__[attribute_name] = value def to_dict(self) ->", "Any] = {} for key, attribute in cls.__schema__.properties.items(): if key not in mapping:", "name, bases, namespace) klass = super().__new__(mcs, name, bases, namespace) required = [] if", "mapping for the object?\" ) mapping = cls.__mappings__[object_type] result: Dict[str, Any] = {}", "if attribute_name in self.__data__ else None ) def __setattr__(self, attribute_name: str, value: Any)", "object?\" ) mapping = cls.__mappings__[object_type] result: Dict[str, Any] = {} for key, attribute", "Dict] def __init__(self, **kwargs) -> None: super().__setattr__(\"__data__\", {}) for key, value in kwargs.items():", "schema_definition return klass class Schema(metaclass=SchemaMeta): 
__data__: Dict[str, Any] __schema__: Object __mappings__: Dict[Type, Dict]", "is not nullable, \" f\"and must be defined in mapping scheme for {obj.__class__}\"", "in kwargs.items(): self.__setattr__(key, value) def __getattr__(self, attribute_name): if attribute_name not in self.__schema__.properties: raise", "if \"required\" in kwargs: required = kwargs[\"required\"] schema_definition = Object(properties=klass.__annotations__, required=required) klass.__schema__ =", "nullable, \" f\"and must be defined in mapping scheme for {obj.__class__}\" ) continue", "from abc import ABCMeta from typing import Any from typing import Dict from", "return ( self.__data__[attribute_name] if attribute_name in self.__data__ else None ) def __setattr__(self, attribute_name:", "mapped_key == 1: result[key] = getattr(obj, key) elif callable(mapped_key): result[key] = mapped_key(obj) else:", "namespace) klass = super().__new__(mcs, name, bases, namespace) required = [] if \"required\" in", "from typing import Any from typing import Dict from typing import List from", "= value return result @classmethod def create_from(cls, obj: object) -> \"Schema\": if isinstance(obj,", "namespace: dict, **kwargs): if ( f\"{namespace['__module__']}.{namespace['__qualname__']}\" == \"opyapi.schema.schema.Schema\" ): return super().__new__(mcs, name, bases,", "def __new__(mcs: \"SchemaMeta\", name: str, bases: tuple, namespace: dict, **kwargs): if ( f\"{namespace['__module__']}.{namespace['__qualname__']}\"", "Dict from typing import List from typing import Type from .errors import ValidationError", "None ) def __setattr__(self, attribute_name: str, value: Any) -> None: if attribute_name not", "cls(**obj) object_type: Type = type(obj) if object_type not in cls.__mappings__: raise ValueError( f\"Object", "= schema_definition return klass class Schema(metaclass=SchemaMeta): __data__: Dict[str, Any] __schema__: Object __mappings__: Dict[Type,", "True or mapped_key == 1: result[key] = getattr(obj, key) elif 
callable(mapped_key): result[key] =", "raise AttributeError( f\"Attribute `{attribute_name}` is not specified in {self}.\" ) property_meta = self.__schema__.properties[attribute_name]", "= {} for key, value in self.__data__.items(): if self.__schema__[key].write_only: continue result[key] = value", "be mapped to {cls.__name__}. \" f\"Have you forgot to define mapping for the", "{}) for key, value in kwargs.items(): self.__setattr__(key, value) def __getattr__(self, attribute_name): if attribute_name", "object_type: Type = type(obj) if object_type not in cls.__mappings__: raise ValueError( f\"Object of", "is not specified in {self}.\" ) return ( self.__data__[attribute_name] if attribute_name in self.__data__", "= mapping[key] if isinstance(mapped_key, str): result[key] = getattr(obj, mapped_key) elif mapped_key is True", "name, bases, namespace) required = [] if \"required\" in kwargs: required = kwargs[\"required\"]", "f\"{namespace['__module__']}.{namespace['__qualname__']}\" == \"opyapi.schema.schema.Schema\" ): return super().__new__(mcs, name, bases, namespace) klass = super().__new__(mcs, name,", "in cls.__schema__.properties.items(): if key not in mapping: if attribute.nullable: result[key] = None else:", "= super().__new__(mcs, name, bases, namespace) required = [] if \"required\" in kwargs: required", "{obj.__class__}\" ) continue mapped_key = mapping[key] if isinstance(mapped_key, str): result[key] = getattr(obj, mapped_key)", "__getattr__(self, attribute_name): if attribute_name not in self.__schema__.properties: raise AttributeError( f\"Attribute `{attribute_name}` is not", "[] if \"required\" in kwargs: required = kwargs[\"required\"] schema_definition = Object(properties=klass.__annotations__, required=required) klass.__schema__", "None: if attribute_name not in self.__schema__.properties: raise AttributeError( f\"Attribute `{attribute_name}` is not specified", "import validate class SchemaMeta(ABCMeta): def __new__(mcs: \"SchemaMeta\", name: str, 
bases: tuple, namespace: dict,", "mapping scheme for {obj.__class__}\" ) continue mapped_key = mapping[key] if isinstance(mapped_key, str): result[key]", "is True or mapped_key == 1: result[key] = getattr(obj, key) elif callable(mapped_key): result[key]", "for the object?\" ) mapping = cls.__mappings__[object_type] result: Dict[str, Any] = {} for", "from .types import Object from .types import String from .validators.validate import validate class", "super().__setattr__(\"__data__\", {}) for key, value in kwargs.items(): self.__setattr__(key, value) def __getattr__(self, attribute_name): if", "@classmethod def create_from(cls, obj: object) -> \"Schema\": if isinstance(obj, dict): return cls(**obj) object_type:", "`{key}` is not nullable, \" f\"and must be defined in mapping scheme for", "class Schema(metaclass=SchemaMeta): __data__: Dict[str, Any] __schema__: Object __mappings__: Dict[Type, Dict] def __init__(self, **kwargs)", "bases: tuple, namespace: dict, **kwargs): if ( f\"{namespace['__module__']}.{namespace['__qualname__']}\" == \"opyapi.schema.schema.Schema\" ): return super().__new__(mcs,", "if ( f\"{namespace['__module__']}.{namespace['__qualname__']}\" == \"opyapi.schema.schema.Schema\" ): return super().__new__(mcs, name, bases, namespace) klass =", "typing import Dict from typing import List from typing import Type from .errors", "{cls.__name__}. \" f\"Have you forgot to define mapping for the object?\" ) mapping", "typing import List from typing import Type from .errors import ValidationError from .types", "if attribute_name not in self.__schema__.properties: raise AttributeError( f\"Attribute `{attribute_name}` is not specified in", "self.__data__ else None ) def __setattr__(self, attribute_name: str, value: Any) -> None: if", "else: raise ValueError( f\"Property `{key}` is not nullable, \" f\"and must be defined", "{object_type} could not be mapped to {cls.__name__}. 
\" f\"Have you forgot to define", "None else: raise ValueError( f\"Property `{key}` is not nullable, \" f\"and must be", "in self.__schema__.properties: raise AttributeError( f\"Attribute `{attribute_name}` is not specified in {self}.\" ) property_meta", "Object from .types import String from .validators.validate import validate class SchemaMeta(ABCMeta): def __new__(mcs:", "if isinstance(property_meta, String): try: value = property_meta.format_value(value) except ValueError as e: raise ValidationError(str(e))", "except ValueError as e: raise ValidationError(str(e)) else: validate(value, property_meta) self.__data__[attribute_name] = value def", "not in mapping: if attribute.nullable: result[key] = None else: raise ValueError( f\"Property `{key}`", "bases, namespace) klass = super().__new__(mcs, name, bases, namespace) required = [] if \"required\"", "f\"Attribute `{attribute_name}` is not specified in {self}.\" ) property_meta = self.__schema__.properties[attribute_name] if isinstance(property_meta,", "or mapped_key == 1: result[key] = getattr(obj, key) elif callable(mapped_key): result[key] = mapped_key(obj)", "define mapping for the object?\" ) mapping = cls.__mappings__[object_type] result: Dict[str, Any] =", "validate class SchemaMeta(ABCMeta): def __new__(mcs: \"SchemaMeta\", name: str, bases: tuple, namespace: dict, **kwargs):", "from .validators.validate import validate class SchemaMeta(ABCMeta): def __new__(mcs: \"SchemaMeta\", name: str, bases: tuple,", "of type {object_type} could not be mapped to {cls.__name__}. 
\" f\"Have you forgot", "\" f\"and must be defined in mapping scheme for {obj.__class__}\" ) continue mapped_key", "== \"opyapi.schema.schema.Schema\" ): return super().__new__(mcs, name, bases, namespace) klass = super().__new__(mcs, name, bases,", "dict: result = {} for key, value in self.__data__.items(): if self.__schema__[key].write_only: continue result[key]", "ABCMeta from typing import Any from typing import Dict from typing import List", "import ABCMeta from typing import Any from typing import Dict from typing import", "mapped_key is True or mapped_key == 1: result[key] = getattr(obj, key) elif callable(mapped_key):", "def to_dict(self) -> dict: result = {} for key, value in self.__data__.items(): if", ") def __setattr__(self, attribute_name: str, value: Any) -> None: if attribute_name not in", "if key not in mapping: if attribute.nullable: result[key] = None else: raise ValueError(", "result[key] = getattr(obj, key) elif callable(mapped_key): result[key] = mapped_key(obj) else: raise ValueError( f\"Property", "name: str, bases: tuple, namespace: dict, **kwargs): if ( f\"{namespace['__module__']}.{namespace['__qualname__']}\" == \"opyapi.schema.schema.Schema\" ):", ") return ( self.__data__[attribute_name] if attribute_name in self.__data__ else None ) def __setattr__(self,", "callable(mapped_key): result[key] = mapped_key(obj) else: raise ValueError( f\"Property {key} has invalid mapping setting", "__mappings__: Dict[Type, Dict] def __init__(self, **kwargs) -> None: super().__setattr__(\"__data__\", {}) for key, value", "for key, value in self.__data__.items(): if self.__schema__[key].write_only: continue result[key] = value return result", "result = {} for key, value in self.__data__.items(): if self.__schema__[key].write_only: continue result[key] =", "result[key] = None else: raise ValueError( f\"Property `{key}` is not nullable, \" f\"and", "else: validate(value, property_meta) self.__data__[attribute_name] = value def to_dict(self) -> dict: result = 
{}", "= Object(properties=klass.__annotations__, required=required) klass.__schema__ = schema_definition return klass class Schema(metaclass=SchemaMeta): __data__: Dict[str, Any]", "scheme for {obj.__class__}\" ) continue mapped_key = mapping[key] if isinstance(mapped_key, str): result[key] =", "for {obj.__class__}\" ) continue mapped_key = mapping[key] if isinstance(mapped_key, str): result[key] = getattr(obj,", "Dict[Type, Dict] def __init__(self, **kwargs) -> None: super().__setattr__(\"__data__\", {}) for key, value in", "import ValidationError from .types import Object from .types import String from .validators.validate import", "raise AttributeError( f\"Attribute `{attribute_name}` is not specified in {self}.\" ) return ( self.__data__[attribute_name]", "self.__data__[attribute_name] if attribute_name in self.__data__ else None ) def __setattr__(self, attribute_name: str, value:", "bases, namespace) required = [] if \"required\" in kwargs: required = kwargs[\"required\"] schema_definition", "kwargs: required = kwargs[\"required\"] schema_definition = Object(properties=klass.__annotations__, required=required) klass.__schema__ = schema_definition return klass", "type {object_type} could not be mapped to {cls.__name__}. 
\" f\"Have you forgot to", "for key, attribute in cls.__schema__.properties.items(): if key not in mapping: if attribute.nullable: result[key]", "( self.__data__[attribute_name] if attribute_name in self.__data__ else None ) def __setattr__(self, attribute_name: str,", "cls.__mappings__: raise ValueError( f\"Object of type {object_type} could not be mapped to {cls.__name__}.", "super().__new__(mcs, name, bases, namespace) required = [] if \"required\" in kwargs: required =", "{} for key, attribute in cls.__schema__.properties.items(): if key not in mapping: if attribute.nullable:", "try: value = property_meta.format_value(value) except ValueError as e: raise ValidationError(str(e)) else: validate(value, property_meta)", "import Type from .errors import ValidationError from .types import Object from .types import", "Type from .errors import ValidationError from .types import Object from .types import String", "from typing import Type from .errors import ValidationError from .types import Object from", "you forgot to define mapping for the object?\" ) mapping = cls.__mappings__[object_type] result:", "self.__setattr__(key, value) def __getattr__(self, attribute_name): if attribute_name not in self.__schema__.properties: raise AttributeError( f\"Attribute", "continue mapped_key = mapping[key] if isinstance(mapped_key, str): result[key] = getattr(obj, mapped_key) elif mapped_key", "e: raise ValidationError(str(e)) else: validate(value, property_meta) self.__data__[attribute_name] = value def to_dict(self) -> dict:", "key) elif callable(mapped_key): result[key] = mapped_key(obj) else: raise ValueError( f\"Property {key} has invalid", "required = [] if \"required\" in kwargs: required = kwargs[\"required\"] schema_definition = Object(properties=klass.__annotations__,", "{key} has invalid mapping setting for object {obj.__class__}.\" ) return cls(**result) __all__ =", "not nullable, \" f\"and must be defined in mapping scheme for {obj.__class__}\" )", "= getattr(obj, key) 
elif callable(mapped_key): result[key] = mapped_key(obj) else: raise ValueError( f\"Property {key}", "mapping[key] if isinstance(mapped_key, str): result[key] = getattr(obj, mapped_key) elif mapped_key is True or", "to define mapping for the object?\" ) mapping = cls.__mappings__[object_type] result: Dict[str, Any]", ".types import Object from .types import String from .validators.validate import validate class SchemaMeta(ABCMeta):", "type(obj) if object_type not in cls.__mappings__: raise ValueError( f\"Object of type {object_type} could", "f\"and must be defined in mapping scheme for {obj.__class__}\" ) continue mapped_key =", "\"Schema\": if isinstance(obj, dict): return cls(**obj) object_type: Type = type(obj) if object_type not", "value def to_dict(self) -> dict: result = {} for key, value in self.__data__.items():", "String): try: value = property_meta.format_value(value) except ValueError as e: raise ValidationError(str(e)) else: validate(value,", "schema_definition = Object(properties=klass.__annotations__, required=required) klass.__schema__ = schema_definition return klass class Schema(metaclass=SchemaMeta): __data__: Dict[str,", "forgot to define mapping for the object?\" ) mapping = cls.__mappings__[object_type] result: Dict[str,", "= property_meta.format_value(value) except ValueError as e: raise ValidationError(str(e)) else: validate(value, property_meta) self.__data__[attribute_name] =", "= None else: raise ValueError( f\"Property `{key}` is not nullable, \" f\"and must", "self.__schema__.properties: raise AttributeError( f\"Attribute `{attribute_name}` is not specified in {self}.\" ) property_meta =", "mapping = cls.__mappings__[object_type] result: Dict[str, Any] = {} for key, attribute in cls.__schema__.properties.items():", "**kwargs): if ( f\"{namespace['__module__']}.{namespace['__qualname__']}\" == \"opyapi.schema.schema.Schema\" ): return super().__new__(mcs, name, bases, namespace) klass", "`{attribute_name}` is not specified in {self}.\" ) 
return ( self.__data__[attribute_name] if attribute_name in", "in kwargs: required = kwargs[\"required\"] schema_definition = Object(properties=klass.__annotations__, required=required) klass.__schema__ = schema_definition return", "if object_type not in cls.__mappings__: raise ValueError( f\"Object of type {object_type} could not", "obj: object) -> \"Schema\": if isinstance(obj, dict): return cls(**obj) object_type: Type = type(obj)", "must be defined in mapping scheme for {obj.__class__}\" ) continue mapped_key = mapping[key]", "attribute.nullable: result[key] = None else: raise ValueError( f\"Property `{key}` is not nullable, \"", "List from typing import Type from .errors import ValidationError from .types import Object", "Object __mappings__: Dict[Type, Dict] def __init__(self, **kwargs) -> None: super().__setattr__(\"__data__\", {}) for key,", "object) -> \"Schema\": if isinstance(obj, dict): return cls(**obj) object_type: Type = type(obj) if", "kwargs[\"required\"] schema_definition = Object(properties=klass.__annotations__, required=required) klass.__schema__ = schema_definition return klass class Schema(metaclass=SchemaMeta): __data__:", "mapped_key) elif mapped_key is True or mapped_key == 1: result[key] = getattr(obj, key)", "return super().__new__(mcs, name, bases, namespace) klass = super().__new__(mcs, name, bases, namespace) required =", "__init__(self, **kwargs) -> None: super().__setattr__(\"__data__\", {}) for key, value in kwargs.items(): self.__setattr__(key, value)", "import String from .validators.validate import validate class SchemaMeta(ABCMeta): def __new__(mcs: \"SchemaMeta\", name: str,", "self.__schema__.properties: raise AttributeError( f\"Attribute `{attribute_name}` is not specified in {self}.\" ) return (", "in self.__data__.items(): if self.__schema__[key].write_only: continue result[key] = value return result @classmethod def create_from(cls,", "result @classmethod def create_from(cls, obj: object) -> \"Schema\": if isinstance(obj, 
dict): return cls(**obj)", "in {self}.\" ) property_meta = self.__schema__.properties[attribute_name] if isinstance(property_meta, String): try: value = property_meta.format_value(value)", "elif mapped_key is True or mapped_key == 1: result[key] = getattr(obj, key) elif", ".errors import ValidationError from .types import Object from .types import String from .validators.validate", ".validators.validate import validate class SchemaMeta(ABCMeta): def __new__(mcs: \"SchemaMeta\", name: str, bases: tuple, namespace:", "raise ValueError( f\"Property {key} has invalid mapping setting for object {obj.__class__}.\" ) return", "value in self.__data__.items(): if self.__schema__[key].write_only: continue result[key] = value return result @classmethod def", "super().__new__(mcs, name, bases, namespace) klass = super().__new__(mcs, name, bases, namespace) required = []", "kwargs.items(): self.__setattr__(key, value) def __getattr__(self, attribute_name): if attribute_name not in self.__schema__.properties: raise AttributeError(", "specified in {self}.\" ) property_meta = self.__schema__.properties[attribute_name] if isinstance(property_meta, String): try: value =", "ValueError( f\"Object of type {object_type} could not be mapped to {cls.__name__}. \" f\"Have", "typing import Any from typing import Dict from typing import List from typing", "raise ValidationError(str(e)) else: validate(value, property_meta) self.__data__[attribute_name] = value def to_dict(self) -> dict: result", "raise ValueError( f\"Object of type {object_type} could not be mapped to {cls.__name__}. 
\"", "cls.__mappings__[object_type] result: Dict[str, Any] = {} for key, attribute in cls.__schema__.properties.items(): if key", "getattr(obj, mapped_key) elif mapped_key is True or mapped_key == 1: result[key] = getattr(obj,", "__new__(mcs: \"SchemaMeta\", name: str, bases: tuple, namespace: dict, **kwargs): if ( f\"{namespace['__module__']}.{namespace['__qualname__']}\" ==", "value) def __getattr__(self, attribute_name): if attribute_name not in self.__schema__.properties: raise AttributeError( f\"Attribute `{attribute_name}`", "None: super().__setattr__(\"__data__\", {}) for key, value in kwargs.items(): self.__setattr__(key, value) def __getattr__(self, attribute_name):", "ValueError( f\"Property {key} has invalid mapping setting for object {obj.__class__}.\" ) return cls(**result)", "( f\"{namespace['__module__']}.{namespace['__qualname__']}\" == \"opyapi.schema.schema.Schema\" ): return super().__new__(mcs, name, bases, namespace) klass = super().__new__(mcs,", "for key, value in kwargs.items(): self.__setattr__(key, value) def __getattr__(self, attribute_name): if attribute_name not", "str): result[key] = getattr(obj, mapped_key) elif mapped_key is True or mapped_key == 1:", "def __getattr__(self, attribute_name): if attribute_name not in self.__schema__.properties: raise AttributeError( f\"Attribute `{attribute_name}` is", "result[key] = value return result @classmethod def create_from(cls, obj: object) -> \"Schema\": if", "**kwargs) -> None: super().__setattr__(\"__data__\", {}) for key, value in kwargs.items(): self.__setattr__(key, value) def", "from .errors import ValidationError from .types import Object from .types import String from", "Dict[str, Any] __schema__: Object __mappings__: Dict[Type, Dict] def __init__(self, **kwargs) -> None: super().__setattr__(\"__data__\",", "typing import Type from .errors import ValidationError from .types import Object from .types", "value = property_meta.format_value(value) except ValueError as e: raise 
ValidationError(str(e)) else: validate(value, property_meta) self.__data__[attribute_name]", "elif callable(mapped_key): result[key] = mapped_key(obj) else: raise ValueError( f\"Property {key} has invalid mapping", "attribute_name in self.__data__ else None ) def __setattr__(self, attribute_name: str, value: Any) ->", "result[key] = getattr(obj, mapped_key) elif mapped_key is True or mapped_key == 1: result[key]", "-> dict: result = {} for key, value in self.__data__.items(): if self.__schema__[key].write_only: continue", "\"opyapi.schema.schema.Schema\" ): return super().__new__(mcs, name, bases, namespace) klass = super().__new__(mcs, name, bases, namespace)", "\"SchemaMeta\", name: str, bases: tuple, namespace: dict, **kwargs): if ( f\"{namespace['__module__']}.{namespace['__qualname__']}\" == \"opyapi.schema.schema.Schema\"", "in self.__data__ else None ) def __setattr__(self, attribute_name: str, value: Any) -> None:", "isinstance(property_meta, String): try: value = property_meta.format_value(value) except ValueError as e: raise ValidationError(str(e)) else:", "\" f\"Have you forgot to define mapping for the object?\" ) mapping =", "abc import ABCMeta from typing import Any from typing import Dict from typing", "klass.__schema__ = schema_definition return klass class Schema(metaclass=SchemaMeta): __data__: Dict[str, Any] __schema__: Object __mappings__:", "SchemaMeta(ABCMeta): def __new__(mcs: \"SchemaMeta\", name: str, bases: tuple, namespace: dict, **kwargs): if (", "import Dict from typing import List from typing import Type from .errors import", "str, value: Any) -> None: if attribute_name not in self.__schema__.properties: raise AttributeError( f\"Attribute", "key, value in kwargs.items(): self.__setattr__(key, value) def __getattr__(self, attribute_name): if attribute_name not in", "return result @classmethod def create_from(cls, obj: object) -> \"Schema\": if isinstance(obj, dict): return", "self.__data__[attribute_name] = value def to_dict(self) -> 
dict: result = {} for key, value", "Dict[str, Any] = {} for key, attribute in cls.__schema__.properties.items(): if key not in", ") property_meta = self.__schema__.properties[attribute_name] if isinstance(property_meta, String): try: value = property_meta.format_value(value) except ValueError", "Any] __schema__: Object __mappings__: Dict[Type, Dict] def __init__(self, **kwargs) -> None: super().__setattr__(\"__data__\", {})", "to {cls.__name__}. \" f\"Have you forgot to define mapping for the object?\" )", "property_meta) self.__data__[attribute_name] = value def to_dict(self) -> dict: result = {} for key,", "= cls.__mappings__[object_type] result: Dict[str, Any] = {} for key, attribute in cls.__schema__.properties.items(): if", ") continue mapped_key = mapping[key] if isinstance(mapped_key, str): result[key] = getattr(obj, mapped_key) elif", "Object(properties=klass.__annotations__, required=required) klass.__schema__ = schema_definition return klass class Schema(metaclass=SchemaMeta): __data__: Dict[str, Any] __schema__:", ".types import String from .validators.validate import validate class SchemaMeta(ABCMeta): def __new__(mcs: \"SchemaMeta\", name:", "not in cls.__mappings__: raise ValueError( f\"Object of type {object_type} could not be mapped", "klass = super().__new__(mcs, name, bases, namespace) required = [] if \"required\" in kwargs:", "mapped_key = mapping[key] if isinstance(mapped_key, str): result[key] = getattr(obj, mapped_key) elif mapped_key is", "be defined in mapping scheme for {obj.__class__}\" ) continue mapped_key = mapping[key] if", "== 1: result[key] = getattr(obj, key) elif callable(mapped_key): result[key] = mapped_key(obj) else: raise", "result: Dict[str, Any] = {} for key, attribute in cls.__schema__.properties.items(): if key not", "self.__schema__.properties[attribute_name] if isinstance(property_meta, String): try: value = property_meta.format_value(value) except ValueError as e: raise", "__schema__: Object __mappings__: Dict[Type, 
Dict] def __init__(self, **kwargs) -> None: super().__setattr__(\"__data__\", {}) for", "to_dict(self) -> dict: result = {} for key, value in self.__data__.items(): if self.__schema__[key].write_only:", "result[key] = mapped_key(obj) else: raise ValueError( f\"Property {key} has invalid mapping setting for", "defined in mapping scheme for {obj.__class__}\" ) continue mapped_key = mapping[key] if isinstance(mapped_key,", "ValidationError(str(e)) else: validate(value, property_meta) self.__data__[attribute_name] = value def to_dict(self) -> dict: result =", "def __setattr__(self, attribute_name: str, value: Any) -> None: if attribute_name not in self.__schema__.properties:", "from typing import List from typing import Type from .errors import ValidationError from", "getattr(obj, key) elif callable(mapped_key): result[key] = mapped_key(obj) else: raise ValueError( f\"Property {key} has", "str, bases: tuple, namespace: dict, **kwargs): if ( f\"{namespace['__module__']}.{namespace['__qualname__']}\" == \"opyapi.schema.schema.Schema\" ): return", "tuple, namespace: dict, **kwargs): if ( f\"{namespace['__module__']}.{namespace['__qualname__']}\" == \"opyapi.schema.schema.Schema\" ): return super().__new__(mcs, name,", "attribute_name): if attribute_name not in self.__schema__.properties: raise AttributeError( f\"Attribute `{attribute_name}` is not specified", "the object?\" ) mapping = cls.__mappings__[object_type] result: Dict[str, Any] = {} for key,", "ValidationError from .types import Object from .types import String from .validators.validate import validate", "namespace) required = [] if \"required\" in kwargs: required = kwargs[\"required\"] schema_definition =", "dict, **kwargs): if ( f\"{namespace['__module__']}.{namespace['__qualname__']}\" == \"opyapi.schema.schema.Schema\" ): return super().__new__(mcs, name, bases, namespace)", "else: raise ValueError( f\"Property {key} has invalid mapping setting for object {obj.__class__}.\" )", "ValueError as e: raise 
ValidationError(str(e)) else: validate(value, property_meta) self.__data__[attribute_name] = value def to_dict(self)", "key not in mapping: if attribute.nullable: result[key] = None else: raise ValueError( f\"Property", "mapping: if attribute.nullable: result[key] = None else: raise ValueError( f\"Property `{key}` is not", "import Any from typing import Dict from typing import List from typing import", "AttributeError( f\"Attribute `{attribute_name}` is not specified in {self}.\" ) return ( self.__data__[attribute_name] if", "__data__: Dict[str, Any] __schema__: Object __mappings__: Dict[Type, Dict] def __init__(self, **kwargs) -> None:", "-> \"Schema\": if isinstance(obj, dict): return cls(**obj) object_type: Type = type(obj) if object_type", "def create_from(cls, obj: object) -> \"Schema\": if isinstance(obj, dict): return cls(**obj) object_type: Type", "return klass class Schema(metaclass=SchemaMeta): __data__: Dict[str, Any] __schema__: Object __mappings__: Dict[Type, Dict] def", "else None ) def __setattr__(self, attribute_name: str, value: Any) -> None: if attribute_name", "mapped to {cls.__name__}. 
\" f\"Have you forgot to define mapping for the object?\"", "key, value in self.__data__.items(): if self.__schema__[key].write_only: continue result[key] = value return result @classmethod", "from typing import Dict from typing import List from typing import Type from", "String from .validators.validate import validate class SchemaMeta(ABCMeta): def __new__(mcs: \"SchemaMeta\", name: str, bases:", "not in self.__schema__.properties: raise AttributeError( f\"Attribute `{attribute_name}` is not specified in {self}.\" )", "attribute in cls.__schema__.properties.items(): if key not in mapping: if attribute.nullable: result[key] = None", "in self.__schema__.properties: raise AttributeError( f\"Attribute `{attribute_name}` is not specified in {self}.\" ) return", "= type(obj) if object_type not in cls.__mappings__: raise ValueError( f\"Object of type {object_type}", "self.__data__.items(): if self.__schema__[key].write_only: continue result[key] = value return result @classmethod def create_from(cls, obj:", "if isinstance(mapped_key, str): result[key] = getattr(obj, mapped_key) elif mapped_key is True or mapped_key", "not specified in {self}.\" ) return ( self.__data__[attribute_name] if attribute_name in self.__data__ else", "Type = type(obj) if object_type not in cls.__mappings__: raise ValueError( f\"Object of type", "attribute_name: str, value: Any) -> None: if attribute_name not in self.__schema__.properties: raise AttributeError(", "continue result[key] = value return result @classmethod def create_from(cls, obj: object) -> \"Schema\":", "= self.__schema__.properties[attribute_name] if isinstance(property_meta, String): try: value = property_meta.format_value(value) except ValueError as e:", "f\"Object of type {object_type} could not be mapped to {cls.__name__}. 
\" f\"Have you", "1: result[key] = getattr(obj, key) elif callable(mapped_key): result[key] = mapped_key(obj) else: raise ValueError(", "Schema(metaclass=SchemaMeta): __data__: Dict[str, Any] __schema__: Object __mappings__: Dict[Type, Dict] def __init__(self, **kwargs) ->", "value return result @classmethod def create_from(cls, obj: object) -> \"Schema\": if isinstance(obj, dict):", "= getattr(obj, mapped_key) elif mapped_key is True or mapped_key == 1: result[key] =", "Any from typing import Dict from typing import List from typing import Type", "f\"Attribute `{attribute_name}` is not specified in {self}.\" ) return ( self.__data__[attribute_name] if attribute_name", "= kwargs[\"required\"] schema_definition = Object(properties=klass.__annotations__, required=required) klass.__schema__ = schema_definition return klass class Schema(metaclass=SchemaMeta):", "): return super().__new__(mcs, name, bases, namespace) klass = super().__new__(mcs, name, bases, namespace) required", "from .types import String from .validators.validate import validate class SchemaMeta(ABCMeta): def __new__(mcs: \"SchemaMeta\",", "attribute_name not in self.__schema__.properties: raise AttributeError( f\"Attribute `{attribute_name}` is not specified in {self}.\"", "<reponame>dkraczkowski/opyapi from abc import ABCMeta from typing import Any from typing import Dict", "def __init__(self, **kwargs) -> None: super().__setattr__(\"__data__\", {}) for key, value in kwargs.items(): self.__setattr__(key,", "if self.__schema__[key].write_only: continue result[key] = value return result @classmethod def create_from(cls, obj: object)", "if isinstance(obj, dict): return cls(**obj) object_type: Type = type(obj) if object_type not in", "import Object from .types import String from .validators.validate import validate class SchemaMeta(ABCMeta): def", "class SchemaMeta(ABCMeta): def __new__(mcs: \"SchemaMeta\", name: str, bases: tuple, namespace: dict, **kwargs): if", "could not be mapped to {cls.__name__}. 
\" f\"Have you forgot to define mapping", "is not specified in {self}.\" ) property_meta = self.__schema__.properties[attribute_name] if isinstance(property_meta, String): try:", "raise ValueError( f\"Property `{key}` is not nullable, \" f\"and must be defined in", "mapped_key(obj) else: raise ValueError( f\"Property {key} has invalid mapping setting for object {obj.__class__}.\"", "import List from typing import Type from .errors import ValidationError from .types import", "in mapping: if attribute.nullable: result[key] = None else: raise ValueError( f\"Property `{key}` is", "= mapped_key(obj) else: raise ValueError( f\"Property {key} has invalid mapping setting for object", "property_meta.format_value(value) except ValueError as e: raise ValidationError(str(e)) else: validate(value, property_meta) self.__data__[attribute_name] = value", "required = kwargs[\"required\"] schema_definition = Object(properties=klass.__annotations__, required=required) klass.__schema__ = schema_definition return klass class", "f\"Property {key} has invalid mapping setting for object {obj.__class__}.\" ) return cls(**result) __all__", "{self}.\" ) property_meta = self.__schema__.properties[attribute_name] if isinstance(property_meta, String): try: value = property_meta.format_value(value) except", "in {self}.\" ) return ( self.__data__[attribute_name] if attribute_name in self.__data__ else None )", "has invalid mapping setting for object {obj.__class__}.\" ) return cls(**result) __all__ = [\"Schema\"]", "self.__schema__[key].write_only: continue result[key] = value return result @classmethod def create_from(cls, obj: object) ->", "klass class Schema(metaclass=SchemaMeta): __data__: Dict[str, Any] __schema__: Object __mappings__: Dict[Type, Dict] def __init__(self,", "\"required\" in kwargs: required = kwargs[\"required\"] schema_definition = Object(properties=klass.__annotations__, required=required) klass.__schema__ = schema_definition", "`{attribute_name}` is not specified in 
{self}.\" ) property_meta = self.__schema__.properties[attribute_name] if isinstance(property_meta, String):", "= value def to_dict(self) -> dict: result = {} for key, value in", "AttributeError( f\"Attribute `{attribute_name}` is not specified in {self}.\" ) property_meta = self.__schema__.properties[attribute_name] if", "f\"Have you forgot to define mapping for the object?\" ) mapping = cls.__mappings__[object_type]", "if attribute.nullable: result[key] = None else: raise ValueError( f\"Property `{key}` is not nullable,", "isinstance(mapped_key, str): result[key] = getattr(obj, mapped_key) elif mapped_key is True or mapped_key ==", "-> None: super().__setattr__(\"__data__\", {}) for key, value in kwargs.items(): self.__setattr__(key, value) def __getattr__(self,", "{self}.\" ) return ( self.__data__[attribute_name] if attribute_name in self.__data__ else None ) def", "key, attribute in cls.__schema__.properties.items(): if key not in mapping: if attribute.nullable: result[key] =", "specified in {self}.\" ) return ( self.__data__[attribute_name] if attribute_name in self.__data__ else None", "object_type not in cls.__mappings__: raise ValueError( f\"Object of type {object_type} could not be", "= [] if \"required\" in kwargs: required = kwargs[\"required\"] schema_definition = Object(properties=klass.__annotations__, required=required)" ]
[ "2): degree.append(i ** 3 + value) for item in degree: str_degree = str(item)", "item in degree: str_degree = str(item) pred_sum = 0 for x in str_degree:", "+= int_degree del_7 = pred_sum % 7 if del_7 == 0: sum_numers +=", "for i in range(1, 1000, 2): degree.append(i ** 3 + value) for item", "str(item) pred_sum = 0 for x in str_degree: int_degree = int(x) pred_sum +=", "pred_sum = 0 for x in str_degree: int_degree = int(x) pred_sum += int_degree", "pred_sum += int_degree del_7 = pred_sum % 7 if del_7 == 0: sum_numers", "degree: str_degree = str(item) pred_sum = 0 for x in str_degree: int_degree =", "** 3 + value) for item in degree: str_degree = str(item) pred_sum =", "degree = [] sum_numers = 0 for i in range(1, 1000, 2): degree.append(i", "function(value): degree = [] sum_numers = 0 for i in range(1, 1000, 2):", "<gh_stars>0 def function(value): degree = [] sum_numers = 0 for i in range(1,", "sum_numers = 0 for i in range(1, 1000, 2): degree.append(i ** 3 +", "= int(x) pred_sum += int_degree del_7 = pred_sum % 7 if del_7 ==", "int(x) pred_sum += int_degree del_7 = pred_sum % 7 if del_7 == 0:", "pred_sum % 7 if del_7 == 0: sum_numers += int(str_degree) print(sum_numers) function(0) function(17)", "int_degree del_7 = pred_sum % 7 if del_7 == 0: sum_numers += int(str_degree)", "del_7 = pred_sum % 7 if del_7 == 0: sum_numers += int(str_degree) print(sum_numers)", "def function(value): degree = [] sum_numers = 0 for i in range(1, 1000,", "range(1, 1000, 2): degree.append(i ** 3 + value) for item in degree: str_degree", "int_degree = int(x) pred_sum += int_degree del_7 = pred_sum % 7 if del_7", "in str_degree: int_degree = int(x) pred_sum += int_degree del_7 = pred_sum % 7", "= 0 for x in str_degree: int_degree = int(x) pred_sum += int_degree del_7", "for x in str_degree: int_degree = int(x) pred_sum += int_degree del_7 = pred_sum", "i in range(1, 1000, 2): degree.append(i ** 3 + value) for item in", "[] sum_numers = 0 for i in range(1, 1000, 2): degree.append(i ** 
3", "value) for item in degree: str_degree = str(item) pred_sum = 0 for x", "degree.append(i ** 3 + value) for item in degree: str_degree = str(item) pred_sum", "x in str_degree: int_degree = int(x) pred_sum += int_degree del_7 = pred_sum %", "1000, 2): degree.append(i ** 3 + value) for item in degree: str_degree =", "= pred_sum % 7 if del_7 == 0: sum_numers += int(str_degree) print(sum_numers) function(0)", "+ value) for item in degree: str_degree = str(item) pred_sum = 0 for", "= 0 for i in range(1, 1000, 2): degree.append(i ** 3 + value)", "str_degree = str(item) pred_sum = 0 for x in str_degree: int_degree = int(x)", "in range(1, 1000, 2): degree.append(i ** 3 + value) for item in degree:", "= str(item) pred_sum = 0 for x in str_degree: int_degree = int(x) pred_sum", "str_degree: int_degree = int(x) pred_sum += int_degree del_7 = pred_sum % 7 if", "0 for i in range(1, 1000, 2): degree.append(i ** 3 + value) for", "3 + value) for item in degree: str_degree = str(item) pred_sum = 0", "for item in degree: str_degree = str(item) pred_sum = 0 for x in", "= [] sum_numers = 0 for i in range(1, 1000, 2): degree.append(i **", "0 for x in str_degree: int_degree = int(x) pred_sum += int_degree del_7 =", "in degree: str_degree = str(item) pred_sum = 0 for x in str_degree: int_degree" ]
[ "find what the day is on that date. Input Format A single line", "date. Your task is to find what the day is on that date.", "5th 2015 was WEDNESDAY. ''' # Enter your code here. Read input from", "import calendar as cal day={0:'MONDAY',1:'TUESDAY',2:'WEDNESDAY',3:'THURSDAY',4:'FRIDAY',5:'SATURDAY',6:'SUNDAY'} n=list(map(int,input().split())) if n[2] in range(2001,3000): n1=cal.weekday(n[2],n[0],n[1]) for i", "date. Input Format A single line of input containing the space separated month,", "Sample Input 08 05 2015 Sample Output WEDNESDAY Explanation The day on August", "A single line of input containing the space separated month, day and year,", "here. Read input from STDIN. Print output to STDOUT import calendar as cal", "Output Format Output the correct day in capital letters. Sample Input 08 05", "Format A single line of input containing the space separated month, day and", "of input containing the space separated month, day and year, respectively, in format.", "n[2] in range(2001,3000): n1=cal.weekday(n[2],n[0],n[1]) for i in day: if i==n1: print(day[i]) ''' output:", "Task You are given a date. Your task is to find what the", "range(2001,3000): n1=cal.weekday(n[2],n[0],n[1]) for i in day: if i==n1: print(day[i]) ''' output: 08 05", "Explanation The day on August 5th 2015 was WEDNESDAY. ''' # Enter your", "capital letters. Sample Input 08 05 2015 Sample Output WEDNESDAY Explanation The day", "is on that date. Input Format A single line of input containing the", "to STDOUT import calendar as cal day={0:'MONDAY',1:'TUESDAY',2:'WEDNESDAY',3:'THURSDAY',4:'FRIDAY',5:'SATURDAY',6:'SUNDAY'} n=list(map(int,input().split())) if n[2] in range(2001,3000): n1=cal.weekday(n[2],n[0],n[1])", "is to find what the day is on that date. Input Format A", "correct day in capital letters. Sample Input 08 05 2015 Sample Output WEDNESDAY", "WEDNESDAY. ''' # Enter your code here. Read input from STDIN. Print output", "2015 was WEDNESDAY. ''' # Enter your code here. 
Read input from STDIN.", "to find what the day is on that date. Input Format A single", "code here. Read input from STDIN. Print output to STDOUT import calendar as", "i in day: if i==n1: print(day[i]) ''' output: 08 05 2015 WEDNESDAY '''", "format. Constraints * 2000<year<3000 Output Format Output the correct day in capital letters.", "2000<year<3000 Output Format Output the correct day in capital letters. Sample Input 08", "You are given a date. Your task is to find what the day", "year, respectively, in format. Constraints * 2000<year<3000 Output Format Output the correct day", "STDOUT import calendar as cal day={0:'MONDAY',1:'TUESDAY',2:'WEDNESDAY',3:'THURSDAY',4:'FRIDAY',5:'SATURDAY',6:'SUNDAY'} n=list(map(int,input().split())) if n[2] in range(2001,3000): n1=cal.weekday(n[2],n[0],n[1]) for", "the correct day in capital letters. Sample Input 08 05 2015 Sample Output", "your code here. Read input from STDIN. Print output to STDOUT import calendar", "input from STDIN. Print output to STDOUT import calendar as cal day={0:'MONDAY',1:'TUESDAY',2:'WEDNESDAY',3:'THURSDAY',4:'FRIDAY',5:'SATURDAY',6:'SUNDAY'} n=list(map(int,input().split()))", "output to STDOUT import calendar as cal day={0:'MONDAY',1:'TUESDAY',2:'WEDNESDAY',3:'THURSDAY',4:'FRIDAY',5:'SATURDAY',6:'SUNDAY'} n=list(map(int,input().split())) if n[2] in range(2001,3000):", "calendar as cal day={0:'MONDAY',1:'TUESDAY',2:'WEDNESDAY',3:'THURSDAY',4:'FRIDAY',5:'SATURDAY',6:'SUNDAY'} n=list(map(int,input().split())) if n[2] in range(2001,3000): n1=cal.weekday(n[2],n[0],n[1]) for i in", "given a date. Your task is to find what the day is on", "as cal day={0:'MONDAY',1:'TUESDAY',2:'WEDNESDAY',3:'THURSDAY',4:'FRIDAY',5:'SATURDAY',6:'SUNDAY'} n=list(map(int,input().split())) if n[2] in range(2001,3000): n1=cal.weekday(n[2],n[0],n[1]) for i in day:", "STDIN. 
Print output to STDOUT import calendar as cal day={0:'MONDAY',1:'TUESDAY',2:'WEDNESDAY',3:'THURSDAY',4:'FRIDAY',5:'SATURDAY',6:'SUNDAY'} n=list(map(int,input().split())) if n[2]", "Input 08 05 2015 Sample Output WEDNESDAY Explanation The day on August 5th", "August 5th 2015 was WEDNESDAY. ''' # Enter your code here. Read input", "task is to find what the day is on that date. Input Format", "Output the correct day in capital letters. Sample Input 08 05 2015 Sample", "respectively, in format. Constraints * 2000<year<3000 Output Format Output the correct day in", "# Enter your code here. Read input from STDIN. Print output to STDOUT", "was WEDNESDAY. ''' # Enter your code here. Read input from STDIN. Print", "''' # Enter your code here. Read input from STDIN. Print output to", "on August 5th 2015 was WEDNESDAY. ''' # Enter your code here. Read", "n=list(map(int,input().split())) if n[2] in range(2001,3000): n1=cal.weekday(n[2],n[0],n[1]) for i in day: if i==n1: print(day[i])", "Enter your code here. Read input from STDIN. Print output to STDOUT import", "cal day={0:'MONDAY',1:'TUESDAY',2:'WEDNESDAY',3:'THURSDAY',4:'FRIDAY',5:'SATURDAY',6:'SUNDAY'} n=list(map(int,input().split())) if n[2] in range(2001,3000): n1=cal.weekday(n[2],n[0],n[1]) for i in day: if", "line of input containing the space separated month, day and year, respectively, in", "month, day and year, respectively, in format. Constraints * 2000<year<3000 Output Format Output", "containing the space separated month, day and year, respectively, in format. Constraints *", "a date. Your task is to find what the day is on that", "Sample Output WEDNESDAY Explanation The day on August 5th 2015 was WEDNESDAY. '''", "that date. Input Format A single line of input containing the space separated", "day is on that date. Input Format A single line of input containing", "separated month, day and year, respectively, in format. Constraints * 2000<year<3000 Output Format", "Format Output the correct day in capital letters. 
Sample Input 08 05 2015", "Input Format A single line of input containing the space separated month, day", "Your task is to find what the day is on that date. Input", "2015 Sample Output WEDNESDAY Explanation The day on August 5th 2015 was WEDNESDAY.", "from STDIN. Print output to STDOUT import calendar as cal day={0:'MONDAY',1:'TUESDAY',2:'WEDNESDAY',3:'THURSDAY',4:'FRIDAY',5:'SATURDAY',6:'SUNDAY'} n=list(map(int,input().split())) if", "letters. Sample Input 08 05 2015 Sample Output WEDNESDAY Explanation The day on", "WEDNESDAY Explanation The day on August 5th 2015 was WEDNESDAY. ''' # Enter", "day={0:'MONDAY',1:'TUESDAY',2:'WEDNESDAY',3:'THURSDAY',4:'FRIDAY',5:'SATURDAY',6:'SUNDAY'} n=list(map(int,input().split())) if n[2] in range(2001,3000): n1=cal.weekday(n[2],n[0],n[1]) for i in day: if i==n1:", "on that date. Input Format A single line of input containing the space", "the space separated month, day and year, respectively, in format. Constraints * 2000<year<3000", "in capital letters. Sample Input 08 05 2015 Sample Output WEDNESDAY Explanation The", "day in capital letters. Sample Input 08 05 2015 Sample Output WEDNESDAY Explanation", "in format. Constraints * 2000<year<3000 Output Format Output the correct day in capital", "05 2015 Sample Output WEDNESDAY Explanation The day on August 5th 2015 was", "Output WEDNESDAY Explanation The day on August 5th 2015 was WEDNESDAY. ''' #", "The day on August 5th 2015 was WEDNESDAY. ''' # Enter your code", "for i in day: if i==n1: print(day[i]) ''' output: 08 05 2015 WEDNESDAY", "* 2000<year<3000 Output Format Output the correct day in capital letters. Sample Input", "n1=cal.weekday(n[2],n[0],n[1]) for i in day: if i==n1: print(day[i]) ''' output: 08 05 2015", "if n[2] in range(2001,3000): n1=cal.weekday(n[2],n[0],n[1]) for i in day: if i==n1: print(day[i]) '''", "what the day is on that date. 
Input Format A single line of", "08 05 2015 Sample Output WEDNESDAY Explanation The day on August 5th 2015", "in range(2001,3000): n1=cal.weekday(n[2],n[0],n[1]) for i in day: if i==n1: print(day[i]) ''' output: 08", "day and year, respectively, in format. Constraints * 2000<year<3000 Output Format Output the", "''' Task You are given a date. Your task is to find what", "input containing the space separated month, day and year, respectively, in format. Constraints", "Constraints * 2000<year<3000 Output Format Output the correct day in capital letters. Sample", "the day is on that date. Input Format A single line of input", "Read input from STDIN. Print output to STDOUT import calendar as cal day={0:'MONDAY',1:'TUESDAY',2:'WEDNESDAY',3:'THURSDAY',4:'FRIDAY',5:'SATURDAY',6:'SUNDAY'}", "are given a date. Your task is to find what the day is", "day on August 5th 2015 was WEDNESDAY. ''' # Enter your code here.", "space separated month, day and year, respectively, in format. Constraints * 2000<year<3000 Output", "single line of input containing the space separated month, day and year, respectively,", "and year, respectively, in format. Constraints * 2000<year<3000 Output Format Output the correct", "Print output to STDOUT import calendar as cal day={0:'MONDAY',1:'TUESDAY',2:'WEDNESDAY',3:'THURSDAY',4:'FRIDAY',5:'SATURDAY',6:'SUNDAY'} n=list(map(int,input().split())) if n[2] in" ]
[ "['phone', 'password'] def __init__(self, status=0, data={}): if isinstance(data, list): for val in data:", "UserWrapper(Wrapper): filter = ['phone', 'password'] def __init__(self, status=0, data={}): if isinstance(data, list): for", "__init__(self, status=0, data={}): if isinstance(data, list): for val in data: self.remove_key(val) else: self.remove_key(data)", "filter = ['phone', 'password'] def __init__(self, status=0, data={}): if isinstance(data, list): for val", "core.bean.wrapper import * class UserWrapper(Wrapper): filter = ['phone', 'password'] def __init__(self, status=0, data={}):", "def __init__(self, status=0, data={}): if isinstance(data, list): for val in data: self.remove_key(val) else:", "data={}): if isinstance(data, list): for val in data: self.remove_key(val) else: self.remove_key(data) super().__init__(status, data)", "status=0, data={}): if isinstance(data, list): for val in data: self.remove_key(val) else: self.remove_key(data) super().__init__(status,", "'password'] def __init__(self, status=0, data={}): if isinstance(data, list): for val in data: self.remove_key(val)", "* class UserWrapper(Wrapper): filter = ['phone', 'password'] def __init__(self, status=0, data={}): if isinstance(data,", "<gh_stars>0 from core.bean.wrapper import * class UserWrapper(Wrapper): filter = ['phone', 'password'] def __init__(self,", "from core.bean.wrapper import * class UserWrapper(Wrapper): filter = ['phone', 'password'] def __init__(self, status=0,", "import * class UserWrapper(Wrapper): filter = ['phone', 'password'] def __init__(self, status=0, data={}): if", "class UserWrapper(Wrapper): filter = ['phone', 'password'] def __init__(self, status=0, data={}): if isinstance(data, list):", "= ['phone', 'password'] def __init__(self, status=0, data={}): if isinstance(data, list): for val in" ]
[ "return render_template(\"index.html\") @app.route(\"/login\") def login(): redirect_uri = url_for('auth', _external=True) return oauth.fitbit.authorize_redirect(\"\") @app.route('/auth') def", "session, redirect from authlib.integrations.flask_client import OAuth from azure.keyvault.secrets import SecretClient from azure.identity import", "from authlib.integrations.flask_client import OAuth from azure.keyvault.secrets import SecretClient from azure.identity import DefaultAzureCredential from", "auth(): token = oauth.fitbit.authorize_access_token() secretName = session[\"user\"] = token[\"user_id\"] secretValue = token[\"refresh_token\"] app.secret_key", "the service reset the key client.begin_delete_secret(secretName) # sync data with FHIR API using", "# Send the batch of events to the event hub. await producer.send_batch(event_data_batch) if", "async with producer: # Create a batch. event_data_batch = await producer.create_batch() for item", "= 4))) # Send the batch of events to the event hub. 
await", "1: Bring user to homepage to offer sync service with device cloud (fitbit", "os, json import cmd import asyncio from fitbit import Fitbit from flask import", "render_template(\"index.html\") @app.route(\"/login\") def login(): redirect_uri = url_for('auth', _external=True) return oauth.fitbit.authorize_redirect(\"\") @app.route('/auth') def auth():", "azure.keyvault.secrets import SecretClient from azure.identity import DefaultAzureCredential from azure.core.exceptions import ResourceExistsError from azure.eventhub.aio", "from azure.core.exceptions import ResourceExistsError from azure.eventhub.aio import EventHubProducerClient from azure.eventhub import EventData app", "producer.create_batch() for item in result: print(item) event_data_batch.add(EventData(json.dumps(item, indent = 4))) # Send the", "result: print(item) event_data_batch.add(EventData(json.dumps(item, indent = 4))) # Send the batch of events to", "oauth.register(name=\"fitbit\") # Step 1: Bring user to homepage to offer sync service with", "token[\"user_id\"] secretValue = token[\"refresh_token\"] app.secret_key = token[\"access_token\"] client = SecretClient(vault_url=app.config[\"VAULT_URL\"], credential=DefaultAzureCredential()) try: client.set_secret(secretName,", "url_for('auth', _external=True) return oauth.fitbit.authorize_redirect(\"\") @app.route('/auth') def auth(): token = oauth.fitbit.authorize_access_token() secretName = session[\"user\"]", "secretValue = token[\"refresh_token\"] app.secret_key = token[\"access_token\"] client = SecretClient(vault_url=app.config[\"VAULT_URL\"], credential=DefaultAzureCredential()) try: client.set_secret(secretName, secretValue)", "Flask(__name__) app.config.from_object('config') app.secret_key = '!secret' oauth = OAuth(app) client = oauth.register(name=\"fitbit\") # Step", "@app.route('/sync') async def sync(): fit_client = Fitbit(user=session[\"user\"], access_token=app.secret_key) result = fit_client.init_sync() # Create", 
"azure.core.exceptions import ResourceExistsError from azure.eventhub.aio import EventHubProducerClient from azure.eventhub import EventData app =", "example) @app.route('/') def home(): return render_template(\"index.html\") @app.route(\"/login\") def login(): redirect_uri = url_for('auth', _external=True)", "a producer client to send messages to the event hub. # Specify a", "client = oauth.register(name=\"fitbit\") # Step 1: Bring user to homepage to offer sync", "sync(): fit_client = Fitbit(user=session[\"user\"], access_token=app.secret_key) result = fit_client.init_sync() # Create a producer client", "4))) # Send the batch of events to the event hub. await producer.send_batch(event_data_batch)", "batch of events to the event hub. await producer.send_batch(event_data_batch) if __name__ == '__main__':", "redirect from authlib.integrations.flask_client import OAuth from azure.keyvault.secrets import SecretClient from azure.identity import DefaultAzureCredential", "= Flask(__name__) app.config.from_object('config') app.secret_key = '!secret' oauth = OAuth(app) client = oauth.register(name=\"fitbit\") #", "key client.begin_delete_secret(secretName) # sync data with FHIR API using Io[M]T Connector loop =", "secretName = session[\"user\"] = token[\"user_id\"] secretValue = token[\"refresh_token\"] app.secret_key = token[\"access_token\"] client =", "import asyncio from fitbit import Fitbit from flask import Flask, render_template, url_for, session,", "result = fit_client.init_sync() # Create a producer client to send messages to the", "device cloud (fitbit in this example) @app.route('/') def home(): return render_template(\"index.html\") @app.route(\"/login\") def", "token[\"access_token\"] client = SecretClient(vault_url=app.config[\"VAULT_URL\"], credential=DefaultAzureCredential()) try: client.set_secret(secretName, secretValue) except ResourceExistsError: # assume user", "API using Io[M]T Connector loop = asyncio.new_event_loop() loop.run_until_complete(sync()) 
return \"Successful Sync\" @app.route('/sync') async", "OAuth from azure.keyvault.secrets import SecretClient from azure.identity import DefaultAzureCredential from azure.core.exceptions import ResourceExistsError", "hubs namespace and # the event hub name. producer = EventHubProducerClient.from_connection_string(conn_str=app.config[\"EVENT_HUB_CONN_STR\"]) async with", "the event hub. # Specify a connection string to your event hubs namespace", "oauth.fitbit.authorize_access_token() secretName = session[\"user\"] = token[\"user_id\"] secretValue = token[\"refresh_token\"] app.secret_key = token[\"access_token\"] client", "sync service with device cloud (fitbit in this example) @app.route('/') def home(): return", "asyncio.new_event_loop() loop.run_until_complete(sync()) return \"Successful Sync\" @app.route('/sync') async def sync(): fit_client = Fitbit(user=session[\"user\"], access_token=app.secret_key)", "of events to the event hub. await producer.send_batch(event_data_batch) if __name__ == '__main__': app.run()", "data with FHIR API using Io[M]T Connector loop = asyncio.new_event_loop() loop.run_until_complete(sync()) return \"Successful", "= oauth.register(name=\"fitbit\") # Step 1: Bring user to homepage to offer sync service", "with FHIR API using Io[M]T Connector loop = asyncio.new_event_loop() loop.run_until_complete(sync()) return \"Successful Sync\"", "producer: # Create a batch. 
event_data_batch = await producer.create_batch() for item in result:", "def login(): redirect_uri = url_for('auth', _external=True) return oauth.fitbit.authorize_redirect(\"\") @app.route('/auth') def auth(): token =", "azure.identity import DefaultAzureCredential from azure.core.exceptions import ResourceExistsError from azure.eventhub.aio import EventHubProducerClient from azure.eventhub", "from azure.identity import DefaultAzureCredential from azure.core.exceptions import ResourceExistsError from azure.eventhub.aio import EventHubProducerClient from", "oauth.fitbit.authorize_redirect(\"\") @app.route('/auth') def auth(): token = oauth.fitbit.authorize_access_token() secretName = session[\"user\"] = token[\"user_id\"] secretValue", "and # the event hub name. producer = EventHubProducerClient.from_connection_string(conn_str=app.config[\"EVENT_HUB_CONN_STR\"]) async with producer: #", "= token[\"user_id\"] secretValue = token[\"refresh_token\"] app.secret_key = token[\"access_token\"] client = SecretClient(vault_url=app.config[\"VAULT_URL\"], credential=DefaultAzureCredential()) try:", "= token[\"refresh_token\"] app.secret_key = token[\"access_token\"] client = SecretClient(vault_url=app.config[\"VAULT_URL\"], credential=DefaultAzureCredential()) try: client.set_secret(secretName, secretValue) except", "from azure.keyvault.secrets import SecretClient from azure.identity import DefaultAzureCredential from azure.core.exceptions import ResourceExistsError from", "= url_for('auth', _external=True) return oauth.fitbit.authorize_redirect(\"\") @app.route('/auth') def auth(): token = oauth.fitbit.authorize_access_token() secretName =", "name. producer = EventHubProducerClient.from_connection_string(conn_str=app.config[\"EVENT_HUB_CONN_STR\"]) async with producer: # Create a batch. 
import os, json
import cmd
import asyncio
from fitbit import Fitbit
from flask import Flask, render_template, url_for, session, redirect
from authlib.integrations.flask_client import OAuth
from azure.keyvault.secrets import SecretClient
from azure.identity import DefaultAzureCredential
from azure.core.exceptions import ResourceExistsError
from azure.eventhub.aio import EventHubProducerClient
from azure.eventhub import EventData

app = Flask(__name__)
app.config.from_object('config')
app.secret_key = '!secret'
oauth = OAuth(app)
client = oauth.register(name="fitbit")


# Step 1: Bring user to homepage to offer sync service with device cloud
# (fitbit in this example)
@app.route('/')
def home():
    """Render the landing page offering the sync service."""
    return render_template("index.html")


@app.route("/login")
def login():
    """Start the OAuth flow by redirecting the user to Fitbit."""
    redirect_uri = url_for('auth', _external=True)
    # BUG FIX: the callback URL was computed but an empty string was
    # passed to authorize_redirect, so the provider had no valid
    # redirect target and the local variable was dead.
    return oauth.fitbit.authorize_redirect(redirect_uri)


@app.route('/auth')
def auth():
    """OAuth callback: persist the refresh token, then trigger a sync.

    The refresh token is stored in Azure Key Vault keyed by the Fitbit
    user id; the access token is kept for the duration of the sync.
    """
    token = oauth.fitbit.authorize_access_token()
    secret_name = session["user"] = token["user_id"]
    secret_value = token["refresh_token"]
    # NOTE(review): storing a per-user access token in app.secret_key is
    # global mutable state shared by every request, and it also replaces
    # the session-signing key -- acceptable only for a single-user demo;
    # confirm before any multi-user deployment.
    app.secret_key = token["access_token"]
    # BUG FIX: renamed from `client`, which shadowed the module-level
    # registered OAuth client.
    secret_client = SecretClient(vault_url=app.config["VAULT_URL"],
                                 credential=DefaultAzureCredential())
    try:
        secret_client.set_secret(secret_name, secret_value)
    except ResourceExistsError:
        # Assume the user has re-enabled the service: reset the key.
        secret_client.begin_delete_secret(secret_name)
    # Sync data with the FHIR API using the Io[M]T Connector.
    loop = asyncio.new_event_loop()
    try:
        loop.run_until_complete(sync())
    finally:
        # BUG FIX: the event loop was never closed, leaking one loop
        # (and its selector/file descriptors) per login.
        loop.close()
    return "Successful Sync"


@app.route('/sync')
async def sync():
    """Pull newly synced Fitbit data and forward it to Azure Event Hubs."""
    fit_client = Fitbit(user=session["user"], access_token=app.secret_key)
    result = fit_client.init_sync()
    # Create a producer client to send messages to the event hub.
    # Specify a connection string to your event hubs namespace and
    # the event hub name.
    producer = EventHubProducerClient.from_connection_string(
        conn_str=app.config["EVENT_HUB_CONN_STR"])
    async with producer:
        # Create a batch.
        event_data_batch = await producer.create_batch()
        for item in result:
            print(item)
            event_data_batch.add(EventData(json.dumps(item, indent=4)))
        # Send the batch of events to the event hub.
        await producer.send_batch(event_data_batch)


# NOTE(review): the original tail is truncated at `if __name__ ==` in the
# source view; a conventional dev-server entry point is assumed -- confirm
# against the original file.
if __name__ == "__main__":
    app.run()
[ "from __future__ import absolute_import import yaml from os.path import join class BaseComm: def", "raise NotImplementedError def _load_config(self): config_file = open(join(self.CONFIG_FOLDER, 'comms.yml'), 'rb') yaml_config = yaml.load(config_file.read()) config_file.close()", "def _load_config(self): config_file = open(join(self.CONFIG_FOLDER, 'comms.yml'), 'rb') yaml_config = yaml.load(config_file.read()) config_file.close() return yaml_config", "import join class BaseComm: def __init__(self, config): self.config = config def setup(self): raise", "yaml from os.path import join class BaseComm: def __init__(self, config): self.config = config", "class BaseComm: def __init__(self, config): self.config = config def setup(self): raise NotImplementedError def", "setup(self): raise NotImplementedError def communicate(self): raise NotImplementedError def _load_config(self): config_file = open(join(self.CONFIG_FOLDER, 'comms.yml'),", "BaseComm: def __init__(self, config): self.config = config def setup(self): raise NotImplementedError def communicate(self):", "os.path import join class BaseComm: def __init__(self, config): self.config = config def setup(self):", "def __init__(self, config): self.config = config def setup(self): raise NotImplementedError def communicate(self): raise", "communicate(self): raise NotImplementedError def _load_config(self): config_file = open(join(self.CONFIG_FOLDER, 'comms.yml'), 'rb') yaml_config = yaml.load(config_file.read())", "import yaml from os.path import join class BaseComm: def __init__(self, config): self.config =", "def setup(self): raise NotImplementedError def communicate(self): raise NotImplementedError def _load_config(self): config_file = open(join(self.CONFIG_FOLDER,", "NotImplementedError def _load_config(self): config_file = open(join(self.CONFIG_FOLDER, 'comms.yml'), 'rb') yaml_config = yaml.load(config_file.read()) config_file.close() return", "from os.path import join class BaseComm: def __init__(self, config): 
self.config = config def", "def communicate(self): raise NotImplementedError def _load_config(self): config_file = open(join(self.CONFIG_FOLDER, 'comms.yml'), 'rb') yaml_config =", "import absolute_import import yaml from os.path import join class BaseComm: def __init__(self, config):", "absolute_import import yaml from os.path import join class BaseComm: def __init__(self, config): self.config", "config): self.config = config def setup(self): raise NotImplementedError def communicate(self): raise NotImplementedError def", "<gh_stars>1-10 from __future__ import absolute_import import yaml from os.path import join class BaseComm:", "config def setup(self): raise NotImplementedError def communicate(self): raise NotImplementedError def _load_config(self): config_file =", "__init__(self, config): self.config = config def setup(self): raise NotImplementedError def communicate(self): raise NotImplementedError", "raise NotImplementedError def communicate(self): raise NotImplementedError def _load_config(self): config_file = open(join(self.CONFIG_FOLDER, 'comms.yml'), 'rb')", "= config def setup(self): raise NotImplementedError def communicate(self): raise NotImplementedError def _load_config(self): config_file", "__future__ import absolute_import import yaml from os.path import join class BaseComm: def __init__(self,", "self.config = config def setup(self): raise NotImplementedError def communicate(self): raise NotImplementedError def _load_config(self):", "NotImplementedError def communicate(self): raise NotImplementedError def _load_config(self): config_file = open(join(self.CONFIG_FOLDER, 'comms.yml'), 'rb') yaml_config", "join class BaseComm: def __init__(self, config): self.config = config def setup(self): raise NotImplementedError" ]