body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
|---|---|---|---|---|---|---|---|---|---|
9e93be2ff1bde5ea4952adb78c0ce9746f7cad36f0f3a9bc2c3797245f800d74
|
def _validate_types(ctxt, parsed_spec):
'Validate all types are correct.'
for idl_type in parsed_spec.symbols.types:
_validate_type(ctxt, idl_type)
|
Validate all types are correct.
|
buildscripts/idl/idl/binder.py
|
_validate_types
|
sgweon/mongo
| 0
|
python
|
def _validate_types(ctxt, parsed_spec):
for idl_type in parsed_spec.symbols.types:
_validate_type(ctxt, idl_type)
|
def _validate_types(ctxt, parsed_spec):
for idl_type in parsed_spec.symbols.types:
_validate_type(ctxt, idl_type)<|docstring|>Validate all types are correct.<|endoftext|>
|
6a5745f477bf38aae9bff27393cc7ef4f865817c09a0dbcafa7e3afc2465d9fd
|
def _bind_struct(ctxt, parsed_spec, struct):
'\n Bind a struct.\n\n - Validating a struct and fields.\n - Create the idl.ast version from the idl.syntax tree.\n '
ast_struct = ast.Struct(struct.file_name, struct.line, struct.column)
ast_struct.name = struct.name
ast_struct.description = struct.description
ast_struct.strict = struct.strict
if ast_struct.name.startswith('array<'):
ctxt.add_array_not_valid_error(ast_struct, 'struct', ast_struct.name)
for field in struct.fields:
ast_field = _bind_field(ctxt, parsed_spec, field)
if ast_field:
ast_struct.fields.append(ast_field)
return ast_struct
|
Bind a struct.
- Validating a struct and fields.
- Create the idl.ast version from the idl.syntax tree.
|
buildscripts/idl/idl/binder.py
|
_bind_struct
|
sgweon/mongo
| 0
|
python
|
def _bind_struct(ctxt, parsed_spec, struct):
'\n Bind a struct.\n\n - Validating a struct and fields.\n - Create the idl.ast version from the idl.syntax tree.\n '
ast_struct = ast.Struct(struct.file_name, struct.line, struct.column)
ast_struct.name = struct.name
ast_struct.description = struct.description
ast_struct.strict = struct.strict
if ast_struct.name.startswith('array<'):
ctxt.add_array_not_valid_error(ast_struct, 'struct', ast_struct.name)
for field in struct.fields:
ast_field = _bind_field(ctxt, parsed_spec, field)
if ast_field:
ast_struct.fields.append(ast_field)
return ast_struct
|
def _bind_struct(ctxt, parsed_spec, struct):
'\n Bind a struct.\n\n - Validating a struct and fields.\n - Create the idl.ast version from the idl.syntax tree.\n '
ast_struct = ast.Struct(struct.file_name, struct.line, struct.column)
ast_struct.name = struct.name
ast_struct.description = struct.description
ast_struct.strict = struct.strict
if ast_struct.name.startswith('array<'):
ctxt.add_array_not_valid_error(ast_struct, 'struct', ast_struct.name)
for field in struct.fields:
ast_field = _bind_field(ctxt, parsed_spec, field)
if ast_field:
ast_struct.fields.append(ast_field)
return ast_struct<|docstring|>Bind a struct.
- Validating a struct and fields.
- Create the idl.ast version from the idl.syntax tree.<|endoftext|>
|
02958faafd707d16c75ae8fe02945cc314184615273bb38554adc5102c34b0c7
|
def _validate_ignored_field(ctxt, field):
'Validate that for ignored fields, no other properties are set.'
if field.optional:
ctxt.add_ignored_field_must_be_empty_error(field, field.name, 'optional')
if (field.default is not None):
ctxt.add_ignored_field_must_be_empty_error(field, field.name, 'default')
|
Validate that for ignored fields, no other properties are set.
|
buildscripts/idl/idl/binder.py
|
_validate_ignored_field
|
sgweon/mongo
| 0
|
python
|
def _validate_ignored_field(ctxt, field):
if field.optional:
ctxt.add_ignored_field_must_be_empty_error(field, field.name, 'optional')
if (field.default is not None):
ctxt.add_ignored_field_must_be_empty_error(field, field.name, 'default')
|
def _validate_ignored_field(ctxt, field):
if field.optional:
ctxt.add_ignored_field_must_be_empty_error(field, field.name, 'optional')
if (field.default is not None):
ctxt.add_ignored_field_must_be_empty_error(field, field.name, 'default')<|docstring|>Validate that for ignored fields, no other properties are set.<|endoftext|>
|
ff9ba9e5ea34afbc247211189ddad8803988804534a5362a22e3a445ccdf2374
|
def _validate_field_of_type_struct(ctxt, field):
'Validate that for fields with a type of struct, no other properties are set.'
if (field.default is not None):
ctxt.add_ignored_field_must_be_empty_error(field, field.name, 'default')
|
Validate that for fields with a type of struct, no other properties are set.
|
buildscripts/idl/idl/binder.py
|
_validate_field_of_type_struct
|
sgweon/mongo
| 0
|
python
|
def _validate_field_of_type_struct(ctxt, field):
if (field.default is not None):
ctxt.add_ignored_field_must_be_empty_error(field, field.name, 'default')
|
def _validate_field_of_type_struct(ctxt, field):
if (field.default is not None):
ctxt.add_ignored_field_must_be_empty_error(field, field.name, 'default')<|docstring|>Validate that for fields with a type of struct, no other properties are set.<|endoftext|>
|
fa45e5a83506884784ee6b1063197937198caaf58859c44a5d8b0c1841822f2a
|
def _bind_field(ctxt, parsed_spec, field):
'\n Bind a field from the idl.syntax tree.\n\n - Create the idl.ast version from the idl.syntax tree.\n - Validate the resulting type is correct.\n '
ast_field = ast.Field(field.file_name, field.line, field.column)
ast_field.name = field.name
ast_field.description = field.description
ast_field.optional = field.optional
ast_field.cpp_name = field.name
if field.cpp_name:
ast_field.cpp_name = field.cpp_name
if ast_field.name.startswith('array<'):
ctxt.add_array_not_valid_error(ast_field, 'field', ast_field.name)
if field.ignore:
ast_field.ignore = field.ignore
_validate_ignored_field(ctxt, field)
return ast_field
(struct, idltype) = parsed_spec.symbols.resolve_field_type(ctxt, field)
if ((not struct) and (not idltype)):
return None
if syntax.parse_array_type(field.type):
ast_field.array = True
if (field.default or (idltype and idltype.default)):
ctxt.add_array_no_default(field, field.name)
if struct:
ast_field.struct_type = struct.name
ast_field.bson_serialization_type = ['object']
_validate_field_of_type_struct(ctxt, field)
else:
ast_field.cpp_type = idltype.cpp_type
ast_field.bson_serialization_type = idltype.bson_serialization_type
ast_field.bindata_subtype = idltype.bindata_subtype
ast_field.serializer = idltype.serializer
ast_field.deserializer = idltype.deserializer
ast_field.default = idltype.default
if field.default:
ast_field.default = field.default
_validate_type_properties(ctxt, ast_field, 'field')
return ast_field
|
Bind a field from the idl.syntax tree.
- Create the idl.ast version from the idl.syntax tree.
- Validate the resulting type is correct.
|
buildscripts/idl/idl/binder.py
|
_bind_field
|
sgweon/mongo
| 0
|
python
|
def _bind_field(ctxt, parsed_spec, field):
'\n Bind a field from the idl.syntax tree.\n\n - Create the idl.ast version from the idl.syntax tree.\n - Validate the resulting type is correct.\n '
ast_field = ast.Field(field.file_name, field.line, field.column)
ast_field.name = field.name
ast_field.description = field.description
ast_field.optional = field.optional
ast_field.cpp_name = field.name
if field.cpp_name:
ast_field.cpp_name = field.cpp_name
if ast_field.name.startswith('array<'):
ctxt.add_array_not_valid_error(ast_field, 'field', ast_field.name)
if field.ignore:
ast_field.ignore = field.ignore
_validate_ignored_field(ctxt, field)
return ast_field
(struct, idltype) = parsed_spec.symbols.resolve_field_type(ctxt, field)
if ((not struct) and (not idltype)):
return None
if syntax.parse_array_type(field.type):
ast_field.array = True
if (field.default or (idltype and idltype.default)):
ctxt.add_array_no_default(field, field.name)
if struct:
ast_field.struct_type = struct.name
ast_field.bson_serialization_type = ['object']
_validate_field_of_type_struct(ctxt, field)
else:
ast_field.cpp_type = idltype.cpp_type
ast_field.bson_serialization_type = idltype.bson_serialization_type
ast_field.bindata_subtype = idltype.bindata_subtype
ast_field.serializer = idltype.serializer
ast_field.deserializer = idltype.deserializer
ast_field.default = idltype.default
if field.default:
ast_field.default = field.default
_validate_type_properties(ctxt, ast_field, 'field')
return ast_field
|
def _bind_field(ctxt, parsed_spec, field):
'\n Bind a field from the idl.syntax tree.\n\n - Create the idl.ast version from the idl.syntax tree.\n - Validate the resulting type is correct.\n '
ast_field = ast.Field(field.file_name, field.line, field.column)
ast_field.name = field.name
ast_field.description = field.description
ast_field.optional = field.optional
ast_field.cpp_name = field.name
if field.cpp_name:
ast_field.cpp_name = field.cpp_name
if ast_field.name.startswith('array<'):
ctxt.add_array_not_valid_error(ast_field, 'field', ast_field.name)
if field.ignore:
ast_field.ignore = field.ignore
_validate_ignored_field(ctxt, field)
return ast_field
(struct, idltype) = parsed_spec.symbols.resolve_field_type(ctxt, field)
if ((not struct) and (not idltype)):
return None
if syntax.parse_array_type(field.type):
ast_field.array = True
if (field.default or (idltype and idltype.default)):
ctxt.add_array_no_default(field, field.name)
if struct:
ast_field.struct_type = struct.name
ast_field.bson_serialization_type = ['object']
_validate_field_of_type_struct(ctxt, field)
else:
ast_field.cpp_type = idltype.cpp_type
ast_field.bson_serialization_type = idltype.bson_serialization_type
ast_field.bindata_subtype = idltype.bindata_subtype
ast_field.serializer = idltype.serializer
ast_field.deserializer = idltype.deserializer
ast_field.default = idltype.default
if field.default:
ast_field.default = field.default
_validate_type_properties(ctxt, ast_field, 'field')
return ast_field<|docstring|>Bind a field from the idl.syntax tree.
- Create the idl.ast version from the idl.syntax tree.
- Validate the resulting type is correct.<|endoftext|>
|
f5dd59d7fcd3eac28cc975e6f1a64a0bec3525dfb872230dc553c10d03b2cded
|
def _bind_globals(parsed_spec):
'Bind the globals object from the idl.syntax tree into the idl.ast tree by doing a deep copy.'
if parsed_spec.globals:
ast_global = ast.Global(parsed_spec.globals.file_name, parsed_spec.globals.line, parsed_spec.globals.column)
ast_global.cpp_namespace = parsed_spec.globals.cpp_namespace
ast_global.cpp_includes = parsed_spec.globals.cpp_includes
else:
ast_global = ast.Global('<implicit>', 0, 0)
ast_global.cpp_namespace = 'mongo'
return ast_global
|
Bind the globals object from the idl.syntax tree into the idl.ast tree by doing a deep copy.
|
buildscripts/idl/idl/binder.py
|
_bind_globals
|
sgweon/mongo
| 0
|
python
|
def _bind_globals(parsed_spec):
if parsed_spec.globals:
ast_global = ast.Global(parsed_spec.globals.file_name, parsed_spec.globals.line, parsed_spec.globals.column)
ast_global.cpp_namespace = parsed_spec.globals.cpp_namespace
ast_global.cpp_includes = parsed_spec.globals.cpp_includes
else:
ast_global = ast.Global('<implicit>', 0, 0)
ast_global.cpp_namespace = 'mongo'
return ast_global
|
def _bind_globals(parsed_spec):
if parsed_spec.globals:
ast_global = ast.Global(parsed_spec.globals.file_name, parsed_spec.globals.line, parsed_spec.globals.column)
ast_global.cpp_namespace = parsed_spec.globals.cpp_namespace
ast_global.cpp_includes = parsed_spec.globals.cpp_includes
else:
ast_global = ast.Global('<implicit>', 0, 0)
ast_global.cpp_namespace = 'mongo'
return ast_global<|docstring|>Bind the globals object from the idl.syntax tree into the idl.ast tree by doing a deep copy.<|endoftext|>
|
c13f35838d37e90dad44d434dca0c641aaff13785d44c0ada3dc2211b2afb167
|
def bind(parsed_spec):
'Read an idl.syntax, create an idl.ast tree, and validate the final IDL Specification.'
ctxt = errors.ParserContext('unknown', errors.ParserErrorCollection())
bound_spec = ast.IDLAST()
bound_spec.globals = _bind_globals(parsed_spec)
_validate_types(ctxt, parsed_spec)
for struct in parsed_spec.symbols.structs:
if (not struct.imported):
bound_spec.structs.append(_bind_struct(ctxt, parsed_spec, struct))
if ctxt.errors.has_errors():
return ast.IDLBoundSpec(None, ctxt.errors)
else:
return ast.IDLBoundSpec(bound_spec, None)
|
Read an idl.syntax, create an idl.ast tree, and validate the final IDL Specification.
|
buildscripts/idl/idl/binder.py
|
bind
|
sgweon/mongo
| 0
|
python
|
def bind(parsed_spec):
ctxt = errors.ParserContext('unknown', errors.ParserErrorCollection())
bound_spec = ast.IDLAST()
bound_spec.globals = _bind_globals(parsed_spec)
_validate_types(ctxt, parsed_spec)
for struct in parsed_spec.symbols.structs:
if (not struct.imported):
bound_spec.structs.append(_bind_struct(ctxt, parsed_spec, struct))
if ctxt.errors.has_errors():
return ast.IDLBoundSpec(None, ctxt.errors)
else:
return ast.IDLBoundSpec(bound_spec, None)
|
def bind(parsed_spec):
ctxt = errors.ParserContext('unknown', errors.ParserErrorCollection())
bound_spec = ast.IDLAST()
bound_spec.globals = _bind_globals(parsed_spec)
_validate_types(ctxt, parsed_spec)
for struct in parsed_spec.symbols.structs:
if (not struct.imported):
bound_spec.structs.append(_bind_struct(ctxt, parsed_spec, struct))
if ctxt.errors.has_errors():
return ast.IDLBoundSpec(None, ctxt.errors)
else:
return ast.IDLBoundSpec(bound_spec, None)<|docstring|>Read an idl.syntax, create an idl.ast tree, and validate the final IDL Specification.<|endoftext|>
|
f88cda6a26861619c9f59e646b56d274f79c5dda9c18957b44c3053ec37b9103
|
def r_learning(env, alpha=0.01, beta=0.01, max_steps=50):
' R Learning algorithm (Schwartz - 1993) to solve the average reward problem\n\n Args:\n env - the object of the environment. In this case, the RiverSwim object.\n alpha - learning rate to update the Q-values\n beta - learning rate to update the gain, rho\n max_steps = maximum number of steps for r-learning\n \n Returns:\n rho - array with all rho values given by r-learning\n values - array of all values for the recurrent state, during all interations of r-learning\n '
(env.state_values, env.state_q_values) = env.init_values()
rho = 0.0
epsilon = 0.3
rhos = []
ves = []
for t in tqdm(range(max_steps)):
if ((t % 20) == 0):
state = random.choice(env.states)
if ((t % 1000) == 0):
rhos.append(rho)
ves.append(env.state_values[sI])
action = env.getAction(state, epsilon, t)
(new_state, reward) = env.step(state, action)
sample = ((reward - rho) + np.max(env.state_q_values[new_state]))
env.state_q_values[state][action] = (((1 - alpha) * env.state_q_values[state][action]) + (alpha * sample))
env.state_q_values[(0, :)] = 0.0
env.policy[state] = np.argmax(env.state_q_values[state])
env.state_values[state] = np.max(env.state_q_values[state])
rho = (rho + (beta * (((reward - rho) + np.max(env.state_q_values[new_state])) - np.max(env.state_q_values[state]))))
if (new_state == 0):
state = random.choice(env.states[1:])
else:
state = new_state
return (rhos, ves)
|
R Learning algorithm (Schwartz - 1993) to solve the average reward problem
Args:
env - the object of the environment. In this case, the RiverSwim object.
alpha - learning rate to update the Q-values
beta - learning rate to update the gain, rho
max_steps = maximum number of steps for r-learning
Returns:
rho - array with all rho values given by r-learning
values - array of all values for the recurrent state, during all interations of r-learning
|
RiverSwim/rlearning.py
|
r_learning
|
nudging-SMDP/nudging-supplementary-material
| 2
|
python
|
def r_learning(env, alpha=0.01, beta=0.01, max_steps=50):
' R Learning algorithm (Schwartz - 1993) to solve the average reward problem\n\n Args:\n env - the object of the environment. In this case, the RiverSwim object.\n alpha - learning rate to update the Q-values\n beta - learning rate to update the gain, rho\n max_steps = maximum number of steps for r-learning\n \n Returns:\n rho - array with all rho values given by r-learning\n values - array of all values for the recurrent state, during all interations of r-learning\n '
(env.state_values, env.state_q_values) = env.init_values()
rho = 0.0
epsilon = 0.3
rhos = []
ves = []
for t in tqdm(range(max_steps)):
if ((t % 20) == 0):
state = random.choice(env.states)
if ((t % 1000) == 0):
rhos.append(rho)
ves.append(env.state_values[sI])
action = env.getAction(state, epsilon, t)
(new_state, reward) = env.step(state, action)
sample = ((reward - rho) + np.max(env.state_q_values[new_state]))
env.state_q_values[state][action] = (((1 - alpha) * env.state_q_values[state][action]) + (alpha * sample))
env.state_q_values[(0, :)] = 0.0
env.policy[state] = np.argmax(env.state_q_values[state])
env.state_values[state] = np.max(env.state_q_values[state])
rho = (rho + (beta * (((reward - rho) + np.max(env.state_q_values[new_state])) - np.max(env.state_q_values[state]))))
if (new_state == 0):
state = random.choice(env.states[1:])
else:
state = new_state
return (rhos, ves)
|
def r_learning(env, alpha=0.01, beta=0.01, max_steps=50):
' R Learning algorithm (Schwartz - 1993) to solve the average reward problem\n\n Args:\n env - the object of the environment. In this case, the RiverSwim object.\n alpha - learning rate to update the Q-values\n beta - learning rate to update the gain, rho\n max_steps = maximum number of steps for r-learning\n \n Returns:\n rho - array with all rho values given by r-learning\n values - array of all values for the recurrent state, during all interations of r-learning\n '
(env.state_values, env.state_q_values) = env.init_values()
rho = 0.0
epsilon = 0.3
rhos = []
ves = []
for t in tqdm(range(max_steps)):
if ((t % 20) == 0):
state = random.choice(env.states)
if ((t % 1000) == 0):
rhos.append(rho)
ves.append(env.state_values[sI])
action = env.getAction(state, epsilon, t)
(new_state, reward) = env.step(state, action)
sample = ((reward - rho) + np.max(env.state_q_values[new_state]))
env.state_q_values[state][action] = (((1 - alpha) * env.state_q_values[state][action]) + (alpha * sample))
env.state_q_values[(0, :)] = 0.0
env.policy[state] = np.argmax(env.state_q_values[state])
env.state_values[state] = np.max(env.state_q_values[state])
rho = (rho + (beta * (((reward - rho) + np.max(env.state_q_values[new_state])) - np.max(env.state_q_values[state]))))
if (new_state == 0):
state = random.choice(env.states[1:])
else:
state = new_state
return (rhos, ves)<|docstring|>R Learning algorithm (Schwartz - 1993) to solve the average reward problem
Args:
env - the object of the environment. In this case, the RiverSwim object.
alpha - learning rate to update the Q-values
beta - learning rate to update the gain, rho
max_steps = maximum number of steps for r-learning
Returns:
rho - array with all rho values given by r-learning
values - array of all values for the recurrent state, during all interations of r-learning<|endoftext|>
|
96d74f5ffae0b93afb890e688beb5931237c71fae7adcb61ab042327d16b23ad
|
def makeFusekiConfig(names0=['labMacambiraLaleniaLog3', 'labMacambiraLaleniaLog2', 'foradoeixo', 'gmane-linux-audio-users', 'gmane-linux-audio-devel', 'gmane-politics-organizations-metareciclagem', 'arenaNETmundial_tw', 'matehackers'], names1=[('music_tw', 15), ('obama_tw', 3), ('porn_tw', 8), ('god_tw', 2), ('art_tw', 6)], names2=['participabr', 'cidadedemocratica', 'aa', 'gmane-comp-gcc-libstdcPP-devel'], empty=True, names0_=[], names1_=[], names2_=[]):
'Makes a apache/jena/fuseki configuration file.\n\n # names0 follows the simplest pattern\n # names1 follows the pattern with multiple files\n # names2 dont follow pattern.\n # names0_, names1_ and names2_ are given for appending to initial list.\n\n Loads RDF files on the fly or afterwards (empty=True default)\n with P.config.loadFuseki(makeFusekiConfig())\n '
body = ''
execline = []
if (not empty):
for (i, name) in enumerate((names2 + names2_)):
if ('gcc' in name):
url = 'https://raw.githubusercontent.com/OpenLinkedSocialData/{}/master/rdf/{}Translate.owl'.format(name, name.replace('P', '+'))
if ('participabr' in name):
url = 'https://raw.githubusercontent.com/OpenLinkedSocialData/opa/master/participaTriplestore.rdf'
if ('cidadedemocratica' in name):
url = 'file:/disco/triplas/cdTriplestore.rdf'
if ('aa' in name):
url = 'https://raw.githubusercontent.com/OpenLinkedSocialData/aa01/master/rdf/aaTriplestore.rdf'
body += '\n<#service{}> rdf:type fuseki:Service ;\n fuseki:name "{}" ; \n fuseki:serviceQuery "query" ;\n fuseki:dataset \n [ rdf:type ja:RDFDataset ;\n ja:defaultGraph \n [\n a ja:MemoryModel ;\n ja:content \n [ ja:externalContent <{}> ] ;\n ] ;\n ] .\n '.format(i, name, url)
for (i, name) in enumerate((names1 + names1_)):
i += len(names2)
urls = ''
for count in range((name[1] + 1)):
url = 'https://raw.githubusercontent.com/OpenLinkedSocialData/{}/master/rdf/{}Translate{:05d}.owl'.format(name[0], name[0], count)
urls += ' ja:content [ ja:externalContent <{}> ] ;\n '.format(url)
body += '\n<#service{}> rdf:type fuseki:Service ;\n fuseki:name "{}" ; \n fuseki:serviceQuery "query" ;\n fuseki:dataset \n [ rdf:type ja:RDFDataset ;\n ja:defaultGraph \n [\n a ja:MemoryModel ;\n {}\n ] ;\n ] .\n '.format(i, name[0], urls)
for (i, name) in enumerate((names0 + names0_)):
i += (len(names1) + len(names2))
url = 'https://raw.githubusercontent.com/OpenLinkedSocialData/{}/master/rdf/{}Translate.owl'.format(name, name)
body += '\n<#service{}> rdf:type fuseki:Service ;\n fuseki:name "{}" ; \n fuseki:serviceQuery "query" ;\n fuseki:dataset \n [ rdf:type ja:RDFDataset ;\n ja:defaultGraph \n [\n a ja:MemoryModel ;\n ja:content \n [ ja:externalContent <{}> ] ;\n ] ;\n ] .\n '.format(i, name, url)
if empty:
for (i, name) in enumerate((names2 + names2_)):
body += '\n <#service{}> rdf:type fuseki:Service ;\n fuseki:name "{}" ; \n fuseki:serviceQuery "sparql" ;\n fuseki:serviceQuery "query" ;\n fuseki:serviceUpdate "update" ;\n fuseki:serviceUpload "upload" ;\n fuseki:serviceReadWriteGraphStore "data" ; \n fuseki:serviceReadGraphStore "get" ;\n fuseki:dataset [\n rdf:type ja:RDFDataset ;\n ] ;\n .\n '.format(i, name)
if ('gcc' in name):
execline += ['./s-put http://localhost:82/{} default https://raw.githubusercontent.com/OpenLinkedSocialData/{}/master/rdf/{}Translate.owl'.format(name, name, name.replace('P', '+'))]
if ('participabr' in name):
execline += ['./s-put http://localhost:82/{} default https://raw.githubusercontent.com/OpenLinkedSocialData/opa/master/participaTriplestore.rdf'.format(name)]
if ('cidadedemocratica' in name):
execline += ['./s-put http://localhost:82/{} default /disco/triplas/cdTriplestore.rdf'.format(name)]
if ('aa' in name):
execline += ['./s-put http://localhost:82/{} default https://raw.githubusercontent.com/OpenLinkedSocialData/aa01/master/rdf/aaTriplestore.rdf'.format(name)]
for (i, name) in enumerate((names1 + names1_)):
i += len(names2)
body += '\n <#service{}> rdf:type fuseki:Service ;\n fuseki:name "{}" ; \n fuseki:serviceQuery "sparql" ;\n fuseki:serviceQuery "query" ;\n fuseki:serviceUpdate "update" ;\n fuseki:serviceUpload "upload" ;\n fuseki:serviceReadWriteGraphStore "data" ; \n fuseki:serviceReadGraphStore "get" ;\n fuseki:dataset [\n rdf:type ja:RDFDataset ;\n ] ;\n .\n '.format(i, name[0])
count = 0
for arq in range((name[1] + 1)):
execline += ['./s-put http://localhost:82/{} default https://raw.githubusercontent.com/OpenLinkedSocialData/{}/master/rdf/{}Translate{:05d}.owl'.format(name[0], name[0], name[0], count)]
count += 1
for (i, name) in enumerate((names0 + names0_)):
i += (len(names1) + len(names2))
body += '\n <#service{}> rdf:type fuseki:Service ;\n fuseki:name "{}" ; \n fuseki:serviceQuery "sparql" ;\n fuseki:serviceQuery "query" ;\n fuseki:serviceUpdate "update" ;\n fuseki:serviceUpload "upload" ;\n fuseki:serviceReadWriteGraphStore "data" ; \n fuseki:serviceReadGraphStore "get" ;\n fuseki:dataset [\n rdf:type ja:RDFDataset ;\n ] ;\n .\n '.format(i, name)
execline += ['./s-put http://localhost:82/{} default https://raw.githubusercontent.com/OpenLinkedSocialData/{}/master/rdf/{}Translate.owl'.format(name, name, name)]
body += '\n <#service{}> rdf:type fuseki:Service ;\n # URI of the dataset -- http://host:port/dsfoo\n fuseki:name "dsfoo" ; \n fuseki:serviceQuery "sparql" ;\n fuseki:serviceQuery "query" ;\n fuseki:serviceUpdate "update" ;\n fuseki:serviceUpload "upload" ;\n fuseki:serviceReadWriteGraphStore "data" ; \n fuseki:serviceReadGraphStore "get" ;\n fuseki:dataset [\n rdf:type ja:RDFDataset ;\n ] ;\n .\n '.format(((len(names0) + len(names1)) + len(names2)))
header = (('\n @prefix : <#> .\n @prefix fuseki: <http://jena.apache.org/fuseki#> .\n @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\n\n @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\n @prefix tdb: <http://jena.hpl.hp.com/2008/tdb#> .\n @prefix ja: <http://jena.hpl.hp.com/2005/11/Assembler#> .\n @prefix tw: <http://purl.org/socialparticipation/tw/> .\n @prefix irc: <http://purl.org/socialparticipation/irc/> .\n @prefix fb: <http://purl.org/socialparticipation/fb/> .\n @prefix opa: <http://purl.org/socialparticipation/opa/> .\n @prefix ocd: <http://purl.org/socialparticipation/ocd/> .\n @prefix aa: <http://purl.org/socialparticipation/aa/> .\n @prefix gmane: <http://purl.org/socialparticipation/gmane/> .\n\n [] rdf:type fuseki:Server ;\n fuseki:services (' + ' '.join(['<#service{}>'.format(i) for i in range((((len(names0) + len(names1)) + len(names2)) + 1))])) + ' ) .\n\n')
fname = 'configAuto.ttl'
f = open(fname, 'w')
f.write((header + body))
f.close()
c('{} written'.format(fname))
return execline
|
Makes a apache/jena/fuseki configuration file.
# names0 follows the simplest pattern
# names1 follows the pattern with multiple files
# names2 dont follow pattern.
# names0_, names1_ and names2_ are given for appending to initial list.
Loads RDF files on the fly or afterwards (empty=True default)
with P.config.loadFuseki(makeFusekiConfig())
|
percolation/sparql/config.py
|
makeFusekiConfig
|
ttm/percolate
| 1
|
python
|
def makeFusekiConfig(names0=['labMacambiraLaleniaLog3', 'labMacambiraLaleniaLog2', 'foradoeixo', 'gmane-linux-audio-users', 'gmane-linux-audio-devel', 'gmane-politics-organizations-metareciclagem', 'arenaNETmundial_tw', 'matehackers'], names1=[('music_tw', 15), ('obama_tw', 3), ('porn_tw', 8), ('god_tw', 2), ('art_tw', 6)], names2=['participabr', 'cidadedemocratica', 'aa', 'gmane-comp-gcc-libstdcPP-devel'], empty=True, names0_=[], names1_=[], names2_=[]):
'Makes a apache/jena/fuseki configuration file.\n\n # names0 follows the simplest pattern\n # names1 follows the pattern with multiple files\n # names2 dont follow pattern.\n # names0_, names1_ and names2_ are given for appending to initial list.\n\n Loads RDF files on the fly or afterwards (empty=True default)\n with P.config.loadFuseki(makeFusekiConfig())\n '
body =
execline = []
if (not empty):
for (i, name) in enumerate((names2 + names2_)):
if ('gcc' in name):
url = 'https://raw.githubusercontent.com/OpenLinkedSocialData/{}/master/rdf/{}Translate.owl'.format(name, name.replace('P', '+'))
if ('participabr' in name):
url = 'https://raw.githubusercontent.com/OpenLinkedSocialData/opa/master/participaTriplestore.rdf'
if ('cidadedemocratica' in name):
url = 'file:/disco/triplas/cdTriplestore.rdf'
if ('aa' in name):
url = 'https://raw.githubusercontent.com/OpenLinkedSocialData/aa01/master/rdf/aaTriplestore.rdf'
body += '\n<#service{}> rdf:type fuseki:Service ;\n fuseki:name "{}" ; \n fuseki:serviceQuery "query" ;\n fuseki:dataset \n [ rdf:type ja:RDFDataset ;\n ja:defaultGraph \n [\n a ja:MemoryModel ;\n ja:content \n [ ja:externalContent <{}> ] ;\n ] ;\n ] .\n '.format(i, name, url)
for (i, name) in enumerate((names1 + names1_)):
i += len(names2)
urls =
for count in range((name[1] + 1)):
url = 'https://raw.githubusercontent.com/OpenLinkedSocialData/{}/master/rdf/{}Translate{:05d}.owl'.format(name[0], name[0], count)
urls += ' ja:content [ ja:externalContent <{}> ] ;\n '.format(url)
body += '\n<#service{}> rdf:type fuseki:Service ;\n fuseki:name "{}" ; \n fuseki:serviceQuery "query" ;\n fuseki:dataset \n [ rdf:type ja:RDFDataset ;\n ja:defaultGraph \n [\n a ja:MemoryModel ;\n {}\n ] ;\n ] .\n '.format(i, name[0], urls)
for (i, name) in enumerate((names0 + names0_)):
i += (len(names1) + len(names2))
url = 'https://raw.githubusercontent.com/OpenLinkedSocialData/{}/master/rdf/{}Translate.owl'.format(name, name)
body += '\n<#service{}> rdf:type fuseki:Service ;\n fuseki:name "{}" ; \n fuseki:serviceQuery "query" ;\n fuseki:dataset \n [ rdf:type ja:RDFDataset ;\n ja:defaultGraph \n [\n a ja:MemoryModel ;\n ja:content \n [ ja:externalContent <{}> ] ;\n ] ;\n ] .\n '.format(i, name, url)
if empty:
for (i, name) in enumerate((names2 + names2_)):
body += '\n <#service{}> rdf:type fuseki:Service ;\n fuseki:name "{}" ; \n fuseki:serviceQuery "sparql" ;\n fuseki:serviceQuery "query" ;\n fuseki:serviceUpdate "update" ;\n fuseki:serviceUpload "upload" ;\n fuseki:serviceReadWriteGraphStore "data" ; \n fuseki:serviceReadGraphStore "get" ;\n fuseki:dataset [\n rdf:type ja:RDFDataset ;\n ] ;\n .\n '.format(i, name)
if ('gcc' in name):
execline += ['./s-put http://localhost:82/{} default https://raw.githubusercontent.com/OpenLinkedSocialData/{}/master/rdf/{}Translate.owl'.format(name, name, name.replace('P', '+'))]
if ('participabr' in name):
execline += ['./s-put http://localhost:82/{} default https://raw.githubusercontent.com/OpenLinkedSocialData/opa/master/participaTriplestore.rdf'.format(name)]
if ('cidadedemocratica' in name):
execline += ['./s-put http://localhost:82/{} default /disco/triplas/cdTriplestore.rdf'.format(name)]
if ('aa' in name):
execline += ['./s-put http://localhost:82/{} default https://raw.githubusercontent.com/OpenLinkedSocialData/aa01/master/rdf/aaTriplestore.rdf'.format(name)]
for (i, name) in enumerate((names1 + names1_)):
i += len(names2)
body += '\n <#service{}> rdf:type fuseki:Service ;\n fuseki:name "{}" ; \n fuseki:serviceQuery "sparql" ;\n fuseki:serviceQuery "query" ;\n fuseki:serviceUpdate "update" ;\n fuseki:serviceUpload "upload" ;\n fuseki:serviceReadWriteGraphStore "data" ; \n fuseki:serviceReadGraphStore "get" ;\n fuseki:dataset [\n rdf:type ja:RDFDataset ;\n ] ;\n .\n '.format(i, name[0])
count = 0
for arq in range((name[1] + 1)):
execline += ['./s-put http://localhost:82/{} default https://raw.githubusercontent.com/OpenLinkedSocialData/{}/master/rdf/{}Translate{:05d}.owl'.format(name[0], name[0], name[0], count)]
count += 1
for (i, name) in enumerate((names0 + names0_)):
i += (len(names1) + len(names2))
body += '\n <#service{}> rdf:type fuseki:Service ;\n fuseki:name "{}" ; \n fuseki:serviceQuery "sparql" ;\n fuseki:serviceQuery "query" ;\n fuseki:serviceUpdate "update" ;\n fuseki:serviceUpload "upload" ;\n fuseki:serviceReadWriteGraphStore "data" ; \n fuseki:serviceReadGraphStore "get" ;\n fuseki:dataset [\n rdf:type ja:RDFDataset ;\n ] ;\n .\n '.format(i, name)
execline += ['./s-put http://localhost:82/{} default https://raw.githubusercontent.com/OpenLinkedSocialData/{}/master/rdf/{}Translate.owl'.format(name, name, name)]
body += '\n <#service{}> rdf:type fuseki:Service ;\n # URI of the dataset -- http://host:port/dsfoo\n fuseki:name "dsfoo" ; \n fuseki:serviceQuery "sparql" ;\n fuseki:serviceQuery "query" ;\n fuseki:serviceUpdate "update" ;\n fuseki:serviceUpload "upload" ;\n fuseki:serviceReadWriteGraphStore "data" ; \n fuseki:serviceReadGraphStore "get" ;\n fuseki:dataset [\n rdf:type ja:RDFDataset ;\n ] ;\n .\n '.format(((len(names0) + len(names1)) + len(names2)))
header = (('\n @prefix : <#> .\n @prefix fuseki: <http://jena.apache.org/fuseki#> .\n @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\n\n @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\n @prefix tdb: <http://jena.hpl.hp.com/2008/tdb#> .\n @prefix ja: <http://jena.hpl.hp.com/2005/11/Assembler#> .\n @prefix tw: <http://purl.org/socialparticipation/tw/> .\n @prefix irc: <http://purl.org/socialparticipation/irc/> .\n @prefix fb: <http://purl.org/socialparticipation/fb/> .\n @prefix opa: <http://purl.org/socialparticipation/opa/> .\n @prefix ocd: <http://purl.org/socialparticipation/ocd/> .\n @prefix aa: <http://purl.org/socialparticipation/aa/> .\n @prefix gmane: <http://purl.org/socialparticipation/gmane/> .\n\n [] rdf:type fuseki:Server ;\n fuseki:services (' + ' '.join(['<#service{}>'.format(i) for i in range((((len(names0) + len(names1)) + len(names2)) + 1))])) + ' ) .\n\n')
fname = 'configAuto.ttl'
f = open(fname, 'w')
f.write((header + body))
f.close()
c('{} written'.format(fname))
return execline
|
def makeFusekiConfig(names0=['labMacambiraLaleniaLog3', 'labMacambiraLaleniaLog2', 'foradoeixo', 'gmane-linux-audio-users', 'gmane-linux-audio-devel', 'gmane-politics-organizations-metareciclagem', 'arenaNETmundial_tw', 'matehackers'], names1=[('music_tw', 15), ('obama_tw', 3), ('porn_tw', 8), ('god_tw', 2), ('art_tw', 6)], names2=['participabr', 'cidadedemocratica', 'aa', 'gmane-comp-gcc-libstdcPP-devel'], empty=True, names0_=[], names1_=[], names2_=[]):
    """Make an Apache Jena/Fuseki configuration file (configAuto.ttl).

    Parameters
    ----------
    names0 : list of str
        datasets whose RDF file follows the simplest pattern
        (<name>Translate.owl in the OpenLinkedSocialData repos).
    names1 : list of (str, int)
        (name, last_index) pairs for datasets split over several numbered
        RDF files (<name>Translate00000.owl ... <name>TranslateNNNNN.owl).
    names2 : list of str
        datasets that do not follow the pattern (special-cased below).
    empty : bool
        if True, declare empty updatable services and return the 's-put'
        shell commands that upload the RDF data afterwards (e.g. via
        P.config.loadFuseki(makeFusekiConfig())); if False, reference the
        RDF files directly through ja:externalContent.
    names0_, names1_, names2_ : list
        extra entries appended to the corresponding list.  The mutable
        defaults are never mutated here, only concatenated.

    Returns
    -------
    list of str
        shell commands that load the data ([] when empty=False).
    """
    # fix: both initialisations had lost their empty-string literals
    # (syntax errors in the extracted source)
    body = ''
    execline = []
    # fix: service numbering and the server-wide service count must use the
    # extended lists, otherwise non-empty names*_ arguments yield clashing
    # service ids and a wrong fuseki:services list
    n2 = len((names2 + names2_))
    n1 = len((names1 + names1_))
    n0 = len((names0 + names0_))
    if (not empty):
        # read-only services referencing the RDF data via ja:externalContent
        for (i, name) in enumerate((names2 + names2_)):
            if ('gcc' in name):
                url = 'https://raw.githubusercontent.com/OpenLinkedSocialData/{}/master/rdf/{}Translate.owl'.format(name, name.replace('P', '+'))
            if ('participabr' in name):
                url = 'https://raw.githubusercontent.com/OpenLinkedSocialData/opa/master/participaTriplestore.rdf'
            if ('cidadedemocratica' in name):
                url = 'file:/disco/triplas/cdTriplestore.rdf'
            if ('aa' in name):
                url = 'https://raw.githubusercontent.com/OpenLinkedSocialData/aa01/master/rdf/aaTriplestore.rdf'
            body += '\n<#service{}> rdf:type fuseki:Service ;\n fuseki:name "{}" ; \n fuseki:serviceQuery "query" ;\n fuseki:dataset \n [ rdf:type ja:RDFDataset ;\n ja:defaultGraph \n [\n a ja:MemoryModel ;\n ja:content \n [ ja:externalContent <{}> ] ;\n ] ;\n ] .\n '.format(i, name, url)
        for (i, name) in enumerate((names1 + names1_)):
            i += n2
            # fix: lost empty-string literal restored
            urls = ''
            for count in range((name[1] + 1)):
                url = 'https://raw.githubusercontent.com/OpenLinkedSocialData/{}/master/rdf/{}Translate{:05d}.owl'.format(name[0], name[0], count)
                urls += ' ja:content [ ja:externalContent <{}> ] ;\n '.format(url)
            body += '\n<#service{}> rdf:type fuseki:Service ;\n fuseki:name "{}" ; \n fuseki:serviceQuery "query" ;\n fuseki:dataset \n [ rdf:type ja:RDFDataset ;\n ja:defaultGraph \n [\n a ja:MemoryModel ;\n {}\n ] ;\n ] .\n '.format(i, name[0], urls)
        for (i, name) in enumerate((names0 + names0_)):
            i += (n1 + n2)
            url = 'https://raw.githubusercontent.com/OpenLinkedSocialData/{}/master/rdf/{}Translate.owl'.format(name, name)
            body += '\n<#service{}> rdf:type fuseki:Service ;\n fuseki:name "{}" ; \n fuseki:serviceQuery "query" ;\n fuseki:dataset \n [ rdf:type ja:RDFDataset ;\n ja:defaultGraph \n [\n a ja:MemoryModel ;\n ja:content \n [ ja:externalContent <{}> ] ;\n ] ;\n ] .\n '.format(i, name, url)
    if empty:
        # empty updatable services; the data is uploaded later with s-put
        # against http://localhost:82/<name>
        for (i, name) in enumerate((names2 + names2_)):
            body += '\n <#service{}> rdf:type fuseki:Service ;\n fuseki:name "{}" ; \n fuseki:serviceQuery "sparql" ;\n fuseki:serviceQuery "query" ;\n fuseki:serviceUpdate "update" ;\n fuseki:serviceUpload "upload" ;\n fuseki:serviceReadWriteGraphStore "data" ; \n fuseki:serviceReadGraphStore "get" ;\n fuseki:dataset [\n rdf:type ja:RDFDataset ;\n ] ;\n .\n '.format(i, name)
            if ('gcc' in name):
                execline += ['./s-put http://localhost:82/{} default https://raw.githubusercontent.com/OpenLinkedSocialData/{}/master/rdf/{}Translate.owl'.format(name, name, name.replace('P', '+'))]
            if ('participabr' in name):
                execline += ['./s-put http://localhost:82/{} default https://raw.githubusercontent.com/OpenLinkedSocialData/opa/master/participaTriplestore.rdf'.format(name)]
            if ('cidadedemocratica' in name):
                execline += ['./s-put http://localhost:82/{} default /disco/triplas/cdTriplestore.rdf'.format(name)]
            if ('aa' in name):
                execline += ['./s-put http://localhost:82/{} default https://raw.githubusercontent.com/OpenLinkedSocialData/aa01/master/rdf/aaTriplestore.rdf'.format(name)]
        for (i, name) in enumerate((names1 + names1_)):
            i += n2
            body += '\n <#service{}> rdf:type fuseki:Service ;\n fuseki:name "{}" ; \n fuseki:serviceQuery "sparql" ;\n fuseki:serviceQuery "query" ;\n fuseki:serviceUpdate "update" ;\n fuseki:serviceUpload "upload" ;\n fuseki:serviceReadWriteGraphStore "data" ; \n fuseki:serviceReadGraphStore "get" ;\n fuseki:dataset [\n rdf:type ja:RDFDataset ;\n ] ;\n .\n '.format(i, name[0])
            count = 0
            for arq in range((name[1] + 1)):
                execline += ['./s-put http://localhost:82/{} default https://raw.githubusercontent.com/OpenLinkedSocialData/{}/master/rdf/{}Translate{:05d}.owl'.format(name[0], name[0], name[0], count)]
                count += 1
        for (i, name) in enumerate((names0 + names0_)):
            i += (n1 + n2)
            body += '\n <#service{}> rdf:type fuseki:Service ;\n fuseki:name "{}" ; \n fuseki:serviceQuery "sparql" ;\n fuseki:serviceQuery "query" ;\n fuseki:serviceUpdate "update" ;\n fuseki:serviceUpload "upload" ;\n fuseki:serviceReadWriteGraphStore "data" ; \n fuseki:serviceReadGraphStore "get" ;\n fuseki:dataset [\n rdf:type ja:RDFDataset ;\n ] ;\n .\n '.format(i, name)
            execline += ['./s-put http://localhost:82/{} default https://raw.githubusercontent.com/OpenLinkedSocialData/{}/master/rdf/{}Translate.owl'.format(name, name, name)]
    # extra empty scratch dataset 'dsfoo' appended after all named services
    body += '\n <#service{}> rdf:type fuseki:Service ;\n # URI of the dataset -- http://host:port/dsfoo\n fuseki:name "dsfoo" ; \n fuseki:serviceQuery "sparql" ;\n fuseki:serviceQuery "query" ;\n fuseki:serviceUpdate "update" ;\n fuseki:serviceUpload "upload" ;\n fuseki:serviceReadWriteGraphStore "data" ; \n fuseki:serviceReadGraphStore "get" ;\n fuseki:dataset [\n rdf:type ja:RDFDataset ;\n ] ;\n .\n '.format(((n0 + n1) + n2))
    # server header: prefix declarations plus the list of all services
    # (+1 accounts for the dsfoo service)
    header = (('\n @prefix : <#> .\n @prefix fuseki: <http://jena.apache.org/fuseki#> .\n @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .\n\n @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .\n @prefix tdb: <http://jena.hpl.hp.com/2008/tdb#> .\n @prefix ja: <http://jena.hpl.hp.com/2005/11/Assembler#> .\n @prefix tw: <http://purl.org/socialparticipation/tw/> .\n @prefix irc: <http://purl.org/socialparticipation/irc/> .\n @prefix fb: <http://purl.org/socialparticipation/fb/> .\n @prefix opa: <http://purl.org/socialparticipation/opa/> .\n @prefix ocd: <http://purl.org/socialparticipation/ocd/> .\n @prefix aa: <http://purl.org/socialparticipation/aa/> .\n @prefix gmane: <http://purl.org/socialparticipation/gmane/> .\n\n [] rdf:type fuseki:Server ;\n    fuseki:services (' + ' '.join(['<#service{}>'.format(i) for i in range((((n0 + n1) + n2) + 1))])) + ' ) .\n\n')
    fname = 'configAuto.ttl'
    # context manager guarantees the handle is closed (was open/write/close)
    with open(fname, 'w') as f:
        f.write((header + body))
    c('{} written'.format(fname))
    return execline
# names0 follows the simplest pattern
# names1 follows the pattern with multiple files
# names2 dont follow pattern.
# names0_, names1_ and names2_ are given for appending to initial list.
Loads RDF files on the fly or afterwards (empty=True default)
with P.config.loadFuseki(makeFusekiConfig())<|endoftext|>
|
6d467890383e317f78b18283d09b265c6541c7c2fa6bfbf7a45d2f764670b091
|
def get_average_temperature_during_heating_season(temperature, t_threshold=15):
    """Return the mean temperature over heating-season days.

    A day belongs to the heating season when its daily-average
    temperature lies strictly below ``t_threshold``.

    Parameters
    ----------
    temperature : pd.Series
        temperatures indexed by time (sub-daily resolution is averaged
        per day first)
    t_threshold : float, default 15
        threshold temperature for heating degree days (HDD)

    Returns
    -------
    float
        mean of the daily averages below the threshold
        (NaN if no day is below it)
    """
    daily_mean = temperature.resample('1D').mean()
    heating_days = daily_mean[daily_mean < t_threshold]
    return heating_days.mean()
|
returns average temperature during heating season
input:
temperature : pd.Series(Index=time, values=temperature)
t_threshold : threshold temperature for heating degree days (HDD)
returns:
average temperature
|
scripts/build_retro_cost.py
|
get_average_temperature_during_heating_season
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def get_average_temperature_during_heating_season(temperature, t_threshold=15):
'\n returns average temperature during heating season\n input:\n temperature : pd.Series(Index=time, values=temperature)\n t_threshold : threshold temperature for heating degree days (HDD)\n returns:\n average temperature\n '
t_average_daily = temperature.resample('1D').mean()
return t_average_daily.loc[(t_average_daily < t_threshold)].mean()
|
def get_average_temperature_during_heating_season(temperature, t_threshold=15):
'\n returns average temperature during heating season\n input:\n temperature : pd.Series(Index=time, values=temperature)\n t_threshold : threshold temperature for heating degree days (HDD)\n returns:\n average temperature\n '
t_average_daily = temperature.resample('1D').mean()
return t_average_daily.loc[(t_average_daily < t_threshold)].mean()<|docstring|>returns average temperature during heating season
input:
temperature : pd.Series(Index=time, values=temperature)
t_threshold : threshold temperature for heating degree days (HDD)
returns:
average temperature<|endoftext|>
|
ee48ff87486ec284b69f36751c4abf9f66f6c953aea4dca804e72c0eead4fd78
|
def prepare_building_stock_data():
    """
    Read the building-stock csv (hotmaps) and clean up its format.

    Returns
    -------
    u_values : pd.DataFrame
        current U-values, indexed by (country_code, subsector, bage, type)
    country_iso_dic : dict
        country name -> 2-letter country code
    countries : pd.Index
        2-letter codes of the countries in the clustered population layout
    area_tot : pd.DataFrame
        heated floor area per country and sector [Mm²]
    area : pd.DataFrame
        heated floor area [Mm²] for country, sector, building type and period
    """
    building_data = pd.read_csv(snakemake.input.building_stock, usecols=list(range(13)))
    # harmonise inconsistent labels in the raw csv (trailing spaces,
    # naming variants)
    building_data['type'].replace({'Covered area: heated [Mm²]': 'Heated area [Mm²]', 'Windows ': 'Window', 'Windows': 'Window', 'Walls ': 'Wall', 'Walls': 'Wall', 'Roof ': 'Roof', 'Floor ': 'Floor'}, inplace=True)
    building_data.country_code = building_data.country_code.str.upper()
    building_data['subsector'].replace({'Hotels and Restaurants': 'Hotels and restaurants'}, inplace=True)
    building_data['sector'].replace({'Residential sector': 'residential', 'Service sector': 'services'}, inplace=True)
    # current U-values per country / sector / building type / age band
    u_values = building_data[(building_data.feature.str.contains('U-values') & (building_data.subsector != 'Total'))]
    # building elements present in the data (Roof/Wall/Floor/Window)
    components = list(u_values.type.unique())
    # country full name -> ISO-2 code; add countries missing in the csv
    country_iso_dic = building_data.set_index('country')['country_code'].to_dict()
    country_iso_dic.update({'Norway': 'NO', 'Iceland': 'IS', 'Montenegro': 'ME', 'Serbia': 'RS', 'Albania': 'AL', 'United Kingdom': 'GB', 'Bosnia and Herzegovina': 'BA', 'Switzerland': 'CH'})
    # heated floor area rows only
    area = building_data[((building_data.type == 'Heated area [Mm²]') & (building_data.subsector != 'Total'))]
    area_tot = area.groupby(['country', 'sector']).sum()
    # 'weight' = share of each row within its country/sector total area
    area = pd.concat([area, area.apply((lambda x: (x.value / area_tot.value.loc[(x.country, x.sector)])), axis=1).rename('weight')], axis=1)
    area = area.groupby(['country', 'sector', 'subsector', 'bage']).sum()
    area_tot.rename(index=country_iso_dic, inplace=True)
    # add countries missing in hotmaps from a separate csv; on duplicated
    # (country, sector) entries keep the newly appended value
    # NOTE(review): DataFrame.append was removed in pandas 2.0 -- this code
    # relies on pandas < 2
    area_missing = pd.read_csv(snakemake.input.floor_area_missing, index_col=[0, 1], usecols=[0, 1, 2, 3], encoding='ISO-8859-1')
    area_tot = area_tot.append(area_missing.unstack(level=(- 1)).dropna().stack())
    area_tot = area_tot.loc[(~ area_tot.index.duplicated(keep='last'))]
    # population per country from the clustered layout; 'ct' is the
    # 2-letter country prefix of each node name
    pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)
    pop_layout['ct'] = pop_layout.index.str[:2]
    ct_total = pop_layout.total.groupby(pop_layout['ct']).sum()
    # heated area per capita, used to extrapolate to countries without data
    area_per_pop = area_tot.unstack().reindex(index=ct_total.index).apply((lambda x: (x / ct_total[x.index])))
    missing_area_ct = ct_total.index.difference(area_tot.index.levels[0])
    for ct in missing_area_ct.intersection(ct_total.index):
        # scale the mean area-per-capita of the mapped countries by this
        # country's population (map_for_missings: module-level dict,
        # presumably country -> list of comparable countries -- TODO confirm)
        averaged_data = pd.DataFrame((area_per_pop.value.reindex(map_for_missings[ct]).mean() * ct_total[ct]), columns=['value'])
        index = pd.MultiIndex.from_product([[ct], averaged_data.index.to_list()])
        averaged_data.index = index
        # flag extrapolated entries
        averaged_data['estimated'] = 1
        if (ct not in area_tot.index.levels[0]):
            area_tot = area_tot.append(averaged_data, sort=True)
        else:
            area_tot.loc[averaged_data.index] = averaged_data
    # Poland is covered by a separate U-value csv; build rows in the same
    # shape as the hotmaps data and merge them in
    u_values_PL = pd.read_csv(snakemake.input.u_values_PL)
    u_values_PL.component.replace({'Walls': 'Wall', 'Windows': 'Window'}, inplace=True)
    area_PL = area.loc['Poland'].reset_index()
    data_PL = pd.DataFrame(columns=u_values.columns, index=area_PL.index)
    data_PL['country'] = 'Poland'
    data_PL['country_code'] = 'PL'
    for col in ['sector', 'subsector', 'bage']:
        data_PL[col] = area_PL[col]
    data_PL['btype'] = area_PL['subsector']
    data_PL_final = pd.DataFrame()
    for component in components:
        data_PL['type'] = component
        # look up the Polish U-value for this component / sector / age band
        data_PL['value'] = data_PL.apply((lambda x: u_values_PL[((u_values_PL.component == component) & (u_values_PL.sector == x['sector']))][x['bage']].iloc[0]), axis=1)
        data_PL_final = data_PL_final.append(data_PL)
    u_values = pd.concat([u_values, data_PL_final]).reset_index(drop=True)
    # raise implausibly low window U-values to a floor of 0.8
    u_values.loc[(((u_values.type == 'Window') & (u_values.value < 0.8)), 'value')] = 0.8
    # drop bookkeeping columns and harmonise sector labels
    # (rename_sectors: module-level dict of subsector-name replacements)
    u_values.drop(['topic', 'feature', 'detail', 'estimated', 'unit'], axis=1, inplace=True, errors='ignore')
    u_values.subsector.replace(rename_sectors, inplace=True)
    u_values.btype.replace(rename_sectors, inplace=True)
    # unknown subsectors are treated as multi-family houses (MFH)
    u_values['assumed_subsector'] = u_values.subsector
    u_values.loc[((~ u_values.subsector.isin(rename_sectors.values())), 'assumed_subsector')] = 'MFH'
    u_values.country_code.replace({'UK': 'GB'}, inplace=True)
    # fix a typo present in the raw age-band labels
    u_values.bage.replace({'Berfore 1945': 'Before 1945'}, inplace=True)
    u_values = u_values[(~ u_values.bage.isna())]
    u_values.set_index(['country_code', 'subsector', 'bage', 'type'], inplace=True)
    # restrict the floor-area table to the modelled countries
    countries = ct_total.index
    area_tot = area_tot.loc[countries]
    return (u_values, country_iso_dic, countries, area_tot, area)
|
reads building stock data and cleans up the format, returns
--------
u_values: pd.DataFrame current U-values
area_tot: heated floor area per country and sector [Mm²]
area: heated floor area [Mm²] for country, sector, building
type and period
|
scripts/build_retro_cost.py
|
prepare_building_stock_data
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def prepare_building_stock_data():
'\n reads building stock data and cleans up the format, returns\n --------\n u_values: pd.DataFrame current U-values\n area_tot: heated floor area per country and sector [Mm²]\n area: heated floor area [Mm²] for country, sector, building\n type and period\n\n '
building_data = pd.read_csv(snakemake.input.building_stock, usecols=list(range(13)))
building_data['type'].replace({'Covered area: heated [Mm²]': 'Heated area [Mm²]', 'Windows ': 'Window', 'Windows': 'Window', 'Walls ': 'Wall', 'Walls': 'Wall', 'Roof ': 'Roof', 'Floor ': 'Floor'}, inplace=True)
building_data.country_code = building_data.country_code.str.upper()
building_data['subsector'].replace({'Hotels and Restaurants': 'Hotels and restaurants'}, inplace=True)
building_data['sector'].replace({'Residential sector': 'residential', 'Service sector': 'services'}, inplace=True)
u_values = building_data[(building_data.feature.str.contains('U-values') & (building_data.subsector != 'Total'))]
components = list(u_values.type.unique())
country_iso_dic = building_data.set_index('country')['country_code'].to_dict()
country_iso_dic.update({'Norway': 'NO', 'Iceland': 'IS', 'Montenegro': 'ME', 'Serbia': 'RS', 'Albania': 'AL', 'United Kingdom': 'GB', 'Bosnia and Herzegovina': 'BA', 'Switzerland': 'CH'})
area = building_data[((building_data.type == 'Heated area [Mm²]') & (building_data.subsector != 'Total'))]
area_tot = area.groupby(['country', 'sector']).sum()
area = pd.concat([area, area.apply((lambda x: (x.value / area_tot.value.loc[(x.country, x.sector)])), axis=1).rename('weight')], axis=1)
area = area.groupby(['country', 'sector', 'subsector', 'bage']).sum()
area_tot.rename(index=country_iso_dic, inplace=True)
area_missing = pd.read_csv(snakemake.input.floor_area_missing, index_col=[0, 1], usecols=[0, 1, 2, 3], encoding='ISO-8859-1')
area_tot = area_tot.append(area_missing.unstack(level=(- 1)).dropna().stack())
area_tot = area_tot.loc[(~ area_tot.index.duplicated(keep='last'))]
pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)
pop_layout['ct'] = pop_layout.index.str[:2]
ct_total = pop_layout.total.groupby(pop_layout['ct']).sum()
area_per_pop = area_tot.unstack().reindex(index=ct_total.index).apply((lambda x: (x / ct_total[x.index])))
missing_area_ct = ct_total.index.difference(area_tot.index.levels[0])
for ct in missing_area_ct.intersection(ct_total.index):
averaged_data = pd.DataFrame((area_per_pop.value.reindex(map_for_missings[ct]).mean() * ct_total[ct]), columns=['value'])
index = pd.MultiIndex.from_product([[ct], averaged_data.index.to_list()])
averaged_data.index = index
averaged_data['estimated'] = 1
if (ct not in area_tot.index.levels[0]):
area_tot = area_tot.append(averaged_data, sort=True)
else:
area_tot.loc[averaged_data.index] = averaged_data
u_values_PL = pd.read_csv(snakemake.input.u_values_PL)
u_values_PL.component.replace({'Walls': 'Wall', 'Windows': 'Window'}, inplace=True)
area_PL = area.loc['Poland'].reset_index()
data_PL = pd.DataFrame(columns=u_values.columns, index=area_PL.index)
data_PL['country'] = 'Poland'
data_PL['country_code'] = 'PL'
for col in ['sector', 'subsector', 'bage']:
data_PL[col] = area_PL[col]
data_PL['btype'] = area_PL['subsector']
data_PL_final = pd.DataFrame()
for component in components:
data_PL['type'] = component
data_PL['value'] = data_PL.apply((lambda x: u_values_PL[((u_values_PL.component == component) & (u_values_PL.sector == x['sector']))][x['bage']].iloc[0]), axis=1)
data_PL_final = data_PL_final.append(data_PL)
u_values = pd.concat([u_values, data_PL_final]).reset_index(drop=True)
u_values.loc[(((u_values.type == 'Window') & (u_values.value < 0.8)), 'value')] = 0.8
u_values.drop(['topic', 'feature', 'detail', 'estimated', 'unit'], axis=1, inplace=True, errors='ignore')
u_values.subsector.replace(rename_sectors, inplace=True)
u_values.btype.replace(rename_sectors, inplace=True)
u_values['assumed_subsector'] = u_values.subsector
u_values.loc[((~ u_values.subsector.isin(rename_sectors.values())), 'assumed_subsector')] = 'MFH'
u_values.country_code.replace({'UK': 'GB'}, inplace=True)
u_values.bage.replace({'Berfore 1945': 'Before 1945'}, inplace=True)
u_values = u_values[(~ u_values.bage.isna())]
u_values.set_index(['country_code', 'subsector', 'bage', 'type'], inplace=True)
countries = ct_total.index
area_tot = area_tot.loc[countries]
return (u_values, country_iso_dic, countries, area_tot, area)
|
def prepare_building_stock_data():
'\n reads building stock data and cleans up the format, returns\n --------\n u_values: pd.DataFrame current U-values\n area_tot: heated floor area per country and sector [Mm²]\n area: heated floor area [Mm²] for country, sector, building\n type and period\n\n '
building_data = pd.read_csv(snakemake.input.building_stock, usecols=list(range(13)))
building_data['type'].replace({'Covered area: heated [Mm²]': 'Heated area [Mm²]', 'Windows ': 'Window', 'Windows': 'Window', 'Walls ': 'Wall', 'Walls': 'Wall', 'Roof ': 'Roof', 'Floor ': 'Floor'}, inplace=True)
building_data.country_code = building_data.country_code.str.upper()
building_data['subsector'].replace({'Hotels and Restaurants': 'Hotels and restaurants'}, inplace=True)
building_data['sector'].replace({'Residential sector': 'residential', 'Service sector': 'services'}, inplace=True)
u_values = building_data[(building_data.feature.str.contains('U-values') & (building_data.subsector != 'Total'))]
components = list(u_values.type.unique())
country_iso_dic = building_data.set_index('country')['country_code'].to_dict()
country_iso_dic.update({'Norway': 'NO', 'Iceland': 'IS', 'Montenegro': 'ME', 'Serbia': 'RS', 'Albania': 'AL', 'United Kingdom': 'GB', 'Bosnia and Herzegovina': 'BA', 'Switzerland': 'CH'})
area = building_data[((building_data.type == 'Heated area [Mm²]') & (building_data.subsector != 'Total'))]
area_tot = area.groupby(['country', 'sector']).sum()
area = pd.concat([area, area.apply((lambda x: (x.value / area_tot.value.loc[(x.country, x.sector)])), axis=1).rename('weight')], axis=1)
area = area.groupby(['country', 'sector', 'subsector', 'bage']).sum()
area_tot.rename(index=country_iso_dic, inplace=True)
area_missing = pd.read_csv(snakemake.input.floor_area_missing, index_col=[0, 1], usecols=[0, 1, 2, 3], encoding='ISO-8859-1')
area_tot = area_tot.append(area_missing.unstack(level=(- 1)).dropna().stack())
area_tot = area_tot.loc[(~ area_tot.index.duplicated(keep='last'))]
pop_layout = pd.read_csv(snakemake.input.clustered_pop_layout, index_col=0)
pop_layout['ct'] = pop_layout.index.str[:2]
ct_total = pop_layout.total.groupby(pop_layout['ct']).sum()
area_per_pop = area_tot.unstack().reindex(index=ct_total.index).apply((lambda x: (x / ct_total[x.index])))
missing_area_ct = ct_total.index.difference(area_tot.index.levels[0])
for ct in missing_area_ct.intersection(ct_total.index):
averaged_data = pd.DataFrame((area_per_pop.value.reindex(map_for_missings[ct]).mean() * ct_total[ct]), columns=['value'])
index = pd.MultiIndex.from_product([[ct], averaged_data.index.to_list()])
averaged_data.index = index
averaged_data['estimated'] = 1
if (ct not in area_tot.index.levels[0]):
area_tot = area_tot.append(averaged_data, sort=True)
else:
area_tot.loc[averaged_data.index] = averaged_data
u_values_PL = pd.read_csv(snakemake.input.u_values_PL)
u_values_PL.component.replace({'Walls': 'Wall', 'Windows': 'Window'}, inplace=True)
area_PL = area.loc['Poland'].reset_index()
data_PL = pd.DataFrame(columns=u_values.columns, index=area_PL.index)
data_PL['country'] = 'Poland'
data_PL['country_code'] = 'PL'
for col in ['sector', 'subsector', 'bage']:
data_PL[col] = area_PL[col]
data_PL['btype'] = area_PL['subsector']
data_PL_final = pd.DataFrame()
for component in components:
data_PL['type'] = component
data_PL['value'] = data_PL.apply((lambda x: u_values_PL[((u_values_PL.component == component) & (u_values_PL.sector == x['sector']))][x['bage']].iloc[0]), axis=1)
data_PL_final = data_PL_final.append(data_PL)
u_values = pd.concat([u_values, data_PL_final]).reset_index(drop=True)
u_values.loc[(((u_values.type == 'Window') & (u_values.value < 0.8)), 'value')] = 0.8
u_values.drop(['topic', 'feature', 'detail', 'estimated', 'unit'], axis=1, inplace=True, errors='ignore')
u_values.subsector.replace(rename_sectors, inplace=True)
u_values.btype.replace(rename_sectors, inplace=True)
u_values['assumed_subsector'] = u_values.subsector
u_values.loc[((~ u_values.subsector.isin(rename_sectors.values())), 'assumed_subsector')] = 'MFH'
u_values.country_code.replace({'UK': 'GB'}, inplace=True)
u_values.bage.replace({'Berfore 1945': 'Before 1945'}, inplace=True)
u_values = u_values[(~ u_values.bage.isna())]
u_values.set_index(['country_code', 'subsector', 'bage', 'type'], inplace=True)
countries = ct_total.index
area_tot = area_tot.loc[countries]
return (u_values, country_iso_dic, countries, area_tot, area)<|docstring|>reads building stock data and cleans up the format, returns
--------
u_values: pd.DataFrame current U-values
area_tot: heated floor area per country and sector [Mm²]
area: heated floor area [Mm²] for country, sector, building
type and period<|endoftext|>
|
b42347eaf818324578206c787e22fa1fdcd5551e9138df6355f304aa062cdcee
|
def prepare_building_topology(u_values, same_building_topology=True):
    """
    Read in typical building topologies (e.g. average surface of building
    elements) and typical losses through thermal bridging and air
    ventilation from the TABULA data.

    Parameters
    ----------
    u_values : pd.DataFrame
        current U-values indexed by (country_code, subsector, bage, type);
        only its index is used here, to align the TABULA data
    same_building_topology : bool, default True
        if True, replace the country-specific surface areas by the average
        topology per (building size class, age band)

    Returns
    -------
    pd.DataFrame
        TABULA topology data aligned to the hotmaps index, including the
        element areas and the total envelope area 'A_envelope'
    """
    # TABULA csv: rows 1-10 are header cruft, data ends at row 2974
    data_tabula = pd.read_csv(snakemake.input.data_tabula, skiprows=(lambda x: (x in range(1, 11))), low_memory=False).iloc[:2974]
    parameters = ['Code_Country', 'Code_BuildingSizeClass', 'Year1_Building', 'Year2_Building', 'A_C_Ref', 'A_Roof_1', 'A_Roof_2', 'A_Wall_1', 'A_Wall_2', 'A_Floor_1', 'A_Floor_2', 'A_Window_1', 'A_Window_2', 'n_air_use', 'n_air_infiltration', 'delta_U_ThermalBridging', 'F_red_temp', 'Number_BuildingVariant']
    data_tabula = data_tabula[parameters]
    building_elements = ['Roof', 'Wall', 'Floor', 'Window']
    # merge the '_1'/'_2' sub-areas of each element into a single column
    for element in building_elements:
        elements = ['A_{}_1'.format(element), 'A_{}_2'.format(element)]
        data_tabula = pd.concat([data_tabula.drop(elements, axis=1), data_tabula[elements].sum(axis=1).rename('A_{}'.format(element))], axis=1)
    # keep only rows where every area is non-zero
    data_tabula = data_tabula.loc[pd.concat([(data_tabula[col] != 0) for col in ['A_Wall', 'A_Floor', 'A_Window', 'A_Roof', 'A_C_Ref']], axis=1).all(axis=1)]
    # restrict to building variants 1-3 and the four size classes
    data_tabula = data_tabula[data_tabula.Number_BuildingVariant.isin([1, 2, 3])]
    data_tabula = data_tabula[data_tabula.Code_BuildingSizeClass.isin(['AB', 'SFH', 'MFH', 'TH'])]
    def map_periods(build_year1, build_year2):
        # map a TABULA construction-year range to the closest hotmaps age band
        periods = {(0, 1945): 'Before 1945', (1945, 1969): '1945 - 1969', (1970, 1979): '1970 - 1979', (1980, 1989): '1980 - 1989', (1990, 1999): '1990 - 1999', (2000, 2010): '2000 - 2010', (2010, 10000): 'Post 2010'}
        minimum = 100000.0
        for key in periods:
            # distance between the given year range and each band's bounds
            diff = (abs((build_year1 - key[0])) + abs((build_year2 - key[1])))
            if (diff < minimum):
                minimum = diff
                searched_period = periods[key]
        return searched_period
    data_tabula['bage'] = data_tabula.apply((lambda x: map_periods(x.Year1_Building, x.Year2_Building)), axis=1)
    data_tabula = data_tabula.set_index(['Code_Country', 'Code_BuildingSizeClass', 'bage', 'Number_BuildingVariant'])
    area_cols = ['A_C_Ref', 'A_Floor', 'A_Roof', 'A_Wall', 'A_Window']
    # average topology per (size class, age band); terraced houses (TH)
    # are counted towards SFH
    typical_building = data_tabula.groupby(level=[1, 2]).mean().rename(index={'TH': 'SFH'}).groupby(level=[0, 1]).mean()
    data_tabula = data_tabula[(~ data_tabula.index.duplicated(keep='first'))]
    # align the TABULA data to the hotmaps (country, subsector, age) index
    hotmaps_data_i = u_values.reset_index().set_index(['country_code', 'assumed_subsector', 'bage']).index
    missing_ct = data_tabula.unstack().reindex(hotmaps_data_i.unique())
    # fill gaps: first with group means, then with the overall mean
    # NOTE(review): groupby(level=[0, 1, 2]) groups by all three index
    # levels, which is an identity grouping after the reindex above --
    # possibly fewer levels were intended; confirm
    cols_constant = ['Year1_Building', 'Year2_Building', 'A_C_Ref', 'A_Roof', 'A_Wall', 'A_Floor', 'A_Window']
    for col in cols_constant:
        missing_ct[col] = missing_ct[col].combine_first(missing_ct[col].groupby(level=[0, 1, 2]).mean())
    missing_ct = missing_ct.unstack().unstack().fillna(missing_ct.unstack().unstack().mean())
    data_tabula = missing_ct.stack(level=[(- 1), (- 2), (- 3)], dropna=False)
    if same_building_topology:
        # overwrite the country-specific areas with the typical topology
        typical_building = typical_building.reindex(data_tabula.droplevel(0).index).set_index(data_tabula.index)
        data_tabula.update(typical_building[area_cols])
    # total envelope surface = sum of all element areas
    data_tabula['A_envelope'] = data_tabula[['A_{}'.format(element) for element in building_elements]].sum(axis=1)
    return data_tabula
|
reads in typical building topologies (e.g. average surface of building elements)
and typical losses through thermal bridging and air ventilation
|
scripts/build_retro_cost.py
|
prepare_building_topology
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def prepare_building_topology(u_values, same_building_topology=True):
'\n reads in typical building topologies (e.g. average surface of building elements)\n and typical losses trough thermal bridging and air ventilation\n '
data_tabula = pd.read_csv(snakemake.input.data_tabula, skiprows=(lambda x: (x in range(1, 11))), low_memory=False).iloc[:2974]
parameters = ['Code_Country', 'Code_BuildingSizeClass', 'Year1_Building', 'Year2_Building', 'A_C_Ref', 'A_Roof_1', 'A_Roof_2', 'A_Wall_1', 'A_Wall_2', 'A_Floor_1', 'A_Floor_2', 'A_Window_1', 'A_Window_2', 'n_air_use', 'n_air_infiltration', 'delta_U_ThermalBridging', 'F_red_temp', 'Number_BuildingVariant']
data_tabula = data_tabula[parameters]
building_elements = ['Roof', 'Wall', 'Floor', 'Window']
for element in building_elements:
elements = ['A_{}_1'.format(element), 'A_{}_2'.format(element)]
data_tabula = pd.concat([data_tabula.drop(elements, axis=1), data_tabula[elements].sum(axis=1).rename('A_{}'.format(element))], axis=1)
data_tabula = data_tabula.loc[pd.concat([(data_tabula[col] != 0) for col in ['A_Wall', 'A_Floor', 'A_Window', 'A_Roof', 'A_C_Ref']], axis=1).all(axis=1)]
data_tabula = data_tabula[data_tabula.Number_BuildingVariant.isin([1, 2, 3])]
data_tabula = data_tabula[data_tabula.Code_BuildingSizeClass.isin(['AB', 'SFH', 'MFH', 'TH'])]
def map_periods(build_year1, build_year2):
periods = {(0, 1945): 'Before 1945', (1945, 1969): '1945 - 1969', (1970, 1979): '1970 - 1979', (1980, 1989): '1980 - 1989', (1990, 1999): '1990 - 1999', (2000, 2010): '2000 - 2010', (2010, 10000): 'Post 2010'}
minimum = 100000.0
for key in periods:
diff = (abs((build_year1 - key[0])) + abs((build_year2 - key[1])))
if (diff < minimum):
minimum = diff
searched_period = periods[key]
return searched_period
data_tabula['bage'] = data_tabula.apply((lambda x: map_periods(x.Year1_Building, x.Year2_Building)), axis=1)
data_tabula = data_tabula.set_index(['Code_Country', 'Code_BuildingSizeClass', 'bage', 'Number_BuildingVariant'])
area_cols = ['A_C_Ref', 'A_Floor', 'A_Roof', 'A_Wall', 'A_Window']
typical_building = data_tabula.groupby(level=[1, 2]).mean().rename(index={'TH': 'SFH'}).groupby(level=[0, 1]).mean()
data_tabula = data_tabula[(~ data_tabula.index.duplicated(keep='first'))]
hotmaps_data_i = u_values.reset_index().set_index(['country_code', 'assumed_subsector', 'bage']).index
missing_ct = data_tabula.unstack().reindex(hotmaps_data_i.unique())
cols_constant = ['Year1_Building', 'Year2_Building', 'A_C_Ref', 'A_Roof', 'A_Wall', 'A_Floor', 'A_Window']
for col in cols_constant:
missing_ct[col] = missing_ct[col].combine_first(missing_ct[col].groupby(level=[0, 1, 2]).mean())
missing_ct = missing_ct.unstack().unstack().fillna(missing_ct.unstack().unstack().mean())
data_tabula = missing_ct.stack(level=[(- 1), (- 2), (- 3)], dropna=False)
if same_building_topology:
typical_building = typical_building.reindex(data_tabula.droplevel(0).index).set_index(data_tabula.index)
data_tabula.update(typical_building[area_cols])
data_tabula['A_envelope'] = data_tabula[['A_{}'.format(element) for element in building_elements]].sum(axis=1)
return data_tabula
|
def prepare_building_topology(u_values, same_building_topology=True):
'\n reads in typical building topologies (e.g. average surface of building elements)\n and typical losses trough thermal bridging and air ventilation\n '
data_tabula = pd.read_csv(snakemake.input.data_tabula, skiprows=(lambda x: (x in range(1, 11))), low_memory=False).iloc[:2974]
parameters = ['Code_Country', 'Code_BuildingSizeClass', 'Year1_Building', 'Year2_Building', 'A_C_Ref', 'A_Roof_1', 'A_Roof_2', 'A_Wall_1', 'A_Wall_2', 'A_Floor_1', 'A_Floor_2', 'A_Window_1', 'A_Window_2', 'n_air_use', 'n_air_infiltration', 'delta_U_ThermalBridging', 'F_red_temp', 'Number_BuildingVariant']
data_tabula = data_tabula[parameters]
building_elements = ['Roof', 'Wall', 'Floor', 'Window']
for element in building_elements:
elements = ['A_{}_1'.format(element), 'A_{}_2'.format(element)]
data_tabula = pd.concat([data_tabula.drop(elements, axis=1), data_tabula[elements].sum(axis=1).rename('A_{}'.format(element))], axis=1)
data_tabula = data_tabula.loc[pd.concat([(data_tabula[col] != 0) for col in ['A_Wall', 'A_Floor', 'A_Window', 'A_Roof', 'A_C_Ref']], axis=1).all(axis=1)]
data_tabula = data_tabula[data_tabula.Number_BuildingVariant.isin([1, 2, 3])]
data_tabula = data_tabula[data_tabula.Code_BuildingSizeClass.isin(['AB', 'SFH', 'MFH', 'TH'])]
def map_periods(build_year1, build_year2):
periods = {(0, 1945): 'Before 1945', (1945, 1969): '1945 - 1969', (1970, 1979): '1970 - 1979', (1980, 1989): '1980 - 1989', (1990, 1999): '1990 - 1999', (2000, 2010): '2000 - 2010', (2010, 10000): 'Post 2010'}
minimum = 100000.0
for key in periods:
diff = (abs((build_year1 - key[0])) + abs((build_year2 - key[1])))
if (diff < minimum):
minimum = diff
searched_period = periods[key]
return searched_period
data_tabula['bage'] = data_tabula.apply((lambda x: map_periods(x.Year1_Building, x.Year2_Building)), axis=1)
data_tabula = data_tabula.set_index(['Code_Country', 'Code_BuildingSizeClass', 'bage', 'Number_BuildingVariant'])
area_cols = ['A_C_Ref', 'A_Floor', 'A_Roof', 'A_Wall', 'A_Window']
typical_building = data_tabula.groupby(level=[1, 2]).mean().rename(index={'TH': 'SFH'}).groupby(level=[0, 1]).mean()
data_tabula = data_tabula[(~ data_tabula.index.duplicated(keep='first'))]
hotmaps_data_i = u_values.reset_index().set_index(['country_code', 'assumed_subsector', 'bage']).index
missing_ct = data_tabula.unstack().reindex(hotmaps_data_i.unique())
cols_constant = ['Year1_Building', 'Year2_Building', 'A_C_Ref', 'A_Roof', 'A_Wall', 'A_Floor', 'A_Window']
for col in cols_constant:
missing_ct[col] = missing_ct[col].combine_first(missing_ct[col].groupby(level=[0, 1, 2]).mean())
missing_ct = missing_ct.unstack().unstack().fillna(missing_ct.unstack().unstack().mean())
data_tabula = missing_ct.stack(level=[(- 1), (- 2), (- 3)], dropna=False)
if same_building_topology:
typical_building = typical_building.reindex(data_tabula.droplevel(0).index).set_index(data_tabula.index)
data_tabula.update(typical_building[area_cols])
data_tabula['A_envelope'] = data_tabula[['A_{}'.format(element) for element in building_elements]].sum(axis=1)
return data_tabula<|docstring|>reads in typical building topologies (e.g. average surface of building elements)
and typical losses trough thermal bridging and air ventilation<|endoftext|>
|
e6a1b6ce72beb621b960c47079b5eb06a66e29e542b0fee7a1bc2cc12dda7b4d
|
def prepare_cost_retro(country_iso_dic):
'\n read and prepare retro costs, annualises them if annualise_cost=True\n '
cost_retro = pd.read_csv(snakemake.input.cost_germany, nrows=4, index_col=0, usecols=[0, 1, 2, 3])
cost_retro.rename((lambda x: x.capitalize()), inplace=True)
window_assumptions = pd.read_csv(snakemake.input.window_assumptions, skiprows=[1], usecols=[0, 1, 2, 3], nrows=2)
if annualise_cost:
cost_retro[['cost_fix', 'cost_var']] = cost_retro[['cost_fix', 'cost_var']].apply((lambda x: ((x * interest_rate) / (1 - ((1 + interest_rate) ** (- cost_retro.loc[(x.index, 'life_time')]))))))
if construction_index:
cost_w = pd.read_csv(snakemake.input.construction_index, skiprows=3, nrows=32, index_col=0)
cost_w = (cost_w['2018'] / cost_w.loc[('Germany', '2018')]).rename(index=country_iso_dic)
else:
cost_w = None
if tax_weighting:
tax_w = pd.read_csv(snakemake.input.tax_w, header=12, nrows=39, index_col=0, usecols=[0, 4])
tax_w.rename(index=country_iso_dic, inplace=True)
tax_w = tax_w.apply(pd.to_numeric, errors='coerce').iloc[(:, 0)]
tax_w.dropna(inplace=True)
else:
tax_w = None
return (cost_retro, window_assumptions, cost_w, tax_w)
|
read and prepare retro costs, annualises them if annualise_cost=True
|
scripts/build_retro_cost.py
|
prepare_cost_retro
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def prepare_cost_retro(country_iso_dic):
'\n \n '
cost_retro = pd.read_csv(snakemake.input.cost_germany, nrows=4, index_col=0, usecols=[0, 1, 2, 3])
cost_retro.rename((lambda x: x.capitalize()), inplace=True)
window_assumptions = pd.read_csv(snakemake.input.window_assumptions, skiprows=[1], usecols=[0, 1, 2, 3], nrows=2)
if annualise_cost:
cost_retro[['cost_fix', 'cost_var']] = cost_retro[['cost_fix', 'cost_var']].apply((lambda x: ((x * interest_rate) / (1 - ((1 + interest_rate) ** (- cost_retro.loc[(x.index, 'life_time')]))))))
if construction_index:
cost_w = pd.read_csv(snakemake.input.construction_index, skiprows=3, nrows=32, index_col=0)
cost_w = (cost_w['2018'] / cost_w.loc[('Germany', '2018')]).rename(index=country_iso_dic)
else:
cost_w = None
if tax_weighting:
tax_w = pd.read_csv(snakemake.input.tax_w, header=12, nrows=39, index_col=0, usecols=[0, 4])
tax_w.rename(index=country_iso_dic, inplace=True)
tax_w = tax_w.apply(pd.to_numeric, errors='coerce').iloc[(:, 0)]
tax_w.dropna(inplace=True)
else:
tax_w = None
return (cost_retro, window_assumptions, cost_w, tax_w)
|
def prepare_cost_retro(country_iso_dic):
'\n \n '
cost_retro = pd.read_csv(snakemake.input.cost_germany, nrows=4, index_col=0, usecols=[0, 1, 2, 3])
cost_retro.rename((lambda x: x.capitalize()), inplace=True)
window_assumptions = pd.read_csv(snakemake.input.window_assumptions, skiprows=[1], usecols=[0, 1, 2, 3], nrows=2)
if annualise_cost:
cost_retro[['cost_fix', 'cost_var']] = cost_retro[['cost_fix', 'cost_var']].apply((lambda x: ((x * interest_rate) / (1 - ((1 + interest_rate) ** (- cost_retro.loc[(x.index, 'life_time')]))))))
if construction_index:
cost_w = pd.read_csv(snakemake.input.construction_index, skiprows=3, nrows=32, index_col=0)
cost_w = (cost_w['2018'] / cost_w.loc[('Germany', '2018')]).rename(index=country_iso_dic)
else:
cost_w = None
if tax_weighting:
tax_w = pd.read_csv(snakemake.input.tax_w, header=12, nrows=39, index_col=0, usecols=[0, 4])
tax_w.rename(index=country_iso_dic, inplace=True)
tax_w = tax_w.apply(pd.to_numeric, errors='coerce').iloc[(:, 0)]
tax_w.dropna(inplace=True)
else:
tax_w = None
return (cost_retro, window_assumptions, cost_w, tax_w)<|docstring|>read and prepare retro costs, annualises them if annualise_cost=True<|endoftext|>
|
3d610cc84d6efebba59d8d070641d518f71d2d443c980c99964f83ac3b49125b
|
def prepare_temperature_data():
'\n returns the temperature dependent data for each country:\n\n d_heat : length of heating season pd.Series(index=countries) [days/year]\n on those days, daily average temperature is below\n threshold temperature t_threshold\n temperature_factor : accumulated difference between internal and\n external temperature pd.Series(index=countries) ([K]) * [days/year]\n\n temperature_factor = (t_threshold - temperature_average_d_heat) * d_heat * 1/365\n\n '
temperature = xr.open_dataarray(snakemake.input.air_temperature).to_pandas()
d_heat = (temperature.groupby(temperature.columns.str[:2], axis=1).mean().resample('1D').mean() < t_threshold).sum()
temperature_average_d_heat = temperature.groupby(temperature.columns.str[:2], axis=1).mean().apply((lambda x: get_average_temperature_during_heating_season(x, t_threshold=15)))
temperature_factor = ((((t_threshold - temperature_average_d_heat) * d_heat) * 1) / 365)
return (d_heat, temperature_factor)
|
returns the temperature dependent data for each country:
d_heat : length of heating season pd.Series(index=countries) [days/year]
on those days, daily average temperature is below
threshold temperature t_threshold
temperature_factor : accumulated difference between internal and
external temperature pd.Series(index=countries) ([K]) * [days/year]
temperature_factor = (t_threshold - temperature_average_d_heat) * d_heat * 1/365
|
scripts/build_retro_cost.py
|
prepare_temperature_data
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def prepare_temperature_data():
'\n returns the temperature dependent data for each country:\n\n d_heat : length of heating season pd.Series(index=countries) [days/year]\n on those days, daily average temperature is below\n threshold temperature t_threshold\n temperature_factor : accumulated difference between internal and\n external temperature pd.Series(index=countries) ([K]) * [days/year]\n\n temperature_factor = (t_threshold - temperature_average_d_heat) * d_heat * 1/365\n\n '
temperature = xr.open_dataarray(snakemake.input.air_temperature).to_pandas()
d_heat = (temperature.groupby(temperature.columns.str[:2], axis=1).mean().resample('1D').mean() < t_threshold).sum()
temperature_average_d_heat = temperature.groupby(temperature.columns.str[:2], axis=1).mean().apply((lambda x: get_average_temperature_during_heating_season(x, t_threshold=15)))
temperature_factor = ((((t_threshold - temperature_average_d_heat) * d_heat) * 1) / 365)
return (d_heat, temperature_factor)
|
def prepare_temperature_data():
'\n returns the temperature dependent data for each country:\n\n d_heat : length of heating season pd.Series(index=countries) [days/year]\n on those days, daily average temperature is below\n threshold temperature t_threshold\n temperature_factor : accumulated difference between internal and\n external temperature pd.Series(index=countries) ([K]) * [days/year]\n\n temperature_factor = (t_threshold - temperature_average_d_heat) * d_heat * 1/365\n\n '
temperature = xr.open_dataarray(snakemake.input.air_temperature).to_pandas()
d_heat = (temperature.groupby(temperature.columns.str[:2], axis=1).mean().resample('1D').mean() < t_threshold).sum()
temperature_average_d_heat = temperature.groupby(temperature.columns.str[:2], axis=1).mean().apply((lambda x: get_average_temperature_during_heating_season(x, t_threshold=15)))
temperature_factor = ((((t_threshold - temperature_average_d_heat) * d_heat) * 1) / 365)
return (d_heat, temperature_factor)<|docstring|>returns the temperature dependent data for each country:
d_heat : length of heating season pd.Series(index=countries) [days/year]
on those days, daily average temperature is below
threshold temperature t_threshold
temperature_factor : accumulated difference between internal and
external temperature pd.Series(index=countries) ([K]) * [days/year]
temperature_factor = (t_threshold - temperature_average_d_heat) * d_heat * 1/365<|endoftext|>
|
268131e0917f065a109db081a2631f27e106f3a17ee8027e7e2e535b8d0b5cb1
|
def window_limit(l, window_assumptions):
'\n define limit u value from which on window is retrofitted\n '
m = (window_assumptions.diff()['u_limit'] / window_assumptions.diff()['strength']).dropna().iloc[0]
a = (window_assumptions['u_limit'][0] - (m * window_assumptions['strength'][0]))
return ((m * l) + a)
|
define limit u value from which on window is retrofitted
|
scripts/build_retro_cost.py
|
window_limit
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def window_limit(l, window_assumptions):
'\n \n '
m = (window_assumptions.diff()['u_limit'] / window_assumptions.diff()['strength']).dropna().iloc[0]
a = (window_assumptions['u_limit'][0] - (m * window_assumptions['strength'][0]))
return ((m * l) + a)
|
def window_limit(l, window_assumptions):
'\n \n '
m = (window_assumptions.diff()['u_limit'] / window_assumptions.diff()['strength']).dropna().iloc[0]
a = (window_assumptions['u_limit'][0] - (m * window_assumptions['strength'][0]))
return ((m * l) + a)<|docstring|>define limit u value from which on window is retrofitted<|endoftext|>
|
eabcf6c49928911bcc4db10b3b047f63f603c62105bf06144778e3aed7a7d582
|
def u_retro_window(l, window_assumptions):
'\n define retrofitting value depending on renovation strength\n '
m = (window_assumptions.diff()['u_value'] / window_assumptions.diff()['strength']).dropna().iloc[0]
a = (window_assumptions['u_value'][0] - (m * window_assumptions['strength'][0]))
return max(((m * l) + a), 0.8)
|
define retrofitting value depending on renovation strength
|
scripts/build_retro_cost.py
|
u_retro_window
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def u_retro_window(l, window_assumptions):
'\n \n '
m = (window_assumptions.diff()['u_value'] / window_assumptions.diff()['strength']).dropna().iloc[0]
a = (window_assumptions['u_value'][0] - (m * window_assumptions['strength'][0]))
return max(((m * l) + a), 0.8)
|
def u_retro_window(l, window_assumptions):
'\n \n '
m = (window_assumptions.diff()['u_value'] / window_assumptions.diff()['strength']).dropna().iloc[0]
a = (window_assumptions['u_value'][0] - (m * window_assumptions['strength'][0]))
return max(((m * l) + a), 0.8)<|docstring|>define retrofitting value depending on renovation strength<|endoftext|>
|
203ed3086d31ceac187f783e89ded7515c660ad2dbb1bda65afbef1ed926fc80
|
def window_cost(u, cost_retro, window_assumptions):
'\n get costs for new windows depending on u value\n\n '
m = (window_assumptions.diff()['cost'] / window_assumptions.diff()['u_value']).dropna().iloc[0]
a = (window_assumptions['cost'][0] - (m * window_assumptions['u_value'][0]))
window_cost = ((m * u) + a)
if annualise_cost:
window_cost = ((window_cost * interest_rate) / (1 - ((1 + interest_rate) ** (- cost_retro.loc[('Window', 'life_time')]))))
return window_cost
|
get costs for new windows depending on u value
|
scripts/build_retro_cost.py
|
window_cost
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def window_cost(u, cost_retro, window_assumptions):
'\n \n\n '
m = (window_assumptions.diff()['cost'] / window_assumptions.diff()['u_value']).dropna().iloc[0]
a = (window_assumptions['cost'][0] - (m * window_assumptions['u_value'][0]))
window_cost = ((m * u) + a)
if annualise_cost:
window_cost = ((window_cost * interest_rate) / (1 - ((1 + interest_rate) ** (- cost_retro.loc[('Window', 'life_time')]))))
return window_cost
|
def window_cost(u, cost_retro, window_assumptions):
'\n \n\n '
m = (window_assumptions.diff()['cost'] / window_assumptions.diff()['u_value']).dropna().iloc[0]
a = (window_assumptions['cost'][0] - (m * window_assumptions['u_value'][0]))
window_cost = ((m * u) + a)
if annualise_cost:
window_cost = ((window_cost * interest_rate) / (1 - ((1 + interest_rate) ** (- cost_retro.loc[('Window', 'life_time')]))))
return window_cost<|docstring|>get costs for new windows depending on u value<|endoftext|>
|
fad3dba55e67196288694dc39a8d6d875f93849eff79f8cafa19bc2a0b7543b2
|
def calculate_costs(u_values, l, cost_retro, window_assumptions):
'\n returns costs for a given retrofitting strength weighted by the average\n surface/volume ratio of the component for each building type\n '
return u_values.apply((lambda x: (((((((cost_retro.loc[(x.name[3], 'cost_var')] * 100) * float(l)) * l_weight.loc[x.name[3]][0]) + cost_retro.loc[(x.name[3], 'cost_fix')]) * x.A_element) / x.A_C_Ref) if (x.name[3] != 'Window') else (((window_cost(x['new_U_{}'.format(l)], cost_retro, window_assumptions) * x.A_element) / x.A_C_Ref) if (x.value > window_limit(float(l), window_assumptions)) else 0))), axis=1)
|
returns costs for a given retrofitting strength weighted by the average
surface/volume ratio of the component for each building type
|
scripts/build_retro_cost.py
|
calculate_costs
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def calculate_costs(u_values, l, cost_retro, window_assumptions):
'\n returns costs for a given retrofitting strength weighted by the average\n surface/volume ratio of the component for each building type\n '
return u_values.apply((lambda x: (((((((cost_retro.loc[(x.name[3], 'cost_var')] * 100) * float(l)) * l_weight.loc[x.name[3]][0]) + cost_retro.loc[(x.name[3], 'cost_fix')]) * x.A_element) / x.A_C_Ref) if (x.name[3] != 'Window') else (((window_cost(x['new_U_{}'.format(l)], cost_retro, window_assumptions) * x.A_element) / x.A_C_Ref) if (x.value > window_limit(float(l), window_assumptions)) else 0))), axis=1)
|
def calculate_costs(u_values, l, cost_retro, window_assumptions):
'\n returns costs for a given retrofitting strength weighted by the average\n surface/volume ratio of the component for each building type\n '
return u_values.apply((lambda x: (((((((cost_retro.loc[(x.name[3], 'cost_var')] * 100) * float(l)) * l_weight.loc[x.name[3]][0]) + cost_retro.loc[(x.name[3], 'cost_fix')]) * x.A_element) / x.A_C_Ref) if (x.name[3] != 'Window') else (((window_cost(x['new_U_{}'.format(l)], cost_retro, window_assumptions) * x.A_element) / x.A_C_Ref) if (x.value > window_limit(float(l), window_assumptions)) else 0))), axis=1)<|docstring|>returns costs for a given retrofitting strength weighted by the average
surface/volume ratio of the component for each building type<|endoftext|>
|
466859d5c1e30bf77faddeaa3faa214757a7176c9bec1f20b0fe1c8aafb00dee
|
def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035):
'\n calculate U-values after building retrofitting, depending on the old\n U-values (u_values). This is for simple insulation measuers, adding\n an additional layer of insulation.\n\n They depend for the components Roof, Wall, Floor on the additional\n insulation thickness (l), and the weighting for the corresponding\n component (l_weight).\n\n Windows are renovated to new ones with U-value (function: u_retro_window(l))\n only if the are worse insulated than a certain limit value\n (function: window_limit).\n\n Parameters\n ----------\n u_values: pd.DataFrame\n l: string\n l_weight: pd.DataFrame (component, weight)\n k: thermal conductivity\n\n '
return u_values.apply((lambda x: ((k / ((k / x.value) + (float(l) * l_weight.loc[x.name[3]]))) if (x.name[3] != 'Window') else (min(x.value, u_retro_window(float(l), window_assumptions)) if (x.value > window_limit(float(l), window_assumptions)) else x.value))), axis=1)
|
calculate U-values after building retrofitting, depending on the old
U-values (u_values). This is for simple insulation measuers, adding
an additional layer of insulation.
They depend for the components Roof, Wall, Floor on the additional
insulation thickness (l), and the weighting for the corresponding
component (l_weight).
Windows are renovated to new ones with U-value (function: u_retro_window(l))
only if the are worse insulated than a certain limit value
(function: window_limit).
Parameters
----------
u_values: pd.DataFrame
l: string
l_weight: pd.DataFrame (component, weight)
k: thermal conductivity
|
scripts/build_retro_cost.py
|
calculate_new_u
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035):
'\n calculate U-values after building retrofitting, depending on the old\n U-values (u_values). This is for simple insulation measuers, adding\n an additional layer of insulation.\n\n They depend for the components Roof, Wall, Floor on the additional\n insulation thickness (l), and the weighting for the corresponding\n component (l_weight).\n\n Windows are renovated to new ones with U-value (function: u_retro_window(l))\n only if the are worse insulated than a certain limit value\n (function: window_limit).\n\n Parameters\n ----------\n u_values: pd.DataFrame\n l: string\n l_weight: pd.DataFrame (component, weight)\n k: thermal conductivity\n\n '
return u_values.apply((lambda x: ((k / ((k / x.value) + (float(l) * l_weight.loc[x.name[3]]))) if (x.name[3] != 'Window') else (min(x.value, u_retro_window(float(l), window_assumptions)) if (x.value > window_limit(float(l), window_assumptions)) else x.value))), axis=1)
|
def calculate_new_u(u_values, l, l_weight, window_assumptions, k=0.035):
'\n calculate U-values after building retrofitting, depending on the old\n U-values (u_values). This is for simple insulation measuers, adding\n an additional layer of insulation.\n\n They depend for the components Roof, Wall, Floor on the additional\n insulation thickness (l), and the weighting for the corresponding\n component (l_weight).\n\n Windows are renovated to new ones with U-value (function: u_retro_window(l))\n only if the are worse insulated than a certain limit value\n (function: window_limit).\n\n Parameters\n ----------\n u_values: pd.DataFrame\n l: string\n l_weight: pd.DataFrame (component, weight)\n k: thermal conductivity\n\n '
return u_values.apply((lambda x: ((k / ((k / x.value) + (float(l) * l_weight.loc[x.name[3]]))) if (x.name[3] != 'Window') else (min(x.value, u_retro_window(float(l), window_assumptions)) if (x.value > window_limit(float(l), window_assumptions)) else x.value))), axis=1)<|docstring|>calculate U-values after building retrofitting, depending on the old
U-values (u_values). This is for simple insulation measuers, adding
an additional layer of insulation.
They depend for the components Roof, Wall, Floor on the additional
insulation thickness (l), and the weighting for the corresponding
component (l_weight).
Windows are renovated to new ones with U-value (function: u_retro_window(l))
only if the are worse insulated than a certain limit value
(function: window_limit).
Parameters
----------
u_values: pd.DataFrame
l: string
l_weight: pd.DataFrame (component, weight)
k: thermal conductivity<|endoftext|>
|
064169b8b8cdc7a4fd6fc555b10ac8e045ef025bd952f52f0bfbb2e69b050e01
|
def map_tabula_to_hotmaps(df_tabula, df_hotmaps, column_prefix):
'\n maps tabula data to hotmaps data with wished column name prefix\n\n Parameters\n ----------\n df_tabula : pd.Series\n tabula data with pd.MultiIndex\n df_hotmaps : pd.DataFrame\n dataframe with hotmaps pd.MultiIndex\n column_prefix : string\n column prefix to rename column names of df_tabula\n\n Returns\n -------\n pd.DataFrame (index=df_hotmaps.index)\n returns df_tabula with hotmaps index\n\n '
values = df_tabula.unstack().reindex(df_hotmaps.rename(index=(lambda x: ('MFH' if (x not in rename_sectors.values()) else x)), level=1).index)
values.columns = pd.MultiIndex.from_product([[column_prefix], values.columns])
values.index = df_hotmaps.index
return values
|
maps tabula data to hotmaps data with wished column name prefix
Parameters
----------
df_tabula : pd.Series
tabula data with pd.MultiIndex
df_hotmaps : pd.DataFrame
dataframe with hotmaps pd.MultiIndex
column_prefix : string
column prefix to rename column names of df_tabula
Returns
-------
pd.DataFrame (index=df_hotmaps.index)
returns df_tabula with hotmaps index
|
scripts/build_retro_cost.py
|
map_tabula_to_hotmaps
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def map_tabula_to_hotmaps(df_tabula, df_hotmaps, column_prefix):
'\n maps tabula data to hotmaps data with wished column name prefix\n\n Parameters\n ----------\n df_tabula : pd.Series\n tabula data with pd.MultiIndex\n df_hotmaps : pd.DataFrame\n dataframe with hotmaps pd.MultiIndex\n column_prefix : string\n column prefix to rename column names of df_tabula\n\n Returns\n -------\n pd.DataFrame (index=df_hotmaps.index)\n returns df_tabula with hotmaps index\n\n '
values = df_tabula.unstack().reindex(df_hotmaps.rename(index=(lambda x: ('MFH' if (x not in rename_sectors.values()) else x)), level=1).index)
values.columns = pd.MultiIndex.from_product([[column_prefix], values.columns])
values.index = df_hotmaps.index
return values
|
def map_tabula_to_hotmaps(df_tabula, df_hotmaps, column_prefix):
'\n maps tabula data to hotmaps data with wished column name prefix\n\n Parameters\n ----------\n df_tabula : pd.Series\n tabula data with pd.MultiIndex\n df_hotmaps : pd.DataFrame\n dataframe with hotmaps pd.MultiIndex\n column_prefix : string\n column prefix to rename column names of df_tabula\n\n Returns\n -------\n pd.DataFrame (index=df_hotmaps.index)\n returns df_tabula with hotmaps index\n\n '
values = df_tabula.unstack().reindex(df_hotmaps.rename(index=(lambda x: ('MFH' if (x not in rename_sectors.values()) else x)), level=1).index)
values.columns = pd.MultiIndex.from_product([[column_prefix], values.columns])
values.index = df_hotmaps.index
return values<|docstring|>maps tabula data to hotmaps data with wished column name prefix
Parameters
----------
df_tabula : pd.Series
tabula data with pd.MultiIndex
df_hotmaps : pd.DataFrame
dataframe with hotmaps pd.MultiIndex
column_prefix : string
column prefix to rename column names of df_tabula
Returns
-------
pd.DataFrame (index=df_hotmaps.index)
returns df_tabula with hotmaps index<|endoftext|>
|
e7d13760eb01b2ca15d91ab4289761fd32f2adeab6399e8d8b6e6965b62b190a
|
def get_solar_gains_per_year(window_area):
'\n returns solar heat gains during heating season in [kWh/a] depending on\n the window area [m^2] of the building, assuming a equal distributed window\n orientation (east, south, north, west)\n '
return sum((((((external_shading * frame_area_fraction) * non_perpendicular) * 0.25) * window_area) * solar_global_radiation))
|
returns solar heat gains during heating season in [kWh/a] depending on
the window area [m^2] of the building, assuming a equal distributed window
orientation (east, south, north, west)
|
scripts/build_retro_cost.py
|
get_solar_gains_per_year
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def get_solar_gains_per_year(window_area):
'\n returns solar heat gains during heating season in [kWh/a] depending on\n the window area [m^2] of the building, assuming a equal distributed window\n orientation (east, south, north, west)\n '
return sum((((((external_shading * frame_area_fraction) * non_perpendicular) * 0.25) * window_area) * solar_global_radiation))
|
def get_solar_gains_per_year(window_area):
'\n returns solar heat gains during heating season in [kWh/a] depending on\n the window area [m^2] of the building, assuming a equal distributed window\n orientation (east, south, north, west)\n '
return sum((((((external_shading * frame_area_fraction) * non_perpendicular) * 0.25) * window_area) * solar_global_radiation))<|docstring|>returns solar heat gains during heating season in [kWh/a] depending on
the window area [m^2] of the building, assuming a equal distributed window
orientation (east, south, north, west)<|endoftext|>
|
9ac2753509a0252f88bac594e9d367328dd194bba78b0ac3d87428c651080899
|
def map_to_lstrength(l_strength, df):
'\n renames column names from a pandas dataframe to map tabula retrofitting\n strengths [2 = moderate, 3 = ambitious] to l_strength\n '
middle = (len(l_strength) // 2)
map_to_l = pd.MultiIndex.from_arrays([((middle * [2]) + (len(l_strength[middle:]) * [3])), l_strength])
l_strength_df = df.stack((- 2)).reindex(map_to_l, axis=1, level=0).droplevel(0, axis=1).unstack().swaplevel(axis=1).dropna(axis=1)
return pd.concat([df.drop([2, 3], axis=1, level=1), l_strength_df], axis=1)
|
renames column names from a pandas dataframe to map tabula retrofitting
strengths [2 = moderate, 3 = ambitious] to l_strength
|
scripts/build_retro_cost.py
|
map_to_lstrength
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def map_to_lstrength(l_strength, df):
'\n renames column names from a pandas dataframe to map tabula retrofitting\n strengths [2 = moderate, 3 = ambitious] to l_strength\n '
middle = (len(l_strength) // 2)
map_to_l = pd.MultiIndex.from_arrays([((middle * [2]) + (len(l_strength[middle:]) * [3])), l_strength])
l_strength_df = df.stack((- 2)).reindex(map_to_l, axis=1, level=0).droplevel(0, axis=1).unstack().swaplevel(axis=1).dropna(axis=1)
return pd.concat([df.drop([2, 3], axis=1, level=1), l_strength_df], axis=1)
|
def map_to_lstrength(l_strength, df):
'\n renames column names from a pandas dataframe to map tabula retrofitting\n strengths [2 = moderate, 3 = ambitious] to l_strength\n '
middle = (len(l_strength) // 2)
map_to_l = pd.MultiIndex.from_arrays([((middle * [2]) + (len(l_strength[middle:]) * [3])), l_strength])
l_strength_df = df.stack((- 2)).reindex(map_to_l, axis=1, level=0).droplevel(0, axis=1).unstack().swaplevel(axis=1).dropna(axis=1)
return pd.concat([df.drop([2, 3], axis=1, level=1), l_strength_df], axis=1)<|docstring|>renames column names from a pandas dataframe to map tabula retrofitting
strengths [2 = moderate, 3 = ambitious] to l_strength<|endoftext|>
|
f1ddd49466276235b685ee8c774ab6a75e933118ad21171145e53a2490799115
|
def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor):
"\n calculates total annual heat losses Q_ht for different insulation thiknesses\n (l_strength), depening on current insulation state (u_values), standard\n building topologies and air ventilation from TABULA (data_tabula) and\n the accumulated difference between internal and external temperature\n during the heating season (temperature_factor).\n\n Total annual heat losses Q_ht constitute from losses by:\n (1) transmission (H_tr_e)\n (2) thermal bridges (H_tb)\n (3) ventilation (H_ve)\n weighted by a factor (F_red_temp) which is taken account for non-uniform heating\n and the temperature factor of the heating season\n\n Q_ht [W/m^2] = (H_tr_e + H_tb + H_ve) [W/m^2K] * F_red_temp * temperature_factor [K]\n\n returns Q_ht as pd.DataFrame(index=['country_code', 'subsector', 'bage'],\n columns=[current (1.) + retrofitted (l_strength)])\n\n "
for l in l_strength:
u_values['new_U_{}'.format(l)] = calculate_new_u(u_values, l, l_weight, window_assumptions)
area_element = data_tabula[['A_{}'.format(e) for e in u_values.index.levels[3]]].rename(columns=(lambda x: x[2:])).stack().unstack((- 2)).stack()
u_values['A_element'] = map_tabula_to_hotmaps(area_element, u_values, 'A_element').xs(1, level=1, axis=1)
columns = (['value'] + ['new_U_{}'.format(l) for l in l_strength])
heat_transfer = pd.concat([u_values[columns].mul(u_values.A_element, axis=0), u_values.A_element], axis=1)
heat_transfer.index = u_values.index
heat_transfer = heat_transfer.groupby(level=[0, 1, 2]).sum()
heat_transfer.rename(columns={'A_element': 'A_envelope'}, inplace=True)
heat_transfer['A_C_Ref'] = map_tabula_to_hotmaps(data_tabula.A_C_Ref, heat_transfer, 'A_C_Ref').xs(1.0, level=1, axis=1)
u_values['A_C_Ref'] = map_tabula_to_hotmaps(data_tabula.A_C_Ref, u_values, 'A_C_Ref').xs(1.0, level=1, axis=1)
heat_transfer_perm2 = heat_transfer[columns].div(heat_transfer.A_C_Ref, axis=0)
heat_transfer_perm2.columns = pd.MultiIndex.from_product([['H_tr_e'], ([1.0] + l_strength)])
H_tb_tabula = ((data_tabula.delta_U_ThermalBridging * data_tabula.A_envelope) / data_tabula.A_C_Ref)
heat_transfer_perm2 = pd.concat([heat_transfer_perm2, map_tabula_to_hotmaps(H_tb_tabula, heat_transfer_perm2, 'H_tb')], axis=1)
H_ve_tabula = (((data_tabula.n_air_infiltration + data_tabula.n_air_use) * c_p_air) * h_room)
heat_transfer_perm2 = pd.concat([heat_transfer_perm2, map_tabula_to_hotmaps(H_ve_tabula, heat_transfer_perm2, 'H_ve')], axis=1)
F_red_temp = map_tabula_to_hotmaps(data_tabula.F_red_temp, heat_transfer_perm2, 'F_red_temp')
heat_transfer_perm2 = map_to_lstrength(l_strength, heat_transfer_perm2)
F_red_temp = map_to_lstrength(l_strength, F_red_temp)
Q_ht = heat_transfer_perm2.groupby(level=1, axis=1).sum().mul(F_red_temp.droplevel(0, axis=1)).mul(temperature_factor.reindex(heat_transfer_perm2.index, level=0), axis=0)
return (Q_ht, heat_transfer_perm2)
|
calculates total annual heat losses Q_ht for different insulation thiknesses
(l_strength), depening on current insulation state (u_values), standard
building topologies and air ventilation from TABULA (data_tabula) and
the accumulated difference between internal and external temperature
during the heating season (temperature_factor).
Total annual heat losses Q_ht constitute from losses by:
(1) transmission (H_tr_e)
(2) thermal bridges (H_tb)
(3) ventilation (H_ve)
weighted by a factor (F_red_temp) which is taken account for non-uniform heating
and the temperature factor of the heating season
Q_ht [W/m^2] = (H_tr_e + H_tb + H_ve) [W/m^2K] * F_red_temp * temperature_factor [K]
returns Q_ht as pd.DataFrame(index=['country_code', 'subsector', 'bage'],
columns=[current (1.) + retrofitted (l_strength)])
|
scripts/build_retro_cost.py
|
calculate_heat_losses
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor):
"\n calculates total annual heat losses Q_ht for different insulation thiknesses\n (l_strength), depening on current insulation state (u_values), standard\n building topologies and air ventilation from TABULA (data_tabula) and\n the accumulated difference between internal and external temperature\n during the heating season (temperature_factor).\n\n Total annual heat losses Q_ht constitute from losses by:\n (1) transmission (H_tr_e)\n (2) thermal bridges (H_tb)\n (3) ventilation (H_ve)\n weighted by a factor (F_red_temp) which is taken account for non-uniform heating\n and the temperature factor of the heating season\n\n Q_ht [W/m^2] = (H_tr_e + H_tb + H_ve) [W/m^2K] * F_red_temp * temperature_factor [K]\n\n returns Q_ht as pd.DataFrame(index=['country_code', 'subsector', 'bage'],\n columns=[current (1.) + retrofitted (l_strength)])\n\n "
for l in l_strength:
u_values['new_U_{}'.format(l)] = calculate_new_u(u_values, l, l_weight, window_assumptions)
area_element = data_tabula[['A_{}'.format(e) for e in u_values.index.levels[3]]].rename(columns=(lambda x: x[2:])).stack().unstack((- 2)).stack()
u_values['A_element'] = map_tabula_to_hotmaps(area_element, u_values, 'A_element').xs(1, level=1, axis=1)
columns = (['value'] + ['new_U_{}'.format(l) for l in l_strength])
heat_transfer = pd.concat([u_values[columns].mul(u_values.A_element, axis=0), u_values.A_element], axis=1)
heat_transfer.index = u_values.index
heat_transfer = heat_transfer.groupby(level=[0, 1, 2]).sum()
heat_transfer.rename(columns={'A_element': 'A_envelope'}, inplace=True)
heat_transfer['A_C_Ref'] = map_tabula_to_hotmaps(data_tabula.A_C_Ref, heat_transfer, 'A_C_Ref').xs(1.0, level=1, axis=1)
u_values['A_C_Ref'] = map_tabula_to_hotmaps(data_tabula.A_C_Ref, u_values, 'A_C_Ref').xs(1.0, level=1, axis=1)
heat_transfer_perm2 = heat_transfer[columns].div(heat_transfer.A_C_Ref, axis=0)
heat_transfer_perm2.columns = pd.MultiIndex.from_product([['H_tr_e'], ([1.0] + l_strength)])
H_tb_tabula = ((data_tabula.delta_U_ThermalBridging * data_tabula.A_envelope) / data_tabula.A_C_Ref)
heat_transfer_perm2 = pd.concat([heat_transfer_perm2, map_tabula_to_hotmaps(H_tb_tabula, heat_transfer_perm2, 'H_tb')], axis=1)
H_ve_tabula = (((data_tabula.n_air_infiltration + data_tabula.n_air_use) * c_p_air) * h_room)
heat_transfer_perm2 = pd.concat([heat_transfer_perm2, map_tabula_to_hotmaps(H_ve_tabula, heat_transfer_perm2, 'H_ve')], axis=1)
F_red_temp = map_tabula_to_hotmaps(data_tabula.F_red_temp, heat_transfer_perm2, 'F_red_temp')
heat_transfer_perm2 = map_to_lstrength(l_strength, heat_transfer_perm2)
F_red_temp = map_to_lstrength(l_strength, F_red_temp)
Q_ht = heat_transfer_perm2.groupby(level=1, axis=1).sum().mul(F_red_temp.droplevel(0, axis=1)).mul(temperature_factor.reindex(heat_transfer_perm2.index, level=0), axis=0)
return (Q_ht, heat_transfer_perm2)
|
def calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor):
"\n calculates total annual heat losses Q_ht for different insulation thiknesses\n (l_strength), depening on current insulation state (u_values), standard\n building topologies and air ventilation from TABULA (data_tabula) and\n the accumulated difference between internal and external temperature\n during the heating season (temperature_factor).\n\n Total annual heat losses Q_ht constitute from losses by:\n (1) transmission (H_tr_e)\n (2) thermal bridges (H_tb)\n (3) ventilation (H_ve)\n weighted by a factor (F_red_temp) which is taken account for non-uniform heating\n and the temperature factor of the heating season\n\n Q_ht [W/m^2] = (H_tr_e + H_tb + H_ve) [W/m^2K] * F_red_temp * temperature_factor [K]\n\n returns Q_ht as pd.DataFrame(index=['country_code', 'subsector', 'bage'],\n columns=[current (1.) + retrofitted (l_strength)])\n\n "
for l in l_strength:
u_values['new_U_{}'.format(l)] = calculate_new_u(u_values, l, l_weight, window_assumptions)
area_element = data_tabula[['A_{}'.format(e) for e in u_values.index.levels[3]]].rename(columns=(lambda x: x[2:])).stack().unstack((- 2)).stack()
u_values['A_element'] = map_tabula_to_hotmaps(area_element, u_values, 'A_element').xs(1, level=1, axis=1)
columns = (['value'] + ['new_U_{}'.format(l) for l in l_strength])
heat_transfer = pd.concat([u_values[columns].mul(u_values.A_element, axis=0), u_values.A_element], axis=1)
heat_transfer.index = u_values.index
heat_transfer = heat_transfer.groupby(level=[0, 1, 2]).sum()
heat_transfer.rename(columns={'A_element': 'A_envelope'}, inplace=True)
heat_transfer['A_C_Ref'] = map_tabula_to_hotmaps(data_tabula.A_C_Ref, heat_transfer, 'A_C_Ref').xs(1.0, level=1, axis=1)
u_values['A_C_Ref'] = map_tabula_to_hotmaps(data_tabula.A_C_Ref, u_values, 'A_C_Ref').xs(1.0, level=1, axis=1)
heat_transfer_perm2 = heat_transfer[columns].div(heat_transfer.A_C_Ref, axis=0)
heat_transfer_perm2.columns = pd.MultiIndex.from_product([['H_tr_e'], ([1.0] + l_strength)])
H_tb_tabula = ((data_tabula.delta_U_ThermalBridging * data_tabula.A_envelope) / data_tabula.A_C_Ref)
heat_transfer_perm2 = pd.concat([heat_transfer_perm2, map_tabula_to_hotmaps(H_tb_tabula, heat_transfer_perm2, 'H_tb')], axis=1)
H_ve_tabula = (((data_tabula.n_air_infiltration + data_tabula.n_air_use) * c_p_air) * h_room)
heat_transfer_perm2 = pd.concat([heat_transfer_perm2, map_tabula_to_hotmaps(H_ve_tabula, heat_transfer_perm2, 'H_ve')], axis=1)
F_red_temp = map_tabula_to_hotmaps(data_tabula.F_red_temp, heat_transfer_perm2, 'F_red_temp')
heat_transfer_perm2 = map_to_lstrength(l_strength, heat_transfer_perm2)
F_red_temp = map_to_lstrength(l_strength, F_red_temp)
Q_ht = heat_transfer_perm2.groupby(level=1, axis=1).sum().mul(F_red_temp.droplevel(0, axis=1)).mul(temperature_factor.reindex(heat_transfer_perm2.index, level=0), axis=0)
return (Q_ht, heat_transfer_perm2)<|docstring|>calculates total annual heat losses Q_ht for different insulation thiknesses
(l_strength), depening on current insulation state (u_values), standard
building topologies and air ventilation from TABULA (data_tabula) and
the accumulated difference between internal and external temperature
during the heating season (temperature_factor).
Total annual heat losses Q_ht constitute from losses by:
(1) transmission (H_tr_e)
(2) thermal bridges (H_tb)
(3) ventilation (H_ve)
weighted by a factor (F_red_temp) which is taken account for non-uniform heating
and the temperature factor of the heating season
Q_ht [W/m^2] = (H_tr_e + H_tb + H_ve) [W/m^2K] * F_red_temp * temperature_factor [K]
returns Q_ht as pd.DataFrame(index=['country_code', 'subsector', 'bage'],
columns=[current (1.) + retrofitted (l_strength)])<|endoftext|>
|
7b35e271955b4be38c730bfc5fbe61c4fe7bfaf49115450f96287e31c0ad17b7
|
def calculate_heat_gains(data_tabula, heat_transfer_perm2, d_heat):
'\n calculates heat gains Q_gain [W/m^2], which consititure from gains by:\n (1) solar radiation\n (2) internal heat gains\n\n '
H_solar = (((data_tabula.A_Window.apply((lambda x: get_solar_gains_per_year(x))) / data_tabula.A_C_Ref) * 1000.0) / 8760)
Q_gain = map_tabula_to_hotmaps(H_solar, heat_transfer_perm2, 'H_solar').xs(1.0, level=1, axis=1)
Q_gain['H_int'] = (((phi_int * d_heat) * 1) / 365).reindex(index=heat_transfer_perm2.index, level=0)
return Q_gain
|
calculates heat gains Q_gain [W/m^2], which consititure from gains by:
(1) solar radiation
(2) internal heat gains
|
scripts/build_retro_cost.py
|
calculate_heat_gains
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def calculate_heat_gains(data_tabula, heat_transfer_perm2, d_heat):
'\n calculates heat gains Q_gain [W/m^2], which consititure from gains by:\n (1) solar radiation\n (2) internal heat gains\n\n '
H_solar = (((data_tabula.A_Window.apply((lambda x: get_solar_gains_per_year(x))) / data_tabula.A_C_Ref) * 1000.0) / 8760)
Q_gain = map_tabula_to_hotmaps(H_solar, heat_transfer_perm2, 'H_solar').xs(1.0, level=1, axis=1)
Q_gain['H_int'] = (((phi_int * d_heat) * 1) / 365).reindex(index=heat_transfer_perm2.index, level=0)
return Q_gain
|
def calculate_heat_gains(data_tabula, heat_transfer_perm2, d_heat):
'\n calculates heat gains Q_gain [W/m^2], which consititure from gains by:\n (1) solar radiation\n (2) internal heat gains\n\n '
H_solar = (((data_tabula.A_Window.apply((lambda x: get_solar_gains_per_year(x))) / data_tabula.A_C_Ref) * 1000.0) / 8760)
Q_gain = map_tabula_to_hotmaps(H_solar, heat_transfer_perm2, 'H_solar').xs(1.0, level=1, axis=1)
Q_gain['H_int'] = (((phi_int * d_heat) * 1) / 365).reindex(index=heat_transfer_perm2.index, level=0)
return Q_gain<|docstring|>calculates heat gains Q_gain [W/m^2], which consititure from gains by:
(1) solar radiation
(2) internal heat gains<|endoftext|>
|
aaaaf7748c5eb547f9074e267c6ae0f2a4f662c0658f18f7e5a2fca35e660b6d
|
def calculate_gain_utilisation_factor(heat_transfer_perm2, Q_ht, Q_gain):
'\n calculates gain utilisation factor nu\n '
tau = (c_m / heat_transfer_perm2.groupby(level=1, axis=1).sum())
alpha = (alpha_H_0 + (tau / tau_H_0))
gamma = (1 / Q_ht).mul(Q_gain.sum(axis=1), axis=0)
nu = ((1 - (gamma ** alpha)) / (1 - (gamma ** (alpha + 1))))
return nu
|
calculates gain utilisation factor nu
|
scripts/build_retro_cost.py
|
calculate_gain_utilisation_factor
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def calculate_gain_utilisation_factor(heat_transfer_perm2, Q_ht, Q_gain):
'\n \n '
tau = (c_m / heat_transfer_perm2.groupby(level=1, axis=1).sum())
alpha = (alpha_H_0 + (tau / tau_H_0))
gamma = (1 / Q_ht).mul(Q_gain.sum(axis=1), axis=0)
nu = ((1 - (gamma ** alpha)) / (1 - (gamma ** (alpha + 1))))
return nu
|
def calculate_gain_utilisation_factor(heat_transfer_perm2, Q_ht, Q_gain):
'\n \n '
tau = (c_m / heat_transfer_perm2.groupby(level=1, axis=1).sum())
alpha = (alpha_H_0 + (tau / tau_H_0))
gamma = (1 / Q_ht).mul(Q_gain.sum(axis=1), axis=0)
nu = ((1 - (gamma ** alpha)) / (1 - (gamma ** (alpha + 1))))
return nu<|docstring|>calculates gain utilisation factor nu<|endoftext|>
|
a6bef8559e01277e2944f3bdd84a22f7d6e4b2ee37c77b7d2853c55e4a76aa39
|
def calculate_space_heat_savings(u_values, data_tabula, l_strength, temperature_factor, d_heat):
'\n calculates space heat savings (dE_space [per unit of unrefurbished state])\n through retrofitting of the thermal envelope by additional insulation\n material (l_strength[m])\n '
(Q_ht, heat_transfer_perm2) = calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor)
Q_gain = calculate_heat_gains(data_tabula, heat_transfer_perm2, d_heat)
nu = calculate_gain_utilisation_factor(heat_transfer_perm2, Q_ht, Q_gain)
E_space = (Q_ht - nu.mul(Q_gain.sum(axis=1), axis=0))
dE_space = E_space.div(E_space[1.0], axis=0).iloc[(:, 1:)]
dE_space.columns = pd.MultiIndex.from_product([['dE'], l_strength])
return dE_space
|
calculates space heat savings (dE_space [per unit of unrefurbished state])
through retrofitting of the thermal envelope by additional insulation
material (l_strength[m])
|
scripts/build_retro_cost.py
|
calculate_space_heat_savings
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def calculate_space_heat_savings(u_values, data_tabula, l_strength, temperature_factor, d_heat):
'\n calculates space heat savings (dE_space [per unit of unrefurbished state])\n through retrofitting of the thermal envelope by additional insulation\n material (l_strength[m])\n '
(Q_ht, heat_transfer_perm2) = calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor)
Q_gain = calculate_heat_gains(data_tabula, heat_transfer_perm2, d_heat)
nu = calculate_gain_utilisation_factor(heat_transfer_perm2, Q_ht, Q_gain)
E_space = (Q_ht - nu.mul(Q_gain.sum(axis=1), axis=0))
dE_space = E_space.div(E_space[1.0], axis=0).iloc[(:, 1:)]
dE_space.columns = pd.MultiIndex.from_product([['dE'], l_strength])
return dE_space
|
def calculate_space_heat_savings(u_values, data_tabula, l_strength, temperature_factor, d_heat):
'\n calculates space heat savings (dE_space [per unit of unrefurbished state])\n through retrofitting of the thermal envelope by additional insulation\n material (l_strength[m])\n '
(Q_ht, heat_transfer_perm2) = calculate_heat_losses(u_values, data_tabula, l_strength, temperature_factor)
Q_gain = calculate_heat_gains(data_tabula, heat_transfer_perm2, d_heat)
nu = calculate_gain_utilisation_factor(heat_transfer_perm2, Q_ht, Q_gain)
E_space = (Q_ht - nu.mul(Q_gain.sum(axis=1), axis=0))
dE_space = E_space.div(E_space[1.0], axis=0).iloc[(:, 1:)]
dE_space.columns = pd.MultiIndex.from_product([['dE'], l_strength])
return dE_space<|docstring|>calculates space heat savings (dE_space [per unit of unrefurbished state])
through retrofitting of the thermal envelope by additional insulation
material (l_strength[m])<|endoftext|>
|
7dff60e83868d5820cf7def6f790aaa7ff1fb4bfd5f97ec0af37df3a49b86a3f
|
def calculate_retro_costs(u_values, l_strength, cost_retro):
'\n returns costs of different retrofitting measures\n '
costs = pd.concat([calculate_costs(u_values, l, cost_retro, window_assumptions).rename(l) for l in l_strength], axis=1)
cost_tot = costs.groupby(level=['country_code', 'subsector', 'bage']).sum()
cost_tot.columns = pd.MultiIndex.from_product([['cost'], cost_tot.columns])
return cost_tot
|
returns costs of different retrofitting measures
|
scripts/build_retro_cost.py
|
calculate_retro_costs
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def calculate_retro_costs(u_values, l_strength, cost_retro):
'\n \n '
costs = pd.concat([calculate_costs(u_values, l, cost_retro, window_assumptions).rename(l) for l in l_strength], axis=1)
cost_tot = costs.groupby(level=['country_code', 'subsector', 'bage']).sum()
cost_tot.columns = pd.MultiIndex.from_product([['cost'], cost_tot.columns])
return cost_tot
|
def calculate_retro_costs(u_values, l_strength, cost_retro):
'\n \n '
costs = pd.concat([calculate_costs(u_values, l, cost_retro, window_assumptions).rename(l) for l in l_strength], axis=1)
cost_tot = costs.groupby(level=['country_code', 'subsector', 'bage']).sum()
cost_tot.columns = pd.MultiIndex.from_product([['cost'], cost_tot.columns])
return cost_tot<|docstring|>returns costs of different retrofitting measures<|endoftext|>
|
b422dcf5692b5a7798ef3b210631f437af81bd728318063d8d4997abfdea9561
|
def sample_dE_costs_area(area, area_tot, costs, dE_space, countries, construction_index, tax_weighting):
'\n bring costs and energy savings together, fill area and costs per energy\n savings for missing countries, weight costs,\n determine "moderate" and "ambitious" retrofitting\n '
sub_to_sector_dict = area.reset_index().replace(rename_sectors).set_index('subsector')['sector'].to_dict()
area_reordered = area.rename(index=country_iso_dic, level=0).rename(index=rename_sectors, level=2).reset_index().rename(columns={'country': 'country_code'}).set_index(['country_code', 'subsector', 'bage'])
cost_dE = pd.concat([costs, dE_space], axis=1).mul(area_reordered.weight, axis=0).rename(sub_to_sector_dict, level=1).groupby(level=[0, 1]).sum()
for ct in countries.difference(cost_dE.index.levels[0]):
averaged_data = cost_dE.reindex(index=map_for_missings[ct], level=0).mean(level=1).set_index(pd.MultiIndex.from_product([[ct], cost_dE.index.levels[1]]))
cost_dE = cost_dE.append(averaged_data)
if construction_index:
for ct in list((map_for_missings.keys() - cost_w.index)):
cost_w.loc[ct] = cost_w.reindex(index=map_for_missings[ct]).mean()
cost_dE.cost = cost_dE.cost.mul(cost_w, level=0, axis=0)
if tax_weighting:
for ct in list((map_for_missings.keys() - tax_w.index)):
tax_w[ct] = tax_w.reindex(index=map_for_missings[ct]).mean()
cost_dE.cost = cost_dE.cost.mul(tax_w, level=0, axis=0)
cost_dE = cost_dE.reindex(countries, level=0)
sec_w = (area_tot.value / area_tot.value.groupby(level=0).sum())
tot = cost_dE.mul(sec_w, axis=0).groupby(level='country_code').sum().set_index(pd.MultiIndex.from_product([cost_dE.index.unique(level='country_code'), ['tot']]))
cost_dE = cost_dE.append(tot).unstack().stack()
summed_area = pd.DataFrame(area_tot.groupby('country').sum()).set_index(pd.MultiIndex.from_product([area_tot.index.unique(level='country'), ['tot']]))
area_tot = area_tot.append(summed_area).unstack().stack()
cost_per_saving = (cost_dE['cost'] / (1 - cost_dE['dE']))
moderate_min = cost_per_saving.idxmin(axis=1)
moderate_dE_cost = pd.concat([cost_dE.loc[i].xs(moderate_min.loc[i], level=1) for i in moderate_min.index], axis=1).T
moderate_dE_cost.columns = pd.MultiIndex.from_product([moderate_dE_cost.columns, ['moderate']])
ambitious_dE_cost = cost_dE.xs('0.26', level=1, axis=1)
ambitious_dE_cost.columns = pd.MultiIndex.from_product([ambitious_dE_cost.columns, ['ambitious']])
cost_dE_new = pd.concat([moderate_dE_cost, ambitious_dE_cost], axis=1)
return (cost_dE_new, area_tot)
|
bring costs and energy savings together, fill area and costs per energy
savings for missing countries, weight costs,
determine "moderate" and "ambitious" retrofitting
|
scripts/build_retro_cost.py
|
sample_dE_costs_area
|
koen-vg/pypsa-eur-sec
| 42
|
python
|
def sample_dE_costs_area(area, area_tot, costs, dE_space, countries, construction_index, tax_weighting):
'\n bring costs and energy savings together, fill area and costs per energy\n savings for missing countries, weight costs,\n determine "moderate" and "ambitious" retrofitting\n '
sub_to_sector_dict = area.reset_index().replace(rename_sectors).set_index('subsector')['sector'].to_dict()
area_reordered = area.rename(index=country_iso_dic, level=0).rename(index=rename_sectors, level=2).reset_index().rename(columns={'country': 'country_code'}).set_index(['country_code', 'subsector', 'bage'])
cost_dE = pd.concat([costs, dE_space], axis=1).mul(area_reordered.weight, axis=0).rename(sub_to_sector_dict, level=1).groupby(level=[0, 1]).sum()
for ct in countries.difference(cost_dE.index.levels[0]):
averaged_data = cost_dE.reindex(index=map_for_missings[ct], level=0).mean(level=1).set_index(pd.MultiIndex.from_product([[ct], cost_dE.index.levels[1]]))
cost_dE = cost_dE.append(averaged_data)
if construction_index:
for ct in list((map_for_missings.keys() - cost_w.index)):
cost_w.loc[ct] = cost_w.reindex(index=map_for_missings[ct]).mean()
cost_dE.cost = cost_dE.cost.mul(cost_w, level=0, axis=0)
if tax_weighting:
for ct in list((map_for_missings.keys() - tax_w.index)):
tax_w[ct] = tax_w.reindex(index=map_for_missings[ct]).mean()
cost_dE.cost = cost_dE.cost.mul(tax_w, level=0, axis=0)
cost_dE = cost_dE.reindex(countries, level=0)
sec_w = (area_tot.value / area_tot.value.groupby(level=0).sum())
tot = cost_dE.mul(sec_w, axis=0).groupby(level='country_code').sum().set_index(pd.MultiIndex.from_product([cost_dE.index.unique(level='country_code'), ['tot']]))
cost_dE = cost_dE.append(tot).unstack().stack()
summed_area = pd.DataFrame(area_tot.groupby('country').sum()).set_index(pd.MultiIndex.from_product([area_tot.index.unique(level='country'), ['tot']]))
area_tot = area_tot.append(summed_area).unstack().stack()
cost_per_saving = (cost_dE['cost'] / (1 - cost_dE['dE']))
moderate_min = cost_per_saving.idxmin(axis=1)
moderate_dE_cost = pd.concat([cost_dE.loc[i].xs(moderate_min.loc[i], level=1) for i in moderate_min.index], axis=1).T
moderate_dE_cost.columns = pd.MultiIndex.from_product([moderate_dE_cost.columns, ['moderate']])
ambitious_dE_cost = cost_dE.xs('0.26', level=1, axis=1)
ambitious_dE_cost.columns = pd.MultiIndex.from_product([ambitious_dE_cost.columns, ['ambitious']])
cost_dE_new = pd.concat([moderate_dE_cost, ambitious_dE_cost], axis=1)
return (cost_dE_new, area_tot)
|
def sample_dE_costs_area(area, area_tot, costs, dE_space, countries, construction_index, tax_weighting):
'\n bring costs and energy savings together, fill area and costs per energy\n savings for missing countries, weight costs,\n determine "moderate" and "ambitious" retrofitting\n '
sub_to_sector_dict = area.reset_index().replace(rename_sectors).set_index('subsector')['sector'].to_dict()
area_reordered = area.rename(index=country_iso_dic, level=0).rename(index=rename_sectors, level=2).reset_index().rename(columns={'country': 'country_code'}).set_index(['country_code', 'subsector', 'bage'])
cost_dE = pd.concat([costs, dE_space], axis=1).mul(area_reordered.weight, axis=0).rename(sub_to_sector_dict, level=1).groupby(level=[0, 1]).sum()
for ct in countries.difference(cost_dE.index.levels[0]):
averaged_data = cost_dE.reindex(index=map_for_missings[ct], level=0).mean(level=1).set_index(pd.MultiIndex.from_product([[ct], cost_dE.index.levels[1]]))
cost_dE = cost_dE.append(averaged_data)
if construction_index:
for ct in list((map_for_missings.keys() - cost_w.index)):
cost_w.loc[ct] = cost_w.reindex(index=map_for_missings[ct]).mean()
cost_dE.cost = cost_dE.cost.mul(cost_w, level=0, axis=0)
if tax_weighting:
for ct in list((map_for_missings.keys() - tax_w.index)):
tax_w[ct] = tax_w.reindex(index=map_for_missings[ct]).mean()
cost_dE.cost = cost_dE.cost.mul(tax_w, level=0, axis=0)
cost_dE = cost_dE.reindex(countries, level=0)
sec_w = (area_tot.value / area_tot.value.groupby(level=0).sum())
tot = cost_dE.mul(sec_w, axis=0).groupby(level='country_code').sum().set_index(pd.MultiIndex.from_product([cost_dE.index.unique(level='country_code'), ['tot']]))
cost_dE = cost_dE.append(tot).unstack().stack()
summed_area = pd.DataFrame(area_tot.groupby('country').sum()).set_index(pd.MultiIndex.from_product([area_tot.index.unique(level='country'), ['tot']]))
area_tot = area_tot.append(summed_area).unstack().stack()
cost_per_saving = (cost_dE['cost'] / (1 - cost_dE['dE']))
moderate_min = cost_per_saving.idxmin(axis=1)
moderate_dE_cost = pd.concat([cost_dE.loc[i].xs(moderate_min.loc[i], level=1) for i in moderate_min.index], axis=1).T
moderate_dE_cost.columns = pd.MultiIndex.from_product([moderate_dE_cost.columns, ['moderate']])
ambitious_dE_cost = cost_dE.xs('0.26', level=1, axis=1)
ambitious_dE_cost.columns = pd.MultiIndex.from_product([ambitious_dE_cost.columns, ['ambitious']])
cost_dE_new = pd.concat([moderate_dE_cost, ambitious_dE_cost], axis=1)
return (cost_dE_new, area_tot)<|docstring|>bring costs and energy savings together, fill area and costs per energy
savings for missing countries, weight costs,
determine "moderate" and "ambitious" retrofitting<|endoftext|>
|
9b6da2d526237295a49ed966db9732c6ba07aa66357ad9fb140aac01d0f0c0ef
|
def process(process_id):
'Checks status of running task'
proc = category_batch_processing.AsyncResult(process_id)
return jsonify({'state': proc.state})
|
Checks status of running task
|
app/views/processes.py
|
process
|
valentinDruzhinin/CategoryMappingApp
| 0
|
python
|
def process(process_id):
proc = category_batch_processing.AsyncResult(process_id)
return jsonify({'state': proc.state})
|
def process(process_id):
proc = category_batch_processing.AsyncResult(process_id)
return jsonify({'state': proc.state})<|docstring|>Checks status of running task<|endoftext|>
|
6fd1cf14ca9f21d1121f2b4cd3582ceaba3b306fcf160768b5bc3ed88c77edaa
|
def find_files(directory, pattern='.*\\..+'):
'Recursively finds all files matching the pattern.'
files = []
for (root, dirnames, filenames) in os.walk(directory):
for filename in filenames:
if re.match(pattern, filename):
files.append(os.path.join(root, filename))
return files
|
Recursively finds all files matching the pattern.
|
iohandler/datareader.py
|
find_files
|
JeremyCCHsu/tf-vaegan
| 97
|
python
|
def find_files(directory, pattern='.*\\..+'):
files = []
for (root, dirnames, filenames) in os.walk(directory):
for filename in filenames:
if re.match(pattern, filename):
files.append(os.path.join(root, filename))
return files
|
def find_files(directory, pattern='.*\\..+'):
files = []
for (root, dirnames, filenames) in os.walk(directory):
for filename in filenames:
if re.match(pattern, filename):
files.append(os.path.join(root, filename))
return files<|docstring|>Recursively finds all files matching the pattern.<|endoftext|>
|
8d69198d4985ae213075ca374a4d93eff76a3f9c886145c37307e82c7b50134c
|
def img_reader(datadir, img_dims, batch_size, rtype='tanh', pattern='.*\\.jpg', ext='jpg', num_threads=10, shuffle=True):
"\n\toutput: [batch_size, h, w, c] images, scaled to [0., 1.]\n\t***[BUG] shuffle=False doesn't seem to work correctly!!! *****\n\t"
files = find_files(datadir, pattern)
if (not shuffle):
files = sorted(files)
print(('Reading from dir: {}\n'.format(datadir) + ' {:d} files found with pattern: {}'.format(len(files), pattern)))
capacity = int((0.5 * len(files)))
min_after_dequeue = int((0.2 * capacity))
info = dict(capacity=capacity, min_after_dequeue=min_after_dequeue, n_files=len(files))
if ((ext == 'jpg') or (ext == 'jpeg')):
decoder = tf.image.decode_jpeg
elif (ext == 'png'):
decoder = tf.image.decode_png
else:
raise ValueError(('Unsupported file type: {:s}.'.format(ext) + ' (only *.png and *.jpg are supported'))
with tf.variable_scope('input'):
(h, w, c) = img_dims
if (not shuffle):
filename_queue = tf.train.string_input_producer(files, shuffle=shuffle)
else:
filename_queue = tf.train.string_input_producer(files)
reader = tf.WholeFileReader()
(key, value) = reader.read(filename_queue)
img = decoder(value, channels=c)
img = tf.image.crop_to_bounding_box(img, 0, 0, h, w)
img = tf.to_float(img)
if (rtype == 'tanh'):
img = (tf.div(img, 127.5) - 1.0)
elif (rtype == 'sigmoid'):
img = tf.div(img, 255.0)
else:
raise ValueError(('Unsupported range type: {:s}.'.format(rtype) + '(sigmoid or tanh)'))
img = tf.expand_dims(img, 0)
if shuffle:
imgs = tf.train.shuffle_batch([img], batch_size=batch_size, num_threads=num_threads, capacity=capacity, enqueue_many=True, min_after_dequeue=min_after_dequeue)
else:
imgs = tf.train.batch([img], batch_size=batch_size, num_threads=num_threads, enqueue_many=True, capacity=capacity)
return (imgs, info)
|
output: [batch_size, h, w, c] images, scaled to [0., 1.]
***[BUG] shuffle=False doesn't seem to work correctly!!! *****
|
iohandler/datareader.py
|
img_reader
|
JeremyCCHsu/tf-vaegan
| 97
|
python
|
def img_reader(datadir, img_dims, batch_size, rtype='tanh', pattern='.*\\.jpg', ext='jpg', num_threads=10, shuffle=True):
"\n\toutput: [batch_size, h, w, c] images, scaled to [0., 1.]\n\t***[BUG] shuffle=False doesn't seem to work correctly!!! *****\n\t"
files = find_files(datadir, pattern)
if (not shuffle):
files = sorted(files)
print(('Reading from dir: {}\n'.format(datadir) + ' {:d} files found with pattern: {}'.format(len(files), pattern)))
capacity = int((0.5 * len(files)))
min_after_dequeue = int((0.2 * capacity))
info = dict(capacity=capacity, min_after_dequeue=min_after_dequeue, n_files=len(files))
if ((ext == 'jpg') or (ext == 'jpeg')):
decoder = tf.image.decode_jpeg
elif (ext == 'png'):
decoder = tf.image.decode_png
else:
raise ValueError(('Unsupported file type: {:s}.'.format(ext) + ' (only *.png and *.jpg are supported'))
with tf.variable_scope('input'):
(h, w, c) = img_dims
if (not shuffle):
filename_queue = tf.train.string_input_producer(files, shuffle=shuffle)
else:
filename_queue = tf.train.string_input_producer(files)
reader = tf.WholeFileReader()
(key, value) = reader.read(filename_queue)
img = decoder(value, channels=c)
img = tf.image.crop_to_bounding_box(img, 0, 0, h, w)
img = tf.to_float(img)
if (rtype == 'tanh'):
img = (tf.div(img, 127.5) - 1.0)
elif (rtype == 'sigmoid'):
img = tf.div(img, 255.0)
else:
raise ValueError(('Unsupported range type: {:s}.'.format(rtype) + '(sigmoid or tanh)'))
img = tf.expand_dims(img, 0)
if shuffle:
imgs = tf.train.shuffle_batch([img], batch_size=batch_size, num_threads=num_threads, capacity=capacity, enqueue_many=True, min_after_dequeue=min_after_dequeue)
else:
imgs = tf.train.batch([img], batch_size=batch_size, num_threads=num_threads, enqueue_many=True, capacity=capacity)
return (imgs, info)
|
def img_reader(datadir, img_dims, batch_size, rtype='tanh', pattern='.*\\.jpg', ext='jpg', num_threads=10, shuffle=True):
"\n\toutput: [batch_size, h, w, c] images, scaled to [0., 1.]\n\t***[BUG] shuffle=False doesn't seem to work correctly!!! *****\n\t"
files = find_files(datadir, pattern)
if (not shuffle):
files = sorted(files)
print(('Reading from dir: {}\n'.format(datadir) + ' {:d} files found with pattern: {}'.format(len(files), pattern)))
capacity = int((0.5 * len(files)))
min_after_dequeue = int((0.2 * capacity))
info = dict(capacity=capacity, min_after_dequeue=min_after_dequeue, n_files=len(files))
if ((ext == 'jpg') or (ext == 'jpeg')):
decoder = tf.image.decode_jpeg
elif (ext == 'png'):
decoder = tf.image.decode_png
else:
raise ValueError(('Unsupported file type: {:s}.'.format(ext) + ' (only *.png and *.jpg are supported'))
with tf.variable_scope('input'):
(h, w, c) = img_dims
if (not shuffle):
filename_queue = tf.train.string_input_producer(files, shuffle=shuffle)
else:
filename_queue = tf.train.string_input_producer(files)
reader = tf.WholeFileReader()
(key, value) = reader.read(filename_queue)
img = decoder(value, channels=c)
img = tf.image.crop_to_bounding_box(img, 0, 0, h, w)
img = tf.to_float(img)
if (rtype == 'tanh'):
img = (tf.div(img, 127.5) - 1.0)
elif (rtype == 'sigmoid'):
img = tf.div(img, 255.0)
else:
raise ValueError(('Unsupported range type: {:s}.'.format(rtype) + '(sigmoid or tanh)'))
img = tf.expand_dims(img, 0)
if shuffle:
imgs = tf.train.shuffle_batch([img], batch_size=batch_size, num_threads=num_threads, capacity=capacity, enqueue_many=True, min_after_dequeue=min_after_dequeue)
else:
imgs = tf.train.batch([img], batch_size=batch_size, num_threads=num_threads, enqueue_many=True, capacity=capacity)
return (imgs, info)<|docstring|>output: [batch_size, h, w, c] images, scaled to [0., 1.]
***[BUG] shuffle=False doesn't seem to work correctly!!! *****<|endoftext|>
|
61fbcf46526d9821bdf2952b2a47d06b9d58405ab9d57cfc359086a1b83618f6
|
def supervised_training_iter(modnet, optimizer, image, trimap, gt_matte, semantic_scale=10.0, detail_scale=10.0, matte_scale=1.0):
' Supervised training iteration of MODNet\n This function trains MODNet for one iteration in a labeled dataset.\n\n Arguments:\n modnet (torch.nn.Module): instance of MODNet\n optimizer (torch.optim.Optimizer): optimizer for supervised training \n image (torch.autograd.Variable): input RGB image\n its pixel values should be normalized\n trimap (torch.autograd.Variable): trimap used to calculate the losses\n its pixel values can be 0, 0.5, or 1\n (foreground=1, background=0, unknown=0.5)\n gt_matte (torch.autograd.Variable): ground truth alpha matte\n its pixel values are between [0, 1]\n semantic_scale (float): scale of the semantic loss\n NOTE: please adjust according to your dataset\n detail_scale (float): scale of the detail loss\n NOTE: please adjust according to your dataset\n matte_scale (float): scale of the matte loss\n NOTE: please adjust according to your dataset\n \n Returns:\n semantic_loss (torch.Tensor): loss of the semantic estimation [Low-Resolution (LR) Branch]\n detail_loss (torch.Tensor): loss of the detail prediction [High-Resolution (HR) Branch]\n matte_loss (torch.Tensor): loss of the semantic-detail fusion [Fusion Branch]\n\n Example:\n import torch\n from src.models.modnet import MODNet\n from src.trainer import supervised_training_iter\n\n bs = 16 # batch size\n lr = 0.01 # learn rate\n epochs = 40 # total epochs\n\n modnet = torch.nn.DataParallel(MODNet()).cuda()\n optimizer = torch.optim.SGD(modnet.parameters(), lr=lr, momentum=0.9)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=int(0.25 * epochs), gamma=0.1)\n\n dataloader = CREATE_YOUR_DATALOADER(bs) # NOTE: please finish this function\n\n for epoch in range(0, epochs):\n for idx, (image, trimap, gt_matte) in enumerate(dataloader):\n semantic_loss, detail_loss, matte_loss = supervised_training_iter(modnet, optimizer, image, trimap, gt_matte)\n lr_scheduler.step()\n '
global blurer
modnet.train()
optimizer.zero_grad()
(pred_semantic, pred_detail, pred_matte) = modnet(image, False)
boundaries = ((trimap < 0.5) + (trimap > 0.5))
gt_semantic = F.interpolate(gt_matte, scale_factor=(1 / 16), mode='bilinear')
gt_semantic = blurer(gt_semantic)
semantic_loss = torch.mean(F.mse_loss(pred_semantic, gt_semantic))
semantic_loss = (semantic_scale * semantic_loss)
pred_boundary_detail = torch.where(boundaries, trimap, pred_detail)
gt_detail = torch.where(boundaries, trimap, gt_matte)
detail_loss = torch.mean(F.l1_loss(pred_boundary_detail, gt_detail))
detail_loss = (detail_scale * detail_loss)
pred_boundary_matte = torch.where(boundaries, trimap, pred_matte)
matte_l1_loss = (F.l1_loss(pred_matte, gt_matte) + (4.0 * F.l1_loss(pred_boundary_matte, gt_matte)))
matte_compositional_loss = (F.l1_loss((image * pred_matte), (image * gt_matte)) + (4.0 * F.l1_loss((image * pred_boundary_matte), (image * gt_matte))))
matte_loss = torch.mean((matte_l1_loss + matte_compositional_loss))
matte_loss = (matte_scale * matte_loss)
loss = ((semantic_loss + detail_loss) + matte_loss)
loss.backward()
optimizer.step()
return (semantic_loss, detail_loss, matte_loss)
|
Supervised training iteration of MODNet
This function trains MODNet for one iteration in a labeled dataset.
Arguments:
modnet (torch.nn.Module): instance of MODNet
optimizer (torch.optim.Optimizer): optimizer for supervised training
image (torch.autograd.Variable): input RGB image
its pixel values should be normalized
trimap (torch.autograd.Variable): trimap used to calculate the losses
its pixel values can be 0, 0.5, or 1
(foreground=1, background=0, unknown=0.5)
gt_matte (torch.autograd.Variable): ground truth alpha matte
its pixel values are between [0, 1]
semantic_scale (float): scale of the semantic loss
NOTE: please adjust according to your dataset
detail_scale (float): scale of the detail loss
NOTE: please adjust according to your dataset
matte_scale (float): scale of the matte loss
NOTE: please adjust according to your dataset
Returns:
semantic_loss (torch.Tensor): loss of the semantic estimation [Low-Resolution (LR) Branch]
detail_loss (torch.Tensor): loss of the detail prediction [High-Resolution (HR) Branch]
matte_loss (torch.Tensor): loss of the semantic-detail fusion [Fusion Branch]
Example:
import torch
from src.models.modnet import MODNet
from src.trainer import supervised_training_iter
bs = 16 # batch size
lr = 0.01 # learn rate
epochs = 40 # total epochs
modnet = torch.nn.DataParallel(MODNet()).cuda()
optimizer = torch.optim.SGD(modnet.parameters(), lr=lr, momentum=0.9)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=int(0.25 * epochs), gamma=0.1)
dataloader = CREATE_YOUR_DATALOADER(bs) # NOTE: please finish this function
for epoch in range(0, epochs):
for idx, (image, trimap, gt_matte) in enumerate(dataloader):
semantic_loss, detail_loss, matte_loss = supervised_training_iter(modnet, optimizer, image, trimap, gt_matte)
lr_scheduler.step()
|
src/trainer.py
|
supervised_training_iter
|
actboy/MODNet
| 0
|
python
|
def supervised_training_iter(modnet, optimizer, image, trimap, gt_matte, semantic_scale=10.0, detail_scale=10.0, matte_scale=1.0):
' Supervised training iteration of MODNet\n This function trains MODNet for one iteration in a labeled dataset.\n\n Arguments:\n modnet (torch.nn.Module): instance of MODNet\n optimizer (torch.optim.Optimizer): optimizer for supervised training \n image (torch.autograd.Variable): input RGB image\n its pixel values should be normalized\n trimap (torch.autograd.Variable): trimap used to calculate the losses\n its pixel values can be 0, 0.5, or 1\n (foreground=1, background=0, unknown=0.5)\n gt_matte (torch.autograd.Variable): ground truth alpha matte\n its pixel values are between [0, 1]\n semantic_scale (float): scale of the semantic loss\n NOTE: please adjust according to your dataset\n detail_scale (float): scale of the detail loss\n NOTE: please adjust according to your dataset\n matte_scale (float): scale of the matte loss\n NOTE: please adjust according to your dataset\n \n Returns:\n semantic_loss (torch.Tensor): loss of the semantic estimation [Low-Resolution (LR) Branch]\n detail_loss (torch.Tensor): loss of the detail prediction [High-Resolution (HR) Branch]\n matte_loss (torch.Tensor): loss of the semantic-detail fusion [Fusion Branch]\n\n Example:\n import torch\n from src.models.modnet import MODNet\n from src.trainer import supervised_training_iter\n\n bs = 16 # batch size\n lr = 0.01 # learn rate\n epochs = 40 # total epochs\n\n modnet = torch.nn.DataParallel(MODNet()).cuda()\n optimizer = torch.optim.SGD(modnet.parameters(), lr=lr, momentum=0.9)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=int(0.25 * epochs), gamma=0.1)\n\n dataloader = CREATE_YOUR_DATALOADER(bs) # NOTE: please finish this function\n\n for epoch in range(0, epochs):\n for idx, (image, trimap, gt_matte) in enumerate(dataloader):\n semantic_loss, detail_loss, matte_loss = supervised_training_iter(modnet, optimizer, image, trimap, gt_matte)\n lr_scheduler.step()\n '
global blurer
modnet.train()
optimizer.zero_grad()
(pred_semantic, pred_detail, pred_matte) = modnet(image, False)
boundaries = ((trimap < 0.5) + (trimap > 0.5))
gt_semantic = F.interpolate(gt_matte, scale_factor=(1 / 16), mode='bilinear')
gt_semantic = blurer(gt_semantic)
semantic_loss = torch.mean(F.mse_loss(pred_semantic, gt_semantic))
semantic_loss = (semantic_scale * semantic_loss)
pred_boundary_detail = torch.where(boundaries, trimap, pred_detail)
gt_detail = torch.where(boundaries, trimap, gt_matte)
detail_loss = torch.mean(F.l1_loss(pred_boundary_detail, gt_detail))
detail_loss = (detail_scale * detail_loss)
pred_boundary_matte = torch.where(boundaries, trimap, pred_matte)
matte_l1_loss = (F.l1_loss(pred_matte, gt_matte) + (4.0 * F.l1_loss(pred_boundary_matte, gt_matte)))
matte_compositional_loss = (F.l1_loss((image * pred_matte), (image * gt_matte)) + (4.0 * F.l1_loss((image * pred_boundary_matte), (image * gt_matte))))
matte_loss = torch.mean((matte_l1_loss + matte_compositional_loss))
matte_loss = (matte_scale * matte_loss)
loss = ((semantic_loss + detail_loss) + matte_loss)
loss.backward()
optimizer.step()
return (semantic_loss, detail_loss, matte_loss)
|
def supervised_training_iter(modnet, optimizer, image, trimap, gt_matte, semantic_scale=10.0, detail_scale=10.0, matte_scale=1.0):
' Supervised training iteration of MODNet\n This function trains MODNet for one iteration in a labeled dataset.\n\n Arguments:\n modnet (torch.nn.Module): instance of MODNet\n optimizer (torch.optim.Optimizer): optimizer for supervised training \n image (torch.autograd.Variable): input RGB image\n its pixel values should be normalized\n trimap (torch.autograd.Variable): trimap used to calculate the losses\n its pixel values can be 0, 0.5, or 1\n (foreground=1, background=0, unknown=0.5)\n gt_matte (torch.autograd.Variable): ground truth alpha matte\n its pixel values are between [0, 1]\n semantic_scale (float): scale of the semantic loss\n NOTE: please adjust according to your dataset\n detail_scale (float): scale of the detail loss\n NOTE: please adjust according to your dataset\n matte_scale (float): scale of the matte loss\n NOTE: please adjust according to your dataset\n \n Returns:\n semantic_loss (torch.Tensor): loss of the semantic estimation [Low-Resolution (LR) Branch]\n detail_loss (torch.Tensor): loss of the detail prediction [High-Resolution (HR) Branch]\n matte_loss (torch.Tensor): loss of the semantic-detail fusion [Fusion Branch]\n\n Example:\n import torch\n from src.models.modnet import MODNet\n from src.trainer import supervised_training_iter\n\n bs = 16 # batch size\n lr = 0.01 # learn rate\n epochs = 40 # total epochs\n\n modnet = torch.nn.DataParallel(MODNet()).cuda()\n optimizer = torch.optim.SGD(modnet.parameters(), lr=lr, momentum=0.9)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=int(0.25 * epochs), gamma=0.1)\n\n dataloader = CREATE_YOUR_DATALOADER(bs) # NOTE: please finish this function\n\n for epoch in range(0, epochs):\n for idx, (image, trimap, gt_matte) in enumerate(dataloader):\n semantic_loss, detail_loss, matte_loss = supervised_training_iter(modnet, optimizer, image, trimap, gt_matte)\n lr_scheduler.step()\n '
global blurer
modnet.train()
optimizer.zero_grad()
(pred_semantic, pred_detail, pred_matte) = modnet(image, False)
boundaries = ((trimap < 0.5) + (trimap > 0.5))
gt_semantic = F.interpolate(gt_matte, scale_factor=(1 / 16), mode='bilinear')
gt_semantic = blurer(gt_semantic)
semantic_loss = torch.mean(F.mse_loss(pred_semantic, gt_semantic))
semantic_loss = (semantic_scale * semantic_loss)
pred_boundary_detail = torch.where(boundaries, trimap, pred_detail)
gt_detail = torch.where(boundaries, trimap, gt_matte)
detail_loss = torch.mean(F.l1_loss(pred_boundary_detail, gt_detail))
detail_loss = (detail_scale * detail_loss)
pred_boundary_matte = torch.where(boundaries, trimap, pred_matte)
matte_l1_loss = (F.l1_loss(pred_matte, gt_matte) + (4.0 * F.l1_loss(pred_boundary_matte, gt_matte)))
matte_compositional_loss = (F.l1_loss((image * pred_matte), (image * gt_matte)) + (4.0 * F.l1_loss((image * pred_boundary_matte), (image * gt_matte))))
matte_loss = torch.mean((matte_l1_loss + matte_compositional_loss))
matte_loss = (matte_scale * matte_loss)
loss = ((semantic_loss + detail_loss) + matte_loss)
loss.backward()
optimizer.step()
return (semantic_loss, detail_loss, matte_loss)<|docstring|>Supervised training iteration of MODNet
This function trains MODNet for one iteration in a labeled dataset.
Arguments:
modnet (torch.nn.Module): instance of MODNet
optimizer (torch.optim.Optimizer): optimizer for supervised training
image (torch.autograd.Variable): input RGB image
its pixel values should be normalized
trimap (torch.autograd.Variable): trimap used to calculate the losses
its pixel values can be 0, 0.5, or 1
(foreground=1, background=0, unknown=0.5)
gt_matte (torch.autograd.Variable): ground truth alpha matte
its pixel values are between [0, 1]
semantic_scale (float): scale of the semantic loss
NOTE: please adjust according to your dataset
detail_scale (float): scale of the detail loss
NOTE: please adjust according to your dataset
matte_scale (float): scale of the matte loss
NOTE: please adjust according to your dataset
Returns:
semantic_loss (torch.Tensor): loss of the semantic estimation [Low-Resolution (LR) Branch]
detail_loss (torch.Tensor): loss of the detail prediction [High-Resolution (HR) Branch]
matte_loss (torch.Tensor): loss of the semantic-detail fusion [Fusion Branch]
Example:
import torch
from src.models.modnet import MODNet
from src.trainer import supervised_training_iter
bs = 16 # batch size
lr = 0.01 # learn rate
epochs = 40 # total epochs
modnet = torch.nn.DataParallel(MODNet()).cuda()
optimizer = torch.optim.SGD(modnet.parameters(), lr=lr, momentum=0.9)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=int(0.25 * epochs), gamma=0.1)
dataloader = CREATE_YOUR_DATALOADER(bs) # NOTE: please finish this function
for epoch in range(0, epochs):
for idx, (image, trimap, gt_matte) in enumerate(dataloader):
semantic_loss, detail_loss, matte_loss = supervised_training_iter(modnet, optimizer, image, trimap, gt_matte)
lr_scheduler.step()<|endoftext|>
|
c355b82006720e2765e4c806554c320257e72b13b4069b45bf7f7aaf95c7ccc5
|
def soc_adaptation_iter(modnet, backup_modnet, optimizer, image, soc_semantic_scale=100.0, soc_detail_scale=1.0):
' Self-Supervised sub-objective consistency (SOC) adaptation iteration of MODNet\n This function fine-tunes MODNet for one iteration in an unlabeled dataset.\n Note that SOC can only fine-tune a converged MODNet, i.e., MODNet that has been \n trained in a labeled dataset.\n\n Arguments:\n modnet (torch.nn.Module): instance of MODNet\n backup_modnet (torch.nn.Module): backup of the trained MODNet\n optimizer (torch.optim.Optimizer): optimizer for self-supervised SOC \n image (torch.autograd.Variable): input RGB image\n its pixel values should be normalized\n soc_semantic_scale (float): scale of the SOC semantic loss \n NOTE: please adjust according to your dataset\n soc_detail_scale (float): scale of the SOC detail loss\n NOTE: please adjust according to your dataset\n \n Returns:\n soc_semantic_loss (torch.Tensor): loss of the semantic SOC\n soc_detail_loss (torch.Tensor): loss of the detail SOC\n\n Example:\n import copy\n import torch\n from src.models.modnet import MODNet\n from src.trainer import soc_adaptation_iter\n\n bs = 1 # batch size\n lr = 0.00001 # learn rate\n epochs = 10 # total epochs\n\n modnet = torch.nn.DataParallel(MODNet()).cuda()\n modnet = LOAD_TRAINED_CKPT() # NOTE: please finish this function\n\n optimizer = torch.optim.Adam(modnet.parameters(), lr=lr, betas=(0.9, 0.99))\n dataloader = CREATE_YOUR_DATALOADER(bs) # NOTE: please finish this function\n\n for epoch in range(0, epochs):\n backup_modnet = copy.deepcopy(modnet)\n for idx, (image) in enumerate(dataloader):\n soc_semantic_loss, soc_detail_loss = soc_adaptation_iter(modnet, backup_modnet, optimizer, image)\n '
global blurer
backup_modnet.eval()
modnet.train()
modnet.module.freeze_norm()
optimizer.zero_grad()
(pred_semantic, pred_detail, pred_matte) = modnet(image, False)
with torch.no_grad():
(_, pred_backup_detail, pred_backup_matte) = backup_modnet(image, False)
pred_matte_fg = (pred_matte.detach() > 0.1).float()
pred_semantic_fg = (pred_semantic.detach() > 0.1).float()
pred_semantic_fg = F.interpolate(pred_semantic_fg, scale_factor=16, mode='bilinear')
pred_fg = (pred_matte_fg * pred_semantic_fg)
(n, c, h, w) = pred_matte.shape
np_pred_fg = pred_fg.data.cpu().numpy()
np_boundaries = np.zeros([n, c, h, w])
for sdx in range(0, n):
sample_np_boundaries = np_boundaries[(sdx, 0, ...)]
sample_np_pred_fg = np_pred_fg[(sdx, 0, ...)]
side = int((((h + w) / 2) * 0.05))
dilated = grey_dilation(sample_np_pred_fg, size=(side, side))
eroded = grey_erosion(sample_np_pred_fg, size=(side, side))
sample_np_boundaries[np.where(((dilated - eroded) != 0))] = 1
np_boundaries[(sdx, 0, ...)] = sample_np_boundaries
boundaries = torch.tensor(np_boundaries).float().cuda()
downsampled_pred_matte = blurer(F.interpolate(pred_matte, scale_factor=(1 / 16), mode='bilinear'))
pseudo_gt_semantic = downsampled_pred_matte.detach()
pseudo_gt_semantic = (pseudo_gt_semantic * (pseudo_gt_semantic > 0.01).float())
pseudo_gt_matte = pred_semantic.detach()
pseudo_gt_matte = (pseudo_gt_matte * (pseudo_gt_matte > 0.01).float())
soc_semantic_loss = (F.mse_loss(pred_semantic, pseudo_gt_semantic) + F.mse_loss(downsampled_pred_matte, pseudo_gt_matte))
soc_semantic_loss = (soc_semantic_scale * torch.mean(soc_semantic_loss))
backup_detail_loss = (boundaries * F.l1_loss(pred_detail, pred_backup_detail, reduction='none'))
backup_detail_loss = (torch.sum(backup_detail_loss, dim=(1, 2, 3)) / torch.sum(boundaries, dim=(1, 2, 3)))
backup_detail_loss = torch.mean(backup_detail_loss)
backup_matte_loss = (boundaries * F.l1_loss(pred_matte, pred_backup_matte, reduction='none'))
backup_matte_loss = (torch.sum(backup_matte_loss, dim=(1, 2, 3)) / torch.sum(boundaries, dim=(1, 2, 3)))
backup_matte_loss = torch.mean(backup_matte_loss)
soc_detail_loss = (soc_detail_scale * (backup_detail_loss + backup_matte_loss))
loss = (soc_semantic_loss + soc_detail_loss)
loss.backward()
optimizer.step()
return (soc_semantic_loss, soc_detail_loss)
|
Self-Supervised sub-objective consistency (SOC) adaptation iteration of MODNet
This function fine-tunes MODNet for one iteration in an unlabeled dataset.
Note that SOC can only fine-tune a converged MODNet, i.e., MODNet that has been
trained in a labeled dataset.
Arguments:
modnet (torch.nn.Module): instance of MODNet
backup_modnet (torch.nn.Module): backup of the trained MODNet
optimizer (torch.optim.Optimizer): optimizer for self-supervised SOC
image (torch.autograd.Variable): input RGB image
its pixel values should be normalized
soc_semantic_scale (float): scale of the SOC semantic loss
NOTE: please adjust according to your dataset
soc_detail_scale (float): scale of the SOC detail loss
NOTE: please adjust according to your dataset
Returns:
soc_semantic_loss (torch.Tensor): loss of the semantic SOC
soc_detail_loss (torch.Tensor): loss of the detail SOC
Example:
import copy
import torch
from src.models.modnet import MODNet
from src.trainer import soc_adaptation_iter
bs = 1 # batch size
lr = 0.00001 # learn rate
epochs = 10 # total epochs
modnet = torch.nn.DataParallel(MODNet()).cuda()
modnet = LOAD_TRAINED_CKPT() # NOTE: please finish this function
optimizer = torch.optim.Adam(modnet.parameters(), lr=lr, betas=(0.9, 0.99))
dataloader = CREATE_YOUR_DATALOADER(bs) # NOTE: please finish this function
for epoch in range(0, epochs):
backup_modnet = copy.deepcopy(modnet)
for idx, (image) in enumerate(dataloader):
soc_semantic_loss, soc_detail_loss = soc_adaptation_iter(modnet, backup_modnet, optimizer, image)
|
src/trainer.py
|
soc_adaptation_iter
|
actboy/MODNet
| 0
|
python
|
def soc_adaptation_iter(modnet, backup_modnet, optimizer, image, soc_semantic_scale=100.0, soc_detail_scale=1.0):
' Self-Supervised sub-objective consistency (SOC) adaptation iteration of MODNet\n This function fine-tunes MODNet for one iteration in an unlabeled dataset.\n Note that SOC can only fine-tune a converged MODNet, i.e., MODNet that has been \n trained in a labeled dataset.\n\n Arguments:\n modnet (torch.nn.Module): instance of MODNet\n backup_modnet (torch.nn.Module): backup of the trained MODNet\n optimizer (torch.optim.Optimizer): optimizer for self-supervised SOC \n image (torch.autograd.Variable): input RGB image\n its pixel values should be normalized\n soc_semantic_scale (float): scale of the SOC semantic loss \n NOTE: please adjust according to your dataset\n soc_detail_scale (float): scale of the SOC detail loss\n NOTE: please adjust according to your dataset\n \n Returns:\n soc_semantic_loss (torch.Tensor): loss of the semantic SOC\n soc_detail_loss (torch.Tensor): loss of the detail SOC\n\n Example:\n import copy\n import torch\n from src.models.modnet import MODNet\n from src.trainer import soc_adaptation_iter\n\n bs = 1 # batch size\n lr = 0.00001 # learn rate\n epochs = 10 # total epochs\n\n modnet = torch.nn.DataParallel(MODNet()).cuda()\n modnet = LOAD_TRAINED_CKPT() # NOTE: please finish this function\n\n optimizer = torch.optim.Adam(modnet.parameters(), lr=lr, betas=(0.9, 0.99))\n dataloader = CREATE_YOUR_DATALOADER(bs) # NOTE: please finish this function\n\n for epoch in range(0, epochs):\n backup_modnet = copy.deepcopy(modnet)\n for idx, (image) in enumerate(dataloader):\n soc_semantic_loss, soc_detail_loss = soc_adaptation_iter(modnet, backup_modnet, optimizer, image)\n '
global blurer
backup_modnet.eval()
modnet.train()
modnet.module.freeze_norm()
optimizer.zero_grad()
(pred_semantic, pred_detail, pred_matte) = modnet(image, False)
with torch.no_grad():
(_, pred_backup_detail, pred_backup_matte) = backup_modnet(image, False)
pred_matte_fg = (pred_matte.detach() > 0.1).float()
pred_semantic_fg = (pred_semantic.detach() > 0.1).float()
pred_semantic_fg = F.interpolate(pred_semantic_fg, scale_factor=16, mode='bilinear')
pred_fg = (pred_matte_fg * pred_semantic_fg)
(n, c, h, w) = pred_matte.shape
np_pred_fg = pred_fg.data.cpu().numpy()
np_boundaries = np.zeros([n, c, h, w])
for sdx in range(0, n):
sample_np_boundaries = np_boundaries[(sdx, 0, ...)]
sample_np_pred_fg = np_pred_fg[(sdx, 0, ...)]
side = int((((h + w) / 2) * 0.05))
dilated = grey_dilation(sample_np_pred_fg, size=(side, side))
eroded = grey_erosion(sample_np_pred_fg, size=(side, side))
sample_np_boundaries[np.where(((dilated - eroded) != 0))] = 1
np_boundaries[(sdx, 0, ...)] = sample_np_boundaries
boundaries = torch.tensor(np_boundaries).float().cuda()
downsampled_pred_matte = blurer(F.interpolate(pred_matte, scale_factor=(1 / 16), mode='bilinear'))
pseudo_gt_semantic = downsampled_pred_matte.detach()
pseudo_gt_semantic = (pseudo_gt_semantic * (pseudo_gt_semantic > 0.01).float())
pseudo_gt_matte = pred_semantic.detach()
pseudo_gt_matte = (pseudo_gt_matte * (pseudo_gt_matte > 0.01).float())
soc_semantic_loss = (F.mse_loss(pred_semantic, pseudo_gt_semantic) + F.mse_loss(downsampled_pred_matte, pseudo_gt_matte))
soc_semantic_loss = (soc_semantic_scale * torch.mean(soc_semantic_loss))
backup_detail_loss = (boundaries * F.l1_loss(pred_detail, pred_backup_detail, reduction='none'))
backup_detail_loss = (torch.sum(backup_detail_loss, dim=(1, 2, 3)) / torch.sum(boundaries, dim=(1, 2, 3)))
backup_detail_loss = torch.mean(backup_detail_loss)
backup_matte_loss = (boundaries * F.l1_loss(pred_matte, pred_backup_matte, reduction='none'))
backup_matte_loss = (torch.sum(backup_matte_loss, dim=(1, 2, 3)) / torch.sum(boundaries, dim=(1, 2, 3)))
backup_matte_loss = torch.mean(backup_matte_loss)
soc_detail_loss = (soc_detail_scale * (backup_detail_loss + backup_matte_loss))
loss = (soc_semantic_loss + soc_detail_loss)
loss.backward()
optimizer.step()
return (soc_semantic_loss, soc_detail_loss)
|
def soc_adaptation_iter(modnet, backup_modnet, optimizer, image, soc_semantic_scale=100.0, soc_detail_scale=1.0):
' Self-Supervised sub-objective consistency (SOC) adaptation iteration of MODNet\n This function fine-tunes MODNet for one iteration in an unlabeled dataset.\n Note that SOC can only fine-tune a converged MODNet, i.e., MODNet that has been \n trained in a labeled dataset.\n\n Arguments:\n modnet (torch.nn.Module): instance of MODNet\n backup_modnet (torch.nn.Module): backup of the trained MODNet\n optimizer (torch.optim.Optimizer): optimizer for self-supervised SOC \n image (torch.autograd.Variable): input RGB image\n its pixel values should be normalized\n soc_semantic_scale (float): scale of the SOC semantic loss \n NOTE: please adjust according to your dataset\n soc_detail_scale (float): scale of the SOC detail loss\n NOTE: please adjust according to your dataset\n \n Returns:\n soc_semantic_loss (torch.Tensor): loss of the semantic SOC\n soc_detail_loss (torch.Tensor): loss of the detail SOC\n\n Example:\n import copy\n import torch\n from src.models.modnet import MODNet\n from src.trainer import soc_adaptation_iter\n\n bs = 1 # batch size\n lr = 0.00001 # learn rate\n epochs = 10 # total epochs\n\n modnet = torch.nn.DataParallel(MODNet()).cuda()\n modnet = LOAD_TRAINED_CKPT() # NOTE: please finish this function\n\n optimizer = torch.optim.Adam(modnet.parameters(), lr=lr, betas=(0.9, 0.99))\n dataloader = CREATE_YOUR_DATALOADER(bs) # NOTE: please finish this function\n\n for epoch in range(0, epochs):\n backup_modnet = copy.deepcopy(modnet)\n for idx, (image) in enumerate(dataloader):\n soc_semantic_loss, soc_detail_loss = soc_adaptation_iter(modnet, backup_modnet, optimizer, image)\n '
global blurer
backup_modnet.eval()
modnet.train()
modnet.module.freeze_norm()
optimizer.zero_grad()
(pred_semantic, pred_detail, pred_matte) = modnet(image, False)
with torch.no_grad():
(_, pred_backup_detail, pred_backup_matte) = backup_modnet(image, False)
pred_matte_fg = (pred_matte.detach() > 0.1).float()
pred_semantic_fg = (pred_semantic.detach() > 0.1).float()
pred_semantic_fg = F.interpolate(pred_semantic_fg, scale_factor=16, mode='bilinear')
pred_fg = (pred_matte_fg * pred_semantic_fg)
(n, c, h, w) = pred_matte.shape
np_pred_fg = pred_fg.data.cpu().numpy()
np_boundaries = np.zeros([n, c, h, w])
for sdx in range(0, n):
sample_np_boundaries = np_boundaries[(sdx, 0, ...)]
sample_np_pred_fg = np_pred_fg[(sdx, 0, ...)]
side = int((((h + w) / 2) * 0.05))
dilated = grey_dilation(sample_np_pred_fg, size=(side, side))
eroded = grey_erosion(sample_np_pred_fg, size=(side, side))
sample_np_boundaries[np.where(((dilated - eroded) != 0))] = 1
np_boundaries[(sdx, 0, ...)] = sample_np_boundaries
boundaries = torch.tensor(np_boundaries).float().cuda()
downsampled_pred_matte = blurer(F.interpolate(pred_matte, scale_factor=(1 / 16), mode='bilinear'))
pseudo_gt_semantic = downsampled_pred_matte.detach()
pseudo_gt_semantic = (pseudo_gt_semantic * (pseudo_gt_semantic > 0.01).float())
pseudo_gt_matte = pred_semantic.detach()
pseudo_gt_matte = (pseudo_gt_matte * (pseudo_gt_matte > 0.01).float())
soc_semantic_loss = (F.mse_loss(pred_semantic, pseudo_gt_semantic) + F.mse_loss(downsampled_pred_matte, pseudo_gt_matte))
soc_semantic_loss = (soc_semantic_scale * torch.mean(soc_semantic_loss))
backup_detail_loss = (boundaries * F.l1_loss(pred_detail, pred_backup_detail, reduction='none'))
backup_detail_loss = (torch.sum(backup_detail_loss, dim=(1, 2, 3)) / torch.sum(boundaries, dim=(1, 2, 3)))
backup_detail_loss = torch.mean(backup_detail_loss)
backup_matte_loss = (boundaries * F.l1_loss(pred_matte, pred_backup_matte, reduction='none'))
backup_matte_loss = (torch.sum(backup_matte_loss, dim=(1, 2, 3)) / torch.sum(boundaries, dim=(1, 2, 3)))
backup_matte_loss = torch.mean(backup_matte_loss)
soc_detail_loss = (soc_detail_scale * (backup_detail_loss + backup_matte_loss))
loss = (soc_semantic_loss + soc_detail_loss)
loss.backward()
optimizer.step()
return (soc_semantic_loss, soc_detail_loss)<|docstring|>Self-Supervised sub-objective consistency (SOC) adaptation iteration of MODNet
This function fine-tunes MODNet for one iteration in an unlabeled dataset.
Note that SOC can only fine-tune a converged MODNet, i.e., MODNet that has been
trained in a labeled dataset.
Arguments:
modnet (torch.nn.Module): instance of MODNet
backup_modnet (torch.nn.Module): backup of the trained MODNet
optimizer (torch.optim.Optimizer): optimizer for self-supervised SOC
image (torch.autograd.Variable): input RGB image
its pixel values should be normalized
soc_semantic_scale (float): scale of the SOC semantic loss
NOTE: please adjust according to your dataset
soc_detail_scale (float): scale of the SOC detail loss
NOTE: please adjust according to your dataset
Returns:
soc_semantic_loss (torch.Tensor): loss of the semantic SOC
soc_detail_loss (torch.Tensor): loss of the detail SOC
Example:
import copy
import torch
from src.models.modnet import MODNet
from src.trainer import soc_adaptation_iter
bs = 1 # batch size
lr = 0.00001 # learn rate
epochs = 10 # total epochs
modnet = torch.nn.DataParallel(MODNet()).cuda()
modnet = LOAD_TRAINED_CKPT() # NOTE: please finish this function
optimizer = torch.optim.Adam(modnet.parameters(), lr=lr, betas=(0.9, 0.99))
dataloader = CREATE_YOUR_DATALOADER(bs) # NOTE: please finish this function
for epoch in range(0, epochs):
backup_modnet = copy.deepcopy(modnet)
for idx, (image) in enumerate(dataloader):
soc_semantic_loss, soc_detail_loss = soc_adaptation_iter(modnet, backup_modnet, optimizer, image)<|endoftext|>
|
bbb7bc6c2ee569eef1ce50268e18639168a656f3f76ac6234f717079d2fb94ec
|
def __init__(self, channels, kernel_size):
' \n Arguments:\n channels (int): Channel for input tensor\n kernel_size (int): Size of the kernel used in blurring\n '
super(GaussianBlurLayer, self).__init__()
self.channels = channels
self.kernel_size = kernel_size
assert ((self.kernel_size % 2) != 0)
self.op = nn.Sequential(nn.ReflectionPad2d(math.floor((self.kernel_size / 2))), nn.Conv2d(channels, channels, self.kernel_size, stride=1, padding=0, bias=None, groups=channels))
self._init_kernel()
|
Arguments:
channels (int): Channel for input tensor
kernel_size (int): Size of the kernel used in blurring
|
src/trainer.py
|
__init__
|
actboy/MODNet
| 0
|
python
|
def __init__(self, channels, kernel_size):
' \n Arguments:\n channels (int): Channel for input tensor\n kernel_size (int): Size of the kernel used in blurring\n '
super(GaussianBlurLayer, self).__init__()
self.channels = channels
self.kernel_size = kernel_size
assert ((self.kernel_size % 2) != 0)
self.op = nn.Sequential(nn.ReflectionPad2d(math.floor((self.kernel_size / 2))), nn.Conv2d(channels, channels, self.kernel_size, stride=1, padding=0, bias=None, groups=channels))
self._init_kernel()
|
def __init__(self, channels, kernel_size):
' \n Arguments:\n channels (int): Channel for input tensor\n kernel_size (int): Size of the kernel used in blurring\n '
super(GaussianBlurLayer, self).__init__()
self.channels = channels
self.kernel_size = kernel_size
assert ((self.kernel_size % 2) != 0)
self.op = nn.Sequential(nn.ReflectionPad2d(math.floor((self.kernel_size / 2))), nn.Conv2d(channels, channels, self.kernel_size, stride=1, padding=0, bias=None, groups=channels))
self._init_kernel()<|docstring|>Arguments:
channels (int): Channel for input tensor
kernel_size (int): Size of the kernel used in blurring<|endoftext|>
|
a2bf2798fa806f713e5a50611a79ebd894ad377b38376a4881ead8313cb717f5
|
def forward(self, x):
'\n Arguments:\n x (torch.Tensor): input 4D tensor\n Returns:\n torch.Tensor: Blurred version of the input \n '
if (not (len(list(x.shape)) == 4)):
print("'GaussianBlurLayer' requires a 4D tensor as input\n")
exit()
elif (not (x.shape[1] == self.channels)):
print("In 'GaussianBlurLayer', the required channel ({0}) isnot the same as input ({1})\n".format(self.channels, x.shape[1]))
exit()
return self.op(x)
|
Arguments:
x (torch.Tensor): input 4D tensor
Returns:
torch.Tensor: Blurred version of the input
|
src/trainer.py
|
forward
|
actboy/MODNet
| 0
|
python
|
def forward(self, x):
'\n Arguments:\n x (torch.Tensor): input 4D tensor\n Returns:\n torch.Tensor: Blurred version of the input \n '
if (not (len(list(x.shape)) == 4)):
print("'GaussianBlurLayer' requires a 4D tensor as input\n")
exit()
elif (not (x.shape[1] == self.channels)):
print("In 'GaussianBlurLayer', the required channel ({0}) isnot the same as input ({1})\n".format(self.channels, x.shape[1]))
exit()
return self.op(x)
|
def forward(self, x):
'\n Arguments:\n x (torch.Tensor): input 4D tensor\n Returns:\n torch.Tensor: Blurred version of the input \n '
if (not (len(list(x.shape)) == 4)):
print("'GaussianBlurLayer' requires a 4D tensor as input\n")
exit()
elif (not (x.shape[1] == self.channels)):
print("In 'GaussianBlurLayer', the required channel ({0}) isnot the same as input ({1})\n".format(self.channels, x.shape[1]))
exit()
return self.op(x)<|docstring|>Arguments:
x (torch.Tensor): input 4D tensor
Returns:
torch.Tensor: Blurred version of the input<|endoftext|>
|
23181380bfae3ab6d113d9f4e0668b6d13c8fadb43d0718293740ea6ed47d809
|
def gauss2d_kernel(n, sigma):
'\n Return a circular 2D Gaussian.\n\n The center of the Gaussian is centered at ``n//2``.\n\n Args:\n n (:obj:`int`):\n The length of the axis of the square (n x n) output\n array.\n sigma (:obj:`float`):\n The circular (symmetric) standard deviation of the 2D\n Gaussian\n\n Returns:\n `numpy.ndarray`_: Gridded representation of the 2D Gaussian.\n '
(x, y) = np.meshgrid(*(((np.arange(n, dtype=float) - (n // 2)),) * 2))
d = (2 * (sigma ** 2))
g = ((np.exp(((- ((x ** 2) + (y ** 2))) / d)) / d) / np.pi)
return (g / np.sum(g))
|
Return a circular 2D Gaussian.
The center of the Gaussian is centered at ``n//2``.
Args:
n (:obj:`int`):
The length of the axis of the square (n x n) output
array.
sigma (:obj:`float`):
The circular (symmetric) standard deviation of the 2D
Gaussian
Returns:
`numpy.ndarray`_: Gridded representation of the 2D Gaussian.
|
nirvana/models/beam.py
|
gauss2d_kernel
|
briandigiorgio/BarFit
| 1
|
python
|
def gauss2d_kernel(n, sigma):
'\n Return a circular 2D Gaussian.\n\n The center of the Gaussian is centered at ``n//2``.\n\n Args:\n n (:obj:`int`):\n The length of the axis of the square (n x n) output\n array.\n sigma (:obj:`float`):\n The circular (symmetric) standard deviation of the 2D\n Gaussian\n\n Returns:\n `numpy.ndarray`_: Gridded representation of the 2D Gaussian.\n '
(x, y) = np.meshgrid(*(((np.arange(n, dtype=float) - (n // 2)),) * 2))
d = (2 * (sigma ** 2))
g = ((np.exp(((- ((x ** 2) + (y ** 2))) / d)) / d) / np.pi)
return (g / np.sum(g))
|
def gauss2d_kernel(n, sigma):
'\n Return a circular 2D Gaussian.\n\n The center of the Gaussian is centered at ``n//2``.\n\n Args:\n n (:obj:`int`):\n The length of the axis of the square (n x n) output\n array.\n sigma (:obj:`float`):\n The circular (symmetric) standard deviation of the 2D\n Gaussian\n\n Returns:\n `numpy.ndarray`_: Gridded representation of the 2D Gaussian.\n '
(x, y) = np.meshgrid(*(((np.arange(n, dtype=float) - (n // 2)),) * 2))
d = (2 * (sigma ** 2))
g = ((np.exp(((- ((x ** 2) + (y ** 2))) / d)) / d) / np.pi)
return (g / np.sum(g))<|docstring|>Return a circular 2D Gaussian.
The center of the Gaussian is centered at ``n//2``.
Args:
n (:obj:`int`):
The length of the axis of the square (n x n) output
array.
sigma (:obj:`float`):
The circular (symmetric) standard deviation of the 2D
Gaussian
Returns:
`numpy.ndarray`_: Gridded representation of the 2D Gaussian.<|endoftext|>
|
2195031e961e25378b97acb6379a91ab19c9511f18f2b8696848d41b794a119e
|
def convolve_fft(data, kernel, kernel_fft=False, return_fft=False):
"\n Convolve data with a kernel.\n\n This is inspired by astropy.convolution.convolve_fft, but\n stripped down to what's needed for the expected application. That\n has the benefit of cutting down on the execution time, but limits\n its use.\n\n Beware:\n - ``data`` and ``kernel`` must have the same shape.\n - For the sum of all pixels in the convolved image to be the\n same as the input data, the kernel must sum to unity.\n - Padding is never added by default.\n\n Args:\n data (`numpy.ndarray`_):\n Data to convolve.\n kernel (`numpy.ndarray`_):\n The convolution kernel, which must have the same shape as\n ``data``. If ``kernel_fft`` is True, this is the FFT of\n the kernel image; otherwise, this is the direct kernel\n image with the center of the kernel at the center of the\n array.\n kernel_fft (:obj:`bool`, optional):\n Flag that the provided ``kernel`` array is actually the\n FFT of the kernel, not its direct image.\n return_fft (:obj:`bool`, optional):\n Flag to return the FFT of the convolved image, instead of\n the direct image.\n\n Returns:\n `numpy.ndarray`_: The convolved image, or its FFT, with the\n same shape as the provided ``data`` array.\n\n Raises:\n ValueError:\n Raised if ``data`` and ``kernel`` do not have the same\n shape or if any of their values are not finite.\n "
if (data.shape != kernel.shape):
raise ValueError('Data and kernel must have the same shape.')
if ((not np.all(np.isfinite(data))) or (not np.all(np.isfinite(kernel)))):
print('**********************************')
print(f'nans in data: {(~ np.isfinite(data)).sum()}, nans in kernel: {(~ np.isfinite(kernel)).sum()}')
raise ValueError('Data and kernel must both have valid values.')
datafft = np.fft.fftn(data)
kernfft = (kernel if kernel_fft else np.fft.fftn(np.fft.ifftshift(kernel)))
fftmult = (datafft * kernfft)
return (fftmult if return_fft else np.fft.ifftn(fftmult).real)
|
Convolve data with a kernel.
This is inspired by astropy.convolution.convolve_fft, but
stripped down to what's needed for the expected application. That
has the benefit of cutting down on the execution time, but limits
its use.
Beware:
- ``data`` and ``kernel`` must have the same shape.
- For the sum of all pixels in the convolved image to be the
same as the input data, the kernel must sum to unity.
- Padding is never added by default.
Args:
data (`numpy.ndarray`_):
Data to convolve.
kernel (`numpy.ndarray`_):
The convolution kernel, which must have the same shape as
``data``. If ``kernel_fft`` is True, this is the FFT of
the kernel image; otherwise, this is the direct kernel
image with the center of the kernel at the center of the
array.
kernel_fft (:obj:`bool`, optional):
Flag that the provided ``kernel`` array is actually the
FFT of the kernel, not its direct image.
return_fft (:obj:`bool`, optional):
Flag to return the FFT of the convolved image, instead of
the direct image.
Returns:
`numpy.ndarray`_: The convolved image, or its FFT, with the
same shape as the provided ``data`` array.
Raises:
ValueError:
Raised if ``data`` and ``kernel`` do not have the same
shape or if any of their values are not finite.
|
nirvana/models/beam.py
|
convolve_fft
|
briandigiorgio/BarFit
| 1
|
python
|
def convolve_fft(data, kernel, kernel_fft=False, return_fft=False):
"\n Convolve data with a kernel.\n\n This is inspired by astropy.convolution.convolve_fft, but\n stripped down to what's needed for the expected application. That\n has the benefit of cutting down on the execution time, but limits\n its use.\n\n Beware:\n - ``data`` and ``kernel`` must have the same shape.\n - For the sum of all pixels in the convolved image to be the\n same as the input data, the kernel must sum to unity.\n - Padding is never added by default.\n\n Args:\n data (`numpy.ndarray`_):\n Data to convolve.\n kernel (`numpy.ndarray`_):\n The convolution kernel, which must have the same shape as\n ``data``. If ``kernel_fft`` is True, this is the FFT of\n the kernel image; otherwise, this is the direct kernel\n image with the center of the kernel at the center of the\n array.\n kernel_fft (:obj:`bool`, optional):\n Flag that the provided ``kernel`` array is actually the\n FFT of the kernel, not its direct image.\n return_fft (:obj:`bool`, optional):\n Flag to return the FFT of the convolved image, instead of\n the direct image.\n\n Returns:\n `numpy.ndarray`_: The convolved image, or its FFT, with the\n same shape as the provided ``data`` array.\n\n Raises:\n ValueError:\n Raised if ``data`` and ``kernel`` do not have the same\n shape or if any of their values are not finite.\n "
if (data.shape != kernel.shape):
raise ValueError('Data and kernel must have the same shape.')
if ((not np.all(np.isfinite(data))) or (not np.all(np.isfinite(kernel)))):
print('**********************************')
print(f'nans in data: {(~ np.isfinite(data)).sum()}, nans in kernel: {(~ np.isfinite(kernel)).sum()}')
raise ValueError('Data and kernel must both have valid values.')
datafft = np.fft.fftn(data)
kernfft = (kernel if kernel_fft else np.fft.fftn(np.fft.ifftshift(kernel)))
fftmult = (datafft * kernfft)
return (fftmult if return_fft else np.fft.ifftn(fftmult).real)
|
def convolve_fft(data, kernel, kernel_fft=False, return_fft=False):
"\n Convolve data with a kernel.\n\n This is inspired by astropy.convolution.convolve_fft, but\n stripped down to what's needed for the expected application. That\n has the benefit of cutting down on the execution time, but limits\n its use.\n\n Beware:\n - ``data`` and ``kernel`` must have the same shape.\n - For the sum of all pixels in the convolved image to be the\n same as the input data, the kernel must sum to unity.\n - Padding is never added by default.\n\n Args:\n data (`numpy.ndarray`_):\n Data to convolve.\n kernel (`numpy.ndarray`_):\n The convolution kernel, which must have the same shape as\n ``data``. If ``kernel_fft`` is True, this is the FFT of\n the kernel image; otherwise, this is the direct kernel\n image with the center of the kernel at the center of the\n array.\n kernel_fft (:obj:`bool`, optional):\n Flag that the provided ``kernel`` array is actually the\n FFT of the kernel, not its direct image.\n return_fft (:obj:`bool`, optional):\n Flag to return the FFT of the convolved image, instead of\n the direct image.\n\n Returns:\n `numpy.ndarray`_: The convolved image, or its FFT, with the\n same shape as the provided ``data`` array.\n\n Raises:\n ValueError:\n Raised if ``data`` and ``kernel`` do not have the same\n shape or if any of their values are not finite.\n "
if (data.shape != kernel.shape):
raise ValueError('Data and kernel must have the same shape.')
if ((not np.all(np.isfinite(data))) or (not np.all(np.isfinite(kernel)))):
print('**********************************')
print(f'nans in data: {(~ np.isfinite(data)).sum()}, nans in kernel: {(~ np.isfinite(kernel)).sum()}')
raise ValueError('Data and kernel must both have valid values.')
datafft = np.fft.fftn(data)
kernfft = (kernel if kernel_fft else np.fft.fftn(np.fft.ifftshift(kernel)))
fftmult = (datafft * kernfft)
return (fftmult if return_fft else np.fft.ifftn(fftmult).real)<|docstring|>Convolve data with a kernel.
This is inspired by astropy.convolution.convolve_fft, but
stripped down to what's needed for the expected application. That
has the benefit of cutting down on the execution time, but limits
its use.
Beware:
- ``data`` and ``kernel`` must have the same shape.
- For the sum of all pixels in the convolved image to be the
same as the input data, the kernel must sum to unity.
- Padding is never added by default.
Args:
data (`numpy.ndarray`_):
Data to convolve.
kernel (`numpy.ndarray`_):
The convolution kernel, which must have the same shape as
``data``. If ``kernel_fft`` is True, this is the FFT of
the kernel image; otherwise, this is the direct kernel
image with the center of the kernel at the center of the
array.
kernel_fft (:obj:`bool`, optional):
Flag that the provided ``kernel`` array is actually the
FFT of the kernel, not its direct image.
return_fft (:obj:`bool`, optional):
Flag to return the FFT of the convolved image, instead of
the direct image.
Returns:
`numpy.ndarray`_: The convolved image, or its FFT, with the
same shape as the provided ``data`` array.
Raises:
ValueError:
Raised if ``data`` and ``kernel`` do not have the same
shape or if any of their values are not finite.<|endoftext|>
|
36106bf46c16cfc624b2143156ed274cea9e336d1ea4727a7d8d178ea6055f94
|
def construct_beam(psf, aperture, return_fft=False):
'\n Construct the beam profile.\n\n This is a simple wrapper for :func:`convolve_fft`. Nominally,\n both arrays should sum to unity.\n\n Args:\n psf (`numpy.ndarray`_):\n An image of the point-spread function of the\n observations. Must have the same shape as ``aperture``.\n aperture (`numpy.ndarray`_):\n Monochromatic image of the spectrograph aperture. Must\n have the same shape as ``psf``.\n return_fft (:obj:`bool`, optional):\n Flag to return the FFT of the beam profile, instead of\n its the direct image.\n\n Returns:\n `numpy.ndarray`_: The 2D image of the beam profile, or its\n FFT, with the same shape as the provided ``psf`` and\n ``aperture`` arrays.\n '
return convolve_fft(psf, aperture, return_fft=return_fft)
|
Construct the beam profile.
This is a simple wrapper for :func:`convolve_fft`. Nominally,
both arrays should sum to unity.
Args:
psf (`numpy.ndarray`_):
An image of the point-spread function of the
observations. Must have the same shape as ``aperture``.
aperture (`numpy.ndarray`_):
Monochromatic image of the spectrograph aperture. Must
have the same shape as ``psf``.
return_fft (:obj:`bool`, optional):
Flag to return the FFT of the beam profile, instead of
its the direct image.
Returns:
`numpy.ndarray`_: The 2D image of the beam profile, or its
FFT, with the same shape as the provided ``psf`` and
``aperture`` arrays.
|
nirvana/models/beam.py
|
construct_beam
|
briandigiorgio/BarFit
| 1
|
python
|
def construct_beam(psf, aperture, return_fft=False):
'\n Construct the beam profile.\n\n This is a simple wrapper for :func:`convolve_fft`. Nominally,\n both arrays should sum to unity.\n\n Args:\n psf (`numpy.ndarray`_):\n An image of the point-spread function of the\n observations. Must have the same shape as ``aperture``.\n aperture (`numpy.ndarray`_):\n Monochromatic image of the spectrograph aperture. Must\n have the same shape as ``psf``.\n return_fft (:obj:`bool`, optional):\n Flag to return the FFT of the beam profile, instead of\n its the direct image.\n\n Returns:\n `numpy.ndarray`_: The 2D image of the beam profile, or its\n FFT, with the same shape as the provided ``psf`` and\n ``aperture`` arrays.\n '
return convolve_fft(psf, aperture, return_fft=return_fft)
|
def construct_beam(psf, aperture, return_fft=False):
'\n Construct the beam profile.\n\n This is a simple wrapper for :func:`convolve_fft`. Nominally,\n both arrays should sum to unity.\n\n Args:\n psf (`numpy.ndarray`_):\n An image of the point-spread function of the\n observations. Must have the same shape as ``aperture``.\n aperture (`numpy.ndarray`_):\n Monochromatic image of the spectrograph aperture. Must\n have the same shape as ``psf``.\n return_fft (:obj:`bool`, optional):\n Flag to return the FFT of the beam profile, instead of\n its the direct image.\n\n Returns:\n `numpy.ndarray`_: The 2D image of the beam profile, or its\n FFT, with the same shape as the provided ``psf`` and\n ``aperture`` arrays.\n '
return convolve_fft(psf, aperture, return_fft=return_fft)<|docstring|>Construct the beam profile.
This is a simple wrapper for :func:`convolve_fft`. Nominally,
both arrays should sum to unity.
Args:
psf (`numpy.ndarray`_):
An image of the point-spread function of the
observations. Must have the same shape as ``aperture``.
aperture (`numpy.ndarray`_):
Monochromatic image of the spectrograph aperture. Must
have the same shape as ``psf``.
return_fft (:obj:`bool`, optional):
Flag to return the FFT of the beam profile, instead of
its the direct image.
Returns:
`numpy.ndarray`_: The 2D image of the beam profile, or its
FFT, with the same shape as the provided ``psf`` and
``aperture`` arrays.<|endoftext|>
|
4f3cdfd0167138e4ea54cf0aa23b7082c9ea2e9a8eb52bcda075600401158258
|
def smear(v, beam, beam_fft=False, sb=None, sig=None, cnvfftw=None, verbose=False):
'\n Get the beam-smeared surface brightness, velocity, and velocity\n dispersion fields.\n \n Args:\n v (`numpy.ndarray`_):\n 2D array with the discretely sampled velocity field. Must\n be square.\n beam (`numpy.ndarray`_):\n An image of the beam profile or its precomputed FFT. Must\n be the same shape as ``v``. If the beam profile is\n provided, it is expected to be normalized to unity.\n beam_fft (:obj:`bool`, optional):\n Flag that the provided data for ``beam`` is actually the\n precomputed FFT of the beam profile.\n sb (`numpy.ndarray`_, optional):\n 2D array with the surface brightness of the object. This\n is used to weight the convolution of the kinematic fields\n according to the luminosity distribution of the object.\n Must have the same shape as ``v``. If None, the\n convolution is unweighted.\n sig (`numpy.ndarray`_, optional):\n 2D array with the velocity dispersion measurements. Must\n have the same shape as ``v``.\n cnvfftw (:class:`~nirvana.models.beam.ConvolveFFTW`, optional):\n An object that expedites the convolutions using\n FFTW/pyFFTW. If None, the convolution is done using numpy\n FFT routines.\n\n Returns:\n :obj:`tuple`: Tuple of three objects, which are nominally the\n beam-smeared surface brightness, velocity, and velocity\n dispersion fields. The first and last objects in the tuple\n can be None, if ``sb`` or ``sig`` are not provided,\n respectively. The 2nd returned object is always the\n beam-smeared velocity field.\n\n Raises:\n ValueError:\n Raised if the provided arrays are not 2D or if the shapes\n of the arrays are not all the same.\n '
if (v.ndim != 2):
raise ValueError('Can only accept 2D images.')
if (beam.shape != v.shape):
raise ValueError('Input beam and velocity field array sizes must match.')
if ((sb is not None) and (sb.shape != v.shape)):
raise ValueError('Input surface-brightness and velocity field array sizes must match.')
if ((sig is not None) and (sig.shape != v.shape)):
raise ValueError('Input velocity dispersion and velocity field array sizes must match.')
_cnv = (convolve_fft if (cnvfftw is None) else cnvfftw)
bfft = (beam if beam_fft else (np.fft.fftn(np.fft.ifftshift(beam)) if (cnvfftw is None) else cnvfftw.fft(beam, shift=True)))
if verbose:
print('Convolving surface brightness...')
mom0 = _cnv((np.ones(v.shape, dtype=float) if (sb is None) else sb), bfft, kernel_fft=True)
if verbose:
print('Convolving velocity field...', sb, v)
mom1 = _cnv((v if (sb is None) else (sb * v)), bfft, kernel_fft=True)
if (mom0 is not None):
mom1 /= (mom0 + (mom0 == 0.0))
if (sig is None):
return (mom0, mom1, None)
_sig = (np.square(v) + np.square(sig))
if verbose:
print('Convolving velocity dispersion...')
mom2 = _cnv((_sig if (sb is None) else (sb * _sig)), bfft, kernel_fft=True)
if (mom0 is not None):
mom2 /= (mom0 + (mom0 == 0.0))
mom2 -= (mom1 ** 2)
mom2[(mom2 < 0)] = 0.0
return (mom0, mom1, np.sqrt(mom2))
|
Get the beam-smeared surface brightness, velocity, and velocity
dispersion fields.
Args:
v (`numpy.ndarray`_):
2D array with the discretely sampled velocity field. Must
be square.
beam (`numpy.ndarray`_):
An image of the beam profile or its precomputed FFT. Must
be the same shape as ``v``. If the beam profile is
provided, it is expected to be normalized to unity.
beam_fft (:obj:`bool`, optional):
Flag that the provided data for ``beam`` is actually the
precomputed FFT of the beam profile.
sb (`numpy.ndarray`_, optional):
2D array with the surface brightness of the object. This
is used to weight the convolution of the kinematic fields
according to the luminosity distribution of the object.
Must have the same shape as ``v``. If None, the
convolution is unweighted.
sig (`numpy.ndarray`_, optional):
2D array with the velocity dispersion measurements. Must
have the same shape as ``v``.
cnvfftw (:class:`~nirvana.models.beam.ConvolveFFTW`, optional):
An object that expedites the convolutions using
FFTW/pyFFTW. If None, the convolution is done using numpy
FFT routines.
Returns:
:obj:`tuple`: Tuple of three objects, which are nominally the
beam-smeared surface brightness, velocity, and velocity
dispersion fields. The first and last objects in the tuple
can be None, if ``sb`` or ``sig`` are not provided,
respectively. The 2nd returned object is always the
beam-smeared velocity field.
Raises:
ValueError:
Raised if the provided arrays are not 2D or if the shapes
of the arrays are not all the same.
|
nirvana/models/beam.py
|
smear
|
briandigiorgio/BarFit
| 1
|
python
|
def smear(v, beam, beam_fft=False, sb=None, sig=None, cnvfftw=None, verbose=False):
'\n Get the beam-smeared surface brightness, velocity, and velocity\n dispersion fields.\n \n Args:\n v (`numpy.ndarray`_):\n 2D array with the discretely sampled velocity field. Must\n be square.\n beam (`numpy.ndarray`_):\n An image of the beam profile or its precomputed FFT. Must\n be the same shape as ``v``. If the beam profile is\n provided, it is expected to be normalized to unity.\n beam_fft (:obj:`bool`, optional):\n Flag that the provided data for ``beam`` is actually the\n precomputed FFT of the beam profile.\n sb (`numpy.ndarray`_, optional):\n 2D array with the surface brightness of the object. This\n is used to weight the convolution of the kinematic fields\n according to the luminosity distribution of the object.\n Must have the same shape as ``v``. If None, the\n convolution is unweighted.\n sig (`numpy.ndarray`_, optional):\n 2D array with the velocity dispersion measurements. Must\n have the same shape as ``v``.\n cnvfftw (:class:`~nirvana.models.beam.ConvolveFFTW`, optional):\n An object that expedites the convolutions using\n FFTW/pyFFTW. If None, the convolution is done using numpy\n FFT routines.\n\n Returns:\n :obj:`tuple`: Tuple of three objects, which are nominally the\n beam-smeared surface brightness, velocity, and velocity\n dispersion fields. The first and last objects in the tuple\n can be None, if ``sb`` or ``sig`` are not provided,\n respectively. The 2nd returned object is always the\n beam-smeared velocity field.\n\n Raises:\n ValueError:\n Raised if the provided arrays are not 2D or if the shapes\n of the arrays are not all the same.\n '
if (v.ndim != 2):
raise ValueError('Can only accept 2D images.')
if (beam.shape != v.shape):
raise ValueError('Input beam and velocity field array sizes must match.')
if ((sb is not None) and (sb.shape != v.shape)):
raise ValueError('Input surface-brightness and velocity field array sizes must match.')
if ((sig is not None) and (sig.shape != v.shape)):
raise ValueError('Input velocity dispersion and velocity field array sizes must match.')
_cnv = (convolve_fft if (cnvfftw is None) else cnvfftw)
bfft = (beam if beam_fft else (np.fft.fftn(np.fft.ifftshift(beam)) if (cnvfftw is None) else cnvfftw.fft(beam, shift=True)))
if verbose:
print('Convolving surface brightness...')
mom0 = _cnv((np.ones(v.shape, dtype=float) if (sb is None) else sb), bfft, kernel_fft=True)
if verbose:
print('Convolving velocity field...', sb, v)
mom1 = _cnv((v if (sb is None) else (sb * v)), bfft, kernel_fft=True)
if (mom0 is not None):
mom1 /= (mom0 + (mom0 == 0.0))
if (sig is None):
return (mom0, mom1, None)
_sig = (np.square(v) + np.square(sig))
if verbose:
print('Convolving velocity dispersion...')
mom2 = _cnv((_sig if (sb is None) else (sb * _sig)), bfft, kernel_fft=True)
if (mom0 is not None):
mom2 /= (mom0 + (mom0 == 0.0))
mom2 -= (mom1 ** 2)
mom2[(mom2 < 0)] = 0.0
return (mom0, mom1, np.sqrt(mom2))
|
def smear(v, beam, beam_fft=False, sb=None, sig=None, cnvfftw=None, verbose=False):
'\n Get the beam-smeared surface brightness, velocity, and velocity\n dispersion fields.\n \n Args:\n v (`numpy.ndarray`_):\n 2D array with the discretely sampled velocity field. Must\n be square.\n beam (`numpy.ndarray`_):\n An image of the beam profile or its precomputed FFT. Must\n be the same shape as ``v``. If the beam profile is\n provided, it is expected to be normalized to unity.\n beam_fft (:obj:`bool`, optional):\n Flag that the provided data for ``beam`` is actually the\n precomputed FFT of the beam profile.\n sb (`numpy.ndarray`_, optional):\n 2D array with the surface brightness of the object. This\n is used to weight the convolution of the kinematic fields\n according to the luminosity distribution of the object.\n Must have the same shape as ``v``. If None, the\n convolution is unweighted.\n sig (`numpy.ndarray`_, optional):\n 2D array with the velocity dispersion measurements. Must\n have the same shape as ``v``.\n cnvfftw (:class:`~nirvana.models.beam.ConvolveFFTW`, optional):\n An object that expedites the convolutions using\n FFTW/pyFFTW. If None, the convolution is done using numpy\n FFT routines.\n\n Returns:\n :obj:`tuple`: Tuple of three objects, which are nominally the\n beam-smeared surface brightness, velocity, and velocity\n dispersion fields. The first and last objects in the tuple\n can be None, if ``sb`` or ``sig`` are not provided,\n respectively. The 2nd returned object is always the\n beam-smeared velocity field.\n\n Raises:\n ValueError:\n Raised if the provided arrays are not 2D or if the shapes\n of the arrays are not all the same.\n '
if (v.ndim != 2):
raise ValueError('Can only accept 2D images.')
if (beam.shape != v.shape):
raise ValueError('Input beam and velocity field array sizes must match.')
if ((sb is not None) and (sb.shape != v.shape)):
raise ValueError('Input surface-brightness and velocity field array sizes must match.')
if ((sig is not None) and (sig.shape != v.shape)):
raise ValueError('Input velocity dispersion and velocity field array sizes must match.')
_cnv = (convolve_fft if (cnvfftw is None) else cnvfftw)
bfft = (beam if beam_fft else (np.fft.fftn(np.fft.ifftshift(beam)) if (cnvfftw is None) else cnvfftw.fft(beam, shift=True)))
if verbose:
print('Convolving surface brightness...')
mom0 = _cnv((np.ones(v.shape, dtype=float) if (sb is None) else sb), bfft, kernel_fft=True)
if verbose:
print('Convolving velocity field...', sb, v)
mom1 = _cnv((v if (sb is None) else (sb * v)), bfft, kernel_fft=True)
if (mom0 is not None):
mom1 /= (mom0 + (mom0 == 0.0))
if (sig is None):
return (mom0, mom1, None)
_sig = (np.square(v) + np.square(sig))
if verbose:
print('Convolving velocity dispersion...')
mom2 = _cnv((_sig if (sb is None) else (sb * _sig)), bfft, kernel_fft=True)
if (mom0 is not None):
mom2 /= (mom0 + (mom0 == 0.0))
mom2 -= (mom1 ** 2)
mom2[(mom2 < 0)] = 0.0
return (mom0, mom1, np.sqrt(mom2))<|docstring|>Get the beam-smeared surface brightness, velocity, and velocity
dispersion fields.
Args:
v (`numpy.ndarray`_):
2D array with the discretely sampled velocity field. Must
be square.
beam (`numpy.ndarray`_):
An image of the beam profile or its precomputed FFT. Must
be the same shape as ``v``. If the beam profile is
provided, it is expected to be normalized to unity.
beam_fft (:obj:`bool`, optional):
Flag that the provided data for ``beam`` is actually the
precomputed FFT of the beam profile.
sb (`numpy.ndarray`_, optional):
2D array with the surface brightness of the object. This
is used to weight the convolution of the kinematic fields
according to the luminosity distribution of the object.
Must have the same shape as ``v``. If None, the
convolution is unweighted.
sig (`numpy.ndarray`_, optional):
2D array with the velocity dispersion measurements. Must
have the same shape as ``v``.
cnvfftw (:class:`~nirvana.models.beam.ConvolveFFTW`, optional):
An object that expedites the convolutions using
FFTW/pyFFTW. If None, the convolution is done using numpy
FFT routines.
Returns:
:obj:`tuple`: Tuple of three objects, which are nominally the
beam-smeared surface brightness, velocity, and velocity
dispersion fields. The first and last objects in the tuple
can be None, if ``sb`` or ``sig`` are not provided,
respectively. The 2nd returned object is always the
beam-smeared velocity field.
Raises:
ValueError:
Raised if the provided arrays are not 2D or if the shapes
of the arrays are not all the same.<|endoftext|>
|
6961f94541708609d62731c45c25860e05b6d0cc84b0640dcc5ef6617a8d790b
|
def deriv_smear(v, dv, beam, beam_fft=False, sb=None, dsb=None, sig=None, dsig=None, cnvfftw=None):
'\n Get the beam-smeared surface brightness, velocity, and velocity\n dispersion fields and their derivatives.\n\n Args:\n v (`numpy.ndarray`_):\n 2D array with the discretely sampled velocity field. Must be square.\n dv (`numpy.ndarray`_):\n 2D arrays with velocity field derivatives with respect to a set of\n model parameters. The shape of the first two axes must match ``v``;\n the third axis is the number of parameters. The `numpy.ndarray`_\n *must* have three dimensions, even if the derivative is w.r.t. a\n single parameter.\n beam (`numpy.ndarray`_):\n An image of the beam profile or its precomputed FFT. Must be the\n same shape as ``v``. If the beam profile is provided, it is expected\n to be normalized to unity.\n beam_fft (:obj:`bool`, optional):\n Flag that the provided data for ``beam`` is actually the precomputed\n FFT of the beam profile.\n sb (`numpy.ndarray`_, optional):\n 2D array with the surface brightness of the object. This is used to\n weight the convolution of the kinematic fields according to the\n luminosity distribution of the object. Must have the same shape as\n ``v``. If None, the convolution is unweighted.\n dsb (`numpy.ndarray`_, optional):\n 2D arrays with the derivative of the surface brightness of the\n object with respect to a set of parameters. Must have the same\n shape as ``dv``. If None, the surface brightness derivatives are\n assumed to be 0.\n sig (`numpy.ndarray`_, optional):\n 2D array with the velocity dispersion measurements. Must have the\n same shape as ``v``.\n dsig (`numpy.ndarray`_, optional):\n 2D arrays with the derivative of the velocity dispersion\n measurements with respect to a set of model parameters. Must have\n the same shape as ``dv``.\n cnvfftw (:class:`~nirvana.models.beam.ConvolveFFTW`, optional):\n An object that expedites the convolutions using FFTW/pyFFTW. 
If\n None, the convolution is done using numpy FFT routines.\n\n Returns:\n :obj:`tuple`: Tuple of six `numpy.ndarray`_ objects, which are nominally\n the beam-smeared surface brightness, velocity, and velocity dispersion\n fields, and their derivatives, respectively.\n\n Raises:\n ValueError:\n Raised if the provided arrays are not 2D or if the shapes\n of the arrays are not all the same.\n '
if (v.ndim != 2):
raise ValueError('Can only accept 2D images.')
if (dv.ndim != 3):
raise ValueError('Velocity-field derivative array must be 3D.')
if (v.shape != dv.shape[:2]):
raise ValueError('Shape of first two axes of dv must match shape of v.')
if (beam.shape != v.shape):
raise ValueError('Input beam and velocity field array sizes must match.')
if ((sb is not None) and (sb.shape != v.shape)):
raise ValueError('Input surface-brightness and velocity field array sizes must match.')
if ((sb is None) and (dsb is not None)):
raise ValueError('Must provide surface-brightness if providing its derivative.')
if ((dsb is not None) and (dsb.shape != dv.shape)):
raise ValueError('Surface-brightness derivative array shape must match dv.')
if ((sig is not None) and (sig.shape != v.shape)):
raise ValueError('Input velocity dispersion and velocity field array sizes must match.')
if ((sig is None) and (dsig is not None)):
raise ValueError('Must provide velocity dispersion if providing its derivative.')
if ((dsig is not None) and (dsig.shape != dv.shape)):
raise ValueError('Velocity dispersion derivative array shape must match dv.')
_cnv = (convolve_fft if (cnvfftw is None) else cnvfftw)
bfft = (beam if beam_fft else (np.fft.fftn(np.fft.ifftshift(beam)) if (cnvfftw is None) else cnvfftw.fft(beam, shift=True)))
npar = dv.shape[(- 1)]
_sb = (np.ones(v.shape, dtype=float) if (sb is None) else sb)
mom0 = _cnv(_sb, bfft, kernel_fft=True)
dmom0 = None
if (dsb is not None):
dmom0 = dsb.copy()
for i in range(npar):
dmom0[(..., i)] = _cnv(dsb[(..., i)], bfft, kernel_fft=True)
inv_mom0 = (1.0 / (mom0 + (mom0 == 0.0)))
mom1 = (_cnv((_sb * v), bfft, kernel_fft=True) * inv_mom0)
dmom1 = dv.copy()
for i in range(npar):
dmom1[(..., i)] = (_cnv((_sb * dv[(..., i)]), bfft, kernel_fft=True) * inv_mom0)
if (dsb is not None):
dmom1[(..., i)] += (_cnv((v * dsb[(..., i)]), bfft, kernel_fft=True) * inv_mom0)
dmom1[(..., i)] -= ((mom1 * inv_mom0) * dmom0[(..., i)])
if (sig is None):
return (mom0, mom1, None, dmom0, dmom1, None)
_sig = (np.square(v) + np.square(sig))
mom2 = ((_cnv((_sb * _sig), bfft, kernel_fft=True) * inv_mom0) - (mom1 ** 2))
mom2[(mom2 < 0)] = 0.0
_mom2 = np.sqrt(mom2)
_inv_mom2 = (1.0 / (_mom2 + (_mom2 == 0.0)))
dmom2 = dv.copy()
for i in range(npar):
dmom2[(..., i)] = ((_cnv((((2 * _sb) * v) * dv[(..., i)]), bfft, kernel_fft=True) * inv_mom0) - ((2 * mom1) * dmom1[(..., i)]))
if (dsb is not None):
dmom2[(..., i)] += (_cnv((_sig * dsb[(..., i)]), bfft, kernel_fft=True) * inv_mom0)
dmom2[(..., i)] -= ((mom2 * inv_mom0) * dmom0[(..., i)])
if (dsig is not None):
dmom2[(..., i)] += (_cnv((((2 * _sb) * sig) * dsig[(..., i)]), bfft, kernel_fft=True) * inv_mom0)
dmom2[(..., i)] *= (_inv_mom2 / 2)
return (mom0, mom1, _mom2, dmom0, dmom1, dmom2)
|
Get the beam-smeared surface brightness, velocity, and velocity
dispersion fields and their derivatives.
Args:
v (`numpy.ndarray`_):
2D array with the discretely sampled velocity field. Must be square.
dv (`numpy.ndarray`_):
2D arrays with velocity field derivatives with respect to a set of
model parameters. The shape of the first two axes must match ``v``;
the third axis is the number of parameters. The `numpy.ndarray`_
*must* have three dimensions, even if the derivative is w.r.t. a
single parameter.
beam (`numpy.ndarray`_):
An image of the beam profile or its precomputed FFT. Must be the
same shape as ``v``. If the beam profile is provided, it is expected
to be normalized to unity.
beam_fft (:obj:`bool`, optional):
Flag that the provided data for ``beam`` is actually the precomputed
FFT of the beam profile.
sb (`numpy.ndarray`_, optional):
2D array with the surface brightness of the object. This is used to
weight the convolution of the kinematic fields according to the
luminosity distribution of the object. Must have the same shape as
``v``. If None, the convolution is unweighted.
dsb (`numpy.ndarray`_, optional):
2D arrays with the derivative of the surface brightness of the
object with respect to a set of parameters. Must have the same
shape as ``dv``. If None, the surface brightness derivatives are
assumed to be 0.
sig (`numpy.ndarray`_, optional):
2D array with the velocity dispersion measurements. Must have the
same shape as ``v``.
dsig (`numpy.ndarray`_, optional):
2D arrays with the derivative of the velocity dispersion
measurements with respect to a set of model parameters. Must have
the same shape as ``dv``.
cnvfftw (:class:`~nirvana.models.beam.ConvolveFFTW`, optional):
An object that expedites the convolutions using FFTW/pyFFTW. If
None, the convolution is done using numpy FFT routines.
Returns:
:obj:`tuple`: Tuple of six `numpy.ndarray`_ objects, which are nominally
the beam-smeared surface brightness, velocity, and velocity dispersion
fields, and their derivatives, respectively.
Raises:
ValueError:
Raised if the provided arrays are not 2D or if the shapes
of the arrays are not all the same.
|
nirvana/models/beam.py
|
deriv_smear
|
briandigiorgio/BarFit
| 1
|
python
|
def deriv_smear(v, dv, beam, beam_fft=False, sb=None, dsb=None, sig=None, dsig=None, cnvfftw=None):
'\n Get the beam-smeared surface brightness, velocity, and velocity\n dispersion fields and their derivatives.\n\n Args:\n v (`numpy.ndarray`_):\n 2D array with the discretely sampled velocity field. Must be square.\n dv (`numpy.ndarray`_):\n 2D arrays with velocity field derivatives with respect to a set of\n model parameters. The shape of the first two axes must match ``v``;\n the third axis is the number of parameters. The `numpy.ndarray`_\n *must* have three dimensions, even if the derivative is w.r.t. a\n single parameter.\n beam (`numpy.ndarray`_):\n An image of the beam profile or its precomputed FFT. Must be the\n same shape as ``v``. If the beam profile is provided, it is expected\n to be normalized to unity.\n beam_fft (:obj:`bool`, optional):\n Flag that the provided data for ``beam`` is actually the precomputed\n FFT of the beam profile.\n sb (`numpy.ndarray`_, optional):\n 2D array with the surface brightness of the object. This is used to\n weight the convolution of the kinematic fields according to the\n luminosity distribution of the object. Must have the same shape as\n ``v``. If None, the convolution is unweighted.\n dsb (`numpy.ndarray`_, optional):\n 2D arrays with the derivative of the surface brightness of the\n object with respect to a set of parameters. Must have the same\n shape as ``dv``. If None, the surface brightness derivatives are\n assumed to be 0.\n sig (`numpy.ndarray`_, optional):\n 2D array with the velocity dispersion measurements. Must have the\n same shape as ``v``.\n dsig (`numpy.ndarray`_, optional):\n 2D arrays with the derivative of the velocity dispersion\n measurements with respect to a set of model parameters. Must have\n the same shape as ``dv``.\n cnvfftw (:class:`~nirvana.models.beam.ConvolveFFTW`, optional):\n An object that expedites the convolutions using FFTW/pyFFTW. 
If\n None, the convolution is done using numpy FFT routines.\n\n Returns:\n :obj:`tuple`: Tuple of six `numpy.ndarray`_ objects, which are nominally\n the beam-smeared surface brightness, velocity, and velocity dispersion\n fields, and their derivatives, respectively.\n\n Raises:\n ValueError:\n Raised if the provided arrays are not 2D or if the shapes\n of the arrays are not all the same.\n '
if (v.ndim != 2):
raise ValueError('Can only accept 2D images.')
if (dv.ndim != 3):
raise ValueError('Velocity-field derivative array must be 3D.')
if (v.shape != dv.shape[:2]):
raise ValueError('Shape of first two axes of dv must match shape of v.')
if (beam.shape != v.shape):
raise ValueError('Input beam and velocity field array sizes must match.')
if ((sb is not None) and (sb.shape != v.shape)):
raise ValueError('Input surface-brightness and velocity field array sizes must match.')
if ((sb is None) and (dsb is not None)):
raise ValueError('Must provide surface-brightness if providing its derivative.')
if ((dsb is not None) and (dsb.shape != dv.shape)):
raise ValueError('Surface-brightness derivative array shape must match dv.')
if ((sig is not None) and (sig.shape != v.shape)):
raise ValueError('Input velocity dispersion and velocity field array sizes must match.')
if ((sig is None) and (dsig is not None)):
raise ValueError('Must provide velocity dispersion if providing its derivative.')
if ((dsig is not None) and (dsig.shape != dv.shape)):
raise ValueError('Velocity dispersion derivative array shape must match dv.')
_cnv = (convolve_fft if (cnvfftw is None) else cnvfftw)
bfft = (beam if beam_fft else (np.fft.fftn(np.fft.ifftshift(beam)) if (cnvfftw is None) else cnvfftw.fft(beam, shift=True)))
npar = dv.shape[(- 1)]
_sb = (np.ones(v.shape, dtype=float) if (sb is None) else sb)
mom0 = _cnv(_sb, bfft, kernel_fft=True)
dmom0 = None
if (dsb is not None):
dmom0 = dsb.copy()
for i in range(npar):
dmom0[(..., i)] = _cnv(dsb[(..., i)], bfft, kernel_fft=True)
inv_mom0 = (1.0 / (mom0 + (mom0 == 0.0)))
mom1 = (_cnv((_sb * v), bfft, kernel_fft=True) * inv_mom0)
dmom1 = dv.copy()
for i in range(npar):
dmom1[(..., i)] = (_cnv((_sb * dv[(..., i)]), bfft, kernel_fft=True) * inv_mom0)
if (dsb is not None):
dmom1[(..., i)] += (_cnv((v * dsb[(..., i)]), bfft, kernel_fft=True) * inv_mom0)
dmom1[(..., i)] -= ((mom1 * inv_mom0) * dmom0[(..., i)])
if (sig is None):
return (mom0, mom1, None, dmom0, dmom1, None)
_sig = (np.square(v) + np.square(sig))
mom2 = ((_cnv((_sb * _sig), bfft, kernel_fft=True) * inv_mom0) - (mom1 ** 2))
mom2[(mom2 < 0)] = 0.0
_mom2 = np.sqrt(mom2)
_inv_mom2 = (1.0 / (_mom2 + (_mom2 == 0.0)))
dmom2 = dv.copy()
for i in range(npar):
dmom2[(..., i)] = ((_cnv((((2 * _sb) * v) * dv[(..., i)]), bfft, kernel_fft=True) * inv_mom0) - ((2 * mom1) * dmom1[(..., i)]))
if (dsb is not None):
dmom2[(..., i)] += (_cnv((_sig * dsb[(..., i)]), bfft, kernel_fft=True) * inv_mom0)
dmom2[(..., i)] -= ((mom2 * inv_mom0) * dmom0[(..., i)])
if (dsig is not None):
dmom2[(..., i)] += (_cnv((((2 * _sb) * sig) * dsig[(..., i)]), bfft, kernel_fft=True) * inv_mom0)
dmom2[(..., i)] *= (_inv_mom2 / 2)
return (mom0, mom1, _mom2, dmom0, dmom1, dmom2)
|
def deriv_smear(v, dv, beam, beam_fft=False, sb=None, dsb=None, sig=None, dsig=None, cnvfftw=None):
'\n Get the beam-smeared surface brightness, velocity, and velocity\n dispersion fields and their derivatives.\n\n Args:\n v (`numpy.ndarray`_):\n 2D array with the discretely sampled velocity field. Must be square.\n dv (`numpy.ndarray`_):\n 2D arrays with velocity field derivatives with respect to a set of\n model parameters. The shape of the first two axes must match ``v``;\n the third axis is the number of parameters. The `numpy.ndarray`_\n *must* have three dimensions, even if the derivative is w.r.t. a\n single parameter.\n beam (`numpy.ndarray`_):\n An image of the beam profile or its precomputed FFT. Must be the\n same shape as ``v``. If the beam profile is provided, it is expected\n to be normalized to unity.\n beam_fft (:obj:`bool`, optional):\n Flag that the provided data for ``beam`` is actually the precomputed\n FFT of the beam profile.\n sb (`numpy.ndarray`_, optional):\n 2D array with the surface brightness of the object. This is used to\n weight the convolution of the kinematic fields according to the\n luminosity distribution of the object. Must have the same shape as\n ``v``. If None, the convolution is unweighted.\n dsb (`numpy.ndarray`_, optional):\n 2D arrays with the derivative of the surface brightness of the\n object with respect to a set of parameters. Must have the same\n shape as ``dv``. If None, the surface brightness derivatives are\n assumed to be 0.\n sig (`numpy.ndarray`_, optional):\n 2D array with the velocity dispersion measurements. Must have the\n same shape as ``v``.\n dsig (`numpy.ndarray`_, optional):\n 2D arrays with the derivative of the velocity dispersion\n measurements with respect to a set of model parameters. Must have\n the same shape as ``dv``.\n cnvfftw (:class:`~nirvana.models.beam.ConvolveFFTW`, optional):\n An object that expedites the convolutions using FFTW/pyFFTW. 
If\n None, the convolution is done using numpy FFT routines.\n\n Returns:\n :obj:`tuple`: Tuple of six `numpy.ndarray`_ objects, which are nominally\n the beam-smeared surface brightness, velocity, and velocity dispersion\n fields, and their derivatives, respectively.\n\n Raises:\n ValueError:\n Raised if the provided arrays are not 2D or if the shapes\n of the arrays are not all the same.\n '
if (v.ndim != 2):
raise ValueError('Can only accept 2D images.')
if (dv.ndim != 3):
raise ValueError('Velocity-field derivative array must be 3D.')
if (v.shape != dv.shape[:2]):
raise ValueError('Shape of first two axes of dv must match shape of v.')
if (beam.shape != v.shape):
raise ValueError('Input beam and velocity field array sizes must match.')
if ((sb is not None) and (sb.shape != v.shape)):
raise ValueError('Input surface-brightness and velocity field array sizes must match.')
if ((sb is None) and (dsb is not None)):
raise ValueError('Must provide surface-brightness if providing its derivative.')
if ((dsb is not None) and (dsb.shape != dv.shape)):
raise ValueError('Surface-brightness derivative array shape must match dv.')
if ((sig is not None) and (sig.shape != v.shape)):
raise ValueError('Input velocity dispersion and velocity field array sizes must match.')
if ((sig is None) and (dsig is not None)):
raise ValueError('Must provide velocity dispersion if providing its derivative.')
if ((dsig is not None) and (dsig.shape != dv.shape)):
raise ValueError('Velocity dispersion derivative array shape must match dv.')
_cnv = (convolve_fft if (cnvfftw is None) else cnvfftw)
bfft = (beam if beam_fft else (np.fft.fftn(np.fft.ifftshift(beam)) if (cnvfftw is None) else cnvfftw.fft(beam, shift=True)))
npar = dv.shape[(- 1)]
_sb = (np.ones(v.shape, dtype=float) if (sb is None) else sb)
mom0 = _cnv(_sb, bfft, kernel_fft=True)
dmom0 = None
if (dsb is not None):
dmom0 = dsb.copy()
for i in range(npar):
dmom0[(..., i)] = _cnv(dsb[(..., i)], bfft, kernel_fft=True)
inv_mom0 = (1.0 / (mom0 + (mom0 == 0.0)))
mom1 = (_cnv((_sb * v), bfft, kernel_fft=True) * inv_mom0)
dmom1 = dv.copy()
for i in range(npar):
dmom1[(..., i)] = (_cnv((_sb * dv[(..., i)]), bfft, kernel_fft=True) * inv_mom0)
if (dsb is not None):
dmom1[(..., i)] += (_cnv((v * dsb[(..., i)]), bfft, kernel_fft=True) * inv_mom0)
dmom1[(..., i)] -= ((mom1 * inv_mom0) * dmom0[(..., i)])
if (sig is None):
return (mom0, mom1, None, dmom0, dmom1, None)
_sig = (np.square(v) + np.square(sig))
mom2 = ((_cnv((_sb * _sig), bfft, kernel_fft=True) * inv_mom0) - (mom1 ** 2))
mom2[(mom2 < 0)] = 0.0
_mom2 = np.sqrt(mom2)
_inv_mom2 = (1.0 / (_mom2 + (_mom2 == 0.0)))
dmom2 = dv.copy()
for i in range(npar):
dmom2[(..., i)] = ((_cnv((((2 * _sb) * v) * dv[(..., i)]), bfft, kernel_fft=True) * inv_mom0) - ((2 * mom1) * dmom1[(..., i)]))
if (dsb is not None):
dmom2[(..., i)] += (_cnv((_sig * dsb[(..., i)]), bfft, kernel_fft=True) * inv_mom0)
dmom2[(..., i)] -= ((mom2 * inv_mom0) * dmom0[(..., i)])
if (dsig is not None):
dmom2[(..., i)] += (_cnv((((2 * _sb) * sig) * dsig[(..., i)]), bfft, kernel_fft=True) * inv_mom0)
dmom2[(..., i)] *= (_inv_mom2 / 2)
return (mom0, mom1, _mom2, dmom0, dmom1, dmom2)<|docstring|>Get the beam-smeared surface brightness, velocity, and velocity
dispersion fields and their derivatives.
Args:
v (`numpy.ndarray`_):
2D array with the discretely sampled velocity field. Must be square.
dv (`numpy.ndarray`_):
2D arrays with velocity field derivatives with respect to a set of
model parameters. The shape of the first two axes must match ``v``;
the third axis is the number of parameters. The `numpy.ndarray`_
*must* have three dimensions, even if the derivative is w.r.t. a
single parameter.
beam (`numpy.ndarray`_):
An image of the beam profile or its precomputed FFT. Must be the
same shape as ``v``. If the beam profile is provided, it is expected
to be normalized to unity.
beam_fft (:obj:`bool`, optional):
Flag that the provided data for ``beam`` is actually the precomputed
FFT of the beam profile.
sb (`numpy.ndarray`_, optional):
2D array with the surface brightness of the object. This is used to
weight the convolution of the kinematic fields according to the
luminosity distribution of the object. Must have the same shape as
``v``. If None, the convolution is unweighted.
dsb (`numpy.ndarray`_, optional):
2D arrays with the derivative of the surface brightness of the
object with respect to a set of parameters. Must have the same
shape as ``dv``. If None, the surface brightness derivatives are
assumed to be 0.
sig (`numpy.ndarray`_, optional):
2D array with the velocity dispersion measurements. Must have the
same shape as ``v``.
dsig (`numpy.ndarray`_, optional):
2D arrays with the derivative of the velocity dispersion
measurements with respect to a set of model parameters. Must have
the same shape as ``dv``.
cnvfftw (:class:`~nirvana.models.beam.ConvolveFFTW`, optional):
An object that expedites the convolutions using FFTW/pyFFTW. If
None, the convolution is done using numpy FFT routines.
Returns:
:obj:`tuple`: Tuple of six `numpy.ndarray`_ objects, which are nominally
the beam-smeared surface brightness, velocity, and velocity dispersion
fields, and their derivatives, respectively.
Raises:
ValueError:
Raised if the provided arrays are not 2D or if the shapes
of the arrays are not all the same.<|endoftext|>
|
a99080cbcd0fc181cff1cf7b68bc9f7a2b4626650ec1f0521f04cd0ce73d553f
|
def __call__(self, data, kernel, kernel_fft=False, return_fft=False):
'\n Convolve data with a kernel using FFTW.\n\n This method is identical to :func:`convolve_fft`, but uses\n the pre-established memory working space setup during the\n instantiation of the object.\n\n Beware:\n - ``data`` and ``kernel`` must have the same shape.\n - For the sum of all pixels in the convolved image to be the\n same as the input data, the kernel must sum to unity.\n - Padding is never added by default.\n\n Args:\n data (`numpy.ndarray`_):\n Data to convolve. Data type must be `numpy.float64` and shape\n must match :attr:`shape`.\n kernel (`numpy.ndarray`_):\n The convolution kernel, which must have the same shape as\n ``data``. If ``kernel_fft`` is True, this is the FFT of the\n kernel image and must have type `numpy.complex128`_; otherwise,\n this is the direct kernel image with the center of the kernel at\n the center of the array and must have type `numpy.float64`_.\n kernel_fft (:obj:`bool`, optional):\n Flag that the provided ``kernel`` array is actually the\n FFT of the kernel, not its direct image.\n return_fft (:obj:`bool`, optional):\n Flag to return the FFT of the convolved image, instead of\n the direct image.\n\n Returns:\n `numpy.ndarray`_: The convolved image, or its FFT, with the\n same shape as the provided ``data`` array.\n\n Raises:\n ValueError:\n Raised if ``data`` and ``kernel`` do not have the expected shape\n or if any of their values are not finite.\n TypeError:\n Raised if the data types of either ``data`` or ``kernel`` do not\n match the expected values (numpy.float64 for direct data,\n numpy.complex128 for Fourier Transform data).\n '
if ((not np.all(np.isfinite(data))) or (not np.all(np.isfinite(kernel)))):
raise ValueError('Data and kernel must both have valid values.')
self.fft(data)
if (kernel.shape != self.shape):
raise ValueError('Kernel has incorrect shape for this instance of ConvolveFFTW.')
if kernel_fft:
if (kernel.dtype.type is not np.complex128):
raise TypeError('Kernel FFT must be of type numpy.complex128.')
self.kern_fft[...] = kernel
else:
if (kernel.dtype.type is not np.float64):
raise TypeError('Kernel must be of type numpy.float64.')
self.kern.real[...] = np.fft.ifftshift(kernel)
self.kfft()
if return_fft:
return (self.data_fft * self.kern_fft)
self.data_fft *= self.kern_fft
self.ifft()
return self.dcnv.real.copy()
|
Convolve data with a kernel using FFTW.
This method is identical to :func:`convolve_fft`, but uses
the pre-established memory working space setup during the
instantiation of the object.
Beware:
- ``data`` and ``kernel`` must have the same shape.
- For the sum of all pixels in the convolved image to be the
same as the input data, the kernel must sum to unity.
- Padding is never added by default.
Args:
data (`numpy.ndarray`_):
Data to convolve. Data type must be `numpy.float64` and shape
must match :attr:`shape`.
kernel (`numpy.ndarray`_):
The convolution kernel, which must have the same shape as
``data``. If ``kernel_fft`` is True, this is the FFT of the
kernel image and must have type `numpy.complex128`_; otherwise,
this is the direct kernel image with the center of the kernel at
the center of the array and must have type `numpy.float64`_.
kernel_fft (:obj:`bool`, optional):
Flag that the provided ``kernel`` array is actually the
FFT of the kernel, not its direct image.
return_fft (:obj:`bool`, optional):
Flag to return the FFT of the convolved image, instead of
the direct image.
Returns:
`numpy.ndarray`_: The convolved image, or its FFT, with the
same shape as the provided ``data`` array.
Raises:
ValueError:
Raised if ``data`` and ``kernel`` do not have the expected shape
or if any of their values are not finite.
TypeError:
Raised if the data types of either ``data`` or ``kernel`` do not
match the expected values (numpy.float64 for direct data,
numpy.complex128 for Fourier Transform data).
|
nirvana/models/beam.py
|
__call__
|
briandigiorgio/BarFit
| 1
|
python
|
def __call__(self, data, kernel, kernel_fft=False, return_fft=False):
'\n Convolve data with a kernel using FFTW.\n\n This method is identical to :func:`convolve_fft`, but uses\n the pre-established memory working space setup during the\n instantiation of the object.\n\n Beware:\n - ``data`` and ``kernel`` must have the same shape.\n - For the sum of all pixels in the convolved image to be the\n same as the input data, the kernel must sum to unity.\n - Padding is never added by default.\n\n Args:\n data (`numpy.ndarray`_):\n Data to convolve. Data type must be `numpy.float64` and shape\n must match :attr:`shape`.\n kernel (`numpy.ndarray`_):\n The convolution kernel, which must have the same shape as\n ``data``. If ``kernel_fft`` is True, this is the FFT of the\n kernel image and must have type `numpy.complex128`_; otherwise,\n this is the direct kernel image with the center of the kernel at\n the center of the array and must have type `numpy.float64`_.\n kernel_fft (:obj:`bool`, optional):\n Flag that the provided ``kernel`` array is actually the\n FFT of the kernel, not its direct image.\n return_fft (:obj:`bool`, optional):\n Flag to return the FFT of the convolved image, instead of\n the direct image.\n\n Returns:\n `numpy.ndarray`_: The convolved image, or its FFT, with the\n same shape as the provided ``data`` array.\n\n Raises:\n ValueError:\n Raised if ``data`` and ``kernel`` do not have the expected shape\n or if any of their values are not finite.\n TypeError:\n Raised if the data types of either ``data`` or ``kernel`` do not\n match the expected values (numpy.float64 for direct data,\n numpy.complex128 for Fourier Transform data).\n '
if ((not np.all(np.isfinite(data))) or (not np.all(np.isfinite(kernel)))):
raise ValueError('Data and kernel must both have valid values.')
self.fft(data)
if (kernel.shape != self.shape):
raise ValueError('Kernel has incorrect shape for this instance of ConvolveFFTW.')
if kernel_fft:
if (kernel.dtype.type is not np.complex128):
raise TypeError('Kernel FFT must be of type numpy.complex128.')
self.kern_fft[...] = kernel
else:
if (kernel.dtype.type is not np.float64):
raise TypeError('Kernel must be of type numpy.float64.')
self.kern.real[...] = np.fft.ifftshift(kernel)
self.kfft()
if return_fft:
return (self.data_fft * self.kern_fft)
self.data_fft *= self.kern_fft
self.ifft()
return self.dcnv.real.copy()
|
def __call__(self, data, kernel, kernel_fft=False, return_fft=False):
'\n Convolve data with a kernel using FFTW.\n\n This method is identical to :func:`convolve_fft`, but uses\n the pre-established memory working space setup during the\n instantiation of the object.\n\n Beware:\n - ``data`` and ``kernel`` must have the same shape.\n - For the sum of all pixels in the convolved image to be the\n same as the input data, the kernel must sum to unity.\n - Padding is never added by default.\n\n Args:\n data (`numpy.ndarray`_):\n Data to convolve. Data type must be `numpy.float64` and shape\n must match :attr:`shape`.\n kernel (`numpy.ndarray`_):\n The convolution kernel, which must have the same shape as\n ``data``. If ``kernel_fft`` is True, this is the FFT of the\n kernel image and must have type `numpy.complex128`_; otherwise,\n this is the direct kernel image with the center of the kernel at\n the center of the array and must have type `numpy.float64`_.\n kernel_fft (:obj:`bool`, optional):\n Flag that the provided ``kernel`` array is actually the\n FFT of the kernel, not its direct image.\n return_fft (:obj:`bool`, optional):\n Flag to return the FFT of the convolved image, instead of\n the direct image.\n\n Returns:\n `numpy.ndarray`_: The convolved image, or its FFT, with the\n same shape as the provided ``data`` array.\n\n Raises:\n ValueError:\n Raised if ``data`` and ``kernel`` do not have the expected shape\n or if any of their values are not finite.\n TypeError:\n Raised if the data types of either ``data`` or ``kernel`` do not\n match the expected values (numpy.float64 for direct data,\n numpy.complex128 for Fourier Transform data).\n '
if ((not np.all(np.isfinite(data))) or (not np.all(np.isfinite(kernel)))):
raise ValueError('Data and kernel must both have valid values.')
self.fft(data)
if (kernel.shape != self.shape):
raise ValueError('Kernel has incorrect shape for this instance of ConvolveFFTW.')
if kernel_fft:
if (kernel.dtype.type is not np.complex128):
raise TypeError('Kernel FFT must be of type numpy.complex128.')
self.kern_fft[...] = kernel
else:
if (kernel.dtype.type is not np.float64):
raise TypeError('Kernel must be of type numpy.float64.')
self.kern.real[...] = np.fft.ifftshift(kernel)
self.kfft()
if return_fft:
return (self.data_fft * self.kern_fft)
self.data_fft *= self.kern_fft
self.ifft()
return self.dcnv.real.copy()<|docstring|>Convolve data with a kernel using FFTW.
This method is identical to :func:`convolve_fft`, but uses
the pre-established memory working space setup during the
instantiation of the object.
Beware:
- ``data`` and ``kernel`` must have the same shape.
- For the sum of all pixels in the convolved image to be the
same as the input data, the kernel must sum to unity.
- Padding is never added by default.
Args:
data (`numpy.ndarray`_):
Data to convolve. Data type must be `numpy.float64` and shape
must match :attr:`shape`.
kernel (`numpy.ndarray`_):
The convolution kernel, which must have the same shape as
``data``. If ``kernel_fft`` is True, this is the FFT of the
kernel image and must have type `numpy.complex128`_; otherwise,
this is the direct kernel image with the center of the kernel at
the center of the array and must have type `numpy.float64`_.
kernel_fft (:obj:`bool`, optional):
Flag that the provided ``kernel`` array is actually the
FFT of the kernel, not its direct image.
return_fft (:obj:`bool`, optional):
Flag to return the FFT of the convolved image, instead of
the direct image.
Returns:
`numpy.ndarray`_: The convolved image, or its FFT, with the
same shape as the provided ``data`` array.
Raises:
ValueError:
Raised if ``data`` and ``kernel`` do not have the expected shape
or if any of their values are not finite.
TypeError:
Raised if the data types of either ``data`` or ``kernel`` do not
match the expected values (numpy.float64 for direct data,
numpy.complex128 for Fourier Transform data).<|endoftext|>
|
a174bf646ea4922a7f219a7a52dd9f4e7757dbb6a4a9def2dc11ad11a5b60a0b
|
def __reduce__(self):
'\n Internal method for pickling.\n\n Returns:\n :obj:`tuple`: Tuple of the class type and the arguments needed for\n instantiating the class.\n '
return (self.__class__, (self.shape,))
|
Internal method for pickling.
Returns:
:obj:`tuple`: Tuple of the class type and the arguments needed for
instantiating the class.
|
nirvana/models/beam.py
|
__reduce__
|
briandigiorgio/BarFit
| 1
|
python
|
def __reduce__(self):
'\n Internal method for pickling.\n\n Returns:\n :obj:`tuple`: Tuple of the class type and the arguments needed for\n instantiating the class.\n '
return (self.__class__, (self.shape,))
|
def __reduce__(self):
'\n Internal method for pickling.\n\n Returns:\n :obj:`tuple`: Tuple of the class type and the arguments needed for\n instantiating the class.\n '
return (self.__class__, (self.shape,))<|docstring|>Internal method for pickling.
Returns:
:obj:`tuple`: Tuple of the class type and the arguments needed for
instantiating the class.<|endoftext|>
|
29a2a6d0a56682976a4da08800d273f1734cda6b3f7a29efaa74f0e3a019f06a
|
def fft(self, data, copy=True, shift=False):
'\n Calculate the FFT of the provided data array.\n\n Args:\n data (`numpy.ndarray`_):\n Data for FFT computation. Data type must be `numpy.float64` and\n shape must match :attr:`shape`.\n copy (:obj:`bool`, optional):\n The result of the FFT is computed using the\n :attr:`data_fft` workspace. If False, the\n :attr:`data_fft` *is* the returned array; if True,\n returned array is a copy.\n shift (:obj:`bool`, optional):\n Before computing, use ``numpy.fft.iffshift`` to shift\n the spatial coordinates of the image such that the 0\n frequency component of the FFT is shifted to the\n center of the image.\n\n Returns:\n `numpy.ndarray`_: The FFT of the provided data.\n\n Raises:\n ValueError:\n Raised if the shape of the data array does not match\n :attr:`shape`.\n TypeError:\n Raised if the type of the array is not np.float64.\n '
if (data.shape != self.shape):
raise ValueError('Data has incorrect shape for this instance of ConvolveFFTW.')
if (data.dtype.type is not np.float64):
raise TypeError('Data must be of type numpy.float64.')
self.data.real[...] = (np.fft.ifftshift(data) if shift else data)
self.dfft()
return (self.data_fft.copy() if copy else self.data_fft)
|
Calculate the FFT of the provided data array.
Args:
data (`numpy.ndarray`_):
Data for FFT computation. Data type must be `numpy.float64` and
shape must match :attr:`shape`.
copy (:obj:`bool`, optional):
The result of the FFT is computed using the
:attr:`data_fft` workspace. If False, the
:attr:`data_fft` *is* the returned array; if True,
returned array is a copy.
shift (:obj:`bool`, optional):
Before computing, use ``numpy.fft.iffshift`` to shift
the spatial coordinates of the image such that the 0
frequency component of the FFT is shifted to the
center of the image.
Returns:
`numpy.ndarray`_: The FFT of the provided data.
Raises:
ValueError:
Raised if the shape of the data array does not match
:attr:`shape`.
TypeError:
Raised if the type of the array is not np.float64.
|
nirvana/models/beam.py
|
fft
|
briandigiorgio/BarFit
| 1
|
python
|
def fft(self, data, copy=True, shift=False):
'\n Calculate the FFT of the provided data array.\n\n Args:\n data (`numpy.ndarray`_):\n Data for FFT computation. Data type must be `numpy.float64` and\n shape must match :attr:`shape`.\n copy (:obj:`bool`, optional):\n The result of the FFT is computed using the\n :attr:`data_fft` workspace. If False, the\n :attr:`data_fft` *is* the returned array; if True,\n returned array is a copy.\n shift (:obj:`bool`, optional):\n Before computing, use ``numpy.fft.iffshift`` to shift\n the spatial coordinates of the image such that the 0\n frequency component of the FFT is shifted to the\n center of the image.\n\n Returns:\n `numpy.ndarray`_: The FFT of the provided data.\n\n Raises:\n ValueError:\n Raised if the shape of the data array does not match\n :attr:`shape`.\n TypeError:\n Raised if the type of the array is not np.float64.\n '
if (data.shape != self.shape):
raise ValueError('Data has incorrect shape for this instance of ConvolveFFTW.')
if (data.dtype.type is not np.float64):
raise TypeError('Data must be of type numpy.float64.')
self.data.real[...] = (np.fft.ifftshift(data) if shift else data)
self.dfft()
return (self.data_fft.copy() if copy else self.data_fft)
|
def fft(self, data, copy=True, shift=False):
'\n Calculate the FFT of the provided data array.\n\n Args:\n data (`numpy.ndarray`_):\n Data for FFT computation. Data type must be `numpy.float64` and\n shape must match :attr:`shape`.\n copy (:obj:`bool`, optional):\n The result of the FFT is computed using the\n :attr:`data_fft` workspace. If False, the\n :attr:`data_fft` *is* the returned array; if True,\n returned array is a copy.\n shift (:obj:`bool`, optional):\n Before computing, use ``numpy.fft.iffshift`` to shift\n the spatial coordinates of the image such that the 0\n frequency component of the FFT is shifted to the\n center of the image.\n\n Returns:\n `numpy.ndarray`_: The FFT of the provided data.\n\n Raises:\n ValueError:\n Raised if the shape of the data array does not match\n :attr:`shape`.\n TypeError:\n Raised if the type of the array is not np.float64.\n '
if (data.shape != self.shape):
raise ValueError('Data has incorrect shape for this instance of ConvolveFFTW.')
if (data.dtype.type is not np.float64):
raise TypeError('Data must be of type numpy.float64.')
self.data.real[...] = (np.fft.ifftshift(data) if shift else data)
self.dfft()
return (self.data_fft.copy() if copy else self.data_fft)<|docstring|>Calculate the FFT of the provided data array.
Args:
data (`numpy.ndarray`_):
Data for FFT computation. Data type must be `numpy.float64` and
shape must match :attr:`shape`.
copy (:obj:`bool`, optional):
The result of the FFT is computed using the
:attr:`data_fft` workspace. If False, the
:attr:`data_fft` *is* the returned array; if True,
returned array is a copy.
shift (:obj:`bool`, optional):
Before computing, use ``numpy.fft.iffshift`` to shift
the spatial coordinates of the image such that the 0
frequency component of the FFT is shifted to the
center of the image.
Returns:
`numpy.ndarray`_: The FFT of the provided data.
Raises:
ValueError:
Raised if the shape of the data array does not match
:attr:`shape`.
TypeError:
Raised if the type of the array is not np.float64.<|endoftext|>
|
1b3fd770e972baf23fc76bcb245bed1ba7ebbfa91ad3f408f830352556f1eb20
|
def transform(vector: list) -> list:
'\n Transform vector 2D vector using current transformation in context.\n :param vector: 2D vector [x, y]\n :return:\n '
return TRANSFORMATION_CTX(vector)
|
Transform vector 2D vector using current transformation in context.
:param vector: 2D vector [x, y]
:return:
|
homework/transformations/transformation.py
|
transform
|
vanam/example_pip_package
| 0
|
python
|
def transform(vector: list) -> list:
'\n Transform vector 2D vector using current transformation in context.\n :param vector: 2D vector [x, y]\n :return:\n '
return TRANSFORMATION_CTX(vector)
|
def transform(vector: list) -> list:
'\n Transform vector 2D vector using current transformation in context.\n :param vector: 2D vector [x, y]\n :return:\n '
return TRANSFORMATION_CTX(vector)<|docstring|>Transform vector 2D vector using current transformation in context.
:param vector: 2D vector [x, y]
:return:<|endoftext|>
|
5be35dd043d52eaa0641cf8ad91adfbdcdebacda4492266cc742ec4220a26e12
|
def __init__(self, matrix: np.ndarray):
'\n\n :param matrix: Affine matrix of shape `[3, 3]`\n '
self._matrix = matrix
|
:param matrix: Affine matrix of shape `[3, 3]`
|
homework/transformations/transformation.py
|
__init__
|
vanam/example_pip_package
| 0
|
python
|
def __init__(self, matrix: np.ndarray):
'\n\n \n '
self._matrix = matrix
|
def __init__(self, matrix: np.ndarray):
'\n\n \n '
self._matrix = matrix<|docstring|>:param matrix: Affine matrix of shape `[3, 3]`<|endoftext|>
|
857909f232a20d55986930d29711e5fa2df0813c01e1fbd366ce381a9392d014
|
def __call__(self, vector: list) -> list:
'\n Call transformation on a 2D vector\n :param vector: 2D vector [x, y]\n :return: Transformed 2D vector\n '
if (len(vector) == 2):
vector = (vector + [1])
vector = np.asarray([vector]).T
return list(np.dot(self._matrix, vector).T[0][0:2])
|
Call transformation on a 2D vector
:param vector: 2D vector [x, y]
:return: Transformed 2D vector
|
homework/transformations/transformation.py
|
__call__
|
vanam/example_pip_package
| 0
|
python
|
def __call__(self, vector: list) -> list:
'\n Call transformation on a 2D vector\n :param vector: 2D vector [x, y]\n :return: Transformed 2D vector\n '
if (len(vector) == 2):
vector = (vector + [1])
vector = np.asarray([vector]).T
return list(np.dot(self._matrix, vector).T[0][0:2])
|
def __call__(self, vector: list) -> list:
'\n Call transformation on a 2D vector\n :param vector: 2D vector [x, y]\n :return: Transformed 2D vector\n '
if (len(vector) == 2):
vector = (vector + [1])
vector = np.asarray([vector]).T
return list(np.dot(self._matrix, vector).T[0][0:2])<|docstring|>Call transformation on a 2D vector
:param vector: 2D vector [x, y]
:return: Transformed 2D vector<|endoftext|>
|
56cb34dab40b579934876ac432250c7cf053f5cb5fdc82d68d365b02ae3da325
|
def __matmul__(self, other: 'Transformation'):
'\n Compose transformations.\n '
return Transformation(np.dot(self._matrix, other._matrix))
|
Compose transformations.
|
homework/transformations/transformation.py
|
__matmul__
|
vanam/example_pip_package
| 0
|
python
|
def __matmul__(self, other: 'Transformation'):
'\n \n '
return Transformation(np.dot(self._matrix, other._matrix))
|
def __matmul__(self, other: 'Transformation'):
'\n \n '
return Transformation(np.dot(self._matrix, other._matrix))<|docstring|>Compose transformations.<|endoftext|>
|
370985cb4c3fc903a157ee063a2bee1ee0a78c37341621fe6770921b92dc7d5a
|
@property
def matrix(self) -> np.ndarray:
'\n :return: Affine matrix of shape `[3, 3]` of the transformation.\n '
return self._matrix
|
:return: Affine matrix of shape `[3, 3]` of the transformation.
|
homework/transformations/transformation.py
|
matrix
|
vanam/example_pip_package
| 0
|
python
|
@property
def matrix(self) -> np.ndarray:
'\n \n '
return self._matrix
|
@property
def matrix(self) -> np.ndarray:
'\n \n '
return self._matrix<|docstring|>:return: Affine matrix of shape `[3, 3]` of the transformation.<|endoftext|>
|
0178feb784a418d2e8169ca5b9876f1483f3f1ce74e23dcdd7ed8371db4ddea0
|
@property
def reverse_matrix(self) -> np.ndarray:
'\n :return: Affine matrix of shape `[3, 3]` of the reverse transformation.\n '
return np.linalg.inv(self._matrix)
|
:return: Affine matrix of shape `[3, 3]` of the reverse transformation.
|
homework/transformations/transformation.py
|
reverse_matrix
|
vanam/example_pip_package
| 0
|
python
|
@property
def reverse_matrix(self) -> np.ndarray:
'\n \n '
return np.linalg.inv(self._matrix)
|
@property
def reverse_matrix(self) -> np.ndarray:
'\n \n '
return np.linalg.inv(self._matrix)<|docstring|>:return: Affine matrix of shape `[3, 3]` of the reverse transformation.<|endoftext|>
|
2779bdaaf0f0379c735949b2e9c2d736aa38d729aca921c48cba3f6529d11924
|
def __enter__(self):
"\n Operation performed upon entering a context using 'with'.\n "
global TRANSFORMATION_CTX
TRANSFORMATION_CTX = (self @ TRANSFORMATION_CTX)
|
Operation performed upon entering a context using 'with'.
|
homework/transformations/transformation.py
|
__enter__
|
vanam/example_pip_package
| 0
|
python
|
def __enter__(self):
"\n \n "
global TRANSFORMATION_CTX
TRANSFORMATION_CTX = (self @ TRANSFORMATION_CTX)
|
def __enter__(self):
"\n \n "
global TRANSFORMATION_CTX
TRANSFORMATION_CTX = (self @ TRANSFORMATION_CTX)<|docstring|>Operation performed upon entering a context using 'with'.<|endoftext|>
|
82a57e680596e8a22bf13a711ee8ad21326571e3bcfaa93df86d91f9cb6a3c39
|
def __exit__(self, *args):
"\n Operation performed upon exiting a context using 'with'.\n "
global TRANSFORMATION_CTX
TRANSFORMATION_CTX = (Transformation(self.reverse_matrix) @ TRANSFORMATION_CTX)
|
Operation performed upon exiting a context using 'with'.
|
homework/transformations/transformation.py
|
__exit__
|
vanam/example_pip_package
| 0
|
python
|
def __exit__(self, *args):
"\n \n "
global TRANSFORMATION_CTX
TRANSFORMATION_CTX = (Transformation(self.reverse_matrix) @ TRANSFORMATION_CTX)
|
def __exit__(self, *args):
"\n \n "
global TRANSFORMATION_CTX
TRANSFORMATION_CTX = (Transformation(self.reverse_matrix) @ TRANSFORMATION_CTX)<|docstring|>Operation performed upon exiting a context using 'with'.<|endoftext|>
|
16c7d04243a16046d70c8140ee04abb252b8ddb57bd430c1a59414215942dad4
|
def __init__(self, scale: int):
'\n |s 0 0|\n |0 s 0|\n |0 0 1|\n\n :param scale: Symmetric scaling (same in <em>x</em> and <em>y</em>)\n '
matrix = np.eye(3)
matrix[0][0] = scale
matrix[1][1] = scale
super().__init__(matrix)
|
|s 0 0|
|0 s 0|
|0 0 1|
:param scale: Symmetric scaling (same in <em>x</em> and <em>y</em>)
|
homework/transformations/transformation.py
|
__init__
|
vanam/example_pip_package
| 0
|
python
|
def __init__(self, scale: int):
'\n |s 0 0|\n |0 s 0|\n |0 0 1|\n\n :param scale: Symmetric scaling (same in <em>x</em> and <em>y</em>)\n '
matrix = np.eye(3)
matrix[0][0] = scale
matrix[1][1] = scale
super().__init__(matrix)
|
def __init__(self, scale: int):
'\n |s 0 0|\n |0 s 0|\n |0 0 1|\n\n :param scale: Symmetric scaling (same in <em>x</em> and <em>y</em>)\n '
matrix = np.eye(3)
matrix[0][0] = scale
matrix[1][1] = scale
super().__init__(matrix)<|docstring|>|s 0 0|
|0 s 0|
|0 0 1|
:param scale: Symmetric scaling (same in <em>x</em> and <em>y</em>)<|endoftext|>
|
6b25e2b124164a6b73ef61c51f4a0e74e28fa0aa983829f1b758f445f842c95b
|
def __init__(self, shift: list):
'\n |1 0 t_x|\n |0 1 t_y|\n |0 0 1 |\n\n :param shift: Translation [t_x, t_y]\n '
matrix = np.eye(3)
matrix[0][2] = shift[0]
matrix[1][2] = shift[1]
super().__init__(matrix)
|
|1 0 t_x|
|0 1 t_y|
|0 0 1 |
:param shift: Translation [t_x, t_y]
|
homework/transformations/transformation.py
|
__init__
|
vanam/example_pip_package
| 0
|
python
|
def __init__(self, shift: list):
'\n |1 0 t_x|\n |0 1 t_y|\n |0 0 1 |\n\n :param shift: Translation [t_x, t_y]\n '
matrix = np.eye(3)
matrix[0][2] = shift[0]
matrix[1][2] = shift[1]
super().__init__(matrix)
|
def __init__(self, shift: list):
'\n |1 0 t_x|\n |0 1 t_y|\n |0 0 1 |\n\n :param shift: Translation [t_x, t_y]\n '
matrix = np.eye(3)
matrix[0][2] = shift[0]
matrix[1][2] = shift[1]
super().__init__(matrix)<|docstring|>|1 0 t_x|
|0 1 t_y|
|0 0 1 |
:param shift: Translation [t_x, t_y]<|endoftext|>
|
6f83565950bb837eb3d903dd4e47b5a633f8cb826617e8be33a32b59167caacb
|
def __init__(self, angle: float):
'\n |cos(a) -sin(a) 0|\n |sin(a) cos(a) 0|\n |0 0 1|\n\n :param angle: Angle of rotation in radian.\n '
matrix = np.eye(3)
matrix[0][0] = np.cos(angle)
matrix[0][1] = (- np.sin(angle))
matrix[1][0] = np.sin(angle)
matrix[1][1] = np.cos(angle)
super().__init__(matrix)
|
|cos(a) -sin(a) 0|
|sin(a) cos(a) 0|
|0 0 1|
:param angle: Angle of rotation in radian.
|
homework/transformations/transformation.py
|
__init__
|
vanam/example_pip_package
| 0
|
python
|
def __init__(self, angle: float):
'\n |cos(a) -sin(a) 0|\n |sin(a) cos(a) 0|\n |0 0 1|\n\n :param angle: Angle of rotation in radian.\n '
matrix = np.eye(3)
matrix[0][0] = np.cos(angle)
matrix[0][1] = (- np.sin(angle))
matrix[1][0] = np.sin(angle)
matrix[1][1] = np.cos(angle)
super().__init__(matrix)
|
def __init__(self, angle: float):
'\n |cos(a) -sin(a) 0|\n |sin(a) cos(a) 0|\n |0 0 1|\n\n :param angle: Angle of rotation in radian.\n '
matrix = np.eye(3)
matrix[0][0] = np.cos(angle)
matrix[0][1] = (- np.sin(angle))
matrix[1][0] = np.sin(angle)
matrix[1][1] = np.cos(angle)
super().__init__(matrix)<|docstring|>|cos(a) -sin(a) 0|
|sin(a) cos(a) 0|
|0 0 1|
:param angle: Angle of rotation in radian.<|endoftext|>
|
e66a5e082eac1acc6d6367bad671ae4164b8bab8856cefde12f2e1b0f700248f
|
def mergeLabelLists(llist1, weight1, llist2, weight2, combfn):
'Combine values in two label lists according to the passed combfn\n\tfunction, and passed weights for each label list.'
allLabels = set(llist1.items()).union(set(llist2.items()))
for (label, value) in allLabels:
if ((label in llist1.keys()) and (label in llist2.keys())):
llist1[label] = combfn(llist1[label], weight1, llist2[label], weight2)
elif (label in llist2.keys()):
llist1[label] = (weight2 * llist2[label])
else:
llist1[label] = (weight1 * llist1[label])
|
Combine values in two label lists according to the passed combfn
function, and passed weights for each label list.
|
src/lg.py
|
mergeLabelLists
|
CurrenWong/lgeval
| 1
|
python
|
def mergeLabelLists(llist1, weight1, llist2, weight2, combfn):
'Combine values in two label lists according to the passed combfn\n\tfunction, and passed weights for each label list.'
allLabels = set(llist1.items()).union(set(llist2.items()))
for (label, value) in allLabels:
if ((label in llist1.keys()) and (label in llist2.keys())):
llist1[label] = combfn(llist1[label], weight1, llist2[label], weight2)
elif (label in llist2.keys()):
llist1[label] = (weight2 * llist2[label])
else:
llist1[label] = (weight1 * llist1[label])
|
def mergeLabelLists(llist1, weight1, llist2, weight2, combfn):
'Combine values in two label lists according to the passed combfn\n\tfunction, and passed weights for each label list.'
allLabels = set(llist1.items()).union(set(llist2.items()))
for (label, value) in allLabels:
if ((label in llist1.keys()) and (label in llist2.keys())):
llist1[label] = combfn(llist1[label], weight1, llist2[label], weight2)
elif (label in llist2.keys()):
llist1[label] = (weight2 * llist2[label])
else:
llist1[label] = (weight1 * llist1[label])<|docstring|>Combine values in two label lists according to the passed combfn
function, and passed weights for each label list.<|endoftext|>
|
b9bd73104887250b13c3b5a40007d8d26232a74efca38bd809e0097b495c6740
|
def mergeMaps(map1, weight1, map2, weight2, combfn):
'Combine values in two maps according to the passed combfn\n\tfunction, and passed weights for each map.'
objects1 = map1.keys()
objects2 = map2.keys()
allObjects = set(objects1).union(set(objects2))
for object in allObjects:
if ((object in objects1) and (object in objects2)):
mergeLabelLists(map1[object], weight1, map2[object], weight2, combfn)
elif (object in objects2):
map1[object] = copy.deepcopy(map2[object])
for (label, value) in map1[object].items():
map1[object][label] = (weight2 * value)
map1[object]['_'] = weight1
else:
for (label, value) in map1[object].items():
map1[object][label] = (weight1 * value)
map1[object]['_'] = weight2
|
Combine values in two maps according to the passed combfn
function, and passed weights for each map.
|
src/lg.py
|
mergeMaps
|
CurrenWong/lgeval
| 1
|
python
|
def mergeMaps(map1, weight1, map2, weight2, combfn):
'Combine values in two maps according to the passed combfn\n\tfunction, and passed weights for each map.'
objects1 = map1.keys()
objects2 = map2.keys()
allObjects = set(objects1).union(set(objects2))
for object in allObjects:
if ((object in objects1) and (object in objects2)):
mergeLabelLists(map1[object], weight1, map2[object], weight2, combfn)
elif (object in objects2):
map1[object] = copy.deepcopy(map2[object])
for (label, value) in map1[object].items():
map1[object][label] = (weight2 * value)
map1[object]['_'] = weight1
else:
for (label, value) in map1[object].items():
map1[object][label] = (weight1 * value)
map1[object]['_'] = weight2
|
def mergeMaps(map1, weight1, map2, weight2, combfn):
'Combine values in two maps according to the passed combfn\n\tfunction, and passed weights for each map.'
objects1 = map1.keys()
objects2 = map2.keys()
allObjects = set(objects1).union(set(objects2))
for object in allObjects:
if ((object in objects1) and (object in objects2)):
mergeLabelLists(map1[object], weight1, map2[object], weight2, combfn)
elif (object in objects2):
map1[object] = copy.deepcopy(map2[object])
for (label, value) in map1[object].items():
map1[object][label] = (weight2 * value)
map1[object]['_'] = weight1
else:
for (label, value) in map1[object].items():
map1[object][label] = (weight1 * value)
map1[object]['_'] = weight2<|docstring|>Combine values in two maps according to the passed combfn
function, and passed weights for each map.<|endoftext|>
|
4f9bba54ebc1654a2f076713566b5da0882e291f8bfb10efc144c883deef89de
|
def getEdgesToNeighbours(nodes, edges):
'return all edges which are coming from one of the nodes to out of these nodes'
neigb = set([])
for (n1, n2) in edges:
if ((n1 in nodes) and (not (n2 in nodes))):
neigb.add((n1, n2))
return neigb
|
return all edges which are coming from one of the nodes to out of these nodes
|
src/lg.py
|
getEdgesToNeighbours
|
CurrenWong/lgeval
| 1
|
python
|
def getEdgesToNeighbours(nodes, edges):
neigb = set([])
for (n1, n2) in edges:
if ((n1 in nodes) and (not (n2 in nodes))):
neigb.add((n1, n2))
return neigb
|
def getEdgesToNeighbours(nodes, edges):
neigb = set([])
for (n1, n2) in edges:
if ((n1 in nodes) and (not (n2 in nodes))):
neigb.add((n1, n2))
return neigb<|docstring|>return all edges which are coming from one of the nodes to out of these nodes<|endoftext|>
|
54accf342a397d86133c9659aacfe7f7a290dd5b7ef2adb9c3a1232defe018a3
|
def getEdgesBetweenThem(nodes, edges):
'return all edges which are coming from one of the nodes to out of these nodes'
edg = set([])
for (n1, n2) in edges:
if ((n1 in nodes) and (n2 in nodes)):
edg.add((n1, n2))
return edg
|
return all edges which are coming from one of the nodes to out of these nodes
|
src/lg.py
|
getEdgesBetweenThem
|
CurrenWong/lgeval
| 1
|
python
|
def getEdgesBetweenThem(nodes, edges):
edg = set([])
for (n1, n2) in edges:
if ((n1 in nodes) and (n2 in nodes)):
edg.add((n1, n2))
return edg
|
def getEdgesBetweenThem(nodes, edges):
edg = set([])
for (n1, n2) in edges:
if ((n1 in nodes) and (n2 in nodes)):
edg.add((n1, n2))
return edg<|docstring|>return all edges which are coming from one of the nodes to out of these nodes<|endoftext|>
|
c2a15f496ba804cf8f15992001af52a9995309c7569a9d988bebf566e1e69013
|
def __init__(self, *args):
'Graph data is read from a CSV file or provided node and edge label\n\t\tdictionaries. If invalid entries are found, the error flag is set to\n\t\ttrue, and graph input continues. In .lg files, blank lines are\n\t\tignored, and # may be used for comment lines in CSV graph files.'
self.error = False
self.gweight = 1.0
self.nlabels = {}
self.elabels = {}
self.absentNodes = set([])
self.absentEdges = set([])
self.hiddenEdges = {}
self.cmpNodes = compareTools.cmpNodes
self.cmpEdges = compareTools.cmpEdges
fileName = None
nodeLabels = {}
edgeLabels = {}
validAsteriskEdges = set()
invalidAsteriskNodes = set()
if (len(args) == 1):
fileName = args[0]
self.file = fileName
elif (len(args) == 2):
nodeLabels = args[0]
edgeLabels = args[1]
if (fileName == None):
self.file = None
for nid in nodeLabels.keys():
if (not isinstance(nid, str)):
nid = str(nid)
newdict = {}
for label in nodeLabels[nid].keys():
if (not isinstance(nid, str)):
label = str(label)
if (not isinstance(nodeLabels[nid][label], float)):
self.error = True
sys.stderr.write(((((((' !! Invalid weight for node ' + nid) + ', label "') + label) + '": ') + str(nodeLabels[nid][label])) + '\n'))
newdict[label] = nodeLabels[nid][label]
self.nlabels[nid] = newdict
for eid in edgeLabels.keys():
if ((not isinstance(eid[0], str)) or (not isinstance(eid[1], str))):
eid[0] = str(eid[0])
eid[1] = str(eid[1])
newdict = {}
for label in edgeLabels[eid].keys():
if (not isinstance(label, str)):
label = str(label)
if (not isinstance(edgeLabels[eid][label], float)):
self.error = True
sys.stderr.write(((((((' !! Invalid weight for edge ' + str(eid)) + ', label "') + label) + '": ') + str(edgeLabels[eid][label])) + '\n'))
newdict[label] = edgeLabels[eid][label]
self.elabels[eid] = newdict
else:
MIN_NODE_ENTRY_LENGTH = 3
MIN_EDGE_ENTRY_LENGTH = 4
MIN_OBJECT_ENTRY_LENGTH = 5
MIN_OBJECT_EDGE_ENTRY_LENGTH = 5
try:
fileReader = csv.reader(open(fileName))
except:
sys.stderr.write(((' !! IO Error (cannot open): ' + fileName) + '\n'))
self.error = True
return
objectDict = dict([])
for row in fileReader:
if ((len(row) == 0) or ((len(row) == 1) and (row[0].strip() == ''))):
continue
entryType = row[0].strip()
if (entryType == 'N'):
if (len(row) < MIN_NODE_ENTRY_LENGTH):
sys.stderr.write(((((' !! Invalid node entry length: ' + str(len(row))) + '\n\t') + str(row)) + '\n'))
self.error = True
else:
nid = row[1].strip()
if (nid in self.nlabels.keys()):
nlabelDict = self.nlabels[nid]
nlabel = row[2].strip()
nlabelDict[nlabel] = float(row[3])
else:
nid = row[1].strip()
nlabel = row[2].strip()
if (len(row) > MIN_NODE_ENTRY_LENGTH):
self.nlabels[nid] = {nlabel: float(row[3])}
else:
self.nlabels[nid] = {nlabel: 1.0}
elif (entryType == 'E'):
if (len(row) < MIN_EDGE_ENTRY_LENGTH):
sys.stderr.write(((((' !! Invalid edge entry length: ' + str(len(row))) + '\n\t') + str(row)) + '\n'))
self.error = True
else:
primPair = (row[1].strip(), row[2].strip())
if (primPair[0] == primPair[1]):
sys.stderr.write(((((' !! Invalid self-edge (' + self.file) + '):\n\t') + str(row)) + '\n'))
self.error = True
nid = primPair[0]
if (nid in self.nlabels.keys()):
nlabelDict = self.nlabels[nid]
nlabel = row[3].strip()
nlabelDict[nlabel] = float(row[4])
elif (primPair in self.elabels.keys()):
elabelDict = self.elabels[primPair]
elabel = row[3].strip()
if (elabel == '*'):
if ((primPair[0] in self.nlabels) and (primPair[1] in self.nlabels) and (self.nlabels[primPair[0]] == self.nlabels[primPair[1]])):
elabel = list(self.nlabels[primPair[0]].keys())[0]
validAsteriskEdges.add(primPair)
else:
sys.stderr.write(((((((((' !! * edge used with ambiguous node labels (' + str(self.nlabels[primPair[0]])) + ' vs. ') + str(self.nlabels[primtPair[1]])) + ') in ') + self.file) + '):\n\t') + ', '.join(row)) + '\n'))
elabel = 'MergeError'
self.nlabels[primPair[0]] = {elabel: 1.0}
self.nlabels[primPair[1]] = {elabel: 1.0}
self.error = True
invalidAsteriskNodes.add(primPair[0])
invalidAsteriskNodes.add(primPair[1])
if (len(row) > MIN_EDGE_ENTRY_LENGTH):
elabelDict[elabel] = float(row[4])
else:
elabelDict[elabel] = 1.0
else:
primPair = (row[1].strip(), row[2].strip())
elabel = row[3].strip()
if (elabel == '*'):
if ((primPair[0] in self.nlabels) and (primPair[1] in self.nlabels) and (self.nlabels[primPair[0]] == self.nlabels[primPair[1]])):
elabel = list(self.nlabels[primPair[0]].keys())[0]
validAsteriskEdges.add(primPair)
else:
sys.stderr.write(((((((((' !! * edge used with ambiguous node labels (' + str(self.nlabels[primPair[0]])) + ' vs. ') + str(self.nlabels[primPair[1]])) + ') in ') + self.file) + '):\n\t') + ', '.join(row)) + '\n'))
elabel = 'MergeError'
self.nlabels[primPair[0]] = {elabel: 1.0}
self.nlabels[primPair[1]] = {elabel: 1.0}
self.error = True
invalidAsteriskNodes.add(primPair[0])
invalidAsteriskNodes.add(primPair[1])
self.elabels[primPair] = {elabel: float(row[4])}
elif (entryType == 'O'):
if (len(row) < MIN_OBJECT_ENTRY_LENGTH):
sys.stderr.write(((((' !! Invalid object entry length: ' + str(len(row))) + '\n\t') + str(row)) + '\n'))
self.error = True
else:
rawnodeList = row[4:]
oid = row[1].strip()
nlabel = row[2].strip()
nValue = float(row[3].strip())
nodeList = []
for n in rawnodeList:
nid = n.strip()
nodeList.append(nid)
if (nid in self.nlabels.keys()):
nlabelDict = self.nlabels[nid]
nlabelDict[nlabel] = nValue
else:
self.nlabels[nid] = {nlabel: nValue}
objectDict[oid] = nodeList
for nid1 in nodeList:
for nid2 in nodeList:
if (nid1 != nid2):
primPair = (nid1, nid2)
elabel = nlabel
if (primPair in self.elabels.keys()):
elabelDict = self.elabels[primPair]
elabelDict[elabel] = nValue
else:
self.elabels[primPair] = {elabel: nValue}
elif ((entryType == 'R') or (entryType == 'EO')):
if (len(row) < MIN_OBJECT_EDGE_ENTRY_LENGTH):
sys.stderr.write(((((' !! Invalid object entry length: ' + str(len(row))) + '\n\t') + str(row)) + '\n'))
self.error = True
else:
oid1 = row[1].strip()
oid2 = row[2].strip()
elabel = row[3].strip()
eValue = float(row[4].strip())
validRelationship = True
if (not (oid1 in objectDict)):
sys.stderr.write(((((' !! Invalid object id: "' + oid1) + '" - IGNORING relationship:\n\t') + str(row)) + '\n'))
self.error = True
validRelationship = False
if (not (oid2 in objectDict)):
sys.stderr.write(((((' !! Invalid object id: "' + oid2) + '" - IGNORING relationship:\n\t') + str(row)) + '\n'))
self.error = True
validRelationship = False
if validRelationship:
nodeList1 = objectDict[oid1]
nodeList2 = objectDict[oid2]
for nid1 in nodeList1:
for nid2 in nodeList2:
if (nid1 != nid2):
primPair = (nid1, nid2)
if (primPair in self.elabels.keys()):
elabelDict = self.elabels[primPair]
elabelDict[elabel] = eValue
else:
self.elabels[primPair] = {elabel: eValue}
else:
sys.stderr.write(((((' !! Invalid self-edge (' + self.file) + '):\n\t') + str(row)) + '\n'))
self.error = True
elif ((len(entryType.strip()) > 0) and (entryType.strip()[0] == '#')):
pass
else:
sys.stderr.write(((' !! Invalid graph entry type (expected N, E, O, R or EO): ' + str(row)) + '\n'))
self.error = True
anonNode = False
anodeList = []
for elabel in self.elabels.keys():
nid1 = elabel[0]
nid2 = elabel[1]
if (not (nid1 in self.nlabels.keys())):
self.nlabels[nid1] = {'_': 1.0}
anodeList = (anodeList + [nid1])
anonNode = True
if (not (nid2 in self.nlabels.keys())):
self.nlabels[nid2] = {'_': 1.0}
anodeList = (anodeList + [nid2])
anonNode = True
if anonNode:
sys.stderr.write(((' ** Anonymous labels created for:\n\t' + str(anodeList)) + '\n'))
invalidAsteriskNodeList = sorted(list(invalidAsteriskNodes))
while (len(invalidAsteriskNodeList) > 0):
nextPrimId = invalidAsteriskNodeList.pop()
for (parent, child) in validAsteriskEdges:
otherId = None
if (parent == nextPrimId):
otherId = child
if (child == nextPrimId):
otherId = parent
if (otherId != None):
if (not (otherId in invalidAsteriskNodes)):
invalidAsteriskNodes.add(otherId)
invalidAsteriskNodeList.append(otherId)
self.nlabels[otherId] = {'MergeError': 1.0}
self.elabels[(parent, child)] = {'MergeError': 1.0}
|
Graph data is read from a CSV file or provided node and edge label
dictionaries. If invalid entries are found, the error flag is set to
true, and graph input continues. In .lg files, blank lines are
ignored, and # may be used for comment lines in CSV graph files.
|
src/lg.py
|
__init__
|
CurrenWong/lgeval
| 1
|
python
|
def __init__(self, *args):
'Graph data is read from a CSV file or provided node and edge label\n\t\tdictionaries. If invalid entries are found, the error flag is set to\n\t\ttrue, and graph input continues. In .lg files, blank lines are\n\t\tignored, and # may be used for comment lines in CSV graph files.'
self.error = False
self.gweight = 1.0
self.nlabels = {}
self.elabels = {}
self.absentNodes = set([])
self.absentEdges = set([])
self.hiddenEdges = {}
self.cmpNodes = compareTools.cmpNodes
self.cmpEdges = compareTools.cmpEdges
fileName = None
nodeLabels = {}
edgeLabels = {}
validAsteriskEdges = set()
invalidAsteriskNodes = set()
if (len(args) == 1):
fileName = args[0]
self.file = fileName
elif (len(args) == 2):
nodeLabels = args[0]
edgeLabels = args[1]
if (fileName == None):
self.file = None
for nid in nodeLabels.keys():
if (not isinstance(nid, str)):
nid = str(nid)
newdict = {}
for label in nodeLabels[nid].keys():
if (not isinstance(nid, str)):
label = str(label)
if (not isinstance(nodeLabels[nid][label], float)):
self.error = True
sys.stderr.write(((((((' !! Invalid weight for node ' + nid) + ', label "') + label) + '": ') + str(nodeLabels[nid][label])) + '\n'))
newdict[label] = nodeLabels[nid][label]
self.nlabels[nid] = newdict
for eid in edgeLabels.keys():
if ((not isinstance(eid[0], str)) or (not isinstance(eid[1], str))):
eid[0] = str(eid[0])
eid[1] = str(eid[1])
newdict = {}
for label in edgeLabels[eid].keys():
if (not isinstance(label, str)):
label = str(label)
if (not isinstance(edgeLabels[eid][label], float)):
self.error = True
sys.stderr.write(((((((' !! Invalid weight for edge ' + str(eid)) + ', label "') + label) + '": ') + str(edgeLabels[eid][label])) + '\n'))
newdict[label] = edgeLabels[eid][label]
self.elabels[eid] = newdict
else:
MIN_NODE_ENTRY_LENGTH = 3
MIN_EDGE_ENTRY_LENGTH = 4
MIN_OBJECT_ENTRY_LENGTH = 5
MIN_OBJECT_EDGE_ENTRY_LENGTH = 5
try:
fileReader = csv.reader(open(fileName))
except:
sys.stderr.write(((' !! IO Error (cannot open): ' + fileName) + '\n'))
self.error = True
return
objectDict = dict([])
for row in fileReader:
if ((len(row) == 0) or ((len(row) == 1) and (row[0].strip() == ))):
continue
entryType = row[0].strip()
if (entryType == 'N'):
if (len(row) < MIN_NODE_ENTRY_LENGTH):
sys.stderr.write(((((' !! Invalid node entry length: ' + str(len(row))) + '\n\t') + str(row)) + '\n'))
self.error = True
else:
nid = row[1].strip()
if (nid in self.nlabels.keys()):
nlabelDict = self.nlabels[nid]
nlabel = row[2].strip()
nlabelDict[nlabel] = float(row[3])
else:
nid = row[1].strip()
nlabel = row[2].strip()
if (len(row) > MIN_NODE_ENTRY_LENGTH):
self.nlabels[nid] = {nlabel: float(row[3])}
else:
self.nlabels[nid] = {nlabel: 1.0}
elif (entryType == 'E'):
if (len(row) < MIN_EDGE_ENTRY_LENGTH):
sys.stderr.write(((((' !! Invalid edge entry length: ' + str(len(row))) + '\n\t') + str(row)) + '\n'))
self.error = True
else:
primPair = (row[1].strip(), row[2].strip())
if (primPair[0] == primPair[1]):
sys.stderr.write(((((' !! Invalid self-edge (' + self.file) + '):\n\t') + str(row)) + '\n'))
self.error = True
nid = primPair[0]
if (nid in self.nlabels.keys()):
nlabelDict = self.nlabels[nid]
nlabel = row[3].strip()
nlabelDict[nlabel] = float(row[4])
elif (primPair in self.elabels.keys()):
elabelDict = self.elabels[primPair]
elabel = row[3].strip()
if (elabel == '*'):
if ((primPair[0] in self.nlabels) and (primPair[1] in self.nlabels) and (self.nlabels[primPair[0]] == self.nlabels[primPair[1]])):
elabel = list(self.nlabels[primPair[0]].keys())[0]
validAsteriskEdges.add(primPair)
else:
sys.stderr.write(((((((((' !! * edge used with ambiguous node labels (' + str(self.nlabels[primPair[0]])) + ' vs. ') + str(self.nlabels[primtPair[1]])) + ') in ') + self.file) + '):\n\t') + ', '.join(row)) + '\n'))
elabel = 'MergeError'
self.nlabels[primPair[0]] = {elabel: 1.0}
self.nlabels[primPair[1]] = {elabel: 1.0}
self.error = True
invalidAsteriskNodes.add(primPair[0])
invalidAsteriskNodes.add(primPair[1])
if (len(row) > MIN_EDGE_ENTRY_LENGTH):
elabelDict[elabel] = float(row[4])
else:
elabelDict[elabel] = 1.0
else:
primPair = (row[1].strip(), row[2].strip())
elabel = row[3].strip()
if (elabel == '*'):
if ((primPair[0] in self.nlabels) and (primPair[1] in self.nlabels) and (self.nlabels[primPair[0]] == self.nlabels[primPair[1]])):
elabel = list(self.nlabels[primPair[0]].keys())[0]
validAsteriskEdges.add(primPair)
else:
sys.stderr.write(((((((((' !! * edge used with ambiguous node labels (' + str(self.nlabels[primPair[0]])) + ' vs. ') + str(self.nlabels[primPair[1]])) + ') in ') + self.file) + '):\n\t') + ', '.join(row)) + '\n'))
elabel = 'MergeError'
self.nlabels[primPair[0]] = {elabel: 1.0}
self.nlabels[primPair[1]] = {elabel: 1.0}
self.error = True
invalidAsteriskNodes.add(primPair[0])
invalidAsteriskNodes.add(primPair[1])
self.elabels[primPair] = {elabel: float(row[4])}
elif (entryType == 'O'):
if (len(row) < MIN_OBJECT_ENTRY_LENGTH):
sys.stderr.write(((((' !! Invalid object entry length: ' + str(len(row))) + '\n\t') + str(row)) + '\n'))
self.error = True
else:
rawnodeList = row[4:]
oid = row[1].strip()
nlabel = row[2].strip()
nValue = float(row[3].strip())
nodeList = []
for n in rawnodeList:
nid = n.strip()
nodeList.append(nid)
if (nid in self.nlabels.keys()):
nlabelDict = self.nlabels[nid]
nlabelDict[nlabel] = nValue
else:
self.nlabels[nid] = {nlabel: nValue}
objectDict[oid] = nodeList
for nid1 in nodeList:
for nid2 in nodeList:
if (nid1 != nid2):
primPair = (nid1, nid2)
elabel = nlabel
if (primPair in self.elabels.keys()):
elabelDict = self.elabels[primPair]
elabelDict[elabel] = nValue
else:
self.elabels[primPair] = {elabel: nValue}
elif ((entryType == 'R') or (entryType == 'EO')):
if (len(row) < MIN_OBJECT_EDGE_ENTRY_LENGTH):
sys.stderr.write(((((' !! Invalid object entry length: ' + str(len(row))) + '\n\t') + str(row)) + '\n'))
self.error = True
else:
oid1 = row[1].strip()
oid2 = row[2].strip()
elabel = row[3].strip()
eValue = float(row[4].strip())
validRelationship = True
if (not (oid1 in objectDict)):
sys.stderr.write(((((' !! Invalid object id: "' + oid1) + '" - IGNORING relationship:\n\t') + str(row)) + '\n'))
self.error = True
validRelationship = False
if (not (oid2 in objectDict)):
sys.stderr.write(((((' !! Invalid object id: "' + oid2) + '" - IGNORING relationship:\n\t') + str(row)) + '\n'))
self.error = True
validRelationship = False
if validRelationship:
nodeList1 = objectDict[oid1]
nodeList2 = objectDict[oid2]
for nid1 in nodeList1:
for nid2 in nodeList2:
if (nid1 != nid2):
primPair = (nid1, nid2)
if (primPair in self.elabels.keys()):
elabelDict = self.elabels[primPair]
elabelDict[elabel] = eValue
else:
self.elabels[primPair] = {elabel: eValue}
else:
sys.stderr.write(((((' !! Invalid self-edge (' + self.file) + '):\n\t') + str(row)) + '\n'))
self.error = True
elif ((len(entryType.strip()) > 0) and (entryType.strip()[0] == '#')):
pass
else:
sys.stderr.write(((' !! Invalid graph entry type (expected N, E, O, R or EO): ' + str(row)) + '\n'))
self.error = True
anonNode = False
anodeList = []
for elabel in self.elabels.keys():
nid1 = elabel[0]
nid2 = elabel[1]
if (not (nid1 in self.nlabels.keys())):
self.nlabels[nid1] = {'_': 1.0}
anodeList = (anodeList + [nid1])
anonNode = True
if (not (nid2 in self.nlabels.keys())):
self.nlabels[nid2] = {'_': 1.0}
anodeList = (anodeList + [nid2])
anonNode = True
if anonNode:
sys.stderr.write(((' ** Anonymous labels created for:\n\t' + str(anodeList)) + '\n'))
invalidAsteriskNodeList = sorted(list(invalidAsteriskNodes))
while (len(invalidAsteriskNodeList) > 0):
nextPrimId = invalidAsteriskNodeList.pop()
for (parent, child) in validAsteriskEdges:
otherId = None
if (parent == nextPrimId):
otherId = child
if (child == nextPrimId):
otherId = parent
if (otherId != None):
if (not (otherId in invalidAsteriskNodes)):
invalidAsteriskNodes.add(otherId)
invalidAsteriskNodeList.append(otherId)
self.nlabels[otherId] = {'MergeError': 1.0}
self.elabels[(parent, child)] = {'MergeError': 1.0}
|
def __init__(self, *args):
'Graph data is read from a CSV file or provided node and edge label\n\t\tdictionaries. If invalid entries are found, the error flag is set to\n\t\ttrue, and graph input continues. In .lg files, blank lines are\n\t\tignored, and # may be used for comment lines in CSV graph files.'
self.error = False
self.gweight = 1.0
self.nlabels = {}
self.elabels = {}
self.absentNodes = set([])
self.absentEdges = set([])
self.hiddenEdges = {}
self.cmpNodes = compareTools.cmpNodes
self.cmpEdges = compareTools.cmpEdges
fileName = None
nodeLabels = {}
edgeLabels = {}
validAsteriskEdges = set()
invalidAsteriskNodes = set()
if (len(args) == 1):
fileName = args[0]
self.file = fileName
elif (len(args) == 2):
nodeLabels = args[0]
edgeLabels = args[1]
if (fileName == None):
self.file = None
for nid in nodeLabels.keys():
if (not isinstance(nid, str)):
nid = str(nid)
newdict = {}
for label in nodeLabels[nid].keys():
if (not isinstance(nid, str)):
label = str(label)
if (not isinstance(nodeLabels[nid][label], float)):
self.error = True
sys.stderr.write(((((((' !! Invalid weight for node ' + nid) + ', label "') + label) + '": ') + str(nodeLabels[nid][label])) + '\n'))
newdict[label] = nodeLabels[nid][label]
self.nlabels[nid] = newdict
for eid in edgeLabels.keys():
if ((not isinstance(eid[0], str)) or (not isinstance(eid[1], str))):
eid[0] = str(eid[0])
eid[1] = str(eid[1])
newdict = {}
for label in edgeLabels[eid].keys():
if (not isinstance(label, str)):
label = str(label)
if (not isinstance(edgeLabels[eid][label], float)):
self.error = True
sys.stderr.write(((((((' !! Invalid weight for edge ' + str(eid)) + ', label "') + label) + '": ') + str(edgeLabels[eid][label])) + '\n'))
newdict[label] = edgeLabels[eid][label]
self.elabels[eid] = newdict
else:
MIN_NODE_ENTRY_LENGTH = 3
MIN_EDGE_ENTRY_LENGTH = 4
MIN_OBJECT_ENTRY_LENGTH = 5
MIN_OBJECT_EDGE_ENTRY_LENGTH = 5
try:
fileReader = csv.reader(open(fileName))
except:
sys.stderr.write(((' !! IO Error (cannot open): ' + fileName) + '\n'))
self.error = True
return
objectDict = dict([])
for row in fileReader:
if ((len(row) == 0) or ((len(row) == 1) and (row[0].strip() == ))):
continue
entryType = row[0].strip()
if (entryType == 'N'):
if (len(row) < MIN_NODE_ENTRY_LENGTH):
sys.stderr.write(((((' !! Invalid node entry length: ' + str(len(row))) + '\n\t') + str(row)) + '\n'))
self.error = True
else:
nid = row[1].strip()
if (nid in self.nlabels.keys()):
nlabelDict = self.nlabels[nid]
nlabel = row[2].strip()
nlabelDict[nlabel] = float(row[3])
else:
nid = row[1].strip()
nlabel = row[2].strip()
if (len(row) > MIN_NODE_ENTRY_LENGTH):
self.nlabels[nid] = {nlabel: float(row[3])}
else:
self.nlabels[nid] = {nlabel: 1.0}
elif (entryType == 'E'):
if (len(row) < MIN_EDGE_ENTRY_LENGTH):
sys.stderr.write(((((' !! Invalid edge entry length: ' + str(len(row))) + '\n\t') + str(row)) + '\n'))
self.error = True
else:
primPair = (row[1].strip(), row[2].strip())
if (primPair[0] == primPair[1]):
sys.stderr.write(((((' !! Invalid self-edge (' + self.file) + '):\n\t') + str(row)) + '\n'))
self.error = True
nid = primPair[0]
if (nid in self.nlabels.keys()):
nlabelDict = self.nlabels[nid]
nlabel = row[3].strip()
nlabelDict[nlabel] = float(row[4])
elif (primPair in self.elabels.keys()):
elabelDict = self.elabels[primPair]
elabel = row[3].strip()
if (elabel == '*'):
if ((primPair[0] in self.nlabels) and (primPair[1] in self.nlabels) and (self.nlabels[primPair[0]] == self.nlabels[primPair[1]])):
elabel = list(self.nlabels[primPair[0]].keys())[0]
validAsteriskEdges.add(primPair)
else:
sys.stderr.write(((((((((' !! * edge used with ambiguous node labels (' + str(self.nlabels[primPair[0]])) + ' vs. ') + str(self.nlabels[primtPair[1]])) + ') in ') + self.file) + '):\n\t') + ', '.join(row)) + '\n'))
elabel = 'MergeError'
self.nlabels[primPair[0]] = {elabel: 1.0}
self.nlabels[primPair[1]] = {elabel: 1.0}
self.error = True
invalidAsteriskNodes.add(primPair[0])
invalidAsteriskNodes.add(primPair[1])
if (len(row) > MIN_EDGE_ENTRY_LENGTH):
elabelDict[elabel] = float(row[4])
else:
elabelDict[elabel] = 1.0
else:
primPair = (row[1].strip(), row[2].strip())
elabel = row[3].strip()
if (elabel == '*'):
if ((primPair[0] in self.nlabels) and (primPair[1] in self.nlabels) and (self.nlabels[primPair[0]] == self.nlabels[primPair[1]])):
elabel = list(self.nlabels[primPair[0]].keys())[0]
validAsteriskEdges.add(primPair)
else:
sys.stderr.write(((((((((' !! * edge used with ambiguous node labels (' + str(self.nlabels[primPair[0]])) + ' vs. ') + str(self.nlabels[primPair[1]])) + ') in ') + self.file) + '):\n\t') + ', '.join(row)) + '\n'))
elabel = 'MergeError'
self.nlabels[primPair[0]] = {elabel: 1.0}
self.nlabels[primPair[1]] = {elabel: 1.0}
self.error = True
invalidAsteriskNodes.add(primPair[0])
invalidAsteriskNodes.add(primPair[1])
self.elabels[primPair] = {elabel: float(row[4])}
elif (entryType == 'O'):
if (len(row) < MIN_OBJECT_ENTRY_LENGTH):
sys.stderr.write(((((' !! Invalid object entry length: ' + str(len(row))) + '\n\t') + str(row)) + '\n'))
self.error = True
else:
rawnodeList = row[4:]
oid = row[1].strip()
nlabel = row[2].strip()
nValue = float(row[3].strip())
nodeList = []
for n in rawnodeList:
nid = n.strip()
nodeList.append(nid)
if (nid in self.nlabels.keys()):
nlabelDict = self.nlabels[nid]
nlabelDict[nlabel] = nValue
else:
self.nlabels[nid] = {nlabel: nValue}
objectDict[oid] = nodeList
for nid1 in nodeList:
for nid2 in nodeList:
if (nid1 != nid2):
primPair = (nid1, nid2)
elabel = nlabel
if (primPair in self.elabels.keys()):
elabelDict = self.elabels[primPair]
elabelDict[elabel] = nValue
else:
self.elabels[primPair] = {elabel: nValue}
elif ((entryType == 'R') or (entryType == 'EO')):
if (len(row) < MIN_OBJECT_EDGE_ENTRY_LENGTH):
sys.stderr.write(((((' !! Invalid object entry length: ' + str(len(row))) + '\n\t') + str(row)) + '\n'))
self.error = True
else:
oid1 = row[1].strip()
oid2 = row[2].strip()
elabel = row[3].strip()
eValue = float(row[4].strip())
validRelationship = True
if (not (oid1 in objectDict)):
sys.stderr.write(((((' !! Invalid object id: "' + oid1) + '" - IGNORING relationship:\n\t') + str(row)) + '\n'))
self.error = True
validRelationship = False
if (not (oid2 in objectDict)):
sys.stderr.write(((((' !! Invalid object id: "' + oid2) + '" - IGNORING relationship:\n\t') + str(row)) + '\n'))
self.error = True
validRelationship = False
if validRelationship:
nodeList1 = objectDict[oid1]
nodeList2 = objectDict[oid2]
for nid1 in nodeList1:
for nid2 in nodeList2:
if (nid1 != nid2):
primPair = (nid1, nid2)
if (primPair in self.elabels.keys()):
elabelDict = self.elabels[primPair]
elabelDict[elabel] = eValue
else:
self.elabels[primPair] = {elabel: eValue}
else:
sys.stderr.write(((((' !! Invalid self-edge (' + self.file) + '):\n\t') + str(row)) + '\n'))
self.error = True
elif ((len(entryType.strip()) > 0) and (entryType.strip()[0] == '#')):
pass
else:
sys.stderr.write(((' !! Invalid graph entry type (expected N, E, O, R or EO): ' + str(row)) + '\n'))
self.error = True
anonNode = False
anodeList = []
for elabel in self.elabels.keys():
nid1 = elabel[0]
nid2 = elabel[1]
if (not (nid1 in self.nlabels.keys())):
self.nlabels[nid1] = {'_': 1.0}
anodeList = (anodeList + [nid1])
anonNode = True
if (not (nid2 in self.nlabels.keys())):
self.nlabels[nid2] = {'_': 1.0}
anodeList = (anodeList + [nid2])
anonNode = True
if anonNode:
sys.stderr.write(((' ** Anonymous labels created for:\n\t' + str(anodeList)) + '\n'))
invalidAsteriskNodeList = sorted(list(invalidAsteriskNodes))
while (len(invalidAsteriskNodeList) > 0):
nextPrimId = invalidAsteriskNodeList.pop()
for (parent, child) in validAsteriskEdges:
otherId = None
if (parent == nextPrimId):
otherId = child
if (child == nextPrimId):
otherId = parent
if (otherId != None):
if (not (otherId in invalidAsteriskNodes)):
invalidAsteriskNodes.add(otherId)
invalidAsteriskNodeList.append(otherId)
self.nlabels[otherId] = {'MergeError': 1.0}
self.elabels[(parent, child)] = {'MergeError': 1.0}<|docstring|>Graph data is read from a CSV file or provided node and edge label
dictionaries. If invalid entries are found, the error flag is set to
true, and graph input continues. In .lg files, blank lines are
ignored, and # may be used for comment lines in CSV graph files.<|endoftext|>
|
eb0dd4a344af750b470811e34cc5e800ce78bbd3dc7dbd6467d41b3691454540
|
def csvObject(self):
	"""Return the graph in CSV object-relationship (O/R) format as a string.

	Objects (O rows) and relationships (R rows) are derived from
	segmentGraph().  Weight values are placeholders: 1.0 is always
	written, matching the original format description.
	"""
	(segmentPrimitiveMap, primitiveSegmentMap, rootSegments, segmentEdges) = self.segmentGraph()
	# Accumulate pieces in a list and join once at the end; repeated
	# '+=' on a string inside nested loops is quadratic in output size.
	parts = []
	parts.append('# ' + os.path.split(self.file)[1])
	parts.append('\n\n')
	parts.append(('# ' + str(len(segmentPrimitiveMap.keys()))) + ' Objects')
	parts.append('\n')
	parts.append('# FORMAT: O, Object ID, Label, Weight, [ Primitive ID List ]')
	parts.append('\n')
	for objectId in sorted(segmentPrimitiveMap.keys()):
		for label in sorted(segmentPrimitiveMap[objectId][1]):
			parts.append(((('O, ' + objectId) + ', ') + label) + ', 1.0')
			for primitiveId in sorted(segmentPrimitiveMap[objectId][0]):
				parts.append(', ' + primitiveId)
			parts.append('\n')
	parts.append('\n')
	parts.append(('# ' + str(len(segmentEdges.keys()))) + ' Relationships (Pairs of Objects)')
	parts.append('\n')
	parts.append('# FORMAT: R, Object ID (parent), Object ID (child), Label, Weight')
	parts.append('\n')
	for (parentObj, childObj) in sorted(segmentEdges.keys()):
		for relationship in sorted(segmentEdges[(parentObj, childObj)].keys()):
			parts.append(((('R, ' + parentObj) + ', ') + childObj) + ', ')
			parts.append(relationship + ', 1.0')
			parts.append('\n')
	return ''.join(parts)
|
Construct CSV data file using object-relationship format. Currently
weight values are only placeholders (i.e. 1.0 is always used).
|
src/lg.py
|
csvObject
|
CurrenWong/lgeval
| 1
|
python
|
def csvObject(self):
'Construct CSV data file using object-relationship format. Currently \n\t\tweight values are only placeholders (i.e. 1.0 is always used).'
outputString =
(segmentPrimitiveMap, primitiveSegmentMap, rootSegments, segmentEdges) = self.segmentGraph()
outputString += ('# ' + os.path.split(self.file)[1])
outputString += '\n\n'
outputString += (('# ' + str(len(segmentPrimitiveMap.keys()))) + ' Objects')
outputString += '\n'
outputString += '# FORMAT: O, Object ID, Label, Weight, [ Primitive ID List ]'
outputString += '\n'
for objectId in sorted(segmentPrimitiveMap.keys()):
for label in sorted(segmentPrimitiveMap[objectId][1]):
outputString += (((('O, ' + objectId) + ', ') + label) + ', 1.0')
for primitiveId in sorted(segmentPrimitiveMap[objectId][0]):
outputString += (', ' + primitiveId)
outputString += '\n'
outputString += '\n'
outputString += (('# ' + str(len(segmentEdges.keys()))) + ' Relationships (Pairs of Objects)')
outputString += '\n'
outputString += '# FORMAT: R, Object ID (parent), Object ID (child), Label, Weight'
outputString += '\n'
for (parentObj, childObj) in sorted(segmentEdges.keys()):
for relationship in sorted(segmentEdges[(parentObj, childObj)].keys()):
outputString += (((('R, ' + parentObj) + ', ') + childObj) + ', ')
outputString += (relationship + ', 1.0')
outputString += '\n'
return outputString
|
def csvObject(self):
'Construct CSV data file using object-relationship format. Currently \n\t\tweight values are only placeholders (i.e. 1.0 is always used).'
outputString =
(segmentPrimitiveMap, primitiveSegmentMap, rootSegments, segmentEdges) = self.segmentGraph()
outputString += ('# ' + os.path.split(self.file)[1])
outputString += '\n\n'
outputString += (('# ' + str(len(segmentPrimitiveMap.keys()))) + ' Objects')
outputString += '\n'
outputString += '# FORMAT: O, Object ID, Label, Weight, [ Primitive ID List ]'
outputString += '\n'
for objectId in sorted(segmentPrimitiveMap.keys()):
for label in sorted(segmentPrimitiveMap[objectId][1]):
outputString += (((('O, ' + objectId) + ', ') + label) + ', 1.0')
for primitiveId in sorted(segmentPrimitiveMap[objectId][0]):
outputString += (', ' + primitiveId)
outputString += '\n'
outputString += '\n'
outputString += (('# ' + str(len(segmentEdges.keys()))) + ' Relationships (Pairs of Objects)')
outputString += '\n'
outputString += '# FORMAT: R, Object ID (parent), Object ID (child), Label, Weight'
outputString += '\n'
for (parentObj, childObj) in sorted(segmentEdges.keys()):
for relationship in sorted(segmentEdges[(parentObj, childObj)].keys()):
outputString += (((('R, ' + parentObj) + ', ') + childObj) + ', ')
outputString += (relationship + ', 1.0')
outputString += '\n'
return outputString<|docstring|>Construct CSV data file using object-relationship format. Currently
weight values are only placeholders (i.e. 1.0 is always used).<|endoftext|>
|
58ab28f3d8d92507d4979e9a880967e92669fc359970a541cf0659a4d2813dc3
|
def csv(self):
	"""Return the label graph as a CSV (.lg) string: N rows, then E rows."""
	nodeRows = []
	for (nodeId, labelDict) in self.nlabels.items():
		for (label, weight) in labelDict.items():
			nodeRows.append('N,' + nodeId + ',' + label + ',' + str(weight) + '\n')
	edgeRows = []
	for ((parentId, childId), labelDict) in self.elabels.items():
		for (label, weight) in labelDict.items():
			edgeRows.append('E,' + parentId + ',' + childId + ',' + label + ',' + str(weight) + '\n')
	# Rows are emitted in sorted order for a deterministic file layout.
	nodeRows.sort()
	edgeRows.sort()
	out = ('# ' + os.path.split(self.file)[1]) + '\n\n'
	out += ('# ' + str(len(nodeRows))) + ' Nodes\n'
	out += '# FORMAT: N, Primitive ID, Label, Weight\n'
	out += ''.join(nodeRows)
	out += '\n'
	out += ('# ' + str(len(edgeRows))) + ' Edges\n'
	out += '# FORMAT: E, Primitive ID (parent), Primitive ID (child), Label, Weight\n'
	out += ''.join(edgeRows)
	return out
|
Construct CSV data file representation as a string.
|
src/lg.py
|
csv
|
CurrenWong/lgeval
| 1
|
python
|
def csv(self):
sstring =
nlist = []
elist = []
for nkey in self.nlabels.keys():
nodeLabels = self.nlabels[nkey]
for nlabel in nodeLabels.keys():
nstring = (((((('N,' + nkey) + ',') + nlabel) + ',') + str(nodeLabels[nlabel])) + '\n')
nlist = (nlist + [nstring])
for npair in self.elabels.keys():
edgeLabels = self.elabels[npair]
for elabel in edgeLabels.keys():
estring = (((((((('E,' + npair[0]) + ',') + npair[1]) + ',') + elabel) + ',') + str(edgeLabels[elabel])) + '\n')
elist = (elist + [estring])
nlist.sort()
elist.sort()
sstring += (('# ' + os.path.split(self.file)[1]) + '\n\n')
sstring += (('# ' + str(len(nlist))) + ' Nodes\n')
sstring += '# FORMAT: N, Primitive ID, Label, Weight\n'
for nstring in nlist:
sstring = (sstring + nstring)
sstring += '\n'
sstring += (('# ' + str(len(elist))) + ' Edges\n')
sstring += '# FORMAT: E, Primitive ID (parent), Primitive ID (child), Label, Weight\n'
for estring in elist:
sstring = (sstring + estring)
return sstring
|
def csv(self):
sstring =
nlist = []
elist = []
for nkey in self.nlabels.keys():
nodeLabels = self.nlabels[nkey]
for nlabel in nodeLabels.keys():
nstring = (((((('N,' + nkey) + ',') + nlabel) + ',') + str(nodeLabels[nlabel])) + '\n')
nlist = (nlist + [nstring])
for npair in self.elabels.keys():
edgeLabels = self.elabels[npair]
for elabel in edgeLabels.keys():
estring = (((((((('E,' + npair[0]) + ',') + npair[1]) + ',') + elabel) + ',') + str(edgeLabels[elabel])) + '\n')
elist = (elist + [estring])
nlist.sort()
elist.sort()
sstring += (('# ' + os.path.split(self.file)[1]) + '\n\n')
sstring += (('# ' + str(len(nlist))) + ' Nodes\n')
sstring += '# FORMAT: N, Primitive ID, Label, Weight\n'
for nstring in nlist:
sstring = (sstring + nstring)
sstring += '\n'
sstring += (('# ' + str(len(elist))) + ' Edges\n')
sstring += '# FORMAT: E, Primitive ID (parent), Primitive ID (child), Label, Weight\n'
for estring in elist:
sstring = (sstring + estring)
return sstring<|docstring|>Construct CSV data file representation as a string.<|endoftext|>
|
9de1a7b59c7f18764b70c7fce1bd3d908c61d1ff6dea82efe7856b8055c65a38
|
def segmentGraph(self):
	"""Compute the object (segment) level view of this primitive graph.

	Returns a 4-tuple:
	  * segmentPrimitiveMap: segment id -> (set of primitive ids, [labels])
	  * primitiveSegmentMap: primitive id -> {label: segment id}
	  * rootSegments: list of segment ids with no incoming segment edge
	  * segmentEdges: (parent segment, child segment) ->
	        {label: summed primitive edge weight}

	Primitives are grouped into one segment for a label when that label
	appears on both primitives and on the edge(s) joining them.
	"""
	primitiveSegmentMap = {}
	segmentPrimitiveMap = {}
	segmentEdges = {}
	self.hideUnlabeledEdges()
	# primSets: primitive -> {label: set of primitives grouped with it
	# under that label}.  Labels that cmpNodes treats as cost-free
	# (cost 0 when compared against the empty label list) are skipped.
	primSets = {}
	for (node, labs) in self.nlabels.items():
		primSets[node] = {}
		for l in labs:
			(cost, _) = self.cmpNodes([l], [])
			if (cost > 0):
				primSets[node][l] = set([node])
	# An edge merges its endpoints for label l when l appears on both
	# nodes AND on the edge itself.
	for (n1, n2) in self.elabels.keys():
		commonLabels = set(self.nlabels[n1].keys()).intersection(self.nlabels[n2].keys(), self.elabels[(n1, n2)].keys())
		for l in commonLabels:
			(cost, _) = self.cmpNodes([l], [])
			if (cost > 0):
				primSets[n1][l].add(n2)
				primSets[n2][l].add(n1)
	# Assign segment ids 'Obj0', 'Obj1', ...; primitives whose merged
	# sets are identical share a segment.  The index j into segmentList
	# doubles as the numeric part of the segment id, so the counter i is
	# incremented only when a new segment is created.
	i = 0
	segmentList = []
	rootSegments = set([])
	for (primitive, segments) in primSets.items():
		if (not (primitive in primitiveSegmentMap)):
			primitiveSegmentMap[primitive] = {}
		for lab in segments.keys():
			alreadySegmented = False
			for j in range(len(segmentList)):
				if (segments[lab] == segmentList[j]['prim']):
					if (not (primitive in primitiveSegmentMap)):
						primitiveSegmentMap[primitive] = {}
					primitiveSegmentMap[primitive][lab] = ('Obj' + str(j))
					alreadySegmented = True
					if (lab not in segmentList[j]['label']):
						segmentPrimitiveMap[('Obj' + str(j))][1].append(lab)
						segmentList[j]['label'].add(lab)
					break
			if (not alreadySegmented):
				newSegment = ('Obj' + str(i))
				segmentList = (segmentList + [{'label': {lab}, 'prim': primSets[primitive][lab]}])
				segmentPrimitiveMap[newSegment] = (segments[lab], [lab])
				primitiveSegmentMap[primitive][lab] = newSegment
				rootSegments.add(newSegment)
				i += 1
	# Lift primitive-level edges to segment-level relation edges: a
	# label is accepted as a relation between two segments only if it
	# appears on EVERY primitive edge between the two primitive sets.
	for ((n1, n2), elabs) in self.elabels.items():
		segment1 = primitiveSegmentMap[n1]
		segment2 = primitiveSegmentMap[n2]
		possibleRelationLabels = set(elabs.keys()).difference(self.nlabels[n1].keys(), self.nlabels[n2].keys())
		if (len(possibleRelationLabels) != 0):
			for (l1, pset1) in segment1.items():
				for (l2, pset2) in segment2.items():
					if (pset1 != pset2):
						# NOTE(review): '&=' mutates the set in place, so
						# possibleRelationLabels itself is narrowed across
						# (l1, l2) iterations — confirm this is intended.
						theRelationLab = possibleRelationLabels
						for p1 in primSets[n1][l1]:
							for p2 in primSets[n2][l2]:
								if ((p1, p2) in self.elabels):
									theRelationLab &= set(self.elabels[(p1, p2)].keys())
								else:
									theRelationLab = set([])
								if (len(theRelationLab) == 0):
									break
							if (len(theRelationLab) == 0):
								break
						if (len(theRelationLab) != 0):
							# A segment with an incoming relation edge is
							# no longer a root.
							if (pset2 in rootSegments):
								rootSegments.remove(pset2)
							for label in theRelationLab:
								(cost, _) = self.cmpNodes([label], [])
								if (cost > 0):
									if ((pset1, pset2) in segmentEdges):
										if (label in segmentEdges[(pset1, pset2)]):
											segmentEdges[(pset1, pset2)][label] += self.elabels[(n1, n2)][label]
										else:
											segmentEdges[(pset1, pset2)][label] = self.elabels[(n1, n2)][label]
									else:
										segmentEdges[(pset1, pset2)] = {}
										segmentEdges[(pset1, pset2)][label] = self.elabels[(n1, n2)][label]
	self.restoreUnlabeledEdges()
	return (segmentPrimitiveMap, primitiveSegmentMap, list(rootSegments), segmentEdges)
|
Return dictionaries from segments to strokes, strokes to segments,
segments without parents, and edges labeled as segment (w. symbol label).
|
src/lg.py
|
segmentGraph
|
CurrenWong/lgeval
| 1
|
python
|
def segmentGraph(self):
'Return dictionaries from segments to strokes, strokes to segments,\n\t\tsegments without parents, and edges labeled as segment (w. symbol label).'
primitiveSegmentMap = {}
segmentPrimitiveMap = {}
segmentEdges = {}
self.hideUnlabeledEdges()
primSets = {}
for (node, labs) in self.nlabels.items():
primSets[node] = {}
for l in labs:
(cost, _) = self.cmpNodes([l], [])
if (cost > 0):
primSets[node][l] = set([node])
for (n1, n2) in self.elabels.keys():
commonLabels = set(self.nlabels[n1].keys()).intersection(self.nlabels[n2].keys(), self.elabels[(n1, n2)].keys())
for l in commonLabels:
(cost, _) = self.cmpNodes([l], [])
if (cost > 0):
primSets[n1][l].add(n2)
primSets[n2][l].add(n1)
i = 0
segmentList = []
rootSegments = set([])
for (primitive, segments) in primSets.items():
if (not (primitive in primitiveSegmentMap)):
primitiveSegmentMap[primitive] = {}
for lab in segments.keys():
alreadySegmented = False
for j in range(len(segmentList)):
if (segments[lab] == segmentList[j]['prim']):
if (not (primitive in primitiveSegmentMap)):
primitiveSegmentMap[primitive] = {}
primitiveSegmentMap[primitive][lab] = ('Obj' + str(j))
alreadySegmented = True
if (lab not in segmentList[j]['label']):
segmentPrimitiveMap[('Obj' + str(j))][1].append(lab)
segmentList[j]['label'].add(lab)
break
if (not alreadySegmented):
newSegment = ('Obj' + str(i))
segmentList = (segmentList + [{'label': {lab}, 'prim': primSets[primitive][lab]}])
segmentPrimitiveMap[newSegment] = (segments[lab], [lab])
primitiveSegmentMap[primitive][lab] = newSegment
rootSegments.add(newSegment)
i += 1
for ((n1, n2), elabs) in self.elabels.items():
segment1 = primitiveSegmentMap[n1]
segment2 = primitiveSegmentMap[n2]
possibleRelationLabels = set(elabs.keys()).difference(self.nlabels[n1].keys(), self.nlabels[n2].keys())
if (len(possibleRelationLabels) != 0):
for (l1, pset1) in segment1.items():
for (l2, pset2) in segment2.items():
if (pset1 != pset2):
theRelationLab = possibleRelationLabels
for p1 in primSets[n1][l1]:
for p2 in primSets[n2][l2]:
if ((p1, p2) in self.elabels):
theRelationLab &= set(self.elabels[(p1, p2)].keys())
else:
theRelationLab = set([])
if (len(theRelationLab) == 0):
break
if (len(theRelationLab) == 0):
break
if (len(theRelationLab) != 0):
if (pset2 in rootSegments):
rootSegments.remove(pset2)
for label in theRelationLab:
(cost, _) = self.cmpNodes([label], [])
if (cost > 0):
if ((pset1, pset2) in segmentEdges):
if (label in segmentEdges[(pset1, pset2)]):
segmentEdges[(pset1, pset2)][label] += self.elabels[(n1, n2)][label]
else:
segmentEdges[(pset1, pset2)][label] = self.elabels[(n1, n2)][label]
else:
segmentEdges[(pset1, pset2)] = {}
segmentEdges[(pset1, pset2)][label] = self.elabels[(n1, n2)][label]
self.restoreUnlabeledEdges()
return (segmentPrimitiveMap, primitiveSegmentMap, list(rootSegments), segmentEdges)
|
def segmentGraph(self):
'Return dictionaries from segments to strokes, strokes to segments,\n\t\tsegments without parents, and edges labeled as segment (w. symbol label).'
primitiveSegmentMap = {}
segmentPrimitiveMap = {}
segmentEdges = {}
self.hideUnlabeledEdges()
primSets = {}
for (node, labs) in self.nlabels.items():
primSets[node] = {}
for l in labs:
(cost, _) = self.cmpNodes([l], [])
if (cost > 0):
primSets[node][l] = set([node])
for (n1, n2) in self.elabels.keys():
commonLabels = set(self.nlabels[n1].keys()).intersection(self.nlabels[n2].keys(), self.elabels[(n1, n2)].keys())
for l in commonLabels:
(cost, _) = self.cmpNodes([l], [])
if (cost > 0):
primSets[n1][l].add(n2)
primSets[n2][l].add(n1)
i = 0
segmentList = []
rootSegments = set([])
for (primitive, segments) in primSets.items():
if (not (primitive in primitiveSegmentMap)):
primitiveSegmentMap[primitive] = {}
for lab in segments.keys():
alreadySegmented = False
for j in range(len(segmentList)):
if (segments[lab] == segmentList[j]['prim']):
if (not (primitive in primitiveSegmentMap)):
primitiveSegmentMap[primitive] = {}
primitiveSegmentMap[primitive][lab] = ('Obj' + str(j))
alreadySegmented = True
if (lab not in segmentList[j]['label']):
segmentPrimitiveMap[('Obj' + str(j))][1].append(lab)
segmentList[j]['label'].add(lab)
break
if (not alreadySegmented):
newSegment = ('Obj' + str(i))
segmentList = (segmentList + [{'label': {lab}, 'prim': primSets[primitive][lab]}])
segmentPrimitiveMap[newSegment] = (segments[lab], [lab])
primitiveSegmentMap[primitive][lab] = newSegment
rootSegments.add(newSegment)
i += 1
for ((n1, n2), elabs) in self.elabels.items():
segment1 = primitiveSegmentMap[n1]
segment2 = primitiveSegmentMap[n2]
possibleRelationLabels = set(elabs.keys()).difference(self.nlabels[n1].keys(), self.nlabels[n2].keys())
if (len(possibleRelationLabels) != 0):
for (l1, pset1) in segment1.items():
for (l2, pset2) in segment2.items():
if (pset1 != pset2):
theRelationLab = possibleRelationLabels
for p1 in primSets[n1][l1]:
for p2 in primSets[n2][l2]:
if ((p1, p2) in self.elabels):
theRelationLab &= set(self.elabels[(p1, p2)].keys())
else:
theRelationLab = set([])
if (len(theRelationLab) == 0):
break
if (len(theRelationLab) == 0):
break
if (len(theRelationLab) != 0):
if (pset2 in rootSegments):
rootSegments.remove(pset2)
for label in theRelationLab:
(cost, _) = self.cmpNodes([label], [])
if (cost > 0):
if ((pset1, pset2) in segmentEdges):
if (label in segmentEdges[(pset1, pset2)]):
segmentEdges[(pset1, pset2)][label] += self.elabels[(n1, n2)][label]
else:
segmentEdges[(pset1, pset2)][label] = self.elabels[(n1, n2)][label]
else:
segmentEdges[(pset1, pset2)] = {}
segmentEdges[(pset1, pset2)][label] = self.elabels[(n1, n2)][label]
self.restoreUnlabeledEdges()
return (segmentPrimitiveMap, primitiveSegmentMap, list(rootSegments), segmentEdges)<|docstring|>Return dictionaries from segments to strokes, strokes to segments,
segments without parents, and edges labeled as segment (w. symbol label).<|endoftext|>
|
6f8a95845fc00050b1f047b3e9a90cbf9400b1cdfe7f2de58d498a9f69ece512
|
def compareSegments(self, lg2):
	"""Compare the object segmentation of this graph against graph lg2.

	Both graphs must contain the same primitives (identical ids); this
	is asserted below.  Returns a 5-tuple:
	  (segEdgeMismatch, segDiffs, correctSegments, metrics,
	   primRelEdgeDiffs)
	where metrics is a list of (name, value) pairs, and segDiffs maps a
	primitive to the sets of disagreeing neighbour primitives found in
	each graph.
	"""
	(sp1, ps1, _, sre1) = self.segmentGraph()
	(sp2, ps2, _, sre2) = lg2.segmentGraph()
	allNodes = set(ps1.keys())
	assert (allNodes == set(ps2.keys()))
	edgeDiffCount = 0
	edgeDiffClassCount = 0
	segDiffs = {}
	correctSegments = set([])
	correctSegmentsAndClass = set([])
	undirDiffClassSet = set([])
	# Phase 1: for each primitive, gather the labels of incoming
	# same-segment edges in each graph, then compare the label lists
	# with cmpNodes to count edge label disagreements.
	for primitive in ps1.keys():
		edgeFromP1 = {}
		edgeFromP2 = {}
		for (lab1, seg1) in ps1[primitive].items():
			for p in sp1[seg1][0]:
				if ((p != primitive) and ((p, primitive) in self.elabels.keys()) and (lab1 in self.elabels[(p, primitive)].keys())):
					if (p in edgeFromP1):
						edgeFromP1[p].append(lab1)
					else:
						edgeFromP1[p] = [lab1]
		for (lab2, seg2) in ps2[primitive].items():
			for p in sp2[seg2][0]:
				if ((p != primitive) and ((p, primitive) in lg2.elabels.keys()) and (lab2 in lg2.elabels[(p, primitive)].keys())):
					if (p in edgeFromP2):
						edgeFromP2[p].append(lab2)
					else:
						edgeFromP2[p] = [lab2]
		diff1 = set([])
		diff2 = set([])
		commonPrim = set(edgeFromP1.keys()).intersection(edgeFromP2.keys())
		for p in commonPrim:
			(cost, diff) = self.cmpNodes(edgeFromP1[p], edgeFromP2[p])
			edgeDiffCount = (edgeDiffCount + cost)
			for (l1, l2) in diff:
				if ((l1 in self.nlabels[p].keys()) and (l2 in lg2.nlabels[p].keys())):
					# Same grouping, different symbol class label.
					edgeDiffClassCount += 1
				elif (cost > 0):
					diff1.add(p)
					diff2.add(p)
					# Count each disagreeing primitive pair only once,
					# regardless of edge direction.
					if ((not ((p, primitive) in undirDiffClassSet)) and (not ((primitive, p) in undirDiffClassSet))):
						undirDiffClassSet.add((primitive, p))
		for p in (set(edgeFromP1.keys()) - commonPrim):
			(cost, diff) = self.cmpNodes(edgeFromP1[p], [])
			edgeDiffCount = (edgeDiffCount + cost)
			diff1.add(p)
		for p in (set(edgeFromP2.keys()) - commonPrim):
			(cost, diff) = self.cmpNodes(edgeFromP2[p], [])
			edgeDiffCount = (edgeDiffCount + cost)
			diff2.add(p)
		if ((len(diff1) + len(diff2)) > 0):
			segDiffs[primitive] = (diff1, diff2)
	# Phase 2: match objects between graphs by their sorted primitive id
	# tuples.  Objects labeled 'ABSENT' in lg2 are not valid targets.
	targets = {}
	targetObjIds = {}
	matchedTargets = set()
	for ObjID in sp2.keys():
		if ('ABSENT' not in sp2[ObjID][1]):
			primitiveTupleList = tuple(sorted(list(sp2[ObjID][0])))
			targets[primitiveTupleList] = sp2[ObjID][1]
			targetObjIds[primitiveTupleList] = ObjID
	for ObjID in sp1.keys():
		if ('ABSENT' in sp1[ObjID][1]):
			continue
		primitiveTupleList = tuple(sorted(list(sp1[ObjID][0])))
		if ((primitiveTupleList in targets.keys()) and (not (primitiveTupleList in matchedTargets))):
			matchedTargets.add(primitiveTupleList)
			correctSegments.add(ObjID)
			outputLabels = set(sp1[ObjID][1])
			matchingLabels = list(outputLabels.intersection(targets[primitiveTupleList]))
			if (len(matchingLabels) > 0):
				ObjIDRepeats = ([ObjID] * len(matchingLabels))
				correctSegmentsAndClass.add(tuple(zip(ObjIDRepeats, list(matchingLabels))))
	# Total number of (object, label) targets in lg2.
	nbSegmClass = 0
	for (_, labs) in sp2.items():
		nbSegmClass += len(labs[1])
	# Phase 3: compare object-level relation edges.  A relation in this
	# graph is a false positive when either endpoint object is wrong or
	# lg2 has no relation between the matched objects; otherwise it is
	# mislabeled when the label sets differ.
	segRelErrors = 0
	correctSegRels = 0
	correctSegRelLocations = 0
	primRelEdgeDiffs = {}
	for thisPair in sre1.keys():
		misLabeled = False
		falsePositive = False
		thisParentIds = set(sp1[thisPair[0]][0])
		thisChildIds = set(sp1[thisPair[1]][0])
		primitiveTupleListParent = tuple(sorted(list(thisParentIds)))
		primitiveTupleListChild = tuple(sorted(list(thisChildIds)))
		targetObjNameParent = None
		targetObjNameChild = None
		if (primitiveTupleListParent in targetObjIds.keys()):
			targetObjNameParent = targetObjIds[primitiveTupleListParent]
		if (primitiveTupleListChild in targetObjIds.keys()):
			targetObjNameChild = targetObjIds[primitiveTupleListChild]
		if (not ((thisPair[0] in correctSegments) and (thisPair[1] in correctSegments))):
			falsePositive = True
		elif (not ((targetObjNameParent, targetObjNameChild) in sre2.keys())):
			falsePositive = True
		elif (not (sorted(sre1[thisPair].keys()) == sorted(sre2[(targetObjNameParent, targetObjNameChild)].keys()))):
			misLabeled = True
		if (falsePositive or misLabeled):
			self.error = True
			segRelErrors += 1
			primRelEdgeDiffs[thisPair] = [('Error', 1.0)]
		else:
			correctSegRels += 1
		# A mislabeled but correctly located relation still counts as a
		# correct relation location.
		if (not falsePositive):
			correctSegRelLocations += 1
	# Phase 4: recompute object counts without 'ABSENT' padding.
	# removeAbsent/addAbsent are defined elsewhere in this class;
	# presumably they strip and restore the ABSENT primitives — verify.
	lg2.removeAbsent()
	self.removeAbsent()
	(sp2orig, ps2orig, _, sre2orig) = lg2.segmentGraph()
	(sp1orig, ps1orig, _, sre1orig) = self.segmentGraph()
	nLg2Objs = len(sp2orig.keys())
	nLg1Objs = len(sp1orig.keys())
	nLg1ObjsWithAbsent = len(sp1.keys())
	lg2.addAbsent(self)
	self.addAbsent(lg2)
	# 0/1 flags summarizing whether the whole expression is correct at
	# each level (segments, labels, relation locations, relation labels).
	hasCorrectSegments = (1 if ((len(correctSegments) == nLg2Objs) and (len(correctSegments) == nLg1ObjsWithAbsent)) else 0)
	hasCorrectSegmentsAndLabels = (1 if ((len(correctSegmentsAndClass) == nLg2Objs) and (len(correctSegmentsAndClass) == nLg1ObjsWithAbsent)) else 0)
	hasCorrectRelationLocations = (1 if ((correctSegRelLocations == len(sre1.keys())) and (correctSegRelLocations == len(sre2.keys()))) else 0)
	hasCorrectRelationsAndLabels = (1 if ((correctSegRels == len(sre1.keys())) and (correctSegRels == len(sre2.keys()))) else 0)
	hasCorrectStructure = (hasCorrectRelationLocations and hasCorrectSegments)
	metrics = [('edgeDiffClassCount', edgeDiffClassCount), ('undirDiffClassCount', len(undirDiffClassSet)), ('nSeg', nLg2Objs), ('detectedSeg', nLg1Objs), ('dSegRelEdges', len(sre1.keys())), ('CorrectSegments', len(correctSegments)), ('CorrectSegmentsAndClass', len(correctSegmentsAndClass)), ('ClassError', (nbSegmClass - len(correctSegmentsAndClass))), ('CorrectSegRels', correctSegRels), ('CorrectSegRelLocations', correctSegRelLocations), ('SegRelErrors', segRelErrors), ('hasCorrectSegments', hasCorrectSegments), ('hasCorrectSegLab', hasCorrectSegmentsAndLabels), ('hasCorrectRelationLocations', hasCorrectRelationLocations), ('hasCorrectRelLab', hasCorrectRelationsAndLabels), ('hasCorrectStructure', hasCorrectStructure)]
	segEdgeMismatch = (edgeDiffCount - edgeDiffClassCount)
	return (segEdgeMismatch, segDiffs, correctSegments, metrics, primRelEdgeDiffs)
|
Compute the number of differing segments, and record disagreements.
The primitives in each graph should be of the same number and names
(identifiers). Nodes are merged that have identical (label,value)
pairs on nodes and all incoming and outgoing edges.
|
src/lg.py
|
compareSegments
|
CurrenWong/lgeval
| 1
|
python
|
def compareSegments(self, lg2):
'Compute the number of differing segments, and record disagreements.\n\t\tThe primitives in each graph should be of the same number and names\n\t\t(identifiers). Nodes are merged that have identical (label,value)\n\t\tpairs on nodes and all incoming and outgoing edges.'
(sp1, ps1, _, sre1) = self.segmentGraph()
(sp2, ps2, _, sre2) = lg2.segmentGraph()
allNodes = set(ps1.keys())
assert (allNodes == set(ps2.keys()))
edgeDiffCount = 0
edgeDiffClassCount = 0
segDiffs = {}
correctSegments = set([])
correctSegmentsAndClass = set([])
undirDiffClassSet = set([])
for primitive in ps1.keys():
edgeFromP1 = {}
edgeFromP2 = {}
for (lab1, seg1) in ps1[primitive].items():
for p in sp1[seg1][0]:
if ((p != primitive) and ((p, primitive) in self.elabels.keys()) and (lab1 in self.elabels[(p, primitive)].keys())):
if (p in edgeFromP1):
edgeFromP1[p].append(lab1)
else:
edgeFromP1[p] = [lab1]
for (lab2, seg2) in ps2[primitive].items():
for p in sp2[seg2][0]:
if ((p != primitive) and ((p, primitive) in lg2.elabels.keys()) and (lab2 in lg2.elabels[(p, primitive)].keys())):
if (p in edgeFromP2):
edgeFromP2[p].append(lab2)
else:
edgeFromP2[p] = [lab2]
diff1 = set([])
diff2 = set([])
commonPrim = set(edgeFromP1.keys()).intersection(edgeFromP2.keys())
for p in commonPrim:
(cost, diff) = self.cmpNodes(edgeFromP1[p], edgeFromP2[p])
edgeDiffCount = (edgeDiffCount + cost)
for (l1, l2) in diff:
if ((l1 in self.nlabels[p].keys()) and (l2 in lg2.nlabels[p].keys())):
edgeDiffClassCount += 1
elif (cost > 0):
diff1.add(p)
diff2.add(p)
if ((not ((p, primitive) in undirDiffClassSet)) and (not ((primitive, p) in undirDiffClassSet))):
undirDiffClassSet.add((primitive, p))
for p in (set(edgeFromP1.keys()) - commonPrim):
(cost, diff) = self.cmpNodes(edgeFromP1[p], [])
edgeDiffCount = (edgeDiffCount + cost)
diff1.add(p)
for p in (set(edgeFromP2.keys()) - commonPrim):
(cost, diff) = self.cmpNodes(edgeFromP2[p], [])
edgeDiffCount = (edgeDiffCount + cost)
diff2.add(p)
if ((len(diff1) + len(diff2)) > 0):
segDiffs[primitive] = (diff1, diff2)
targets = {}
targetObjIds = {}
matchedTargets = set()
for ObjID in sp2.keys():
if ('ABSENT' not in sp2[ObjID][1]):
primitiveTupleList = tuple(sorted(list(sp2[ObjID][0])))
targets[primitiveTupleList] = sp2[ObjID][1]
targetObjIds[primitiveTupleList] = ObjID
for ObjID in sp1.keys():
if ('ABSENT' in sp1[ObjID][1]):
continue
primitiveTupleList = tuple(sorted(list(sp1[ObjID][0])))
if ((primitiveTupleList in targets.keys()) and (not (primitiveTupleList in matchedTargets))):
matchedTargets.add(primitiveTupleList)
correctSegments.add(ObjID)
outputLabels = set(sp1[ObjID][1])
matchingLabels = list(outputLabels.intersection(targets[primitiveTupleList]))
if (len(matchingLabels) > 0):
ObjIDRepeats = ([ObjID] * len(matchingLabels))
correctSegmentsAndClass.add(tuple(zip(ObjIDRepeats, list(matchingLabels))))
nbSegmClass = 0
for (_, labs) in sp2.items():
nbSegmClass += len(labs[1])
segRelErrors = 0
correctSegRels = 0
correctSegRelLocations = 0
primRelEdgeDiffs = {}
for thisPair in sre1.keys():
misLabeled = False
falsePositive = False
thisParentIds = set(sp1[thisPair[0]][0])
thisChildIds = set(sp1[thisPair[1]][0])
primitiveTupleListParent = tuple(sorted(list(thisParentIds)))
primitiveTupleListChild = tuple(sorted(list(thisChildIds)))
targetObjNameParent = None
targetObjNameChild = None
if (primitiveTupleListParent in targetObjIds.keys()):
targetObjNameParent = targetObjIds[primitiveTupleListParent]
if (primitiveTupleListChild in targetObjIds.keys()):
targetObjNameChild = targetObjIds[primitiveTupleListChild]
if (not ((thisPair[0] in correctSegments) and (thisPair[1] in correctSegments))):
falsePositive = True
elif (not ((targetObjNameParent, targetObjNameChild) in sre2.keys())):
falsePositive = True
elif (not (sorted(sre1[thisPair].keys()) == sorted(sre2[(targetObjNameParent, targetObjNameChild)].keys()))):
misLabeled = True
if (falsePositive or misLabeled):
self.error = True
segRelErrors += 1
primRelEdgeDiffs[thisPair] = [('Error', 1.0)]
else:
correctSegRels += 1
if (not falsePositive):
correctSegRelLocations += 1
lg2.removeAbsent()
self.removeAbsent()
(sp2orig, ps2orig, _, sre2orig) = lg2.segmentGraph()
(sp1orig, ps1orig, _, sre1orig) = self.segmentGraph()
nLg2Objs = len(sp2orig.keys())
nLg1Objs = len(sp1orig.keys())
nLg1ObjsWithAbsent = len(sp1.keys())
lg2.addAbsent(self)
self.addAbsent(lg2)
hasCorrectSegments = (1 if ((len(correctSegments) == nLg2Objs) and (len(correctSegments) == nLg1ObjsWithAbsent)) else 0)
hasCorrectSegmentsAndLabels = (1 if ((len(correctSegmentsAndClass) == nLg2Objs) and (len(correctSegmentsAndClass) == nLg1ObjsWithAbsent)) else 0)
hasCorrectRelationLocations = (1 if ((correctSegRelLocations == len(sre1.keys())) and (correctSegRelLocations == len(sre2.keys()))) else 0)
hasCorrectRelationsAndLabels = (1 if ((correctSegRels == len(sre1.keys())) and (correctSegRels == len(sre2.keys()))) else 0)
hasCorrectStructure = (hasCorrectRelationLocations and hasCorrectSegments)
metrics = [('edgeDiffClassCount', edgeDiffClassCount), ('undirDiffClassCount', len(undirDiffClassSet)), ('nSeg', nLg2Objs), ('detectedSeg', nLg1Objs), ('dSegRelEdges', len(sre1.keys())), ('CorrectSegments', len(correctSegments)), ('CorrectSegmentsAndClass', len(correctSegmentsAndClass)), ('ClassError', (nbSegmClass - len(correctSegmentsAndClass))), ('CorrectSegRels', correctSegRels), ('CorrectSegRelLocations', correctSegRelLocations), ('SegRelErrors', segRelErrors), ('hasCorrectSegments', hasCorrectSegments), ('hasCorrectSegLab', hasCorrectSegmentsAndLabels), ('hasCorrectRelationLocations', hasCorrectRelationLocations), ('hasCorrectRelLab', hasCorrectRelationsAndLabels), ('hasCorrectStructure', hasCorrectStructure)]
segEdgeMismatch = (edgeDiffCount - edgeDiffClassCount)
return (segEdgeMismatch, segDiffs, correctSegments, metrics, primRelEdgeDiffs)
|
def compareSegments(self, lg2):
'Compute the number of differing segments, and record disagreements.\n\t\tThe primitives in each graph should be of the same number and names\n\t\t(identifiers). Nodes are merged that have identical (label,value)\n\t\tpairs on nodes and all incoming and outgoing edges.'
(sp1, ps1, _, sre1) = self.segmentGraph()
(sp2, ps2, _, sre2) = lg2.segmentGraph()
allNodes = set(ps1.keys())
assert (allNodes == set(ps2.keys()))
edgeDiffCount = 0
edgeDiffClassCount = 0
segDiffs = {}
correctSegments = set([])
correctSegmentsAndClass = set([])
undirDiffClassSet = set([])
for primitive in ps1.keys():
edgeFromP1 = {}
edgeFromP2 = {}
for (lab1, seg1) in ps1[primitive].items():
for p in sp1[seg1][0]:
if ((p != primitive) and ((p, primitive) in self.elabels.keys()) and (lab1 in self.elabels[(p, primitive)].keys())):
if (p in edgeFromP1):
edgeFromP1[p].append(lab1)
else:
edgeFromP1[p] = [lab1]
for (lab2, seg2) in ps2[primitive].items():
for p in sp2[seg2][0]:
if ((p != primitive) and ((p, primitive) in lg2.elabels.keys()) and (lab2 in lg2.elabels[(p, primitive)].keys())):
if (p in edgeFromP2):
edgeFromP2[p].append(lab2)
else:
edgeFromP2[p] = [lab2]
diff1 = set([])
diff2 = set([])
commonPrim = set(edgeFromP1.keys()).intersection(edgeFromP2.keys())
for p in commonPrim:
(cost, diff) = self.cmpNodes(edgeFromP1[p], edgeFromP2[p])
edgeDiffCount = (edgeDiffCount + cost)
for (l1, l2) in diff:
if ((l1 in self.nlabels[p].keys()) and (l2 in lg2.nlabels[p].keys())):
edgeDiffClassCount += 1
elif (cost > 0):
diff1.add(p)
diff2.add(p)
if ((not ((p, primitive) in undirDiffClassSet)) and (not ((primitive, p) in undirDiffClassSet))):
undirDiffClassSet.add((primitive, p))
for p in (set(edgeFromP1.keys()) - commonPrim):
(cost, diff) = self.cmpNodes(edgeFromP1[p], [])
edgeDiffCount = (edgeDiffCount + cost)
diff1.add(p)
for p in (set(edgeFromP2.keys()) - commonPrim):
(cost, diff) = self.cmpNodes(edgeFromP2[p], [])
edgeDiffCount = (edgeDiffCount + cost)
diff2.add(p)
if ((len(diff1) + len(diff2)) > 0):
segDiffs[primitive] = (diff1, diff2)
targets = {}
targetObjIds = {}
matchedTargets = set()
for ObjID in sp2.keys():
if ('ABSENT' not in sp2[ObjID][1]):
primitiveTupleList = tuple(sorted(list(sp2[ObjID][0])))
targets[primitiveTupleList] = sp2[ObjID][1]
targetObjIds[primitiveTupleList] = ObjID
for ObjID in sp1.keys():
if ('ABSENT' in sp1[ObjID][1]):
continue
primitiveTupleList = tuple(sorted(list(sp1[ObjID][0])))
if ((primitiveTupleList in targets.keys()) and (not (primitiveTupleList in matchedTargets))):
matchedTargets.add(primitiveTupleList)
correctSegments.add(ObjID)
outputLabels = set(sp1[ObjID][1])
matchingLabels = list(outputLabels.intersection(targets[primitiveTupleList]))
if (len(matchingLabels) > 0):
ObjIDRepeats = ([ObjID] * len(matchingLabels))
correctSegmentsAndClass.add(tuple(zip(ObjIDRepeats, list(matchingLabels))))
nbSegmClass = 0
for (_, labs) in sp2.items():
nbSegmClass += len(labs[1])
segRelErrors = 0
correctSegRels = 0
correctSegRelLocations = 0
primRelEdgeDiffs = {}
for thisPair in sre1.keys():
misLabeled = False
falsePositive = False
thisParentIds = set(sp1[thisPair[0]][0])
thisChildIds = set(sp1[thisPair[1]][0])
primitiveTupleListParent = tuple(sorted(list(thisParentIds)))
primitiveTupleListChild = tuple(sorted(list(thisChildIds)))
targetObjNameParent = None
targetObjNameChild = None
if (primitiveTupleListParent in targetObjIds.keys()):
targetObjNameParent = targetObjIds[primitiveTupleListParent]
if (primitiveTupleListChild in targetObjIds.keys()):
targetObjNameChild = targetObjIds[primitiveTupleListChild]
if (not ((thisPair[0] in correctSegments) and (thisPair[1] in correctSegments))):
falsePositive = True
elif (not ((targetObjNameParent, targetObjNameChild) in sre2.keys())):
falsePositive = True
elif (not (sorted(sre1[thisPair].keys()) == sorted(sre2[(targetObjNameParent, targetObjNameChild)].keys()))):
misLabeled = True
if (falsePositive or misLabeled):
self.error = True
segRelErrors += 1
primRelEdgeDiffs[thisPair] = [('Error', 1.0)]
else:
correctSegRels += 1
if (not falsePositive):
correctSegRelLocations += 1
lg2.removeAbsent()
self.removeAbsent()
(sp2orig, ps2orig, _, sre2orig) = lg2.segmentGraph()
(sp1orig, ps1orig, _, sre1orig) = self.segmentGraph()
nLg2Objs = len(sp2orig.keys())
nLg1Objs = len(sp1orig.keys())
nLg1ObjsWithAbsent = len(sp1.keys())
lg2.addAbsent(self)
self.addAbsent(lg2)
hasCorrectSegments = (1 if ((len(correctSegments) == nLg2Objs) and (len(correctSegments) == nLg1ObjsWithAbsent)) else 0)
hasCorrectSegmentsAndLabels = (1 if ((len(correctSegmentsAndClass) == nLg2Objs) and (len(correctSegmentsAndClass) == nLg1ObjsWithAbsent)) else 0)
hasCorrectRelationLocations = (1 if ((correctSegRelLocations == len(sre1.keys())) and (correctSegRelLocations == len(sre2.keys()))) else 0)
hasCorrectRelationsAndLabels = (1 if ((correctSegRels == len(sre1.keys())) and (correctSegRels == len(sre2.keys()))) else 0)
hasCorrectStructure = (hasCorrectRelationLocations and hasCorrectSegments)
metrics = [('edgeDiffClassCount', edgeDiffClassCount), ('undirDiffClassCount', len(undirDiffClassSet)), ('nSeg', nLg2Objs), ('detectedSeg', nLg1Objs), ('dSegRelEdges', len(sre1.keys())), ('CorrectSegments', len(correctSegments)), ('CorrectSegmentsAndClass', len(correctSegmentsAndClass)), ('ClassError', (nbSegmClass - len(correctSegmentsAndClass))), ('CorrectSegRels', correctSegRels), ('CorrectSegRelLocations', correctSegRelLocations), ('SegRelErrors', segRelErrors), ('hasCorrectSegments', hasCorrectSegments), ('hasCorrectSegLab', hasCorrectSegmentsAndLabels), ('hasCorrectRelationLocations', hasCorrectRelationLocations), ('hasCorrectRelLab', hasCorrectRelationsAndLabels), ('hasCorrectStructure', hasCorrectStructure)]
segEdgeMismatch = (edgeDiffCount - edgeDiffClassCount)
return (segEdgeMismatch, segDiffs, correctSegments, metrics, primRelEdgeDiffs)<|docstring|>Compute the number of differing segments, and record disagreements.
The primitives in each graph should be of the same number and names
(identifiers). Nodes are merged that have identical (label,value)
pairs on nodes and all incoming and outgoing edges.<|endoftext|>
|
b30a779437e76b7b759f7496d63007659ec46bc517e3b8fcdf38bed8340389ca
|
def compare(self, lg2):
'Returns: 1. a list of (metric,value) pairs,\n\t\t2. a list of (n1,n2) node disagreements, 3. (e1,e2) pairs\n\t\tfor edge disagreements, 4. dictionary from primitives to\n\t\tdisagreeing segment graph edges for (self, lg2). Node and \n\t\tedge labels are compared using label sets without values, and\n\t\t*not* labels sorted by value.'
metrics = []
nodeconflicts = []
edgeconflicts = []
allNodes = set(lg2.nlabels.keys()).union(self.nlabels.keys())
numNodes = len(allNodes)
(sp2, ps2, _, sre2) = lg2.segmentGraph()
nSegRelEdges = len(sre2)
self.matchAbsent(lg2)
nlabelMismatch = 0
numEdges = (numNodes * (numNodes - 1))
numLabels = (numNodes + numEdges)
elabelMismatch = 0
nodeClassError = set()
for nid in allNodes:
(cost, errL) = self.cmpNodes(self.nlabels[nid].keys(), lg2.nlabels[nid].keys())
if (cost > 0):
nlabelMismatch = (nlabelMismatch + cost)
for (l1, l2) in errL:
nodeconflicts = (nodeconflicts + [(nid, [(l1, 1.0)], [(l2, 1.0)])])
nodeClassError = nodeClassError.union([nid])
nodeEdgeError = set()
for (graph, oGraph) in [(self, lg2), (lg2, self)]:
for npair in graph.elabels.keys():
if ((not (npair in oGraph.elabels)) and (not (graph.elabels[npair] == ['_']))):
(cost, errL) = self.cmpEdges(graph.elabels[npair].keys(), ['_'])
elabelMismatch = (elabelMismatch + cost)
(a, b) = npair
nodeEdgeError.update([a, b])
if (graph == self):
for (l1, l2) in errL:
edgeconflicts.append((npair, [(l1, 1.0)], [(l2, 1.0)]))
else:
for (l1, l2) in errL:
edgeconflicts.append((npair, [(l2, 1.0)], [(l1, 1.0)]))
nodeError = nodeClassError.union(nodeEdgeError)
for npair in self.elabels.keys():
if (npair in lg2.elabels.keys()):
(cost, errL) = self.cmpEdges(self.elabels[npair].keys(), lg2.elabels[npair].keys())
if (cost > 0):
elabelMismatch = (elabelMismatch + cost)
(a, b) = npair
nodeEdgeError.update([a, b])
for (l1, l2) in errL:
edgeconflicts.append((npair, [(l1, 1.0)], [(l2, 1.0)]))
(segEdgeMismatch, segDiffs, correctSegs, segmentMetrics, segRelDiffs) = self.compareSegments(lg2)
badPairs = {}
for ((n1, n2), _, _) in edgeconflicts:
if (not ((n2, n1) in badPairs)):
badPairs[(n1, n2)] = True
incorrectPairs = len(badPairs)
badSegPairs = set([])
for node in segDiffs.keys():
for other in segDiffs[node][0]:
if ((node != other) and ((other, node) not in badSegPairs)):
badSegPairs.add((node, other))
for other in segDiffs[node][1]:
if ((node != other) and ((other, node) not in badSegPairs)):
badSegPairs.add((node, other))
segPairErrors = len(badSegPairs)
cerror = ('D_C', nlabelMismatch)
lerror = ('D_L', elabelMismatch)
serror = ('D_S', segEdgeMismatch)
rerror = ('D_R', (elabelMismatch - segEdgeMismatch))
aerror = ('D_B', (nlabelMismatch + elabelMismatch))
if ((len(self.nlabels.keys()) == 1) and ((len(self.absentNodes) > 0) or (len(lg2.absentNodes) > 0))):
elabelMismatch = 1
segEdgeMismatch = 1
errorVal = 0.0
if (numEdges > 0):
errorVal += (math.sqrt((float(segEdgeMismatch) / numEdges)) + math.sqrt((float(elabelMismatch) / numEdges)))
if (numNodes > 0):
errorVal += (float(nlabelMismatch) / numNodes)
errorVal = (errorVal / 3.0)
eerror = ('D_E(%)', errorVal)
metrics = (metrics + [aerror, cerror, lerror, rerror, serror, eerror, ('nNodes', numNodes), ('nEdges', numEdges), ('nSegRelEdges', nSegRelEdges), ('dPairs', incorrectPairs), ('segPairErrors', segPairErrors), ('nodeCorrect', (numNodes - len(nodeError)))])
metrics = (metrics + segmentMetrics)
return (metrics, nodeconflicts, edgeconflicts, segDiffs, correctSegs, segRelDiffs)
|
Returns: 1. a list of (metric,value) pairs,
2. a list of (n1,n2) node disagreements, 3. (e1,e2) pairs
for edge disagreements, 4. dictionary from primitives to
disagreeing segment graph edges for (self, lg2). Node and
edge labels are compared using label sets without values, and
*not* labels sorted by value.
|
src/lg.py
|
compare
|
CurrenWong/lgeval
| 1
|
python
|
def compare(self, lg2):
'Returns: 1. a list of (metric,value) pairs,\n\t\t2. a list of (n1,n2) node disagreements, 3. (e1,e2) pairs\n\t\tfor edge disagreements, 4. dictionary from primitives to\n\t\tdisagreeing segment graph edges for (self, lg2). Node and \n\t\tedge labels are compared using label sets without values, and\n\t\t*not* labels sorted by value.'
metrics = []
nodeconflicts = []
edgeconflicts = []
allNodes = set(lg2.nlabels.keys()).union(self.nlabels.keys())
numNodes = len(allNodes)
(sp2, ps2, _, sre2) = lg2.segmentGraph()
nSegRelEdges = len(sre2)
self.matchAbsent(lg2)
nlabelMismatch = 0
numEdges = (numNodes * (numNodes - 1))
numLabels = (numNodes + numEdges)
elabelMismatch = 0
nodeClassError = set()
for nid in allNodes:
(cost, errL) = self.cmpNodes(self.nlabels[nid].keys(), lg2.nlabels[nid].keys())
if (cost > 0):
nlabelMismatch = (nlabelMismatch + cost)
for (l1, l2) in errL:
nodeconflicts = (nodeconflicts + [(nid, [(l1, 1.0)], [(l2, 1.0)])])
nodeClassError = nodeClassError.union([nid])
nodeEdgeError = set()
for (graph, oGraph) in [(self, lg2), (lg2, self)]:
for npair in graph.elabels.keys():
if ((not (npair in oGraph.elabels)) and (not (graph.elabels[npair] == ['_']))):
(cost, errL) = self.cmpEdges(graph.elabels[npair].keys(), ['_'])
elabelMismatch = (elabelMismatch + cost)
(a, b) = npair
nodeEdgeError.update([a, b])
if (graph == self):
for (l1, l2) in errL:
edgeconflicts.append((npair, [(l1, 1.0)], [(l2, 1.0)]))
else:
for (l1, l2) in errL:
edgeconflicts.append((npair, [(l2, 1.0)], [(l1, 1.0)]))
nodeError = nodeClassError.union(nodeEdgeError)
for npair in self.elabels.keys():
if (npair in lg2.elabels.keys()):
(cost, errL) = self.cmpEdges(self.elabels[npair].keys(), lg2.elabels[npair].keys())
if (cost > 0):
elabelMismatch = (elabelMismatch + cost)
(a, b) = npair
nodeEdgeError.update([a, b])
for (l1, l2) in errL:
edgeconflicts.append((npair, [(l1, 1.0)], [(l2, 1.0)]))
(segEdgeMismatch, segDiffs, correctSegs, segmentMetrics, segRelDiffs) = self.compareSegments(lg2)
badPairs = {}
for ((n1, n2), _, _) in edgeconflicts:
if (not ((n2, n1) in badPairs)):
badPairs[(n1, n2)] = True
incorrectPairs = len(badPairs)
badSegPairs = set([])
for node in segDiffs.keys():
for other in segDiffs[node][0]:
if ((node != other) and ((other, node) not in badSegPairs)):
badSegPairs.add((node, other))
for other in segDiffs[node][1]:
if ((node != other) and ((other, node) not in badSegPairs)):
badSegPairs.add((node, other))
segPairErrors = len(badSegPairs)
cerror = ('D_C', nlabelMismatch)
lerror = ('D_L', elabelMismatch)
serror = ('D_S', segEdgeMismatch)
rerror = ('D_R', (elabelMismatch - segEdgeMismatch))
aerror = ('D_B', (nlabelMismatch + elabelMismatch))
if ((len(self.nlabels.keys()) == 1) and ((len(self.absentNodes) > 0) or (len(lg2.absentNodes) > 0))):
elabelMismatch = 1
segEdgeMismatch = 1
errorVal = 0.0
if (numEdges > 0):
errorVal += (math.sqrt((float(segEdgeMismatch) / numEdges)) + math.sqrt((float(elabelMismatch) / numEdges)))
if (numNodes > 0):
errorVal += (float(nlabelMismatch) / numNodes)
errorVal = (errorVal / 3.0)
eerror = ('D_E(%)', errorVal)
metrics = (metrics + [aerror, cerror, lerror, rerror, serror, eerror, ('nNodes', numNodes), ('nEdges', numEdges), ('nSegRelEdges', nSegRelEdges), ('dPairs', incorrectPairs), ('segPairErrors', segPairErrors), ('nodeCorrect', (numNodes - len(nodeError)))])
metrics = (metrics + segmentMetrics)
return (metrics, nodeconflicts, edgeconflicts, segDiffs, correctSegs, segRelDiffs)
|
def compare(self, lg2):
'Returns: 1. a list of (metric,value) pairs,\n\t\t2. a list of (n1,n2) node disagreements, 3. (e1,e2) pairs\n\t\tfor edge disagreements, 4. dictionary from primitives to\n\t\tdisagreeing segment graph edges for (self, lg2). Node and \n\t\tedge labels are compared using label sets without values, and\n\t\t*not* labels sorted by value.'
metrics = []
nodeconflicts = []
edgeconflicts = []
allNodes = set(lg2.nlabels.keys()).union(self.nlabels.keys())
numNodes = len(allNodes)
(sp2, ps2, _, sre2) = lg2.segmentGraph()
nSegRelEdges = len(sre2)
self.matchAbsent(lg2)
nlabelMismatch = 0
numEdges = (numNodes * (numNodes - 1))
numLabels = (numNodes + numEdges)
elabelMismatch = 0
nodeClassError = set()
for nid in allNodes:
(cost, errL) = self.cmpNodes(self.nlabels[nid].keys(), lg2.nlabels[nid].keys())
if (cost > 0):
nlabelMismatch = (nlabelMismatch + cost)
for (l1, l2) in errL:
nodeconflicts = (nodeconflicts + [(nid, [(l1, 1.0)], [(l2, 1.0)])])
nodeClassError = nodeClassError.union([nid])
nodeEdgeError = set()
for (graph, oGraph) in [(self, lg2), (lg2, self)]:
for npair in graph.elabels.keys():
if ((not (npair in oGraph.elabels)) and (not (graph.elabels[npair] == ['_']))):
(cost, errL) = self.cmpEdges(graph.elabels[npair].keys(), ['_'])
elabelMismatch = (elabelMismatch + cost)
(a, b) = npair
nodeEdgeError.update([a, b])
if (graph == self):
for (l1, l2) in errL:
edgeconflicts.append((npair, [(l1, 1.0)], [(l2, 1.0)]))
else:
for (l1, l2) in errL:
edgeconflicts.append((npair, [(l2, 1.0)], [(l1, 1.0)]))
nodeError = nodeClassError.union(nodeEdgeError)
for npair in self.elabels.keys():
if (npair in lg2.elabels.keys()):
(cost, errL) = self.cmpEdges(self.elabels[npair].keys(), lg2.elabels[npair].keys())
if (cost > 0):
elabelMismatch = (elabelMismatch + cost)
(a, b) = npair
nodeEdgeError.update([a, b])
for (l1, l2) in errL:
edgeconflicts.append((npair, [(l1, 1.0)], [(l2, 1.0)]))
(segEdgeMismatch, segDiffs, correctSegs, segmentMetrics, segRelDiffs) = self.compareSegments(lg2)
badPairs = {}
for ((n1, n2), _, _) in edgeconflicts:
if (not ((n2, n1) in badPairs)):
badPairs[(n1, n2)] = True
incorrectPairs = len(badPairs)
badSegPairs = set([])
for node in segDiffs.keys():
for other in segDiffs[node][0]:
if ((node != other) and ((other, node) not in badSegPairs)):
badSegPairs.add((node, other))
for other in segDiffs[node][1]:
if ((node != other) and ((other, node) not in badSegPairs)):
badSegPairs.add((node, other))
segPairErrors = len(badSegPairs)
cerror = ('D_C', nlabelMismatch)
lerror = ('D_L', elabelMismatch)
serror = ('D_S', segEdgeMismatch)
rerror = ('D_R', (elabelMismatch - segEdgeMismatch))
aerror = ('D_B', (nlabelMismatch + elabelMismatch))
if ((len(self.nlabels.keys()) == 1) and ((len(self.absentNodes) > 0) or (len(lg2.absentNodes) > 0))):
elabelMismatch = 1
segEdgeMismatch = 1
errorVal = 0.0
if (numEdges > 0):
errorVal += (math.sqrt((float(segEdgeMismatch) / numEdges)) + math.sqrt((float(elabelMismatch) / numEdges)))
if (numNodes > 0):
errorVal += (float(nlabelMismatch) / numNodes)
errorVal = (errorVal / 3.0)
eerror = ('D_E(%)', errorVal)
metrics = (metrics + [aerror, cerror, lerror, rerror, serror, eerror, ('nNodes', numNodes), ('nEdges', numEdges), ('nSegRelEdges', nSegRelEdges), ('dPairs', incorrectPairs), ('segPairErrors', segPairErrors), ('nodeCorrect', (numNodes - len(nodeError)))])
metrics = (metrics + segmentMetrics)
return (metrics, nodeconflicts, edgeconflicts, segDiffs, correctSegs, segRelDiffs)<|docstring|>Returns: 1. a list of (metric,value) pairs,
2. a list of (n1,n2) node disagreements, 3. (e1,e2) pairs
for edge disagreements, 4. dictionary from primitives to
disagreeing segment graph edges for (self, lg2). Node and
edge labels are compared using label sets without values, and
*not* labels sorted by value.<|endoftext|>
|
61b307237e16f0911c018d0c3713fb610a7f54d7618ca6f112d4890761de66d9
|
def separateTreeEdges(self):
'Return a list of root nodes, and two lists of edges corresponding to \n\t\ttree/forest edges, and the remaining edges.'
(segmentPrimitiveMap, primitiveSegmentMap, noparentSegments, segmentEdges) = self.segmentGraph()
nodeParentMap = {}
nodeChildMap = {}
rootNodes = set(segmentPrimitiveMap.keys())
for (parent, child) in segmentEdges:
if (not (child in nodeParentMap.keys())):
nodeParentMap[child] = [parent]
rootNodes.remove(child)
else:
nodeParentMap[child] += [parent]
if (not (parent in nodeChildMap.keys())):
nodeChildMap[parent] = [child]
else:
nodeChildMap[parent] += [child]
fringe = list(rootNodes)
nonTreeEdges = set([])
while (len(fringe) > 0):
nextNode = fringe.pop(0)
if (nextNode in nodeChildMap.keys()):
children = copy.deepcopy(nodeChildMap[nextNode])
for child in children:
numChildParents = len(nodeParentMap[child])
if (numChildParents == 1):
fringe += [child]
else:
nonTreeEdges.add((nextNode, child))
nodeChildMap[nextNode].remove(child)
nodeParentMap[child].remove(nextNode)
treeEdges = []
for node in nodeChildMap:
for child in nodeChildMap[node]:
treeEdges += [(node, child)]
return (list(rootNodes), treeEdges, list(nonTreeEdges))
|
Return a list of root nodes, and two lists of edges corresponding to
tree/forest edges, and the remaining edges.
|
src/lg.py
|
separateTreeEdges
|
CurrenWong/lgeval
| 1
|
python
|
def separateTreeEdges(self):
'Return a list of root nodes, and two lists of edges corresponding to \n\t\ttree/forest edges, and the remaining edges.'
(segmentPrimitiveMap, primitiveSegmentMap, noparentSegments, segmentEdges) = self.segmentGraph()
nodeParentMap = {}
nodeChildMap = {}
rootNodes = set(segmentPrimitiveMap.keys())
for (parent, child) in segmentEdges:
if (not (child in nodeParentMap.keys())):
nodeParentMap[child] = [parent]
rootNodes.remove(child)
else:
nodeParentMap[child] += [parent]
if (not (parent in nodeChildMap.keys())):
nodeChildMap[parent] = [child]
else:
nodeChildMap[parent] += [child]
fringe = list(rootNodes)
nonTreeEdges = set([])
while (len(fringe) > 0):
nextNode = fringe.pop(0)
if (nextNode in nodeChildMap.keys()):
children = copy.deepcopy(nodeChildMap[nextNode])
for child in children:
numChildParents = len(nodeParentMap[child])
if (numChildParents == 1):
fringe += [child]
else:
nonTreeEdges.add((nextNode, child))
nodeChildMap[nextNode].remove(child)
nodeParentMap[child].remove(nextNode)
treeEdges = []
for node in nodeChildMap:
for child in nodeChildMap[node]:
treeEdges += [(node, child)]
return (list(rootNodes), treeEdges, list(nonTreeEdges))
|
def separateTreeEdges(self):
'Return a list of root nodes, and two lists of edges corresponding to \n\t\ttree/forest edges, and the remaining edges.'
(segmentPrimitiveMap, primitiveSegmentMap, noparentSegments, segmentEdges) = self.segmentGraph()
nodeParentMap = {}
nodeChildMap = {}
rootNodes = set(segmentPrimitiveMap.keys())
for (parent, child) in segmentEdges:
if (not (child in nodeParentMap.keys())):
nodeParentMap[child] = [parent]
rootNodes.remove(child)
else:
nodeParentMap[child] += [parent]
if (not (parent in nodeChildMap.keys())):
nodeChildMap[parent] = [child]
else:
nodeChildMap[parent] += [child]
fringe = list(rootNodes)
nonTreeEdges = set([])
while (len(fringe) > 0):
nextNode = fringe.pop(0)
if (nextNode in nodeChildMap.keys()):
children = copy.deepcopy(nodeChildMap[nextNode])
for child in children:
numChildParents = len(nodeParentMap[child])
if (numChildParents == 1):
fringe += [child]
else:
nonTreeEdges.add((nextNode, child))
nodeChildMap[nextNode].remove(child)
nodeParentMap[child].remove(nextNode)
treeEdges = []
for node in nodeChildMap:
for child in nodeChildMap[node]:
treeEdges += [(node, child)]
return (list(rootNodes), treeEdges, list(nonTreeEdges))<|docstring|>Return a list of root nodes, and two lists of edges corresponding to
tree/forest edges, and the remaining edges.<|endoftext|>
|
0ac1be620c6a19e445ccb0aeabf6506b5dd41724673e41dfa4b2e371237e998b
|
def removeAbsent(self):
'Remove any absent edges from both graphs, and empty the fields\n\t\trecording empty objects.'
for absEdge in self.absentEdges:
del self.elabels[absEdge]
for absNode in self.absentNodes:
del self.nlabels[absNode]
self.absentNodes = set([])
self.absentEdges = set([])
|
Remove any absent edges from both graphs, and empty the fields
recording empty objects.
|
src/lg.py
|
removeAbsent
|
CurrenWong/lgeval
| 1
|
python
|
def removeAbsent(self):
'Remove any absent edges from both graphs, and empty the fields\n\t\trecording empty objects.'
for absEdge in self.absentEdges:
del self.elabels[absEdge]
for absNode in self.absentNodes:
del self.nlabels[absNode]
self.absentNodes = set([])
self.absentEdges = set([])
|
def removeAbsent(self):
'Remove any absent edges from both graphs, and empty the fields\n\t\trecording empty objects.'
for absEdge in self.absentEdges:
del self.elabels[absEdge]
for absNode in self.absentNodes:
del self.nlabels[absNode]
self.absentNodes = set([])
self.absentEdges = set([])<|docstring|>Remove any absent edges from both graphs, and empty the fields
recording empty objects.<|endoftext|>
|
a877380e46d36b4968449dd137d2b852a22509c9903e0cbf9c1d45bdde1e8d24
|
def addAbsent(self, lg2):
'Identify edges in other graph but not the current one.'
selfNodes = set(self.nlabels.keys())
lg2Nodes = set(lg2.nlabels.keys())
self.absentNodes = lg2Nodes.difference(selfNodes)
if (len(self.absentNodes) > 0):
sys.stderr.write(((((((' !! Inserting ABSENT nodes for:\n ' + self.file) + ' vs.\n ') + lg2.file) + '\n ') + str(sorted(list(self.absentNodes)))) + '\n'))
self.error = True
for missingNode in self.absentNodes:
self.nlabels[missingNode] = {'ABSENT': 1.0}
|
Identify edges in other graph but not the current one.
|
src/lg.py
|
addAbsent
|
CurrenWong/lgeval
| 1
|
python
|
def addAbsent(self, lg2):
selfNodes = set(self.nlabels.keys())
lg2Nodes = set(lg2.nlabels.keys())
self.absentNodes = lg2Nodes.difference(selfNodes)
if (len(self.absentNodes) > 0):
sys.stderr.write(((((((' !! Inserting ABSENT nodes for:\n ' + self.file) + ' vs.\n ') + lg2.file) + '\n ') + str(sorted(list(self.absentNodes)))) + '\n'))
self.error = True
for missingNode in self.absentNodes:
self.nlabels[missingNode] = {'ABSENT': 1.0}
|
def addAbsent(self, lg2):
selfNodes = set(self.nlabels.keys())
lg2Nodes = set(lg2.nlabels.keys())
self.absentNodes = lg2Nodes.difference(selfNodes)
if (len(self.absentNodes) > 0):
sys.stderr.write(((((((' !! Inserting ABSENT nodes for:\n ' + self.file) + ' vs.\n ') + lg2.file) + '\n ') + str(sorted(list(self.absentNodes)))) + '\n'))
self.error = True
for missingNode in self.absentNodes:
self.nlabels[missingNode] = {'ABSENT': 1.0}<|docstring|>Identify edges in other graph but not the current one.<|endoftext|>
|
1f42b0aec603a2497e225966d44553d4e5a754da419da3d4df6b68394a65bedf
|
def matchAbsent(self, lg2):
'Add all missing primitives and edges between this graph and\n\t\tthe passed graph. **Modifies both the object and argument graph lg2.'
self.removeAbsent()
self.addAbsent(lg2)
lg2.removeAbsent()
lg2.addAbsent(self)
|
Add all missing primitives and edges between this graph and
the passed graph. **Modifies both the object and argument graph lg2.
|
src/lg.py
|
matchAbsent
|
CurrenWong/lgeval
| 1
|
python
|
def matchAbsent(self, lg2):
'Add all missing primitives and edges between this graph and\n\t\tthe passed graph. **Modifies both the object and argument graph lg2.'
self.removeAbsent()
self.addAbsent(lg2)
lg2.removeAbsent()
lg2.addAbsent(self)
|
def matchAbsent(self, lg2):
'Add all missing primitives and edges between this graph and\n\t\tthe passed graph. **Modifies both the object and argument graph lg2.'
self.removeAbsent()
self.addAbsent(lg2)
lg2.removeAbsent()
lg2.addAbsent(self)<|docstring|>Add all missing primitives and edges between this graph and
the passed graph. **Modifies both the object and argument graph lg2.<|endoftext|>
|
54409b2010b02a4328689579efa562dad0d613c7d758137c10de81adec37d489
|
def hideUnlabeledEdges(self):
'Move all missing/unlabeled edges to the hiddenEdges field.'
for edge in self.elabels.keys():
if (set(self.elabels[edge].keys()) == set(['_'])):
self.hiddenEdges[edge] = self.elabels[edge]
del self.elabels[edge]
|
Move all missing/unlabeled edges to the hiddenEdges field.
|
src/lg.py
|
hideUnlabeledEdges
|
CurrenWong/lgeval
| 1
|
python
|
def hideUnlabeledEdges(self):
for edge in self.elabels.keys():
if (set(self.elabels[edge].keys()) == set(['_'])):
self.hiddenEdges[edge] = self.elabels[edge]
del self.elabels[edge]
|
def hideUnlabeledEdges(self):
for edge in self.elabels.keys():
if (set(self.elabels[edge].keys()) == set(['_'])):
self.hiddenEdges[edge] = self.elabels[edge]
del self.elabels[edge]<|docstring|>Move all missing/unlabeled edges to the hiddenEdges field.<|endoftext|>
|
56a72cd5df94505cd300e8dcdce1c574e7e4564461e14af7d1a65a148b2fc758
|
def restoreUnlabeledEdges(self):
'Move all edges in the hiddenEdges field back to the set of\n\t\tedges for the graph.'
for edge in self.hiddenEdges.keys():
self.elabels[edge] = self.hiddenEdges[edge]
del self.hiddenEdges[edge]
|
Move all edges in the hiddenEdges field back to the set of
edges for the graph.
|
src/lg.py
|
restoreUnlabeledEdges
|
CurrenWong/lgeval
| 1
|
python
|
def restoreUnlabeledEdges(self):
'Move all edges in the hiddenEdges field back to the set of\n\t\tedges for the graph.'
for edge in self.hiddenEdges.keys():
self.elabels[edge] = self.hiddenEdges[edge]
del self.hiddenEdges[edge]
|
def restoreUnlabeledEdges(self):
'Move all edges in the hiddenEdges field back to the set of\n\t\tedges for the graph.'
for edge in self.hiddenEdges.keys():
self.elabels[edge] = self.hiddenEdges[edge]
del self.hiddenEdges[edge]<|docstring|>Move all edges in the hiddenEdges field back to the set of
edges for the graph.<|endoftext|>
|
40956e3a68a0d2072aa437b0227524ca2b6daf5c6a462d902d5b5e49c3782a48
|
def merge(self, lg2, ncombfn, ecombfn):
"New node/edge labels are added from lg2 with common primitives. The\n\t value for common node/edge labels updated using ncombfn and\n\t ecombfn respectiveley: each function is applied to current values to\n\t obtain the new value (i.e. v1' = fn(v1,v2))."
self.matchAbsent(lg2)
mergeMaps(self.nlabels, self.gweight, lg2.nlabels, lg2.gweight, ncombfn)
mergeMaps(self.elabels, self.gweight, lg2.elabels, lg2.gweight, ecombfn)
|
New node/edge labels are added from lg2 with common primitives. The
value for common node/edge labels updated using ncombfn and
ecombfn respectiveley: each function is applied to current values to
obtain the new value (i.e. v1' = fn(v1,v2)).
|
src/lg.py
|
merge
|
CurrenWong/lgeval
| 1
|
python
|
def merge(self, lg2, ncombfn, ecombfn):
"New node/edge labels are added from lg2 with common primitives. The\n\t value for common node/edge labels updated using ncombfn and\n\t ecombfn respectiveley: each function is applied to current values to\n\t obtain the new value (i.e. v1' = fn(v1,v2))."
self.matchAbsent(lg2)
mergeMaps(self.nlabels, self.gweight, lg2.nlabels, lg2.gweight, ncombfn)
mergeMaps(self.elabels, self.gweight, lg2.elabels, lg2.gweight, ecombfn)
|
def merge(self, lg2, ncombfn, ecombfn):
"New node/edge labels are added from lg2 with common primitives. The\n\t value for common node/edge labels updated using ncombfn and\n\t ecombfn respectiveley: each function is applied to current values to\n\t obtain the new value (i.e. v1' = fn(v1,v2))."
self.matchAbsent(lg2)
mergeMaps(self.nlabels, self.gweight, lg2.nlabels, lg2.gweight, ncombfn)
mergeMaps(self.elabels, self.gweight, lg2.elabels, lg2.gweight, ecombfn)<|docstring|>New node/edge labels are added from lg2 with common primitives. The
value for common node/edge labels updated using ncombfn and
ecombfn respectiveley: each function is applied to current values to
obtain the new value (i.e. v1' = fn(v1,v2)).<|endoftext|>
|
549b88ed6b0937205663bad3f7460f4f07bbbe9587a6d4236f8502e18c20bda6
|
def addWeightedLabelValues(self, lg2):
'Merge two graphs, adding the values for each node/edge label.'
def addValues(v1, w1, v2, w2):
return ((w1 * v1) + (w2 * v2))
self.merge(lg2, addValues, addValues)
|
Merge two graphs, adding the values for each node/edge label.
|
src/lg.py
|
addWeightedLabelValues
|
CurrenWong/lgeval
| 1
|
python
|
def addWeightedLabelValues(self, lg2):
def addValues(v1, w1, v2, w2):
return ((w1 * v1) + (w2 * v2))
self.merge(lg2, addValues, addValues)
|
def addWeightedLabelValues(self, lg2):
def addValues(v1, w1, v2, w2):
return ((w1 * v1) + (w2 * v2))
self.merge(lg2, addValues, addValues)<|docstring|>Merge two graphs, adding the values for each node/edge label.<|endoftext|>
|
122ec512f1c941151f49f8f1481af3439bddd88c797f7026d7de3d2eedcf1cbf
|
def keepOnlyCorrectLab(self, gt):
'Keep only correct labels compared with the gt. Use the\n\t\tlabel ERROR_N and ERROR_E for node and edges errors. Use the \n\t\tcompareTools to compare the labels with ground truth.'
allNodes = set(gt.nlabels.keys()).union(self.nlabels.keys())
self.matchAbsent(gt)
for nid in allNodes:
(cost, _) = self.cmpNodes(self.nlabels[nid].keys(), gt.nlabels[nid].keys())
if (cost > 0):
self.nlabels[nid] = {'ERROR_N': 1.0}
else:
self.nlabels[nid] = gt.nlabels[nid]
for (graph, oGraph) in [(self, gt), (gt, self)]:
for npair in graph.elabels.keys():
cost = 0
if (not (npair in oGraph.elabels)):
(cost, errL) = self.cmpEdges(graph.elabels[npair].keys(), ['_'])
else:
(cost, errL) = self.cmpEdges(graph.elabels[npair].keys(), oGraph.elabels[npair].keys())
if (cost > 0):
self.elabels[npair] = {'ERROR_E': 1.0}
elif (npair in gt.elabels):
self.elabels[npair] = gt.elabels[npair]
else:
self.elabels[npair] = {'_': 1.0}
|
Keep only correct labels compared with the gt. Use the
label ERROR_N and ERROR_E for node and edges errors. Use the
compareTools to compare the labels with ground truth.
|
src/lg.py
|
keepOnlyCorrectLab
|
CurrenWong/lgeval
| 1
|
python
|
def keepOnlyCorrectLab(self, gt):
'Keep only correct labels compared with the gt. Use the\n\t\tlabel ERROR_N and ERROR_E for node and edges errors. Use the \n\t\tcompareTools to compare the labels with ground truth.'
allNodes = set(gt.nlabels.keys()).union(self.nlabels.keys())
self.matchAbsent(gt)
for nid in allNodes:
(cost, _) = self.cmpNodes(self.nlabels[nid].keys(), gt.nlabels[nid].keys())
if (cost > 0):
self.nlabels[nid] = {'ERROR_N': 1.0}
else:
self.nlabels[nid] = gt.nlabels[nid]
for (graph, oGraph) in [(self, gt), (gt, self)]:
for npair in graph.elabels.keys():
cost = 0
if (not (npair in oGraph.elabels)):
(cost, errL) = self.cmpEdges(graph.elabels[npair].keys(), ['_'])
else:
(cost, errL) = self.cmpEdges(graph.elabels[npair].keys(), oGraph.elabels[npair].keys())
if (cost > 0):
self.elabels[npair] = {'ERROR_E': 1.0}
elif (npair in gt.elabels):
self.elabels[npair] = gt.elabels[npair]
else:
self.elabels[npair] = {'_': 1.0}
|
def keepOnlyCorrectLab(self, gt):
'Keep only correct labels compared with the gt. Use the\n\t\tlabel ERROR_N and ERROR_E for node and edges errors. Use the \n\t\tcompareTools to compare the labels with ground truth.'
allNodes = set(gt.nlabels.keys()).union(self.nlabels.keys())
self.matchAbsent(gt)
for nid in allNodes:
(cost, _) = self.cmpNodes(self.nlabels[nid].keys(), gt.nlabels[nid].keys())
if (cost > 0):
self.nlabels[nid] = {'ERROR_N': 1.0}
else:
self.nlabels[nid] = gt.nlabels[nid]
for (graph, oGraph) in [(self, gt), (gt, self)]:
for npair in graph.elabels.keys():
cost = 0
if (not (npair in oGraph.elabels)):
(cost, errL) = self.cmpEdges(graph.elabels[npair].keys(), ['_'])
else:
(cost, errL) = self.cmpEdges(graph.elabels[npair].keys(), oGraph.elabels[npair].keys())
if (cost > 0):
self.elabels[npair] = {'ERROR_E': 1.0}
elif (npair in gt.elabels):
self.elabels[npair] = gt.elabels[npair]
else:
self.elabels[npair] = {'_': 1.0}<|docstring|>Keep only correct labels compared with the gt. Use the
label ERROR_N and ERROR_E for node and edges errors. Use the
compareTools to compare the labels with ground truth.<|endoftext|>
|
74fabbf2e4d3da85fba09ca4e6716fe1774cbb6c4f63018904b86db97ad72ab3
|
def selectMaxLabels(self):
'Filter for labels with maximum confidence. NOTE: this will\n\t\tkeep all maximum value labels found in each map, e.g. if two\n\t\tclassifications have the same likelihood for a node.'
for object in self.nlabels.keys():
max = (- 1.0)
maxPairs = {}
for (label, value) in self.nlabels[object].items():
if (value > max):
max = value
maxPairs = {label: value}
elif (value == max):
maxPairs[label] = value
self.nlabels[object] = maxPairs
for edge in self.elabels.keys():
max = (- 1.0)
maxPairs = {}
for (label, value) in self.elabels[edge].items():
if (value > max):
max = value
maxPairs = {label: value}
elif (value == max):
maxPairs[label] = value
self.elabels[edge] = maxPairs
|
Filter for labels with maximum confidence. NOTE: this will
keep all maximum value labels found in each map, e.g. if two
classifications have the same likelihood for a node.
|
src/lg.py
|
selectMaxLabels
|
CurrenWong/lgeval
| 1
|
python
|
def selectMaxLabels(self):
'Filter for labels with maximum confidence. NOTE: this will\n\t\tkeep all maximum value labels found in each map, e.g. if two\n\t\tclassifications have the same likelihood for a node.'
for object in self.nlabels.keys():
max = (- 1.0)
maxPairs = {}
for (label, value) in self.nlabels[object].items():
if (value > max):
max = value
maxPairs = {label: value}
elif (value == max):
maxPairs[label] = value
self.nlabels[object] = maxPairs
for edge in self.elabels.keys():
max = (- 1.0)
maxPairs = {}
for (label, value) in self.elabels[edge].items():
if (value > max):
max = value
maxPairs = {label: value}
elif (value == max):
maxPairs[label] = value
self.elabels[edge] = maxPairs
|
def selectMaxLabels(self):
'Filter for labels with maximum confidence. NOTE: this will\n\t\tkeep all maximum value labels found in each map, e.g. if two\n\t\tclassifications have the same likelihood for a node.'
for object in self.nlabels.keys():
max = (- 1.0)
maxPairs = {}
for (label, value) in self.nlabels[object].items():
if (value > max):
max = value
maxPairs = {label: value}
elif (value == max):
maxPairs[label] = value
self.nlabels[object] = maxPairs
for edge in self.elabels.keys():
max = (- 1.0)
maxPairs = {}
for (label, value) in self.elabels[edge].items():
if (value > max):
max = value
maxPairs = {label: value}
elif (value == max):
maxPairs[label] = value
self.elabels[edge] = maxPairs<|docstring|>Filter for labels with maximum confidence. NOTE: this will
keep all maximum value labels found in each map, e.g. if two
classifications have the same likelihood for a node.<|endoftext|>
|
f8a1d8c23c1c7ce3b94a1df0d2e4917d5939c938c398bb177f7e689444340aae
|
def invertValues(self):
'Substract all node and edge label values from 1.0, to \n\t\tinvert the values. Attempting to invert a value outside [0,1] will\n\t\tset the error flag on the object.'
for node in self.nlabels.keys():
for label in self.nlabels[node]:
currentValue = self.nlabels[node][label]
if ((currentValue < 0.0) or (currentValue > 1.0)):
sys.stderr.write((((((('\n !! Attempted to invert node: ' + node) + ' label "') + label) + '" with value ') + str(currentValue)) + '\n'))
self.error = True
else:
self.nlabels[node][label] = (1.0 - currentValue)
for edge in self.elabels.keys():
for label in self.elabels[edge]:
currentValue = self.elabels[edge][label]
if ((currentValue < 0.0) or (currentValue > 1.0)):
sys.stderr.write((((((('\n !! Attempted to invert edge: ' + str(edge)) + ' label "') + label) + '" with value ') + str(currentValue)) + '\n'))
self.error = True
else:
self.elabels[edge][label] = (1.0 - currentValue)
|
Substract all node and edge label values from 1.0, to
invert the values. Attempting to invert a value outside [0,1] will
set the error flag on the object.
|
src/lg.py
|
invertValues
|
CurrenWong/lgeval
| 1
|
python
|
def invertValues(self):
'Substract all node and edge label values from 1.0, to \n\t\tinvert the values. Attempting to invert a value outside [0,1] will\n\t\tset the error flag on the object.'
for node in self.nlabels.keys():
for label in self.nlabels[node]:
currentValue = self.nlabels[node][label]
if ((currentValue < 0.0) or (currentValue > 1.0)):
sys.stderr.write((((((('\n !! Attempted to invert node: ' + node) + ' label "') + label) + '" with value ') + str(currentValue)) + '\n'))
self.error = True
else:
self.nlabels[node][label] = (1.0 - currentValue)
for edge in self.elabels.keys():
for label in self.elabels[edge]:
currentValue = self.elabels[edge][label]
if ((currentValue < 0.0) or (currentValue > 1.0)):
sys.stderr.write((((((('\n !! Attempted to invert edge: ' + str(edge)) + ' label "') + label) + '" with value ') + str(currentValue)) + '\n'))
self.error = True
else:
self.elabels[edge][label] = (1.0 - currentValue)
|
def invertValues(self):
'Substract all node and edge label values from 1.0, to \n\t\tinvert the values. Attempting to invert a value outside [0,1] will\n\t\tset the error flag on the object.'
for node in self.nlabels.keys():
for label in self.nlabels[node]:
currentValue = self.nlabels[node][label]
if ((currentValue < 0.0) or (currentValue > 1.0)):
sys.stderr.write((((((('\n !! Attempted to invert node: ' + node) + ' label "') + label) + '" with value ') + str(currentValue)) + '\n'))
self.error = True
else:
self.nlabels[node][label] = (1.0 - currentValue)
for edge in self.elabels.keys():
for label in self.elabels[edge]:
currentValue = self.elabels[edge][label]
if ((currentValue < 0.0) or (currentValue > 1.0)):
sys.stderr.write((((((('\n !! Attempted to invert edge: ' + str(edge)) + ' label "') + label) + '" with value ') + str(currentValue)) + '\n'))
self.error = True
else:
self.elabels[edge][label] = (1.0 - currentValue)<|docstring|>Substract all node and edge label values from 1.0, to
invert the values. Attempting to invert a value outside [0,1] will
set the error flag on the object.<|endoftext|>
|
1cf080b8f2b81d53a0dcd05be779807a966b1f7ea421742f963f3e263761268b
|
def subStructIterator(self, nodeNumbers):
' Return an iterator which gives all substructures with n nodes\n\t\tn belonging to the list depths'
if isinstance(nodeNumbers, int):
nodeNumbers = [nodeNumbers]
subStruct = []
for n in self.nlabels.keys():
subStruct.append(set([n]))
if (1 in nodeNumbers):
(yield smallGraph.SmallGraph([(n, ''.join(self.nlabels[n].keys()))], []))
for d in range(2, (max(nodeNumbers) + 1)):
newSubsS = set([])
newSubsL = []
for sub in subStruct:
le = getEdgesToNeighbours(sub, self.elabels.keys())
for (f, to) in le:
new = sub.union([to])
lnew = list(new)
lnew.sort()
snew = ','.join(lnew)
if (not (snew in newSubsS)):
newSubsS.add(snew)
newSubsL.append(new)
if (d in nodeNumbers):
(yield self.getSubSmallGraph(new))
subStruct = newSubsL
|
Return an iterator which gives all substructures with n nodes
n belonging to the list depths
|
src/lg.py
|
subStructIterator
|
CurrenWong/lgeval
| 1
|
python
|
def subStructIterator(self, nodeNumbers):
' Return an iterator which gives all substructures with n nodes\n\t\tn belonging to the list depths'
if isinstance(nodeNumbers, int):
nodeNumbers = [nodeNumbers]
subStruct = []
for n in self.nlabels.keys():
subStruct.append(set([n]))
if (1 in nodeNumbers):
(yield smallGraph.SmallGraph([(n, .join(self.nlabels[n].keys()))], []))
for d in range(2, (max(nodeNumbers) + 1)):
newSubsS = set([])
newSubsL = []
for sub in subStruct:
le = getEdgesToNeighbours(sub, self.elabels.keys())
for (f, to) in le:
new = sub.union([to])
lnew = list(new)
lnew.sort()
snew = ','.join(lnew)
if (not (snew in newSubsS)):
newSubsS.add(snew)
newSubsL.append(new)
if (d in nodeNumbers):
(yield self.getSubSmallGraph(new))
subStruct = newSubsL
|
def subStructIterator(self, nodeNumbers):
' Return an iterator which gives all substructures with n nodes\n\t\tn belonging to the list depths'
if isinstance(nodeNumbers, int):
nodeNumbers = [nodeNumbers]
subStruct = []
for n in self.nlabels.keys():
subStruct.append(set([n]))
if (1 in nodeNumbers):
(yield smallGraph.SmallGraph([(n, .join(self.nlabels[n].keys()))], []))
for d in range(2, (max(nodeNumbers) + 1)):
newSubsS = set([])
newSubsL = []
for sub in subStruct:
le = getEdgesToNeighbours(sub, self.elabels.keys())
for (f, to) in le:
new = sub.union([to])
lnew = list(new)
lnew.sort()
snew = ','.join(lnew)
if (not (snew in newSubsS)):
newSubsS.add(snew)
newSubsL.append(new)
if (d in nodeNumbers):
(yield self.getSubSmallGraph(new))
subStruct = newSubsL<|docstring|>Return an iterator which gives all substructures with n nodes
n belonging to the list depths<|endoftext|>
|
1bee5be3a6776b90115b4b9bf2fd858087c5a3d5f89dcc405ce75c2f0873fb8b
|
def getSubSmallGraph(self, nodelist):
'Return the small graph with the primitives in nodelist and all edges \n\t\tbetween them. The used label is the merged list of labels from nodes/edges'
sg = smallGraph.SmallGraph()
for n in nodelist:
sg.nodes[n] = self.nlabels[n].keys()
for e in getEdgesBetweenThem(nodelist, self.elabels.keys()):
sg.edges[e] = self.elabels[e].keys()
return sg
|
Return the small graph with the primitives in nodelist and all edges
between them. The used label is the merged list of labels from nodes/edges
|
src/lg.py
|
getSubSmallGraph
|
CurrenWong/lgeval
| 1
|
python
|
def getSubSmallGraph(self, nodelist):
'Return the small graph with the primitives in nodelist and all edges \n\t\tbetween them. The used label is the merged list of labels from nodes/edges'
sg = smallGraph.SmallGraph()
for n in nodelist:
sg.nodes[n] = self.nlabels[n].keys()
for e in getEdgesBetweenThem(nodelist, self.elabels.keys()):
sg.edges[e] = self.elabels[e].keys()
return sg
|
def getSubSmallGraph(self, nodelist):
'Return the small graph with the primitives in nodelist and all edges \n\t\tbetween them. The used label is the merged list of labels from nodes/edges'
sg = smallGraph.SmallGraph()
for n in nodelist:
sg.nodes[n] = self.nlabels[n].keys()
for e in getEdgesBetweenThem(nodelist, self.elabels.keys()):
sg.edges[e] = self.elabels[e].keys()
return sg<|docstring|>Return the small graph with the primitives in nodelist and all edges
between them. The used label is the merged list of labels from nodes/edges<|endoftext|>
|
cb94903731a1095aaa2681aca8558acd7fae5763b8b5254d1d6935ee78ee3862
|
def compareSubStruct(self, olg, depths):
'Return the list of couple of substructure which disagree\n\t\tthe substructure from self are used as references'
allerrors = []
for struc in olg.subStructIterator(depths):
sg1 = self.getSubSmallGraph(struc.nodes.keys())
if (not (struc == sg1)):
allerrors.append((struc, sg1))
return allerrors
|
Return the list of couple of substructure which disagree
the substructure from self are used as references
|
src/lg.py
|
compareSubStruct
|
CurrenWong/lgeval
| 1
|
python
|
def compareSubStruct(self, olg, depths):
'Return the list of couple of substructure which disagree\n\t\tthe substructure from self are used as references'
allerrors = []
for struc in olg.subStructIterator(depths):
sg1 = self.getSubSmallGraph(struc.nodes.keys())
if (not (struc == sg1)):
allerrors.append((struc, sg1))
return allerrors
|
def compareSubStruct(self, olg, depths):
'Return the list of couple of substructure which disagree\n\t\tthe substructure from self are used as references'
allerrors = []
for struc in olg.subStructIterator(depths):
sg1 = self.getSubSmallGraph(struc.nodes.keys())
if (not (struc == sg1)):
allerrors.append((struc, sg1))
return allerrors<|docstring|>Return the list of couple of substructure which disagree
the substructure from self are used as references<|endoftext|>
|
efd2e58961a60cf5efc44d762ca6e10e23a251966860e3b4dceeaefc8a6e318d
|
def compareSegmentsStruct(self, lgGT, depths):
'Compute the number of differing segments, and record disagreements\n\t\tin a list. \n\t\tThe primitives in each subgraph should be of the same number and names\n\t\t(identifiers). Nodes are merged that have identical (label,value) pairs\n\t\ton nodes and all identical incoming and outgoing edges. If used for\n\t\tclassification evaluation, the ground-truth should be lgGT. The first\n\t\tkey value of the matrix is the lgGT obj structure, which gives the\n\t\tstructure of the corresponding primitives which is the key to get the\n\t\terror structure in self.'
(sp1, ps1, _, sre1) = self.segmentGraph()
(spGT, psGT, _, sreGT) = lgGT.segmentGraph()
segDiffs = set()
correctSegments = set()
for primitive in psGT.keys():
obj1Id = ps1[primitive][ps1[primitive].keys()[0]]
obj2Id = psGT[primitive][psGT[primitive].keys()[0]]
if ((not ('ABSENT' in self.nlabels[primitive])) and (not ('ABSENT' in lgGT.nlabels[primitive]))):
segPrimSet1 = sp1[obj1Id][0]
segPrimSet2 = spGT[obj2Id][0]
if (segPrimSet1 != segPrimSet2):
segDiffs.add((obj2Id, obj1Id))
else:
correctSegments.add(obj2Id)
elif (len(self.nlabels.keys()) > 1):
segDiffs.add((obj2Id, obj1Id))
for seg in correctSegments:
firstPrim = list(spGT[seg][0])[0]
(cost, diff) = self.cmpNodes(self.nlabels[firstPrim].keys(), lgGT.nlabels[firstPrim].keys())
segId1 = ps1[firstPrim][ps1[firstPrim].keys()[0]]
segId2 = psGT[firstPrim][psGT[firstPrim].keys()[0]]
if ((0, []) != (cost, diff)):
segDiffs.add((segId2, segId1))
allSegWithErr = set([p for (p, _) in segDiffs])
lgObj = Lg()
for (sid, lprim) in spGT.iteritems():
lgObj.nlabels[sid] = lgGT.nlabels[list(lprim[0])[0]]
segEdgeErr = set()
for thisPair in sreGT.keys():
thisParentIds = set(spGT[thisPair[0]][0])
thisChildIds = set(spGT[thisPair[1]][0])
lgObj.elabels[thisPair] = lgGT.elabels[(list(thisParentIds)[0], list(thisChildIds)[0])]
for parentId in thisParentIds:
for childId in thisChildIds:
if ((not ((parentId, childId) in self.elabels.keys())) or ((0, []) != self.cmpEdges(self.elabels[(parentId, childId)].keys(), lgGT.elabels[(parentId, childId)].keys()))):
segEdgeErr.add(thisPair)
continue
listOfAllError = []
for smg in lgObj.subStructIterator(depths):
showIt = False
if (len(set(smg.nodes.keys()).intersection(allSegWithErr)) > 0):
showIt = True
for pair in smg.edges.keys():
if (pair in segEdgeErr):
showIt = True
continue
if showIt:
allPrim = []
for s in smg.nodes.keys():
allPrim.extend(spGT[s][0])
smgPrim1 = self.getSubSmallGraph(allPrim)
smgPrimGT = lgGT.getSubSmallGraph(allPrim)
listOfAllError.append((smg, smgPrimGT, smgPrim1))
return listOfAllError
|
Compute the number of differing segments, and record disagreements
in a list.
The primitives in each subgraph should be of the same number and names
(identifiers). Nodes are merged that have identical (label,value) pairs
on nodes and all identical incoming and outgoing edges. If used for
classification evaluation, the ground-truth should be lgGT. The first
key value of the matrix is the lgGT obj structure, which gives the
structure of the corresponding primitives which is the key to get the
error structure in self.
|
src/lg.py
|
compareSegmentsStruct
|
CurrenWong/lgeval
| 1
|
python
|
def compareSegmentsStruct(self, lgGT, depths):
'Compute the number of differing segments, and record disagreements\n\t\tin a list. \n\t\tThe primitives in each subgraph should be of the same number and names\n\t\t(identifiers). Nodes are merged that have identical (label,value) pairs\n\t\ton nodes and all identical incoming and outgoing edges. If used for\n\t\tclassification evaluation, the ground-truth should be lgGT. The first\n\t\tkey value of the matrix is the lgGT obj structure, which gives the\n\t\tstructure of the corresponding primitives which is the key to get the\n\t\terror structure in self.'
(sp1, ps1, _, sre1) = self.segmentGraph()
(spGT, psGT, _, sreGT) = lgGT.segmentGraph()
segDiffs = set()
correctSegments = set()
for primitive in psGT.keys():
obj1Id = ps1[primitive][ps1[primitive].keys()[0]]
obj2Id = psGT[primitive][psGT[primitive].keys()[0]]
if ((not ('ABSENT' in self.nlabels[primitive])) and (not ('ABSENT' in lgGT.nlabels[primitive]))):
segPrimSet1 = sp1[obj1Id][0]
segPrimSet2 = spGT[obj2Id][0]
if (segPrimSet1 != segPrimSet2):
segDiffs.add((obj2Id, obj1Id))
else:
correctSegments.add(obj2Id)
elif (len(self.nlabels.keys()) > 1):
segDiffs.add((obj2Id, obj1Id))
for seg in correctSegments:
firstPrim = list(spGT[seg][0])[0]
(cost, diff) = self.cmpNodes(self.nlabels[firstPrim].keys(), lgGT.nlabels[firstPrim].keys())
segId1 = ps1[firstPrim][ps1[firstPrim].keys()[0]]
segId2 = psGT[firstPrim][psGT[firstPrim].keys()[0]]
if ((0, []) != (cost, diff)):
segDiffs.add((segId2, segId1))
allSegWithErr = set([p for (p, _) in segDiffs])
lgObj = Lg()
for (sid, lprim) in spGT.iteritems():
lgObj.nlabels[sid] = lgGT.nlabels[list(lprim[0])[0]]
segEdgeErr = set()
for thisPair in sreGT.keys():
thisParentIds = set(spGT[thisPair[0]][0])
thisChildIds = set(spGT[thisPair[1]][0])
lgObj.elabels[thisPair] = lgGT.elabels[(list(thisParentIds)[0], list(thisChildIds)[0])]
for parentId in thisParentIds:
for childId in thisChildIds:
if ((not ((parentId, childId) in self.elabels.keys())) or ((0, []) != self.cmpEdges(self.elabels[(parentId, childId)].keys(), lgGT.elabels[(parentId, childId)].keys()))):
segEdgeErr.add(thisPair)
continue
listOfAllError = []
for smg in lgObj.subStructIterator(depths):
showIt = False
if (len(set(smg.nodes.keys()).intersection(allSegWithErr)) > 0):
showIt = True
for pair in smg.edges.keys():
if (pair in segEdgeErr):
showIt = True
continue
if showIt:
allPrim = []
for s in smg.nodes.keys():
allPrim.extend(spGT[s][0])
smgPrim1 = self.getSubSmallGraph(allPrim)
smgPrimGT = lgGT.getSubSmallGraph(allPrim)
listOfAllError.append((smg, smgPrimGT, smgPrim1))
return listOfAllError
|
def compareSegmentsStruct(self, lgGT, depths):
'Compute the number of differing segments, and record disagreements\n\t\tin a list. \n\t\tThe primitives in each subgraph should be of the same number and names\n\t\t(identifiers). Nodes are merged that have identical (label,value) pairs\n\t\ton nodes and all identical incoming and outgoing edges. If used for\n\t\tclassification evaluation, the ground-truth should be lgGT. The first\n\t\tkey value of the matrix is the lgGT obj structure, which gives the\n\t\tstructure of the corresponding primitives which is the key to get the\n\t\terror structure in self.'
(sp1, ps1, _, sre1) = self.segmentGraph()
(spGT, psGT, _, sreGT) = lgGT.segmentGraph()
segDiffs = set()
correctSegments = set()
for primitive in psGT.keys():
obj1Id = ps1[primitive][ps1[primitive].keys()[0]]
obj2Id = psGT[primitive][psGT[primitive].keys()[0]]
if ((not ('ABSENT' in self.nlabels[primitive])) and (not ('ABSENT' in lgGT.nlabels[primitive]))):
segPrimSet1 = sp1[obj1Id][0]
segPrimSet2 = spGT[obj2Id][0]
if (segPrimSet1 != segPrimSet2):
segDiffs.add((obj2Id, obj1Id))
else:
correctSegments.add(obj2Id)
elif (len(self.nlabels.keys()) > 1):
segDiffs.add((obj2Id, obj1Id))
for seg in correctSegments:
firstPrim = list(spGT[seg][0])[0]
(cost, diff) = self.cmpNodes(self.nlabels[firstPrim].keys(), lgGT.nlabels[firstPrim].keys())
segId1 = ps1[firstPrim][ps1[firstPrim].keys()[0]]
segId2 = psGT[firstPrim][psGT[firstPrim].keys()[0]]
if ((0, []) != (cost, diff)):
segDiffs.add((segId2, segId1))
allSegWithErr = set([p for (p, _) in segDiffs])
lgObj = Lg()
for (sid, lprim) in spGT.iteritems():
lgObj.nlabels[sid] = lgGT.nlabels[list(lprim[0])[0]]
segEdgeErr = set()
for thisPair in sreGT.keys():
thisParentIds = set(spGT[thisPair[0]][0])
thisChildIds = set(spGT[thisPair[1]][0])
lgObj.elabels[thisPair] = lgGT.elabels[(list(thisParentIds)[0], list(thisChildIds)[0])]
for parentId in thisParentIds:
for childId in thisChildIds:
if ((not ((parentId, childId) in self.elabels.keys())) or ((0, []) != self.cmpEdges(self.elabels[(parentId, childId)].keys(), lgGT.elabels[(parentId, childId)].keys()))):
segEdgeErr.add(thisPair)
continue
listOfAllError = []
for smg in lgObj.subStructIterator(depths):
showIt = False
if (len(set(smg.nodes.keys()).intersection(allSegWithErr)) > 0):
showIt = True
for pair in smg.edges.keys():
if (pair in segEdgeErr):
showIt = True
continue
if showIt:
allPrim = []
for s in smg.nodes.keys():
allPrim.extend(spGT[s][0])
smgPrim1 = self.getSubSmallGraph(allPrim)
smgPrimGT = lgGT.getSubSmallGraph(allPrim)
listOfAllError.append((smg, smgPrimGT, smgPrim1))
return listOfAllError<|docstring|>Compute the number of differing segments, and record disagreements
in a list.
The primitives in each subgraph should be of the same number and names
(identifiers). Nodes are merged that have identical (label,value) pairs
on nodes and all identical incoming and outgoing edges. If used for
classification evaluation, the ground-truth should be lgGT. The first
key value of the matrix is the lgGT obj structure, which gives the
structure of the corresponding primitives which is the key to get the
error structure in self.<|endoftext|>
|
e3532f2c6a761cd01f5790d8b24f4498816c1df401a3b6b57328eaa807d6eebb
|
@pp.autoname
def test_crossing_arm(wg_width=0.5, r1=3.0, r2=1.1, taper_width=1.2, taper_length=3.4):
' crossing arm\n '
c = pp.Component()
(c << pp.c.ellipse(radii=(r1, r2), layer=LAYER.SLAB150))
xmax = (taper_length + (taper_width / 2))
h = (wg_width / 2)
taper_points = [((- xmax), h), (((- taper_width) / 2), (taper_width / 2)), ((taper_width / 2), (taper_width / 2)), (xmax, h), (xmax, (- h)), ((taper_width / 2), ((- taper_width) / 2)), (((- taper_width) / 2), ((- taper_width) / 2)), ((- xmax), (- h))]
c.add_polygon(taper_points, layer=LAYER.WG)
c.add_port(name='W0', midpoint=((- xmax), 0), orientation=180, width=wg_width, layer=LAYER.WG)
c.add_port(name='E0', midpoint=(xmax, 0), orientation=0, width=wg_width, layer=LAYER.WG)
return c
|
crossing arm
|
pp/samples/11_component_layout.py
|
test_crossing_arm
|
smartalecH/gdsfactory
| 16
|
python
|
@pp.autoname
def test_crossing_arm(wg_width=0.5, r1=3.0, r2=1.1, taper_width=1.2, taper_length=3.4):
' \n '
c = pp.Component()
(c << pp.c.ellipse(radii=(r1, r2), layer=LAYER.SLAB150))
xmax = (taper_length + (taper_width / 2))
h = (wg_width / 2)
taper_points = [((- xmax), h), (((- taper_width) / 2), (taper_width / 2)), ((taper_width / 2), (taper_width / 2)), (xmax, h), (xmax, (- h)), ((taper_width / 2), ((- taper_width) / 2)), (((- taper_width) / 2), ((- taper_width) / 2)), ((- xmax), (- h))]
c.add_polygon(taper_points, layer=LAYER.WG)
c.add_port(name='W0', midpoint=((- xmax), 0), orientation=180, width=wg_width, layer=LAYER.WG)
c.add_port(name='E0', midpoint=(xmax, 0), orientation=0, width=wg_width, layer=LAYER.WG)
return c
|
@pp.autoname
def test_crossing_arm(wg_width=0.5, r1=3.0, r2=1.1, taper_width=1.2, taper_length=3.4):
' \n '
c = pp.Component()
(c << pp.c.ellipse(radii=(r1, r2), layer=LAYER.SLAB150))
xmax = (taper_length + (taper_width / 2))
h = (wg_width / 2)
taper_points = [((- xmax), h), (((- taper_width) / 2), (taper_width / 2)), ((taper_width / 2), (taper_width / 2)), (xmax, h), (xmax, (- h)), ((taper_width / 2), ((- taper_width) / 2)), (((- taper_width) / 2), ((- taper_width) / 2)), ((- xmax), (- h))]
c.add_polygon(taper_points, layer=LAYER.WG)
c.add_port(name='W0', midpoint=((- xmax), 0), orientation=180, width=wg_width, layer=LAYER.WG)
c.add_port(name='E0', midpoint=(xmax, 0), orientation=0, width=wg_width, layer=LAYER.WG)
return c<|docstring|>crossing arm<|endoftext|>
|
08324922500a7aeb0f5289aa3bb0bbac93c5e2593b5bfdae96d3b167dfe11336
|
def on_start(self):
' \n on_start is called when a Locust start before any task is scheduled\n '
(m_email, m_password) = self.get_user_credentials()
time.sleep(random.randint(0, self.MAX_LOGIN_TIME))
self.log_user(m_email, m_password)
self.lifecycle()
|
on_start is called when a Locust start before any task is scheduled
|
tools/testing/lifecycle.py
|
on_start
|
AETT-UA/ws_deployment
| 4
|
python
|
def on_start(self):
' \n \n '
(m_email, m_password) = self.get_user_credentials()
time.sleep(random.randint(0, self.MAX_LOGIN_TIME))
self.log_user(m_email, m_password)
self.lifecycle()
|
def on_start(self):
' \n \n '
(m_email, m_password) = self.get_user_credentials()
time.sleep(random.randint(0, self.MAX_LOGIN_TIME))
self.log_user(m_email, m_password)
self.lifecycle()<|docstring|>on_start is called when a Locust start before any task is scheduled<|endoftext|>
|
be332e16aab3a676cefc79fe96d7188d62ca1ba3c4583580900b390909bf44d1
|
def lifecycle(self):
' \n lifecycle of a class\n '
global num_curr_users
global user_pool_size
self.create_attendance_sheet()
teacher_refreshing_attendances_table_thread = threading.Thread(target=self.refresh_students_table, args=(self.REGISTRATION_STUDENT_TIME_LIMIT_SECONDS,))
teacher_refreshing_attendances_table_thread.start()
student_registers_threads = []
for _ in range(self.NUM_STUDENTS):
tmp_thread = threading.Thread(target=self.student_executes_register, args=())
tmp_thread.start()
student_registers_threads.append(tmp_thread)
teacher_registers_threads = []
if (random.random() < 0.5):
for _ in range(self.NUM_STUDENTS_REGISTERED_BY_TEACHER):
tmp_thread = threading.Thread(target=self.teacher_manually_registers_a_student, args=())
tmp_thread.start()
teacher_registers_threads.append(tmp_thread)
for tmp_thread in student_registers_threads:
tmp_thread.join()
for tmp_thread in teacher_registers_threads:
tmp_thread.join()
if (random.random() < 0.1):
self.teacher_manually_deletes_some_students()
self.close_attendance_sheet()
teacher_refreshing_attendances_table_thread.join()
self.consult_attendance_sheet()
teacher_refreshing_attendances_table_thread = threading.Thread(target=self.refresh_students_table, args=(self.ATTENDANCE_SHEET_CONSULTATION_DURATION_SECONDS,))
teacher_refreshing_attendances_table_thread.start()
teacher_refreshing_attendances_table_thread.join()
num_curr_users += 1
|
lifecycle of a class
|
tools/testing/lifecycle.py
|
lifecycle
|
AETT-UA/ws_deployment
| 4
|
python
|
def lifecycle(self):
' \n \n '
global num_curr_users
global user_pool_size
self.create_attendance_sheet()
teacher_refreshing_attendances_table_thread = threading.Thread(target=self.refresh_students_table, args=(self.REGISTRATION_STUDENT_TIME_LIMIT_SECONDS,))
teacher_refreshing_attendances_table_thread.start()
student_registers_threads = []
for _ in range(self.NUM_STUDENTS):
tmp_thread = threading.Thread(target=self.student_executes_register, args=())
tmp_thread.start()
student_registers_threads.append(tmp_thread)
teacher_registers_threads = []
if (random.random() < 0.5):
for _ in range(self.NUM_STUDENTS_REGISTERED_BY_TEACHER):
tmp_thread = threading.Thread(target=self.teacher_manually_registers_a_student, args=())
tmp_thread.start()
teacher_registers_threads.append(tmp_thread)
for tmp_thread in student_registers_threads:
tmp_thread.join()
for tmp_thread in teacher_registers_threads:
tmp_thread.join()
if (random.random() < 0.1):
self.teacher_manually_deletes_some_students()
self.close_attendance_sheet()
teacher_refreshing_attendances_table_thread.join()
self.consult_attendance_sheet()
teacher_refreshing_attendances_table_thread = threading.Thread(target=self.refresh_students_table, args=(self.ATTENDANCE_SHEET_CONSULTATION_DURATION_SECONDS,))
teacher_refreshing_attendances_table_thread.start()
teacher_refreshing_attendances_table_thread.join()
num_curr_users += 1
|
def lifecycle(self):
' \n \n '
global num_curr_users
global user_pool_size
self.create_attendance_sheet()
teacher_refreshing_attendances_table_thread = threading.Thread(target=self.refresh_students_table, args=(self.REGISTRATION_STUDENT_TIME_LIMIT_SECONDS,))
teacher_refreshing_attendances_table_thread.start()
student_registers_threads = []
for _ in range(self.NUM_STUDENTS):
tmp_thread = threading.Thread(target=self.student_executes_register, args=())
tmp_thread.start()
student_registers_threads.append(tmp_thread)
teacher_registers_threads = []
if (random.random() < 0.5):
for _ in range(self.NUM_STUDENTS_REGISTERED_BY_TEACHER):
tmp_thread = threading.Thread(target=self.teacher_manually_registers_a_student, args=())
tmp_thread.start()
teacher_registers_threads.append(tmp_thread)
for tmp_thread in student_registers_threads:
tmp_thread.join()
for tmp_thread in teacher_registers_threads:
tmp_thread.join()
if (random.random() < 0.1):
self.teacher_manually_deletes_some_students()
self.close_attendance_sheet()
teacher_refreshing_attendances_table_thread.join()
self.consult_attendance_sheet()
teacher_refreshing_attendances_table_thread = threading.Thread(target=self.refresh_students_table, args=(self.ATTENDANCE_SHEET_CONSULTATION_DURATION_SECONDS,))
teacher_refreshing_attendances_table_thread.start()
teacher_refreshing_attendances_table_thread.join()
num_curr_users += 1<|docstring|>lifecycle of a class<|endoftext|>
|
4aa605c3380f94ff74e546669633ae17cfa5b6d6b3e5047402b3faf6afa688f1
|
def check_option_r(question_label: Label, option_1: Button, option_2: Button, user_choice):
' Check if the user has selected the correct answer.'
question_label.destroy()
option_1.destroy()
option_2.destroy()
if reasoning.check_answer(user_choice):
reasoning.score += 1
show_phase()
|
Check if the user has selected the correct answer.
|
main.py
|
check_option_r
|
DanielM24/GIA-Practice-Tests
| 0
|
python
|
def check_option_r(question_label: Label, option_1: Button, option_2: Button, user_choice):
' '
question_label.destroy()
option_1.destroy()
option_2.destroy()
if reasoning.check_answer(user_choice):
reasoning.score += 1
show_phase()
|
def check_option_r(question_label: Label, option_1: Button, option_2: Button, user_choice):
' '
question_label.destroy()
option_1.destroy()
option_2.destroy()
if reasoning.check_answer(user_choice):
reasoning.score += 1
show_phase()<|docstring|>Check if the user has selected the correct answer.<|endoftext|>
|
73ddeb55fbff3b97533b0a4cddfd1cf73fae710c2fe570dad299747345ddad87
|
def check_option_ps(letters_label: Label, user_choice):
' Check if the user has selected the correct answer.'
if perceptual_speed.check_answer(user_choice):
perceptual_speed.score += 1
show_letters(letters_label)
|
Check if the user has selected the correct answer.
|
main.py
|
check_option_ps
|
DanielM24/GIA-Practice-Tests
| 0
|
python
|
def check_option_ps(letters_label: Label, user_choice):
' '
if perceptual_speed.check_answer(user_choice):
perceptual_speed.score += 1
show_letters(letters_label)
|
def check_option_ps(letters_label: Label, user_choice):
' '
if perceptual_speed.check_answer(user_choice):
perceptual_speed.score += 1
show_letters(letters_label)<|docstring|>Check if the user has selected the correct answer.<|endoftext|>
|
2ff1c46c1fb31c16193cea1fe1b1ce3e90973d50e1fc8e0775be3ae15c111cc9
|
def check_option_nsp(option_0: Button, option_1: Button, option_2: Button, user_choice):
' Check if the user has selected the correct answer.'
if number_speed.check_answer(user_choice):
number_speed.score += 1
show_numbers(option_0, option_1, option_2)
|
Check if the user has selected the correct answer.
|
main.py
|
check_option_nsp
|
DanielM24/GIA-Practice-Tests
| 0
|
python
|
def check_option_nsp(option_0: Button, option_1: Button, option_2: Button, user_choice):
' '
if number_speed.check_answer(user_choice):
number_speed.score += 1
show_numbers(option_0, option_1, option_2)
|
def check_option_nsp(option_0: Button, option_1: Button, option_2: Button, user_choice):
' '
if number_speed.check_answer(user_choice):
number_speed.score += 1
show_numbers(option_0, option_1, option_2)<|docstring|>Check if the user has selected the correct answer.<|endoftext|>
|
6462d7c057c63cf973e57250c9445f1beef21022f9b14f6556e7882a286eb47f
|
def check_option_wm(option_0: Button, option_1: Button, option_2: Button, user_choice):
' Check if the user has selected the correct answer.'
if word_meaning.check_answer(user_choice):
word_meaning.score += 1
show_words(option_0, option_1, option_2)
|
Check if the user has selected the correct answer.
|
main.py
|
check_option_wm
|
DanielM24/GIA-Practice-Tests
| 0
|
python
|
def check_option_wm(option_0: Button, option_1: Button, option_2: Button, user_choice):
' '
if word_meaning.check_answer(user_choice):
word_meaning.score += 1
show_words(option_0, option_1, option_2)
|
def check_option_wm(option_0: Button, option_1: Button, option_2: Button, user_choice):
' '
if word_meaning.check_answer(user_choice):
word_meaning.score += 1
show_words(option_0, option_1, option_2)<|docstring|>Check if the user has selected the correct answer.<|endoftext|>
|
e77c68e1fcc5949a0fc125ab067657c4c3da1f308a95e992fe77b29d702c8299
|
def check_option_sv(pairs: list[Label], user_choice):
' Check if the user has selected the correct answer.'
global image_pairs
if spatial_visualisation.check_answer(user_choice):
spatial_visualisation.score += 1
spatial_visualisation.add_report(image_pairs)
show_images(pairs)
|
Check if the user has selected the correct answer.
|
main.py
|
check_option_sv
|
DanielM24/GIA-Practice-Tests
| 0
|
python
|
def check_option_sv(pairs: list[Label], user_choice):
' '
global image_pairs
if spatial_visualisation.check_answer(user_choice):
spatial_visualisation.score += 1
spatial_visualisation.add_report(image_pairs)
show_images(pairs)
|
def check_option_sv(pairs: list[Label], user_choice):
' '
global image_pairs
if spatial_visualisation.check_answer(user_choice):
spatial_visualisation.score += 1
spatial_visualisation.add_report(image_pairs)
show_images(pairs)<|docstring|>Check if the user has selected the correct answer.<|endoftext|>
|
88c0624de13b92a70aa08a4842ccaa1fddd6480b55d604fa0c17e236c7f32678
|
def draw_image(side, angle):
'Generates a PIL Image of a drawn R with a given side and angle'
letter_font = ImageFont.truetype('verdana.ttf', 80)
image = Image.new(mode='RGB', size=(100, 100), color='white')
draw = ImageDraw.Draw(image)
draw.text((20, 1), 'R', font=letter_font, fill='black', align='center', stroke_width=1, stroke_fill='black')
image = image.rotate(angle)
if (side == 1):
image = image.transpose(method=Image.FLIP_LEFT_RIGHT)
return image
|
Generates a PIL Image of a drawn R with a given side and angle
|
main.py
|
draw_image
|
DanielM24/GIA-Practice-Tests
| 0
|
python
|
def draw_image(side, angle):
letter_font = ImageFont.truetype('verdana.ttf', 80)
image = Image.new(mode='RGB', size=(100, 100), color='white')
draw = ImageDraw.Draw(image)
draw.text((20, 1), 'R', font=letter_font, fill='black', align='center', stroke_width=1, stroke_fill='black')
image = image.rotate(angle)
if (side == 1):
image = image.transpose(method=Image.FLIP_LEFT_RIGHT)
return image
|
def draw_image(side, angle):
letter_font = ImageFont.truetype('verdana.ttf', 80)
image = Image.new(mode='RGB', size=(100, 100), color='white')
draw = ImageDraw.Draw(image)
draw.text((20, 1), 'R', font=letter_font, fill='black', align='center', stroke_width=1, stroke_fill='black')
image = image.rotate(angle)
if (side == 1):
image = image.transpose(method=Image.FLIP_LEFT_RIGHT)
return image<|docstring|>Generates a PIL Image of a drawn R with a given side and angle<|endoftext|>
|
afd5d9e7ce156272a167f9ed0f442ca57f193a98783497cda94c2d04e0d796ae
|
def get_pairs_image(images: list):
' Function which generates a picture of both pairs. '
global image_pairs
pairs_image_rows = []
for index in range(2):
pairs_image_row = np.hstack([images[index], images[(index + 2)]])
pairs_image_rows.append(pairs_image_row)
image_pairs = np.vstack([i for i in pairs_image_rows])
image_pairs = PIL.Image.fromarray(image_pairs)
image_pairs = image_pairs.resize((75, 75), resample=0)
|
Function which generates a picture of both pairs.
|
main.py
|
get_pairs_image
|
DanielM24/GIA-Practice-Tests
| 0
|
python
|
def get_pairs_image(images: list):
' '
global image_pairs
pairs_image_rows = []
for index in range(2):
pairs_image_row = np.hstack([images[index], images[(index + 2)]])
pairs_image_rows.append(pairs_image_row)
image_pairs = np.vstack([i for i in pairs_image_rows])
image_pairs = PIL.Image.fromarray(image_pairs)
image_pairs = image_pairs.resize((75, 75), resample=0)
|
def get_pairs_image(images: list):
' '
global image_pairs
pairs_image_rows = []
for index in range(2):
pairs_image_row = np.hstack([images[index], images[(index + 2)]])
pairs_image_rows.append(pairs_image_row)
image_pairs = np.vstack([i for i in pairs_image_rows])
image_pairs = PIL.Image.fromarray(image_pairs)
image_pairs = image_pairs.resize((75, 75), resample=0)<|docstring|>Function which generates a picture of both pairs.<|endoftext|>
|
14c391e2ef3628064dcfb56452b278a5ca5cf5c98ad0dc481f4fb114ba4e6d31
|
def __init__(self):
'Initialize hash table.'
self._size = 1024
self._table = ([None] * self._size)
|
Initialize hash table.
|
src/hash.py
|
__init__
|
regenalgrant/datastructures
| 0
|
python
|
def __init__(self):
self._size = 1024
self._table = ([None] * self._size)
|
def __init__(self):
self._size = 1024
self._table = ([None] * self._size)<|docstring|>Initialize hash table.<|endoftext|>
|
93039aa0ab45a6d5594abf95b30ef3e133320212c8cd5c067e4b7260ab211d31
|
def get(self, key):
'Look item up in table.'
if (not isinstance(key, str)):
raise TypeError(key, type(key), 'Key must be a string!')
hashed = self._hashing(key, self._size)
if self._table[hashed]:
for i in self._table[hashed]:
if (i[0] == hashed):
return i[1]
raise KeyError(key)
|
Look item up in table.
|
src/hash.py
|
get
|
regenalgrant/datastructures
| 0
|
python
|
def get(self, key):
if (not isinstance(key, str)):
raise TypeError(key, type(key), 'Key must be a string!')
hashed = self._hashing(key, self._size)
if self._table[hashed]:
for i in self._table[hashed]:
if (i[0] == hashed):
return i[1]
raise KeyError(key)
|
def get(self, key):
if (not isinstance(key, str)):
raise TypeError(key, type(key), 'Key must be a string!')
hashed = self._hashing(key, self._size)
if self._table[hashed]:
for i in self._table[hashed]:
if (i[0] == hashed):
return i[1]
raise KeyError(key)<|docstring|>Look item up in table.<|endoftext|>
|
caf11d0c396ea3f9d21f775b336cd63457c11ba72f623d1800f12adfd2bdda54
|
def set(self, key, val):
'Insert into table.'
hashed = self._hashing(key, self._size)
if self._table[hashed]:
self._table[hashed].append((hashed, val))
else:
self._table[hashed] = [(hashed, val)]
|
Insert into table.
|
src/hash.py
|
set
|
regenalgrant/datastructures
| 0
|
python
|
def set(self, key, val):
hashed = self._hashing(key, self._size)
if self._table[hashed]:
self._table[hashed].append((hashed, val))
else:
self._table[hashed] = [(hashed, val)]
|
def set(self, key, val):
hashed = self._hashing(key, self._size)
if self._table[hashed]:
self._table[hashed].append((hashed, val))
else:
self._table[hashed] = [(hashed, val)]<|docstring|>Insert into table.<|endoftext|>
|
65e926b11f065a82806004817d85f46831e9022d576803a71d18c1ba7369e2a9
|
def _hashing(self, string, table_size):
'Hash function.'
summed = 0
for i in range(len(string)):
summed = (summed + ord(string[i]))
return (summed % table_size)
|
Hash function.
|
src/hash.py
|
_hashing
|
regenalgrant/datastructures
| 0
|
python
|
def _hashing(self, string, table_size):
summed = 0
for i in range(len(string)):
summed = (summed + ord(string[i]))
return (summed % table_size)
|
def _hashing(self, string, table_size):
summed = 0
for i in range(len(string)):
summed = (summed + ord(string[i]))
return (summed % table_size)<|docstring|>Hash function.<|endoftext|>
|
df316a4d15d809993a1681f42bbaa052b4a982004ba290f2f5d5faddaf448639
|
def form_to_dict(form):
'Helper function useful for tests, receives a django form and returns a dict so it can be used in client.post\n :type form: django.forms.ModelForm\n :rtype dict\n '
return {f: (form.fields[f].initial if form.fields[f].initial else '') for f in form.fields}
|
Helper function useful for tests, receives a django form and returns a dict so it can be used in client.post
:type form: django.forms.ModelForm
:rtype dict
|
anaf/test.py
|
form_to_dict
|
tovmeod/anaf
| 2
|
python
|
def form_to_dict(form):
'Helper function useful for tests, receives a django form and returns a dict so it can be used in client.post\n :type form: django.forms.ModelForm\n :rtype dict\n '
return {f: (form.fields[f].initial if form.fields[f].initial else ) for f in form.fields}
|
def form_to_dict(form):
'Helper function useful for tests, receives a django form and returns a dict so it can be used in client.post\n :type form: django.forms.ModelForm\n :rtype dict\n '
return {f: (form.fields[f].initial if form.fields[f].initial else ) for f in form.fields}<|docstring|>Helper function useful for tests, receives a django form and returns a dict so it can be used in client.post
:type form: django.forms.ModelForm
:rtype dict<|endoftext|>
|
c503d2bf3b6c75ff7c83c6ee3ef06d3d866263c278db3dd3e8e8e2e317211218
|
def cmpDataApi(self, old, new, fieldname='root'):
"\n Compares data using the old API with data retrieved with the new\n They don't need to be equivalent, the new API may return at least the data the old API was able to and may add\n :param str or dict or list old: content retrieved using the old API\n :param str or dict or list new: content retrieved using the new DRF API\n :return bool: is it kosher?\n "
if isinstance(old, six.string_types):
old = json.loads(old)
if isinstance(new, six.string_types):
new = json.loads(new)
if (isinstance(old, dict) and isinstance(new, dict)):
for (k, v) in sorted(old.items()):
if (k == 'resource_uri'):
continue
assert (k in new), 'Field {}.{} not found on new.\nold:{}\nnew:{}'.format(fieldname, k, old, new)
assert isinstance(v, type(new[k])), 'Field {}.{} exists but have different content type.\nold:{}\nnew:{}'.format(fieldname, k, v, new[k])
if isinstance(v, dict):
self.cmpDataApi(v, new[k], '{}.{}'.format(fieldname, k))
elif isinstance(v, six.string_types):
assert (v == new[k]), 'Field {}.{} exists but have different value.\nold:{}\nnew:{}'.format(fieldname, k, v, new[k])
else:
assert (v == new[k])
elif (isinstance(old, list) and isinstance(new, list)):
old.sort(key=(lambda x: x['id']))
new.sort(key=(lambda x: x['id']))
for (i, v) in enumerate(old):
self.cmpDataApi(v, new[i], str(i))
else:
assert False, 'old and new have different types'
|
Compares data using the old API with data retrieved with the new
They don't need to be equivalent, the new API may return at least the data the old API was able to and may add
:param str or dict or list old: content retrieved using the old API
:param str or dict or list new: content retrieved using the new DRF API
:return bool: is it kosher?
|
anaf/test.py
|
cmpDataApi
|
tovmeod/anaf
| 2
|
python
|
def cmpDataApi(self, old, new, fieldname='root'):
"\n Compares data using the old API with data retrieved with the new\n They don't need to be equivalent, the new API may return at least the data the old API was able to and may add\n :param str or dict or list old: content retrieved using the old API\n :param str or dict or list new: content retrieved using the new DRF API\n :return bool: is it kosher?\n "
if isinstance(old, six.string_types):
old = json.loads(old)
if isinstance(new, six.string_types):
new = json.loads(new)
if (isinstance(old, dict) and isinstance(new, dict)):
for (k, v) in sorted(old.items()):
if (k == 'resource_uri'):
continue
assert (k in new), 'Field {}.{} not found on new.\nold:{}\nnew:{}'.format(fieldname, k, old, new)
assert isinstance(v, type(new[k])), 'Field {}.{} exists but have different content type.\nold:{}\nnew:{}'.format(fieldname, k, v, new[k])
if isinstance(v, dict):
self.cmpDataApi(v, new[k], '{}.{}'.format(fieldname, k))
elif isinstance(v, six.string_types):
assert (v == new[k]), 'Field {}.{} exists but have different value.\nold:{}\nnew:{}'.format(fieldname, k, v, new[k])
else:
assert (v == new[k])
elif (isinstance(old, list) and isinstance(new, list)):
old.sort(key=(lambda x: x['id']))
new.sort(key=(lambda x: x['id']))
for (i, v) in enumerate(old):
self.cmpDataApi(v, new[i], str(i))
else:
assert False, 'old and new have different types'
|
def cmpDataApi(self, old, new, fieldname='root'):
"\n Compares data using the old API with data retrieved with the new\n They don't need to be equivalent, the new API may return at least the data the old API was able to and may add\n :param str or dict or list old: content retrieved using the old API\n :param str or dict or list new: content retrieved using the new DRF API\n :return bool: is it kosher?\n "
if isinstance(old, six.string_types):
old = json.loads(old)
if isinstance(new, six.string_types):
new = json.loads(new)
if (isinstance(old, dict) and isinstance(new, dict)):
for (k, v) in sorted(old.items()):
if (k == 'resource_uri'):
continue
assert (k in new), 'Field {}.{} not found on new.\nold:{}\nnew:{}'.format(fieldname, k, old, new)
assert isinstance(v, type(new[k])), 'Field {}.{} exists but have different content type.\nold:{}\nnew:{}'.format(fieldname, k, v, new[k])
if isinstance(v, dict):
self.cmpDataApi(v, new[k], '{}.{}'.format(fieldname, k))
elif isinstance(v, six.string_types):
assert (v == new[k]), 'Field {}.{} exists but have different value.\nold:{}\nnew:{}'.format(fieldname, k, v, new[k])
else:
assert (v == new[k])
elif (isinstance(old, list) and isinstance(new, list)):
old.sort(key=(lambda x: x['id']))
new.sort(key=(lambda x: x['id']))
for (i, v) in enumerate(old):
self.cmpDataApi(v, new[i], str(i))
else:
assert False, 'old and new have different types'<|docstring|>Compares data using the old API with data retrieved with the new
They don't need to be equivalent, the new API may return at least the data the old API was able to and may add
:param str or dict or list old: content retrieved using the old API
:param str or dict or list new: content retrieved using the new DRF API
:return bool: is it kosher?<|endoftext|>
|
c646d081e2865a19e6abc9bf2d0cd291b2f3923ed6bc4d084bb65228bff6b8f8
|
def run(self):
'\n Sets up the live server and databases, and then loops over handling\n http requests.\n '
if self.connections_override:
for (alias, conn) in self.connections_override.items():
connections[alias] = conn
try:
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
for (index, port) in enumerate(self.possible_ports):
try:
WSGITestRequestHandler.logs = self.logs
self.httpd = WSGIServer((self.host, port), WSGITestRequestHandler)
except socket.error as e:
if (((index + 1) < len(self.possible_ports)) and (e.errno == errno.EADDRINUSE)):
continue
else:
raise
else:
self.port = port
break
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
|
Sets up the live server and databases, and then loops over handling
http requests.
|
anaf/test.py
|
run
|
tovmeod/anaf
| 2
|
python
|
def run(self):
'\n Sets up the live server and databases, and then loops over handling\n http requests.\n '
if self.connections_override:
for (alias, conn) in self.connections_override.items():
connections[alias] = conn
try:
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
for (index, port) in enumerate(self.possible_ports):
try:
WSGITestRequestHandler.logs = self.logs
self.httpd = WSGIServer((self.host, port), WSGITestRequestHandler)
except socket.error as e:
if (((index + 1) < len(self.possible_ports)) and (e.errno == errno.EADDRINUSE)):
continue
else:
raise
else:
self.port = port
break
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()
|
def run(self):
'\n Sets up the live server and databases, and then loops over handling\n http requests.\n '
if self.connections_override:
for (alias, conn) in self.connections_override.items():
connections[alias] = conn
try:
handler = self.static_handler(_MediaFilesHandler(WSGIHandler()))
for (index, port) in enumerate(self.possible_ports):
try:
WSGITestRequestHandler.logs = self.logs
self.httpd = WSGIServer((self.host, port), WSGITestRequestHandler)
except socket.error as e:
if (((index + 1) < len(self.possible_ports)) and (e.errno == errno.EADDRINUSE)):
continue
else:
raise
else:
self.port = port
break
self.httpd.set_app(handler)
self.is_ready.set()
self.httpd.serve_forever()
except Exception as e:
self.error = e
self.is_ready.set()<|docstring|>Sets up the live server and databases, and then loops over handling
http requests.<|endoftext|>
|
43b24964c53af9a709cda6e1fe69bb21b95af69560b38cfbe3230b63cd1eaa04
|
def _ifailed(self):
'Call this on tearDown, check if it was the last run test that failed\n '
return (not (sys.exc_info() == (None, None, None)))
|
Call this on tearDown, check if it was the last run test that failed
|
anaf/test.py
|
_ifailed
|
tovmeod/anaf
| 2
|
python
|
def _ifailed(self):
'\n '
return (not (sys.exc_info() == (None, None, None)))
|
def _ifailed(self):
'\n '
return (not (sys.exc_info() == (None, None, None)))<|docstring|>Call this on tearDown, check if it was the last run test that failed<|endoftext|>
|
a26f6f9c39c63817c80beee956ef33fb2cb85c7b38e7157d2f53701d3067cd5e
|
def get(self, url_name):
'Get the page based on the viewname and wait it to load\n '
url = six.moves.urllib.parse.urljoin(self.live_server_url, ('#' + reverse(url_name)))
self.driver.get(url)
sleep(0.1)
self.wait_load()
|
Get the page based on the viewname and wait it to load
|
anaf/test.py
|
get
|
tovmeod/anaf
| 2
|
python
|
def get(self, url_name):
'\n '
url = six.moves.urllib.parse.urljoin(self.live_server_url, ('#' + reverse(url_name)))
self.driver.get(url)
sleep(0.1)
self.wait_load()
|
def get(self, url_name):
'\n '
url = six.moves.urllib.parse.urljoin(self.live_server_url, ('#' + reverse(url_name)))
self.driver.get(url)
sleep(0.1)
self.wait_load()<|docstring|>Get the page based on the viewname and wait it to load<|endoftext|>
|
7551f562a22a577dd860e0d7d0e69b82ce9c431d317d4cc881b6f5f713ae8ac4
|
def wait_load(self):
'wait for the #loading-splash and #loading-status to not be visible anymore\n '
self.wait_not_selector('#loading-splash')
self.wait_not_selector('#loading-status')
|
wait for the #loading-splash and #loading-status to not be visible anymore
|
anaf/test.py
|
wait_load
|
tovmeod/anaf
| 2
|
python
|
def wait_load(self):
'\n '
self.wait_not_selector('#loading-splash')
self.wait_not_selector('#loading-status')
|
def wait_load(self):
'\n '
self.wait_not_selector('#loading-splash')
self.wait_not_selector('#loading-status')<|docstring|>wait for the #loading-splash and #loading-status to not be visible anymore<|endoftext|>
|
d30e4fd3e9ba953006db049b5264871fd8bf5b19d143b07048575acbbe6f650e
|
def wait_until(self, callback, timeout=WAIT_TIMEOUT):
'\n Helper function that blocks the execution of the tests until the\n specified callback returns a value that is not falsy. This function can\n be called, for example, after clicking a link or submitting a form.\n See the other public methods that call this function for more details.\n '
WebDriverWait(self.driver, timeout).until(callback)
|
Helper function that blocks the execution of the tests until the
specified callback returns a value that is not falsy. This function can
be called, for example, after clicking a link or submitting a form.
See the other public methods that call this function for more details.
|
anaf/test.py
|
wait_until
|
tovmeod/anaf
| 2
|
python
|
def wait_until(self, callback, timeout=WAIT_TIMEOUT):
'\n Helper function that blocks the execution of the tests until the\n specified callback returns a value that is not falsy. This function can\n be called, for example, after clicking a link or submitting a form.\n See the other public methods that call this function for more details.\n '
WebDriverWait(self.driver, timeout).until(callback)
|
def wait_until(self, callback, timeout=WAIT_TIMEOUT):
'\n Helper function that blocks the execution of the tests until the\n specified callback returns a value that is not falsy. This function can\n be called, for example, after clicking a link or submitting a form.\n See the other public methods that call this function for more details.\n '
WebDriverWait(self.driver, timeout).until(callback)<|docstring|>Helper function that blocks the execution of the tests until the
specified callback returns a value that is not falsy. This function can
be called, for example, after clicking a link or submitting a form.
See the other public methods that call this function for more details.<|endoftext|>
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.