repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
umutbozkurt/django-rest-framework-mongoengine | rest_framework_mongoengine/serializers.py | DocumentSerializer.recursive_save | def recursive_save(self, validated_data, instance=None):
"""
Recursively traverses validated_data and creates EmbeddedDocuments
of the appropriate subtype from them.
Returns Mongonengine model instance.
"""
# me_data is an analogue of validated_data, but contains
# mongoengine EmbeddedDocument instances for nested data structures
# instead of OrderedDicts.
#
# For example:
# validated_data = {'id:, "1", 'embed': OrderedDict({'a': 'b'})}
# me_data = {'id': "1", 'embed': <EmbeddedDocument>}
me_data = dict()
for key, value in validated_data.items():
try:
field = self.fields[key]
# for EmbeddedDocumentSerializers, call recursive_save
if isinstance(field, EmbeddedDocumentSerializer):
me_data[key] = field.recursive_save(value)
# same for lists of EmbeddedDocumentSerializers i.e.
# ListField(EmbeddedDocumentField) or EmbeddedDocumentListField
elif ((isinstance(field, serializers.ListSerializer) or
isinstance(field, serializers.ListField)) and
isinstance(field.child, EmbeddedDocumentSerializer)):
me_data[key] = []
for datum in value:
me_data[key].append(field.child.recursive_save(datum))
# same for dicts of EmbeddedDocumentSerializers (or, speaking
# in Mongoengine terms, MapField(EmbeddedDocument(Embed))
elif (isinstance(field, drfm_fields.DictField) and
hasattr(field, "child") and
isinstance(field.child, EmbeddedDocumentSerializer)):
me_data[key] = {}
for datum_key, datum_value in value.items():
me_data[key][datum_key] = field.child.recursive_save(datum_value)
# for regular fields just set value
else:
me_data[key] = value
except KeyError: # this is dynamic data
me_data[key] = value
# create (if needed), save (if needed) and return mongoengine instance
if not instance:
instance = self.Meta.model(**me_data)
else:
for key, value in me_data.items():
setattr(instance, key, value)
if self._saving_instances:
instance.save()
return instance | python | def recursive_save(self, validated_data, instance=None):
"""
Recursively traverses validated_data and creates EmbeddedDocuments
of the appropriate subtype from them.
Returns Mongonengine model instance.
"""
# me_data is an analogue of validated_data, but contains
# mongoengine EmbeddedDocument instances for nested data structures
# instead of OrderedDicts.
#
# For example:
# validated_data = {'id:, "1", 'embed': OrderedDict({'a': 'b'})}
# me_data = {'id': "1", 'embed': <EmbeddedDocument>}
me_data = dict()
for key, value in validated_data.items():
try:
field = self.fields[key]
# for EmbeddedDocumentSerializers, call recursive_save
if isinstance(field, EmbeddedDocumentSerializer):
me_data[key] = field.recursive_save(value)
# same for lists of EmbeddedDocumentSerializers i.e.
# ListField(EmbeddedDocumentField) or EmbeddedDocumentListField
elif ((isinstance(field, serializers.ListSerializer) or
isinstance(field, serializers.ListField)) and
isinstance(field.child, EmbeddedDocumentSerializer)):
me_data[key] = []
for datum in value:
me_data[key].append(field.child.recursive_save(datum))
# same for dicts of EmbeddedDocumentSerializers (or, speaking
# in Mongoengine terms, MapField(EmbeddedDocument(Embed))
elif (isinstance(field, drfm_fields.DictField) and
hasattr(field, "child") and
isinstance(field.child, EmbeddedDocumentSerializer)):
me_data[key] = {}
for datum_key, datum_value in value.items():
me_data[key][datum_key] = field.child.recursive_save(datum_value)
# for regular fields just set value
else:
me_data[key] = value
except KeyError: # this is dynamic data
me_data[key] = value
# create (if needed), save (if needed) and return mongoengine instance
if not instance:
instance = self.Meta.model(**me_data)
else:
for key, value in me_data.items():
setattr(instance, key, value)
if self._saving_instances:
instance.save()
return instance | [
"def",
"recursive_save",
"(",
"self",
",",
"validated_data",
",",
"instance",
"=",
"None",
")",
":",
"# me_data is an analogue of validated_data, but contains",
"# mongoengine EmbeddedDocument instances for nested data structures",
"# instead of OrderedDicts.",
"#",
"# For example:",... | Recursively traverses validated_data and creates EmbeddedDocuments
of the appropriate subtype from them.
Returns Mongonengine model instance. | [
"Recursively",
"traverses",
"validated_data",
"and",
"creates",
"EmbeddedDocuments",
"of",
"the",
"appropriate",
"subtype",
"from",
"them",
"."
] | 2fe6de53907b31a5e8b742e4c6b728942b5fa4f0 | https://github.com/umutbozkurt/django-rest-framework-mongoengine/blob/2fe6de53907b31a5e8b742e4c6b728942b5fa4f0/rest_framework_mongoengine/serializers.py#L205-L264 | train | 202,400 |
umutbozkurt/django-rest-framework-mongoengine | rest_framework_mongoengine/serializers.py | DocumentSerializer.apply_customization | def apply_customization(self, serializer, customization):
"""
Applies fields customization to a nested or embedded DocumentSerializer.
"""
# apply fields or exclude
if customization.fields is not None:
if len(customization.fields) == 0:
# customization fields are empty, set Meta.fields to '__all__'
serializer.Meta.fields = ALL_FIELDS
else:
serializer.Meta.fields = customization.fields
if customization.exclude is not None:
serializer.Meta.exclude = customization.exclude
# apply extra_kwargs
if customization.extra_kwargs is not None:
serializer.Meta.extra_kwargs = customization.extra_kwargs
# apply validate_methods
for method_name, method in customization.validate_methods.items():
setattr(serializer, method_name, method) | python | def apply_customization(self, serializer, customization):
"""
Applies fields customization to a nested or embedded DocumentSerializer.
"""
# apply fields or exclude
if customization.fields is not None:
if len(customization.fields) == 0:
# customization fields are empty, set Meta.fields to '__all__'
serializer.Meta.fields = ALL_FIELDS
else:
serializer.Meta.fields = customization.fields
if customization.exclude is not None:
serializer.Meta.exclude = customization.exclude
# apply extra_kwargs
if customization.extra_kwargs is not None:
serializer.Meta.extra_kwargs = customization.extra_kwargs
# apply validate_methods
for method_name, method in customization.validate_methods.items():
setattr(serializer, method_name, method) | [
"def",
"apply_customization",
"(",
"self",
",",
"serializer",
",",
"customization",
")",
":",
"# apply fields or exclude",
"if",
"customization",
".",
"fields",
"is",
"not",
"None",
":",
"if",
"len",
"(",
"customization",
".",
"fields",
")",
"==",
"0",
":",
... | Applies fields customization to a nested or embedded DocumentSerializer. | [
"Applies",
"fields",
"customization",
"to",
"a",
"nested",
"or",
"embedded",
"DocumentSerializer",
"."
] | 2fe6de53907b31a5e8b742e4c6b728942b5fa4f0 | https://github.com/umutbozkurt/django-rest-framework-mongoengine/blob/2fe6de53907b31a5e8b742e4c6b728942b5fa4f0/rest_framework_mongoengine/serializers.py#L542-L562 | train | 202,401 |
umutbozkurt/django-rest-framework-mongoengine | rest_framework_mongoengine/serializers.py | DynamicDocumentSerializer.to_internal_value | def to_internal_value(self, data):
"""
Updates _validated_data with dynamic data, i.e. data,
not listed in fields.
"""
ret = super(DynamicDocumentSerializer, self).to_internal_value(data)
dynamic_data = self._get_dynamic_data(ret)
ret.update(dynamic_data)
return ret | python | def to_internal_value(self, data):
"""
Updates _validated_data with dynamic data, i.e. data,
not listed in fields.
"""
ret = super(DynamicDocumentSerializer, self).to_internal_value(data)
dynamic_data = self._get_dynamic_data(ret)
ret.update(dynamic_data)
return ret | [
"def",
"to_internal_value",
"(",
"self",
",",
"data",
")",
":",
"ret",
"=",
"super",
"(",
"DynamicDocumentSerializer",
",",
"self",
")",
".",
"to_internal_value",
"(",
"data",
")",
"dynamic_data",
"=",
"self",
".",
"_get_dynamic_data",
"(",
"ret",
")",
"ret"... | Updates _validated_data with dynamic data, i.e. data,
not listed in fields. | [
"Updates",
"_validated_data",
"with",
"dynamic",
"data",
"i",
".",
"e",
".",
"data",
"not",
"listed",
"in",
"fields",
"."
] | 2fe6de53907b31a5e8b742e4c6b728942b5fa4f0 | https://github.com/umutbozkurt/django-rest-framework-mongoengine/blob/2fe6de53907b31a5e8b742e4c6b728942b5fa4f0/rest_framework_mongoengine/serializers.py#L829-L837 | train | 202,402 |
umutbozkurt/django-rest-framework-mongoengine | rest_framework_mongoengine/utils.py | get_field_kwargs | def get_field_kwargs(field_name, model_field):
"""
Creating a default instance of a basic non-relational field.
"""
kwargs = {}
# The following will only be used by ModelField classes.
# Gets removed for everything else.
kwargs['model_field'] = model_field
if hasattr(model_field, 'verbose_name') and needs_label(model_field, field_name):
kwargs['label'] = capfirst(model_field.verbose_name)
if hasattr(model_field, 'help_text'):
kwargs['help_text'] = model_field.help_text
if isinstance(model_field, me_fields.DecimalField):
precision = model_field.precision
max_value = getattr(model_field, 'max_value', None)
if max_value is not None:
max_length = len(str(max_value)) + precision
else:
max_length = 65536
kwargs['decimal_places'] = precision
kwargs['max_digits'] = max_length
if isinstance(model_field, me_fields.GeoJsonBaseField):
kwargs['geo_type'] = model_field._type
if isinstance(model_field, me_fields.SequenceField) or model_field.primary_key or model_field.db_field == '_id':
# If this field is read-only, then return early.
# Further keyword arguments are not valid.
kwargs['read_only'] = True
return kwargs
if model_field.default and not isinstance(model_field, me_fields.ComplexBaseField):
kwargs['default'] = model_field.default
if model_field.null:
kwargs['allow_null'] = True
if model_field.null and isinstance(model_field, me_fields.StringField):
kwargs['allow_blank'] = True
if 'default' not in kwargs:
kwargs['required'] = model_field.required
# handle special cases - compound fields: mongoengine.ListField/DictField
if kwargs['required'] is True:
if isinstance(model_field, me_fields.ListField) or isinstance(model_field, me_fields.DictField):
kwargs['allow_empty'] = False
if model_field.choices:
# If this model field contains choices, then return early.
# Further keyword arguments are not valid.
kwargs['choices'] = model_field.choices
return kwargs
if isinstance(model_field, me_fields.StringField):
if model_field.regex:
kwargs['regex'] = model_field.regex
max_length = getattr(model_field, 'max_length', None)
if max_length is not None and isinstance(model_field, me_fields.StringField):
kwargs['max_length'] = max_length
min_length = getattr(model_field, 'min_length', None)
if min_length is not None and isinstance(model_field, me_fields.StringField):
kwargs['min_length'] = min_length
max_value = getattr(model_field, 'max_value', None)
if max_value is not None and isinstance(model_field, NUMERIC_FIELD_TYPES):
kwargs['max_value'] = max_value
min_value = getattr(model_field, 'min_value', None)
if min_value is not None and isinstance(model_field, NUMERIC_FIELD_TYPES):
kwargs['min_value'] = min_value
return kwargs | python | def get_field_kwargs(field_name, model_field):
"""
Creating a default instance of a basic non-relational field.
"""
kwargs = {}
# The following will only be used by ModelField classes.
# Gets removed for everything else.
kwargs['model_field'] = model_field
if hasattr(model_field, 'verbose_name') and needs_label(model_field, field_name):
kwargs['label'] = capfirst(model_field.verbose_name)
if hasattr(model_field, 'help_text'):
kwargs['help_text'] = model_field.help_text
if isinstance(model_field, me_fields.DecimalField):
precision = model_field.precision
max_value = getattr(model_field, 'max_value', None)
if max_value is not None:
max_length = len(str(max_value)) + precision
else:
max_length = 65536
kwargs['decimal_places'] = precision
kwargs['max_digits'] = max_length
if isinstance(model_field, me_fields.GeoJsonBaseField):
kwargs['geo_type'] = model_field._type
if isinstance(model_field, me_fields.SequenceField) or model_field.primary_key or model_field.db_field == '_id':
# If this field is read-only, then return early.
# Further keyword arguments are not valid.
kwargs['read_only'] = True
return kwargs
if model_field.default and not isinstance(model_field, me_fields.ComplexBaseField):
kwargs['default'] = model_field.default
if model_field.null:
kwargs['allow_null'] = True
if model_field.null and isinstance(model_field, me_fields.StringField):
kwargs['allow_blank'] = True
if 'default' not in kwargs:
kwargs['required'] = model_field.required
# handle special cases - compound fields: mongoengine.ListField/DictField
if kwargs['required'] is True:
if isinstance(model_field, me_fields.ListField) or isinstance(model_field, me_fields.DictField):
kwargs['allow_empty'] = False
if model_field.choices:
# If this model field contains choices, then return early.
# Further keyword arguments are not valid.
kwargs['choices'] = model_field.choices
return kwargs
if isinstance(model_field, me_fields.StringField):
if model_field.regex:
kwargs['regex'] = model_field.regex
max_length = getattr(model_field, 'max_length', None)
if max_length is not None and isinstance(model_field, me_fields.StringField):
kwargs['max_length'] = max_length
min_length = getattr(model_field, 'min_length', None)
if min_length is not None and isinstance(model_field, me_fields.StringField):
kwargs['min_length'] = min_length
max_value = getattr(model_field, 'max_value', None)
if max_value is not None and isinstance(model_field, NUMERIC_FIELD_TYPES):
kwargs['max_value'] = max_value
min_value = getattr(model_field, 'min_value', None)
if min_value is not None and isinstance(model_field, NUMERIC_FIELD_TYPES):
kwargs['min_value'] = min_value
return kwargs | [
"def",
"get_field_kwargs",
"(",
"field_name",
",",
"model_field",
")",
":",
"kwargs",
"=",
"{",
"}",
"# The following will only be used by ModelField classes.",
"# Gets removed for everything else.",
"kwargs",
"[",
"'model_field'",
"]",
"=",
"model_field",
"if",
"hasattr",
... | Creating a default instance of a basic non-relational field. | [
"Creating",
"a",
"default",
"instance",
"of",
"a",
"basic",
"non",
"-",
"relational",
"field",
"."
] | 2fe6de53907b31a5e8b742e4c6b728942b5fa4f0 | https://github.com/umutbozkurt/django-rest-framework-mongoengine/blob/2fe6de53907b31a5e8b742e4c6b728942b5fa4f0/rest_framework_mongoengine/utils.py#L112-L190 | train | 202,403 |
umutbozkurt/django-rest-framework-mongoengine | rest_framework_mongoengine/utils.py | get_relation_kwargs | def get_relation_kwargs(field_name, relation_info):
"""
Creating a default instance of a flat relational field.
"""
model_field, related_model = relation_info
kwargs = {}
if related_model and not issubclass(related_model, EmbeddedDocument):
kwargs['queryset'] = related_model.objects
if model_field:
if hasattr(model_field, 'verbose_name') and needs_label(model_field, field_name):
kwargs['label'] = capfirst(model_field.verbose_name)
if hasattr(model_field, 'help_text'):
kwargs['help_text'] = model_field.help_text
kwargs['required'] = model_field.required
if model_field.null:
kwargs['allow_null'] = True
if getattr(model_field, 'unique', False):
validator = UniqueValidator(queryset=related_model.objects)
kwargs['validators'] = [validator]
return kwargs | python | def get_relation_kwargs(field_name, relation_info):
"""
Creating a default instance of a flat relational field.
"""
model_field, related_model = relation_info
kwargs = {}
if related_model and not issubclass(related_model, EmbeddedDocument):
kwargs['queryset'] = related_model.objects
if model_field:
if hasattr(model_field, 'verbose_name') and needs_label(model_field, field_name):
kwargs['label'] = capfirst(model_field.verbose_name)
if hasattr(model_field, 'help_text'):
kwargs['help_text'] = model_field.help_text
kwargs['required'] = model_field.required
if model_field.null:
kwargs['allow_null'] = True
if getattr(model_field, 'unique', False):
validator = UniqueValidator(queryset=related_model.objects)
kwargs['validators'] = [validator]
return kwargs | [
"def",
"get_relation_kwargs",
"(",
"field_name",
",",
"relation_info",
")",
":",
"model_field",
",",
"related_model",
"=",
"relation_info",
"kwargs",
"=",
"{",
"}",
"if",
"related_model",
"and",
"not",
"issubclass",
"(",
"related_model",
",",
"EmbeddedDocument",
"... | Creating a default instance of a flat relational field. | [
"Creating",
"a",
"default",
"instance",
"of",
"a",
"flat",
"relational",
"field",
"."
] | 2fe6de53907b31a5e8b742e4c6b728942b5fa4f0 | https://github.com/umutbozkurt/django-rest-framework-mongoengine/blob/2fe6de53907b31a5e8b742e4c6b728942b5fa4f0/rest_framework_mongoengine/utils.py#L193-L216 | train | 202,404 |
umutbozkurt/django-rest-framework-mongoengine | rest_framework_mongoengine/utils.py | get_nested_relation_kwargs | def get_nested_relation_kwargs(field_name, relation_info):
"""
Creating a default instance of a nested serializer
"""
kwargs = get_relation_kwargs(field_name, relation_info)
kwargs.pop('queryset')
kwargs.pop('required')
kwargs['read_only'] = True
return kwargs | python | def get_nested_relation_kwargs(field_name, relation_info):
"""
Creating a default instance of a nested serializer
"""
kwargs = get_relation_kwargs(field_name, relation_info)
kwargs.pop('queryset')
kwargs.pop('required')
kwargs['read_only'] = True
return kwargs | [
"def",
"get_nested_relation_kwargs",
"(",
"field_name",
",",
"relation_info",
")",
":",
"kwargs",
"=",
"get_relation_kwargs",
"(",
"field_name",
",",
"relation_info",
")",
"kwargs",
".",
"pop",
"(",
"'queryset'",
")",
"kwargs",
".",
"pop",
"(",
"'required'",
")"... | Creating a default instance of a nested serializer | [
"Creating",
"a",
"default",
"instance",
"of",
"a",
"nested",
"serializer"
] | 2fe6de53907b31a5e8b742e4c6b728942b5fa4f0 | https://github.com/umutbozkurt/django-rest-framework-mongoengine/blob/2fe6de53907b31a5e8b742e4c6b728942b5fa4f0/rest_framework_mongoengine/utils.py#L219-L227 | train | 202,405 |
aestrivex/bctpy | bct/algorithms/degree.py | degrees_dir | def degrees_dir(CIJ):
'''
Node degree is the number of links connected to the node. The indegree
is the number of inward links and the outdegree is the number of
outward links.
Parameters
----------
CIJ : NxN np.ndarray
directed binary/weighted connection matrix
Returns
-------
id : Nx1 np.ndarray
node in-degree
od : Nx1 np.ndarray
node out-degree
deg : Nx1 np.ndarray
node degree (in-degree + out-degree)
Notes
-----
Inputs are assumed to be on the columns of the CIJ matrix.
Weight information is discarded.
'''
CIJ = binarize(CIJ, copy=True) # ensure CIJ is binary
id = np.sum(CIJ, axis=0) # indegree = column sum of CIJ
od = np.sum(CIJ, axis=1) # outdegree = row sum of CIJ
deg = id + od # degree = indegree+outdegree
return id, od, deg | python | def degrees_dir(CIJ):
'''
Node degree is the number of links connected to the node. The indegree
is the number of inward links and the outdegree is the number of
outward links.
Parameters
----------
CIJ : NxN np.ndarray
directed binary/weighted connection matrix
Returns
-------
id : Nx1 np.ndarray
node in-degree
od : Nx1 np.ndarray
node out-degree
deg : Nx1 np.ndarray
node degree (in-degree + out-degree)
Notes
-----
Inputs are assumed to be on the columns of the CIJ matrix.
Weight information is discarded.
'''
CIJ = binarize(CIJ, copy=True) # ensure CIJ is binary
id = np.sum(CIJ, axis=0) # indegree = column sum of CIJ
od = np.sum(CIJ, axis=1) # outdegree = row sum of CIJ
deg = id + od # degree = indegree+outdegree
return id, od, deg | [
"def",
"degrees_dir",
"(",
"CIJ",
")",
":",
"CIJ",
"=",
"binarize",
"(",
"CIJ",
",",
"copy",
"=",
"True",
")",
"# ensure CIJ is binary",
"id",
"=",
"np",
".",
"sum",
"(",
"CIJ",
",",
"axis",
"=",
"0",
")",
"# indegree = column sum of CIJ",
"od",
"=",
"... | Node degree is the number of links connected to the node. The indegree
is the number of inward links and the outdegree is the number of
outward links.
Parameters
----------
CIJ : NxN np.ndarray
directed binary/weighted connection matrix
Returns
-------
id : Nx1 np.ndarray
node in-degree
od : Nx1 np.ndarray
node out-degree
deg : Nx1 np.ndarray
node degree (in-degree + out-degree)
Notes
-----
Inputs are assumed to be on the columns of the CIJ matrix.
Weight information is discarded. | [
"Node",
"degree",
"is",
"the",
"number",
"of",
"links",
"connected",
"to",
"the",
"node",
".",
"The",
"indegree",
"is",
"the",
"number",
"of",
"inward",
"links",
"and",
"the",
"outdegree",
"is",
"the",
"number",
"of",
"outward",
"links",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/degree.py#L6-L35 | train | 202,406 |
aestrivex/bctpy | bct/algorithms/degree.py | degrees_und | def degrees_und(CIJ):
'''
Node degree is the number of links connected to the node.
Parameters
----------
CIJ : NxN np.ndarray
undirected binary/weighted connection matrix
Returns
-------
deg : Nx1 np.ndarray
node degree
Notes
-----
Weight information is discarded.
'''
CIJ = binarize(CIJ, copy=True) # ensure CIJ is binary
return np.sum(CIJ, axis=0) | python | def degrees_und(CIJ):
'''
Node degree is the number of links connected to the node.
Parameters
----------
CIJ : NxN np.ndarray
undirected binary/weighted connection matrix
Returns
-------
deg : Nx1 np.ndarray
node degree
Notes
-----
Weight information is discarded.
'''
CIJ = binarize(CIJ, copy=True) # ensure CIJ is binary
return np.sum(CIJ, axis=0) | [
"def",
"degrees_und",
"(",
"CIJ",
")",
":",
"CIJ",
"=",
"binarize",
"(",
"CIJ",
",",
"copy",
"=",
"True",
")",
"# ensure CIJ is binary",
"return",
"np",
".",
"sum",
"(",
"CIJ",
",",
"axis",
"=",
"0",
")"
] | Node degree is the number of links connected to the node.
Parameters
----------
CIJ : NxN np.ndarray
undirected binary/weighted connection matrix
Returns
-------
deg : Nx1 np.ndarray
node degree
Notes
-----
Weight information is discarded. | [
"Node",
"degree",
"is",
"the",
"number",
"of",
"links",
"connected",
"to",
"the",
"node",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/degree.py#L38-L57 | train | 202,407 |
aestrivex/bctpy | bct/algorithms/degree.py | strengths_dir | def strengths_dir(CIJ):
'''
Node strength is the sum of weights of links connected to the node. The
instrength is the sum of inward link weights and the outstrength is the
sum of outward link weights.
Parameters
----------
CIJ : NxN np.ndarray
directed weighted connection matrix
Returns
-------
is : Nx1 np.ndarray
node in-strength
os : Nx1 np.ndarray
node out-strength
str : Nx1 np.ndarray
node strength (in-strength + out-strength)
Notes
-----
Inputs are assumed to be on the columns of the CIJ matrix.
'''
istr = np.sum(CIJ, axis=0)
ostr = np.sum(CIJ, axis=1)
return istr + ostr | python | def strengths_dir(CIJ):
'''
Node strength is the sum of weights of links connected to the node. The
instrength is the sum of inward link weights and the outstrength is the
sum of outward link weights.
Parameters
----------
CIJ : NxN np.ndarray
directed weighted connection matrix
Returns
-------
is : Nx1 np.ndarray
node in-strength
os : Nx1 np.ndarray
node out-strength
str : Nx1 np.ndarray
node strength (in-strength + out-strength)
Notes
-----
Inputs are assumed to be on the columns of the CIJ matrix.
'''
istr = np.sum(CIJ, axis=0)
ostr = np.sum(CIJ, axis=1)
return istr + ostr | [
"def",
"strengths_dir",
"(",
"CIJ",
")",
":",
"istr",
"=",
"np",
".",
"sum",
"(",
"CIJ",
",",
"axis",
"=",
"0",
")",
"ostr",
"=",
"np",
".",
"sum",
"(",
"CIJ",
",",
"axis",
"=",
"1",
")",
"return",
"istr",
"+",
"ostr"
] | Node strength is the sum of weights of links connected to the node. The
instrength is the sum of inward link weights and the outstrength is the
sum of outward link weights.
Parameters
----------
CIJ : NxN np.ndarray
directed weighted connection matrix
Returns
-------
is : Nx1 np.ndarray
node in-strength
os : Nx1 np.ndarray
node out-strength
str : Nx1 np.ndarray
node strength (in-strength + out-strength)
Notes
-----
Inputs are assumed to be on the columns of the CIJ matrix. | [
"Node",
"strength",
"is",
"the",
"sum",
"of",
"weights",
"of",
"links",
"connected",
"to",
"the",
"node",
".",
"The",
"instrength",
"is",
"the",
"sum",
"of",
"inward",
"link",
"weights",
"and",
"the",
"outstrength",
"is",
"the",
"sum",
"of",
"outward",
"... | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/degree.py#L111-L137 | train | 202,408 |
aestrivex/bctpy | bct/algorithms/degree.py | strengths_und_sign | def strengths_und_sign(W):
'''
Node strength is the sum of weights of links connected to the node.
Parameters
----------
W : NxN np.ndarray
undirected connection matrix with positive and negative weights
Returns
-------
Spos : Nx1 np.ndarray
nodal strength of positive weights
Sneg : Nx1 np.ndarray
nodal strength of positive weights
vpos : float
total positive weight
vneg : float
total negative weight
'''
W = W.copy()
n = len(W)
np.fill_diagonal(W, 0) # clear diagonal
Spos = np.sum(W * (W > 0), axis=0) # positive strengths
Sneg = np.sum(W * (W < 0), axis=0) # negative strengths
vpos = np.sum(W[W > 0]) # positive weight
vneg = np.sum(W[W < 0]) # negative weight
return Spos, Sneg, vpos, vneg | python | def strengths_und_sign(W):
'''
Node strength is the sum of weights of links connected to the node.
Parameters
----------
W : NxN np.ndarray
undirected connection matrix with positive and negative weights
Returns
-------
Spos : Nx1 np.ndarray
nodal strength of positive weights
Sneg : Nx1 np.ndarray
nodal strength of positive weights
vpos : float
total positive weight
vneg : float
total negative weight
'''
W = W.copy()
n = len(W)
np.fill_diagonal(W, 0) # clear diagonal
Spos = np.sum(W * (W > 0), axis=0) # positive strengths
Sneg = np.sum(W * (W < 0), axis=0) # negative strengths
vpos = np.sum(W[W > 0]) # positive weight
vneg = np.sum(W[W < 0]) # negative weight
return Spos, Sneg, vpos, vneg | [
"def",
"strengths_und_sign",
"(",
"W",
")",
":",
"W",
"=",
"W",
".",
"copy",
"(",
")",
"n",
"=",
"len",
"(",
"W",
")",
"np",
".",
"fill_diagonal",
"(",
"W",
",",
"0",
")",
"# clear diagonal",
"Spos",
"=",
"np",
".",
"sum",
"(",
"W",
"*",
"(",
... | Node strength is the sum of weights of links connected to the node.
Parameters
----------
W : NxN np.ndarray
undirected connection matrix with positive and negative weights
Returns
-------
Spos : Nx1 np.ndarray
nodal strength of positive weights
Sneg : Nx1 np.ndarray
nodal strength of positive weights
vpos : float
total positive weight
vneg : float
total negative weight | [
"Node",
"strength",
"is",
"the",
"sum",
"of",
"weights",
"of",
"links",
"connected",
"to",
"the",
"node",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/degree.py#L157-L185 | train | 202,409 |
aestrivex/bctpy | bct/algorithms/similarity.py | edge_nei_overlap_bu | def edge_nei_overlap_bu(CIJ):
'''
This function determines the neighbors of two nodes that are linked by
an edge, and then computes their overlap. Connection matrix must be
binary and directed. Entries of 'EC' that are 'inf' indicate that no
edge is present. Entries of 'EC' that are 0 denote "local bridges", i.e.
edges that link completely non-overlapping neighborhoods. Low values
of EC indicate edges that are "weak ties".
If CIJ is weighted, the weights are ignored.
Parameters
----------
CIJ : NxN np.ndarray
undirected binary/weighted connection matrix
Returns
-------
EC : NxN np.ndarray
edge neighborhood overlap matrix
ec : Kx1 np.ndarray
edge neighborhood overlap per edge vector
degij : NxN np.ndarray
degrees of node pairs connected by each edge
'''
ik, jk = np.where(CIJ)
lel = len(CIJ[ik, jk])
n = len(CIJ)
deg = degrees_und(CIJ)
ec = np.zeros((lel,))
degij = np.zeros((2, lel))
for e in range(lel):
neiik = np.setdiff1d(np.union1d(
np.where(CIJ[ik[e], :]), np.where(CIJ[:, ik[e]])), (ik[e], jk[e]))
neijk = np.setdiff1d(np.union1d(
np.where(CIJ[jk[e], :]), np.where(CIJ[:, jk[e]])), (ik[e], jk[e]))
ec[e] = len(np.intersect1d(neiik, neijk)) / \
len(np.union1d(neiik, neijk))
degij[:, e] = (deg[ik[e]], deg[jk[e]])
EC = np.tile(np.inf, (n, n))
EC[ik, jk] = ec
return EC, ec, degij | python | def edge_nei_overlap_bu(CIJ):
'''
This function determines the neighbors of two nodes that are linked by
an edge, and then computes their overlap. Connection matrix must be
binary and directed. Entries of 'EC' that are 'inf' indicate that no
edge is present. Entries of 'EC' that are 0 denote "local bridges", i.e.
edges that link completely non-overlapping neighborhoods. Low values
of EC indicate edges that are "weak ties".
If CIJ is weighted, the weights are ignored.
Parameters
----------
CIJ : NxN np.ndarray
undirected binary/weighted connection matrix
Returns
-------
EC : NxN np.ndarray
edge neighborhood overlap matrix
ec : Kx1 np.ndarray
edge neighborhood overlap per edge vector
degij : NxN np.ndarray
degrees of node pairs connected by each edge
'''
ik, jk = np.where(CIJ)
lel = len(CIJ[ik, jk])
n = len(CIJ)
deg = degrees_und(CIJ)
ec = np.zeros((lel,))
degij = np.zeros((2, lel))
for e in range(lel):
neiik = np.setdiff1d(np.union1d(
np.where(CIJ[ik[e], :]), np.where(CIJ[:, ik[e]])), (ik[e], jk[e]))
neijk = np.setdiff1d(np.union1d(
np.where(CIJ[jk[e], :]), np.where(CIJ[:, jk[e]])), (ik[e], jk[e]))
ec[e] = len(np.intersect1d(neiik, neijk)) / \
len(np.union1d(neiik, neijk))
degij[:, e] = (deg[ik[e]], deg[jk[e]])
EC = np.tile(np.inf, (n, n))
EC[ik, jk] = ec
return EC, ec, degij | [
"def",
"edge_nei_overlap_bu",
"(",
"CIJ",
")",
":",
"ik",
",",
"jk",
"=",
"np",
".",
"where",
"(",
"CIJ",
")",
"lel",
"=",
"len",
"(",
"CIJ",
"[",
"ik",
",",
"jk",
"]",
")",
"n",
"=",
"len",
"(",
"CIJ",
")",
"deg",
"=",
"degrees_und",
"(",
"C... | This function determines the neighbors of two nodes that are linked by
an edge, and then computes their overlap. Connection matrix must be
binary and directed. Entries of 'EC' that are 'inf' indicate that no
edge is present. Entries of 'EC' that are 0 denote "local bridges", i.e.
edges that link completely non-overlapping neighborhoods. Low values
of EC indicate edges that are "weak ties".
If CIJ is weighted, the weights are ignored.
Parameters
----------
CIJ : NxN np.ndarray
undirected binary/weighted connection matrix
Returns
-------
EC : NxN np.ndarray
edge neighborhood overlap matrix
ec : Kx1 np.ndarray
edge neighborhood overlap per edge vector
degij : NxN np.ndarray
degrees of node pairs connected by each edge | [
"This",
"function",
"determines",
"the",
"neighbors",
"of",
"two",
"nodes",
"that",
"are",
"linked",
"by",
"an",
"edge",
"and",
"then",
"computes",
"their",
"overlap",
".",
"Connection",
"matrix",
"must",
"be",
"binary",
"and",
"directed",
".",
"Entries",
"o... | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/similarity.py#L56-L100 | train | 202,410 |
aestrivex/bctpy | bct/algorithms/similarity.py | matching_ind | def matching_ind(CIJ):
'''
For any two nodes u and v, the matching index computes the amount of
overlap in the connection patterns of u and v. Self-connections and
u-v connections are ignored. The matching index is a symmetric
quantity, similar to a correlation or a dot product.
Parameters
----------
CIJ : NxN np.ndarray
adjacency matrix
Returns
-------
Min : NxN np.ndarray
matching index for incoming connections
Mout : NxN np.ndarray
matching index for outgoing connections
Mall : NxN np.ndarray
matching index for all connections
Notes
-----
Does not use self- or cross connections for comparison.
Does not use connections that are not present in BOTH u and v.
All output matrices are calculated for upper triangular only.
'''
n = len(CIJ)
Min = np.zeros((n, n))
Mout = np.zeros((n, n))
Mall = np.zeros((n, n))
# compare incoming connections
for i in range(n - 1):
for j in range(i + 1, n):
c1i = CIJ[:, i]
c2i = CIJ[:, j]
usei = np.logical_or(c1i, c2i)
usei[i] = 0
usei[j] = 0
nconi = np.sum(c1i[usei]) + np.sum(c2i[usei])
if not nconi:
Min[i, j] = 0
else:
Min[i, j] = 2 * \
np.sum(np.logical_and(c1i[usei], c2i[usei])) / nconi
c1o = CIJ[i, :]
c2o = CIJ[j, :]
useo = np.logical_or(c1o, c2o)
useo[i] = 0
useo[j] = 0
ncono = np.sum(c1o[useo]) + np.sum(c2o[useo])
if not ncono:
Mout[i, j] = 0
else:
Mout[i, j] = 2 * \
np.sum(np.logical_and(c1o[useo], c2o[useo])) / ncono
c1a = np.ravel((c1i, c1o))
c2a = np.ravel((c2i, c2o))
usea = np.logical_or(c1a, c2a)
usea[i] = 0
usea[i + n] = 0
usea[j] = 0
usea[j + n] = 0
ncona = np.sum(c1a[usea]) + np.sum(c2a[usea])
if not ncona:
Mall[i, j] = 0
else:
Mall[i, j] = 2 * \
np.sum(np.logical_and(c1a[usea], c2a[usea])) / ncona
Min = Min + Min.T
Mout = Mout + Mout.T
Mall = Mall + Mall.T
return Min, Mout, Mall | python | def matching_ind(CIJ):
'''
For any two nodes u and v, the matching index computes the amount of
overlap in the connection patterns of u and v. Self-connections and
u-v connections are ignored. The matching index is a symmetric
quantity, similar to a correlation or a dot product.
Parameters
----------
CIJ : NxN np.ndarray
adjacency matrix
Returns
-------
Min : NxN np.ndarray
matching index for incoming connections
Mout : NxN np.ndarray
matching index for outgoing connections
Mall : NxN np.ndarray
matching index for all connections
Notes
-----
Does not use self- or cross connections for comparison.
Does not use connections that are not present in BOTH u and v.
All output matrices are calculated for upper triangular only.
'''
n = len(CIJ)
Min = np.zeros((n, n))
Mout = np.zeros((n, n))
Mall = np.zeros((n, n))
# compare incoming connections
for i in range(n - 1):
for j in range(i + 1, n):
c1i = CIJ[:, i]
c2i = CIJ[:, j]
usei = np.logical_or(c1i, c2i)
usei[i] = 0
usei[j] = 0
nconi = np.sum(c1i[usei]) + np.sum(c2i[usei])
if not nconi:
Min[i, j] = 0
else:
Min[i, j] = 2 * \
np.sum(np.logical_and(c1i[usei], c2i[usei])) / nconi
c1o = CIJ[i, :]
c2o = CIJ[j, :]
useo = np.logical_or(c1o, c2o)
useo[i] = 0
useo[j] = 0
ncono = np.sum(c1o[useo]) + np.sum(c2o[useo])
if not ncono:
Mout[i, j] = 0
else:
Mout[i, j] = 2 * \
np.sum(np.logical_and(c1o[useo], c2o[useo])) / ncono
c1a = np.ravel((c1i, c1o))
c2a = np.ravel((c2i, c2o))
usea = np.logical_or(c1a, c2a)
usea[i] = 0
usea[i + n] = 0
usea[j] = 0
usea[j + n] = 0
ncona = np.sum(c1a[usea]) + np.sum(c2a[usea])
if not ncona:
Mall[i, j] = 0
else:
Mall[i, j] = 2 * \
np.sum(np.logical_and(c1a[usea], c2a[usea])) / ncona
Min = Min + Min.T
Mout = Mout + Mout.T
Mall = Mall + Mall.T
return Min, Mout, Mall | [
"def",
"matching_ind",
"(",
"CIJ",
")",
":",
"n",
"=",
"len",
"(",
"CIJ",
")",
"Min",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
"n",
")",
")",
"Mout",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
"n",
")",
")",
"Mall",
"=",
"np",
".",
... | For any two nodes u and v, the matching index computes the amount of
overlap in the connection patterns of u and v. Self-connections and
u-v connections are ignored. The matching index is a symmetric
quantity, similar to a correlation or a dot product.
Parameters
----------
CIJ : NxN np.ndarray
adjacency matrix
Returns
-------
Min : NxN np.ndarray
matching index for incoming connections
Mout : NxN np.ndarray
matching index for outgoing connections
Mall : NxN np.ndarray
matching index for all connections
Notes
-----
Does not use self- or cross connections for comparison.
Does not use connections that are not present in BOTH u and v.
All output matrices are calculated for upper triangular only. | [
"For",
"any",
"two",
"nodes",
"u",
"and",
"v",
"the",
"matching",
"index",
"computes",
"the",
"amount",
"of",
"overlap",
"in",
"the",
"connection",
"patterns",
"of",
"u",
"and",
"v",
".",
"Self",
"-",
"connections",
"and",
"u",
"-",
"v",
"connections",
... | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/similarity.py#L167-L245 | train | 202,411 |
aestrivex/bctpy | bct/algorithms/similarity.py | dice_pairwise_und | def dice_pairwise_und(a1, a2):
'''
Calculates pairwise dice similarity for each vertex between two
matrices. Treats the matrices as binary and undirected.
Paramaters
----------
A1 : NxN np.ndarray
Matrix 1
A2 : NxN np.ndarray
Matrix 2
Returns
-------
D : Nx1 np.ndarray
dice similarity vector
'''
a1 = binarize(a1, copy=True)
a2 = binarize(a2, copy=True) # ensure matrices are binary
n = len(a1)
np.fill_diagonal(a1, 0)
np.fill_diagonal(a2, 0) # set diagonals to 0
d = np.zeros((n,)) # dice similarity
# calculate the common neighbors for each vertex
for i in range(n):
d[i] = 2 * (np.sum(np.logical_and(a1[:, i], a2[:, i])) /
(np.sum(a1[:, i]) + np.sum(a2[:, i])))
return d | python | def dice_pairwise_und(a1, a2):
'''
Calculates pairwise dice similarity for each vertex between two
matrices. Treats the matrices as binary and undirected.
Paramaters
----------
A1 : NxN np.ndarray
Matrix 1
A2 : NxN np.ndarray
Matrix 2
Returns
-------
D : Nx1 np.ndarray
dice similarity vector
'''
a1 = binarize(a1, copy=True)
a2 = binarize(a2, copy=True) # ensure matrices are binary
n = len(a1)
np.fill_diagonal(a1, 0)
np.fill_diagonal(a2, 0) # set diagonals to 0
d = np.zeros((n,)) # dice similarity
# calculate the common neighbors for each vertex
for i in range(n):
d[i] = 2 * (np.sum(np.logical_and(a1[:, i], a2[:, i])) /
(np.sum(a1[:, i]) + np.sum(a2[:, i])))
return d | [
"def",
"dice_pairwise_und",
"(",
"a1",
",",
"a2",
")",
":",
"a1",
"=",
"binarize",
"(",
"a1",
",",
"copy",
"=",
"True",
")",
"a2",
"=",
"binarize",
"(",
"a2",
",",
"copy",
"=",
"True",
")",
"# ensure matrices are binary",
"n",
"=",
"len",
"(",
"a1",
... | Calculates pairwise dice similarity for each vertex between two
matrices. Treats the matrices as binary and undirected.
Paramaters
----------
A1 : NxN np.ndarray
Matrix 1
A2 : NxN np.ndarray
Matrix 2
Returns
-------
D : Nx1 np.ndarray
dice similarity vector | [
"Calculates",
"pairwise",
"dice",
"similarity",
"for",
"each",
"vertex",
"between",
"two",
"matrices",
".",
"Treats",
"the",
"matrices",
"as",
"binary",
"and",
"undirected",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/similarity.py#L295-L326 | train | 202,412 |
aestrivex/bctpy | bct/algorithms/similarity.py | corr_flat_und | def corr_flat_und(a1, a2):
'''
Returns the correlation coefficient between two flattened adjacency
matrices. Only the upper triangular part is used to avoid double counting
undirected matrices. Similarity metric for weighted matrices.
Parameters
----------
A1 : NxN np.ndarray
undirected matrix 1
A2 : NxN np.ndarray
undirected matrix 2
Returns
-------
r : float
Correlation coefficient describing edgewise similarity of a1 and a2
'''
n = len(a1)
if len(a2) != n:
raise BCTParamError("Cannot calculate flattened correlation on "
"matrices of different size")
triu_ix = np.where(np.triu(np.ones((n, n)), 1))
return np.corrcoef(a1[triu_ix].flat, a2[triu_ix].flat)[0][1] | python | def corr_flat_und(a1, a2):
'''
Returns the correlation coefficient between two flattened adjacency
matrices. Only the upper triangular part is used to avoid double counting
undirected matrices. Similarity metric for weighted matrices.
Parameters
----------
A1 : NxN np.ndarray
undirected matrix 1
A2 : NxN np.ndarray
undirected matrix 2
Returns
-------
r : float
Correlation coefficient describing edgewise similarity of a1 and a2
'''
n = len(a1)
if len(a2) != n:
raise BCTParamError("Cannot calculate flattened correlation on "
"matrices of different size")
triu_ix = np.where(np.triu(np.ones((n, n)), 1))
return np.corrcoef(a1[triu_ix].flat, a2[triu_ix].flat)[0][1] | [
"def",
"corr_flat_und",
"(",
"a1",
",",
"a2",
")",
":",
"n",
"=",
"len",
"(",
"a1",
")",
"if",
"len",
"(",
"a2",
")",
"!=",
"n",
":",
"raise",
"BCTParamError",
"(",
"\"Cannot calculate flattened correlation on \"",
"\"matrices of different size\"",
")",
"triu_... | Returns the correlation coefficient between two flattened adjacency
matrices. Only the upper triangular part is used to avoid double counting
undirected matrices. Similarity metric for weighted matrices.
Parameters
----------
A1 : NxN np.ndarray
undirected matrix 1
A2 : NxN np.ndarray
undirected matrix 2
Returns
-------
r : float
Correlation coefficient describing edgewise similarity of a1 and a2 | [
"Returns",
"the",
"correlation",
"coefficient",
"between",
"two",
"flattened",
"adjacency",
"matrices",
".",
"Only",
"the",
"upper",
"triangular",
"part",
"is",
"used",
"to",
"avoid",
"double",
"counting",
"undirected",
"matrices",
".",
"Similarity",
"metric",
"fo... | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/similarity.py#L329-L352 | train | 202,413 |
aestrivex/bctpy | bct/algorithms/similarity.py | corr_flat_dir | def corr_flat_dir(a1, a2):
'''
Returns the correlation coefficient between two flattened adjacency
matrices. Similarity metric for weighted matrices.
Parameters
----------
A1 : NxN np.ndarray
directed matrix 1
A2 : NxN np.ndarray
directed matrix 2
Returns
-------
r : float
Correlation coefficient describing edgewise similarity of a1 and a2
'''
n = len(a1)
if len(a2) != n:
raise BCTParamError("Cannot calculate flattened correlation on "
"matrices of different size")
ix = np.logical_not(np.eye(n))
return np.corrcoef(a1[ix].flat, a2[ix].flat)[0][1] | python | def corr_flat_dir(a1, a2):
'''
Returns the correlation coefficient between two flattened adjacency
matrices. Similarity metric for weighted matrices.
Parameters
----------
A1 : NxN np.ndarray
directed matrix 1
A2 : NxN np.ndarray
directed matrix 2
Returns
-------
r : float
Correlation coefficient describing edgewise similarity of a1 and a2
'''
n = len(a1)
if len(a2) != n:
raise BCTParamError("Cannot calculate flattened correlation on "
"matrices of different size")
ix = np.logical_not(np.eye(n))
return np.corrcoef(a1[ix].flat, a2[ix].flat)[0][1] | [
"def",
"corr_flat_dir",
"(",
"a1",
",",
"a2",
")",
":",
"n",
"=",
"len",
"(",
"a1",
")",
"if",
"len",
"(",
"a2",
")",
"!=",
"n",
":",
"raise",
"BCTParamError",
"(",
"\"Cannot calculate flattened correlation on \"",
"\"matrices of different size\"",
")",
"ix",
... | Returns the correlation coefficient between two flattened adjacency
matrices. Similarity metric for weighted matrices.
Parameters
----------
A1 : NxN np.ndarray
directed matrix 1
A2 : NxN np.ndarray
directed matrix 2
Returns
-------
r : float
Correlation coefficient describing edgewise similarity of a1 and a2 | [
"Returns",
"the",
"correlation",
"coefficient",
"between",
"two",
"flattened",
"adjacency",
"matrices",
".",
"Similarity",
"metric",
"for",
"weighted",
"matrices",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/similarity.py#L355-L377 | train | 202,414 |
aestrivex/bctpy | bct/utils/visualization.py | adjacency_plot_und | def adjacency_plot_und(A, coor, tube=False):
'''
This function in matlab is a visualization helper which translates an
adjacency matrix and an Nx3 matrix of spatial coordinates, and plots a
3D isometric network connecting the undirected unweighted nodes using a
specific plotting format. Including the formatted output is not useful at
all for bctpy since matplotlib will not be able to plot it in quite the
same way.
Instead of doing this, I have included code that will plot the adjacency
matrix onto nodes at the given spatial coordinates in mayavi
This routine is basically a less featureful version of the 3D brain in
cvu, the connectome visualization utility which I also maintain. cvu uses
freesurfer surfaces and annotations to get the node coordinates (rather
than leaving them up to the user) and has many other interactive
visualization features not included here for the sake of brevity.
There are other similar visualizations in the ConnectomeViewer and the
UCLA multimodal connectivity database.
Note that unlike other bctpy functions, this function depends on mayavi.
Paramaters
----------
A : NxN np.ndarray
adjacency matrix
coor : Nx3 np.ndarray
vector of node coordinates
tube : bool
plots using cylindrical tubes for higher resolution image. If True,
plots cylindrical tube sources. If False, plots line sources. Default
value is False.
Returns
-------
fig : Instance(Scene)
handle to a mayavi figure.
Notes
-----
To display the output interactively, call
fig=adjacency_plot_und(A,coor)
from mayavi import mlab
mlab.show()
Note: Thresholding the matrix is strongly recommended. It is recommended
that the input matrix have fewer than 5000 total connections in order to
achieve reasonable performance and noncluttered visualization.
'''
from mayavi import mlab
n = len(A)
nr_edges = (n * n - 1) // 2
#starts = np.zeros((nr_edges,3))
#vecs = np.zeros((nr_edges,3))
#adjdat = np.zeros((nr_edges,))
ixes, = np.where(np.triu(np.ones((n, n)), 1).flat)
# i=0
# for r2 in xrange(n):
# for r1 in xrange(r2):
# starts[i,:] = coor[r1,:]
# vecs[i,:] = coor[r2,:] - coor[r1,:]
# adjdat[i,:]
# i+=1
adjdat = A.flat[ixes]
A_r = np.tile(coor, (n, 1, 1))
starts = np.reshape(A_r, (n * n, 3))[ixes, :]
vecs = np.reshape(A_r - np.transpose(A_r, (1, 0, 2)), (n * n, 3))[ixes, :]
# plotting
fig = mlab.figure()
nodesource = mlab.pipeline.scalar_scatter(
coor[:, 0], coor[:, 1], coor[:, 2], figure=fig)
nodes = mlab.pipeline.glyph(nodesource, scale_mode='none',
scale_factor=3., mode='sphere', figure=fig)
nodes.glyph.color_mode = 'color_by_scalar'
vectorsrc = mlab.pipeline.vector_scatter(
starts[:, 0], starts[:, 1], starts[
:, 2], vecs[:, 0], vecs[:, 1], vecs[:, 2],
figure=fig)
vectorsrc.mlab_source.dataset.point_data.scalars = adjdat
thres = mlab.pipeline.threshold(vectorsrc,
low=0.0001, up=np.max(A), figure=fig)
vectors = mlab.pipeline.vectors(thres, colormap='YlOrRd',
scale_mode='vector', figure=fig)
vectors.glyph.glyph.clamping = False
vectors.glyph.glyph.color_mode = 'color_by_scalar'
vectors.glyph.color_mode = 'color_by_scalar'
vectors.glyph.glyph_source.glyph_position = 'head'
vectors.actor.property.opacity = .7
if tube:
vectors.glyph.glyph_source.glyph_source = (vectors.glyph.glyph_source.
glyph_dict['cylinder_source'])
vectors.glyph.glyph_source.glyph_source.radius = 0.015
else:
vectors.glyph.glyph_source.glyph_source.glyph_type = 'dash'
return fig | python | def adjacency_plot_und(A, coor, tube=False):
'''
This function in matlab is a visualization helper which translates an
adjacency matrix and an Nx3 matrix of spatial coordinates, and plots a
3D isometric network connecting the undirected unweighted nodes using a
specific plotting format. Including the formatted output is not useful at
all for bctpy since matplotlib will not be able to plot it in quite the
same way.
Instead of doing this, I have included code that will plot the adjacency
matrix onto nodes at the given spatial coordinates in mayavi
This routine is basically a less featureful version of the 3D brain in
cvu, the connectome visualization utility which I also maintain. cvu uses
freesurfer surfaces and annotations to get the node coordinates (rather
than leaving them up to the user) and has many other interactive
visualization features not included here for the sake of brevity.
There are other similar visualizations in the ConnectomeViewer and the
UCLA multimodal connectivity database.
Note that unlike other bctpy functions, this function depends on mayavi.
Paramaters
----------
A : NxN np.ndarray
adjacency matrix
coor : Nx3 np.ndarray
vector of node coordinates
tube : bool
plots using cylindrical tubes for higher resolution image. If True,
plots cylindrical tube sources. If False, plots line sources. Default
value is False.
Returns
-------
fig : Instance(Scene)
handle to a mayavi figure.
Notes
-----
To display the output interactively, call
fig=adjacency_plot_und(A,coor)
from mayavi import mlab
mlab.show()
Note: Thresholding the matrix is strongly recommended. It is recommended
that the input matrix have fewer than 5000 total connections in order to
achieve reasonable performance and noncluttered visualization.
'''
from mayavi import mlab
n = len(A)
nr_edges = (n * n - 1) // 2
#starts = np.zeros((nr_edges,3))
#vecs = np.zeros((nr_edges,3))
#adjdat = np.zeros((nr_edges,))
ixes, = np.where(np.triu(np.ones((n, n)), 1).flat)
# i=0
# for r2 in xrange(n):
# for r1 in xrange(r2):
# starts[i,:] = coor[r1,:]
# vecs[i,:] = coor[r2,:] - coor[r1,:]
# adjdat[i,:]
# i+=1
adjdat = A.flat[ixes]
A_r = np.tile(coor, (n, 1, 1))
starts = np.reshape(A_r, (n * n, 3))[ixes, :]
vecs = np.reshape(A_r - np.transpose(A_r, (1, 0, 2)), (n * n, 3))[ixes, :]
# plotting
fig = mlab.figure()
nodesource = mlab.pipeline.scalar_scatter(
coor[:, 0], coor[:, 1], coor[:, 2], figure=fig)
nodes = mlab.pipeline.glyph(nodesource, scale_mode='none',
scale_factor=3., mode='sphere', figure=fig)
nodes.glyph.color_mode = 'color_by_scalar'
vectorsrc = mlab.pipeline.vector_scatter(
starts[:, 0], starts[:, 1], starts[
:, 2], vecs[:, 0], vecs[:, 1], vecs[:, 2],
figure=fig)
vectorsrc.mlab_source.dataset.point_data.scalars = adjdat
thres = mlab.pipeline.threshold(vectorsrc,
low=0.0001, up=np.max(A), figure=fig)
vectors = mlab.pipeline.vectors(thres, colormap='YlOrRd',
scale_mode='vector', figure=fig)
vectors.glyph.glyph.clamping = False
vectors.glyph.glyph.color_mode = 'color_by_scalar'
vectors.glyph.color_mode = 'color_by_scalar'
vectors.glyph.glyph_source.glyph_position = 'head'
vectors.actor.property.opacity = .7
if tube:
vectors.glyph.glyph_source.glyph_source = (vectors.glyph.glyph_source.
glyph_dict['cylinder_source'])
vectors.glyph.glyph_source.glyph_source.radius = 0.015
else:
vectors.glyph.glyph_source.glyph_source.glyph_type = 'dash'
return fig | [
"def",
"adjacency_plot_und",
"(",
"A",
",",
"coor",
",",
"tube",
"=",
"False",
")",
":",
"from",
"mayavi",
"import",
"mlab",
"n",
"=",
"len",
"(",
"A",
")",
"nr_edges",
"=",
"(",
"n",
"*",
"n",
"-",
"1",
")",
"//",
"2",
"#starts = np.zeros((nr_edges,... | This function in matlab is a visualization helper which translates an
adjacency matrix and an Nx3 matrix of spatial coordinates, and plots a
3D isometric network connecting the undirected unweighted nodes using a
specific plotting format. Including the formatted output is not useful at
all for bctpy since matplotlib will not be able to plot it in quite the
same way.
Instead of doing this, I have included code that will plot the adjacency
matrix onto nodes at the given spatial coordinates in mayavi
This routine is basically a less featureful version of the 3D brain in
cvu, the connectome visualization utility which I also maintain. cvu uses
freesurfer surfaces and annotations to get the node coordinates (rather
than leaving them up to the user) and has many other interactive
visualization features not included here for the sake of brevity.
There are other similar visualizations in the ConnectomeViewer and the
UCLA multimodal connectivity database.
Note that unlike other bctpy functions, this function depends on mayavi.
Paramaters
----------
A : NxN np.ndarray
adjacency matrix
coor : Nx3 np.ndarray
vector of node coordinates
tube : bool
plots using cylindrical tubes for higher resolution image. If True,
plots cylindrical tube sources. If False, plots line sources. Default
value is False.
Returns
-------
fig : Instance(Scene)
handle to a mayavi figure.
Notes
-----
To display the output interactively, call
fig=adjacency_plot_und(A,coor)
from mayavi import mlab
mlab.show()
Note: Thresholding the matrix is strongly recommended. It is recommended
that the input matrix have fewer than 5000 total connections in order to
achieve reasonable performance and noncluttered visualization. | [
"This",
"function",
"in",
"matlab",
"is",
"a",
"visualization",
"helper",
"which",
"translates",
"an",
"adjacency",
"matrix",
"and",
"an",
"Nx3",
"matrix",
"of",
"spatial",
"coordinates",
"and",
"plots",
"a",
"3D",
"isometric",
"network",
"connecting",
"the",
... | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/utils/visualization.py#L6-L116 | train | 202,415 |
aestrivex/bctpy | bct/utils/visualization.py | backbone_wu | def backbone_wu(CIJ, avgdeg):
'''
The network backbone contains the dominant connections in the network
and may be used to aid network visualization. This function computes
the backbone of a given weighted and undirected connection matrix CIJ,
using a minimum-spanning-tree based algorithm.
Parameters
----------
CIJ : NxN np.ndarray
weighted undirected connection matrix
avgdeg : int
desired average degree of backbone
Returns
-------
CIJtree : NxN np.ndarray
connection matrix of the minimum spanning tree of CIJ
CIJclus : NxN np.ndarray
connection matrix of the minimum spanning tree plus strongest
connections up to some average degree 'avgdeg'. Identical to CIJtree
if the degree requirement is already met.
Notes
-----
NOTE: nodes with zero strength are discarded.
NOTE: CIJclus will have a total average degree exactly equal to
(or very close to) 'avgdeg'.
NOTE: 'avgdeg' backfill is handled slightly differently than in Hagmann
et al 2008.
'''
n = len(CIJ)
if not np.all(CIJ == CIJ.T):
raise BCTParamError('backbone_wu can only be computed for undirected '
'matrices. If your matrix is has noise, correct it with np.around')
CIJtree = np.zeros((n, n))
# find strongest edge (if multiple edges are tied, use only first one)
i, j = np.where(np.max(CIJ) == CIJ)
im = [i[0], i[1]] # what? why take two values? doesnt that mess up multiples?
jm = [j[0], j[1]]
# copy into tree graph
CIJtree[im, jm] = CIJ[im, jm]
in_ = im
out = np.setdiff1d(range(n), in_)
# repeat n-2 times
for ix in range(n - 2):
CIJ_io = CIJ[np.ix_(in_, out)]
i, j = np.where(np.max(CIJ_io) == CIJ_io)
# i,j=np.where(np.max(CIJ[in_,out])==CIJ[in_,out])
print(i, j)
im = in_[i[0]]
jm = out[j[0]]
# copy into tree graph
CIJtree[im, jm] = CIJ[im, jm]
CIJtree[jm, im] = CIJ[jm, im]
in_ = np.append(in_, jm)
out = np.setdiff1d(range(n), in_)
# now add connections back with the total number of added connections
# determined by the desired avgdeg
CIJnotintree = CIJ * np.logical_not(CIJtree)
ix, = np.where(CIJnotintree.flat)
a = np.sort(CIJnotintree.flat[ix])[::-1]
cutoff = avgdeg * n - 2 * (n - 1) - 1
# if the avgdeg req is already satisfied, skip this
if cutoff >= np.size(a):
CIJclus = CIJtree.copy()
else:
thr = a[cutoff]
CIJclus = CIJtree + CIJnotintree * (CIJnotintree >= thr)
return CIJtree, CIJclus | python | def backbone_wu(CIJ, avgdeg):
'''
The network backbone contains the dominant connections in the network
and may be used to aid network visualization. This function computes
the backbone of a given weighted and undirected connection matrix CIJ,
using a minimum-spanning-tree based algorithm.
Parameters
----------
CIJ : NxN np.ndarray
weighted undirected connection matrix
avgdeg : int
desired average degree of backbone
Returns
-------
CIJtree : NxN np.ndarray
connection matrix of the minimum spanning tree of CIJ
CIJclus : NxN np.ndarray
connection matrix of the minimum spanning tree plus strongest
connections up to some average degree 'avgdeg'. Identical to CIJtree
if the degree requirement is already met.
Notes
-----
NOTE: nodes with zero strength are discarded.
NOTE: CIJclus will have a total average degree exactly equal to
(or very close to) 'avgdeg'.
NOTE: 'avgdeg' backfill is handled slightly differently than in Hagmann
et al 2008.
'''
n = len(CIJ)
if not np.all(CIJ == CIJ.T):
raise BCTParamError('backbone_wu can only be computed for undirected '
'matrices. If your matrix is has noise, correct it with np.around')
CIJtree = np.zeros((n, n))
# find strongest edge (if multiple edges are tied, use only first one)
i, j = np.where(np.max(CIJ) == CIJ)
im = [i[0], i[1]] # what? why take two values? doesnt that mess up multiples?
jm = [j[0], j[1]]
# copy into tree graph
CIJtree[im, jm] = CIJ[im, jm]
in_ = im
out = np.setdiff1d(range(n), in_)
# repeat n-2 times
for ix in range(n - 2):
CIJ_io = CIJ[np.ix_(in_, out)]
i, j = np.where(np.max(CIJ_io) == CIJ_io)
# i,j=np.where(np.max(CIJ[in_,out])==CIJ[in_,out])
print(i, j)
im = in_[i[0]]
jm = out[j[0]]
# copy into tree graph
CIJtree[im, jm] = CIJ[im, jm]
CIJtree[jm, im] = CIJ[jm, im]
in_ = np.append(in_, jm)
out = np.setdiff1d(range(n), in_)
# now add connections back with the total number of added connections
# determined by the desired avgdeg
CIJnotintree = CIJ * np.logical_not(CIJtree)
ix, = np.where(CIJnotintree.flat)
a = np.sort(CIJnotintree.flat[ix])[::-1]
cutoff = avgdeg * n - 2 * (n - 1) - 1
# if the avgdeg req is already satisfied, skip this
if cutoff >= np.size(a):
CIJclus = CIJtree.copy()
else:
thr = a[cutoff]
CIJclus = CIJtree + CIJnotintree * (CIJnotintree >= thr)
return CIJtree, CIJclus | [
"def",
"backbone_wu",
"(",
"CIJ",
",",
"avgdeg",
")",
":",
"n",
"=",
"len",
"(",
"CIJ",
")",
"if",
"not",
"np",
".",
"all",
"(",
"CIJ",
"==",
"CIJ",
".",
"T",
")",
":",
"raise",
"BCTParamError",
"(",
"'backbone_wu can only be computed for undirected '",
... | The network backbone contains the dominant connections in the network
and may be used to aid network visualization. This function computes
the backbone of a given weighted and undirected connection matrix CIJ,
using a minimum-spanning-tree based algorithm.
Parameters
----------
CIJ : NxN np.ndarray
weighted undirected connection matrix
avgdeg : int
desired average degree of backbone
Returns
-------
CIJtree : NxN np.ndarray
connection matrix of the minimum spanning tree of CIJ
CIJclus : NxN np.ndarray
connection matrix of the minimum spanning tree plus strongest
connections up to some average degree 'avgdeg'. Identical to CIJtree
if the degree requirement is already met.
Notes
-----
NOTE: nodes with zero strength are discarded.
NOTE: CIJclus will have a total average degree exactly equal to
(or very close to) 'avgdeg'.
NOTE: 'avgdeg' backfill is handled slightly differently than in Hagmann
et al 2008. | [
"The",
"network",
"backbone",
"contains",
"the",
"dominant",
"connections",
"in",
"the",
"network",
"and",
"may",
"be",
"used",
"to",
"aid",
"network",
"visualization",
".",
"This",
"function",
"computes",
"the",
"backbone",
"of",
"a",
"given",
"weighted",
"an... | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/utils/visualization.py#L268-L344 | train | 202,416 |
aestrivex/bctpy | bct/utils/visualization.py | reorderMAT | def reorderMAT(m, H=5000, cost='line'):
'''
This function reorders the connectivity matrix in order to place more
edges closer to the diagonal. This often helps in displaying community
structure, clusters, etc.
Parameters
----------
MAT : NxN np.ndarray
connection matrix
H : int
number of reordering attempts
cost : str
'line' or 'circ' for shape of lattice (linear or ring lattice).
Default is linear lattice.
Returns
-------
MATreordered : NxN np.ndarray
reordered connection matrix
MATindices : Nx1 np.ndarray
reordered indices
MATcost : float
objective function cost of reordered matrix
Notes
-----
I'm not 100% sure how the algorithms between this and reorder_matrix
differ, but this code looks a ton sketchier and might have had some minor
bugs in it. Considering reorder_matrix() does the same thing using a well
vetted simulated annealing algorithm, just use that. ~rlaplant
'''
from scipy import linalg, stats
m = m.copy()
n = len(m)
np.fill_diagonal(m, 0)
# generate cost function
if cost == 'line':
profile = stats.norm.pdf(range(1, n + 1), 0, n / 2)[::-1]
elif cost == 'circ':
profile = stats.norm.pdf(range(1, n + 1), n / 2, n / 4)[::-1]
else:
raise BCTParamError('dfun must be line or circ')
costf = linalg.toeplitz(profile, r=profile)
lowcost = np.sum(costf * m)
# keep track of starting configuration
m_start = m.copy()
starta = np.arange(n)
# reorder
for h in range(H):
a = np.arange(n)
# choose two positions and flip them
r1, r2 = rng.randint(n, size=(2,))
a[r1] = r2
a[r2] = r1
costnew = np.sum((m[np.ix_(a, a)]) * costf)
# if this reduced the overall cost
if costnew < lowcost:
m = m[np.ix_(a, a)]
r2_swap = starta[r2]
r1_swap = starta[r1]
starta[r1] = r2_swap
starta[r2] = r1_swap
lowcost = costnew
M_reordered = m_start[np.ix_(starta, starta)]
m_indices = starta
cost = lowcost
return M_reordered, m_indices, cost | python | def reorderMAT(m, H=5000, cost='line'):
'''
This function reorders the connectivity matrix in order to place more
edges closer to the diagonal. This often helps in displaying community
structure, clusters, etc.
Parameters
----------
MAT : NxN np.ndarray
connection matrix
H : int
number of reordering attempts
cost : str
'line' or 'circ' for shape of lattice (linear or ring lattice).
Default is linear lattice.
Returns
-------
MATreordered : NxN np.ndarray
reordered connection matrix
MATindices : Nx1 np.ndarray
reordered indices
MATcost : float
objective function cost of reordered matrix
Notes
-----
I'm not 100% sure how the algorithms between this and reorder_matrix
differ, but this code looks a ton sketchier and might have had some minor
bugs in it. Considering reorder_matrix() does the same thing using a well
vetted simulated annealing algorithm, just use that. ~rlaplant
'''
from scipy import linalg, stats
m = m.copy()
n = len(m)
np.fill_diagonal(m, 0)
# generate cost function
if cost == 'line':
profile = stats.norm.pdf(range(1, n + 1), 0, n / 2)[::-1]
elif cost == 'circ':
profile = stats.norm.pdf(range(1, n + 1), n / 2, n / 4)[::-1]
else:
raise BCTParamError('dfun must be line or circ')
costf = linalg.toeplitz(profile, r=profile)
lowcost = np.sum(costf * m)
# keep track of starting configuration
m_start = m.copy()
starta = np.arange(n)
# reorder
for h in range(H):
a = np.arange(n)
# choose two positions and flip them
r1, r2 = rng.randint(n, size=(2,))
a[r1] = r2
a[r2] = r1
costnew = np.sum((m[np.ix_(a, a)]) * costf)
# if this reduced the overall cost
if costnew < lowcost:
m = m[np.ix_(a, a)]
r2_swap = starta[r2]
r1_swap = starta[r1]
starta[r1] = r2_swap
starta[r2] = r1_swap
lowcost = costnew
M_reordered = m_start[np.ix_(starta, starta)]
m_indices = starta
cost = lowcost
return M_reordered, m_indices, cost | [
"def",
"reorderMAT",
"(",
"m",
",",
"H",
"=",
"5000",
",",
"cost",
"=",
"'line'",
")",
":",
"from",
"scipy",
"import",
"linalg",
",",
"stats",
"m",
"=",
"m",
".",
"copy",
"(",
")",
"n",
"=",
"len",
"(",
"m",
")",
"np",
".",
"fill_diagonal",
"("... | This function reorders the connectivity matrix in order to place more
edges closer to the diagonal. This often helps in displaying community
structure, clusters, etc.
Parameters
----------
MAT : NxN np.ndarray
connection matrix
H : int
number of reordering attempts
cost : str
'line' or 'circ' for shape of lattice (linear or ring lattice).
Default is linear lattice.
Returns
-------
MATreordered : NxN np.ndarray
reordered connection matrix
MATindices : Nx1 np.ndarray
reordered indices
MATcost : float
objective function cost of reordered matrix
Notes
-----
I'm not 100% sure how the algorithms between this and reorder_matrix
differ, but this code looks a ton sketchier and might have had some minor
bugs in it. Considering reorder_matrix() does the same thing using a well
vetted simulated annealing algorithm, just use that. ~rlaplant | [
"This",
"function",
"reorders",
"the",
"connectivity",
"matrix",
"in",
"order",
"to",
"place",
"more",
"edges",
"closer",
"to",
"the",
"diagonal",
".",
"This",
"often",
"helps",
"in",
"displaying",
"community",
"structure",
"clusters",
"etc",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/utils/visualization.py#L402-L473 | train | 202,417 |
aestrivex/bctpy | bct/utils/visualization.py | reorder_matrix | def reorder_matrix(m1, cost='line', verbose=False, H=1e4, Texp=10, T0=1e-3, Hbrk=10):
'''
This function rearranges the nodes in matrix M1 such that the matrix
elements are squeezed along the main diagonal. The function uses a
version of simulated annealing.
Parameters
----------
M1 : NxN np.ndarray
connection matrix weighted/binary directed/undirected
cost : str
'line' or 'circ' for shape of lattice (linear or ring lattice).
Default is linear lattice.
verbose : bool
print out cost at each iteration. Default False.
H : int
annealing parameter, default value 1e6
Texp : int
annealing parameter, default value 1. Coefficient of H s.t.
Texp0=1-Texp/H
T0 : float
annealing parameter, default value 1e-3
Hbrk : int
annealing parameter, default value = 10. Coefficient of H s.t.
Hbrk0 = H/Hkbr
Returns
-------
Mreordered : NxN np.ndarray
reordered connection matrix
Mindices : Nx1 np.ndarray
reordered indices
Mcost : float
objective function cost of reordered matrix
Notes
-----
Note that in general, the outcome will depend on the initial condition
(the setting of the random number seed). Also, there is no good way to
determine optimal annealing parameters in advance - these paramters
will need to be adjusted "by hand" (particularly H, Texp, and T0).
For large and/or dense matrices, it is highly recommended to perform
exploratory runs varying the settings of 'H' and 'Texp' and then select
the best values.
Based on extensive testing, it appears that T0 and Hbrk can remain
unchanged in most cases. Texp may be varied from 1-1/H to 1-10/H, for
example. H is the most important parameter - set to larger values as
the problem size increases. It is advisable to run this function
multiple times and select the solution(s) with the lowest 'cost'.
Setting 'Texp' to zero cancels annealing and uses a greedy algorithm
instead.
'''
from scipy import linalg, stats
n = len(m1)
if n < 2:
raise BCTParamError("align_matrix will infinite loop on a singleton "
"or null matrix.")
# generate cost function
if cost == 'line':
profile = stats.norm.pdf(range(1, n + 1), loc=0, scale=n / 2)[::-1]
elif cost == 'circ':
profile = stats.norm.pdf(
range(1, n + 1), loc=n / 2, scale=n / 4)[::-1]
else:
raise BCTParamError('cost must be line or circ')
costf = linalg.toeplitz(profile, r=profile) * np.logical_not(np.eye(n))
costf /= np.sum(costf)
# establish maxcost, lowcost, mincost
maxcost = np.sum(np.sort(costf.flat) * np.sort(m1.flat))
lowcost = np.sum(m1 * costf) / maxcost
mincost = lowcost
# initialize
anew = np.arange(n)
amin = np.arange(n)
h = 0
hcnt = 0
# adjust annealing parameters
# H determines the maximal number of steps (user specified)
# Texp determines the steepness of the temperature gradient
Texp = 1 - Texp / H
# T0 sets the initial temperature and scales the energy term (user provided)
# Hbrk sets a break point for the stimulation
Hbrk = H / Hbrk
while h < H:
h += 1
hcnt += 1
# terminate if no new mincost has been found for some time
if hcnt > Hbrk:
break
T = T0 * Texp**h
atmp = anew.copy()
r1, r2 = rng.randint(n, size=(2,))
while r1 == r2:
r2 = rng.randint(n)
atmp[r1] = anew[r2]
atmp[r2] = anew[r1]
costnew = np.sum((m1[np.ix_(atmp, atmp)]) * costf) / maxcost
# annealing
if costnew < lowcost or rng.random_sample() < np.exp(-(costnew - lowcost) / T):
anew = atmp
lowcost = costnew
# is this a new absolute best?
if lowcost < mincost:
amin = anew
mincost = lowcost
if verbose:
print('step %i ... current lowest cost = %f' % (h, mincost))
hcnt = 0
if verbose:
print('step %i ... final lowest cost = %f' % (h, mincost))
M_reordered = m1[np.ix_(amin, amin)]
M_indices = amin
cost = mincost
return M_reordered, M_indices, cost | python | def reorder_matrix(m1, cost='line', verbose=False, H=1e4, Texp=10, T0=1e-3, Hbrk=10):
'''
This function rearranges the nodes in matrix M1 such that the matrix
elements are squeezed along the main diagonal. The function uses a
version of simulated annealing.
Parameters
----------
M1 : NxN np.ndarray
connection matrix weighted/binary directed/undirected
cost : str
'line' or 'circ' for shape of lattice (linear or ring lattice).
Default is linear lattice.
verbose : bool
print out cost at each iteration. Default False.
H : int
annealing parameter, default value 1e6
Texp : int
annealing parameter, default value 1. Coefficient of H s.t.
Texp0=1-Texp/H
T0 : float
annealing parameter, default value 1e-3
Hbrk : int
annealing parameter, default value = 10. Coefficient of H s.t.
Hbrk0 = H/Hkbr
Returns
-------
Mreordered : NxN np.ndarray
reordered connection matrix
Mindices : Nx1 np.ndarray
reordered indices
Mcost : float
objective function cost of reordered matrix
Notes
-----
Note that in general, the outcome will depend on the initial condition
(the setting of the random number seed). Also, there is no good way to
determine optimal annealing parameters in advance - these paramters
will need to be adjusted "by hand" (particularly H, Texp, and T0).
For large and/or dense matrices, it is highly recommended to perform
exploratory runs varying the settings of 'H' and 'Texp' and then select
the best values.
Based on extensive testing, it appears that T0 and Hbrk can remain
unchanged in most cases. Texp may be varied from 1-1/H to 1-10/H, for
example. H is the most important parameter - set to larger values as
the problem size increases. It is advisable to run this function
multiple times and select the solution(s) with the lowest 'cost'.
Setting 'Texp' to zero cancels annealing and uses a greedy algorithm
instead.
'''
from scipy import linalg, stats
n = len(m1)
if n < 2:
raise BCTParamError("align_matrix will infinite loop on a singleton "
"or null matrix.")
# generate cost function
if cost == 'line':
profile = stats.norm.pdf(range(1, n + 1), loc=0, scale=n / 2)[::-1]
elif cost == 'circ':
profile = stats.norm.pdf(
range(1, n + 1), loc=n / 2, scale=n / 4)[::-1]
else:
raise BCTParamError('cost must be line or circ')
costf = linalg.toeplitz(profile, r=profile) * np.logical_not(np.eye(n))
costf /= np.sum(costf)
# establish maxcost, lowcost, mincost
maxcost = np.sum(np.sort(costf.flat) * np.sort(m1.flat))
lowcost = np.sum(m1 * costf) / maxcost
mincost = lowcost
# initialize
anew = np.arange(n)
amin = np.arange(n)
h = 0
hcnt = 0
# adjust annealing parameters
# H determines the maximal number of steps (user specified)
# Texp determines the steepness of the temperature gradient
Texp = 1 - Texp / H
# T0 sets the initial temperature and scales the energy term (user provided)
# Hbrk sets a break point for the stimulation
Hbrk = H / Hbrk
while h < H:
h += 1
hcnt += 1
# terminate if no new mincost has been found for some time
if hcnt > Hbrk:
break
T = T0 * Texp**h
atmp = anew.copy()
r1, r2 = rng.randint(n, size=(2,))
while r1 == r2:
r2 = rng.randint(n)
atmp[r1] = anew[r2]
atmp[r2] = anew[r1]
costnew = np.sum((m1[np.ix_(atmp, atmp)]) * costf) / maxcost
# annealing
if costnew < lowcost or rng.random_sample() < np.exp(-(costnew - lowcost) / T):
anew = atmp
lowcost = costnew
# is this a new absolute best?
if lowcost < mincost:
amin = anew
mincost = lowcost
if verbose:
print('step %i ... current lowest cost = %f' % (h, mincost))
hcnt = 0
if verbose:
print('step %i ... final lowest cost = %f' % (h, mincost))
M_reordered = m1[np.ix_(amin, amin)]
M_indices = amin
cost = mincost
return M_reordered, M_indices, cost | [
"def",
"reorder_matrix",
"(",
"m1",
",",
"cost",
"=",
"'line'",
",",
"verbose",
"=",
"False",
",",
"H",
"=",
"1e4",
",",
"Texp",
"=",
"10",
",",
"T0",
"=",
"1e-3",
",",
"Hbrk",
"=",
"10",
")",
":",
"from",
"scipy",
"import",
"linalg",
",",
"stats... | This function rearranges the nodes in matrix M1 such that the matrix
elements are squeezed along the main diagonal. The function uses a
version of simulated annealing.
Parameters
----------
M1 : NxN np.ndarray
connection matrix weighted/binary directed/undirected
cost : str
'line' or 'circ' for shape of lattice (linear or ring lattice).
Default is linear lattice.
verbose : bool
print out cost at each iteration. Default False.
H : int
annealing parameter, default value 1e6
Texp : int
annealing parameter, default value 1. Coefficient of H s.t.
Texp0=1-Texp/H
T0 : float
annealing parameter, default value 1e-3
Hbrk : int
annealing parameter, default value = 10. Coefficient of H s.t.
Hbrk0 = H/Hkbr
Returns
-------
Mreordered : NxN np.ndarray
reordered connection matrix
Mindices : Nx1 np.ndarray
reordered indices
Mcost : float
objective function cost of reordered matrix
Notes
-----
Note that in general, the outcome will depend on the initial condition
(the setting of the random number seed). Also, there is no good way to
determine optimal annealing parameters in advance - these paramters
will need to be adjusted "by hand" (particularly H, Texp, and T0).
For large and/or dense matrices, it is highly recommended to perform
exploratory runs varying the settings of 'H' and 'Texp' and then select
the best values.
Based on extensive testing, it appears that T0 and Hbrk can remain
unchanged in most cases. Texp may be varied from 1-1/H to 1-10/H, for
example. H is the most important parameter - set to larger values as
the problem size increases. It is advisable to run this function
multiple times and select the solution(s) with the lowest 'cost'.
Setting 'Texp' to zero cancels annealing and uses a greedy algorithm
instead. | [
"This",
"function",
"rearranges",
"the",
"nodes",
"in",
"matrix",
"M1",
"such",
"that",
"the",
"matrix",
"elements",
"are",
"squeezed",
"along",
"the",
"main",
"diagonal",
".",
"The",
"function",
"uses",
"a",
"version",
"of",
"simulated",
"annealing",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/utils/visualization.py#L476-L599 | train | 202,418 |
aestrivex/bctpy | bct/utils/visualization.py | writetoPAJ | def writetoPAJ(CIJ, fname, directed):
'''
This function writes a Pajek .net file from a numpy matrix
Parameters
----------
CIJ : NxN np.ndarray
adjacency matrix
fname : str
filename
directed : bool
True if the network is directed and False otherwise. The data format
may be required to know this for some reason so I am afraid to just
use directed as the default value.
'''
n = np.size(CIJ, axis=0)
with open(fname, 'w') as fd:
fd.write('*vertices %i \r' % n)
for i in range(1, n + 1):
fd.write('%i "%i" \r' % (i, i))
if directed:
fd.write('*arcs \r')
else:
fd.write('*edges \r')
for i in range(n):
for j in range(n):
if CIJ[i, j] != 0:
fd.write('%i %i %.6f \r' % (i + 1, j + 1, CIJ[i, j])) | python | def writetoPAJ(CIJ, fname, directed):
'''
This function writes a Pajek .net file from a numpy matrix
Parameters
----------
CIJ : NxN np.ndarray
adjacency matrix
fname : str
filename
directed : bool
True if the network is directed and False otherwise. The data format
may be required to know this for some reason so I am afraid to just
use directed as the default value.
'''
n = np.size(CIJ, axis=0)
with open(fname, 'w') as fd:
fd.write('*vertices %i \r' % n)
for i in range(1, n + 1):
fd.write('%i "%i" \r' % (i, i))
if directed:
fd.write('*arcs \r')
else:
fd.write('*edges \r')
for i in range(n):
for j in range(n):
if CIJ[i, j] != 0:
fd.write('%i %i %.6f \r' % (i + 1, j + 1, CIJ[i, j])) | [
"def",
"writetoPAJ",
"(",
"CIJ",
",",
"fname",
",",
"directed",
")",
":",
"n",
"=",
"np",
".",
"size",
"(",
"CIJ",
",",
"axis",
"=",
"0",
")",
"with",
"open",
"(",
"fname",
",",
"'w'",
")",
"as",
"fd",
":",
"fd",
".",
"write",
"(",
"'*vertices ... | This function writes a Pajek .net file from a numpy matrix
Parameters
----------
CIJ : NxN np.ndarray
adjacency matrix
fname : str
filename
directed : bool
True if the network is directed and False otherwise. The data format
may be required to know this for some reason so I am afraid to just
use directed as the default value. | [
"This",
"function",
"writes",
"a",
"Pajek",
".",
"net",
"file",
"from",
"a",
"numpy",
"matrix"
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/utils/visualization.py#L745-L772 | train | 202,419 |
aestrivex/bctpy | bct/algorithms/reference.py | makeevenCIJ | def makeevenCIJ(n, k, sz_cl, seed=None):
'''
This function generates a random, directed network with a specified
number of fully connected modules linked together by evenly distributed
remaining random connections.
Parameters
----------
N : int
number of vertices (must be power of 2)
K : int
number of edges
sz_cl : int
size of clusters (must be power of 2)
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
N must be a power of 2.
A warning is generated if all modules contain more edges than K.
Cluster size is 2^sz_cl;
'''
rng = get_rng(seed)
# compute number of hierarchical levels and adjust cluster size
mx_lvl = int(np.floor(np.log2(n)))
sz_cl -= 1
# make a stupid little template
t = np.ones((2, 2)) * 2
# check n against the number of levels
Nlvl = 2**mx_lvl
if Nlvl != n:
print("Warning: n must be a power of 2")
n = Nlvl
# create hierarchical template
for lvl in range(1, mx_lvl):
s = 2**(lvl + 1)
CIJ = np.ones((s, s))
grp1 = range(int(s / 2))
grp2 = range(int(s / 2), s)
ix1 = np.add.outer(np.array(grp1) * s, grp1).flatten()
ix2 = np.add.outer(np.array(grp2) * s, grp2).flatten()
CIJ.flat[ix1] = t # numpy indexing is teh sucks :(
CIJ.flat[ix2] = t
CIJ += 1
t = CIJ.copy()
CIJ -= (np.ones((s, s)) + mx_lvl * np.eye(s))
# assign connection probabilities
CIJp = (CIJ >= (mx_lvl - sz_cl))
# determine nr of non-cluster connections left and their possible positions
rem_k = k - np.size(np.where(CIJp.flatten()))
if rem_k < 0:
print("Warning: K is too small, output matrix contains clusters only")
return CIJp
a, b = np.where(np.logical_not(CIJp + np.eye(n)))
# assign remK randomly dstributed connections
rp = rng.permutation(len(a))
a = a[rp[:rem_k]]
b = b[rp[:rem_k]]
for ai, bi in zip(a, b):
CIJp[ai, bi] = 1
return np.array(CIJp, dtype=int) | python | def makeevenCIJ(n, k, sz_cl, seed=None):
'''
This function generates a random, directed network with a specified
number of fully connected modules linked together by evenly distributed
remaining random connections.
Parameters
----------
N : int
number of vertices (must be power of 2)
K : int
number of edges
sz_cl : int
size of clusters (must be power of 2)
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
N must be a power of 2.
A warning is generated if all modules contain more edges than K.
Cluster size is 2^sz_cl;
'''
rng = get_rng(seed)
# compute number of hierarchical levels and adjust cluster size
mx_lvl = int(np.floor(np.log2(n)))
sz_cl -= 1
# make a stupid little template
t = np.ones((2, 2)) * 2
# check n against the number of levels
Nlvl = 2**mx_lvl
if Nlvl != n:
print("Warning: n must be a power of 2")
n = Nlvl
# create hierarchical template
for lvl in range(1, mx_lvl):
s = 2**(lvl + 1)
CIJ = np.ones((s, s))
grp1 = range(int(s / 2))
grp2 = range(int(s / 2), s)
ix1 = np.add.outer(np.array(grp1) * s, grp1).flatten()
ix2 = np.add.outer(np.array(grp2) * s, grp2).flatten()
CIJ.flat[ix1] = t # numpy indexing is teh sucks :(
CIJ.flat[ix2] = t
CIJ += 1
t = CIJ.copy()
CIJ -= (np.ones((s, s)) + mx_lvl * np.eye(s))
# assign connection probabilities
CIJp = (CIJ >= (mx_lvl - sz_cl))
# determine nr of non-cluster connections left and their possible positions
rem_k = k - np.size(np.where(CIJp.flatten()))
if rem_k < 0:
print("Warning: K is too small, output matrix contains clusters only")
return CIJp
a, b = np.where(np.logical_not(CIJp + np.eye(n)))
# assign remK randomly dstributed connections
rp = rng.permutation(len(a))
a = a[rp[:rem_k]]
b = b[rp[:rem_k]]
for ai, bi in zip(a, b):
CIJp[ai, bi] = 1
return np.array(CIJp, dtype=int) | [
"def",
"makeevenCIJ",
"(",
"n",
",",
"k",
",",
"sz_cl",
",",
"seed",
"=",
"None",
")",
":",
"rng",
"=",
"get_rng",
"(",
"seed",
")",
"# compute number of hierarchical levels and adjust cluster size",
"mx_lvl",
"=",
"int",
"(",
"np",
".",
"floor",
"(",
"np",
... | This function generates a random, directed network with a specified
number of fully connected modules linked together by evenly distributed
remaining random connections.
Parameters
----------
N : int
number of vertices (must be power of 2)
K : int
number of edges
sz_cl : int
size of clusters (must be power of 2)
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
connection matrix
Notes
-----
N must be a power of 2.
A warning is generated if all modules contain more edges than K.
Cluster size is 2^sz_cl; | [
"This",
"function",
"generates",
"a",
"random",
"directed",
"network",
"with",
"a",
"specified",
"number",
"of",
"fully",
"connected",
"modules",
"linked",
"together",
"by",
"evenly",
"distributed",
"remaining",
"random",
"connections",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/reference.py#L467-L542 | train | 202,420 |
aestrivex/bctpy | bct/algorithms/reference.py | makerandCIJdegreesfixed | def makerandCIJdegreesfixed(inv, outv, seed=None):
'''
This function generates a directed random network with a specified
in-degree and out-degree sequence.
Parameters
----------
inv : Nx1 np.ndarray
in-degree vector
outv : Nx1 np.ndarray
out-degree vector
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
Notes
-----
Necessary conditions include:
length(in) = length(out) = n
sum(in) = sum(out) = k
in(i), out(i) < n-1
in(i) + out(j) < n+2
in(i) + out(i) < n
No connections are placed on the main diagonal
The algorithm used in this function is not, technically, guaranteed to
terminate. If a valid distribution of in and out degrees is provided,
this function will find it in bounded time with probability
1-(1/(2*(k^2))). This turns out to be a serious problem when
computing infinite degree matrices, but offers good performance
otherwise.
'''
rng = get_rng(seed)
n = len(inv)
k = np.sum(inv)
in_inv = np.zeros((k,))
out_inv = np.zeros((k,))
i_in = 0
i_out = 0
for i in range(n):
in_inv[i_in:i_in + inv[i]] = i
out_inv[i_out:i_out + outv[i]] = i
i_in += inv[i]
i_out += outv[i]
CIJ = np.eye(n)
edges = np.array((out_inv, in_inv[rng.permutation(k)]))
# create CIJ and check for double edges and self connections
for i in range(k):
if CIJ[edges[0, i], edges[1, i]]:
tried = set()
while True:
if len(tried) == k:
raise BCTParamError('Could not resolve the given '
'in and out vectors')
switch = rng.randint(k)
while switch in tried:
switch = rng.randint(k)
if not (CIJ[edges[0, i], edges[1, switch]] or
CIJ[edges[0, switch], edges[1, i]]):
CIJ[edges[0, switch], edges[1, switch]] = 0
CIJ[edges[0, switch], edges[1, i]] = 1
if switch < i:
CIJ[edges[0, switch], edges[1, switch]] = 0
CIJ[edges[0, switch], edges[1, i]] = 1
t = edges[1, i]
edges[1, i] = edges[1, switch]
edges[1, switch] = t
break
tried.add(switch)
else:
CIJ[edges[0, i], edges[1, i]] = 1
CIJ -= np.eye(n)
return CIJ | python | def makerandCIJdegreesfixed(inv, outv, seed=None):
'''
This function generates a directed random network with a specified
in-degree and out-degree sequence.
Parameters
----------
inv : Nx1 np.ndarray
in-degree vector
outv : Nx1 np.ndarray
out-degree vector
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
Notes
-----
Necessary conditions include:
length(in) = length(out) = n
sum(in) = sum(out) = k
in(i), out(i) < n-1
in(i) + out(j) < n+2
in(i) + out(i) < n
No connections are placed on the main diagonal
The algorithm used in this function is not, technically, guaranteed to
terminate. If a valid distribution of in and out degrees is provided,
this function will find it in bounded time with probability
1-(1/(2*(k^2))). This turns out to be a serious problem when
computing infinite degree matrices, but offers good performance
otherwise.
'''
rng = get_rng(seed)
n = len(inv)
k = np.sum(inv)
in_inv = np.zeros((k,))
out_inv = np.zeros((k,))
i_in = 0
i_out = 0
for i in range(n):
in_inv[i_in:i_in + inv[i]] = i
out_inv[i_out:i_out + outv[i]] = i
i_in += inv[i]
i_out += outv[i]
CIJ = np.eye(n)
edges = np.array((out_inv, in_inv[rng.permutation(k)]))
# create CIJ and check for double edges and self connections
for i in range(k):
if CIJ[edges[0, i], edges[1, i]]:
tried = set()
while True:
if len(tried) == k:
raise BCTParamError('Could not resolve the given '
'in and out vectors')
switch = rng.randint(k)
while switch in tried:
switch = rng.randint(k)
if not (CIJ[edges[0, i], edges[1, switch]] or
CIJ[edges[0, switch], edges[1, i]]):
CIJ[edges[0, switch], edges[1, switch]] = 0
CIJ[edges[0, switch], edges[1, i]] = 1
if switch < i:
CIJ[edges[0, switch], edges[1, switch]] = 0
CIJ[edges[0, switch], edges[1, i]] = 1
t = edges[1, i]
edges[1, i] = edges[1, switch]
edges[1, switch] = t
break
tried.add(switch)
else:
CIJ[edges[0, i], edges[1, i]] = 1
CIJ -= np.eye(n)
return CIJ | [
"def",
"makerandCIJdegreesfixed",
"(",
"inv",
",",
"outv",
",",
"seed",
"=",
"None",
")",
":",
"rng",
"=",
"get_rng",
"(",
"seed",
")",
"n",
"=",
"len",
"(",
"inv",
")",
"k",
"=",
"np",
".",
"sum",
"(",
"inv",
")",
"in_inv",
"=",
"np",
".",
"ze... | This function generates a directed random network with a specified
in-degree and out-degree sequence.
Parameters
----------
inv : Nx1 np.ndarray
in-degree vector
outv : Nx1 np.ndarray
out-degree vector
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
Notes
-----
Necessary conditions include:
length(in) = length(out) = n
sum(in) = sum(out) = k
in(i), out(i) < n-1
in(i) + out(j) < n+2
in(i) + out(i) < n
No connections are placed on the main diagonal
The algorithm used in this function is not, technically, guaranteed to
terminate. If a valid distribution of in and out degrees is provided,
this function will find it in bounded time with probability
1-(1/(2*(k^2))). This turns out to be a serious problem when
computing infinite degree matrices, but offers good performance
otherwise. | [
"This",
"function",
"generates",
"a",
"directed",
"random",
"network",
"with",
"a",
"specified",
"in",
"-",
"degree",
"and",
"out",
"-",
"degree",
"sequence",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/reference.py#L604-L686 | train | 202,421 |
aestrivex/bctpy | bct/algorithms/reference.py | makerandCIJ_dir | def makerandCIJ_dir(n, k, seed=None):
'''
This function generates a directed random network
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
directed random connection matrix
Notes
-----
no connections are placed on the main diagonal.
'''
rng = get_rng(seed)
ix, = np.where(np.logical_not(np.eye(n)).flat)
rp = rng.permutation(np.size(ix))
CIJ = np.zeros((n, n))
CIJ.flat[ix[rp][:k]] = 1
return CIJ | python | def makerandCIJ_dir(n, k, seed=None):
'''
This function generates a directed random network
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
directed random connection matrix
Notes
-----
no connections are placed on the main diagonal.
'''
rng = get_rng(seed)
ix, = np.where(np.logical_not(np.eye(n)).flat)
rp = rng.permutation(np.size(ix))
CIJ = np.zeros((n, n))
CIJ.flat[ix[rp][:k]] = 1
return CIJ | [
"def",
"makerandCIJ_dir",
"(",
"n",
",",
"k",
",",
"seed",
"=",
"None",
")",
":",
"rng",
"=",
"get_rng",
"(",
"seed",
")",
"ix",
",",
"=",
"np",
".",
"where",
"(",
"np",
".",
"logical_not",
"(",
"np",
".",
"eye",
"(",
"n",
")",
")",
".",
"fla... | This function generates a directed random network
Parameters
----------
N : int
number of vertices
K : int
number of edges
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
CIJ : NxN np.ndarray
directed random connection matrix
Notes
-----
no connections are placed on the main diagonal. | [
"This",
"function",
"generates",
"a",
"directed",
"random",
"network"
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/reference.py#L689-L718 | train | 202,422 |
aestrivex/bctpy | bct/algorithms/reference.py | randmio_dir | def randmio_dir(R, itr, seed=None):
'''
This function randomizes a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions.
Parameters
----------
W : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
rng = get_rng(seed)
R = R.copy()
n = len(R)
i, j = np.where(R)
k = len(i)
itr *= k
max_attempts = np.round(n * k / (n * (n - 1)))
eff = 0
for it in range(int(itr)):
att = 0
while att <= max_attempts: # while not rewired
while True:
e1 = rng.randint(k)
e2 = rng.randint(k)
while e1 == e2:
e2 = rng.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
# rewiring condition
if not (R[a, d] or R[c, b]):
R[a, d] = R[a, b]
R[a, b] = 0
R[c, b] = R[c, d]
R[c, d] = 0
i.setflags(write=True)
j.setflags(write=True)
i[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
return R, eff | python | def randmio_dir(R, itr, seed=None):
'''
This function randomizes a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions.
Parameters
----------
W : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
rng = get_rng(seed)
R = R.copy()
n = len(R)
i, j = np.where(R)
k = len(i)
itr *= k
max_attempts = np.round(n * k / (n * (n - 1)))
eff = 0
for it in range(int(itr)):
att = 0
while att <= max_attempts: # while not rewired
while True:
e1 = rng.randint(k)
e2 = rng.randint(k)
while e1 == e2:
e2 = rng.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
# rewiring condition
if not (R[a, d] or R[c, b]):
R[a, d] = R[a, b]
R[a, b] = 0
R[c, b] = R[c, d]
R[c, d] = 0
i.setflags(write=True)
j.setflags(write=True)
i[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
return R, eff | [
"def",
"randmio_dir",
"(",
"R",
",",
"itr",
",",
"seed",
"=",
"None",
")",
":",
"rng",
"=",
"get_rng",
"(",
"seed",
")",
"R",
"=",
"R",
".",
"copy",
"(",
")",
"n",
"=",
"len",
"(",
"R",
")",
"i",
",",
"j",
"=",
"np",
".",
"where",
"(",
"R... | This function randomizes a directed network, while preserving the in-
and out-degree distributions. In weighted networks, the function
preserves the out-strength but not the in-strength distributions.
Parameters
----------
W : NxN np.ndarray
directed binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out | [
"This",
"function",
"randomizes",
"a",
"directed",
"network",
"while",
"preserving",
"the",
"in",
"-",
"and",
"out",
"-",
"degree",
"distributions",
".",
"In",
"weighted",
"networks",
"the",
"function",
"preserves",
"the",
"out",
"-",
"strength",
"but",
"not",... | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/reference.py#L1199-L1263 | train | 202,423 |
aestrivex/bctpy | bct/algorithms/reference.py | randmio_und | def randmio_und(R, itr, seed=None):
'''
This function randomizes an undirected network, while preserving the
degree distribution. The function does not preserve the strength
distribution in weighted networks.
Parameters
----------
W : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
if not np.all(R == R.T):
raise BCTParamError("Input must be undirected")
rng = get_rng(seed)
R = R.copy()
n = len(R)
i, j = np.where(np.tril(R))
k = len(i)
itr *= k
# maximum number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1)))
# actual number of successful rewirings
eff = 0
for it in range(int(itr)):
att = 0
while att <= max_attempts: # while not rewired
while True:
e1, e2 = rng.randint(k, size=(2,))
while e1 == e2:
e2 = rng.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
if rng.random_sample() > .5:
i.setflags(write=True)
j.setflags(write=True)
i[e2] = d
j[e2] = c # flip edge c-d with 50% probability
c = i[e2]
d = j[e2] # to explore all potential rewirings
# rewiring condition
if not (R[a, d] or R[c, b]):
R[a, d] = R[a, b]
R[a, b] = 0
R[d, a] = R[b, a]
R[b, a] = 0
R[c, b] = R[c, d]
R[c, d] = 0
R[b, c] = R[d, c]
R[d, c] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
return R, eff | python | def randmio_und(R, itr, seed=None):
'''
This function randomizes an undirected network, while preserving the
degree distribution. The function does not preserve the strength
distribution in weighted networks.
Parameters
----------
W : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out
'''
if not np.all(R == R.T):
raise BCTParamError("Input must be undirected")
rng = get_rng(seed)
R = R.copy()
n = len(R)
i, j = np.where(np.tril(R))
k = len(i)
itr *= k
# maximum number of rewiring attempts per iteration
max_attempts = np.round(n * k / (n * (n - 1)))
# actual number of successful rewirings
eff = 0
for it in range(int(itr)):
att = 0
while att <= max_attempts: # while not rewired
while True:
e1, e2 = rng.randint(k, size=(2,))
while e1 == e2:
e2 = rng.randint(k)
a = i[e1]
b = j[e1]
c = i[e2]
d = j[e2]
if a != c and a != d and b != c and b != d:
break # all 4 vertices must be different
if rng.random_sample() > .5:
i.setflags(write=True)
j.setflags(write=True)
i[e2] = d
j[e2] = c # flip edge c-d with 50% probability
c = i[e2]
d = j[e2] # to explore all potential rewirings
# rewiring condition
if not (R[a, d] or R[c, b]):
R[a, d] = R[a, b]
R[a, b] = 0
R[d, a] = R[b, a]
R[b, a] = 0
R[c, b] = R[c, d]
R[c, d] = 0
R[b, c] = R[d, c]
R[d, c] = 0
j.setflags(write=True)
j[e1] = d
j[e2] = b # reassign edge indices
eff += 1
break
att += 1
return R, eff | [
"def",
"randmio_und",
"(",
"R",
",",
"itr",
",",
"seed",
"=",
"None",
")",
":",
"if",
"not",
"np",
".",
"all",
"(",
"R",
"==",
"R",
".",
"T",
")",
":",
"raise",
"BCTParamError",
"(",
"\"Input must be undirected\"",
")",
"rng",
"=",
"get_rng",
"(",
... | This function randomizes an undirected network, while preserving the
degree distribution. The function does not preserve the strength
distribution in weighted networks.
Parameters
----------
W : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
R : NxN np.ndarray
randomized network
eff : int
number of actual rewirings carried out | [
"This",
"function",
"randomizes",
"an",
"undirected",
"network",
"while",
"preserving",
"the",
"degree",
"distribution",
".",
"The",
"function",
"does",
"not",
"preserve",
"the",
"strength",
"distribution",
"in",
"weighted",
"networks",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/reference.py#L1460-L1538 | train | 202,424 |
def randmio_und_signed(R, itr, seed=None):
    '''
    Randomize an undirected weighted network containing both positive and
    negative weights, while simultaneously preserving the degree
    distributions of the positive and the negative weights. The strength
    distribution is not preserved.

    Parameters
    ----------
    W : NxN np.ndarray
        undirected binary/weighted connection matrix
    itr : int
        rewiring parameter. Each edge is rewired approximately itr times.
    seed : hashable, optional
        If None (default), use the np.random's global random state to generate random numbers.
        Otherwise, use a new np.random.RandomState instance seeded with the given value.

    Returns
    -------
    R : NxN np.ndarray
        randomized network
    eff : int
        number of rewirings actually carried out
    '''
    rng = get_rng(seed)
    W = R.copy()
    n = len(W)
    # each edge should be rewired approximately itr times
    total_iters = int(itr * int(n * (n - 1) / 2))
    max_attempts = int(np.round(n / 2))
    eff = 0
    for _ in range(total_iters):
        for _attempt in range(max_attempts + 1):
            a, b, c, d = pick_four_unique_nodes_quickly(n, rng)
            w_ab = W[a, b]
            w_cd = W[c, d]
            w_ad = W[a, d]
            w_cb = W[c, b]
            # rewire only when the swap keeps each edge's sign class intact
            if (np.sign(w_ab) == np.sign(w_cd) and
                    np.sign(w_ad) == np.sign(w_cb) and
                    np.sign(w_ab) != np.sign(w_ad)):
                W[a, d] = W[d, a] = w_ab
                W[a, b] = W[b, a] = w_ad
                W[c, b] = W[b, c] = w_cd
                W[c, d] = W[d, c] = w_cb
                eff += 1
                break
    return W, eff
'''
This function randomizes an undirected weighted network with positive
and negative weights, while simultaneously preserving the degree
distribution of positive and negative weights. The function does not
preserve the strength distribution in weighted networks.
Parameters
----------
W : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
R : NxN np.ndarray
randomized network
'''
rng = get_rng(seed)
R = R.copy()
n = len(R)
itr *= int(n * (n -1) / 2)
max_attempts = int(np.round(n / 2))
eff = 0
for it in range(int(itr)):
att = 0
while att <= max_attempts:
a, b, c, d = pick_four_unique_nodes_quickly(n, rng)
r0_ab = R[a, b]
r0_cd = R[c, d]
r0_ad = R[a, d]
r0_cb = R[c, b]
#rewiring condition
if ( np.sign(r0_ab) == np.sign(r0_cd) and
np.sign(r0_ad) == np.sign(r0_cb) and
np.sign(r0_ab) != np.sign(r0_ad)):
R[a, d] = R[d, a] = r0_ab
R[a, b] = R[b, a] = r0_ad
R[c, b] = R[b, c] = r0_cd
R[c, d] = R[d, c] = r0_cb
eff += 1
break
att += 1
return R, eff | [
"def",
"randmio_und_signed",
"(",
"R",
",",
"itr",
",",
"seed",
"=",
"None",
")",
":",
"rng",
"=",
"get_rng",
"(",
"seed",
")",
"R",
"=",
"R",
".",
"copy",
"(",
")",
"n",
"=",
"len",
"(",
"R",
")",
"itr",
"*=",
"int",
"(",
"n",
"*",
"(",
"n... | This function randomizes an undirected weighted network with positive
and negative weights, while simultaneously preserving the degree
distribution of positive and negative weights. The function does not
preserve the strength distribution in weighted networks.
Parameters
----------
W : NxN np.ndarray
undirected binary/weighted connection matrix
itr : int
rewiring parameter. Each edge is rewired approximately itr times.
seed : hashable, optional
If None (default), use the np.random's global random state to generate random numbers.
Otherwise, use a new np.random.RandomState instance seeded with the given value.
Returns
-------
R : NxN np.ndarray
randomized network | [
"This",
"function",
"randomizes",
"an",
"undirected",
"weighted",
"network",
"with",
"positive",
"and",
"negative",
"weights",
"while",
"simultaneously",
"preserving",
"the",
"degree",
"distribution",
"of",
"positive",
"and",
"negative",
"weights",
".",
"The",
"func... | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/reference.py#L1541-L1599 | train | 202,425 |
def evaluate_generative_model(A, Atgt, D, eta, gamma=None,
        model_type='matching', model_var='powerlaw', epsilon=1e-6, seed=None):
    '''
    Generates synthetic networks with the parameters provided and evaluates
    their energy function. The energy function is defined as in Betzel et
    al. 2016: for each synthetic network it takes the Kolmogorov-Smirnov
    statistics of four network measures against the target network --
    degree distribution, clustering coefficient, betweenness centrality,
    and Euclidean distance between connected regions -- and the energy is
    the maximum of those four statistics. The energy is globally low if
    the synthetic network matches the target.

    Parameters
    ----------
    A : np.ndarray
        seed connection matrix handed through to generative_model
    Atgt : NxN np.ndarray
        binary target connection matrix the synthetic networks are
        evaluated against
    D : NxN np.ndarray
        matrix of Euclidean distances between nodes
    eta : np.ndarray
        list of eta parameter values; one synthetic network is generated
        and scored per entry
    gamma : np.ndarray | None
        list of gamma parameter values (must match eta; a mismatch raises
        inside generative_model)
    model_type : str
        generative rule, passed through to generative_model
    model_var : str
        weighting function, passed through to generative_model
    epsilon : float
        baseline probability of forming a connection, passed through
    seed : hashable, optional
        seed passed through to generative_model

    Returns
    -------
    energy : np.ndarray
        vector of energies, one per parameter combination
    '''
    m = np.size(np.where(Atgt.flat)) // 2  # number of edges in the target
    n = len(Atgt)

    # statistics of the target network
    xk = np.sum(Atgt, axis=1)
    xc = clustering_coef_bu(Atgt)
    xb = betweenness_bin(Atgt)
    xe = D[np.triu(Atgt, 1) > 0]

    B = generative_model(A, D, m, eta, gamma, model_type=model_type,
                         model_var=model_var, epsilon=epsilon, copy=True,
                         seed=seed)

    # if eta != gamma then an error is thrown within generative_model
    nB = len(eta)

    if nB == 1:
        # promote single network to a 3D stack for uniform indexing below
        B = np.reshape(B, np.append(np.shape(B), 1))

    K = np.zeros((nB, 4))

    def kstats(x, y):
        # Kolmogorov-Smirnov statistic between samples x and y: maximum
        # absolute difference of the empirical CDFs sampled at the pooled
        # data points
        bin_edges = np.concatenate([[-np.inf],
                                    np.sort(np.concatenate((x, y))),
                                    [np.inf]])

        bin_x, _ = np.histogram(x, bin_edges)
        bin_y, _ = np.histogram(y, bin_edges)

        sum_x = np.cumsum(bin_x) / np.sum(bin_x)
        sum_y = np.cumsum(bin_y) / np.sum(bin_y)

        cdfsamp_x = sum_x[:-1]
        cdfsamp_y = sum_y[:-1]

        # (debug prints removed -- they were leftover instrumentation)
        return np.max(np.abs(cdfsamp_x - cdfsamp_y))

    for ib in range(nB):
        Bc = B[:, :, ib]

        # statistics of the ib-th synthetic network
        yk = np.sum(Bc, axis=1)
        yc = clustering_coef_bu(Bc)
        yb = betweenness_bin(Bc)
        ye = D[np.triu(Bc, 1) > 0]

        K[ib, 0] = kstats(xk, yk)
        K[ib, 1] = kstats(xc, yc)
        K[ib, 2] = kstats(xb, yb)
        K[ib, 3] = kstats(xe, ye)

    # energy = worst (largest) of the four KS statistics
    return np.max(K, axis=1)
model_type='matching', model_var='powerlaw', epsilon=1e-6, seed=None):
'''
Generates synthetic networks with parameters provided and evaluates their
energy function. The energy function is defined as in Betzel et al. 2016.
Basically it takes the Kolmogorov-Smirnov statistics of 4 network
measures; comparing the degree distributions, clustering coefficients,
betweenness centrality, and Euclidean distances between connected regions.
The energy is globally low if the synthetic network matches the target.
Energy is defined as the maximum difference across the four statistics.
'''
m = np.size(np.where(Atgt.flat))//2
n = len(Atgt)
xk = np.sum(Atgt, axis=1)
xc = clustering_coef_bu(Atgt)
xb = betweenness_bin(Atgt)
xe = D[np.triu(Atgt, 1) > 0]
B = generative_model(A, D, m, eta, gamma, model_type=model_type,
model_var=model_var, epsilon=epsilon, copy=True, seed=seed)
#if eta != gamma then an error is thrown within generative model
nB = len(eta)
if nB == 1:
B = np.reshape(B, np.append(np.shape(B), 1))
K = np.zeros((nB, 4))
def kstats(x, y):
bin_edges = np.concatenate([[-np.inf],
np.sort(np.concatenate((x, y))),
[np.inf]])
bin_x,_ = np.histogram(x, bin_edges)
bin_y,_ = np.histogram(y, bin_edges)
#print(np.shape(bin_x))
sum_x = np.cumsum(bin_x) / np.sum(bin_x)
sum_y = np.cumsum(bin_y) / np.sum(bin_y)
cdfsamp_x = sum_x[:-1]
cdfsamp_y = sum_y[:-1]
delta_cdf = np.abs(cdfsamp_x - cdfsamp_y)
print(np.shape(delta_cdf))
#print(delta_cdf)
print(np.argmax(delta_cdf), np.max(delta_cdf))
return np.max(delta_cdf)
for ib in range(nB):
Bc = B[:,:,ib]
yk = np.sum(Bc, axis=1)
yc = clustering_coef_bu(Bc)
yb = betweenness_bin(Bc)
ye = D[np.triu(Bc, 1) > 0]
K[ib, 0] = kstats(xk, yk)
K[ib, 1] = kstats(xc, yc)
K[ib, 2] = kstats(xb, yb)
K[ib, 3] = kstats(xe, ye)
return np.max(K, axis=1) | [
"def",
"evaluate_generative_model",
"(",
"A",
",",
"Atgt",
",",
"D",
",",
"eta",
",",
"gamma",
"=",
"None",
",",
"model_type",
"=",
"'matching'",
",",
"model_var",
"=",
"'powerlaw'",
",",
"epsilon",
"=",
"1e-6",
",",
"seed",
"=",
"None",
")",
":",
"m",... | Generates synthetic networks with parameters provided and evaluates their
energy function. The energy function is defined as in Betzel et al. 2016.
Basically it takes the Kolmogorov-Smirnov statistics of 4 network
measures; comparing the degree distributions, clustering coefficients,
betweenness centrality, and Euclidean distances between connected regions.
The energy is globally low if the synthetic network matches the target.
Energy is defined as the maximum difference across the four statistics. | [
"Generates",
"synthetic",
"networks",
"with",
"parameters",
"provided",
"and",
"evaluates",
"their",
"energy",
"function",
".",
"The",
"energy",
"function",
"is",
"defined",
"as",
"in",
"Betzel",
"et",
"al",
".",
"2016",
".",
"Basically",
"it",
"takes",
"the",... | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/generative.py#L529-L596 | train | 202,426 |
def diversity_coef_sign(W, ci):
    '''
    The Shannon-entropy based diversity coefficient measures the diversity
    of intermodular connections of individual nodes and ranges from 0 to 1.

    Parameters
    ----------
    W : NxN np.ndarray
        undirected connection matrix with positive and negative weights
    ci : Nx1 np.ndarray
        community affiliation vector

    Returns
    -------
    Hpos : Nx1 np.ndarray
        diversity coefficient based on positive connections
    Hneg : Nx1 np.ndarray
        diversity coefficient based on negative connections
    '''
    n = len(W)  # number of nodes
    _, ci = np.unique(ci, return_inverse=True)
    ci += 1
    n_mod = np.max(ci)  # number of modules

    def entropy(w):
        # per-node strength and node-to-module strength
        strength = np.sum(w, axis=1)
        s_nm = np.zeros((n, n_mod))
        for mod in range(n_mod):
            s_nm[:, mod] = np.sum(w[:, ci == mod + 1], axis=1)
        p = s_nm / np.tile(strength, (n_mod, 1)).T
        # isolated nodes give 0/0 -> nan; map nan and 0 to 1 so that
        # p*log(p) contributes no entropy for those entries
        p[np.isnan(p)] = 0
        p[np.logical_not(p)] = 1
        return -np.sum(p * np.log(p), axis=1) / np.log(n_mod)

    # explicitly silence the divide-by-zero warning for isolated nodes
    with np.errstate(invalid='ignore'):
        Hpos = entropy(W * (W > 0))
        Hneg = entropy(-W * (W < 0))

    return Hpos, Hneg
'''
The Shannon-entropy based diversity coefficient measures the diversity
of intermodular connections of individual nodes and ranges from 0 to 1.
Parameters
----------
W : NxN np.ndarray
undirected connection matrix with positive and negative weights
ci : Nx1 np.ndarray
community affiliation vector
Returns
-------
Hpos : Nx1 np.ndarray
diversity coefficient based on positive connections
Hneg : Nx1 np.ndarray
diversity coefficient based on negative connections
'''
n = len(W) # number of nodes
_, ci = np.unique(ci, return_inverse=True)
ci += 1
m = np.max(ci) # number of modules
def entropy(w_):
S = np.sum(w_, axis=1) # strength
Snm = np.zeros((n, m)) # node-to-module degree
for i in range(m):
Snm[:, i] = np.sum(w_[:, ci == i + 1], axis=1)
pnm = Snm / (np.tile(S, (m, 1)).T)
pnm[np.isnan(pnm)] = 0
pnm[np.logical_not(pnm)] = 1
return -np.sum(pnm * np.log(pnm), axis=1) / np.log(m)
#explicitly ignore compiler warning for division by zero
with np.errstate(invalid='ignore'):
Hpos = entropy(W * (W > 0))
Hneg = entropy(-W * (W < 0))
return Hpos, Hneg | [
"def",
"diversity_coef_sign",
"(",
"W",
",",
"ci",
")",
":",
"n",
"=",
"len",
"(",
"W",
")",
"# number of nodes",
"_",
",",
"ci",
"=",
"np",
".",
"unique",
"(",
"ci",
",",
"return_inverse",
"=",
"True",
")",
"ci",
"+=",
"1",
"m",
"=",
"np",
".",
... | The Shannon-entropy based diversity coefficient measures the diversity
of intermodular connections of individual nodes and ranges from 0 to 1.
Parameters
----------
W : NxN np.ndarray
undirected connection matrix with positive and negative weights
ci : Nx1 np.ndarray
community affiliation vector
Returns
-------
Hpos : Nx1 np.ndarray
diversity coefficient based on positive connections
Hneg : Nx1 np.ndarray
diversity coefficient based on negative connections | [
"The",
"Shannon",
"-",
"entropy",
"based",
"diversity",
"coefficient",
"measures",
"the",
"diversity",
"of",
"intermodular",
"connections",
"of",
"individual",
"nodes",
"and",
"ranges",
"from",
"0",
"to",
"1",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/centrality.py#L140-L181 | train | 202,427 |
def edge_betweenness_bin(G):
    '''
    Edge betweenness centrality is the fraction of all shortest paths in
    the network that contain a given edge. Edges with high values of
    betweenness centrality participate in a large number of shortest paths.

    Parameters
    ----------
    G : NxN np.ndarray
        binary directed/undirected connection matrix

    Returns
    -------
    EBC : NxN np.ndarray
        edge betweenness centrality matrix
    BC : Nx1 np.ndarray
        node betweenness centrality vector

    Notes
    -----
    Betweenness centrality may be normalised to the range [0,1] as
    BC/[(N-1)(N-2)], where N is the number of nodes in the network.
    '''
    n = len(G)
    BC = np.zeros((n,))  # vertex betweenness
    EBC = np.zeros((n, n))  # edge betweenness
    # One BFS per source node u (the graph is binary, so BFS discovers all
    # shortest paths), followed by a Brandes-style dependency accumulation.
    for u in range(n):
        D = np.zeros((n,))
        D[u] = 1  # distance from u -- used here as a "visited" flag
        NP = np.zeros((n,))
        NP[u] = 1  # number of paths from u
        P = np.zeros((n, n))  # predecessors
        Q = np.zeros((n,), dtype=int)  # indices
        q = n - 1  # order of non-increasing distance
        Gu = G.copy()
        V = np.array([u])
        # breadth-first search; V holds the current frontier
        while V.size:
            Gu[:, V] = 0  # remove remaining in-edges
            for v in V:
                Q[q] = v  # record v in reverse-BFS order
                q -= 1
                W, = np.where(Gu[v, :])  # neighbors of v
                for w in W:
                    if D[w]:
                        NP[w] += NP[v]  # NP(u->w) sum of old and new
                        P[w, v] = 1  # v is a predecessor
                    else:
                        D[w] = 1
                        NP[w] = NP[v]  # NP(u->v) = NP of new path
                        P[w, v] = 1  # v is a predecessor
            V, = np.where(np.any(Gu[V, :], axis=0))
        if np.any(np.logical_not(D)):  # if some vertices unreachable
            Q[:q], = np.where(np.logical_not(D))  # ...these are first in line
        # Back-propagate path dependencies from the leaves toward u,
        # visiting nodes in order of non-increasing distance.
        DP = np.zeros((n,))  # dependency
        for w in Q[:n - 1]:
            BC[w] += DP[w]
            for v in np.where(P[w, :])[0]:
                # share of w's dependency flowing through edge (v, w)
                DPvw = (1 + DP[w]) * NP[v] / NP[w]
                DP[v] += DPvw
                EBC[v, w] += DPvw
    return EBC, BC
'''
Edge betweenness centrality is the fraction of all shortest paths in
the network that contain a given edge. Edges with high values of
betweenness centrality participate in a large number of shortest paths.
Parameters
----------
A : NxN np.ndarray
binary directed/undirected connection matrix
Returns
-------
EBC : NxN np.ndarray
edge betweenness centrality matrix
BC : Nx1 np.ndarray
node betweenness centrality vector
Notes
-----
Betweenness centrality may be normalised to the range [0,1] as
BC/[(N-1)(N-2)], where N is the number of nodes in the network.
'''
n = len(G)
BC = np.zeros((n,)) # vertex betweenness
EBC = np.zeros((n, n)) # edge betweenness
for u in range(n):
D = np.zeros((n,))
D[u] = 1 # distance from u
NP = np.zeros((n,))
NP[u] = 1 # number of paths from u
P = np.zeros((n, n)) # predecessors
Q = np.zeros((n,), dtype=int) # indices
q = n - 1 # order of non-increasing distance
Gu = G.copy()
V = np.array([u])
while V.size:
Gu[:, V] = 0 # remove remaining in-edges
for v in V:
Q[q] = v
q -= 1
W, = np.where(Gu[v, :]) # neighbors of V
for w in W:
if D[w]:
NP[w] += NP[v] # NP(u->w) sum of old and new
P[w, v] = 1 # v is a predecessor
else:
D[w] = 1
NP[w] = NP[v] # NP(u->v) = NP of new path
P[w, v] = 1 # v is a predecessor
V, = np.where(np.any(Gu[V, :], axis=0))
if np.any(np.logical_not(D)): # if some vertices unreachable
Q[:q], = np.where(np.logical_not(D)) # ...these are first in line
DP = np.zeros((n,)) # dependency
for w in Q[:n - 1]:
BC[w] += DP[w]
for v in np.where(P[w, :])[0]:
DPvw = (1 + DP[w]) * NP[v] / NP[w]
DP[v] += DPvw
EBC[v, w] += DPvw
return EBC, BC | [
"def",
"edge_betweenness_bin",
"(",
"G",
")",
":",
"n",
"=",
"len",
"(",
"G",
")",
"BC",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
")",
")",
"# vertex betweenness",
"EBC",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
"n",
")",
")",
"# edge b... | Edge betweenness centrality is the fraction of all shortest paths in
the network that contain a given edge. Edges with high values of
betweenness centrality participate in a large number of shortest paths.
Parameters
----------
A : NxN np.ndarray
binary directed/undirected connection matrix
Returns
-------
EBC : NxN np.ndarray
edge betweenness centrality matrix
BC : Nx1 np.ndarray
node betweenness centrality vector
Notes
-----
Betweenness centrality may be normalised to the range [0,1] as
BC/[(N-1)(N-2)], where N is the number of nodes in the network. | [
"Edge",
"betweenness",
"centrality",
"is",
"the",
"fraction",
"of",
"all",
"shortest",
"paths",
"in",
"the",
"network",
"that",
"contain",
"a",
"given",
"edge",
".",
"Edges",
"with",
"high",
"values",
"of",
"betweenness",
"centrality",
"participate",
"in",
"a"... | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/centrality.py#L183-L248 | train | 202,428 |
def erange(CIJ):
    '''
    Shortcuts are central edges which significantly reduce the
    characteristic path length in the network.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        binary directed connection matrix

    Returns
    -------
    Erange : NxN np.ndarray
        range for each edge, i.e. the length of the shortest path from i to j
        for edge c(i,j) after the edge has been removed from the graph
    eta : float
        average range for the entire graph
    Eshort : NxN np.ndarray
        entries are ones for shortcut edges
    fs : float
        fraction of shortcuts in the graph

    Follows the treatment of 'shortcuts' by Duncan Watts
    '''
    N = len(CIJ)
    K = np.size(np.where(CIJ)[1])  # number of edges
    Erange = np.zeros((N, N))
    i, j = np.where(CIJ)

    # range of each edge = shortest path length after removing that edge
    for c in range(len(i)):
        CIJcut = CIJ.copy()
        CIJcut[i[c], j[c]] = 0
        R, D = reachdist(CIJcut)
        Erange[i[c], j[c]] = D[i[c], j[c]]

    # average range (ignore Inf)
    eta = (np.sum(Erange[np.logical_and(Erange > 0, Erange < np.inf)]) /
           len(Erange[np.logical_and(Erange > 0, Erange < np.inf)]))

    # Original entries of D are ones, thus entries of Erange
    # must be two or greater.
    # If Erange(i,j) > 2, then the edge is a shortcut.
    # 'fs' is the fraction of shortcuts over the entire graph.
    Eshort = Erange > 2
    # BUG FIX: the previous code computed len(np.where(Eshort)), which is
    # always 2 (the length of the index tuple, one array per dimension),
    # not the number of shortcut edges. Count the True entries instead,
    # matching the MATLAB BCT's length(find(Eshort))/K.
    fs = np.sum(Eshort) / K
    return Erange, eta, Eshort, fs
'''
Shortcuts are central edges which significantly reduce the
characteristic path length in the network.
Parameters
----------
CIJ : NxN np.ndarray
binary directed connection matrix
Returns
-------
Erange : NxN np.ndarray
range for each edge, i.e. the length of the shortest path from i to j
for edge c(i,j) after the edge has been removed from the graph
eta : float
average range for the entire graph
Eshort : NxN np.ndarray
entries are ones for shortcut edges
fs : float
fractions of shortcuts in the graph
Follows the treatment of 'shortcuts' by Duncan Watts
'''
N = len(CIJ)
K = np.size(np.where(CIJ)[1])
Erange = np.zeros((N, N))
i, j = np.where(CIJ)
for c in range(len(i)):
CIJcut = CIJ.copy()
CIJcut[i[c], j[c]] = 0
R, D = reachdist(CIJcut)
Erange[i[c], j[c]] = D[i[c], j[c]]
# average range (ignore Inf)
eta = (np.sum(Erange[np.logical_and(Erange > 0, Erange < np.inf)]) /
len(Erange[np.logical_and(Erange > 0, Erange < np.inf)]))
# Original entries of D are ones, thus entries of Erange
# must be two or greater.
# If Erange(i,j) > 2, then the edge is a shortcut.
# 'fshort' is the fraction of shortcuts over the entire graph.
Eshort = Erange > 2
fs = len(np.where(Eshort)) / K
return Erange, eta, Eshort, fs | [
"def",
"erange",
"(",
"CIJ",
")",
":",
"N",
"=",
"len",
"(",
"CIJ",
")",
"K",
"=",
"np",
".",
"size",
"(",
"np",
".",
"where",
"(",
"CIJ",
")",
"[",
"1",
"]",
")",
"Erange",
"=",
"np",
".",
"zeros",
"(",
"(",
"N",
",",
"N",
")",
")",
"i... | Shortcuts are central edges which significantly reduce the
characteristic path length in the network.
Parameters
----------
CIJ : NxN np.ndarray
binary directed connection matrix
Returns
-------
Erange : NxN np.ndarray
range for each edge, i.e. the length of the shortest path from i to j
for edge c(i,j) after the edge has been removed from the graph
eta : float
average range for the entire graph
Eshort : NxN np.ndarray
entries are ones for shortcut edges
fs : float
fractions of shortcuts in the graph
Follows the treatment of 'shortcuts' by Duncan Watts | [
"Shortcuts",
"are",
"central",
"edges",
"which",
"significantly",
"reduce",
"the",
"characteristic",
"path",
"length",
"in",
"the",
"network",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/centrality.py#L355-L402 | train | 202,429 |
def module_degree_zscore(W, ci, flag=0):
    '''
    The within-module degree z-score is a within-module version of degree
    centrality.

    Parameters
    ----------
    W : NxN np.narray
        binary/weighted directed/undirected connection matrix
    ci : Nx1 np.array_like
        community affiliation vector
    flag : int
        Graph type. 0: undirected graph (default)
                    1: directed graph in degree
                    2: directed graph out degree
                    3: directed graph in and out degree

    Returns
    -------
    Z : Nx1 np.ndarray
        within-module degree Z-score
    '''
    _, ci = np.unique(ci, return_inverse=True)
    ci += 1

    if flag == 2:
        # out-degree: operate on the transpose
        W = W.copy()
        W = W.T
    elif flag == 3:
        # in- plus out-degree: symmetrize
        W = W.copy()
        W = W + W.T

    n_nodes = len(W)
    Z = np.zeros((n_nodes,))
    for mod in range(1, int(np.max(ci) + 1)):
        members = ci == mod
        # within-module degree of every member node
        within_deg = np.sum(W[np.ix_(members, members)], axis=1)
        Z[np.where(members)] = ((within_deg - np.mean(within_deg)) /
                                np.std(within_deg))

    # modules with uniform within-degree give 0/0; define those scores as 0
    Z[np.where(np.isnan(Z))] = 0
    return Z
'''
The within-module degree z-score is a within-module version of degree
centrality.
Parameters
----------
W : NxN np.narray
binary/weighted directed/undirected connection matrix
ci : Nx1 np.array_like
community affiliation vector
flag : int
Graph type. 0: undirected graph (default)
1: directed graph in degree
2: directed graph out degree
3: directed graph in and out degree
Returns
-------
Z : Nx1 np.ndarray
within-module degree Z-score
'''
_, ci = np.unique(ci, return_inverse=True)
ci += 1
if flag == 2:
W = W.copy()
W = W.T
elif flag == 3:
W = W.copy()
W = W + W.T
n = len(W)
Z = np.zeros((n,)) # number of vertices
for i in range(1, int(np.max(ci) + 1)):
Koi = np.sum(W[np.ix_(ci == i, ci == i)], axis=1)
Z[np.where(ci == i)] = (Koi - np.mean(Koi)) / np.std(Koi)
Z[np.where(np.isnan(Z))] = 0
return Z | [
"def",
"module_degree_zscore",
"(",
"W",
",",
"ci",
",",
"flag",
"=",
"0",
")",
":",
"_",
",",
"ci",
"=",
"np",
".",
"unique",
"(",
"ci",
",",
"return_inverse",
"=",
"True",
")",
"ci",
"+=",
"1",
"if",
"flag",
"==",
"2",
":",
"W",
"=",
"W",
"... | The within-module degree z-score is a within-module version of degree
centrality.
Parameters
----------
W : NxN np.narray
binary/weighted directed/undirected connection matrix
ci : Nx1 np.array_like
community affiliation vector
flag : int
Graph type. 0: undirected graph (default)
1: directed graph in degree
2: directed graph out degree
3: directed graph in and out degree
Returns
-------
Z : Nx1 np.ndarray
within-module degree Z-score | [
"The",
"within",
"-",
"module",
"degree",
"z",
"-",
"score",
"is",
"a",
"within",
"-",
"module",
"version",
"of",
"degree",
"centrality",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/centrality.py#L606-L645 | train | 202,430 |
def pagerank_centrality(A, d, falff=None):
    '''
    The PageRank centrality is a variant of eigenvector centrality. This
    function computes the PageRank centrality of each vertex in a graph.

    Formally, PageRank is defined as the stationary distribution achieved
    by instantiating a Markov chain on a graph. The PageRank centrality of
    a given vertex, then, is proportional to the number of steps (or amount
    of time) spent at that vertex as a result of such a process.

    The PageRank index gets modified by the addition of a damping factor,
    d. In terms of a Markov chain, the damping factor specifies the
    fraction of the time that a random walker will transition to one of its
    current state's neighbors. The remaining fraction of the time the
    walker is restarted at a random vertex. A common value for the damping
    factor is d = 0.85.

    Parameters
    ----------
    A : NxN np.narray
        adjacency matrix
    d : float
        damping factor (see description)
    falff : Nx1 np.ndarray | None
        Initial page rank probability, non-negative values. Default value is
        None. If not specified, a naive bayesian prior is used.

    Returns
    -------
    r : Nx1 np.ndarray
        vectors of page rankings

    Notes
    -----
    Note: The algorithm will work well for smaller matrices (number of
    nodes around 1000 or less)
    '''
    from scipy import linalg

    n_nodes = len(A)
    if falff is None:
        prior = np.ones((n_nodes,)) / n_nodes  # uniform restart prior
    else:
        prior = falff / np.sum(falff)

    deg = np.sum(A, axis=0)
    deg[deg == 0] = 1  # guard isolated nodes against division by zero
    transition = np.dot(A, np.diag(1 / deg))  # column-normalized adjacency

    # stationary distribution from the damped linear system
    lhs = np.eye(n_nodes) - d * transition
    rhs = (1 - d) * prior
    rank = linalg.solve(lhs, rhs)
    rank /= np.sum(rank)
    return rank
'''
The PageRank centrality is a variant of eigenvector centrality. This
function computes the PageRank centrality of each vertex in a graph.
Formally, PageRank is defined as the stationary distribution achieved
by instantiating a Markov chain on a graph. The PageRank centrality of
a given vertex, then, is proportional to the number of steps (or amount
of time) spent at that vertex as a result of such a process.
The PageRank index gets modified by the addition of a damping factor,
d. In terms of a Markov chain, the damping factor specifies the
fraction of the time that a random walker will transition to one of its
current state's neighbors. The remaining fraction of the time the
walker is restarted at a random vertex. A common value for the damping
factor is d = 0.85.
Parameters
----------
A : NxN np.narray
adjacency matrix
d : float
damping factor (see description)
falff : Nx1 np.ndarray | None
Initial page rank probability, non-negative values. Default value is
None. If not specified, a naive bayesian prior is used.
Returns
-------
r : Nx1 np.ndarray
vectors of page rankings
Notes
-----
Note: The algorithm will work well for smaller matrices (number of
nodes around 1000 or less)
'''
from scipy import linalg
N = len(A)
if falff is None:
norm_falff = np.ones((N,)) / N
else:
norm_falff = falff / np.sum(falff)
deg = np.sum(A, axis=0)
deg[deg == 0] = 1
D1 = np.diag(1 / deg)
B = np.eye(N) - d * np.dot(A, D1)
b = (1 - d) * norm_falff
r = linalg.solve(B, b)
r /= np.sum(r)
return r | [
"def",
"pagerank_centrality",
"(",
"A",
",",
"d",
",",
"falff",
"=",
"None",
")",
":",
"from",
"scipy",
"import",
"linalg",
"N",
"=",
"len",
"(",
"A",
")",
"if",
"falff",
"is",
"None",
":",
"norm_falff",
"=",
"np",
".",
"ones",
"(",
"(",
"N",
","... | The PageRank centrality is a variant of eigenvector centrality. This
function computes the PageRank centrality of each vertex in a graph.
Formally, PageRank is defined as the stationary distribution achieved
by instantiating a Markov chain on a graph. The PageRank centrality of
a given vertex, then, is proportional to the number of steps (or amount
of time) spent at that vertex as a result of such a process.
The PageRank index gets modified by the addition of a damping factor,
d. In terms of a Markov chain, the damping factor specifies the
fraction of the time that a random walker will transition to one of its
current state's neighbors. The remaining fraction of the time the
walker is restarted at a random vertex. A common value for the damping
factor is d = 0.85.
Parameters
----------
A : NxN np.narray
adjacency matrix
d : float
damping factor (see description)
falff : Nx1 np.ndarray | None
Initial page rank probability, non-negative values. Default value is
None. If not specified, a naive bayesian prior is used.
Returns
-------
r : Nx1 np.ndarray
vectors of page rankings
Notes
-----
Note: The algorithm will work well for smaller matrices (number of
nodes around 1000 or less) | [
"The",
"PageRank",
"centrality",
"is",
"a",
"variant",
"of",
"eigenvector",
"centrality",
".",
"This",
"function",
"computes",
"the",
"PageRank",
"centrality",
"of",
"each",
"vertex",
"in",
"a",
"graph",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/centrality.py#L648-L700 | train | 202,431 |
def subgraph_centrality(CIJ):
    '''
    The subgraph centrality of a node is a weighted sum of closed walks of
    different lengths in the network starting and ending at the node. This
    function returns a vector of subgraph centralities for each node of the
    network.

    Parameters
    ----------
    CIJ : NxN np.ndarray
        binary adjacency matrix

    Returns
    -------
    Cs : Nx1 np.ndarray
        subgraph centrality
    '''
    from scipy import linalg

    # Cs_i = sum_j vecs[i, j]**2 * exp(vals[j]) from the eigendecomposition
    eigenvalues, eigenvectors = linalg.eig(CIJ)
    walk_weights = np.exp(eigenvalues)
    Cs = np.real(np.dot(eigenvectors * eigenvectors, walk_weights))
    return Cs
'''
The subgraph centrality of a node is a weighted sum of closed walks of
different lengths in the network starting and ending at the node. This
function returns a vector of subgraph centralities for each node of the
network.
Parameters
----------
CIJ : NxN np.ndarray
binary adjacency matrix
Cs : Nx1 np.ndarray
subgraph centrality
'''
from scipy import linalg
vals, vecs = linalg.eig(CIJ) # compute eigendecomposition
# lambdas=np.diag(vals)
# compute eigenvector centr.
Cs = np.real(np.dot(vecs * vecs, np.exp(vals)))
return Cs | [
"def",
"subgraph_centrality",
"(",
"CIJ",
")",
":",
"from",
"scipy",
"import",
"linalg",
"vals",
",",
"vecs",
"=",
"linalg",
".",
"eig",
"(",
"CIJ",
")",
"# compute eigendecomposition",
"# lambdas=np.diag(vals)",
"# compute eigenvector centr.",
"Cs",
"=",
"np",
".... | The subgraph centrality of a node is a weighted sum of closed walks of
different lengths in the network starting and ending at the node. This
function returns a vector of subgraph centralities for each node of the
network.
Parameters
----------
CIJ : NxN np.ndarray
binary adjacency matrix
Cs : Nx1 np.ndarray
subgraph centrality | [
"The",
"subgraph",
"centrality",
"of",
"a",
"node",
"is",
"a",
"weighted",
"sum",
"of",
"closed",
"walks",
"of",
"different",
"lengths",
"in",
"the",
"network",
"starting",
"and",
"ending",
"at",
"the",
"node",
".",
"This",
"function",
"returns",
"a",
"vec... | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/centrality.py#L830-L851 | train | 202,432 |
aestrivex/bctpy | bct/utils/other.py | invert | def invert(W, copy=True):
'''
Inverts elementwise the weights in an input connection matrix.
In other words, change the from the matrix of internode strengths to the
matrix of internode distances.
If copy is not set, this function will *modify W in place.*
Parameters
----------
W : np.ndarray
weighted connectivity matrix
copy : bool
if True, returns a copy of the matrix. Otherwise, modifies the matrix
in place. Default value=True.
Returns
-------
W : np.ndarray
inverted connectivity matrix
'''
if copy:
W = W.copy()
E = np.where(W)
W[E] = 1. / W[E]
return W | python | def invert(W, copy=True):
'''
Inverts elementwise the weights in an input connection matrix.
In other words, change the from the matrix of internode strengths to the
matrix of internode distances.
If copy is not set, this function will *modify W in place.*
Parameters
----------
W : np.ndarray
weighted connectivity matrix
copy : bool
if True, returns a copy of the matrix. Otherwise, modifies the matrix
in place. Default value=True.
Returns
-------
W : np.ndarray
inverted connectivity matrix
'''
if copy:
W = W.copy()
E = np.where(W)
W[E] = 1. / W[E]
return W | [
"def",
"invert",
"(",
"W",
",",
"copy",
"=",
"True",
")",
":",
"if",
"copy",
":",
"W",
"=",
"W",
".",
"copy",
"(",
")",
"E",
"=",
"np",
".",
"where",
"(",
"W",
")",
"W",
"[",
"E",
"]",
"=",
"1.",
"/",
"W",
"[",
"E",
"]",
"return",
"W"
] | Inverts elementwise the weights in an input connection matrix.
In other words, change the from the matrix of internode strengths to the
matrix of internode distances.
If copy is not set, this function will *modify W in place.*
Parameters
----------
W : np.ndarray
weighted connectivity matrix
copy : bool
if True, returns a copy of the matrix. Otherwise, modifies the matrix
in place. Default value=True.
Returns
-------
W : np.ndarray
inverted connectivity matrix | [
"Inverts",
"elementwise",
"the",
"weights",
"in",
"an",
"input",
"connection",
"matrix",
".",
"In",
"other",
"words",
"change",
"the",
"from",
"the",
"matrix",
"of",
"internode",
"strengths",
"to",
"the",
"matrix",
"of",
"internode",
"distances",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/utils/other.py#L217-L242 | train | 202,433 |
aestrivex/bctpy | bct/algorithms/modularity.py | ci2ls | def ci2ls(ci):
'''
Convert from a community index vector to a 2D python list of modules
The list is a pure python list, not requiring numpy.
Parameters
----------
ci : Nx1 np.ndarray
the community index vector
zeroindexed : bool
If True, ci uses zero-indexing (lowest value is 0). Defaults to False.
Returns
-------
ls : listof(list)
pure python list with lowest value zero-indexed
(regardless of zero-indexing parameter)
'''
if not np.size(ci):
return ci # list is empty
_, ci = np.unique(ci, return_inverse=True)
ci += 1
nr_indices = int(max(ci))
ls = []
for c in range(nr_indices):
ls.append([])
for i, x in enumerate(ci):
ls[ci[i] - 1].append(i)
return ls | python | def ci2ls(ci):
'''
Convert from a community index vector to a 2D python list of modules
The list is a pure python list, not requiring numpy.
Parameters
----------
ci : Nx1 np.ndarray
the community index vector
zeroindexed : bool
If True, ci uses zero-indexing (lowest value is 0). Defaults to False.
Returns
-------
ls : listof(list)
pure python list with lowest value zero-indexed
(regardless of zero-indexing parameter)
'''
if not np.size(ci):
return ci # list is empty
_, ci = np.unique(ci, return_inverse=True)
ci += 1
nr_indices = int(max(ci))
ls = []
for c in range(nr_indices):
ls.append([])
for i, x in enumerate(ci):
ls[ci[i] - 1].append(i)
return ls | [
"def",
"ci2ls",
"(",
"ci",
")",
":",
"if",
"not",
"np",
".",
"size",
"(",
"ci",
")",
":",
"return",
"ci",
"# list is empty",
"_",
",",
"ci",
"=",
"np",
".",
"unique",
"(",
"ci",
",",
"return_inverse",
"=",
"True",
")",
"ci",
"+=",
"1",
"nr_indice... | Convert from a community index vector to a 2D python list of modules
The list is a pure python list, not requiring numpy.
Parameters
----------
ci : Nx1 np.ndarray
the community index vector
zeroindexed : bool
If True, ci uses zero-indexing (lowest value is 0). Defaults to False.
Returns
-------
ls : listof(list)
pure python list with lowest value zero-indexed
(regardless of zero-indexing parameter) | [
"Convert",
"from",
"a",
"community",
"index",
"vector",
"to",
"a",
"2D",
"python",
"list",
"of",
"modules",
"The",
"list",
"is",
"a",
"pure",
"python",
"list",
"not",
"requiring",
"numpy",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/modularity.py#L6-L34 | train | 202,434 |
aestrivex/bctpy | bct/algorithms/modularity.py | ls2ci | def ls2ci(ls, zeroindexed=False):
'''
Convert from a 2D python list of modules to a community index vector.
The list is a pure python list, not requiring numpy.
Parameters
----------
ls : listof(list)
pure python list with lowest value zero-indexed
(regardless of value of zeroindexed parameter)
zeroindexed : bool
If True, ci uses zero-indexing (lowest value is 0). Defaults to False.
Returns
-------
ci : Nx1 np.ndarray
community index vector
'''
if ls is None or np.size(ls) == 0:
return () # list is empty
nr_indices = sum(map(len, ls))
ci = np.zeros((nr_indices,), dtype=int)
z = int(not zeroindexed)
for i, x in enumerate(ls):
for j, y in enumerate(ls[i]):
ci[ls[i][j]] = i + z
return ci | python | def ls2ci(ls, zeroindexed=False):
'''
Convert from a 2D python list of modules to a community index vector.
The list is a pure python list, not requiring numpy.
Parameters
----------
ls : listof(list)
pure python list with lowest value zero-indexed
(regardless of value of zeroindexed parameter)
zeroindexed : bool
If True, ci uses zero-indexing (lowest value is 0). Defaults to False.
Returns
-------
ci : Nx1 np.ndarray
community index vector
'''
if ls is None or np.size(ls) == 0:
return () # list is empty
nr_indices = sum(map(len, ls))
ci = np.zeros((nr_indices,), dtype=int)
z = int(not zeroindexed)
for i, x in enumerate(ls):
for j, y in enumerate(ls[i]):
ci[ls[i][j]] = i + z
return ci | [
"def",
"ls2ci",
"(",
"ls",
",",
"zeroindexed",
"=",
"False",
")",
":",
"if",
"ls",
"is",
"None",
"or",
"np",
".",
"size",
"(",
"ls",
")",
"==",
"0",
":",
"return",
"(",
")",
"# list is empty",
"nr_indices",
"=",
"sum",
"(",
"map",
"(",
"len",
","... | Convert from a 2D python list of modules to a community index vector.
The list is a pure python list, not requiring numpy.
Parameters
----------
ls : listof(list)
pure python list with lowest value zero-indexed
(regardless of value of zeroindexed parameter)
zeroindexed : bool
If True, ci uses zero-indexing (lowest value is 0). Defaults to False.
Returns
-------
ci : Nx1 np.ndarray
community index vector | [
"Convert",
"from",
"a",
"2D",
"python",
"list",
"of",
"modules",
"to",
"a",
"community",
"index",
"vector",
".",
"The",
"list",
"is",
"a",
"pure",
"python",
"list",
"not",
"requiring",
"numpy",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/modularity.py#L37-L63 | train | 202,435 |
aestrivex/bctpy | bct/algorithms/modularity.py | _safe_squeeze | def _safe_squeeze(arr, *args, **kwargs):
"""
numpy.squeeze will reduce a 1-item array down to a zero-dimensional "array",
which is not necessarily desirable.
This function does the squeeze operation, but ensures that there is at least
1 dimension in the output.
"""
out = np.squeeze(arr, *args, **kwargs)
if np.ndim(out) == 0:
out = out.reshape((1,))
return out | python | def _safe_squeeze(arr, *args, **kwargs):
"""
numpy.squeeze will reduce a 1-item array down to a zero-dimensional "array",
which is not necessarily desirable.
This function does the squeeze operation, but ensures that there is at least
1 dimension in the output.
"""
out = np.squeeze(arr, *args, **kwargs)
if np.ndim(out) == 0:
out = out.reshape((1,))
return out | [
"def",
"_safe_squeeze",
"(",
"arr",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"out",
"=",
"np",
".",
"squeeze",
"(",
"arr",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"np",
".",
"ndim",
"(",
"out",
")",
"==",
"0",
":",
... | numpy.squeeze will reduce a 1-item array down to a zero-dimensional "array",
which is not necessarily desirable.
This function does the squeeze operation, but ensures that there is at least
1 dimension in the output. | [
"numpy",
".",
"squeeze",
"will",
"reduce",
"a",
"1",
"-",
"item",
"array",
"down",
"to",
"a",
"zero",
"-",
"dimensional",
"array",
"which",
"is",
"not",
"necessarily",
"desirable",
".",
"This",
"function",
"does",
"the",
"squeeze",
"operation",
"but",
"ens... | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/modularity.py#L464-L474 | train | 202,436 |
aestrivex/bctpy | bct/algorithms/modularity.py | modularity_und_sign | def modularity_und_sign(W, ci, qtype='sta'):
'''
This function simply calculates the signed modularity for a given
partition. It does not do automatic partition generation right now.
Parameters
----------
W : NxN np.ndarray
undirected weighted/binary connection matrix with positive and
negative weights
ci : Nx1 np.ndarray
community partition
qtype : str
modularity type. Can be 'sta' (default), 'pos', 'smp', 'gja', 'neg'.
See Rubinov and Sporns (2011) for a description.
Returns
-------
ci : Nx1 np.ndarray
the partition which was input (for consistency of the API)
Q : float
maximized modularity metric
Notes
-----
uses a deterministic algorithm
'''
n = len(W)
_, ci = np.unique(ci, return_inverse=True)
ci += 1
W0 = W * (W > 0) # positive weights matrix
W1 = -W * (W < 0) # negative weights matrix
s0 = np.sum(W0) # positive sum of weights
s1 = np.sum(W1) # negative sum of weights
Knm0 = np.zeros((n, n)) # positive node-to-module degree
Knm1 = np.zeros((n, n)) # negative node-to-module degree
for m in range(int(np.max(ci))): # loop over initial modules
Knm0[:, m] = np.sum(W0[:, ci == m + 1], axis=1)
Knm1[:, m] = np.sum(W1[:, ci == m + 1], axis=1)
Kn0 = np.sum(Knm0, axis=1) # positive node degree
Kn1 = np.sum(Knm1, axis=1) # negative node degree
Km0 = np.sum(Knm0, axis=0) # positive module degree
Km1 = np.sum(Knm1, axis=0) # negaitve module degree
if qtype == 'smp':
d0 = 1 / s0
d1 = 1 / s1 # dQ=dQ0/s0-dQ1/s1
elif qtype == 'gja':
d0 = 1 / (s0 + s1)
d1 = 1 / (s0 + s1) # dQ=(dQ0-dQ1)/(s0+s1)
elif qtype == 'sta':
d0 = 1 / s0
d1 = 1 / (s0 + s1) # dQ=dQ0/s0-dQ1/(s0+s1)
elif qtype == 'pos':
d0 = 1 / s0
d1 = 0 # dQ=dQ0/s0
elif qtype == 'neg':
d0 = 0
d1 = 1 / s1 # dQ=-dQ1/s1
else:
raise KeyError('modularity type unknown')
if not s0: # adjust for absent positive weights
s0 = 1
d0 = 0
if not s1: # adjust for absent negative weights
s1 = 1
d1 = 0
m = np.tile(ci, (n, 1))
q0 = (W0 - np.outer(Kn0, Kn0) / s0) * (m == m.T)
q1 = (W1 - np.outer(Kn1, Kn1) / s1) * (m == m.T)
q = d0 * np.sum(q0) - d1 * np.sum(q1)
return ci, q | python | def modularity_und_sign(W, ci, qtype='sta'):
'''
This function simply calculates the signed modularity for a given
partition. It does not do automatic partition generation right now.
Parameters
----------
W : NxN np.ndarray
undirected weighted/binary connection matrix with positive and
negative weights
ci : Nx1 np.ndarray
community partition
qtype : str
modularity type. Can be 'sta' (default), 'pos', 'smp', 'gja', 'neg'.
See Rubinov and Sporns (2011) for a description.
Returns
-------
ci : Nx1 np.ndarray
the partition which was input (for consistency of the API)
Q : float
maximized modularity metric
Notes
-----
uses a deterministic algorithm
'''
n = len(W)
_, ci = np.unique(ci, return_inverse=True)
ci += 1
W0 = W * (W > 0) # positive weights matrix
W1 = -W * (W < 0) # negative weights matrix
s0 = np.sum(W0) # positive sum of weights
s1 = np.sum(W1) # negative sum of weights
Knm0 = np.zeros((n, n)) # positive node-to-module degree
Knm1 = np.zeros((n, n)) # negative node-to-module degree
for m in range(int(np.max(ci))): # loop over initial modules
Knm0[:, m] = np.sum(W0[:, ci == m + 1], axis=1)
Knm1[:, m] = np.sum(W1[:, ci == m + 1], axis=1)
Kn0 = np.sum(Knm0, axis=1) # positive node degree
Kn1 = np.sum(Knm1, axis=1) # negative node degree
Km0 = np.sum(Knm0, axis=0) # positive module degree
Km1 = np.sum(Knm1, axis=0) # negaitve module degree
if qtype == 'smp':
d0 = 1 / s0
d1 = 1 / s1 # dQ=dQ0/s0-dQ1/s1
elif qtype == 'gja':
d0 = 1 / (s0 + s1)
d1 = 1 / (s0 + s1) # dQ=(dQ0-dQ1)/(s0+s1)
elif qtype == 'sta':
d0 = 1 / s0
d1 = 1 / (s0 + s1) # dQ=dQ0/s0-dQ1/(s0+s1)
elif qtype == 'pos':
d0 = 1 / s0
d1 = 0 # dQ=dQ0/s0
elif qtype == 'neg':
d0 = 0
d1 = 1 / s1 # dQ=-dQ1/s1
else:
raise KeyError('modularity type unknown')
if not s0: # adjust for absent positive weights
s0 = 1
d0 = 0
if not s1: # adjust for absent negative weights
s1 = 1
d1 = 0
m = np.tile(ci, (n, 1))
q0 = (W0 - np.outer(Kn0, Kn0) / s0) * (m == m.T)
q1 = (W1 - np.outer(Kn1, Kn1) / s1) * (m == m.T)
q = d0 * np.sum(q0) - d1 * np.sum(q1)
return ci, q | [
"def",
"modularity_und_sign",
"(",
"W",
",",
"ci",
",",
"qtype",
"=",
"'sta'",
")",
":",
"n",
"=",
"len",
"(",
"W",
")",
"_",
",",
"ci",
"=",
"np",
".",
"unique",
"(",
"ci",
",",
"return_inverse",
"=",
"True",
")",
"ci",
"+=",
"1",
"W0",
"=",
... | This function simply calculates the signed modularity for a given
partition. It does not do automatic partition generation right now.
Parameters
----------
W : NxN np.ndarray
undirected weighted/binary connection matrix with positive and
negative weights
ci : Nx1 np.ndarray
community partition
qtype : str
modularity type. Can be 'sta' (default), 'pos', 'smp', 'gja', 'neg'.
See Rubinov and Sporns (2011) for a description.
Returns
-------
ci : Nx1 np.ndarray
the partition which was input (for consistency of the API)
Q : float
maximized modularity metric
Notes
-----
uses a deterministic algorithm | [
"This",
"function",
"simply",
"calculates",
"the",
"signed",
"modularity",
"for",
"a",
"given",
"partition",
".",
"It",
"does",
"not",
"do",
"automatic",
"partition",
"generation",
"right",
"now",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/modularity.py#L1575-L1653 | train | 202,437 |
aestrivex/bctpy | bct/algorithms/modularity.py | partition_distance | def partition_distance(cx, cy):
'''
This function quantifies the distance between pairs of community
partitions with information theoretic measures.
Parameters
----------
cx : Nx1 np.ndarray
community affiliation vector X
cy : Nx1 np.ndarray
community affiliation vector Y
Returns
-------
VIn : Nx1 np.ndarray
normalized variation of information
MIn : Nx1 np.ndarray
normalized mutual information
Notes
-----
(Definitions:
VIn = [H(X) + H(Y) - 2MI(X,Y)]/log(n)
MIn = 2MI(X,Y)/[H(X)+H(Y)]
where H is entropy, MI is mutual information and n is number of nodes)
'''
n = np.size(cx)
_, cx = np.unique(cx, return_inverse=True)
_, cy = np.unique(cy, return_inverse=True)
_, cxy = np.unique(cx + cy * 1j, return_inverse=True)
cx += 1
cy += 1
cxy += 1
Px = np.histogram(cx, bins=np.max(cx))[0] / n
Py = np.histogram(cy, bins=np.max(cy))[0] / n
Pxy = np.histogram(cxy, bins=np.max(cxy))[0] / n
Hx = -np.sum(Px * np.log(Px))
Hy = -np.sum(Py * np.log(Py))
Hxy = -np.sum(Pxy * np.log(Pxy))
Vin = (2 * Hxy - Hx - Hy) / np.log(n)
Min = 2 * (Hx + Hy - Hxy) / (Hx + Hy)
return Vin, Min | python | def partition_distance(cx, cy):
'''
This function quantifies the distance between pairs of community
partitions with information theoretic measures.
Parameters
----------
cx : Nx1 np.ndarray
community affiliation vector X
cy : Nx1 np.ndarray
community affiliation vector Y
Returns
-------
VIn : Nx1 np.ndarray
normalized variation of information
MIn : Nx1 np.ndarray
normalized mutual information
Notes
-----
(Definitions:
VIn = [H(X) + H(Y) - 2MI(X,Y)]/log(n)
MIn = 2MI(X,Y)/[H(X)+H(Y)]
where H is entropy, MI is mutual information and n is number of nodes)
'''
n = np.size(cx)
_, cx = np.unique(cx, return_inverse=True)
_, cy = np.unique(cy, return_inverse=True)
_, cxy = np.unique(cx + cy * 1j, return_inverse=True)
cx += 1
cy += 1
cxy += 1
Px = np.histogram(cx, bins=np.max(cx))[0] / n
Py = np.histogram(cy, bins=np.max(cy))[0] / n
Pxy = np.histogram(cxy, bins=np.max(cxy))[0] / n
Hx = -np.sum(Px * np.log(Px))
Hy = -np.sum(Py * np.log(Py))
Hxy = -np.sum(Pxy * np.log(Pxy))
Vin = (2 * Hxy - Hx - Hy) / np.log(n)
Min = 2 * (Hx + Hy - Hxy) / (Hx + Hy)
return Vin, Min | [
"def",
"partition_distance",
"(",
"cx",
",",
"cy",
")",
":",
"n",
"=",
"np",
".",
"size",
"(",
"cx",
")",
"_",
",",
"cx",
"=",
"np",
".",
"unique",
"(",
"cx",
",",
"return_inverse",
"=",
"True",
")",
"_",
",",
"cy",
"=",
"np",
".",
"unique",
... | This function quantifies the distance between pairs of community
partitions with information theoretic measures.
Parameters
----------
cx : Nx1 np.ndarray
community affiliation vector X
cy : Nx1 np.ndarray
community affiliation vector Y
Returns
-------
VIn : Nx1 np.ndarray
normalized variation of information
MIn : Nx1 np.ndarray
normalized mutual information
Notes
-----
(Definitions:
VIn = [H(X) + H(Y) - 2MI(X,Y)]/log(n)
MIn = 2MI(X,Y)/[H(X)+H(Y)]
where H is entropy, MI is mutual information and n is number of nodes) | [
"This",
"function",
"quantifies",
"the",
"distance",
"between",
"pairs",
"of",
"community",
"partitions",
"with",
"information",
"theoretic",
"measures",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/modularity.py#L1656-L1701 | train | 202,438 |
aestrivex/bctpy | bct/algorithms/distance.py | breadth | def breadth(CIJ, source):
'''
Implementation of breadth-first search.
Parameters
----------
CIJ : NxN np.ndarray
binary directed/undirected connection matrix
source : int
source vertex
Returns
-------
distance : Nx1 np.ndarray
vector of distances between source and ith vertex (0 for source)
branch : Nx1 np.ndarray
vertex that precedes i in the breadth-first search (-1 for source)
Notes
-----
Breadth-first search tree does not contain all paths (or all
shortest paths), but allows the determination of at least one path with
minimum distance. The entire graph is explored, starting from source
vertex 'source'.
'''
n = len(CIJ)
# colors: white,gray,black
white = 0
gray = 1
black = 2
color = np.zeros((n,))
distance = np.inf * np.ones((n,))
branch = np.zeros((n,))
# start on vertex source
color[source] = gray
distance[source] = 0
branch[source] = -1
Q = [source]
# keep going until the entire graph is explored
while Q:
u = Q[0]
ns, = np.where(CIJ[u, :])
for v in ns:
# this allows the source distance itself to be recorded
if distance[v] == 0:
distance[v] = distance[u] + 1
if color[v] == white:
color[v] = gray
distance[v] = distance[u] + 1
branch[v] = u
Q.append(v)
Q = Q[1:]
color[u] = black
return distance, branch | python | def breadth(CIJ, source):
'''
Implementation of breadth-first search.
Parameters
----------
CIJ : NxN np.ndarray
binary directed/undirected connection matrix
source : int
source vertex
Returns
-------
distance : Nx1 np.ndarray
vector of distances between source and ith vertex (0 for source)
branch : Nx1 np.ndarray
vertex that precedes i in the breadth-first search (-1 for source)
Notes
-----
Breadth-first search tree does not contain all paths (or all
shortest paths), but allows the determination of at least one path with
minimum distance. The entire graph is explored, starting from source
vertex 'source'.
'''
n = len(CIJ)
# colors: white,gray,black
white = 0
gray = 1
black = 2
color = np.zeros((n,))
distance = np.inf * np.ones((n,))
branch = np.zeros((n,))
# start on vertex source
color[source] = gray
distance[source] = 0
branch[source] = -1
Q = [source]
# keep going until the entire graph is explored
while Q:
u = Q[0]
ns, = np.where(CIJ[u, :])
for v in ns:
# this allows the source distance itself to be recorded
if distance[v] == 0:
distance[v] = distance[u] + 1
if color[v] == white:
color[v] = gray
distance[v] = distance[u] + 1
branch[v] = u
Q.append(v)
Q = Q[1:]
color[u] = black
return distance, branch | [
"def",
"breadth",
"(",
"CIJ",
",",
"source",
")",
":",
"n",
"=",
"len",
"(",
"CIJ",
")",
"# colors: white,gray,black",
"white",
"=",
"0",
"gray",
"=",
"1",
"black",
"=",
"2",
"color",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
")",
")",
"distan... | Implementation of breadth-first search.
Parameters
----------
CIJ : NxN np.ndarray
binary directed/undirected connection matrix
source : int
source vertex
Returns
-------
distance : Nx1 np.ndarray
vector of distances between source and ith vertex (0 for source)
branch : Nx1 np.ndarray
vertex that precedes i in the breadth-first search (-1 for source)
Notes
-----
Breadth-first search tree does not contain all paths (or all
shortest paths), but allows the determination of at least one path with
minimum distance. The entire graph is explored, starting from source
vertex 'source'. | [
"Implementation",
"of",
"breadth",
"-",
"first",
"search",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/distance.py#L44-L102 | train | 202,439 |
aestrivex/bctpy | bct/algorithms/distance.py | charpath | def charpath(D, include_diagonal=False, include_infinite=True):
'''
The characteristic path length is the average shortest path length in
the network. The global efficiency is the average inverse shortest path
length in the network.
Parameters
----------
D : NxN np.ndarray
distance matrix
include_diagonal : bool
If True, include the weights on the diagonal. Default value is False.
include_infinite : bool
If True, include infinite distances in calculation
Returns
-------
lambda : float
characteristic path length
efficiency : float
global efficiency
ecc : Nx1 np.ndarray
eccentricity at each vertex
radius : float
radius of graph
diameter : float
diameter of graph
Notes
-----
The input distance matrix may be obtained with any of the distance
functions, e.g. distance_bin, distance_wei.
Characteristic path length is calculated as the global mean of
the distance matrix D, excludings any 'Infs' but including distances on
the main diagonal.
'''
D = D.copy()
if not include_diagonal:
np.fill_diagonal(D, np.nan)
if not include_infinite:
D[np.isinf(D)] = np.nan
Dv = D[np.logical_not(np.isnan(D))].ravel()
# mean of finite entries of D[G]
lambda_ = np.mean(Dv)
# efficiency: mean of inverse entries of D[G]
efficiency = np.mean(1 / Dv)
# eccentricity for each vertex (ignore inf)
ecc = np.array(np.ma.masked_where(np.isnan(D), D).max(axis=1))
# radius of graph
radius = np.min(ecc) # but what about zeros?
# diameter of graph
diameter = np.max(ecc)
return lambda_, efficiency, ecc, radius, diameter | python | def charpath(D, include_diagonal=False, include_infinite=True):
'''
The characteristic path length is the average shortest path length in
the network. The global efficiency is the average inverse shortest path
length in the network.
Parameters
----------
D : NxN np.ndarray
distance matrix
include_diagonal : bool
If True, include the weights on the diagonal. Default value is False.
include_infinite : bool
If True, include infinite distances in calculation
Returns
-------
lambda : float
characteristic path length
efficiency : float
global efficiency
ecc : Nx1 np.ndarray
eccentricity at each vertex
radius : float
radius of graph
diameter : float
diameter of graph
Notes
-----
The input distance matrix may be obtained with any of the distance
functions, e.g. distance_bin, distance_wei.
Characteristic path length is calculated as the global mean of
the distance matrix D, excludings any 'Infs' but including distances on
the main diagonal.
'''
D = D.copy()
if not include_diagonal:
np.fill_diagonal(D, np.nan)
if not include_infinite:
D[np.isinf(D)] = np.nan
Dv = D[np.logical_not(np.isnan(D))].ravel()
# mean of finite entries of D[G]
lambda_ = np.mean(Dv)
# efficiency: mean of inverse entries of D[G]
efficiency = np.mean(1 / Dv)
# eccentricity for each vertex (ignore inf)
ecc = np.array(np.ma.masked_where(np.isnan(D), D).max(axis=1))
# radius of graph
radius = np.min(ecc) # but what about zeros?
# diameter of graph
diameter = np.max(ecc)
return lambda_, efficiency, ecc, radius, diameter | [
"def",
"charpath",
"(",
"D",
",",
"include_diagonal",
"=",
"False",
",",
"include_infinite",
"=",
"True",
")",
":",
"D",
"=",
"D",
".",
"copy",
"(",
")",
"if",
"not",
"include_diagonal",
":",
"np",
".",
"fill_diagonal",
"(",
"D",
",",
"np",
".",
"nan... | The characteristic path length is the average shortest path length in
the network. The global efficiency is the average inverse shortest path
length in the network.
Parameters
----------
D : NxN np.ndarray
distance matrix
include_diagonal : bool
If True, include the weights on the diagonal. Default value is False.
include_infinite : bool
If True, include infinite distances in calculation
Returns
-------
lambda : float
characteristic path length
efficiency : float
global efficiency
ecc : Nx1 np.ndarray
eccentricity at each vertex
radius : float
radius of graph
diameter : float
diameter of graph
Notes
-----
The input distance matrix may be obtained with any of the distance
functions, e.g. distance_bin, distance_wei.
Characteristic path length is calculated as the global mean of
the distance matrix D, excludings any 'Infs' but including distances on
the main diagonal. | [
"The",
"characteristic",
"path",
"length",
"is",
"the",
"average",
"shortest",
"path",
"length",
"in",
"the",
"network",
".",
"The",
"global",
"efficiency",
"is",
"the",
"average",
"inverse",
"shortest",
"path",
"length",
"in",
"the",
"network",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/distance.py#L105-L166 | train | 202,440 |
aestrivex/bctpy | bct/algorithms/distance.py | cycprob | def cycprob(Pq):
'''
Cycles are paths which begin and end at the same node. Cycle
probability for path length d, is the fraction of all paths of length
d-1 that may be extended to form cycles of length d.
Parameters
----------
Pq : NxNxQ np.ndarray
Path matrix with Pq[i,j,q] = number of paths from i to j of length q.
Produced by findpaths()
Returns
-------
fcyc : Qx1 np.ndarray
fraction of all paths that are cycles for each path length q
pcyc : Qx1 np.ndarray
probability that a non-cyclic path of length q-1 can be extended to
form a cycle of length q for each path length q
'''
# note: fcyc[1] must be zero, as there cannot be cycles of length 1
fcyc = np.zeros(np.size(Pq, axis=2))
for q in range(np.size(Pq, axis=2)):
if np.sum(Pq[:, :, q]) > 0:
fcyc[q] = np.sum(np.diag(Pq[:, :, q])) / np.sum(Pq[:, :, q])
else:
fcyc[q] = 0
# note: pcyc[1] is not defined (set to zero)
# note: pcyc[2] is equal to the fraction of reciprocal connections
# note: there are no non-cyclic paths of length N and no cycles of len N+1
pcyc = np.zeros(np.size(Pq, axis=2))
for q in range(np.size(Pq, axis=2)):
if np.sum(Pq[:, :, q - 1]) - np.sum(np.diag(Pq[:, :, q - 1])) > 0:
pcyc[q] = (np.sum(np.diag(Pq[:, :, q - 1])) /
np.sum(Pq[:, :, q - 1]) - np.sum(np.diag(Pq[:, :, q - 1])))
else:
pcyc[q] = 0
return fcyc, pcyc | python | def cycprob(Pq):
'''
Cycles are paths which begin and end at the same node. Cycle
probability for path length d, is the fraction of all paths of length
d-1 that may be extended to form cycles of length d.
Parameters
----------
Pq : NxNxQ np.ndarray
Path matrix with Pq[i,j,q] = number of paths from i to j of length q.
Produced by findpaths()
Returns
-------
fcyc : Qx1 np.ndarray
fraction of all paths that are cycles for each path length q
pcyc : Qx1 np.ndarray
probability that a non-cyclic path of length q-1 can be extended to
form a cycle of length q for each path length q
'''
# note: fcyc[1] must be zero, as there cannot be cycles of length 1
fcyc = np.zeros(np.size(Pq, axis=2))
for q in range(np.size(Pq, axis=2)):
if np.sum(Pq[:, :, q]) > 0:
fcyc[q] = np.sum(np.diag(Pq[:, :, q])) / np.sum(Pq[:, :, q])
else:
fcyc[q] = 0
# note: pcyc[1] is not defined (set to zero)
# note: pcyc[2] is equal to the fraction of reciprocal connections
# note: there are no non-cyclic paths of length N and no cycles of len N+1
pcyc = np.zeros(np.size(Pq, axis=2))
for q in range(np.size(Pq, axis=2)):
if np.sum(Pq[:, :, q - 1]) - np.sum(np.diag(Pq[:, :, q - 1])) > 0:
pcyc[q] = (np.sum(np.diag(Pq[:, :, q - 1])) /
np.sum(Pq[:, :, q - 1]) - np.sum(np.diag(Pq[:, :, q - 1])))
else:
pcyc[q] = 0
return fcyc, pcyc | [
"def",
"cycprob",
"(",
"Pq",
")",
":",
"# note: fcyc[1] must be zero, as there cannot be cycles of length 1",
"fcyc",
"=",
"np",
".",
"zeros",
"(",
"np",
".",
"size",
"(",
"Pq",
",",
"axis",
"=",
"2",
")",
")",
"for",
"q",
"in",
"range",
"(",
"np",
".",
... | Cycles are paths which begin and end at the same node. Cycle
probability for path length d, is the fraction of all paths of length
d-1 that may be extended to form cycles of length d.
Parameters
----------
Pq : NxNxQ np.ndarray
Path matrix with Pq[i,j,q] = number of paths from i to j of length q.
Produced by findpaths()
Returns
-------
fcyc : Qx1 np.ndarray
fraction of all paths that are cycles for each path length q
pcyc : Qx1 np.ndarray
probability that a non-cyclic path of length q-1 can be extended to
form a cycle of length q for each path length q | [
"Cycles",
"are",
"paths",
"which",
"begin",
"and",
"end",
"at",
"the",
"same",
"node",
".",
"Cycle",
"probability",
"for",
"path",
"length",
"d",
"is",
"the",
"fraction",
"of",
"all",
"paths",
"of",
"length",
"d",
"-",
"1",
"that",
"may",
"be",
"extend... | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/distance.py#L169-L209 | train | 202,441 |
aestrivex/bctpy | bct/algorithms/distance.py | distance_wei_floyd | def distance_wei_floyd(adjacency, transform=None):
"""
Computes the topological length of the shortest possible path connecting
every pair of nodes in the network.
Parameters
----------
D : (N x N) array_like
Weighted/unweighted, direct/undirected connection weight/length array
transform : str, optional
If `adjacency` is a connection weight array, specify a transform to map
input connection weights to connection lengths. Options include ['log',
'inv'], where 'log' is `-np.log(adjacency)` and 'inv' is `1/adjacency`.
Default: None
Returns
-------
SPL : (N x N) ndarray
Weighted/unweighted shortest path-length array. If `D` is a directed
graph, then `SPL` is not symmetric
hops : (N x N) ndarray
Number of edges in the shortest path array. If `D` is unweighted, `SPL`
and `hops` are identical.
Pmat : (N x N) ndarray
Element `[i,j]` of this array indicates the next node in the shortest
path between `i` and `j`. This array is used as an input argument for
function `retrieve_shortest_path()`, which returns as output the
sequence of nodes comprising the shortest path between a given pair of
nodes.
Notes
-----
There may be more than one shortest path between any pair of nodes in the
network. Non-unique shortest paths are termed shortest path degeneracies
and are most likely to occur in unweighted networks. When the shortest-path
is degenerate, the elements of `Pmat` correspond to the first shortest path
discovered by the algorithm.
The input array may be either a connection weight or length array. The
connection length array is typically obtained with a mapping from weight to
length, such that higher weights are mapped to shorter lengths (see
argument `transform`, above).
Originally written in Matlab by Andrea Avena-Koenigsberger (IU, 2012)
References
----------
.. [1] Floyd, R. W. (1962). Algorithm 97: shortest path. Communications of
the ACM, 5(6), 345.
.. [2] Roy, B. (1959). Transitivite et connexite. Comptes Rendus
Hebdomadaires Des Seances De L Academie Des Sciences, 249(2), 216-218.
.. [3] Warshall, S. (1962). A theorem on boolean matrices. Journal of the
ACM (JACM), 9(1), 11-12.
.. [4] https://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm
"""
if transform is not None:
if transform == 'log':
if np.logical_or(adjacency > 1, adjacency < 0).any():
raise ValueError("Connection strengths must be in the " +
"interval [0,1) to use the transform " +
"-log(w_ij).")
SPL = -np.log(adjacency)
elif transform == 'inv':
SPL = 1. / adjacency
else:
raise ValueError("Unexpected transform type. Only 'log' and " +
"'inv' are accepted")
else:
SPL = adjacency.copy().astype('float')
SPL[SPL == 0] = np.inf
n = adjacency.shape[1]
flag_find_paths = True
hops = np.array(adjacency != 0).astype('float')
Pmat = np.repeat(np.atleast_2d(np.arange(0, n)), n, 0)
for k in range(n):
i2k_k2j = np.repeat(SPL[:, [k]], n, 1) + np.repeat(SPL[[k], :], n, 0)
if flag_find_paths:
path = SPL > i2k_k2j
i, j = np.where(path)
hops[path] = hops[i, k] + hops[k, j]
Pmat[path] = Pmat[i, k]
SPL = np.min(np.stack([SPL, i2k_k2j], 2), 2)
I = np.eye(n) > 0
SPL[I] = 0
if flag_find_paths:
hops[I], Pmat[I] = 0, 0
return SPL, hops, Pmat | python | def distance_wei_floyd(adjacency, transform=None):
"""
Computes the topological length of the shortest possible path connecting
every pair of nodes in the network.
Parameters
----------
D : (N x N) array_like
Weighted/unweighted, direct/undirected connection weight/length array
transform : str, optional
If `adjacency` is a connection weight array, specify a transform to map
input connection weights to connection lengths. Options include ['log',
'inv'], where 'log' is `-np.log(adjacency)` and 'inv' is `1/adjacency`.
Default: None
Returns
-------
SPL : (N x N) ndarray
Weighted/unweighted shortest path-length array. If `D` is a directed
graph, then `SPL` is not symmetric
hops : (N x N) ndarray
Number of edges in the shortest path array. If `D` is unweighted, `SPL`
and `hops` are identical.
Pmat : (N x N) ndarray
Element `[i,j]` of this array indicates the next node in the shortest
path between `i` and `j`. This array is used as an input argument for
function `retrieve_shortest_path()`, which returns as output the
sequence of nodes comprising the shortest path between a given pair of
nodes.
Notes
-----
There may be more than one shortest path between any pair of nodes in the
network. Non-unique shortest paths are termed shortest path degeneracies
and are most likely to occur in unweighted networks. When the shortest-path
is degenerate, the elements of `Pmat` correspond to the first shortest path
discovered by the algorithm.
The input array may be either a connection weight or length array. The
connection length array is typically obtained with a mapping from weight to
length, such that higher weights are mapped to shorter lengths (see
argument `transform`, above).
Originally written in Matlab by Andrea Avena-Koenigsberger (IU, 2012)
References
----------
.. [1] Floyd, R. W. (1962). Algorithm 97: shortest path. Communications of
the ACM, 5(6), 345.
.. [2] Roy, B. (1959). Transitivite et connexite. Comptes Rendus
Hebdomadaires Des Seances De L Academie Des Sciences, 249(2), 216-218.
.. [3] Warshall, S. (1962). A theorem on boolean matrices. Journal of the
ACM (JACM), 9(1), 11-12.
.. [4] https://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm
"""
if transform is not None:
if transform == 'log':
if np.logical_or(adjacency > 1, adjacency < 0).any():
raise ValueError("Connection strengths must be in the " +
"interval [0,1) to use the transform " +
"-log(w_ij).")
SPL = -np.log(adjacency)
elif transform == 'inv':
SPL = 1. / adjacency
else:
raise ValueError("Unexpected transform type. Only 'log' and " +
"'inv' are accepted")
else:
SPL = adjacency.copy().astype('float')
SPL[SPL == 0] = np.inf
n = adjacency.shape[1]
flag_find_paths = True
hops = np.array(adjacency != 0).astype('float')
Pmat = np.repeat(np.atleast_2d(np.arange(0, n)), n, 0)
for k in range(n):
i2k_k2j = np.repeat(SPL[:, [k]], n, 1) + np.repeat(SPL[[k], :], n, 0)
if flag_find_paths:
path = SPL > i2k_k2j
i, j = np.where(path)
hops[path] = hops[i, k] + hops[k, j]
Pmat[path] = Pmat[i, k]
SPL = np.min(np.stack([SPL, i2k_k2j], 2), 2)
I = np.eye(n) > 0
SPL[I] = 0
if flag_find_paths:
hops[I], Pmat[I] = 0, 0
return SPL, hops, Pmat | [
"def",
"distance_wei_floyd",
"(",
"adjacency",
",",
"transform",
"=",
"None",
")",
":",
"if",
"transform",
"is",
"not",
"None",
":",
"if",
"transform",
"==",
"'log'",
":",
"if",
"np",
".",
"logical_or",
"(",
"adjacency",
">",
"1",
",",
"adjacency",
"<",
... | Computes the topological length of the shortest possible path connecting
every pair of nodes in the network.
Parameters
----------
D : (N x N) array_like
Weighted/unweighted, direct/undirected connection weight/length array
transform : str, optional
If `adjacency` is a connection weight array, specify a transform to map
input connection weights to connection lengths. Options include ['log',
'inv'], where 'log' is `-np.log(adjacency)` and 'inv' is `1/adjacency`.
Default: None
Returns
-------
SPL : (N x N) ndarray
Weighted/unweighted shortest path-length array. If `D` is a directed
graph, then `SPL` is not symmetric
hops : (N x N) ndarray
Number of edges in the shortest path array. If `D` is unweighted, `SPL`
and `hops` are identical.
Pmat : (N x N) ndarray
Element `[i,j]` of this array indicates the next node in the shortest
path between `i` and `j`. This array is used as an input argument for
function `retrieve_shortest_path()`, which returns as output the
sequence of nodes comprising the shortest path between a given pair of
nodes.
Notes
-----
There may be more than one shortest path between any pair of nodes in the
network. Non-unique shortest paths are termed shortest path degeneracies
and are most likely to occur in unweighted networks. When the shortest-path
is degenerate, the elements of `Pmat` correspond to the first shortest path
discovered by the algorithm.
The input array may be either a connection weight or length array. The
connection length array is typically obtained with a mapping from weight to
length, such that higher weights are mapped to shorter lengths (see
argument `transform`, above).
Originally written in Matlab by Andrea Avena-Koenigsberger (IU, 2012)
References
----------
.. [1] Floyd, R. W. (1962). Algorithm 97: shortest path. Communications of
the ACM, 5(6), 345.
.. [2] Roy, B. (1959). Transitivite et connexite. Comptes Rendus
Hebdomadaires Des Seances De L Academie Des Sciences, 249(2), 216-218.
.. [3] Warshall, S. (1962). A theorem on boolean matrices. Journal of the
ACM (JACM), 9(1), 11-12.
.. [4] https://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm | [
"Computes",
"the",
"topological",
"length",
"of",
"the",
"shortest",
"possible",
"path",
"connecting",
"every",
"pair",
"of",
"nodes",
"in",
"the",
"network",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/distance.py#L326-L421 | train | 202,442 |
aestrivex/bctpy | bct/algorithms/distance.py | findwalks | def findwalks(CIJ):
'''
Walks are sequences of linked nodes, that may visit a single node more
than once. This function finds the number of walks of a given length,
between any two nodes.
Parameters
----------
CIJ : NxN np.ndarray
binary directed/undirected connection matrix
Returns
-------
Wq : NxNxQ np.ndarray
Wq[i,j,q] is the number of walks from i to j of length q
twalk : int
total number of walks found
wlq : Qx1 np.ndarray
walk length distribution as a function of q
Notes
-----
Wq grows very quickly for larger N,K,q. Weights are discarded.
'''
CIJ = binarize(CIJ, copy=True)
n = len(CIJ)
Wq = np.zeros((n, n, n))
CIJpwr = CIJ.copy()
Wq[:, :, 1] = CIJ
for q in range(n):
CIJpwr = np.dot(CIJpwr, CIJ)
Wq[:, :, q] = CIJpwr
twalk = np.sum(Wq) # total number of walks
wlq = np.sum(np.sum(Wq, axis=0), axis=0)
return Wq, twalk, wlq | python | def findwalks(CIJ):
'''
Walks are sequences of linked nodes, that may visit a single node more
than once. This function finds the number of walks of a given length,
between any two nodes.
Parameters
----------
CIJ : NxN np.ndarray
binary directed/undirected connection matrix
Returns
-------
Wq : NxNxQ np.ndarray
Wq[i,j,q] is the number of walks from i to j of length q
twalk : int
total number of walks found
wlq : Qx1 np.ndarray
walk length distribution as a function of q
Notes
-----
Wq grows very quickly for larger N,K,q. Weights are discarded.
'''
CIJ = binarize(CIJ, copy=True)
n = len(CIJ)
Wq = np.zeros((n, n, n))
CIJpwr = CIJ.copy()
Wq[:, :, 1] = CIJ
for q in range(n):
CIJpwr = np.dot(CIJpwr, CIJ)
Wq[:, :, q] = CIJpwr
twalk = np.sum(Wq) # total number of walks
wlq = np.sum(np.sum(Wq, axis=0), axis=0)
return Wq, twalk, wlq | [
"def",
"findwalks",
"(",
"CIJ",
")",
":",
"CIJ",
"=",
"binarize",
"(",
"CIJ",
",",
"copy",
"=",
"True",
")",
"n",
"=",
"len",
"(",
"CIJ",
")",
"Wq",
"=",
"np",
".",
"zeros",
"(",
"(",
"n",
",",
"n",
",",
"n",
")",
")",
"CIJpwr",
"=",
"CIJ",... | Walks are sequences of linked nodes, that may visit a single node more
than once. This function finds the number of walks of a given length,
between any two nodes.
Parameters
----------
CIJ : NxN np.ndarray
binary directed/undirected connection matrix
Returns
-------
Wq : NxNxQ np.ndarray
Wq[i,j,q] is the number of walks from i to j of length q
twalk : int
total number of walks found
wlq : Qx1 np.ndarray
walk length distribution as a function of q
Notes
-----
Wq grows very quickly for larger N,K,q. Weights are discarded. | [
"Walks",
"are",
"sequences",
"of",
"linked",
"nodes",
"that",
"may",
"visit",
"a",
"single",
"node",
"more",
"than",
"once",
".",
"This",
"function",
"finds",
"the",
"number",
"of",
"walks",
"of",
"a",
"given",
"length",
"between",
"any",
"two",
"nodes",
... | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/distance.py#L797-L832 | train | 202,443 |
aestrivex/bctpy | bct/algorithms/distance.py | mean_first_passage_time | def mean_first_passage_time(adjacency):
"""
Calculates mean first passage time of `adjacency`
The first passage time from i to j is the expected number of steps it takes
a random walker starting at node i to arrive for the first time at node j.
The mean first passage time is not a symmetric measure: `mfpt(i,j)` may be
different from `mfpt(j,i)`.
Parameters
----------
adjacency : (N x N) array_like
Weighted/unweighted, direct/undirected connection weight/length array
Returns
-------
MFPT : (N x N) ndarray
Pairwise mean first passage time array
References
----------
.. [1] Goni, J., Avena-Koenigsberger, A., de Mendizabal, N. V., van den
Heuvel, M. P., Betzel, R. F., & Sporns, O. (2013). Exploring the
morphospace of communication efficiency in complex networks. PLoS One,
8(3), e58070.
"""
P = np.linalg.solve(np.diag(np.sum(adjacency, axis=1)), adjacency)
n = len(P)
D, V = np.linalg.eig(P.T)
aux = np.abs(D - 1)
index = np.where(aux == aux.min())[0]
if aux[index] > 10e-3:
raise ValueError("Cannot find eigenvalue of 1. Minimum eigenvalue " +
"value is {0}. Tolerance was ".format(aux[index]+1) +
"set at 10e-3.")
w = V[:, index].T
w = w / np.sum(w)
W = np.real(np.repeat(w, n, 0))
I = np.eye(n)
Z = np.linalg.inv(I - P + W)
mfpt = (np.repeat(np.atleast_2d(np.diag(Z)), n, 0) - Z) / W
return mfpt | python | def mean_first_passage_time(adjacency):
"""
Calculates mean first passage time of `adjacency`
The first passage time from i to j is the expected number of steps it takes
a random walker starting at node i to arrive for the first time at node j.
The mean first passage time is not a symmetric measure: `mfpt(i,j)` may be
different from `mfpt(j,i)`.
Parameters
----------
adjacency : (N x N) array_like
Weighted/unweighted, direct/undirected connection weight/length array
Returns
-------
MFPT : (N x N) ndarray
Pairwise mean first passage time array
References
----------
.. [1] Goni, J., Avena-Koenigsberger, A., de Mendizabal, N. V., van den
Heuvel, M. P., Betzel, R. F., & Sporns, O. (2013). Exploring the
morphospace of communication efficiency in complex networks. PLoS One,
8(3), e58070.
"""
P = np.linalg.solve(np.diag(np.sum(adjacency, axis=1)), adjacency)
n = len(P)
D, V = np.linalg.eig(P.T)
aux = np.abs(D - 1)
index = np.where(aux == aux.min())[0]
if aux[index] > 10e-3:
raise ValueError("Cannot find eigenvalue of 1. Minimum eigenvalue " +
"value is {0}. Tolerance was ".format(aux[index]+1) +
"set at 10e-3.")
w = V[:, index].T
w = w / np.sum(w)
W = np.real(np.repeat(w, n, 0))
I = np.eye(n)
Z = np.linalg.inv(I - P + W)
mfpt = (np.repeat(np.atleast_2d(np.diag(Z)), n, 0) - Z) / W
return mfpt | [
"def",
"mean_first_passage_time",
"(",
"adjacency",
")",
":",
"P",
"=",
"np",
".",
"linalg",
".",
"solve",
"(",
"np",
".",
"diag",
"(",
"np",
".",
"sum",
"(",
"adjacency",
",",
"axis",
"=",
"1",
")",
")",
",",
"adjacency",
")",
"n",
"=",
"len",
"... | Calculates mean first passage time of `adjacency`
The first passage time from i to j is the expected number of steps it takes
a random walker starting at node i to arrive for the first time at node j.
The mean first passage time is not a symmetric measure: `mfpt(i,j)` may be
different from `mfpt(j,i)`.
Parameters
----------
adjacency : (N x N) array_like
Weighted/unweighted, direct/undirected connection weight/length array
Returns
-------
MFPT : (N x N) ndarray
Pairwise mean first passage time array
References
----------
.. [1] Goni, J., Avena-Koenigsberger, A., de Mendizabal, N. V., van den
Heuvel, M. P., Betzel, R. F., & Sporns, O. (2013). Exploring the
morphospace of communication efficiency in complex networks. PLoS One,
8(3), e58070. | [
"Calculates",
"mean",
"first",
"passage",
"time",
"of",
"adjacency"
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/distance.py#L1007-L1057 | train | 202,444 |
aestrivex/bctpy | bct/utils/miscellaneous_utilities.py | teachers_round | def teachers_round(x):
'''
Do rounding such that .5 always rounds to 1, and not bankers rounding.
This is for compatibility with matlab functions, and ease of testing.
'''
if ((x > 0) and (x % 1 >= 0.5)) or ((x < 0) and (x % 1 > 0.5)):
return int(np.ceil(x))
else:
return int(np.floor(x)) | python | def teachers_round(x):
'''
Do rounding such that .5 always rounds to 1, and not bankers rounding.
This is for compatibility with matlab functions, and ease of testing.
'''
if ((x > 0) and (x % 1 >= 0.5)) or ((x < 0) and (x % 1 > 0.5)):
return int(np.ceil(x))
else:
return int(np.floor(x)) | [
"def",
"teachers_round",
"(",
"x",
")",
":",
"if",
"(",
"(",
"x",
">",
"0",
")",
"and",
"(",
"x",
"%",
"1",
">=",
"0.5",
")",
")",
"or",
"(",
"(",
"x",
"<",
"0",
")",
"and",
"(",
"x",
"%",
"1",
">",
"0.5",
")",
")",
":",
"return",
"int"... | Do rounding such that .5 always rounds to 1, and not bankers rounding.
This is for compatibility with matlab functions, and ease of testing. | [
"Do",
"rounding",
"such",
"that",
".",
"5",
"always",
"rounds",
"to",
"1",
"and",
"not",
"bankers",
"rounding",
".",
"This",
"is",
"for",
"compatibility",
"with",
"matlab",
"functions",
"and",
"ease",
"of",
"testing",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/utils/miscellaneous_utilities.py#L10-L18 | train | 202,445 |
aestrivex/bctpy | bct/utils/miscellaneous_utilities.py | dummyvar | def dummyvar(cis, return_sparse=False):
'''
This is an efficient implementation of matlab's "dummyvar" command
using sparse matrices.
input: partitions, NxM array-like containing M partitions of N nodes
into <=N distinct communities
output: dummyvar, an NxR matrix containing R column variables (indicator
variables) with N entries, where R is the total number of communities
summed across each of the M partitions.
i.e.
r = sum((max(len(unique(partitions[i]))) for i in range(m)))
'''
# num_rows is not affected by partition indexes
n = np.size(cis, axis=0)
m = np.size(cis, axis=1)
r = np.sum((np.max(len(np.unique(cis[:, i])))) for i in range(m))
nnz = np.prod(cis.shape)
ix = np.argsort(cis, axis=0)
# s_cis=np.sort(cis,axis=0)
# FIXME use the sorted indices to sort by row efficiently
s_cis = cis[ix][:, range(m), range(m)]
mask = np.hstack((((True,),) * m, (s_cis[:-1, :] != s_cis[1:, :]).T))
indptr, = np.where(mask.flat)
indptr = np.append(indptr, nnz)
import scipy.sparse as sp
dv = sp.csc_matrix((np.repeat((1,), nnz), ix.T.flat, indptr), shape=(n, r))
return dv.toarray() | python | def dummyvar(cis, return_sparse=False):
'''
This is an efficient implementation of matlab's "dummyvar" command
using sparse matrices.
input: partitions, NxM array-like containing M partitions of N nodes
into <=N distinct communities
output: dummyvar, an NxR matrix containing R column variables (indicator
variables) with N entries, where R is the total number of communities
summed across each of the M partitions.
i.e.
r = sum((max(len(unique(partitions[i]))) for i in range(m)))
'''
# num_rows is not affected by partition indexes
n = np.size(cis, axis=0)
m = np.size(cis, axis=1)
r = np.sum((np.max(len(np.unique(cis[:, i])))) for i in range(m))
nnz = np.prod(cis.shape)
ix = np.argsort(cis, axis=0)
# s_cis=np.sort(cis,axis=0)
# FIXME use the sorted indices to sort by row efficiently
s_cis = cis[ix][:, range(m), range(m)]
mask = np.hstack((((True,),) * m, (s_cis[:-1, :] != s_cis[1:, :]).T))
indptr, = np.where(mask.flat)
indptr = np.append(indptr, nnz)
import scipy.sparse as sp
dv = sp.csc_matrix((np.repeat((1,), nnz), ix.T.flat, indptr), shape=(n, r))
return dv.toarray() | [
"def",
"dummyvar",
"(",
"cis",
",",
"return_sparse",
"=",
"False",
")",
":",
"# num_rows is not affected by partition indexes",
"n",
"=",
"np",
".",
"size",
"(",
"cis",
",",
"axis",
"=",
"0",
")",
"m",
"=",
"np",
".",
"size",
"(",
"cis",
",",
"axis",
"... | This is an efficient implementation of matlab's "dummyvar" command
using sparse matrices.
input: partitions, NxM array-like containing M partitions of N nodes
into <=N distinct communities
output: dummyvar, an NxR matrix containing R column variables (indicator
variables) with N entries, where R is the total number of communities
summed across each of the M partitions.
i.e.
r = sum((max(len(unique(partitions[i]))) for i in range(m))) | [
"This",
"is",
"an",
"efficient",
"implementation",
"of",
"matlab",
"s",
"dummyvar",
"command",
"using",
"sparse",
"matrices",
"."
] | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/utils/miscellaneous_utilities.py#L54-L86 | train | 202,446 |
aestrivex/bctpy | bct/algorithms/core.py | assortativity_bin | def assortativity_bin(CIJ, flag=0):
'''
The assortativity coefficient is a correlation coefficient between the
degrees of all nodes on two opposite ends of a link. A positive
assortativity coefficient indicates that nodes tend to link to other
nodes with the same or similar degree.
Parameters
----------
CIJ : NxN np.ndarray
binary directed/undirected connection matrix
flag : int
0 : undirected graph; degree/degree correlation
1 : directed graph; out-degree/in-degree correlation
2 : directed graph; in-degree/out-degree correlation
3 : directed graph; out-degree/out-degree correlation
4 : directed graph; in-degree/in-degreen correlation
Returns
-------
r : float
assortativity coefficient
Notes
-----
The function accepts weighted networks, but all connection
weights are ignored. The main diagonal should be empty. For flag 1
the function computes the directed assortativity described in Rubinov
and Sporns (2010) NeuroImage.
'''
if flag == 0: # undirected version
deg = degrees_und(CIJ)
i, j = np.where(np.triu(CIJ, 1) > 0)
K = len(i)
degi = deg[i]
degj = deg[j]
else: # directed version
id, od, deg = degrees_dir(CIJ)
i, j = np.where(CIJ > 0)
K = len(i)
if flag == 1:
degi = od[i]
degj = id[j]
elif flag == 2:
degi = id[i]
degj = od[j]
elif flag == 3:
degi = od[i]
degj = od[j]
elif flag == 4:
degi = id[i]
degj = id[j]
else:
raise ValueError('Flag must be 0-4')
# compute assortativity
term1 = np.sum(degi * degj) / K
term2 = np.square(np.sum(.5 * (degi + degj)) / K)
term3 = np.sum(.5 * (degi * degi + degj * degj)) / K
r = (term1 - term2) / (term3 - term2)
return r | python | def assortativity_bin(CIJ, flag=0):
'''
The assortativity coefficient is a correlation coefficient between the
degrees of all nodes on two opposite ends of a link. A positive
assortativity coefficient indicates that nodes tend to link to other
nodes with the same or similar degree.
Parameters
----------
CIJ : NxN np.ndarray
binary directed/undirected connection matrix
flag : int
0 : undirected graph; degree/degree correlation
1 : directed graph; out-degree/in-degree correlation
2 : directed graph; in-degree/out-degree correlation
3 : directed graph; out-degree/out-degree correlation
4 : directed graph; in-degree/in-degreen correlation
Returns
-------
r : float
assortativity coefficient
Notes
-----
The function accepts weighted networks, but all connection
weights are ignored. The main diagonal should be empty. For flag 1
the function computes the directed assortativity described in Rubinov
and Sporns (2010) NeuroImage.
'''
if flag == 0: # undirected version
deg = degrees_und(CIJ)
i, j = np.where(np.triu(CIJ, 1) > 0)
K = len(i)
degi = deg[i]
degj = deg[j]
else: # directed version
id, od, deg = degrees_dir(CIJ)
i, j = np.where(CIJ > 0)
K = len(i)
if flag == 1:
degi = od[i]
degj = id[j]
elif flag == 2:
degi = id[i]
degj = od[j]
elif flag == 3:
degi = od[i]
degj = od[j]
elif flag == 4:
degi = id[i]
degj = id[j]
else:
raise ValueError('Flag must be 0-4')
# compute assortativity
term1 = np.sum(degi * degj) / K
term2 = np.square(np.sum(.5 * (degi + degj)) / K)
term3 = np.sum(.5 * (degi * degi + degj * degj)) / K
r = (term1 - term2) / (term3 - term2)
return r | [
"def",
"assortativity_bin",
"(",
"CIJ",
",",
"flag",
"=",
"0",
")",
":",
"if",
"flag",
"==",
"0",
":",
"# undirected version",
"deg",
"=",
"degrees_und",
"(",
"CIJ",
")",
"i",
",",
"j",
"=",
"np",
".",
"where",
"(",
"np",
".",
"triu",
"(",
"CIJ",
... | The assortativity coefficient is a correlation coefficient between the
degrees of all nodes on two opposite ends of a link. A positive
assortativity coefficient indicates that nodes tend to link to other
nodes with the same or similar degree.
Parameters
----------
CIJ : NxN np.ndarray
binary directed/undirected connection matrix
flag : int
0 : undirected graph; degree/degree correlation
1 : directed graph; out-degree/in-degree correlation
2 : directed graph; in-degree/out-degree correlation
3 : directed graph; out-degree/out-degree correlation
4 : directed graph; in-degree/in-degreen correlation
Returns
-------
r : float
assortativity coefficient
Notes
-----
The function accepts weighted networks, but all connection
weights are ignored. The main diagonal should be empty. For flag 1
the function computes the directed assortativity described in Rubinov
and Sporns (2010) NeuroImage. | [
"The",
"assortativity",
"coefficient",
"is",
"a",
"correlation",
"coefficient",
"between",
"the",
"degrees",
"of",
"all",
"nodes",
"on",
"two",
"opposite",
"ends",
"of",
"a",
"link",
".",
"A",
"positive",
"assortativity",
"coefficient",
"indicates",
"that",
"nod... | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/core.py#L9-L70 | train | 202,447 |
aestrivex/bctpy | bct/algorithms/core.py | kcore_bd | def kcore_bd(CIJ, k, peel=False):
'''
The k-core is the largest subnetwork comprising nodes of degree at
least k. This function computes the k-core for a given binary directed
connection matrix by recursively peeling off nodes with degree lower
than k, until no such nodes remain.
Parameters
----------
CIJ : NxN np.ndarray
binary directed adjacency matrix
k : int
level of k-core
peel : bool
If True, additionally calculates peelorder and peellevel. Defaults to
False.
Returns
-------
CIJkcore : NxN np.ndarray
connection matrix of the k-core. This matrix only contains nodes of
degree at least k.
kn : int
size of k-core
peelorder : Nx1 np.ndarray
indices in the order in which they were peeled away during k-core
decomposition. only returned if peel is specified.
peellevel : Nx1 np.ndarray
corresponding level - nodes in at the same level have been peeled
away at the same time. only return if peel is specified
Notes
-----
'peelorder' and 'peellevel' are similar the the k-core sub-shells
described in Modha and Singh (2010).
'''
if peel:
peelorder, peellevel = ([], [])
iter = 0
CIJkcore = CIJ.copy()
while True:
id, od, deg = degrees_dir(CIJkcore) # get degrees of matrix
# find nodes with degree <k
ff, = np.where(np.logical_and(deg < k, deg > 0))
if ff.size == 0:
break # if none found -> stop
# else peel away found nodes
iter += 1
CIJkcore[ff, :] = 0
CIJkcore[:, ff] = 0
if peel:
peelorder.append(ff)
if peel:
peellevel.append(iter * np.ones((len(ff),)))
kn = np.sum(deg > 0)
if peel:
return CIJkcore, kn, peelorder, peellevel
else:
return CIJkcore, kn | python | def kcore_bd(CIJ, k, peel=False):
'''
The k-core is the largest subnetwork comprising nodes of degree at
least k. This function computes the k-core for a given binary directed
connection matrix by recursively peeling off nodes with degree lower
than k, until no such nodes remain.
Parameters
----------
CIJ : NxN np.ndarray
binary directed adjacency matrix
k : int
level of k-core
peel : bool
If True, additionally calculates peelorder and peellevel. Defaults to
False.
Returns
-------
CIJkcore : NxN np.ndarray
connection matrix of the k-core. This matrix only contains nodes of
degree at least k.
kn : int
size of k-core
peelorder : Nx1 np.ndarray
indices in the order in which they were peeled away during k-core
decomposition. only returned if peel is specified.
peellevel : Nx1 np.ndarray
corresponding level - nodes in at the same level have been peeled
away at the same time. only return if peel is specified
Notes
-----
'peelorder' and 'peellevel' are similar the the k-core sub-shells
described in Modha and Singh (2010).
'''
if peel:
peelorder, peellevel = ([], [])
iter = 0
CIJkcore = CIJ.copy()
while True:
id, od, deg = degrees_dir(CIJkcore) # get degrees of matrix
# find nodes with degree <k
ff, = np.where(np.logical_and(deg < k, deg > 0))
if ff.size == 0:
break # if none found -> stop
# else peel away found nodes
iter += 1
CIJkcore[ff, :] = 0
CIJkcore[:, ff] = 0
if peel:
peelorder.append(ff)
if peel:
peellevel.append(iter * np.ones((len(ff),)))
kn = np.sum(deg > 0)
if peel:
return CIJkcore, kn, peelorder, peellevel
else:
return CIJkcore, kn | [
"def",
"kcore_bd",
"(",
"CIJ",
",",
"k",
",",
"peel",
"=",
"False",
")",
":",
"if",
"peel",
":",
"peelorder",
",",
"peellevel",
"=",
"(",
"[",
"]",
",",
"[",
"]",
")",
"iter",
"=",
"0",
"CIJkcore",
"=",
"CIJ",
".",
"copy",
"(",
")",
"while",
... | The k-core is the largest subnetwork comprising nodes of degree at
least k. This function computes the k-core for a given binary directed
connection matrix by recursively peeling off nodes with degree lower
than k, until no such nodes remain.
Parameters
----------
CIJ : NxN np.ndarray
binary directed adjacency matrix
k : int
level of k-core
peel : bool
If True, additionally calculates peelorder and peellevel. Defaults to
False.
Returns
-------
CIJkcore : NxN np.ndarray
connection matrix of the k-core. This matrix only contains nodes of
degree at least k.
kn : int
size of k-core
peelorder : Nx1 np.ndarray
indices in the order in which they were peeled away during k-core
decomposition. only returned if peel is specified.
peellevel : Nx1 np.ndarray
corresponding level - nodes in at the same level have been peeled
away at the same time. only return if peel is specified
Notes
-----
'peelorder' and 'peellevel' are similar the the k-core sub-shells
described in Modha and Singh (2010). | [
"The",
"k",
"-",
"core",
"is",
"the",
"largest",
"subnetwork",
"comprising",
"nodes",
"of",
"degree",
"at",
"least",
"k",
".",
"This",
"function",
"computes",
"the",
"k",
"-",
"core",
"for",
"a",
"given",
"binary",
"directed",
"connection",
"matrix",
"by",... | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/core.py#L232-L297 | train | 202,448 |
aestrivex/bctpy | bct/algorithms/core.py | kcore_bu | def kcore_bu(CIJ, k, peel=False):
'''
The k-core is the largest subnetwork comprising nodes of degree at
least k. This function computes the k-core for a given binary
undirected connection matrix by recursively peeling off nodes with
degree lower than k, until no such nodes remain.
Parameters
----------
CIJ : NxN np.ndarray
binary undirected connection matrix
k : int
level of k-core
peel : bool
If True, additionally calculates peelorder and peellevel. Defaults to
False.
Returns
-------
CIJkcore : NxN np.ndarray
connection matrix of the k-core. This matrix only contains nodes of
degree at least k.
kn : int
size of k-core
peelorder : Nx1 np.ndarray
indices in the order in which they were peeled away during k-core
decomposition. only returned if peel is specified.
peellevel : Nx1 np.ndarray
corresponding level - nodes in at the same level have been peeled
away at the same time. only return if peel is specified
Notes
-----
'peelorder' and 'peellevel' are similar the the k-core sub-shells
described in Modha and Singh (2010).
'''
if peel:
peelorder, peellevel = ([], [])
iter = 0
CIJkcore = CIJ.copy()
while True:
deg = degrees_und(CIJkcore) # get degrees of matrix
# find nodes with degree <k
ff, = np.where(np.logical_and(deg < k, deg > 0))
if ff.size == 0:
break # if none found -> stop
# else peel away found nodes
iter += 1
CIJkcore[ff, :] = 0
CIJkcore[:, ff] = 0
if peel:
peelorder.append(ff)
if peel:
peellevel.append(iter * np.ones((len(ff),)))
kn = np.sum(deg > 0)
if peel:
return CIJkcore, kn, peelorder, peellevel
else:
return CIJkcore, kn | python | def kcore_bu(CIJ, k, peel=False):
'''
The k-core is the largest subnetwork comprising nodes of degree at
least k. This function computes the k-core for a given binary
undirected connection matrix by recursively peeling off nodes with
degree lower than k, until no such nodes remain.
Parameters
----------
CIJ : NxN np.ndarray
binary undirected connection matrix
k : int
level of k-core
peel : bool
If True, additionally calculates peelorder and peellevel. Defaults to
False.
Returns
-------
CIJkcore : NxN np.ndarray
connection matrix of the k-core. This matrix only contains nodes of
degree at least k.
kn : int
size of k-core
peelorder : Nx1 np.ndarray
indices in the order in which they were peeled away during k-core
decomposition. only returned if peel is specified.
peellevel : Nx1 np.ndarray
corresponding level - nodes in at the same level have been peeled
away at the same time. only return if peel is specified
Notes
-----
'peelorder' and 'peellevel' are similar the the k-core sub-shells
described in Modha and Singh (2010).
'''
if peel:
peelorder, peellevel = ([], [])
iter = 0
CIJkcore = CIJ.copy()
while True:
deg = degrees_und(CIJkcore) # get degrees of matrix
# find nodes with degree <k
ff, = np.where(np.logical_and(deg < k, deg > 0))
if ff.size == 0:
break # if none found -> stop
# else peel away found nodes
iter += 1
CIJkcore[ff, :] = 0
CIJkcore[:, ff] = 0
if peel:
peelorder.append(ff)
if peel:
peellevel.append(iter * np.ones((len(ff),)))
kn = np.sum(deg > 0)
if peel:
return CIJkcore, kn, peelorder, peellevel
else:
return CIJkcore, kn | [
"def",
"kcore_bu",
"(",
"CIJ",
",",
"k",
",",
"peel",
"=",
"False",
")",
":",
"if",
"peel",
":",
"peelorder",
",",
"peellevel",
"=",
"(",
"[",
"]",
",",
"[",
"]",
")",
"iter",
"=",
"0",
"CIJkcore",
"=",
"CIJ",
".",
"copy",
"(",
")",
"while",
... | The k-core is the largest subnetwork comprising nodes of degree at
least k. This function computes the k-core for a given binary
undirected connection matrix by recursively peeling off nodes with
degree lower than k, until no such nodes remain.
Parameters
----------
CIJ : NxN np.ndarray
binary undirected connection matrix
k : int
level of k-core
peel : bool
If True, additionally calculates peelorder and peellevel. Defaults to
False.
Returns
-------
CIJkcore : NxN np.ndarray
connection matrix of the k-core. This matrix only contains nodes of
degree at least k.
kn : int
size of k-core
peelorder : Nx1 np.ndarray
indices in the order in which they were peeled away during k-core
decomposition. only returned if peel is specified.
peellevel : Nx1 np.ndarray
corresponding level - nodes in at the same level have been peeled
away at the same time. only return if peel is specified
Notes
-----
'peelorder' and 'peellevel' are similar the the k-core sub-shells
described in Modha and Singh (2010). | [
"The",
"k",
"-",
"core",
"is",
"the",
"largest",
"subnetwork",
"comprising",
"nodes",
"of",
"degree",
"at",
"least",
"k",
".",
"This",
"function",
"computes",
"the",
"k",
"-",
"core",
"for",
"a",
"given",
"binary",
"undirected",
"connection",
"matrix",
"by... | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/core.py#L300-L365 | train | 202,449 |
aestrivex/bctpy | bct/algorithms/core.py | score_wu | def score_wu(CIJ, s):
'''
The s-core is the largest subnetwork comprising nodes of strength at
least s. This function computes the s-core for a given weighted
undirected connection matrix. Computation is analogous to the more
widely used k-core, but is based on node strengths instead of node
degrees.
Parameters
----------
CIJ : NxN np.ndarray
weighted undirected connection matrix
s : float
level of s-core. Note that can take on any fractional value.
Returns
-------
CIJscore : NxN np.ndarray
connection matrix of the s-core. This matrix contains only nodes with
a strength of at least s.
sn : int
size of s-core
'''
CIJscore = CIJ.copy()
while True:
str = strengths_und(CIJscore) # get strengths of matrix
# find nodes with strength <s
ff, = np.where(np.logical_and(str < s, str > 0))
if ff.size == 0:
break # if none found -> stop
# else peel away found nodes
CIJscore[ff, :] = 0
CIJscore[:, ff] = 0
sn = np.sum(str > 0)
return CIJscore, sn | python | def score_wu(CIJ, s):
'''
The s-core is the largest subnetwork comprising nodes of strength at
least s. This function computes the s-core for a given weighted
undirected connection matrix. Computation is analogous to the more
widely used k-core, but is based on node strengths instead of node
degrees.
Parameters
----------
CIJ : NxN np.ndarray
weighted undirected connection matrix
s : float
level of s-core. Note that can take on any fractional value.
Returns
-------
CIJscore : NxN np.ndarray
connection matrix of the s-core. This matrix contains only nodes with
a strength of at least s.
sn : int
size of s-core
'''
CIJscore = CIJ.copy()
while True:
str = strengths_und(CIJscore) # get strengths of matrix
# find nodes with strength <s
ff, = np.where(np.logical_and(str < s, str > 0))
if ff.size == 0:
break # if none found -> stop
# else peel away found nodes
CIJscore[ff, :] = 0
CIJscore[:, ff] = 0
sn = np.sum(str > 0)
return CIJscore, sn | [
"def",
"score_wu",
"(",
"CIJ",
",",
"s",
")",
":",
"CIJscore",
"=",
"CIJ",
".",
"copy",
"(",
")",
"while",
"True",
":",
"str",
"=",
"strengths_und",
"(",
"CIJscore",
")",
"# get strengths of matrix",
"# find nodes with strength <s",
"ff",
",",
"=",
"np",
"... | The s-core is the largest subnetwork comprising nodes of strength at
least s. This function computes the s-core for a given weighted
undirected connection matrix. Computation is analogous to the more
widely used k-core, but is based on node strengths instead of node
degrees.
Parameters
----------
CIJ : NxN np.ndarray
weighted undirected connection matrix
s : float
level of s-core. Note that can take on any fractional value.
Returns
-------
CIJscore : NxN np.ndarray
connection matrix of the s-core. This matrix contains only nodes with
a strength of at least s.
sn : int
size of s-core | [
"The",
"s",
"-",
"core",
"is",
"the",
"largest",
"subnetwork",
"comprising",
"nodes",
"of",
"strength",
"at",
"least",
"s",
".",
"This",
"function",
"computes",
"the",
"s",
"-",
"core",
"for",
"a",
"given",
"weighted",
"undirected",
"connection",
"matrix",
... | 4cb0e759eb4a038750b07e23bd29958c400684b8 | https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/core.py#L595-L633 | train | 202,450 |
chakki-works/seqeval | seqeval/callbacks.py | F1Metrics.find_pad_index | def find_pad_index(self, array):
"""Find padding index.
Args:
array (list): integer list.
Returns:
idx: padding index.
Examples:
>>> array = [1, 2, 0]
>>> self.find_pad_index(array)
2
"""
try:
return list(array).index(self.pad_value)
except ValueError:
return len(array) | python | def find_pad_index(self, array):
"""Find padding index.
Args:
array (list): integer list.
Returns:
idx: padding index.
Examples:
>>> array = [1, 2, 0]
>>> self.find_pad_index(array)
2
"""
try:
return list(array).index(self.pad_value)
except ValueError:
return len(array) | [
"def",
"find_pad_index",
"(",
"self",
",",
"array",
")",
":",
"try",
":",
"return",
"list",
"(",
"array",
")",
".",
"index",
"(",
"self",
".",
"pad_value",
")",
"except",
"ValueError",
":",
"return",
"len",
"(",
"array",
")"
] | Find padding index.
Args:
array (list): integer list.
Returns:
idx: padding index.
Examples:
>>> array = [1, 2, 0]
>>> self.find_pad_index(array)
2 | [
"Find",
"padding",
"index",
"."
] | f1e5ff1a94da11500c47fd11d4d72617f7f55911 | https://github.com/chakki-works/seqeval/blob/f1e5ff1a94da11500c47fd11d4d72617f7f55911/seqeval/callbacks.py#L21-L38 | train | 202,451 |
chakki-works/seqeval | seqeval/callbacks.py | F1Metrics.get_length | def get_length(self, y):
"""Get true length of y.
Args:
y (list): padded list.
Returns:
lens: true length of y.
Examples:
>>> y = [[1, 0, 0], [1, 1, 0], [1, 1, 1]]
>>> self.get_length(y)
[1, 2, 3]
"""
lens = [self.find_pad_index(row) for row in y]
return lens | python | def get_length(self, y):
"""Get true length of y.
Args:
y (list): padded list.
Returns:
lens: true length of y.
Examples:
>>> y = [[1, 0, 0], [1, 1, 0], [1, 1, 1]]
>>> self.get_length(y)
[1, 2, 3]
"""
lens = [self.find_pad_index(row) for row in y]
return lens | [
"def",
"get_length",
"(",
"self",
",",
"y",
")",
":",
"lens",
"=",
"[",
"self",
".",
"find_pad_index",
"(",
"row",
")",
"for",
"row",
"in",
"y",
"]",
"return",
"lens"
] | Get true length of y.
Args:
y (list): padded list.
Returns:
lens: true length of y.
Examples:
>>> y = [[1, 0, 0], [1, 1, 0], [1, 1, 1]]
>>> self.get_length(y)
[1, 2, 3] | [
"Get",
"true",
"length",
"of",
"y",
"."
] | f1e5ff1a94da11500c47fd11d4d72617f7f55911 | https://github.com/chakki-works/seqeval/blob/f1e5ff1a94da11500c47fd11d4d72617f7f55911/seqeval/callbacks.py#L40-L55 | train | 202,452 |
chakki-works/seqeval | seqeval/callbacks.py | F1Metrics.convert_idx_to_name | def convert_idx_to_name(self, y, lens):
"""Convert label index to name.
Args:
y (list): label index list.
lens (list): true length of y.
Returns:
y: label name list.
Examples:
>>> # assumes that id2label = {1: 'B-LOC', 2: 'I-LOC'}
>>> y = [[1, 0, 0], [1, 2, 0], [1, 1, 1]]
>>> lens = [1, 2, 3]
>>> self.convert_idx_to_name(y, lens)
[['B-LOC'], ['B-LOC', 'I-LOC'], ['B-LOC', 'B-LOC', 'B-LOC']]
"""
y = [[self.id2label[idx] for idx in row[:l]]
for row, l in zip(y, lens)]
return y | python | def convert_idx_to_name(self, y, lens):
"""Convert label index to name.
Args:
y (list): label index list.
lens (list): true length of y.
Returns:
y: label name list.
Examples:
>>> # assumes that id2label = {1: 'B-LOC', 2: 'I-LOC'}
>>> y = [[1, 0, 0], [1, 2, 0], [1, 1, 1]]
>>> lens = [1, 2, 3]
>>> self.convert_idx_to_name(y, lens)
[['B-LOC'], ['B-LOC', 'I-LOC'], ['B-LOC', 'B-LOC', 'B-LOC']]
"""
y = [[self.id2label[idx] for idx in row[:l]]
for row, l in zip(y, lens)]
return y | [
"def",
"convert_idx_to_name",
"(",
"self",
",",
"y",
",",
"lens",
")",
":",
"y",
"=",
"[",
"[",
"self",
".",
"id2label",
"[",
"idx",
"]",
"for",
"idx",
"in",
"row",
"[",
":",
"l",
"]",
"]",
"for",
"row",
",",
"l",
"in",
"zip",
"(",
"y",
",",
... | Convert label index to name.
Args:
y (list): label index list.
lens (list): true length of y.
Returns:
y: label name list.
Examples:
>>> # assumes that id2label = {1: 'B-LOC', 2: 'I-LOC'}
>>> y = [[1, 0, 0], [1, 2, 0], [1, 1, 1]]
>>> lens = [1, 2, 3]
>>> self.convert_idx_to_name(y, lens)
[['B-LOC'], ['B-LOC', 'I-LOC'], ['B-LOC', 'B-LOC', 'B-LOC']] | [
"Convert",
"label",
"index",
"to",
"name",
"."
] | f1e5ff1a94da11500c47fd11d4d72617f7f55911 | https://github.com/chakki-works/seqeval/blob/f1e5ff1a94da11500c47fd11d4d72617f7f55911/seqeval/callbacks.py#L57-L76 | train | 202,453 |
chakki-works/seqeval | seqeval/callbacks.py | F1Metrics.predict | def predict(self, X, y):
"""Predict sequences.
Args:
X (list): input data.
y (list): tags.
Returns:
y_true: true sequences.
y_pred: predicted sequences.
"""
y_pred = self.model.predict_on_batch(X)
# reduce dimension.
y_true = np.argmax(y, -1)
y_pred = np.argmax(y_pred, -1)
lens = self.get_length(y_true)
y_true = self.convert_idx_to_name(y_true, lens)
y_pred = self.convert_idx_to_name(y_pred, lens)
return y_true, y_pred | python | def predict(self, X, y):
"""Predict sequences.
Args:
X (list): input data.
y (list): tags.
Returns:
y_true: true sequences.
y_pred: predicted sequences.
"""
y_pred = self.model.predict_on_batch(X)
# reduce dimension.
y_true = np.argmax(y, -1)
y_pred = np.argmax(y_pred, -1)
lens = self.get_length(y_true)
y_true = self.convert_idx_to_name(y_true, lens)
y_pred = self.convert_idx_to_name(y_pred, lens)
return y_true, y_pred | [
"def",
"predict",
"(",
"self",
",",
"X",
",",
"y",
")",
":",
"y_pred",
"=",
"self",
".",
"model",
".",
"predict_on_batch",
"(",
"X",
")",
"# reduce dimension.",
"y_true",
"=",
"np",
".",
"argmax",
"(",
"y",
",",
"-",
"1",
")",
"y_pred",
"=",
"np",
... | Predict sequences.
Args:
X (list): input data.
y (list): tags.
Returns:
y_true: true sequences.
y_pred: predicted sequences. | [
"Predict",
"sequences",
"."
] | f1e5ff1a94da11500c47fd11d4d72617f7f55911 | https://github.com/chakki-works/seqeval/blob/f1e5ff1a94da11500c47fd11d4d72617f7f55911/seqeval/callbacks.py#L78-L100 | train | 202,454 |
chakki-works/seqeval | seqeval/callbacks.py | F1Metrics.score | def score(self, y_true, y_pred):
"""Calculate f1 score.
Args:
y_true (list): true sequences.
y_pred (list): predicted sequences.
Returns:
score: f1 score.
"""
score = f1_score(y_true, y_pred)
print(' - f1: {:04.2f}'.format(score * 100))
print(classification_report(y_true, y_pred, digits=4))
return score | python | def score(self, y_true, y_pred):
"""Calculate f1 score.
Args:
y_true (list): true sequences.
y_pred (list): predicted sequences.
Returns:
score: f1 score.
"""
score = f1_score(y_true, y_pred)
print(' - f1: {:04.2f}'.format(score * 100))
print(classification_report(y_true, y_pred, digits=4))
return score | [
"def",
"score",
"(",
"self",
",",
"y_true",
",",
"y_pred",
")",
":",
"score",
"=",
"f1_score",
"(",
"y_true",
",",
"y_pred",
")",
"print",
"(",
"' - f1: {:04.2f}'",
".",
"format",
"(",
"score",
"*",
"100",
")",
")",
"print",
"(",
"classification_report",... | Calculate f1 score.
Args:
y_true (list): true sequences.
y_pred (list): predicted sequences.
Returns:
score: f1 score. | [
"Calculate",
"f1",
"score",
"."
] | f1e5ff1a94da11500c47fd11d4d72617f7f55911 | https://github.com/chakki-works/seqeval/blob/f1e5ff1a94da11500c47fd11d4d72617f7f55911/seqeval/callbacks.py#L102-L115 | train | 202,455 |
chakki-works/seqeval | seqeval/metrics/sequence_labeling.py | get_entities | def get_entities(seq, suffix=False):
"""Gets entities from sequence.
Args:
seq (list): sequence of labels.
Returns:
list: list of (chunk_type, chunk_start, chunk_end).
Example:
>>> from seqeval.metrics.sequence_labeling import get_entities
>>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC']
>>> get_entities(seq)
[('PER', 0, 1), ('LOC', 3, 3)]
"""
# for nested list
if any(isinstance(s, list) for s in seq):
seq = [item for sublist in seq for item in sublist + ['O']]
prev_tag = 'O'
prev_type = ''
begin_offset = 0
chunks = []
for i, chunk in enumerate(seq + ['O']):
if suffix:
tag = chunk[-1]
type_ = chunk.split('-')[0]
else:
tag = chunk[0]
type_ = chunk.split('-')[-1]
if end_of_chunk(prev_tag, tag, prev_type, type_):
chunks.append((prev_type, begin_offset, i-1))
if start_of_chunk(prev_tag, tag, prev_type, type_):
begin_offset = i
prev_tag = tag
prev_type = type_
return chunks | python | def get_entities(seq, suffix=False):
"""Gets entities from sequence.
Args:
seq (list): sequence of labels.
Returns:
list: list of (chunk_type, chunk_start, chunk_end).
Example:
>>> from seqeval.metrics.sequence_labeling import get_entities
>>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC']
>>> get_entities(seq)
[('PER', 0, 1), ('LOC', 3, 3)]
"""
# for nested list
if any(isinstance(s, list) for s in seq):
seq = [item for sublist in seq for item in sublist + ['O']]
prev_tag = 'O'
prev_type = ''
begin_offset = 0
chunks = []
for i, chunk in enumerate(seq + ['O']):
if suffix:
tag = chunk[-1]
type_ = chunk.split('-')[0]
else:
tag = chunk[0]
type_ = chunk.split('-')[-1]
if end_of_chunk(prev_tag, tag, prev_type, type_):
chunks.append((prev_type, begin_offset, i-1))
if start_of_chunk(prev_tag, tag, prev_type, type_):
begin_offset = i
prev_tag = tag
prev_type = type_
return chunks | [
"def",
"get_entities",
"(",
"seq",
",",
"suffix",
"=",
"False",
")",
":",
"# for nested list",
"if",
"any",
"(",
"isinstance",
"(",
"s",
",",
"list",
")",
"for",
"s",
"in",
"seq",
")",
":",
"seq",
"=",
"[",
"item",
"for",
"sublist",
"in",
"seq",
"f... | Gets entities from sequence.
Args:
seq (list): sequence of labels.
Returns:
list: list of (chunk_type, chunk_start, chunk_end).
Example:
>>> from seqeval.metrics.sequence_labeling import get_entities
>>> seq = ['B-PER', 'I-PER', 'O', 'B-LOC']
>>> get_entities(seq)
[('PER', 0, 1), ('LOC', 3, 3)] | [
"Gets",
"entities",
"from",
"sequence",
"."
] | f1e5ff1a94da11500c47fd11d4d72617f7f55911 | https://github.com/chakki-works/seqeval/blob/f1e5ff1a94da11500c47fd11d4d72617f7f55911/seqeval/metrics/sequence_labeling.py#L15-L53 | train | 202,456 |
chakki-works/seqeval | seqeval/metrics/sequence_labeling.py | end_of_chunk | def end_of_chunk(prev_tag, tag, prev_type, type_):
"""Checks if a chunk ended between the previous and current word.
Args:
prev_tag: previous chunk tag.
tag: current chunk tag.
prev_type: previous type.
type_: current type.
Returns:
chunk_end: boolean.
"""
chunk_end = False
if prev_tag == 'E': chunk_end = True
if prev_tag == 'S': chunk_end = True
if prev_tag == 'B' and tag == 'B': chunk_end = True
if prev_tag == 'B' and tag == 'S': chunk_end = True
if prev_tag == 'B' and tag == 'O': chunk_end = True
if prev_tag == 'I' and tag == 'B': chunk_end = True
if prev_tag == 'I' and tag == 'S': chunk_end = True
if prev_tag == 'I' and tag == 'O': chunk_end = True
if prev_tag != 'O' and prev_tag != '.' and prev_type != type_:
chunk_end = True
return chunk_end | python | def end_of_chunk(prev_tag, tag, prev_type, type_):
"""Checks if a chunk ended between the previous and current word.
Args:
prev_tag: previous chunk tag.
tag: current chunk tag.
prev_type: previous type.
type_: current type.
Returns:
chunk_end: boolean.
"""
chunk_end = False
if prev_tag == 'E': chunk_end = True
if prev_tag == 'S': chunk_end = True
if prev_tag == 'B' and tag == 'B': chunk_end = True
if prev_tag == 'B' and tag == 'S': chunk_end = True
if prev_tag == 'B' and tag == 'O': chunk_end = True
if prev_tag == 'I' and tag == 'B': chunk_end = True
if prev_tag == 'I' and tag == 'S': chunk_end = True
if prev_tag == 'I' and tag == 'O': chunk_end = True
if prev_tag != 'O' and prev_tag != '.' and prev_type != type_:
chunk_end = True
return chunk_end | [
"def",
"end_of_chunk",
"(",
"prev_tag",
",",
"tag",
",",
"prev_type",
",",
"type_",
")",
":",
"chunk_end",
"=",
"False",
"if",
"prev_tag",
"==",
"'E'",
":",
"chunk_end",
"=",
"True",
"if",
"prev_tag",
"==",
"'S'",
":",
"chunk_end",
"=",
"True",
"if",
"... | Checks if a chunk ended between the previous and current word.
Args:
prev_tag: previous chunk tag.
tag: current chunk tag.
prev_type: previous type.
type_: current type.
Returns:
chunk_end: boolean. | [
"Checks",
"if",
"a",
"chunk",
"ended",
"between",
"the",
"previous",
"and",
"current",
"word",
"."
] | f1e5ff1a94da11500c47fd11d4d72617f7f55911 | https://github.com/chakki-works/seqeval/blob/f1e5ff1a94da11500c47fd11d4d72617f7f55911/seqeval/metrics/sequence_labeling.py#L56-L83 | train | 202,457 |
chakki-works/seqeval | seqeval/metrics/sequence_labeling.py | start_of_chunk | def start_of_chunk(prev_tag, tag, prev_type, type_):
"""Checks if a chunk started between the previous and current word.
Args:
prev_tag: previous chunk tag.
tag: current chunk tag.
prev_type: previous type.
type_: current type.
Returns:
chunk_start: boolean.
"""
chunk_start = False
if tag == 'B': chunk_start = True
if tag == 'S': chunk_start = True
if prev_tag == 'E' and tag == 'E': chunk_start = True
if prev_tag == 'E' and tag == 'I': chunk_start = True
if prev_tag == 'S' and tag == 'E': chunk_start = True
if prev_tag == 'S' and tag == 'I': chunk_start = True
if prev_tag == 'O' and tag == 'E': chunk_start = True
if prev_tag == 'O' and tag == 'I': chunk_start = True
if tag != 'O' and tag != '.' and prev_type != type_:
chunk_start = True
return chunk_start | python | def start_of_chunk(prev_tag, tag, prev_type, type_):
"""Checks if a chunk started between the previous and current word.
Args:
prev_tag: previous chunk tag.
tag: current chunk tag.
prev_type: previous type.
type_: current type.
Returns:
chunk_start: boolean.
"""
chunk_start = False
if tag == 'B': chunk_start = True
if tag == 'S': chunk_start = True
if prev_tag == 'E' and tag == 'E': chunk_start = True
if prev_tag == 'E' and tag == 'I': chunk_start = True
if prev_tag == 'S' and tag == 'E': chunk_start = True
if prev_tag == 'S' and tag == 'I': chunk_start = True
if prev_tag == 'O' and tag == 'E': chunk_start = True
if prev_tag == 'O' and tag == 'I': chunk_start = True
if tag != 'O' and tag != '.' and prev_type != type_:
chunk_start = True
return chunk_start | [
"def",
"start_of_chunk",
"(",
"prev_tag",
",",
"tag",
",",
"prev_type",
",",
"type_",
")",
":",
"chunk_start",
"=",
"False",
"if",
"tag",
"==",
"'B'",
":",
"chunk_start",
"=",
"True",
"if",
"tag",
"==",
"'S'",
":",
"chunk_start",
"=",
"True",
"if",
"pr... | Checks if a chunk started between the previous and current word.
Args:
prev_tag: previous chunk tag.
tag: current chunk tag.
prev_type: previous type.
type_: current type.
Returns:
chunk_start: boolean. | [
"Checks",
"if",
"a",
"chunk",
"started",
"between",
"the",
"previous",
"and",
"current",
"word",
"."
] | f1e5ff1a94da11500c47fd11d4d72617f7f55911 | https://github.com/chakki-works/seqeval/blob/f1e5ff1a94da11500c47fd11d4d72617f7f55911/seqeval/metrics/sequence_labeling.py#L86-L113 | train | 202,458 |
chakki-works/seqeval | seqeval/metrics/sequence_labeling.py | f1_score | def f1_score(y_true, y_pred, average='micro', suffix=False):
"""Compute the F1 score.
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a tagger.
Returns:
score : float.
Example:
>>> from seqeval.metrics import f1_score
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> f1_score(y_true, y_pred)
0.50
"""
true_entities = set(get_entities(y_true, suffix))
pred_entities = set(get_entities(y_pred, suffix))
nb_correct = len(true_entities & pred_entities)
nb_pred = len(pred_entities)
nb_true = len(true_entities)
p = nb_correct / nb_pred if nb_pred > 0 else 0
r = nb_correct / nb_true if nb_true > 0 else 0
score = 2 * p * r / (p + r) if p + r > 0 else 0
return score | python | def f1_score(y_true, y_pred, average='micro', suffix=False):
"""Compute the F1 score.
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a tagger.
Returns:
score : float.
Example:
>>> from seqeval.metrics import f1_score
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> f1_score(y_true, y_pred)
0.50
"""
true_entities = set(get_entities(y_true, suffix))
pred_entities = set(get_entities(y_pred, suffix))
nb_correct = len(true_entities & pred_entities)
nb_pred = len(pred_entities)
nb_true = len(true_entities)
p = nb_correct / nb_pred if nb_pred > 0 else 0
r = nb_correct / nb_true if nb_true > 0 else 0
score = 2 * p * r / (p + r) if p + r > 0 else 0
return score | [
"def",
"f1_score",
"(",
"y_true",
",",
"y_pred",
",",
"average",
"=",
"'micro'",
",",
"suffix",
"=",
"False",
")",
":",
"true_entities",
"=",
"set",
"(",
"get_entities",
"(",
"y_true",
",",
"suffix",
")",
")",
"pred_entities",
"=",
"set",
"(",
"get_entit... | Compute the F1 score.
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a tagger.
Returns:
score : float.
Example:
>>> from seqeval.metrics import f1_score
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> f1_score(y_true, y_pred)
0.50 | [
"Compute",
"the",
"F1",
"score",
"."
] | f1e5ff1a94da11500c47fd11d4d72617f7f55911 | https://github.com/chakki-works/seqeval/blob/f1e5ff1a94da11500c47fd11d4d72617f7f55911/seqeval/metrics/sequence_labeling.py#L116-L151 | train | 202,459 |
chakki-works/seqeval | seqeval/metrics/sequence_labeling.py | precision_score | def precision_score(y_true, y_pred, average='micro', suffix=False):
"""Compute the precision.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample.
The best value is 1 and the worst value is 0.
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a tagger.
Returns:
score : float.
Example:
>>> from seqeval.metrics import precision_score
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> precision_score(y_true, y_pred)
0.50
"""
true_entities = set(get_entities(y_true, suffix))
pred_entities = set(get_entities(y_pred, suffix))
nb_correct = len(true_entities & pred_entities)
nb_pred = len(pred_entities)
score = nb_correct / nb_pred if nb_pred > 0 else 0
return score | python | def precision_score(y_true, y_pred, average='micro', suffix=False):
"""Compute the precision.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample.
The best value is 1 and the worst value is 0.
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a tagger.
Returns:
score : float.
Example:
>>> from seqeval.metrics import precision_score
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> precision_score(y_true, y_pred)
0.50
"""
true_entities = set(get_entities(y_true, suffix))
pred_entities = set(get_entities(y_pred, suffix))
nb_correct = len(true_entities & pred_entities)
nb_pred = len(pred_entities)
score = nb_correct / nb_pred if nb_pred > 0 else 0
return score | [
"def",
"precision_score",
"(",
"y_true",
",",
"y_pred",
",",
"average",
"=",
"'micro'",
",",
"suffix",
"=",
"False",
")",
":",
"true_entities",
"=",
"set",
"(",
"get_entities",
"(",
"y_true",
",",
"suffix",
")",
")",
"pred_entities",
"=",
"set",
"(",
"ge... | Compute the precision.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample.
The best value is 1 and the worst value is 0.
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a tagger.
Returns:
score : float.
Example:
>>> from seqeval.metrics import precision_score
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> precision_score(y_true, y_pred)
0.50 | [
"Compute",
"the",
"precision",
"."
] | f1e5ff1a94da11500c47fd11d4d72617f7f55911 | https://github.com/chakki-works/seqeval/blob/f1e5ff1a94da11500c47fd11d4d72617f7f55911/seqeval/metrics/sequence_labeling.py#L187-L218 | train | 202,460 |
chakki-works/seqeval | seqeval/metrics/sequence_labeling.py | recall_score | def recall_score(y_true, y_pred, average='micro', suffix=False):
"""Compute the recall.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a tagger.
Returns:
score : float.
Example:
>>> from seqeval.metrics import recall_score
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> recall_score(y_true, y_pred)
0.50
"""
true_entities = set(get_entities(y_true, suffix))
pred_entities = set(get_entities(y_pred, suffix))
nb_correct = len(true_entities & pred_entities)
nb_true = len(true_entities)
score = nb_correct / nb_true if nb_true > 0 else 0
return score | python | def recall_score(y_true, y_pred, average='micro', suffix=False):
"""Compute the recall.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a tagger.
Returns:
score : float.
Example:
>>> from seqeval.metrics import recall_score
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> recall_score(y_true, y_pred)
0.50
"""
true_entities = set(get_entities(y_true, suffix))
pred_entities = set(get_entities(y_pred, suffix))
nb_correct = len(true_entities & pred_entities)
nb_true = len(true_entities)
score = nb_correct / nb_true if nb_true > 0 else 0
return score | [
"def",
"recall_score",
"(",
"y_true",
",",
"y_pred",
",",
"average",
"=",
"'micro'",
",",
"suffix",
"=",
"False",
")",
":",
"true_entities",
"=",
"set",
"(",
"get_entities",
"(",
"y_true",
",",
"suffix",
")",
")",
"pred_entities",
"=",
"set",
"(",
"get_e... | Compute the recall.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a tagger.
Returns:
score : float.
Example:
>>> from seqeval.metrics import recall_score
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> recall_score(y_true, y_pred)
0.50 | [
"Compute",
"the",
"recall",
"."
] | f1e5ff1a94da11500c47fd11d4d72617f7f55911 | https://github.com/chakki-works/seqeval/blob/f1e5ff1a94da11500c47fd11d4d72617f7f55911/seqeval/metrics/sequence_labeling.py#L221-L252 | train | 202,461 |
chakki-works/seqeval | seqeval/metrics/sequence_labeling.py | classification_report | def classification_report(y_true, y_pred, digits=2, suffix=False):
"""Build a text report showing the main classification metrics.
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a classifier.
digits : int. Number of digits for formatting output floating point values.
Returns:
report : string. Text summary of the precision, recall, F1 score for each class.
Examples:
>>> from seqeval.metrics import classification_report
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> print(classification_report(y_true, y_pred))
precision recall f1-score support
<BLANKLINE>
MISC 0.00 0.00 0.00 1
PER 1.00 1.00 1.00 1
<BLANKLINE>
micro avg 0.50 0.50 0.50 2
macro avg 0.50 0.50 0.50 2
<BLANKLINE>
"""
true_entities = set(get_entities(y_true, suffix))
pred_entities = set(get_entities(y_pred, suffix))
name_width = 0
d1 = defaultdict(set)
d2 = defaultdict(set)
for e in true_entities:
d1[e[0]].add((e[1], e[2]))
name_width = max(name_width, len(e[0]))
for e in pred_entities:
d2[e[0]].add((e[1], e[2]))
last_line_heading = 'macro avg'
width = max(name_width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)
report = head_fmt.format(u'', *headers, width=width)
report += u'\n\n'
row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\n'
ps, rs, f1s, s = [], [], [], []
for type_name, true_entities in d1.items():
pred_entities = d2[type_name]
nb_correct = len(true_entities & pred_entities)
nb_pred = len(pred_entities)
nb_true = len(true_entities)
p = nb_correct / nb_pred if nb_pred > 0 else 0
r = nb_correct / nb_true if nb_true > 0 else 0
f1 = 2 * p * r / (p + r) if p + r > 0 else 0
report += row_fmt.format(*[type_name, p, r, f1, nb_true], width=width, digits=digits)
ps.append(p)
rs.append(r)
f1s.append(f1)
s.append(nb_true)
report += u'\n'
# compute averages
report += row_fmt.format('micro avg',
precision_score(y_true, y_pred, suffix=suffix),
recall_score(y_true, y_pred, suffix=suffix),
f1_score(y_true, y_pred, suffix=suffix),
np.sum(s),
width=width, digits=digits)
report += row_fmt.format(last_line_heading,
np.average(ps, weights=s),
np.average(rs, weights=s),
np.average(f1s, weights=s),
np.sum(s),
width=width, digits=digits)
return report | python | def classification_report(y_true, y_pred, digits=2, suffix=False):
"""Build a text report showing the main classification metrics.
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a classifier.
digits : int. Number of digits for formatting output floating point values.
Returns:
report : string. Text summary of the precision, recall, F1 score for each class.
Examples:
>>> from seqeval.metrics import classification_report
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> print(classification_report(y_true, y_pred))
precision recall f1-score support
<BLANKLINE>
MISC 0.00 0.00 0.00 1
PER 1.00 1.00 1.00 1
<BLANKLINE>
micro avg 0.50 0.50 0.50 2
macro avg 0.50 0.50 0.50 2
<BLANKLINE>
"""
true_entities = set(get_entities(y_true, suffix))
pred_entities = set(get_entities(y_pred, suffix))
name_width = 0
d1 = defaultdict(set)
d2 = defaultdict(set)
for e in true_entities:
d1[e[0]].add((e[1], e[2]))
name_width = max(name_width, len(e[0]))
for e in pred_entities:
d2[e[0]].add((e[1], e[2]))
last_line_heading = 'macro avg'
width = max(name_width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
head_fmt = u'{:>{width}s} ' + u' {:>9}' * len(headers)
report = head_fmt.format(u'', *headers, width=width)
report += u'\n\n'
row_fmt = u'{:>{width}s} ' + u' {:>9.{digits}f}' * 3 + u' {:>9}\n'
ps, rs, f1s, s = [], [], [], []
for type_name, true_entities in d1.items():
pred_entities = d2[type_name]
nb_correct = len(true_entities & pred_entities)
nb_pred = len(pred_entities)
nb_true = len(true_entities)
p = nb_correct / nb_pred if nb_pred > 0 else 0
r = nb_correct / nb_true if nb_true > 0 else 0
f1 = 2 * p * r / (p + r) if p + r > 0 else 0
report += row_fmt.format(*[type_name, p, r, f1, nb_true], width=width, digits=digits)
ps.append(p)
rs.append(r)
f1s.append(f1)
s.append(nb_true)
report += u'\n'
# compute averages
report += row_fmt.format('micro avg',
precision_score(y_true, y_pred, suffix=suffix),
recall_score(y_true, y_pred, suffix=suffix),
f1_score(y_true, y_pred, suffix=suffix),
np.sum(s),
width=width, digits=digits)
report += row_fmt.format(last_line_heading,
np.average(ps, weights=s),
np.average(rs, weights=s),
np.average(f1s, weights=s),
np.sum(s),
width=width, digits=digits)
return report | [
"def",
"classification_report",
"(",
"y_true",
",",
"y_pred",
",",
"digits",
"=",
"2",
",",
"suffix",
"=",
"False",
")",
":",
"true_entities",
"=",
"set",
"(",
"get_entities",
"(",
"y_true",
",",
"suffix",
")",
")",
"pred_entities",
"=",
"set",
"(",
"get... | Build a text report showing the main classification metrics.
Args:
y_true : 2d array. Ground truth (correct) target values.
y_pred : 2d array. Estimated targets as returned by a classifier.
digits : int. Number of digits for formatting output floating point values.
Returns:
report : string. Text summary of the precision, recall, F1 score for each class.
Examples:
>>> from seqeval.metrics import classification_report
>>> y_true = [['O', 'O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> y_pred = [['O', 'O', 'B-MISC', 'I-MISC', 'I-MISC', 'I-MISC', 'O'], ['B-PER', 'I-PER', 'O']]
>>> print(classification_report(y_true, y_pred))
precision recall f1-score support
<BLANKLINE>
MISC 0.00 0.00 0.00 1
PER 1.00 1.00 1.00 1
<BLANKLINE>
micro avg 0.50 0.50 0.50 2
macro avg 0.50 0.50 0.50 2
<BLANKLINE> | [
"Build",
"a",
"text",
"report",
"showing",
"the",
"main",
"classification",
"metrics",
"."
] | f1e5ff1a94da11500c47fd11d4d72617f7f55911 | https://github.com/chakki-works/seqeval/blob/f1e5ff1a94da11500c47fd11d4d72617f7f55911/seqeval/metrics/sequence_labeling.py#L288-L369 | train | 202,462 |
jd/daiquiri | daiquiri/output.py | TimedRotatingFile._timedelta_to_seconds | def _timedelta_to_seconds(td):
"""Convert a datetime.timedelta object into a seconds interval for
rotating file ouput.
:param td: datetime.timedelta
:return: time in seconds
:rtype: int
"""
if isinstance(td, numbers.Real):
td = datetime.timedelta(seconds=td)
return td.total_seconds() | python | def _timedelta_to_seconds(td):
"""Convert a datetime.timedelta object into a seconds interval for
rotating file ouput.
:param td: datetime.timedelta
:return: time in seconds
:rtype: int
"""
if isinstance(td, numbers.Real):
td = datetime.timedelta(seconds=td)
return td.total_seconds() | [
"def",
"_timedelta_to_seconds",
"(",
"td",
")",
":",
"if",
"isinstance",
"(",
"td",
",",
"numbers",
".",
"Real",
")",
":",
"td",
"=",
"datetime",
".",
"timedelta",
"(",
"seconds",
"=",
"td",
")",
"return",
"td",
".",
"total_seconds",
"(",
")"
] | Convert a datetime.timedelta object into a seconds interval for
rotating file ouput.
:param td: datetime.timedelta
:return: time in seconds
:rtype: int | [
"Convert",
"a",
"datetime",
".",
"timedelta",
"object",
"into",
"a",
"seconds",
"interval",
"for",
"rotating",
"file",
"ouput",
"."
] | 3fd8df555ab9ed1d11b5931bfef3ba6dce5b8166 | https://github.com/jd/daiquiri/blob/3fd8df555ab9ed1d11b5931bfef3ba6dce5b8166/daiquiri/output.py#L158-L168 | train | 202,463 |
jd/daiquiri | daiquiri/__init__.py | getLogger | def getLogger(name=None, **kwargs):
"""Build a logger with the given name.
:param name: The name for the logger. This is usually the module
name, ``__name__``.
:type name: string
"""
adapter = _LOGGERS.get(name)
if not adapter:
# NOTE(jd) Keep using the `adapter' variable here because so it's not
# collected by Python since _LOGGERS contains only a weakref
adapter = KeywordArgumentAdapter(logging.getLogger(name), kwargs)
_LOGGERS[name] = adapter
return adapter | python | def getLogger(name=None, **kwargs):
"""Build a logger with the given name.
:param name: The name for the logger. This is usually the module
name, ``__name__``.
:type name: string
"""
adapter = _LOGGERS.get(name)
if not adapter:
# NOTE(jd) Keep using the `adapter' variable here because so it's not
# collected by Python since _LOGGERS contains only a weakref
adapter = KeywordArgumentAdapter(logging.getLogger(name), kwargs)
_LOGGERS[name] = adapter
return adapter | [
"def",
"getLogger",
"(",
"name",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"adapter",
"=",
"_LOGGERS",
".",
"get",
"(",
"name",
")",
"if",
"not",
"adapter",
":",
"# NOTE(jd) Keep using the `adapter' variable here because so it's not",
"# collected by Python si... | Build a logger with the given name.
:param name: The name for the logger. This is usually the module
name, ``__name__``.
:type name: string | [
"Build",
"a",
"logger",
"with",
"the",
"given",
"name",
"."
] | 3fd8df555ab9ed1d11b5931bfef3ba6dce5b8166 | https://github.com/jd/daiquiri/blob/3fd8df555ab9ed1d11b5931bfef3ba6dce5b8166/daiquiri/__init__.py#L64-L77 | train | 202,464 |
jd/daiquiri | daiquiri/__init__.py | setup | def setup(level=logging.WARNING, outputs=[output.STDERR], program_name=None,
capture_warnings=True):
"""Setup Python logging.
This will setup basic handlers for Python logging.
:param level: Root log level.
:param outputs: Iterable of outputs to log to.
:param program_name: The name of the program. Auto-detected if not set.
:param capture_warnings: Capture warnings from the `warnings' module.
"""
root_logger = logging.getLogger(None)
# Remove all handlers
for handler in list(root_logger.handlers):
root_logger.removeHandler(handler)
# Add configured handlers
for out in outputs:
if isinstance(out, str):
out = output.preconfigured.get(out)
if out is None:
raise RuntimeError("Output {} is not available".format(out))
out.add_to_logger(root_logger)
root_logger.setLevel(level)
program_logger = logging.getLogger(program_name)
def logging_excepthook(exc_type, value, tb):
program_logger.critical(
"".join(traceback.format_exception(exc_type, value, tb)))
sys.excepthook = logging_excepthook
if capture_warnings:
logging.captureWarnings(True) | python | def setup(level=logging.WARNING, outputs=[output.STDERR], program_name=None,
capture_warnings=True):
"""Setup Python logging.
This will setup basic handlers for Python logging.
:param level: Root log level.
:param outputs: Iterable of outputs to log to.
:param program_name: The name of the program. Auto-detected if not set.
:param capture_warnings: Capture warnings from the `warnings' module.
"""
root_logger = logging.getLogger(None)
# Remove all handlers
for handler in list(root_logger.handlers):
root_logger.removeHandler(handler)
# Add configured handlers
for out in outputs:
if isinstance(out, str):
out = output.preconfigured.get(out)
if out is None:
raise RuntimeError("Output {} is not available".format(out))
out.add_to_logger(root_logger)
root_logger.setLevel(level)
program_logger = logging.getLogger(program_name)
def logging_excepthook(exc_type, value, tb):
program_logger.critical(
"".join(traceback.format_exception(exc_type, value, tb)))
sys.excepthook = logging_excepthook
if capture_warnings:
logging.captureWarnings(True) | [
"def",
"setup",
"(",
"level",
"=",
"logging",
".",
"WARNING",
",",
"outputs",
"=",
"[",
"output",
".",
"STDERR",
"]",
",",
"program_name",
"=",
"None",
",",
"capture_warnings",
"=",
"True",
")",
":",
"root_logger",
"=",
"logging",
".",
"getLogger",
"(",
... | Setup Python logging.
This will setup basic handlers for Python logging.
:param level: Root log level.
:param outputs: Iterable of outputs to log to.
:param program_name: The name of the program. Auto-detected if not set.
:param capture_warnings: Capture warnings from the `warnings' module. | [
"Setup",
"Python",
"logging",
"."
] | 3fd8df555ab9ed1d11b5931bfef3ba6dce5b8166 | https://github.com/jd/daiquiri/blob/3fd8df555ab9ed1d11b5931bfef3ba6dce5b8166/daiquiri/__init__.py#L80-L116 | train | 202,465 |
jd/daiquiri | daiquiri/__init__.py | set_default_log_levels | def set_default_log_levels(loggers_and_log_levels):
"""Set default log levels for some loggers.
:param loggers_and_log_levels: List of tuple (logger name, level).
"""
for logger, level in loggers_and_log_levels:
if isinstance(level, str):
level = level.upper()
logging.getLogger(logger).setLevel(level) | python | def set_default_log_levels(loggers_and_log_levels):
"""Set default log levels for some loggers.
:param loggers_and_log_levels: List of tuple (logger name, level).
"""
for logger, level in loggers_and_log_levels:
if isinstance(level, str):
level = level.upper()
logging.getLogger(logger).setLevel(level) | [
"def",
"set_default_log_levels",
"(",
"loggers_and_log_levels",
")",
":",
"for",
"logger",
",",
"level",
"in",
"loggers_and_log_levels",
":",
"if",
"isinstance",
"(",
"level",
",",
"str",
")",
":",
"level",
"=",
"level",
".",
"upper",
"(",
")",
"logging",
".... | Set default log levels for some loggers.
:param loggers_and_log_levels: List of tuple (logger name, level). | [
"Set",
"default",
"log",
"levels",
"for",
"some",
"loggers",
"."
] | 3fd8df555ab9ed1d11b5931bfef3ba6dce5b8166 | https://github.com/jd/daiquiri/blob/3fd8df555ab9ed1d11b5931bfef3ba6dce5b8166/daiquiri/__init__.py#L130-L138 | train | 202,466 |
Netflix-Skunkworks/swag-client | swag_client/cli.py | create_swag_from_ctx | def create_swag_from_ctx(ctx):
"""Creates SWAG client from the current context."""
swag_opts = {}
if ctx.type == 'file':
swag_opts = {
'swag.type': 'file',
'swag.data_dir': ctx.data_dir,
'swag.data_file': ctx.data_file
}
elif ctx.type == 's3':
swag_opts = {
'swag.type': 's3',
'swag.bucket_name': ctx.bucket_name,
'swag.data_file': ctx.data_file,
'swag.region': ctx.region
}
elif ctx.type == 'dynamodb':
swag_opts = {
'swag.type': 'dynamodb',
'swag.region': ctx.region
}
return SWAGManager(**parse_swag_config_options(swag_opts)) | python | def create_swag_from_ctx(ctx):
"""Creates SWAG client from the current context."""
swag_opts = {}
if ctx.type == 'file':
swag_opts = {
'swag.type': 'file',
'swag.data_dir': ctx.data_dir,
'swag.data_file': ctx.data_file
}
elif ctx.type == 's3':
swag_opts = {
'swag.type': 's3',
'swag.bucket_name': ctx.bucket_name,
'swag.data_file': ctx.data_file,
'swag.region': ctx.region
}
elif ctx.type == 'dynamodb':
swag_opts = {
'swag.type': 'dynamodb',
'swag.region': ctx.region
}
return SWAGManager(**parse_swag_config_options(swag_opts)) | [
"def",
"create_swag_from_ctx",
"(",
"ctx",
")",
":",
"swag_opts",
"=",
"{",
"}",
"if",
"ctx",
".",
"type",
"==",
"'file'",
":",
"swag_opts",
"=",
"{",
"'swag.type'",
":",
"'file'",
",",
"'swag.data_dir'",
":",
"ctx",
".",
"data_dir",
",",
"'swag.data_file'... | Creates SWAG client from the current context. | [
"Creates",
"SWAG",
"client",
"from",
"the",
"current",
"context",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/cli.py#L29-L50 | train | 202,467 |
Netflix-Skunkworks/swag-client | swag_client/cli.py | file | def file(ctx, data_dir, data_file):
"""Use the File SWAG Backend"""
if not ctx.file:
ctx.data_file = data_file
if not ctx.data_dir:
ctx.data_dir = data_dir
ctx.type = 'file' | python | def file(ctx, data_dir, data_file):
"""Use the File SWAG Backend"""
if not ctx.file:
ctx.data_file = data_file
if not ctx.data_dir:
ctx.data_dir = data_dir
ctx.type = 'file' | [
"def",
"file",
"(",
"ctx",
",",
"data_dir",
",",
"data_file",
")",
":",
"if",
"not",
"ctx",
".",
"file",
":",
"ctx",
".",
"data_file",
"=",
"data_file",
"if",
"not",
"ctx",
".",
"data_dir",
":",
"ctx",
".",
"data_dir",
"=",
"data_dir",
"ctx",
".",
... | Use the File SWAG Backend | [
"Use",
"the",
"File",
"SWAG",
"Backend"
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/cli.py#L95-L103 | train | 202,468 |
Netflix-Skunkworks/swag-client | swag_client/cli.py | s3 | def s3(ctx, bucket_name, data_file, region):
"""Use the S3 SWAG backend."""
if not ctx.data_file:
ctx.data_file = data_file
if not ctx.bucket_name:
ctx.bucket_name = bucket_name
if not ctx.region:
ctx.region = region
ctx.type = 's3' | python | def s3(ctx, bucket_name, data_file, region):
"""Use the S3 SWAG backend."""
if not ctx.data_file:
ctx.data_file = data_file
if not ctx.bucket_name:
ctx.bucket_name = bucket_name
if not ctx.region:
ctx.region = region
ctx.type = 's3' | [
"def",
"s3",
"(",
"ctx",
",",
"bucket_name",
",",
"data_file",
",",
"region",
")",
":",
"if",
"not",
"ctx",
".",
"data_file",
":",
"ctx",
".",
"data_file",
"=",
"data_file",
"if",
"not",
"ctx",
".",
"bucket_name",
":",
"ctx",
".",
"bucket_name",
"=",
... | Use the S3 SWAG backend. | [
"Use",
"the",
"S3",
"SWAG",
"backend",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/cli.py#L111-L122 | train | 202,469 |
Netflix-Skunkworks/swag-client | swag_client/cli.py | list | def list(ctx):
"""List SWAG account info."""
if ctx.namespace != 'accounts':
click.echo(
click.style('Only account data is available for listing.', fg='red')
)
return
swag = create_swag_from_ctx(ctx)
accounts = swag.get_all()
_table = [[result['name'], result.get('id')] for result in accounts]
click.echo(
tabulate(_table, headers=["Account Name", "Account Number"])
) | python | def list(ctx):
"""List SWAG account info."""
if ctx.namespace != 'accounts':
click.echo(
click.style('Only account data is available for listing.', fg='red')
)
return
swag = create_swag_from_ctx(ctx)
accounts = swag.get_all()
_table = [[result['name'], result.get('id')] for result in accounts]
click.echo(
tabulate(_table, headers=["Account Name", "Account Number"])
) | [
"def",
"list",
"(",
"ctx",
")",
":",
"if",
"ctx",
".",
"namespace",
"!=",
"'accounts'",
":",
"click",
".",
"echo",
"(",
"click",
".",
"style",
"(",
"'Only account data is available for listing.'",
",",
"fg",
"=",
"'red'",
")",
")",
"return",
"swag",
"=",
... | List SWAG account info. | [
"List",
"SWAG",
"account",
"info",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/cli.py#L127-L140 | train | 202,470 |
Netflix-Skunkworks/swag-client | swag_client/cli.py | list_service | def list_service(ctx, name):
"""Retrieve accounts pertaining to named service."""
swag = create_swag_from_ctx(ctx)
accounts = swag.get_service_enabled(name)
_table = [[result['name'], result.get('id')] for result in accounts]
click.echo(
tabulate(_table, headers=["Account Name", "Account Number"])
) | python | def list_service(ctx, name):
"""Retrieve accounts pertaining to named service."""
swag = create_swag_from_ctx(ctx)
accounts = swag.get_service_enabled(name)
_table = [[result['name'], result.get('id')] for result in accounts]
click.echo(
tabulate(_table, headers=["Account Name", "Account Number"])
) | [
"def",
"list_service",
"(",
"ctx",
",",
"name",
")",
":",
"swag",
"=",
"create_swag_from_ctx",
"(",
"ctx",
")",
"accounts",
"=",
"swag",
".",
"get_service_enabled",
"(",
"name",
")",
"_table",
"=",
"[",
"[",
"result",
"[",
"'name'",
"]",
",",
"result",
... | Retrieve accounts pertaining to named service. | [
"Retrieve",
"accounts",
"pertaining",
"to",
"named",
"service",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/cli.py#L146-L154 | train | 202,471 |
Netflix-Skunkworks/swag-client | swag_client/cli.py | migrate | def migrate(ctx, start_version, end_version):
"""Transition from one SWAG schema to another."""
if ctx.type == 'file':
if ctx.data_file:
file_path = ctx.data_file
else:
file_path = os.path.join(ctx.data_file, ctx.namespace + '.json')
# todo make this more like alemebic and determine/load versions automatically
with open(file_path, 'r') as f:
data = json.loads(f.read())
data = run_migration(data, start_version, end_version)
with open(file_path, 'w') as f:
f.write(json.dumps(data)) | python | def migrate(ctx, start_version, end_version):
"""Transition from one SWAG schema to another."""
if ctx.type == 'file':
if ctx.data_file:
file_path = ctx.data_file
else:
file_path = os.path.join(ctx.data_file, ctx.namespace + '.json')
# todo make this more like alemebic and determine/load versions automatically
with open(file_path, 'r') as f:
data = json.loads(f.read())
data = run_migration(data, start_version, end_version)
with open(file_path, 'w') as f:
f.write(json.dumps(data)) | [
"def",
"migrate",
"(",
"ctx",
",",
"start_version",
",",
"end_version",
")",
":",
"if",
"ctx",
".",
"type",
"==",
"'file'",
":",
"if",
"ctx",
".",
"data_file",
":",
"file_path",
"=",
"ctx",
".",
"data_file",
"else",
":",
"file_path",
"=",
"os",
".",
... | Transition from one SWAG schema to another. | [
"Transition",
"from",
"one",
"SWAG",
"schema",
"to",
"another",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/cli.py#L161-L175 | train | 202,472 |
Netflix-Skunkworks/swag-client | swag_client/cli.py | propagate | def propagate(ctx):
"""Transfers SWAG data from one backend to another"""
data = []
if ctx.type == 'file':
if ctx.data_file:
file_path = ctx.data_file
else:
file_path = os.path.join(ctx.data_dir, ctx.namespace + '.json')
with open(file_path, 'r') as f:
data = json.loads(f.read())
swag_opts = {
'swag.type': 'dynamodb'
}
swag = SWAGManager(**parse_swag_config_options(swag_opts))
for item in data:
time.sleep(2)
swag.create(item, dry_run=ctx.dry_run) | python | def propagate(ctx):
"""Transfers SWAG data from one backend to another"""
data = []
if ctx.type == 'file':
if ctx.data_file:
file_path = ctx.data_file
else:
file_path = os.path.join(ctx.data_dir, ctx.namespace + '.json')
with open(file_path, 'r') as f:
data = json.loads(f.read())
swag_opts = {
'swag.type': 'dynamodb'
}
swag = SWAGManager(**parse_swag_config_options(swag_opts))
for item in data:
time.sleep(2)
swag.create(item, dry_run=ctx.dry_run) | [
"def",
"propagate",
"(",
"ctx",
")",
":",
"data",
"=",
"[",
"]",
"if",
"ctx",
".",
"type",
"==",
"'file'",
":",
"if",
"ctx",
".",
"data_file",
":",
"file_path",
"=",
"ctx",
".",
"data_file",
"else",
":",
"file_path",
"=",
"os",
".",
"path",
".",
... | Transfers SWAG data from one backend to another | [
"Transfers",
"SWAG",
"data",
"from",
"one",
"backend",
"to",
"another"
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/cli.py#L180-L200 | train | 202,473 |
Netflix-Skunkworks/swag-client | swag_client/cli.py | create | def create(ctx, data):
"""Create a new SWAG item."""
swag = create_swag_from_ctx(ctx)
data = json.loads(data.read())
for account in data:
swag.create(account, dry_run=ctx.dry_run) | python | def create(ctx, data):
"""Create a new SWAG item."""
swag = create_swag_from_ctx(ctx)
data = json.loads(data.read())
for account in data:
swag.create(account, dry_run=ctx.dry_run) | [
"def",
"create",
"(",
"ctx",
",",
"data",
")",
":",
"swag",
"=",
"create_swag_from_ctx",
"(",
"ctx",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"data",
".",
"read",
"(",
")",
")",
"for",
"account",
"in",
"data",
":",
"swag",
".",
"create",
"(",
... | Create a new SWAG item. | [
"Create",
"a",
"new",
"SWAG",
"item",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/cli.py#L206-L212 | train | 202,474 |
Netflix-Skunkworks/swag-client | swag_client/cli.py | deploy_service | def deploy_service(ctx, path, name, regions, disabled):
"""Deploys a new service JSON to multiple accounts. NAME is the service name you wish to deploy."""
enabled = False if disabled else True
swag = create_swag_from_ctx(ctx)
accounts = swag.get_all(search_filter=path)
log.debug('Searching for accounts. Found: {} JMESPath: `{}`'.format(len(accounts), path))
for a in accounts:
try:
if not swag.get_service(name, search_filter="[?id=='{id}']".format(id=a['id'])):
log.info('Found an account to update. AccountName: {name} AccountNumber: {number}'.format(name=a['name'], number=a['id']))
status = []
for region in regions:
status.append(
{
'enabled': enabled,
'region': region
}
)
a['services'].append(
{
'name': name,
'status': status
}
)
swag.update(a, dry_run=ctx.dry_run)
except InvalidSWAGDataException as e:
log.warning('Found a data quality issue. AccountName: {name} AccountNumber: {number}'.format(name=a['name'], number=a['id']))
log.info('Service has been deployed to all matching accounts.') | python | def deploy_service(ctx, path, name, regions, disabled):
"""Deploys a new service JSON to multiple accounts. NAME is the service name you wish to deploy."""
enabled = False if disabled else True
swag = create_swag_from_ctx(ctx)
accounts = swag.get_all(search_filter=path)
log.debug('Searching for accounts. Found: {} JMESPath: `{}`'.format(len(accounts), path))
for a in accounts:
try:
if not swag.get_service(name, search_filter="[?id=='{id}']".format(id=a['id'])):
log.info('Found an account to update. AccountName: {name} AccountNumber: {number}'.format(name=a['name'], number=a['id']))
status = []
for region in regions:
status.append(
{
'enabled': enabled,
'region': region
}
)
a['services'].append(
{
'name': name,
'status': status
}
)
swag.update(a, dry_run=ctx.dry_run)
except InvalidSWAGDataException as e:
log.warning('Found a data quality issue. AccountName: {name} AccountNumber: {number}'.format(name=a['name'], number=a['id']))
log.info('Service has been deployed to all matching accounts.') | [
"def",
"deploy_service",
"(",
"ctx",
",",
"path",
",",
"name",
",",
"regions",
",",
"disabled",
")",
":",
"enabled",
"=",
"False",
"if",
"disabled",
"else",
"True",
"swag",
"=",
"create_swag_from_ctx",
"(",
"ctx",
")",
"accounts",
"=",
"swag",
".",
"get_... | Deploys a new service JSON to multiple accounts. NAME is the service name you wish to deploy. | [
"Deploys",
"a",
"new",
"service",
"JSON",
"to",
"multiple",
"accounts",
".",
"NAME",
"is",
"the",
"service",
"name",
"you",
"wish",
"to",
"deploy",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/cli.py#L234-L265 | train | 202,475 |
Netflix-Skunkworks/swag-client | swag_client/cli.py | seed_aws_data | def seed_aws_data(ctx, data):
"""Seeds SWAG from a list of known AWS accounts."""
swag = create_swag_from_ctx(ctx)
for k, v in json.loads(data.read()).items():
for account in v['accounts']:
data = {
'description': 'This is an AWS owned account used for {}'.format(k),
'id': account['account_id'],
'contacts': [],
'owner': 'aws',
'provider': 'aws',
'sensitive': False,
'email': 'support@amazon.com',
'name': k + '-' + account['region']
}
click.echo(click.style(
'Seeded Account. AccountName: {}'.format(data['name']), fg='green')
)
swag.create(data, dry_run=ctx.dry_run) | python | def seed_aws_data(ctx, data):
"""Seeds SWAG from a list of known AWS accounts."""
swag = create_swag_from_ctx(ctx)
for k, v in json.loads(data.read()).items():
for account in v['accounts']:
data = {
'description': 'This is an AWS owned account used for {}'.format(k),
'id': account['account_id'],
'contacts': [],
'owner': 'aws',
'provider': 'aws',
'sensitive': False,
'email': 'support@amazon.com',
'name': k + '-' + account['region']
}
click.echo(click.style(
'Seeded Account. AccountName: {}'.format(data['name']), fg='green')
)
swag.create(data, dry_run=ctx.dry_run) | [
"def",
"seed_aws_data",
"(",
"ctx",
",",
"data",
")",
":",
"swag",
"=",
"create_swag_from_ctx",
"(",
"ctx",
")",
"for",
"k",
",",
"v",
"in",
"json",
".",
"loads",
"(",
"data",
".",
"read",
"(",
")",
")",
".",
"items",
"(",
")",
":",
"for",
"accou... | Seeds SWAG from a list of known AWS accounts. | [
"Seeds",
"SWAG",
"from",
"a",
"list",
"of",
"known",
"AWS",
"accounts",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/cli.py#L271-L291 | train | 202,476 |
Netflix-Skunkworks/swag-client | swag_client/cli.py | seed_aws_organization | def seed_aws_organization(ctx, owner):
"""Seeds SWAG from an AWS organziation."""
swag = create_swag_from_ctx(ctx)
accounts = swag.get_all()
_ids = [result.get('id') for result in accounts]
client = boto3.client('organizations')
paginator = client.get_paginator('list_accounts')
response_iterator = paginator.paginate()
count = 0
for response in response_iterator:
for account in response['Accounts']:
if account['Id'] in _ids:
click.echo(click.style(
'Ignoring Duplicate Account. AccountId: {} already exists in SWAG'.format(account['Id']), fg='yellow')
)
continue
if account['Status'] == 'SUSPENDED':
status = 'deprecated'
else:
status = 'created'
data = {
'id': account['Id'],
'name': account['Name'],
'description': 'Account imported from AWS organization.',
'email': account['Email'],
'owner': owner,
'provider': 'aws',
'contacts': [],
'sensitive': False,
'status': [{'region': 'all', 'status': status}]
}
click.echo(click.style(
'Seeded Account. AccountName: {}'.format(data['name']), fg='green')
)
count += 1
swag.create(data, dry_run=ctx.dry_run)
click.echo('Seeded {} accounts to SWAG.'.format(count)) | python | def seed_aws_organization(ctx, owner):
"""Seeds SWAG from an AWS organziation."""
swag = create_swag_from_ctx(ctx)
accounts = swag.get_all()
_ids = [result.get('id') for result in accounts]
client = boto3.client('organizations')
paginator = client.get_paginator('list_accounts')
response_iterator = paginator.paginate()
count = 0
for response in response_iterator:
for account in response['Accounts']:
if account['Id'] in _ids:
click.echo(click.style(
'Ignoring Duplicate Account. AccountId: {} already exists in SWAG'.format(account['Id']), fg='yellow')
)
continue
if account['Status'] == 'SUSPENDED':
status = 'deprecated'
else:
status = 'created'
data = {
'id': account['Id'],
'name': account['Name'],
'description': 'Account imported from AWS organization.',
'email': account['Email'],
'owner': owner,
'provider': 'aws',
'contacts': [],
'sensitive': False,
'status': [{'region': 'all', 'status': status}]
}
click.echo(click.style(
'Seeded Account. AccountName: {}'.format(data['name']), fg='green')
)
count += 1
swag.create(data, dry_run=ctx.dry_run)
click.echo('Seeded {} accounts to SWAG.'.format(count)) | [
"def",
"seed_aws_organization",
"(",
"ctx",
",",
"owner",
")",
":",
"swag",
"=",
"create_swag_from_ctx",
"(",
"ctx",
")",
"accounts",
"=",
"swag",
".",
"get_all",
"(",
")",
"_ids",
"=",
"[",
"result",
".",
"get",
"(",
"'id'",
")",
"for",
"result",
"in"... | Seeds SWAG from an AWS organziation. | [
"Seeds",
"SWAG",
"from",
"an",
"AWS",
"organziation",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/cli.py#L296-L339 | train | 202,477 |
Netflix-Skunkworks/swag-client | swag_client/backends/s3.py | load_file | def load_file(client, bucket, data_file):
"""Tries to load JSON data from S3."""
logger.debug('Loading item from s3. Bucket: {bucket} Key: {key}'.format(
bucket=bucket,
key=data_file
))
# If the file doesn't exist, then return an empty dict:
try:
data = _get_from_s3(client, bucket, data_file)
except ClientError as ce:
if ce.response['Error']['Code'] == 'NoSuchKey':
return {}
else:
raise ce
if sys.version_info > (3,):
data = data.decode('utf-8')
return json.loads(data) | python | def load_file(client, bucket, data_file):
"""Tries to load JSON data from S3."""
logger.debug('Loading item from s3. Bucket: {bucket} Key: {key}'.format(
bucket=bucket,
key=data_file
))
# If the file doesn't exist, then return an empty dict:
try:
data = _get_from_s3(client, bucket, data_file)
except ClientError as ce:
if ce.response['Error']['Code'] == 'NoSuchKey':
return {}
else:
raise ce
if sys.version_info > (3,):
data = data.decode('utf-8')
return json.loads(data) | [
"def",
"load_file",
"(",
"client",
",",
"bucket",
",",
"data_file",
")",
":",
"logger",
".",
"debug",
"(",
"'Loading item from s3. Bucket: {bucket} Key: {key}'",
".",
"format",
"(",
"bucket",
"=",
"bucket",
",",
"key",
"=",
"data_file",
")",
")",
"# If the file ... | Tries to load JSON data from S3. | [
"Tries",
"to",
"load",
"JSON",
"data",
"from",
"S3",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backends/s3.py#L36-L57 | train | 202,478 |
Netflix-Skunkworks/swag-client | swag_client/backends/s3.py | save_file | def save_file(client, bucket, data_file, items, dry_run=None):
"""Tries to write JSON data to data file in S3."""
logger.debug('Writing {number_items} items to s3. Bucket: {bucket} Key: {key}'.format(
number_items=len(items),
bucket=bucket,
key=data_file
))
if not dry_run:
return _put_to_s3(client, bucket, data_file, json.dumps(items)) | python | def save_file(client, bucket, data_file, items, dry_run=None):
"""Tries to write JSON data to data file in S3."""
logger.debug('Writing {number_items} items to s3. Bucket: {bucket} Key: {key}'.format(
number_items=len(items),
bucket=bucket,
key=data_file
))
if not dry_run:
return _put_to_s3(client, bucket, data_file, json.dumps(items)) | [
"def",
"save_file",
"(",
"client",
",",
"bucket",
",",
"data_file",
",",
"items",
",",
"dry_run",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"'Writing {number_items} items to s3. Bucket: {bucket} Key: {key}'",
".",
"format",
"(",
"number_items",
"=",
"len... | Tries to write JSON data to data file in S3. | [
"Tries",
"to",
"write",
"JSON",
"data",
"to",
"data",
"file",
"in",
"S3",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backends/s3.py#L60-L69 | train | 202,479 |
def create(self, item, dry_run=None):
    """Creates a new item in file."""
    logger.debug('Creating new item. Item: {item} Path: {data_file}'.format(
        item=item,
        data_file=self.data_file
    ))
    # Read-modify-write of the whole backing file: load, append, persist.
    existing = load_file(self.client, self.bucket_name, self.data_file)
    updated = append_item(self.namespace, self.version, item, existing)
    save_file(self.client, self.bucket_name, self.data_file, updated, dry_run=dry_run)
    return item
"""Creates a new item in file."""
logger.debug('Creating new item. Item: {item} Path: {data_file}'.format(
item=item,
data_file=self.data_file
))
items = load_file(self.client, self.bucket_name, self.data_file)
items = append_item(self.namespace, self.version, item, items)
save_file(self.client, self.bucket_name, self.data_file, items, dry_run=dry_run)
return item | [
"def",
"create",
"(",
"self",
",",
"item",
",",
"dry_run",
"=",
"None",
")",
":",
"logger",
".",
"debug",
"(",
"'Creating new item. Item: {item} Path: {data_file}'",
".",
"format",
"(",
"item",
"=",
"item",
",",
"data_file",
"=",
"self",
".",
"data_file",
")... | Creates a new item in file. | [
"Creates",
"a",
"new",
"item",
"in",
"file",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backends/s3.py#L92-L103 | train | 202,480 |
def health_check(self):
    """Uses head object to make sure the file exists in S3.

    Returns:
        bool: True if the object exists, False on any S3 client error.
    """
    logger.debug('Health Check on S3 file for: {namespace}'.format(
        namespace=self.namespace
    ))
    try:
        self.client.head_object(Bucket=self.bucket_name, Key=self.data_file)
        return True
    except ClientError:
        # Previously this fell through and implicitly returned None.
        # Return False explicitly, matching the DynamoDB/File backends.
        logger.debug('Error encountered with S3. Assume unhealthy')
        return False
logger.debug('Error encountered with S3. Assume unhealthy') | python | def health_check(self):
"""Uses head object to make sure the file exists in S3."""
logger.debug('Health Check on S3 file for: {namespace}'.format(
namespace=self.namespace
))
try:
self.client.head_object(Bucket=self.bucket_name, Key=self.data_file)
return True
except ClientError as e:
logger.debug('Error encountered with S3. Assume unhealthy') | [
"def",
"health_check",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'Health Check on S3 file for: {namespace}'",
".",
"format",
"(",
"namespace",
"=",
"self",
".",
"namespace",
")",
")",
"try",
":",
"self",
".",
"client",
".",
"head_object",
"(",
"Bu... | Uses head object to make sure the file exists in S3. | [
"Uses",
"head",
"object",
"to",
"make",
"sure",
"the",
"file",
"exists",
"in",
"S3",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backends/s3.py#L134-L144 | train | 202,481 |
def health_check(self):
    """Gets a single item to determine if Dynamo is functioning."""
    logger.debug('Health Check on Table: {namespace}'.format(
        namespace=self.namespace
    ))
    try:
        self.get_all()
    except ClientError as e:
        logger.exception(e)
        logger.error('Error encountered with Database. Assume unhealthy')
        return False
    return True
"""Gets a single item to determine if Dynamo is functioning."""
logger.debug('Health Check on Table: {namespace}'.format(
namespace=self.namespace
))
try:
self.get_all()
return True
except ClientError as e:
logger.exception(e)
logger.error('Error encountered with Database. Assume unhealthy')
return False | [
"def",
"health_check",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'Health Check on Table: {namespace}'",
".",
"format",
"(",
"namespace",
"=",
"self",
".",
"namespace",
")",
")",
"try",
":",
"self",
".",
"get_all",
"(",
")",
"return",
"True",
"ex... | Gets a single item to determine if Dynamo is functioning. | [
"Gets",
"a",
"single",
"item",
"to",
"determine",
"if",
"Dynamo",
"is",
"functioning",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backends/dynamodb.py#L85-L98 | train | 202,482 |
def parse_swag_config_options(config):
    """Ensures that options passed to the backend are valid.

    Keys prefixed with 'swag.backend.' or 'swag.' are stripped of their
    prefix and the resulting options are validated against the schema
    for the configured backend 'type' (s3, dynamodb, or file).
    """
    options = {}
    for key, val in config.items():
        if key.startswith('swag.backend.'):
            # len('swag.backend.') == 13; the previous code sliced at 12,
            # producing keys with a leading '.' that no schema recognized.
            options[key[13:]] = val
        elif key.startswith('swag.'):
            # elif: a 'swag.backend.*' key must not also be stored
            # under a bogus 'backend.*' name.
            options[key[5:]] = val

    if options.get('type') == 's3':
        return S3OptionsSchema(strict=True).load(options).data
    elif options.get('type') == 'dynamodb':
        return DynamoDBOptionsSchema(strict=True).load(options).data
    else:
        return FileOptionsSchema(strict=True).load(options).data
"""Ensures that options passed to the backend are valid."""
options = {}
for key, val in config.items():
if key.startswith('swag.backend.'):
options[key[12:]] = val
if key.startswith('swag.'):
options[key[5:]] = val
if options.get('type') == 's3':
return S3OptionsSchema(strict=True).load(options).data
elif options.get('type') == 'dynamodb':
return DynamoDBOptionsSchema(strict=True).load(options).data
else:
return FileOptionsSchema(strict=True).load(options).data | [
"def",
"parse_swag_config_options",
"(",
"config",
")",
":",
"options",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"config",
".",
"items",
"(",
")",
":",
"if",
"key",
".",
"startswith",
"(",
"'swag.backend.'",
")",
":",
"options",
"[",
"key",
"[",
... | Ensures that options passed to the backend are valid. | [
"Ensures",
"that",
"options",
"passed",
"to",
"the",
"backend",
"are",
"valid",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/util.py#L39-L53 | train | 202,483 |
def deprecated(message):
    """Deprecated function decorator.

    Wraps the decorated function so every call emits a
    ``DeprecationWarning`` carrying *message* (attributed to the caller
    via stacklevel 2), then delegates to the original function.
    """
    import functools

    def wrapper(fn):
        # Resolves the old TODO: functools.wraps copies __name__,
        # __module__, __qualname__, etc. from the wrapped function.
        @functools.wraps(fn)
        def deprecated_method(*args, **kargs):
            warnings.warn(message, DeprecationWarning, 2)
            return fn(*args, **kargs)
        # Prepend the deprecation message to the original docstring.
        deprecated_method.__doc__ = "%s\n\n%s" % (message, fn.__doc__)
        return deprecated_method
    return wrapper
"""Deprecated function decorator."""
def wrapper(fn):
def deprecated_method(*args, **kargs):
warnings.warn(message, DeprecationWarning, 2)
return fn(*args, **kargs)
# TODO: use decorator ? functools.wrapper ?
deprecated_method.__name__ = fn.__name__
deprecated_method.__doc__ = "%s\n\n%s" % (message, fn.__doc__)
return deprecated_method
return wrapper | [
"def",
"deprecated",
"(",
"message",
")",
":",
"def",
"wrapper",
"(",
"fn",
")",
":",
"def",
"deprecated_method",
"(",
"*",
"args",
",",
"*",
"*",
"kargs",
")",
":",
"warnings",
".",
"warn",
"(",
"message",
",",
"DeprecationWarning",
",",
"2",
")",
"... | Deprecated function decorator. | [
"Deprecated",
"function",
"decorator",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/util.py#L56-L66 | train | 202,484 |
def is_sub_dict(sub_dict, dictionary):
    """Legacy filter for determining if a given dict is present.

    Returns True when every key of *sub_dict* exists in *dictionary*
    with an equal value; nested dicts are compared recursively.
    """
    for key, value in sub_dict.items():
        if key not in dictionary:
            return False
        if isinstance(value, dict):
            # isinstance (not `type(...) is dict`) also accepts dict
            # subclasses such as OrderedDict. Guarding the target type
            # avoids a TypeError when recursing into a non-mapping.
            if not isinstance(dictionary[key], dict) or not is_sub_dict(value, dictionary[key]):
                return False
        elif value != dictionary[key]:
            return False
    return True
"""Legacy filter for determining if a given dict is present."""
for key in sub_dict.keys():
if key not in dictionary:
return False
if (type(sub_dict[key]) is not dict) and (sub_dict[key] != dictionary[key]):
return False
if (type(sub_dict[key]) is dict) and (not is_sub_dict(sub_dict[key], dictionary[key])):
return False
return True | [
"def",
"is_sub_dict",
"(",
"sub_dict",
",",
"dictionary",
")",
":",
"for",
"key",
"in",
"sub_dict",
".",
"keys",
"(",
")",
":",
"if",
"key",
"not",
"in",
"dictionary",
":",
"return",
"False",
"if",
"(",
"type",
"(",
"sub_dict",
"[",
"key",
"]",
")",
... | Legacy filter for determining if a given dict is present. | [
"Legacy",
"filter",
"for",
"determining",
"if",
"a",
"given",
"dict",
"is",
"present",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/util.py#L94-L103 | train | 202,485 |
def get_by_name(account_name, bucket, region='us-west-2', json_path='accounts.json', alias=None):
    """Given an account name, attempts to retrieve associated account info."""
    accounts = get_all_accounts(bucket, region, json_path)['accounts']
    for account in accounts:
        # Only AWS-typed accounts are considered.
        if 'aws' not in account['type']:
            continue
        if account['name'] == account_name:
            return account
        if alias:
            # Fall back to matching any of the account's aliases.
            for candidate in account['alias']:
                if candidate == account_name:
                    return account
"""Given an account name, attempts to retrieve associated account info."""
for account in get_all_accounts(bucket, region, json_path)['accounts']:
if 'aws' in account['type']:
if account['name'] == account_name:
return account
elif alias:
for a in account['alias']:
if a == account_name:
return account | [
"def",
"get_by_name",
"(",
"account_name",
",",
"bucket",
",",
"region",
"=",
"'us-west-2'",
",",
"json_path",
"=",
"'accounts.json'",
",",
"alias",
"=",
"None",
")",
":",
"for",
"account",
"in",
"get_all_accounts",
"(",
"bucket",
",",
"region",
",",
"json_p... | Given an account name, attempts to retrieve associated account info. | [
"Given",
"an",
"account",
"name",
"attempts",
"to",
"retrieve",
"associated",
"account",
"info",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/swag.py#L12-L21 | train | 202,486 |
def get_all_accounts(bucket, region='us-west-2', json_path='accounts.json', **filters):
    """Fetches all the accounts from SWAG."""
    # Build a schema-v1 S3-backed manager for the given bucket/file.
    opts = parse_swag_config_options({
        'swag.type': 's3',
        'swag.bucket_name': bucket,
        'swag.bucket_region': region,
        'swag.data_file': json_path,
        'swag.schema_version': 1
    })
    swag = SWAGManager(**opts)
    # Keep only accounts matching every supplied filter key/value.
    matched = [
        account
        for account in swag.get_all()['accounts']
        if is_sub_dict(filters, account)
    ]
    return {'accounts': matched}
"""Fetches all the accounts from SWAG."""
swag_opts = {
'swag.type': 's3',
'swag.bucket_name': bucket,
'swag.bucket_region': region,
'swag.data_file': json_path,
'swag.schema_version': 1
}
swag = SWAGManager(**parse_swag_config_options(swag_opts))
accounts = swag.get_all()
accounts = [account for account in accounts['accounts'] if is_sub_dict(filters, account)]
return {'accounts': accounts} | [
"def",
"get_all_accounts",
"(",
"bucket",
",",
"region",
"=",
"'us-west-2'",
",",
"json_path",
"=",
"'accounts.json'",
",",
"*",
"*",
"filters",
")",
":",
"swag_opts",
"=",
"{",
"'swag.type'",
":",
"'s3'",
",",
"'swag.bucket_name'",
":",
"bucket",
",",
"'swa... | Fetches all the accounts from SWAG. | [
"Fetches",
"all",
"the",
"accounts",
"from",
"SWAG",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/swag.py#L34-L47 | train | 202,487 |
def load_file(data_file):
    """Tries to load JSON from data file."""
    try:
        with open(data_file, 'r', encoding='utf-8') as handle:
            contents = handle.read()
        return json.loads(contents)
    except JSONDecodeError:
        # Unparseable (e.g. empty) file: behave as an empty dataset.
        return []
"""Tries to load JSON from data file."""
try:
with open(data_file, 'r', encoding='utf-8') as f:
return json.loads(f.read())
except JSONDecodeError as e:
return [] | [
"def",
"load_file",
"(",
"data_file",
")",
":",
"try",
":",
"with",
"open",
"(",
"data_file",
",",
"'r'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"return",
"json",
".",
"loads",
"(",
"f",
".",
"read",
"(",
")",
")",
"except",
"JSONDec... | Tries to load JSON from data file. | [
"Tries",
"to",
"load",
"JSON",
"from",
"data",
"file",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backends/file.py#L23-L30 | train | 202,488 |
def save_file(data_file, data, dry_run=None):
    """Writes JSON data to data file."""
    # Dry runs write nothing.
    if dry_run:
        return
    serialized = json.dumps(data)
    # On Python 2, json.dumps returns a byte string; decode to text so
    # the utf-8 text-mode write accepts it. No-op branch on Python 3.
    if not sys.version_info > (3, 0):
        serialized = serialized.decode('utf-8')
    with open(data_file, 'w', encoding='utf-8') as f:
        f.write(serialized)
"""Writes JSON data to data file."""
if dry_run:
return
with open(data_file, 'w', encoding='utf-8') as f:
if sys.version_info > (3, 0):
f.write(json.dumps(data))
else:
f.write(json.dumps(data).decode('utf-8')) | [
"def",
"save_file",
"(",
"data_file",
",",
"data",
",",
"dry_run",
"=",
"None",
")",
":",
"if",
"dry_run",
":",
"return",
"with",
"open",
"(",
"data_file",
",",
"'w'",
",",
"encoding",
"=",
"'utf-8'",
")",
"as",
"f",
":",
"if",
"sys",
".",
"version_i... | Writes JSON data to data file. | [
"Writes",
"JSON",
"data",
"to",
"data",
"file",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backends/file.py#L33-L42 | train | 202,489 |
def health_check(self):
    """Checks to make sure the file is there."""
    logger.debug('Health Check on file for: {namespace}'.format(
        namespace=self.namespace
    ))
    # Healthy iff the backing data file exists on disk.
    file_present = os.path.isfile(self.data_file)
    return file_present
"""Checks to make sure the file is there."""
logger.debug('Health Check on file for: {namespace}'.format(
namespace=self.namespace
))
return os.path.isfile(self.data_file) | [
"def",
"health_check",
"(",
"self",
")",
":",
"logger",
".",
"debug",
"(",
"'Health Check on file for: {namespace}'",
".",
"format",
"(",
"namespace",
"=",
"self",
".",
"namespace",
")",
")",
"return",
"os",
".",
"path",
".",
"isfile",
"(",
"self",
".",
"d... | Checks to make sure the file is there. | [
"Checks",
"to",
"make",
"sure",
"the",
"file",
"is",
"there",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backends/file.py#L114-L120 | train | 202,490 |
def configure(self, *args, **kwargs):
    """Configures a SWAG manager. Overrides existing configuration.

    Required kwargs: 'schema_version', 'namespace', and 'type' (the
    backend plugin name). Optional: 'schema_context'.
    """
    self.version = kwargs['schema_version']
    self.namespace = kwargs['namespace']
    # NOTE: the backend factory is called before 'schema_context' is
    # popped, so backends still receive it in their kwargs; reordering
    # these two lines would change what the backend sees.
    self.backend = get(kwargs['type'])(*args, **kwargs)
    self.context = kwargs.pop('schema_context', {})
"""Configures a SWAG manager. Overrides existing configuration."""
self.version = kwargs['schema_version']
self.namespace = kwargs['namespace']
self.backend = get(kwargs['type'])(*args, **kwargs)
self.context = kwargs.pop('schema_context', {}) | [
"def",
"configure",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"version",
"=",
"kwargs",
"[",
"'schema_version'",
"]",
"self",
".",
"namespace",
"=",
"kwargs",
"[",
"'namespace'",
"]",
"self",
".",
"backend",
"=",
... | Configures a SWAG manager. Overrides existing configuration. | [
"Configures",
"a",
"SWAG",
"manager",
".",
"Overrides",
"existing",
"configuration",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backend.py#L57-L63 | train | 202,491 |
def create(self, item, dry_run=None):
    """Create a new item in backend."""
    # Schema-validate before handing the item to the backend.
    validated = validate(item, version=self.version, context=self.context)
    return self.backend.create(validated, dry_run=dry_run)
"""Create a new item in backend."""
return self.backend.create(validate(item, version=self.version, context=self.context), dry_run=dry_run) | [
"def",
"create",
"(",
"self",
",",
"item",
",",
"dry_run",
"=",
"None",
")",
":",
"return",
"self",
".",
"backend",
".",
"create",
"(",
"validate",
"(",
"item",
",",
"version",
"=",
"self",
".",
"version",
",",
"context",
"=",
"self",
".",
"context",... | Create a new item in backend. | [
"Create",
"a",
"new",
"item",
"in",
"backend",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backend.py#L65-L67 | train | 202,492 |
def delete(self, item, dry_run=None):
    """Delete an item in backend.

    Note: unlike create()/update(), the item is passed through without
    schema validation.
    """
    return self.backend.delete(item, dry_run=dry_run)
"""Delete an item in backend."""
return self.backend.delete(item, dry_run=dry_run) | [
"def",
"delete",
"(",
"self",
",",
"item",
",",
"dry_run",
"=",
"None",
")",
":",
"return",
"self",
".",
"backend",
".",
"delete",
"(",
"item",
",",
"dry_run",
"=",
"dry_run",
")"
] | Delete an item in backend. | [
"Delete",
"an",
"item",
"in",
"backend",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backend.py#L69-L71 | train | 202,493 |
def update(self, item, dry_run=None):
    """Update an item in backend."""
    # Schema-validate before handing the item to the backend.
    validated = validate(item, version=self.version, context=self.context)
    return self.backend.update(validated, dry_run=dry_run)
"""Update an item in backend."""
return self.backend.update(validate(item, version=self.version, context=self.context), dry_run=dry_run) | [
"def",
"update",
"(",
"self",
",",
"item",
",",
"dry_run",
"=",
"None",
")",
":",
"return",
"self",
".",
"backend",
".",
"update",
"(",
"validate",
"(",
"item",
",",
"version",
"=",
"self",
".",
"version",
",",
"context",
"=",
"self",
".",
"context",... | Update an item in backend. | [
"Update",
"an",
"item",
"in",
"backend",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backend.py#L73-L75 | train | 202,494 |
def get_all(self, search_filter=None):
    """Fetch all data from backend."""
    items = self.backend.get_all()
    if not items:
        # Schema v1 wraps results under the namespace key; v2 is a list.
        return {self.namespace: []} if self.version == 1 else []
    if search_filter:
        # Narrow results with the caller-supplied JMESPath expression.
        return jmespath.search(search_filter, items)
    return items
"""Fetch all data from backend."""
items = self.backend.get_all()
if not items:
if self.version == 1:
return {self.namespace: []}
return []
if search_filter:
items = jmespath.search(search_filter, items)
return items | [
"def",
"get_all",
"(",
"self",
",",
"search_filter",
"=",
"None",
")",
":",
"items",
"=",
"self",
".",
"backend",
".",
"get_all",
"(",
")",
"if",
"not",
"items",
":",
"if",
"self",
".",
"version",
"==",
"1",
":",
"return",
"{",
"self",
".",
"namesp... | Fetch all data from backend. | [
"Fetch",
"all",
"data",
"from",
"backend",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backend.py#L81-L93 | train | 202,495 |
def get_service_enabled(self, name, accounts_list=None, search_filter=None, region=None):
    """Get a list of accounts where a service has been enabled.

    Args:
        name: service name to look up on each account.
        accounts_list: optional pre-fetched accounts; when omitted,
            accounts are fetched via get_all(search_filter).
        search_filter: JMESPath filter applied only when fetching.
        region: restrict the v2 enabled-check to a region (ignored for
            schema v1, which carries no region information).

    Returns:
        list: the account dicts for which the service is enabled.
    """
    if not accounts_list:
        accounts = self.get_all(search_filter=search_filter)
    else:
        accounts = accounts_list
    # v1 responses wrap the account list under the 'accounts' key.
    if self.version == 1:
        accounts = accounts['accounts']
    enabled = []
    for account in accounts:
        # Re-query each account by id so get_service() sees one account.
        if self.version == 1:
            account_filter = "accounts[?id=='{id}']".format(id=account['id'])
        else:
            account_filter = "[?id=='{id}']".format(id=account['id'])
        service = self.get_service(name, search_filter=account_filter)
        if self.version == 1:
            if service:
                service = service['enabled']  # no region information available in v1
        else:
            # v2: a service is enabled if any of its status entries for
            # the requested region (or 'all') has enabled == true.
            if not region:
                service_filter = "status[?enabled]"
            else:
                service_filter = "status[?(region=='{region}' || region=='all') && enabled]".format(region=region)
            service = jmespath.search(service_filter, service)
        if service:
            enabled.append(account)
    return enabled
"""Get a list of accounts where a service has been enabled."""
if not accounts_list:
accounts = self.get_all(search_filter=search_filter)
else:
accounts = accounts_list
if self.version == 1:
accounts = accounts['accounts']
enabled = []
for account in accounts:
if self.version == 1:
account_filter = "accounts[?id=='{id}']".format(id=account['id'])
else:
account_filter = "[?id=='{id}']".format(id=account['id'])
service = self.get_service(name, search_filter=account_filter)
if self.version == 1:
if service:
service = service['enabled'] # no region information available in v1
else:
if not region:
service_filter = "status[?enabled]"
else:
service_filter = "status[?(region=='{region}' || region=='all') && enabled]".format(region=region)
service = jmespath.search(service_filter, service)
if service:
enabled.append(account)
return enabled | [
"def",
"get_service_enabled",
"(",
"self",
",",
"name",
",",
"accounts_list",
"=",
"None",
",",
"search_filter",
"=",
"None",
",",
"region",
"=",
"None",
")",
":",
"if",
"not",
"accounts_list",
":",
"accounts",
"=",
"self",
".",
"get_all",
"(",
"search_fil... | Get a list of accounts where a service has been enabled. | [
"Get",
"a",
"list",
"of",
"accounts",
"where",
"a",
"service",
"has",
"been",
"enabled",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backend.py#L99-L132 | train | 202,496 |
def get_service(self, name, search_filter):
    """Fetch service metadata."""
    # v1 stores services as a mapping under 'service'; v2 as a list
    # of {'name': ...} entries, of which there must be at most one.
    if self.version == 1:
        return jmespath.search(
            "service.{name}".format(name=name),
            self.get(search_filter)
        )
    matches = jmespath.search(
        "services[?name=='{}']".format(name),
        self.get(search_filter)
    )
    return one(matches)
"""Fetch service metadata."""
if self.version == 1:
service_filter = "service.{name}".format(name=name)
return jmespath.search(service_filter, self.get(search_filter))
else:
service_filter = "services[?name=='{}']".format(name)
return one(jmespath.search(service_filter, self.get(search_filter))) | [
"def",
"get_service",
"(",
"self",
",",
"name",
",",
"search_filter",
")",
":",
"if",
"self",
".",
"version",
"==",
"1",
":",
"service_filter",
"=",
"\"service.{name}\"",
".",
"format",
"(",
"name",
"=",
"name",
")",
"return",
"jmespath",
".",
"search",
... | Fetch service metadata. | [
"Fetch",
"service",
"metadata",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backend.py#L134-L141 | train | 202,497 |
def get_service_name(self, name, search_filter):
    """Fetch account name as referenced by a particular service.

    Note: uses a v2-style 'services[...]' JMESPath expression only;
    unlike get_service() there is no schema-v1 branch here.
    """
    service_filter = "services[?name=='{}'].metadata.name".format(name)
    return one(jmespath.search(service_filter, self.get(search_filter)))
"""Fetch account name as referenced by a particular service. """
service_filter = "services[?name=='{}'].metadata.name".format(name)
return one(jmespath.search(service_filter, self.get(search_filter))) | [
"def",
"get_service_name",
"(",
"self",
",",
"name",
",",
"search_filter",
")",
":",
"service_filter",
"=",
"\"services[?name=='{}'].metadata.name\"",
".",
"format",
"(",
"name",
")",
"return",
"one",
"(",
"jmespath",
".",
"search",
"(",
"service_filter",
",",
"... | Fetch account name as referenced by a particular service. | [
"Fetch",
"account",
"name",
"as",
"referenced",
"by",
"a",
"particular",
"service",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backend.py#L143-L146 | train | 202,498 |
def get_by_name(self, name, alias=None):
    """Fetch all accounts with name specified, optionally include aliases."""
    # Default: exact-name match only.
    search_filter = "[?name=='{}']".format(name)
    if alias:
        # Alias-aware filters differ per schema version (v1: 'alias',
        # v2: 'aliases'); unknown versions keep the default filter.
        templates = {
            1: "accounts[?name=='{name}' || contains(alias, '{name}')]",
            2: "[?name=='{name}' || contains(aliases, '{name}')]",
        }
        template = templates.get(self.version)
        if template is not None:
            search_filter = template.format(name=name)
    return self.get_all(search_filter)
"""Fetch all accounts with name specified, optionally include aliases."""
search_filter = "[?name=='{}']".format(name)
if alias:
if self.version == 1:
search_filter = "accounts[?name=='{name}' || contains(alias, '{name}')]".format(name=name)
elif self.version == 2:
search_filter = "[?name=='{name}' || contains(aliases, '{name}')]".format(name=name)
return self.get_all(search_filter) | [
"def",
"get_by_name",
"(",
"self",
",",
"name",
",",
"alias",
"=",
"None",
")",
":",
"search_filter",
"=",
"\"[?name=='{}']\"",
".",
"format",
"(",
"name",
")",
"if",
"alias",
":",
"if",
"self",
".",
"version",
"==",
"1",
":",
"search_filter",
"=",
"\"... | Fetch all accounts with name specified, optionally include aliases. | [
"Fetch",
"all",
"accounts",
"with",
"name",
"specified",
"optionally",
"include",
"aliases",
"."
] | e43816a85c4f48011cf497a4eae14f9df71fee0f | https://github.com/Netflix-Skunkworks/swag-client/blob/e43816a85c4f48011cf497a4eae14f9df71fee0f/swag_client/backend.py#L148-L159 | train | 202,499 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.