Schema: each row describes one source file. Ranges are taken from the original header; ⌀ marks a nullable column.

| column | dtype | range / values |
|---|---|---|
| hexsha | string | length 40..40 |
| size | int64 | 4 .. 996k |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4..245 |
| max_stars_repo_name | string | length 6..130 |
| max_stars_repo_head_hexsha | string | length 40..40 |
| max_stars_repo_licenses | list | length 1..10 |
| max_stars_count ⌀ | int64 | 1 .. 191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24..24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24..24 |
| max_issues_repo_path | string | length 4..245 |
| max_issues_repo_name | string | length 6..130 |
| max_issues_repo_head_hexsha | string | length 40..40 |
| max_issues_repo_licenses | list | length 1..10 |
| max_issues_count ⌀ | int64 | 1 .. 67k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24..24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24..24 |
| max_forks_repo_path | string | length 4..245 |
| max_forks_repo_name | string | length 6..130 |
| max_forks_repo_head_hexsha | string | length 40..40 |
| max_forks_repo_licenses | list | length 1..10 |
| max_forks_count ⌀ | int64 | 1 .. 105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24..24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24..24 |
| content | string | length 4 .. 996k |
| avg_line_length | float64 | 1.33 .. 58.2k |
| max_line_length | int64 | 2 .. 323k |
| alphanum_fraction | float64 | 0 .. 0.97 |
| content_no_comment | string | length 0 .. 946k |
| is_comment_constant_removed | bool | 2 classes |
| is_sharp_comment_removed | bool | 1 class |
hexsha: 7907f9f16e61e6d3d77348c8c036aab74d3205b1 | size: 723 | ext: py | lang: Python
max_stars: path=renovation_service_provider_manager/api/__init__.py | name=leam-tech/renovation_service_provider_manager | head=353125e3d332d841495f93bf154b76f2cef75d3f | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues: path=renovation_service_provider_manager/api/__init__.py | name=leam-tech/renovation_service_provider_manager | head=353125e3d332d841495f93bf154b76f2cef75d3f | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks: path=renovation_service_provider_manager/api/__init__.py | name=leam-tech/renovation_service_provider_manager | head=353125e3d332d841495f93bf154b76f2cef75d3f | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
import frappe, re
from renovation_service_provider_manager import invoke_mediator
@frappe.whitelist(allow_guest=True)
def get_service_provider_client_id(provider):
k = f"client_id_{re.sub('[^0-9a-zA-Z]+', '_', provider.lower())}"
client_id = frappe.cache().get_value(k)
if client_id:
return client_id
client_id = get_client_id_from_mediator(provider)
frappe.cache().set_value(k, client_id, expires_in_sec=18000) # 5hr
return client_id
def get_client_id_from_mediator(provider):
	# call outside the try so r is always bound when we report the error;
	# the original bare except could raise NameError if invoke_mediator failed
	r = invoke_mediator("/api/method/renovation_mediator.api.get_service_provider_client_id", {"provider": provider})
	try:
		r.raise_for_status()
		return r.json()["message"]
	except Exception:
		frappe.throw(r.text)
avg_line_length: 31.434783 | max_line_length: 117 | alphanum_fraction: 0.749654
content_no_comment:
import frappe, re
from renovation_service_provider_manager import invoke_mediator
@frappe.whitelist(allow_guest=True)
def get_service_provider_client_id(provider):
k = f"client_id_{re.sub('[^0-9a-zA-Z]+', '_', provider.lower())}"
client_id = frappe.cache().get_value(k)
if client_id:
return client_id
client_id = get_client_id_from_mediator(provider)
frappe.cache().set_value(k, client_id, expires_in_sec=18000)
return client_id
def get_client_id_from_mediator(provider):
	r = invoke_mediator("/api/method/renovation_mediator.api.get_service_provider_client_id", {"provider": provider})
	try:
		r.raise_for_status()
		return r.json()["message"]
	except Exception:
		frappe.throw(r.text)
is_comment_constant_removed: true | is_sharp_comment_removed: true
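A minimal sketch (not from the repo above; the provider name is made up) of how the first sample normalizes its cache key:

```python
import re

provider = "Acme & Co."
# runs of non-alphanumeric characters collapse to single underscores
k = f"client_id_{re.sub('[^0-9a-zA-Z]+', '_', provider.lower())}"
print(k)  # client_id_acme_co_
```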
hexsha: 7907fafcb079d191c0e965019b8286b4c02cb7f0 | size: 2,514 | ext: py | lang: Python
max_stars: path=virtual/lib/python3.8/site-packages/setuptools/extern/__init__.py | name=ShaviyaVictor/nyumbakumi- | head=933d825844da139998867594c1e21b09ba5c8e63 | licenses=["MIT"] | count=3 | event_min=2022-03-02T12:13:02.000Z | event_max=2022-03-02T12:38:46.000Z
max_issues: path=virtual/lib/python3.8/site-packages/setuptools/extern/__init__.py | name=ShaviyaVictor/nyumbakumi- | head=933d825844da139998867594c1e21b09ba5c8e63 | licenses=["MIT"] | count=1 | event_min=2022-03-15T12:10:47.000Z | event_max=2022-03-15T12:10:47.000Z
max_forks: path=virtual/lib/python3.8/site-packages/setuptools/extern/__init__.py | name=ShaviyaVictor/nyumbakumi- | head=933d825844da139998867594c1e21b09ba5c8e63 | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
import importlib.util
import sys
class VendorImporter:
"""
A PEP 302 meta path importer for finding optionally-vendored
or otherwise naturally-installed packages from root_name.
"""
def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
self.root_name = root_name
self.vendored_names = set(vendored_names)
self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
@property
def search_path(self):
"""
Search first the vendor package then as a natural package.
"""
yield self.vendor_pkg + '.'
yield ''
def _module_matches_namespace(self, fullname):
"""Figure out if the target module is vendored."""
root, base, target = fullname.partition(self.root_name + '.')
return not root and any(map(target.startswith, self.vendored_names))
def load_module(self, fullname):
"""
Iterate over the search path to locate and load fullname.
"""
root, base, target = fullname.partition(self.root_name + '.')
for prefix in self.search_path:
try:
extant = prefix + target
__import__(extant)
mod = sys.modules[extant]
sys.modules[fullname] = mod
return mod
except ImportError:
pass
else:
raise ImportError(
"The '{target}' package is required; "
"normally this is bundled with this package so if you get "
"this warning, consult the packager of your "
"distribution.".format(**locals())
)
def create_module(self, spec):
return self.load_module(spec.name)
def exec_module(self, module):
pass
def find_spec(self, fullname, path=None, target=None):
"""Return a module spec for vendored names."""
return (
importlib.util.spec_from_loader(fullname, self)
if self._module_matches_namespace(fullname) else None
)
def install(self):
"""
Install this importer into sys.meta_path if not already present.
"""
if self not in sys.meta_path:
sys.meta_path.append(self)
names = (
'packaging', 'pyparsing', 'ordered_set', 'more_itertools', 'importlib_metadata',
'zipp', 'importlib_resources', 'jaraco', 'typing_extensions', 'nspektr',
)
VendorImporter(__name__, names, 'setuptools._vendor').install()
avg_line_length: 32.649351 | max_line_length: 84 | alphanum_fraction: 0.604614
content_no_comment:
import importlib.util
import sys
class VendorImporter:
def __init__(self, root_name, vendored_names=(), vendor_pkg=None):
self.root_name = root_name
self.vendored_names = set(vendored_names)
self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor')
@property
def search_path(self):
yield self.vendor_pkg + '.'
yield ''
def _module_matches_namespace(self, fullname):
root, base, target = fullname.partition(self.root_name + '.')
return not root and any(map(target.startswith, self.vendored_names))
def load_module(self, fullname):
root, base, target = fullname.partition(self.root_name + '.')
for prefix in self.search_path:
try:
extant = prefix + target
__import__(extant)
mod = sys.modules[extant]
sys.modules[fullname] = mod
return mod
except ImportError:
pass
else:
raise ImportError(
"The '{target}' package is required; "
"normally this is bundled with this package so if you get "
"this warning, consult the packager of your "
"distribution.".format(**locals())
)
def create_module(self, spec):
return self.load_module(spec.name)
def exec_module(self, module):
pass
def find_spec(self, fullname, path=None, target=None):
return (
importlib.util.spec_from_loader(fullname, self)
if self._module_matches_namespace(fullname) else None
)
def install(self):
if self not in sys.meta_path:
sys.meta_path.append(self)
names = (
'packaging', 'pyparsing', 'ordered_set', 'more_itertools', 'importlib_metadata',
'zipp', 'importlib_resources', 'jaraco', 'typing_extensions', 'nspektr',
)
VendorImporter(__name__, names, 'setuptools._vendor').install()
is_comment_constant_removed: true | is_sharp_comment_removed: true
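For context, a short sketch of what the VendorImporter above enables once install() has run; it assumes a setuptools build that actually vendors a copy of packaging under setuptools._vendor:

```python
# importing through the 'extern' namespace is rerouted by VendorImporter:
# it first tries the vendored copy under setuptools._vendor, then falls
# back to a normally installed package of the same name
from setuptools.extern import packaging

print(packaging.__name__)  # 'setuptools._vendor.packaging' when vendored
```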
hexsha: 7907fb1a03f455a7370cb9f215000397fc06da34 | size: 2,382 | ext: py | lang: Python
max_stars: path=python/atlassian/config-report.py | name=oldD0g/code-snippets | head=68325d63122a5bbbab68dd726ea3add096380e12 | licenses=["CC0-1.0"] | count=null | event_min=null | event_max=null
max_issues: path=python/atlassian/config-report.py | name=oldD0g/code-snippets | head=68325d63122a5bbbab68dd726ea3add096380e12 | licenses=["CC0-1.0"] | count=null | event_min=null | event_max=null
max_forks: path=python/atlassian/config-report.py | name=oldD0g/code-snippets | head=68325d63122a5bbbab68dd726ea3add096380e12 | licenses=["CC0-1.0"] | count=null | event_min=null | event_max=null
content:
#!/usr/bin/env python
"""
Object-oriented implementation of backup reporting code.
Defines a class called 'Backup' that records all backups of a device
"""
import os, sys, argparse
import glob
from configparser import ConfigParser
from atlassian import Confluence
class Backup:
def __init__(self, device, backup_root):
self.device = device
self.root = backup_root
config_pattern = "{}/*/{}".format(self.root, device)
configs = glob.glob(config_pattern, recursive=True)
# Remove the full pathname, we only want the directory and the filename
bkps = [dir[len(backup_root)+1:] for dir in configs]
self.backups = bkps
def name(self):
return self.device
def latest(self):
if len(self.backups) >= 1:
return self.backups[-1].split('/')[0]
else:
return "NotFound"
def main():
parser = ConfigParser()
parser.read('config-demo.ini')
device_list_file = parser['backups']['device_list']
apikey = parser['confluence']['apikey']
username = parser['confluence']['username']
url = parser['confluence']['url']
page_ID = parser['confluence']['page_ID']
confluence = Confluence(url=url, username=username, password=apikey)
# Read in all the devices from the nominated file
with open(device_list_file) as file:
lines = file.readlines()
devices = [line.rstrip() for line in lines]
wiki_table = "||Device||Date||"
for device in devices:
device_bkp = Backup(device, parser['backups']['path'])
latest_bkp_date = device_bkp.latest()
print(f"Latest backup for {device_bkp.name()} is {latest_bkp_date}")
wiki_table += "\n" + f"|{device}|{latest_bkp_date}|"
print("Wiki text for table is:")
print(wiki_table)
result = confluence.update_page(
page_id=page_ID,
title='Config Retrievals',
representation="wiki",
body=wiki_table)
#pprint(result)
print(f"Title of page set to '{result['title']}'")
print(f"Confluence revision for page is now {result['version']['confRev']}")
if __name__ == "__main__":
main()
avg_line_length: 32.630137 | max_line_length: 82 | alphanum_fraction: 0.585642
content_no_comment:
import os, sys, argparse
import glob
from configparser import ConfigParser
from atlassian import Confluence
class Backup:
def __init__(self, device, backup_root):
self.device = device
self.root = backup_root
config_pattern = "{}/*/{}".format(self.root, device)
configs = glob.glob(config_pattern, recursive=True)
bkps = [dir[len(backup_root)+1:] for dir in configs]
self.backups = bkps
def name(self):
return self.device
def latest(self):
if len(self.backups) >= 1:
return self.backups[-1].split('/')[0]
else:
return "NotFound"
def main():
parser = ConfigParser()
parser.read('config-demo.ini')
device_list_file = parser['backups']['device_list']
apikey = parser['confluence']['apikey']
username = parser['confluence']['username']
url = parser['confluence']['url']
page_ID = parser['confluence']['page_ID']
confluence = Confluence(url=url, username=username, password=apikey)
with open(device_list_file) as file:
lines = file.readlines()
devices = [line.rstrip() for line in lines]
wiki_table = "||Device||Date||"
for device in devices:
device_bkp = Backup(device, parser['backups']['path'])
latest_bkp_date = device_bkp.latest()
print(f"Latest backup for {device_bkp.name()} is {latest_bkp_date}")
wiki_table += "\n" + f"|{device}|{latest_bkp_date}|"
print("Wiki text for table is:")
print(wiki_table)
result = confluence.update_page(
page_id=page_ID,
title='Config Retrievals',
representation="wiki",
body=wiki_table)
print(f"Title of page set to '{result['title']}'")
print(f"Confluence revision for page is now {result['version']['confRev']}")
if __name__ == "__main__":
main()
is_comment_constant_removed: true | is_sharp_comment_removed: true
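As an illustration of the Confluence wiki markup the script above accumulates, with made-up device rows:

```python
wiki_table = "||Device||Date||"  # header row in Confluence wiki markup
for device, date in [("router1", "2021-06-01"), ("switch2", "NotFound")]:
    wiki_table += "\n" + f"|{device}|{date}|"
print(wiki_table)
# ||Device||Date||
# |router1|2021-06-01|
# |switch2|NotFound|
```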
hexsha: 7907fb69542584c044bb901f0348ed8fd6ad0055 | size: 2,662 | ext: py | lang: Python
max_stars: path=nnunet/training/network_training/competitions_with_custom_Trainers/MMS/nnUNetTrainerV2_MMS.py | name=nasyxx/nnUNet | head=92d5f2352349eed278e22f7a38cb86b0fccd7c75 | licenses=["Apache-2.0"] | count=72 | event_min=2020-10-30T08:55:17.000Z | event_max=2022-03-30T03:15:55.000Z
max_issues: path=nnunet/training/network_training/competitions_with_custom_Trainers/MMS/nnUNetTrainerV2_MMS.py | name=nasyxx/nnUNet | head=92d5f2352349eed278e22f7a38cb86b0fccd7c75 | licenses=["Apache-2.0"] | count=16 | event_min=2021-01-13T03:39:47.000Z | event_max=2022-03-31T21:35:32.000Z
max_forks: path=nnunet/training/network_training/competitions_with_custom_Trainers/MMS/nnUNetTrainerV2_MMS.py | name=nasyxx/nnUNet | head=92d5f2352349eed278e22f7a38cb86b0fccd7c75 | licenses=["Apache-2.0"] | count=20 | event_min=2020-10-29T20:47:28.000Z | event_max=2022-03-26T07:18:00.000Z
content:
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNet_variants.data_augmentation.nnUNetTrainerV2_insaneDA import \
nnUNetTrainerV2_insaneDA
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
class nnUNetTrainerV2_MMS(nnUNetTrainerV2_insaneDA):
def setup_DA_params(self):
super().setup_DA_params()
self.data_aug_params["p_rot"] = 0.7
self.data_aug_params["p_eldef"] = 0.1
self.data_aug_params["p_scale"] = 0.3
self.data_aug_params["independent_scale_factor_for_each_axis"] = True
self.data_aug_params["p_independent_scale_per_axis"] = 0.3
self.data_aug_params["do_additive_brightness"] = True
self.data_aug_params["additive_brightness_mu"] = 0
self.data_aug_params["additive_brightness_sigma"] = 0.2
self.data_aug_params["additive_brightness_p_per_sample"] = 0.3
self.data_aug_params["additive_brightness_p_per_channel"] = 1
self.data_aug_params["elastic_deform_alpha"] = (0., 300.)
self.data_aug_params["elastic_deform_sigma"] = (9., 15.)
self.data_aug_params['gamma_range'] = (0.5, 1.6)
def initialize_network(self):
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.BatchNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.BatchNorm2d
norm_op_kwargs = {'eps': 1e-5, 'affine': True}
dropout_op_kwargs = {'p': 0, 'inplace': True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
len(self.net_num_pool_op_kernel_sizes),
self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
"""def run_training(self):
from batchviewer import view_batch
a = next(self.tr_gen)
view_batch(a['data'])
import IPython;IPython.embed()"""
avg_line_length: 43.639344 | max_line_length: 117 | alphanum_fraction: 0.657776
content_no_comment:
import torch
from nnunet.network_architecture.generic_UNet import Generic_UNet
from nnunet.network_architecture.initialization import InitWeights_He
from nnunet.training.network_training.nnUNet_variants.data_augmentation.nnUNetTrainerV2_insaneDA import \
nnUNetTrainerV2_insaneDA
from nnunet.utilities.nd_softmax import softmax_helper
from torch import nn
class nnUNetTrainerV2_MMS(nnUNetTrainerV2_insaneDA):
def setup_DA_params(self):
super().setup_DA_params()
self.data_aug_params["p_rot"] = 0.7
self.data_aug_params["p_eldef"] = 0.1
self.data_aug_params["p_scale"] = 0.3
self.data_aug_params["independent_scale_factor_for_each_axis"] = True
self.data_aug_params["p_independent_scale_per_axis"] = 0.3
self.data_aug_params["do_additive_brightness"] = True
self.data_aug_params["additive_brightness_mu"] = 0
self.data_aug_params["additive_brightness_sigma"] = 0.2
self.data_aug_params["additive_brightness_p_per_sample"] = 0.3
self.data_aug_params["additive_brightness_p_per_channel"] = 1
self.data_aug_params["elastic_deform_alpha"] = (0., 300.)
self.data_aug_params["elastic_deform_sigma"] = (9., 15.)
self.data_aug_params['gamma_range'] = (0.5, 1.6)
def initialize_network(self):
if self.threeD:
conv_op = nn.Conv3d
dropout_op = nn.Dropout3d
norm_op = nn.BatchNorm3d
else:
conv_op = nn.Conv2d
dropout_op = nn.Dropout2d
norm_op = nn.BatchNorm2d
norm_op_kwargs = {'eps': 1e-5, 'affine': True}
dropout_op_kwargs = {'p': 0, 'inplace': True}
net_nonlin = nn.LeakyReLU
net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}
self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes,
len(self.net_num_pool_op_kernel_sizes),
self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,
dropout_op_kwargs,
net_nonlin, net_nonlin_kwargs, True, False, lambda x: x, InitWeights_He(1e-2),
self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)
if torch.cuda.is_available():
self.network.cuda()
self.network.inference_apply_nonlin = softmax_helper
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7907fb94af616116405a175738be8418ab426188 | size: 1,294 | ext: py | lang: Python
max_stars: path=7.py | name=Nikolas-01/Lesson_7 | head=dfbc8306bf9858b85253e5bf2066bb147b93ece0 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues: path=7.py | name=Nikolas-01/Lesson_7 | head=dfbc8306bf9858b85253e5bf2066bb147b93ece0 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks: path=7.py | name=Nikolas-01/Lesson_7 | head=dfbc8306bf9858b85253e5bf2066bb147b93ece0 | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
from docxtpl import DocxTemplate
import csv
import json
import random
# pick a random car
with open('Car_info.txt') as file:
car_rand = []
reader = csv.reader(file)
for row in file:
car_rand.append(row)
report_car = car_rand[random.randint(0, len(car_rand)-1)]
car_info = report_car.split()
# car details
def get_data (Brand, Model, Fuel_cons, Price):
return {
'Название': Brand,
'Модель': Model,
'Объем': Fuel_cons,
'Цена': Price
}
def from_template(Brand, Model, Fuel_cons, Price, template):
template = DocxTemplate(template)
data = get_data(Brand, Model, Fuel_cons, Price)
template.render(data)
template.save('О_машине.docx')
def report(Brand, Model, Fuel_cons, Price):
template = 'О_машине.docx'
document = from_template(Brand, Model, Fuel_cons, Price, template)
report(car_info[0], car_info[1], car_info[2], car_info[3])
# CSV file
car_list=[]
with open('Авто_инфо.txt', 'r') as file:
for row in file:
inner_list = [x.strip() for x in row.split(',')]
car_list.append(inner_list)
print(car_list)
with open('car.csv', 'w') as file:
writer = csv.writer(file, delimiter = '*')
writer.writerows(car_list)
# JSON file
with open('Авто_json.txt', 'w') as f:
json.dump(str(car_info), f)
avg_line_length: 30.809524 | max_line_length: 70 | alphanum_fraction: 0.665379
content_no_comment:
from docxtpl import DocxTemplate
import csv
import json
import random
with open('Car_info.txt') as file:
car_rand = []
reader = csv.reader(file)
for row in file:
car_rand.append(row)
report_car = car_rand[random.randint(0, len(car_rand)-1)]
car_info = report_car.split()
def get_data (Brand, Model, Fuel_cons, Price):
return {
'Название': Brand,
'Модель': Model,
'Объем': Fuel_cons,
'Цена': Price
}
def from_template(Brand, Model, Fuel_cons, Price, template):
template = DocxTemplate(template)
data = get_data(Brand, Model, Fuel_cons, Price)
template.render(data)
template.save('О_машине.docx')
def report(Brand, Model, Fuel_cons, Price):
template = 'О_машине.docx'
document = from_template(Brand, Model, Fuel_cons, Price, template)
report(car_info[0], car_info[1], car_info[2], car_info[3])
car_list=[]
with open('Авто_инфо.txt', 'r') as file:
for row in file:
inner_list = [x.strip() for x in row.split(',')]
car_list.append(inner_list)
print(car_list)
with open('car.csv', 'w') as file:
writer = csv.writer(file, delimiter = '*')
writer.writerows(car_list)
with open('Авто_json.txt', 'w') as f:
json.dump(str(car_info), f)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7907fb9f3bf5ec2da1ef4ed0d8d5e4c7860ac719 | size: 1,228 | ext: py | lang: Python
max_stars: path=examples/undocumented/python/structure_discrete_hmsvm_bmrm.py | name=gf712/shogun | head=ca2afb8f092288455701539aa58952dbf6743378 | licenses=["BSD-3-Clause"] | count=2,753 | event_min=2015-01-02T11:34:13.000Z | event_max=2022-03-25T07:04:27.000Z
max_issues: path=examples/undocumented/python/structure_discrete_hmsvm_bmrm.py | name=gf712/shogun | head=ca2afb8f092288455701539aa58952dbf6743378 | licenses=["BSD-3-Clause"] | count=2,404 | event_min=2015-01-02T19:31:41.000Z | event_max=2022-03-09T10:58:22.000Z
max_forks: path=examples/undocumented/python/structure_discrete_hmsvm_bmrm.py | name=gf712/shogun | head=ca2afb8f092288455701539aa58952dbf6743378 | licenses=["BSD-3-Clause"] | count=1,156 | event_min=2015-01-03T01:57:21.000Z | event_max=2022-03-26T01:06:28.000Z
content:
#!/usr/bin/env python
import numpy
import scipy
from scipy import io
data_dict = scipy.io.loadmat('../data/hmsvm_data_large_integer.mat', struct_as_record=False)
parameter_list=[[data_dict]]
def structure_discrete_hmsvm_bmrm (m_data_dict=data_dict):
import shogun as sg
try:
_ = sg.create_machine("DualLibQPBMSOSVM")
except:
print("DualLibQPBMSOSVM not available")
return
labels_array = m_data_dict['label'][0]
idxs = numpy.nonzero(labels_array == -1)
labels_array[idxs] = 0
labels = sg.SequenceLabels(labels_array, 250, 500, 2)
features = sg.RealMatrixFeatures(m_data_dict['signal'].astype(float), 250, 500)
num_obs = 4 # given by the data file used
model = sg.create_structured_model("HMSVMModel", features=features, labels=labels,
state_model_type="SMT_TWO_STATE", num_obs=num_obs)
sosvm = sg.create_machine("DualLibQPBMSOSVM", model=model, labels=labels, m_lambda=5000.0)
sosvm.train()
#print sosvm.get_w()
predicted = sosvm.apply(features)
evaluator = sg.create_evaluation("StructuredAccuracy")
acc = evaluator.evaluate(predicted, labels)
#print('Accuracy = %.4f' % acc)
if __name__ == '__main__':
print("Discrete HMSVM BMRM")
structure_discrete_hmsvm_bmrm(*parameter_list[0])
avg_line_length: 27.909091 | max_line_length: 92 | alphanum_fraction: 0.754886
content_no_comment:
import numpy
import scipy
from scipy import io
data_dict = scipy.io.loadmat('../data/hmsvm_data_large_integer.mat', struct_as_record=False)
parameter_list=[[data_dict]]
def structure_discrete_hmsvm_bmrm (m_data_dict=data_dict):
import shogun as sg
try:
_ = sg.create_machine("DualLibQPBMSOSVM")
except:
print("DualLibQPBMSOSVM not available")
return
labels_array = m_data_dict['label'][0]
idxs = numpy.nonzero(labels_array == -1)
labels_array[idxs] = 0
labels = sg.SequenceLabels(labels_array, 250, 500, 2)
features = sg.RealMatrixFeatures(m_data_dict['signal'].astype(float), 250, 500)
num_obs = 4
model = sg.create_structured_model("HMSVMModel", features=features, labels=labels,
state_model_type="SMT_TWO_STATE", num_obs=num_obs)
sosvm = sg.create_machine("DualLibQPBMSOSVM", model=model, labels=labels, m_lambda=5000.0)
sosvm.train()
predicted = sosvm.apply(features)
evaluator = sg.create_evaluation("StructuredAccuracy")
acc = evaluator.evaluate(predicted, labels)
if __name__ == '__main__':
print("Discrete HMSVM BMRM")
structure_discrete_hmsvm_bmrm(*parameter_list[0])
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7907fc90c49114bc8b1bda121717a23b62b78812 | size: 5,390 | ext: py | lang: Python
max_stars: path=topi/python/topi/cuda/conv2d_hwcn.py | name=peterjc123/tvm | head=d6dcd6c56febfbb12efe67884c188f045f435893 | licenses=["Apache-2.0"] | count=48 | event_min=2020-07-29T18:09:23.000Z | event_max=2021-10-09T01:53:33.000Z
max_issues: path=topi/python/topi/cuda/conv2d_hwcn.py | name=peterjc123/tvm | head=d6dcd6c56febfbb12efe67884c188f045f435893 | licenses=["Apache-2.0"] | count=9 | event_min=2021-04-02T02:28:07.000Z | event_max=2022-03-26T18:23:59.000Z
max_forks: path=Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/3rdparty/tvm/topi/python/topi/cuda/conv2d_hwcn.py | name=lablup/training_results_v0.7 | head=f5bb59aa0f8b18b602763abe47d1d24d0d54b197 | licenses=["Apache-2.0"] | count=42 | event_min=2020-08-01T06:41:24.000Z | event_max=2022-01-20T10:33:08.000Z
content:
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, too-many-locals, too-many-statements
"""Schedule for conv2d_hwcn with auto fusion"""
import tvm
from .. import tag
def schedule_conv2d_hwcn(outs):
"""Schedule for conv2d_hwcn and any element-wise operations.
Parameters
----------
outs: Array of Tensor
The computation graph description of conv2d_hwcn in the format
of an array of tensors.
Returns
-------
s: Schedule
The computation schedule for conv2d_hwcn.
"""
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
sch = tvm.create_schedule([x.op for x in outs])
def schedule(Apad, W, B):
"""Schedule conv2d_hwcn"""
sch[Apad].compute_inline()
AA = sch.cache_read(Apad, "shared", [B])
WW = sch.cache_read(W, "shared", [B])
AL = sch.cache_read(AA, "local", [B])
WL = sch.cache_read(WW, "local", [B])
if B.op in sch.outputs:
Out = B
BL = sch.cache_write(Out, "local")
else:
Out = sch.outputs[0].output(0)
sch[B].set_scope("local")
BL = B
tile = 8
num_thread = 8
block_factor = tile * num_thread
step = 8
vthread = 2
block_x = tvm.thread_axis("blockIdx.x")
block_y = tvm.thread_axis("blockIdx.y")
block_z = tvm.thread_axis("blockIdx.z")
thread_x = tvm.thread_axis((0, num_thread), "threadIdx.x")
thread_y = tvm.thread_axis((0, num_thread), "threadIdx.y")
thread_xz = tvm.thread_axis((0, vthread), "vthread", name="vx")
thread_yz = tvm.thread_axis((0, vthread), "vthread", name="vy")
hi, wi, fi, ni = sch[Out].op.axis
bz = sch[Out].fuse(hi, wi)
by, fi = sch[Out].split(fi, factor=block_factor)
bx, ni = sch[Out].split(ni, factor=block_factor)
tyz, fi = sch[Out].split(fi, nparts=vthread)
txz, ni = sch[Out].split(ni, nparts=vthread)
ty, fi = sch[Out].split(fi, nparts=num_thread)
tx, ni = sch[Out].split(ni, nparts=num_thread)
sch[Out].reorder(bz, by, bx, tyz, txz, ty, tx, fi, ni)
sch[Out].bind(bz, block_z)
sch[Out].bind(by, block_y)
sch[Out].bind(bx, block_x)
sch[Out].bind(tyz, thread_yz)
sch[Out].bind(txz, thread_xz)
sch[Out].bind(ty, thread_y)
sch[Out].bind(tx, thread_x)
# Schedule BL local write
sch[BL].compute_at(sch[Out], tx)
yi, xi, fi, ni = sch[BL].op.axis
ry, rx, rc = sch[BL].op.reduce_axis
rco, rci = sch[BL].split(rc, factor=step)
sch[BL].reorder(rco, ry, rx, rci, fi, ni)
fuse_index = sch[BL].fuse(ry, rx)
fuse_index = sch[BL].fuse(fuse_index, rco)
rx = fuse_index
sch[AA].compute_at(sch[BL], rx)
sch[WW].compute_at(sch[BL], rx)
sch[AL].compute_at(sch[BL], rci)
sch[WL].compute_at(sch[BL], rci)
# Schedule for A's shared memory load
yi, xi, ci, ni = sch[AA].op.axis
ty, ci = sch[AA].split(ci, nparts=num_thread)
tx, ni = sch[AA].split(ni, nparts=num_thread)
_, ni = sch[AA].split(ni, factor=4)
sch[AA].reorder(ty, tx, yi, xi, ci, ni)
sch[AA].bind(ty, thread_y)
sch[AA].bind(tx, thread_x)
sch[AA].vectorize(ni)
# Schedule for W's shared memory load
yi, xi, ci, fi = sch[WW].op.axis
ty, ci = sch[WW].split(ci, nparts=num_thread)
tx, fi = sch[WW].split(fi, nparts=num_thread)
_, fi = sch[WW].split(fi, factor=4)
sch[WW].reorder(ty, tx, yi, xi, ci, fi)
sch[WW].bind(ty, thread_y)
sch[WW].bind(tx, thread_x)
sch[WW].vectorize(fi)
scheduled_ops = []
def traverse(operator):
"""Traverse operators from computation graph"""
if tag.is_broadcast(operator.tag):
if operator not in sch.outputs:
sch[operator].compute_inline()
for tensor in operator.input_tensors:
if tensor.op.input_tensors and tensor.op not in scheduled_ops:
traverse(tensor.op)
elif operator.tag == 'conv2d_hwcn':
Apad = operator.input_tensors[0]
W = operator.input_tensors[1]
if isinstance(W.op, tvm.tensor.ComputeOp) and 'dilate' in W.op.tag:
sch[W].compute_inline()
B = operator.output(0)
schedule(Apad, W, B)
else:
raise RuntimeError("Unsupported operator: %s" % operator.tag)
scheduled_ops.append(operator)
traverse(outs[0].op)
return sch
avg_line_length: 37.957746 | max_line_length: 79 | alphanum_fraction: 0.601299
content_no_comment:
import tvm
from .. import tag
def schedule_conv2d_hwcn(outs):
outs = [outs] if isinstance(outs, tvm.tensor.Tensor) else outs
sch = tvm.create_schedule([x.op for x in outs])
def schedule(Apad, W, B):
sch[Apad].compute_inline()
AA = sch.cache_read(Apad, "shared", [B])
WW = sch.cache_read(W, "shared", [B])
AL = sch.cache_read(AA, "local", [B])
WL = sch.cache_read(WW, "local", [B])
if B.op in sch.outputs:
Out = B
BL = sch.cache_write(Out, "local")
else:
Out = sch.outputs[0].output(0)
sch[B].set_scope("local")
BL = B
tile = 8
num_thread = 8
block_factor = tile * num_thread
step = 8
vthread = 2
block_x = tvm.thread_axis("blockIdx.x")
block_y = tvm.thread_axis("blockIdx.y")
block_z = tvm.thread_axis("blockIdx.z")
thread_x = tvm.thread_axis((0, num_thread), "threadIdx.x")
thread_y = tvm.thread_axis((0, num_thread), "threadIdx.y")
thread_xz = tvm.thread_axis((0, vthread), "vthread", name="vx")
thread_yz = tvm.thread_axis((0, vthread), "vthread", name="vy")
hi, wi, fi, ni = sch[Out].op.axis
bz = sch[Out].fuse(hi, wi)
by, fi = sch[Out].split(fi, factor=block_factor)
bx, ni = sch[Out].split(ni, factor=block_factor)
tyz, fi = sch[Out].split(fi, nparts=vthread)
txz, ni = sch[Out].split(ni, nparts=vthread)
ty, fi = sch[Out].split(fi, nparts=num_thread)
tx, ni = sch[Out].split(ni, nparts=num_thread)
sch[Out].reorder(bz, by, bx, tyz, txz, ty, tx, fi, ni)
sch[Out].bind(bz, block_z)
sch[Out].bind(by, block_y)
sch[Out].bind(bx, block_x)
sch[Out].bind(tyz, thread_yz)
sch[Out].bind(txz, thread_xz)
sch[Out].bind(ty, thread_y)
sch[Out].bind(tx, thread_x)
sch[BL].compute_at(sch[Out], tx)
yi, xi, fi, ni = sch[BL].op.axis
ry, rx, rc = sch[BL].op.reduce_axis
rco, rci = sch[BL].split(rc, factor=step)
sch[BL].reorder(rco, ry, rx, rci, fi, ni)
fuse_index = sch[BL].fuse(ry, rx)
fuse_index = sch[BL].fuse(fuse_index, rco)
rx = fuse_index
sch[AA].compute_at(sch[BL], rx)
sch[WW].compute_at(sch[BL], rx)
sch[AL].compute_at(sch[BL], rci)
sch[WL].compute_at(sch[BL], rci)
yi, xi, ci, ni = sch[AA].op.axis
ty, ci = sch[AA].split(ci, nparts=num_thread)
tx, ni = sch[AA].split(ni, nparts=num_thread)
_, ni = sch[AA].split(ni, factor=4)
sch[AA].reorder(ty, tx, yi, xi, ci, ni)
sch[AA].bind(ty, thread_y)
sch[AA].bind(tx, thread_x)
sch[AA].vectorize(ni)
yi, xi, ci, fi = sch[WW].op.axis
ty, ci = sch[WW].split(ci, nparts=num_thread)
tx, fi = sch[WW].split(fi, nparts=num_thread)
_, fi = sch[WW].split(fi, factor=4)
sch[WW].reorder(ty, tx, yi, xi, ci, fi)
sch[WW].bind(ty, thread_y)
sch[WW].bind(tx, thread_x)
sch[WW].vectorize(fi)
scheduled_ops = []
def traverse(operator):
if tag.is_broadcast(operator.tag):
if operator not in sch.outputs:
sch[operator].compute_inline()
for tensor in operator.input_tensors:
if tensor.op.input_tensors and tensor.op not in scheduled_ops:
traverse(tensor.op)
elif operator.tag == 'conv2d_hwcn':
Apad = operator.input_tensors[0]
W = operator.input_tensors[1]
if isinstance(W.op, tvm.tensor.ComputeOp) and 'dilate' in W.op.tag:
sch[W].compute_inline()
B = operator.output(0)
schedule(Apad, W, B)
else:
raise RuntimeError("Unsupported operator: %s" % operator.tag)
scheduled_ops.append(operator)
traverse(outs[0].op)
return sch
is_comment_constant_removed: true | is_sharp_comment_removed: true
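A hypothetical driver for the schedule above, written against the same legacy (pre-0.7) TVM API the sample uses; the tensor shapes and target are arbitrary assumptions:

```python
import tvm
import topi

# HWCN layout: height, width, channels, batch
A = tvm.placeholder((14, 14, 64, 128), name='A')
W = tvm.placeholder((3, 3, 64, 256), name='W')
B = topi.nn.conv2d_hwcn(A, W, 1, 1)          # stride=1, padding=1
s = schedule_conv2d_hwcn([B])                 # the schedule defined above
f = tvm.build(s, [A, W, B], target='cuda')
```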
hexsha: 7907fcd9d8b5f016e33c1b2eeafd3a39be62d79e | size: 3,299 | ext: py | lang: Python
max_stars: path=data/make_hdf5_files.py | name=pisalore/pointnet_shrec17-classificator | head=4c2288d16b953f466967a3deb569bba059a156f8 | licenses=["MIT"] | count=3 | event_min=2019-11-13T09:16:47.000Z | event_max=2021-02-17T08:48:48.000Z
max_issues: path=data/make_hdf5_files.py | name=pisalore/pointnet_shrec17-classificator | head=4c2288d16b953f466967a3deb569bba059a156f8 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks: path=data/make_hdf5_files.py | name=pisalore/pointnet_shrec17-classificator | head=4c2288d16b953f466967a3deb569bba059a156f8 | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
import h5py
import numpy as np
import os
from plyfile import PlyData, PlyElement
HDF5_DATA = 'hdf5_data'
print('Generating .h5 files...', '\n')
if not os.path.exists(HDF5_DATA):
os.mkdir(HDF5_DATA)
filenames_training = [line.rstrip() for line in open("filelist_training.txt", 'r')]
filenames_testing = [line.rstrip() for line in open("filelist_testing.txt", 'r')]
print((len(filenames_training)))
print((len(filenames_testing)))
f_training = h5py.File("./hdf5_data/data_training.h5", 'w')
f_testing = h5py.File("./hdf5_data/data_testing.h5", 'w')
a_data_training = np.zeros((len(filenames_training), 2048, 3))
a_pid_training = np.zeros((len(filenames_training), 2048), dtype = np.uint8)
labeldata_training = []
a_label_training = np.zeros((len(filenames_training), 1), dtype = np.uint8)
a_data_testing = np.zeros((len(filenames_testing), 2048, 3))
a_pid_testing = np.zeros((len(filenames_testing), 2048), dtype = np.uint8)
labeldata_testing = []
a_label_testing = np.zeros((len(filenames_testing), 1), dtype = np.uint8)
# ====== GENERATING TRAINING FILES ======
#========================================
for i in range(0, len(filenames_training)):
print(filenames_training[i])
plydata = PlyData.read("./ply_dir/" + filenames_training[i] + ".ply")
piddata = [line.rstrip() for line in open("./seg_dir/" + filenames_training[i] + ".seg", 'r')]
# labeldata = [line.rstrip() for line in open("./label_dir/" + filenames_training[i] + ".seg", 'r')]
for j in range(0, 2048):
a_data_training[i, j] = [plydata['vertex']['x'][j], plydata['vertex']['y'][j], plydata['vertex']['z'][j]]
a_pid_training[i, j] = piddata[j]
# a_label_training[i, j] = labeldata[j]
for i in range(0, len(filenames_training)):
labeldata = [line.rstrip() for line in open("./label_dir/" + filenames_training[i] + ".seg", 'r')]
a_label_training[i] = labeldata[0]
data = f_training.create_dataset("data", data = a_data_training)
pid = f_training.create_dataset("pid", data = a_pid_training)
label = f_training.create_dataset("label", data = a_label_training)
# ====== GENERATING TRAINING FILES ======
#========================================
# ====== GENERATING TESTING FILES ======
#========================================
for i in range(0, len(filenames_testing)):
plydata = PlyData.read("./ply_dir/" + filenames_testing[i] + ".ply")
piddata = [line.rstrip() for line in open("./seg_dir/" + filenames_testing[i] + ".seg", 'r')]
# labeldata = [line.rstrip() for line in open("./label_dir/" + filenames_testing[i] + ".seg", 'r')]
for j in range(0, 2048):
a_data_testing[i, j] = [plydata['vertex']['x'][j], plydata['vertex']['y'][j], plydata['vertex']['z'][j]]
a_pid_testing[i, j] = piddata[j]
# a_label_testing[i, j] = labeldata[j]
for i in range(0, len(filenames_testing)):
labeldata = [line.rstrip() for line in open("./label_dir/" + filenames_testing[i] + ".seg", 'r')]
a_label_testing[i] = labeldata[0]
data = f_testing.create_dataset("data", data = a_data_testing)
pid = f_testing.create_dataset("pid", data = a_pid_testing)
label = f_testing.create_dataset("label", data = a_label_testing)
#========================================
#========================================
print('HDF5 files generated.')
avg_line_length: 41.759494 | max_line_length: 113 | alphanum_fraction: 0.631403
content_no_comment:
import h5py
import numpy as np
import os
from plyfile import PlyData, PlyElement
HDF5_DATA = 'hdf5_data'
print('Generating .h5 files...', '\n')
if not os.path.exists(HDF5_DATA):
os.mkdir(HDF5_DATA)
filenames_training = [line.rstrip() for line in open("filelist_training.txt", 'r')]
filenames_testing = [line.rstrip() for line in open("filelist_testing.txt", 'r')]
print((len(filenames_training)))
print((len(filenames_testing)))
f_training = h5py.File("./hdf5_data/data_training.h5", 'w')
f_testing = h5py.File("./hdf5_data/data_testing.h5", 'w')
a_data_training = np.zeros((len(filenames_training), 2048, 3))
a_pid_training = np.zeros((len(filenames_training), 2048), dtype = np.uint8)
labeldata_training = []
a_label_training = np.zeros((len(filenames_training), 1), dtype = np.uint8)
a_data_testing = np.zeros((len(filenames_testing), 2048, 3))
a_pid_testing = np.zeros((len(filenames_testing), 2048), dtype = np.uint8)
labeldata_testing = []
a_label_testing = np.zeros((len(filenames_testing), 1), dtype = np.uint8)
for i in range(0, len(filenames_training)):
print(filenames_training[i])
plydata = PlyData.read("./ply_dir/" + filenames_training[i] + ".ply")
piddata = [line.rstrip() for line in open("./seg_dir/" + filenames_training[i] + ".seg", 'r')]
for j in range(0, 2048):
a_data_training[i, j] = [plydata['vertex']['x'][j], plydata['vertex']['y'][j], plydata['vertex']['z'][j]]
a_pid_training[i, j] = piddata[j]
for i in range(0, len(filenames_training)):
labeldata = [line.rstrip() for line in open("./label_dir/" + filenames_training[i] + ".seg", 'r')]
a_label_training[i] = labeldata[0]
data = f_training.create_dataset("data", data = a_data_training)
pid = f_training.create_dataset("pid", data = a_pid_training)
label = f_training.create_dataset("label", data = a_label_training)
for i in range(0, len(filenames_testing)):
plydata = PlyData.read("./ply_dir/" + filenames_testing[i] + ".ply")
piddata = [line.rstrip() for line in open("./seg_dir/" + filenames_testing[i] + ".seg", 'r')]
for j in range(0, 2048):
a_data_testing[i, j] = [plydata['vertex']['x'][j], plydata['vertex']['y'][j], plydata['vertex']['z'][j]]
a_pid_testing[i, j] = piddata[j]
for i in range(0, len(filenames_testing)):
labeldata = [line.rstrip() for line in open("./label_dir/" + filenames_testing[i] + ".seg", 'r')]
a_label_testing[i] = labeldata[0]
data = f_testing.create_dataset("data", data = a_data_testing)
pid = f_testing.create_dataset("pid", data = a_pid_testing)
label = f_testing.create_dataset("label", data = a_label_testing)
print('HDF5 files generated.')
is_comment_constant_removed: true | is_sharp_comment_removed: true
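A small read-back check (assumed, not part of the script) for the HDF5 files it writes:

```python
import h5py

with h5py.File('./hdf5_data/data_training.h5', 'r') as f:
    print(f['data'].shape)   # (num_training_files, 2048, 3) point clouds
    print(f['pid'].shape)    # (num_training_files, 2048) per-point segment ids
    print(f['label'].shape)  # (num_training_files, 1) per-cloud class labels
```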
hexsha: 7907fd6c2e4e2f3c44e4478178b44dc4ccb98a8e | size: 1,390 | ext: py | lang: Python
max_stars: path=tareas/3/ManzanaresJorge-SalazarJesus/spn.py | name=jorgelmp/sistop-2022-1 | head=5c3b7e5215247533446aa006affe6cc64a48d989 | licenses=["CC-BY-4.0"] | count=6 | event_min=2021-08-30T19:11:57.000Z | event_max=2021-09-05T01:30:59.000Z
max_issues: path=tareas/3/ManzanaresJorge-SalazarJesus/spn.py | name=jorgelmp/sistop-2022-1 | head=5c3b7e5215247533446aa006affe6cc64a48d989 | licenses=["CC-BY-4.0"] | count=13 | event_min=2021-09-07T22:24:47.000Z | event_max=2021-11-23T05:26:38.000Z
max_forks: path=tareas/3/ManzanaresJorge-SalazarJesus/spn.py | name=jorgelmp/sistop-2022-1 | head=5c3b7e5215247533446aa006affe6cc64a48d989 | licenses=["CC-BY-4.0"] | count=33 | event_min=2021-09-01T00:44:27.000Z | event_max=2022-02-09T06:17:55.000Z
content:
from scheduler import Scheduler
from collections import deque
class Spn(Scheduler):
name = "Shortest Process Next (SPN)"
ejecutados = []
ejecutados_visual = ""
def __init__(self,procesos=[]):
self.spn_queue = deque()
self.t = procesos[0].arrvl_time
self.procesos = procesos
self.getMaxT(procesos)
self.check_for_new_process()
def check_for_new_process(self):
list=[]
for i in self.procesos:
#print(i.id+" "+str(i.exec_time))
if i.arrvl_time <= self.t:
list.append(i)
# self.procesos.remove(i)
list.sort(key=lambda Process: Process.exec_time)
for i in list:
self.spn_queue.append(i)
self.procesos.remove(i)
def execute(self):
while self.t < self.max_t:
if self.spn_queue:
ejecutando = self.spn_queue.popleft()
self.ejecutados_visual+=ejecutando.id
self.t +=1
while not ejecutando.execute(1) :
self.ejecutados_visual+=ejecutando.id
self.t +=1
ejecutando.compl_time = self.t
self.ejecutados.append(ejecutando)
else:
self.emptyExec()
self.check_for_new_process()
#print(self.t)
avg_line_length: 32.325581 | max_line_length: 57 | alphanum_fraction: 0.539568
content_no_comment:
from scheduler import Scheduler
from collections import deque
class Spn(Scheduler):
name = "Shortest Process Next (SPN)"
ejecutados = []
ejecutados_visual = ""
def __init__(self,procesos=[]):
self.spn_queue = deque()
self.t = procesos[0].arrvl_time
self.procesos = procesos
self.getMaxT(procesos)
self.check_for_new_process()
def check_for_new_process(self):
list=[]
for i in self.procesos:
if i.arrvl_time <= self.t:
list.append(i)
list.sort(key=lambda Process: Process.exec_time)
for i in list:
self.spn_queue.append(i)
self.procesos.remove(i)
def execute(self):
while self.t < self.max_t:
if self.spn_queue:
ejecutando = self.spn_queue.popleft()
self.ejecutados_visual+=ejecutando.id
self.t +=1
while not ejecutando.execute(1) :
self.ejecutados_visual+=ejecutando.id
self.t +=1
ejecutando.compl_time = self.t
self.ejecutados.append(ejecutando)
else:
self.emptyExec()
self.check_for_new_process()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7907fdd7386241163ed033444bcaddf34a68b930 | size: 5,414 | ext: py | lang: Python
max_stars: path=tests/test_visualizer.py | name=bjtuyxc/detectron2 | head=ebb9f8c9166765c508f8ac53d9ed2004739b28d1 | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
max_issues: path=tests/test_visualizer.py | name=bjtuyxc/detectron2 | head=ebb9f8c9166765c508f8ac53d9ed2004739b28d1 | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
max_forks: path=tests/test_visualizer.py | name=bjtuyxc/detectron2 | head=ebb9f8c9166765c508f8ac53d9ed2004739b28d1 | licenses=["Apache-2.0"] | count=null | event_min=null | event_max=null
content:
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# File:
import numpy as np
import unittest
import torch
from detectron2.data import MetadataCatalog
from detectron2.structures import Instances, RotatedBoxes, BoxMode
from detectron2.utils.visualizer import Visualizer
class TestVisualizer(unittest.TestCase):
def _random_data(self):
H, W = 100, 100
N = 10
img = np.random.rand(H, W, 3) * 255
boxxy = np.random.rand(N, 2) * (H // 2)
boxes = np.concatenate((boxxy, boxxy + H // 2), axis=1)
def _rand_poly():
return np.random.rand(3, 2).flatten() * H
polygons = [[_rand_poly() for _ in range(np.random.randint(1, 5))] for _ in range(N)]
mask = np.zeros_like(img[:, :, 0], dtype=np.bool)
mask[:10, 10:20] = 1
labels = [str(i) for i in range(N)]
return img, boxes, labels, polygons, [mask] * N
@property
def metadata(self):
return MetadataCatalog.get("coco_2017_train")
def test_draw_dataset_dict(self):
img = np.random.rand(512, 512, 3) * 255
dic = {'annotations': [{'bbox': [368.9946492271106,
330.891438763377,
13.148537455410235,
13.644708680142685],
'bbox_mode': BoxMode.XYWH_ABS,
'category_id': 0,
'iscrowd': 1,
'segmentation': {'counts': '_jh52m?2N2N2N2O100O10O001N1O2MceP2',
'size': [512, 512]}}],
'height': 512,
'image_id': 1,
'width': 512}
v = Visualizer(img, self.metadata)
v.draw_dataset_dict(dic)
def test_overlay_instances(self):
img, boxes, labels, polygons, masks = self._random_data()
v = Visualizer(img, self.metadata)
output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()
self.assertEqual(output.shape, img.shape)
# Test 2x scaling
v = Visualizer(img, self.metadata, scale=2.0)
output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()
self.assertEqual(output.shape[0], img.shape[0] * 2)
# Test overlay masks
v = Visualizer(img, self.metadata)
output = v.overlay_instances(masks=masks, boxes=boxes, labels=labels).get_image()
self.assertEqual(output.shape, img.shape)
def test_overlay_instances_no_boxes(self):
img, boxes, labels, polygons, _ = self._random_data()
v = Visualizer(img, self.metadata)
v.overlay_instances(masks=polygons, boxes=None, labels=labels).get_image()
def test_draw_instance_predictions(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
inst.pred_masks = torch.from_numpy(np.asarray(masks))
v = Visualizer(img, self.metadata)
v.draw_instance_predictions(inst)
def test_draw_empty_mask_predictions(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
inst.pred_masks = torch.from_numpy(np.zeros_like(np.asarray(masks)))
v = Visualizer(img, self.metadata)
v.draw_instance_predictions(inst)
def test_correct_output_shape(self):
img = np.random.rand(928, 928, 3) * 255
v = Visualizer(img, self.metadata)
out = v.output.get_image()
self.assertEqual(out.shape, img.shape)
def test_overlay_rotated_instances(self):
H, W = 100, 150
img = np.random.rand(H, W, 3) * 255
num_boxes = 50
boxes_5d = torch.zeros(num_boxes, 5)
boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-0.1 * W, 1.1 * W)
boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-0.1 * H, 1.1 * H)
boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))
boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))
boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800)
rotated_boxes = RotatedBoxes(boxes_5d)
labels = [str(i) for i in range(num_boxes)]
v = Visualizer(img, self.metadata)
output = v.overlay_instances(boxes=rotated_boxes, labels=labels).get_image()
self.assertEqual(output.shape, img.shape)
def test_draw_no_metadata(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
inst.pred_masks = torch.from_numpy(np.asarray(masks))
v = Visualizer(img, MetadataCatalog.get("asdfasdf"))
v.draw_instance_predictions(inst)
avg_line_length: 40.402985 | max_line_length: 96 | alphanum_fraction: 0.602143
content_no_comment:
import numpy as np
import unittest
import torch
from detectron2.data import MetadataCatalog
from detectron2.structures import Instances, RotatedBoxes, BoxMode
from detectron2.utils.visualizer import Visualizer
class TestVisualizer(unittest.TestCase):
def _random_data(self):
H, W = 100, 100
N = 10
img = np.random.rand(H, W, 3) * 255
boxxy = np.random.rand(N, 2) * (H // 2)
boxes = np.concatenate((boxxy, boxxy + H // 2), axis=1)
def _rand_poly():
return np.random.rand(3, 2).flatten() * H
polygons = [[_rand_poly() for _ in range(np.random.randint(1, 5))] for _ in range(N)]
mask = np.zeros_like(img[:, :, 0], dtype=np.bool)
mask[:10, 10:20] = 1
labels = [str(i) for i in range(N)]
return img, boxes, labels, polygons, [mask] * N
@property
def metadata(self):
return MetadataCatalog.get("coco_2017_train")
def test_draw_dataset_dict(self):
img = np.random.rand(512, 512, 3) * 255
dic = {'annotations': [{'bbox': [368.9946492271106,
330.891438763377,
13.148537455410235,
13.644708680142685],
'bbox_mode': BoxMode.XYWH_ABS,
'category_id': 0,
'iscrowd': 1,
'segmentation': {'counts': '_jh52m?2N2N2N2O100O10O001N1O2MceP2',
'size': [512, 512]}}],
'height': 512,
'image_id': 1,
'width': 512}
v = Visualizer(img, self.metadata)
v.draw_dataset_dict(dic)
def test_overlay_instances(self):
img, boxes, labels, polygons, masks = self._random_data()
v = Visualizer(img, self.metadata)
output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()
self.assertEqual(output.shape, img.shape)
v = Visualizer(img, self.metadata, scale=2.0)
output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image()
self.assertEqual(output.shape[0], img.shape[0] * 2)
v = Visualizer(img, self.metadata)
output = v.overlay_instances(masks=masks, boxes=boxes, labels=labels).get_image()
self.assertEqual(output.shape, img.shape)
def test_overlay_instances_no_boxes(self):
img, boxes, labels, polygons, _ = self._random_data()
v = Visualizer(img, self.metadata)
v.overlay_instances(masks=polygons, boxes=None, labels=labels).get_image()
def test_draw_instance_predictions(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
inst.pred_masks = torch.from_numpy(np.asarray(masks))
v = Visualizer(img, self.metadata)
v.draw_instance_predictions(inst)
def test_draw_empty_mask_predictions(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
inst.pred_masks = torch.from_numpy(np.zeros_like(np.asarray(masks)))
v = Visualizer(img, self.metadata)
v.draw_instance_predictions(inst)
def test_correct_output_shape(self):
img = np.random.rand(928, 928, 3) * 255
v = Visualizer(img, self.metadata)
out = v.output.get_image()
self.assertEqual(out.shape, img.shape)
def test_overlay_rotated_instances(self):
H, W = 100, 150
img = np.random.rand(H, W, 3) * 255
num_boxes = 50
boxes_5d = torch.zeros(num_boxes, 5)
boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-0.1 * W, 1.1 * W)
boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-0.1 * H, 1.1 * H)
boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))
boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H))
boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800)
rotated_boxes = RotatedBoxes(boxes_5d)
labels = [str(i) for i in range(num_boxes)]
v = Visualizer(img, self.metadata)
output = v.overlay_instances(boxes=rotated_boxes, labels=labels).get_image()
self.assertEqual(output.shape, img.shape)
def test_draw_no_metadata(self):
img, boxes, _, _, masks = self._random_data()
num_inst = len(boxes)
inst = Instances((img.shape[0], img.shape[1]))
inst.pred_classes = torch.randint(0, 80, size=(num_inst,))
inst.scores = torch.rand(num_inst)
inst.pred_boxes = torch.from_numpy(boxes)
inst.pred_masks = torch.from_numpy(np.asarray(masks))
v = Visualizer(img, MetadataCatalog.get("asdfasdf"))
v.draw_instance_predictions(inst)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7907fdda48e28d327e2b86a5e1a99f1449f54f4a | size: 1,778 | ext: py | lang: Python
max_stars: path=Scripts/mybar.py | name=jovanzac/Captain | head=3e410aa22eec4f72274b9bf4f0f2b3c91936356d | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues: path=Scripts/mybar.py | name=jovanzac/Captain | head=3e410aa22eec4f72274b9bf4f0f2b3c91936356d | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks: path=Scripts/mybar.py | name=jovanzac/Captain | head=3e410aa22eec4f72274b9bf4f0f2b3c91936356d | licenses=["MIT"] | count=1 | event_min=2020-12-25T08:21:37.000Z | event_max=2020-12-25T08:21:37.000Z
content:
import tkinter as tk
from PIL import Image, ImageTk
# The Custom Variable Widgets
class MyBar(tk.Canvas):
def __init__(self, master:object, shape:object, value=0, maximum=100,
bg="#231303", trough_color='#8a7852', bar_color='#f7f4bf'):
"""Creating the alpha mask and creating a custom widget of the given shape and dimensions."""
# open shape mask with PIL
im_shape_alpha = Image.open(shape).convert('L')
# create the bar shape image with the chosen background color
im_shape = Image.new('RGBA', im_shape_alpha.size, bg)
# apply shape as alpha mask to "cut out" the bar shape
im_shape.putalpha(im_shape_alpha)
width, height = im_shape_alpha.size
# create the canvas
tk.Canvas.__init__(self, master, bg=trough_color, width=width, height=height, highlightthickness=0)
self._value = value # bar value
self.maximum = maximum # maximum value
# bar width and height
self.height = height
self.width = width
# create tkinter image for the shape from the PIL Image
self.img_trough = ImageTk.PhotoImage(im_shape, master=self)
# create bar to display the value
self.create_rectangle(0, height, width, height * (1 - value/self.maximum), width=0, fill=bar_color, tags='pbar')
# display shape on top
self.create_image(0, 0, anchor='nw', image=self.img_trough)
@property
def value(self):
"""Return bar's value."""
return self._value
@value.setter
def value(self, value:int):
"""Set bar's value."""
self._value = value
# adjust bar height to value
self.coords('pbar', 0, self.height, self.width, self.height*(1 - value/self.maximum))
avg_line_length: 40.409091 | max_line_length: 120 | alphanum_fraction: 0.644544
content_no_comment:
import tkinter as tk
from PIL import Image, ImageTk
class MyBar(tk.Canvas):
def __init__(self, master:object, shape:object, value=0, maximum=100,
bg="#231303", trough_color='#8a7852', bar_color='#f7f4bf'):
im_shape_alpha = Image.open(shape).convert('L')
im_shape = Image.new('RGBA', im_shape_alpha.size, bg)
im_shape.putalpha(im_shape_alpha)
width, height = im_shape_alpha.size
tk.Canvas.__init__(self, master, bg=trough_color, width=width, height=height, highlightthickness=0)
self._value = value
self.maximum = maximum
self.height = height
self.width = width
self.img_trough = ImageTk.PhotoImage(im_shape, master=self)
self.create_rectangle(0, height, width, height * (1 - value/self.maximum), width=0, fill=bar_color, tags='pbar')
self.create_image(0, 0, anchor='nw', image=self.img_trough)
@property
def value(self):
return self._value
@value.setter
def value(self, value:int):
self._value = value
self.coords('pbar', 0, self.height, self.width, self.height*(1 - value/self.maximum))
is_comment_constant_removed: true | is_sharp_comment_removed: true
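A hypothetical driver for the MyBar widget above; 'shape.png' stands in for any grayscale mask image:

```python
import tkinter as tk

root = tk.Tk()
# the mask's light regions become the visible bar shape
bar = MyBar(root, shape='shape.png', value=30, maximum=100)
bar.pack(padx=10, pady=10)
bar.value = 75  # the property setter redraws the fill at 75%
root.mainloop()
```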
hexsha: 7907fed5bbfcfa6e7035b885995f9b21d8943f56 | size: 610 | ext: py | lang: Python
max_stars: path=v_python/fixture/admin_catalog.py | name=spcartman/selenium_full_course | head=673f25dcf2340c0c14666c7a91f774fd7659f0b1 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues: path=v_python/fixture/admin_catalog.py | name=spcartman/selenium_full_course | head=673f25dcf2340c0c14666c7a91f774fd7659f0b1 | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks: path=v_python/fixture/admin_catalog.py | name=spcartman/selenium_full_course | head=673f25dcf2340c0c14666c7a91f774fd7659f0b1 | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
class AdminCatalogHelper:
def __init__(self, app):
self.app = app
def go_though_each_product_and_print_browser_log(self):
for i in range(len(self.app.wd.find_elements_by_css_selector('.dataTable td:nth-of-type(3) a[href*="&product_id="]'))):
self.app.wd.find_elements_by_css_selector('.dataTable td:nth-of-type(3) a[href*="&product_id="]')[i].click()
[print(log) for log in self.app.wd.get_log("browser")]
self.app.wait_for_element_to_be_visible('#tab-general')
self.app.wd.find_element_by_css_selector('button[name="cancel"]').click()
avg_line_length: 50.833333 | max_line_length: 127 | alphanum_fraction: 0.678689
content_no_comment:
class AdminCatalogHelper:
def __init__(self, app):
self.app = app
def go_though_each_product_and_print_browser_log(self):
for i in range(len(self.app.wd.find_elements_by_css_selector('.dataTable td:nth-of-type(3) a[href*="&product_id="]'))):
self.app.wd.find_elements_by_css_selector('.dataTable td:nth-of-type(3) a[href*="&product_id="]')[i].click()
[print(log) for log in self.app.wd.get_log("browser")]
self.app.wait_for_element_to_be_visible('#tab-general')
self.app.wd.find_element_by_css_selector('button[name="cancel"]').click()
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7907fee4677a36aa4ecc2aa9a88cbdfe69077ec6 | size: 687 | ext: py | lang: Python
max_stars: path=setup.py | name=arthurcgusmao/py-mcc-f1 | head=d1b7cb856fbf03faad6a9eeeaea08da049c603c0 | licenses=["MIT"] | count=7 | event_min=2020-10-26T21:33:40.000Z | event_max=2022-02-14T10:56:06.000Z
max_issues: path=setup.py | name=arthurcgusmao/py-mcc-f1 | head=d1b7cb856fbf03faad6a9eeeaea08da049c603c0 | licenses=["MIT"] | count=1 | event_min=2022-02-13T19:17:15.000Z | event_max=2022-02-13T19:17:15.000Z
max_forks: path=setup.py | name=arthurcgusmao/py-mcc-f1 | head=d1b7cb856fbf03faad6a9eeeaea08da049c603c0 | licenses=["MIT"] | count=1 | event_min=2022-02-14T10:56:08.000Z | event_max=2022-02-14T10:56:08.000Z
content:
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="py-mcc-f1",
version="0.1.0",
author="Arthur Colombini Gusmão",
description="MCC-F1 Curve",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/arthurcgusmao/py-mcc-f1",
packages=setuptools.find_packages(),
install_requires=[
"numpy>=1.14.0",
"scikit-learn>=0.22"
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
avg_line_length: 26.423077 | max_line_length: 53 | alphanum_fraction: 0.630277
content_no_comment:
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="py-mcc-f1",
version="0.1.0",
author="Arthur Colombini Gusmão",
description="MCC-F1 Curve",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/arthurcgusmao/py-mcc-f1",
packages=setuptools.find_packages(),
install_requires=[
"numpy>=1.14.0",
"scikit-learn>=0.22"
],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 7907ffa028fa0010a06fc93cf03a76362f49f5c9 | size: 1,127 | ext: py | lang: Python
max_stars: path=udemy-data-structures-and-algorithms/15-recursion/15.8_string_permutation.py | name=washimimizuku/python-data-structures-and-algorithms | head=537f4eabaf31888ae48004d153088fb28bb684ab | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_issues: path=udemy-data-structures-and-algorithms/15-recursion/15.8_string_permutation.py | name=washimimizuku/python-data-structures-and-algorithms | head=537f4eabaf31888ae48004d153088fb28bb684ab | licenses=["MIT"] | count=null | event_min=null | event_max=null
max_forks: path=udemy-data-structures-and-algorithms/15-recursion/15.8_string_permutation.py | name=washimimizuku/python-data-structures-and-algorithms | head=537f4eabaf31888ae48004d153088fb28bb684ab | licenses=["MIT"] | count=null | event_min=null | event_max=null
content:
'''
Given a string, write a function that uses recursion to output a
list of all the possible permutations of that string.
For example, given s='abc' the function should return ['abc', 'acb', 'bac', 'bca', 'cab', 'cba']
Note: If a character is repeated, treat each occurrence as distinct,
for example an input of 'xxx' would return a list with 6 "versions" of 'xxx'
'''
from nose.tools import assert_equal
def permute(s):
out = []
# Base case
if (len(s) == 1):
out = [s]
else:
# For every letter in string
for i, let in enumerate(s):
# For every permutation
for perm in permute(s[:i] + s[i + 1:]):
# Add it to the output
out += [let + perm]
return out
class TestPerm(object):
def test(self, solution):
assert_equal(sorted(solution('abc')), sorted(
['abc', 'acb', 'bac', 'bca', 'cab', 'cba']))
assert_equal(sorted(solution('dog')), sorted(
['dog', 'dgo', 'odg', 'ogd', 'gdo', 'god']))
print('All test cases passed.')
# Run Tests
t = TestPerm()
t.test(permute)
avg_line_length: 23 | max_line_length: 96 | alphanum_fraction: 0.573203
content_no_comment:
from nose.tools import assert_equal
def permute(s):
out = []
    if len(s) == 1:
out = [s]
else:
for i, let in enumerate(s):
for perm in permute(s[:i] + s[i + 1:]):
out += [let + perm]
return out
class TestPerm(object):
def test(self, solution):
assert_equal(sorted(solution('abc')), sorted(
['abc', 'acb', 'bac', 'bca', 'cab', 'cba']))
assert_equal(sorted(solution('dog')), sorted(
['dog', 'dgo', 'odg', 'ogd', 'gdo', 'god']))
print('All test cases passed.')
t = TestPerm()
t.test(permute)
| true
| true
|
7908015094df0f7d24b375510cc3e85e33122519
| 11,743
|
py
|
Python
|
PNet/train_pnet.py
|
mangye16/ReID-Label-Noise
|
89aa11f68c275a0bcff232d9a5c3ae152c9276af
|
[
"MIT"
] | 11
|
2020-04-03T09:01:36.000Z
|
2022-03-11T08:12:16.000Z
|
PNet/train_pnet.py
|
mangye16/ReID-Label-Noise
|
89aa11f68c275a0bcff232d9a5c3ae152c9276af
|
[
"MIT"
] | null | null | null |
PNet/train_pnet.py
|
mangye16/ReID-Label-Noise
|
89aa11f68c275a0bcff232d9a5c3ae152c9276af
|
[
"MIT"
] | 3
|
2020-12-18T11:53:05.000Z
|
2022-01-12T16:35:45.000Z
|
# -*- coding: UTF-8 -*-
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torchvision import datasets, models, transforms
from tensorboardX import SummaryWriter
import sys
import json
import scipy
import os, time
import argparse
import numpy as np
import torchvision
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from PIL import Image
from shutil import copyfile
from model import ft_net
from test_eval_cython import get_test_acc, extr_fea_train
from utils import *
import loader, loss
import pdb
version = torch.__version__
# #####################################################################
# Options
# --------
parser = argparse.ArgumentParser(description='Training')
parser.add_argument('--gpu',default='0', type=str,help='gpu ids: e.g. 0 0,1,2 0,2')
parser.add_argument('--seed', default=1, type=int, help='rng seed')
parser.add_argument('--model_dir',default='.checkpoint/', type=str, help='output model name')
parser.add_argument('--data_dir',default='/home/comp/mangye/dataset/', type=str, help='data dir')
parser.add_argument('--dataset',default='duke',type=str, help='training data:Market1501, DukeMTMCreID')
parser.add_argument('--pretrained',default='',type=str, help='path of pretrained model, e.g. ./model/baseline/net_8.pth')
parser.add_argument('--batchsize', default=32, type=int, help='batchsize')
parser.add_argument('--noise_ratio', default=0.2, type=float, help='percentage of noise data in the training')
parser.add_argument('--lr', default=0.01, type=float, help='initial learning rate')
parser.add_argument('--alpha', default=2, type=float, help='beta distribution: alpha')
parser.add_argument('--beta', default=6, type=float, help='beta distribution: beta')
parser.add_argument('--LabelWt', default=60, type=int, help='label refinement weight')
parser.add_argument('--weighttype', default=0, type=int, help='weight type: instance weight, class weight')
parser.add_argument('--stage2', action='store_true', help='training stage 2')
args = parser.parse_args()
torch.manual_seed(args.seed)
start_epoch = 0
if args.stage2:
start_epoch = start_epoch + 20
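    # stage 2 continues from epoch 20, i.e. after the stage-1 training schedule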
best_acc = 0
best_epoch = 0
test_epoch = 2
lr = args.lr
data_dir = args.data_dir + args.dataset
suffix = args.dataset + '_noise_{}_'.format(args.noise_ratio)
if args.LabelWt > 0 or args.stage2:
suffix = suffix + 'batch_{}_wt_{}'.format(args.batchsize,args.LabelWt)
else:
suffix = suffix + 'batch_{}_baseline'.format(args.batchsize)
if args.stage2:
suffix = suffix + '_beta_{}_{}_lr_{:1.1e}'.format(args.alpha, args.beta, args.lr)
suffix = suffix + '_w_st2_new'
else:
suffix = suffix + '_lr_{:1.1e}'.format(args.lr)
suffix = suffix + '_w_st1'
print ('model: ' + suffix)
# define the log path
log_dir = './new_res/' + args.dataset + '_log/'
checkpoint_path = './res/checkpoint/'
vis_log_dir = log_dir + suffix + '/'
if not os.path.isdir(log_dir):
os.makedirs(log_dir)
if not os.path.isdir(vis_log_dir):
os.makedirs(vis_log_dir)
writer = SummaryWriter(vis_log_dir)
test_log_file = open(log_dir + suffix + '.txt', "w")
sys.stdout = Logger(log_dir + suffix + '_os.txt')
# define the gpu id
str_ids = args.gpu.split(',')
gpu_ids = []
for str_id in str_ids:
gid = int(str_id)
if gid >=0:
gpu_ids.append(gid)
# set gpu ids
if len(gpu_ids)>0:
torch.cuda.set_device(gpu_ids[0])
print ('using gpu: {}'.format(gpu_ids))
# #####################################################################
# Load Data
train_transform = transforms.Compose([
#transforms.RandomResizedCrop(size=128, scale=(0.75,1.0), ratio=(0.75,1.3333), interpolation=3), #Image.BICUBIC)
transforms.Resize((288,144), interpolation=3),
transforms.RandomCrop((256,128)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
test_transform = transforms.Compose([
transforms.Resize((256,128), interpolation=3),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# load training data with loader.DatasetFolder
print('Start loading training data: ', args.dataset)
train_dataset = loader.DatasetFolder(os.path.join(data_dir, 'train'), transform=train_transform)
class_names = train_dataset.classes
dataset_sizes_train = len(train_dataset)
use_gpu = torch.cuda.is_available()
# Define a model
model = ft_net(len(class_names))
if use_gpu:
model = model.cuda()
# Load a pretrained model
if args.pretrained or args.stage2:
# model_name = 'market_noise_0.2_batch_32_lambda_0.4_lr_1.0e-02_st1_epoch_best.t'
model_name = '{}_noise_{}_batch_32_wt_60_lr_1.0e-02_w_st1_epoch_best.t'.format(args.dataset, args.noise_ratio)
    print('Initializing weights with {}'.format(model_name))
model_path = checkpoint_path + model_name
model.load_state_dict(torch.load(model_path))
else:
    print('Initializing weights with ImageNet')
# generate noisy label
if args.noise_ratio >= 0:
trainLabels = torch.LongTensor([y for (p, y, w) in train_dataset.imgs])
trainLabels_nsy, if_truelbl = gen_nosiy_lbl(trainLabels, args.noise_ratio, len(class_names))
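    # gen_nosiy_lbl randomly corrupts the requested fraction of labels; if_truelbl presumably flags samples whose labels stayed clean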
print('Finish adding noisy label')
# generate instance weight
if args.stage2:
    print('Generating self-generated weights...')
weight_file = './new_res/' + 'new_{}_{}_weights.npy'.format(args.dataset, args.noise_ratio)
label_file = './new_res/' + 'new_{}_{}_label.npy'.format(args.dataset, args.noise_ratio)
# if os.path.exists(weight_file):
# all_weights = np.load(weight_file)
# pre_pids = np.load(label_file)
# else:
    transform_bak = train_transform
train_dataset.transform = test_transform
temploader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchsize, shuffle=False, num_workers=8)
model.eval() # Set model to evaluate mode
    print('Start extracting features...')
start = time.time()
train_feas, pre_pids = extr_fea_train(model, train_dataset, temploader, use_gpu)
print('Evaluation time: {}'.format(time.time()-start))
indexs, ori_weight = gen_weights_dist(train_feas, trainLabels_nsy, class_names, args.alpha, args.beta)
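    # gen_weights_dist returns weights keyed by the shuffled `indexs`; argsort restores the original sample order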
order = np.argsort(indexs)
all_weights = ori_weight[order]
np.save(weight_file, all_weights)
np.save(label_file, pre_pids)
    train_dataset.transform = transform_bak
all_weights = all_weights.astype(np.float32)
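    # rewrite each sample tuple as (image path, refined label, instance weight)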
for i in range(len(trainLabels_nsy)):
train_dataset.imgs[i] = (train_dataset.imgs[i][0], int(pre_pids[i]), all_weights[i])
else:
print('Setting same weights for all the instances...')
for i in range(len(trainLabels_nsy)):
train_dataset.imgs[i] = (train_dataset.imgs[i][0], trainLabels_nsy[i],1)
dataloaders_train = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchsize, shuffle=True, num_workers=8) # 8 workers may work faster
# load testing data with ImageFolder
test_dataset = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,test_transform) for x in ['gallery','query']}
dataloaders_test = {x: torch.utils.data.DataLoader(test_dataset[x], batch_size=args.batchsize, shuffle=False, num_workers=8) for x in ['gallery','query']}
# Define loss functions
if args.stage2:
    criterion = loss.InstanceWeightLoss(weighted = 1)
elif args.LabelWt > 0:
    criterion = loss.LabelRefineLoss(lambda1=args.LabelWt)
else:
    criterion = nn.CrossEntropyLoss()
# optimizer
ignored_params = list(map(id, model.model.fc.parameters() )) + list(map(id, model.classifier.parameters() ))
base_params = filter(lambda p: id(p) not in ignored_params, model.parameters())
optimizer_ft = optim.SGD([
{'params': base_params, 'lr': lr},
{'params': model.model.fc.parameters(), 'lr': lr*10},
{'params': model.classifier.parameters(), 'lr': lr*10}
], weight_decay=5e-4, momentum=0.9, nesterov=True)
# Decay LR by a factor of 0.1 every 20 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=20, gamma=0.1)
def save_network(network, epoch_label, is_best = False):
if is_best:
save_path = checkpoint_path + suffix + '_epoch_best.t'
else:
save_path = checkpoint_path + suffix + '_epoch_{}.t'.format(epoch_label)
torch.save(network.state_dict(), save_path)
def sigmoid_rampup(current, rampup_length):
"""Exponential rampup from https://arxiv.org/abs/1610.02242"""
if rampup_length == 0:
return 1.0
else:
current = np.clip(current, 0.0, rampup_length)
phase = 1.0 - current / rampup_length
w = float(np.exp(-2.0 * phase * phase))
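        # cap the ramp-up weight at 0.5, presumably so the refined labels never outweigh the original ones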
return min(w,0.5)
def train_model(model, criterion, optimizer_ft, scheduler, epoch):
scheduler.step()
lambda1 = sigmoid_rampup(epoch, args.LabelWt)
train_loss = AverageMeter()
data_time = AverageMeter()
batch_time = AverageMeter()
model.train()
correct = 0
total = 0
end = time.time()
for batch_idx, (inputs, targets, weights) in enumerate(dataloaders_train):
if use_gpu:
inputs = Variable(inputs.cuda())
targets = Variable(targets.cuda())
weights = Variable(weights.cuda())
data_time.update(time.time() - end)
optimizer_ft.zero_grad()
outputs = model(inputs)
        if args.stage2:
            loss = criterion(outputs, targets, weights)
        elif args.LabelWt > 0:
            loss = criterion(outputs, targets, lambda1)
        else:
            loss = criterion(outputs, targets)
loss.backward()
optimizer_ft.step()
train_loss.update(loss.item(), inputs.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
_, predicted = outputs.max(1)
correct += predicted.eq(targets).sum().item()
total += inputs.size(0)
if batch_idx%10==0:
print('Epoch: [{}][{}/{}] '
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'Data: {data_time.val:.3f} ({data_time.avg:.3f}) '
'Loss: {train_loss.val:.4f} ({train_loss.avg:.4f}) '
'Accu: {:.2f}'.format(
epoch, batch_idx, len(dataloaders_train),100.*correct/total, batch_time=batch_time, data_time=data_time, train_loss=train_loss))
writer.add_scalar('training acc (train)', 100.*correct/total, epoch)
writer.add_scalar('loss', train_loss.avg, epoch)
for epoch in range(start_epoch, start_epoch+41):
# training
print('Start Training..........')
train_model(model, criterion, optimizer_ft, exp_lr_scheduler, epoch)
# evaluation
if epoch%test_epoch ==0:
model.eval() # Set model to evaluate mode
start = time.time()
cmc, mAP = get_test_acc(model, test_dataset, dataloaders_test, use_gpu, max_rank=10)
if cmc[0] > best_acc:
best_epoch = epoch
best_acc = cmc[0]
save_network(model, epoch, is_best = True)
print('Epoch {}: R1:{:.4%} R5:{:.4%} R10:{:.4%} mAP:{:.4%} (Best Epoch[{}])'.format(
epoch, cmc[0],cmc[4],cmc[9], mAP ,best_epoch))
print('Epoch {}: R1:{:.4%} R5:{:.4%} R10:{:.4%} mAP:{:.4%} (Best Epoch[{}])'.format(
epoch, cmc[0],cmc[4],cmc[9], mAP ,best_epoch), file = test_log_file)
test_log_file.flush()
print('Evaluation time: {}'.format(time.time()-start))
# if epoch%20==0:
# save_network(model, epoch, is_best = False)
| 38.755776
| 154
| 0.664311
|
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torchvision import datasets, models, transforms
from tensorboardX import SummaryWriter
import sys
import json
import scipy
import os, time
import argparse
import numpy as np
import torchvision
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from PIL import Image
from shutil import copyfile
from model import ft_net
from test_eval_cython import get_test_acc, extr_fea_train
from utils import *
import loader, loss
import pdb
version = torch.__version__
sys.stdout = Logger(log_dir + suffix + '_os.txt')
str_ids = args.gpu.split(',')
gpu_ids = []
for str_id in str_ids:
gid = int(str_id)
if gid >=0:
gpu_ids.append(gid)
if len(gpu_ids)>0:
torch.cuda.set_device(gpu_ids[0])
print ('using gpu: {}'.format(gpu_ids))
np.save(label_file, pre_pids)
train_dataset.transform = transform_bak
all_weights = all_weights.astype(np.float32)
for i in range(len(trainLabels_nsy)):
train_dataset.imgs[i] = (train_dataset.imgs[i][0], int(pre_pids[i]), all_weights[i])
else:
print('Setting same weights for all the instances...')
for i in range(len(trainLabels_nsy)):
train_dataset.imgs[i] = (train_dataset.imgs[i][0], trainLabels_nsy[i],1)
dataloaders_train = torch.utils.data.DataLoader(train_dataset, batch_size=args.batchsize, shuffle=True, num_workers=8)
test_dataset = {x: datasets.ImageFolder( os.path.join(data_dir,x) ,test_transform) for x in ['gallery','query']}
dataloaders_test = {x: torch.utils.data.DataLoader(test_dataset[x], batch_size=args.batchsize, shuffle=False, num_workers=8) for x in ['gallery','query']}
if args.stage2:
    criterion = loss.InstanceWeightLoss(weighted = 1)
elif args.LabelWt > 0:
    criterion = loss.LabelRefineLoss(lambda1=args.LabelWt)
else:
    criterion = nn.CrossEntropyLoss()
ignored_params = list(map(id, model.model.fc.parameters() )) + list(map(id, model.classifier.parameters() ))
base_params = filter(lambda p: id(p) not in ignored_params, model.parameters())
optimizer_ft = optim.SGD([
{'params': base_params, 'lr': lr},
{'params': model.model.fc.parameters(), 'lr': lr*10},
{'params': model.classifier.parameters(), 'lr': lr*10}
], weight_decay=5e-4, momentum=0.9, nesterov=True)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=20, gamma=0.1)
def save_network(network, epoch_label, is_best = False):
if is_best:
save_path = checkpoint_path + suffix + '_epoch_best.t'
else:
save_path = checkpoint_path + suffix + '_epoch_{}.t'.format(epoch_label)
torch.save(network.state_dict(), save_path)
def sigmoid_rampup(current, rampup_length):
if rampup_length == 0:
return 1.0
else:
current = np.clip(current, 0.0, rampup_length)
phase = 1.0 - current / rampup_length
w = float(np.exp(-2.0 * phase * phase))
return min(w,0.5)
def train_model(model, criterion, optimizer_ft, scheduler, epoch):
scheduler.step()
lambda1 = sigmoid_rampup(epoch, args.LabelWt)
train_loss = AverageMeter()
data_time = AverageMeter()
batch_time = AverageMeter()
model.train()
correct = 0
total = 0
end = time.time()
for batch_idx, (inputs, targets, weights) in enumerate(dataloaders_train):
if use_gpu:
inputs = Variable(inputs.cuda())
targets = Variable(targets.cuda())
weights = Variable(weights.cuda())
data_time.update(time.time() - end)
optimizer_ft.zero_grad()
outputs = model(inputs)
        if args.stage2:
            loss = criterion(outputs, targets, weights)
        elif args.LabelWt > 0:
            loss = criterion(outputs, targets, lambda1)
        else:
            loss = criterion(outputs, targets)
loss.backward()
optimizer_ft.step()
train_loss.update(loss.item(), inputs.size(0))
batch_time.update(time.time() - end)
end = time.time()
_, predicted = outputs.max(1)
correct += predicted.eq(targets).sum().item()
total += inputs.size(0)
if batch_idx%10==0:
print('Epoch: [{}][{}/{}] '
'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) '
'Data: {data_time.val:.3f} ({data_time.avg:.3f}) '
'Loss: {train_loss.val:.4f} ({train_loss.avg:.4f}) '
'Accu: {:.2f}'.format(
epoch, batch_idx, len(dataloaders_train),100.*correct/total, batch_time=batch_time, data_time=data_time, train_loss=train_loss))
writer.add_scalar('training acc (train)', 100.*correct/total, epoch)
writer.add_scalar('loss', train_loss.avg, epoch)
for epoch in range(start_epoch, start_epoch+41):
print('Start Training..........')
train_model(model, criterion, optimizer_ft, exp_lr_scheduler, epoch)
if epoch%test_epoch ==0:
model.eval()
start = time.time()
cmc, mAP = get_test_acc(model, test_dataset, dataloaders_test, use_gpu, max_rank=10)
if cmc[0] > best_acc:
best_epoch = epoch
best_acc = cmc[0]
save_network(model, epoch, is_best = True)
print('Epoch {}: R1:{:.4%} R5:{:.4%} R10:{:.4%} mAP:{:.4%} (Best Epoch[{}])'.format(
epoch, cmc[0],cmc[4],cmc[9], mAP ,best_epoch))
print('Epoch {}: R1:{:.4%} R5:{:.4%} R10:{:.4%} mAP:{:.4%} (Best Epoch[{}])'.format(
epoch, cmc[0],cmc[4],cmc[9], mAP ,best_epoch), file = test_log_file)
test_log_file.flush()
print('Evaluation time: {}'.format(time.time()-start))
| true
| true
|
79080274d654c3494c58716d1acbc6511f150845
| 2,436
|
py
|
Python
|
tools/mo/openvino/tools/mo/front/kaldi/extractors/tdnncomponent_ext.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 1,127
|
2018-10-15T14:36:58.000Z
|
2020-04-20T09:29:44.000Z
|
tools/mo/openvino/tools/mo/front/kaldi/extractors/tdnncomponent_ext.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 439
|
2018-10-20T04:40:35.000Z
|
2020-04-19T05:56:25.000Z
|
tools/mo/openvino/tools/mo/front/kaldi/extractors/tdnncomponent_ext.py
|
ryanloney/openvino-1
|
4e0a740eb3ee31062ba0df88fcf438564f67edb7
|
[
"Apache-2.0"
] | 414
|
2018-10-17T05:53:46.000Z
|
2020-04-16T17:29:53.000Z
|
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.kaldi.loader.utils import read_binary_bool_token, read_binary_integer32_token, collect_until_token, \
read_binary_float_token
from openvino.tools.mo.front.kaldi.utils import read_binary_vector, read_binary_matrix
from openvino.tools.mo.ops.tdnncomponent import TdnnComponent
class TdnnComponentFrontExtractor(FrontExtractorOp):
op = 'tdnncomponent'
enabled = True
@classmethod
def extract(cls, node):
pb = node.parameters
collect_until_token(pb, b'<MaxChange>')
max_change = read_binary_float_token(pb)
collect_until_token(pb, b'<L2Regularize>')
collect_until_token(pb, b'<LearningRate>')
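        # the <L2Regularize> and <LearningRate> values are training-only, so they are skipped without being read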
collect_until_token(pb, b'<TimeOffsets>')
time_offsets = read_binary_vector(pb, False, np.int32)
collect_until_token(pb, b'<LinearParams>')
weights, weights_shape = read_binary_matrix(pb)
collect_until_token(pb, b'<BiasParams>')
bias_params = read_binary_vector(pb)
collect_until_token(pb, b'<OrthonormalConstraint>')
        orthonormal_constraint = read_binary_float_token(pb)  # used only during training
collect_until_token(pb, b'<UseNaturalGradient>')
        use_natural_grad = read_binary_bool_token(pb)  # used only during training
collect_until_token(pb, b'<NumSamplesHistory>')
num_samples_hist = read_binary_float_token(pb)
collect_until_token(pb, b'<AlphaInOut>')
alpha_in_out = read_binary_float_token(pb), read_binary_float_token(pb) # for training, usually (4, 4)
# according to Kaldi documentation http://kaldi-asr.org/doc/classkaldi_1_1nnet3_1_1TdnnComponent.html#details
# it looks like it's used only during training (but not 100% sure)
collect_until_token(pb, b'<RankInOut>')
rank_in_out = read_binary_integer32_token(pb), read_binary_integer32_token(pb)
biases = mo_array(bias_params) if len(bias_params) != 0 else None
attrs = {
'weights': np.reshape(weights, weights_shape),
'biases': biases,
'time_offsets': time_offsets,
}
TdnnComponent.update_node_stat(node, attrs)
return cls.enabled
| 40.6
| 130
| 0.719212
|
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import mo_array
from openvino.tools.mo.front.extractor import FrontExtractorOp
from openvino.tools.mo.front.kaldi.loader.utils import read_binary_bool_token, read_binary_integer32_token, collect_until_token, \
read_binary_float_token
from openvino.tools.mo.front.kaldi.utils import read_binary_vector, read_binary_matrix
from openvino.tools.mo.ops.tdnncomponent import TdnnComponent
class TdnnComponentFrontExtractor(FrontExtractorOp):
op = 'tdnncomponent'
enabled = True
@classmethod
def extract(cls, node):
pb = node.parameters
collect_until_token(pb, b'<MaxChange>')
max_change = read_binary_float_token(pb)
collect_until_token(pb, b'<L2Regularize>')
collect_until_token(pb, b'<LearningRate>')
collect_until_token(pb, b'<TimeOffsets>')
time_offsets = read_binary_vector(pb, False, np.int32)
collect_until_token(pb, b'<LinearParams>')
weights, weights_shape = read_binary_matrix(pb)
collect_until_token(pb, b'<BiasParams>')
bias_params = read_binary_vector(pb)
collect_until_token(pb, b'<OrthonormalConstraint>')
orthonormal_constraint = read_binary_float_token(pb)
collect_until_token(pb, b'<UseNaturalGradient>')
use_natural_grad = read_binary_bool_token(pb)
collect_until_token(pb, b'<NumSamplesHistory>')
num_samples_hist = read_binary_float_token(pb)
collect_until_token(pb, b'<AlphaInOut>')
alpha_in_out = read_binary_float_token(pb), read_binary_float_token(pb)
collect_until_token(pb, b'<RankInOut>')
rank_in_out = read_binary_integer32_token(pb), read_binary_integer32_token(pb)
biases = mo_array(bias_params) if len(bias_params) != 0 else None
attrs = {
'weights': np.reshape(weights, weights_shape),
'biases': biases,
'time_offsets': time_offsets,
}
TdnnComponent.update_node_stat(node, attrs)
return cls.enabled
| true
| true
|
790802f077454ad281ac4d77e36901e0b7c8bf8b
| 70,452
|
py
|
Python
|
rasa/nlu/classifiers/diet_classifier.py
|
mukulbalodi/rasa
|
3126ef1148c165f2402f3c7203138d429e46c68c
|
[
"Apache-2.0"
] | null | null | null |
rasa/nlu/classifiers/diet_classifier.py
|
mukulbalodi/rasa
|
3126ef1148c165f2402f3c7203138d429e46c68c
|
[
"Apache-2.0"
] | null | null | null |
rasa/nlu/classifiers/diet_classifier.py
|
mukulbalodi/rasa
|
3126ef1148c165f2402f3c7203138d429e46c68c
|
[
"Apache-2.0"
] | 1
|
2022-02-22T12:35:19.000Z
|
2022-02-22T12:35:19.000Z
|
from __future__ import annotations
import copy
import logging
from collections import defaultdict
from pathlib import Path
from rasa.nlu.featurizers.featurizer import Featurizer
import numpy as np
import scipy.sparse
import tensorflow as tf
from typing import Any, Dict, List, Optional, Text, Tuple, Union, Type
from rasa.engine.graph import ExecutionContext, GraphComponent
from rasa.engine.recipes.default_recipe import DefaultV1Recipe
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.nlu.extractors.extractor import EntityExtractorMixin
from rasa.nlu.classifiers.classifier import IntentClassifier
import rasa.shared.utils.io
import rasa.utils.io as io_utils
import rasa.nlu.utils.bilou_utils as bilou_utils
from rasa.shared.constants import DIAGNOSTIC_DATA
from rasa.nlu.extractors.extractor import EntityTagSpec
from rasa.nlu.classifiers import LABEL_RANKING_LENGTH
from rasa.utils import train_utils
from rasa.utils.tensorflow import rasa_layers
from rasa.utils.tensorflow.models import RasaModel, TransformerRasaModel
from rasa.utils.tensorflow.model_data import (
RasaModelData,
FeatureSignature,
FeatureArray,
)
from rasa.nlu.constants import TOKENS_NAMES, DEFAULT_TRANSFORMER_SIZE
from rasa.shared.nlu.constants import (
SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
TEXT,
INTENT,
INTENT_RESPONSE_KEY,
ENTITIES,
ENTITY_ATTRIBUTE_TYPE,
ENTITY_ATTRIBUTE_GROUP,
ENTITY_ATTRIBUTE_ROLE,
NO_ENTITY_TAG,
SPLIT_ENTITIES_BY_COMMA,
)
from rasa.shared.exceptions import InvalidConfigException
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.utils.tensorflow.constants import (
LABEL,
IDS,
HIDDEN_LAYERS_SIZES,
RENORMALIZE_CONFIDENCES,
SHARE_HIDDEN_LAYERS,
TRANSFORMER_SIZE,
NUM_TRANSFORMER_LAYERS,
NUM_HEADS,
BATCH_SIZES,
BATCH_STRATEGY,
EPOCHS,
RANDOM_SEED,
LEARNING_RATE,
RANKING_LENGTH,
LOSS_TYPE,
SIMILARITY_TYPE,
NUM_NEG,
SPARSE_INPUT_DROPOUT,
DENSE_INPUT_DROPOUT,
MASKED_LM,
ENTITY_RECOGNITION,
TENSORBOARD_LOG_DIR,
INTENT_CLASSIFICATION,
EVAL_NUM_EXAMPLES,
EVAL_NUM_EPOCHS,
UNIDIRECTIONAL_ENCODER,
DROP_RATE,
DROP_RATE_ATTENTION,
CONNECTION_DENSITY,
NEGATIVE_MARGIN_SCALE,
REGULARIZATION_CONSTANT,
SCALE_LOSS,
USE_MAX_NEG_SIM,
MAX_NEG_SIM,
MAX_POS_SIM,
EMBEDDING_DIMENSION,
BILOU_FLAG,
KEY_RELATIVE_ATTENTION,
VALUE_RELATIVE_ATTENTION,
MAX_RELATIVE_POSITION,
AUTO,
BALANCED,
CROSS_ENTROPY,
TENSORBOARD_LOG_LEVEL,
CONCAT_DIMENSION,
FEATURIZERS,
CHECKPOINT_MODEL,
SEQUENCE,
SENTENCE,
SEQUENCE_LENGTH,
DENSE_DIMENSION,
MASK,
CONSTRAIN_SIMILARITIES,
MODEL_CONFIDENCE,
SOFTMAX,
)
logger = logging.getLogger(__name__)
SPARSE = "sparse"
DENSE = "dense"
LABEL_KEY = LABEL
LABEL_SUB_KEY = IDS
POSSIBLE_TAGS = [ENTITY_ATTRIBUTE_TYPE, ENTITY_ATTRIBUTE_ROLE, ENTITY_ATTRIBUTE_GROUP]
@DefaultV1Recipe.register(
[
DefaultV1Recipe.ComponentType.INTENT_CLASSIFIER,
DefaultV1Recipe.ComponentType.ENTITY_EXTRACTOR,
],
is_trainable=True,
)
class DIETClassifier(GraphComponent, IntentClassifier, EntityExtractorMixin):
"""A multi-task model for intent classification and entity extraction.
DIET is Dual Intent and Entity Transformer.
The architecture is based on a transformer which is shared for both tasks.
A sequence of entity labels is predicted through a Conditional Random Field (CRF)
tagging layer on top of the transformer output sequence corresponding to the
input sequence of tokens. The transformer output for the ``__CLS__`` token and
intent labels are embedded into a single semantic vector space. We use the
dot-product loss to maximize the similarity with the target label and minimize
similarities with negative samples.
"""
@classmethod
def required_components(cls) -> List[Type]:
"""Components that should be included in the pipeline before this component."""
return [Featurizer]
@staticmethod
def get_default_config() -> Dict[Text, Any]:
"""The component's default config (see parent class for full docstring)."""
# please make sure to update the docs when changing a default parameter
return {
# ## Architecture of the used neural network
# Hidden layer sizes for layers before the embedding layers for user message
# and labels.
# The number of hidden layers is equal to the length of the corresponding
# list.
HIDDEN_LAYERS_SIZES: {TEXT: [], LABEL: []},
# Whether to share the hidden layer weights between user message and labels.
SHARE_HIDDEN_LAYERS: False,
# Number of units in transformer
TRANSFORMER_SIZE: DEFAULT_TRANSFORMER_SIZE,
# Number of transformer layers
NUM_TRANSFORMER_LAYERS: 2,
# Number of attention heads in transformer
NUM_HEADS: 4,
# If 'True' use key relative embeddings in attention
KEY_RELATIVE_ATTENTION: False,
# If 'True' use value relative embeddings in attention
VALUE_RELATIVE_ATTENTION: False,
# Max position for relative embeddings. Only in effect if key- or value
# relative attention are turned on
MAX_RELATIVE_POSITION: 5,
# Use a unidirectional or bidirectional encoder.
UNIDIRECTIONAL_ENCODER: False,
# ## Training parameters
# Initial and final batch sizes:
# Batch size will be linearly increased for each epoch.
BATCH_SIZES: [64, 256],
# Strategy used when creating batches.
# Can be either 'sequence' or 'balanced'.
BATCH_STRATEGY: BALANCED,
# Number of epochs to train
EPOCHS: 300,
# Set random seed to any 'int' to get reproducible results
RANDOM_SEED: None,
# Initial learning rate for the optimizer
LEARNING_RATE: 0.001,
# ## Parameters for embeddings
# Dimension size of embedding vectors
EMBEDDING_DIMENSION: 20,
# Dense dimension to use for sparse features.
DENSE_DIMENSION: {TEXT: 128, LABEL: 20},
# Default dimension to use for concatenating sequence and sentence features.
CONCAT_DIMENSION: {TEXT: 128, LABEL: 20},
# The number of incorrect labels. The algorithm will minimize
# their similarity to the user input during training.
NUM_NEG: 20,
# Type of similarity measure to use, either 'auto' or 'cosine' or 'inner'.
SIMILARITY_TYPE: AUTO,
# The type of the loss function, either 'cross_entropy' or 'margin'.
LOSS_TYPE: CROSS_ENTROPY,
# Number of top intents for which confidences should be reported.
# Set to 0 if confidences for all intents should be reported.
RANKING_LENGTH: LABEL_RANKING_LENGTH,
# Indicates how similar the algorithm should try to make embedding vectors
# for correct labels.
# Should be 0.0 < ... < 1.0 for 'cosine' similarity type.
MAX_POS_SIM: 0.8,
# Maximum negative similarity for incorrect labels.
# Should be -1.0 < ... < 1.0 for 'cosine' similarity type.
MAX_NEG_SIM: -0.4,
# If 'True' the algorithm only minimizes maximum similarity over
# incorrect intent labels, used only if 'loss_type' is set to 'margin'.
USE_MAX_NEG_SIM: True,
# If 'True' scale loss inverse proportionally to the confidence
# of the correct prediction
SCALE_LOSS: False,
# ## Regularization parameters
# The scale of regularization
REGULARIZATION_CONSTANT: 0.002,
# The scale of how important is to minimize the maximum similarity
# between embeddings of different labels,
# used only if 'loss_type' is set to 'margin'.
NEGATIVE_MARGIN_SCALE: 0.8,
# Dropout rate for encoder
DROP_RATE: 0.2,
# Dropout rate for attention
DROP_RATE_ATTENTION: 0,
# Fraction of trainable weights in internal layers.
CONNECTION_DENSITY: 0.2,
# If 'True' apply dropout to sparse input tensors
SPARSE_INPUT_DROPOUT: True,
# If 'True' apply dropout to dense input tensors
DENSE_INPUT_DROPOUT: True,
# ## Evaluation parameters
            # How often to calculate validation accuracy.
# Small values may hurt performance.
EVAL_NUM_EPOCHS: 20,
# How many examples to use for hold out validation set
# Large values may hurt performance, e.g. model accuracy.
# Set to 0 for no validation.
EVAL_NUM_EXAMPLES: 0,
# ## Model config
# If 'True' intent classification is trained and intent predicted.
INTENT_CLASSIFICATION: True,
# If 'True' named entity recognition is trained and entities predicted.
ENTITY_RECOGNITION: True,
# If 'True' random tokens of the input message will be masked and the model
# should predict those tokens.
MASKED_LM: False,
# 'BILOU_flag' determines whether to use BILOU tagging or not.
# If set to 'True' labelling is more rigorous, however more
# examples per entity are required.
# Rule of thumb: you should have more than 100 examples per entity.
BILOU_FLAG: True,
# If you want to use tensorboard to visualize training and validation
# metrics, set this option to a valid output directory.
TENSORBOARD_LOG_DIR: None,
# Define when training metrics for tensorboard should be logged.
# Either after every epoch or for every training step.
# Valid values: 'epoch' and 'batch'
TENSORBOARD_LOG_LEVEL: "epoch",
# Perform model checkpointing
CHECKPOINT_MODEL: False,
# Specify what features to use as sequence and sentence features
# By default all features in the pipeline are used.
FEATURIZERS: [],
# Split entities by comma, this makes sense e.g. for a list of ingredients
            # in a recipe, but it doesn't make sense for the parts of an address
SPLIT_ENTITIES_BY_COMMA: True,
# If 'True' applies sigmoid on all similarity terms and adds
# it to the loss function to ensure that similarity values are
# approximately bounded. Used inside cross-entropy loss only.
CONSTRAIN_SIMILARITIES: False,
# Model confidence to be returned during inference. Currently, the only
# possible value is `softmax`.
MODEL_CONFIDENCE: SOFTMAX,
# Determines whether the confidences of the chosen top intents should be
# renormalized so that they sum up to 1. By default, we do not renormalize
# and return the confidences for the top intents as is.
# Note that renormalization only makes sense if confidences are generated
# via `softmax`.
RENORMALIZE_CONFIDENCES: False,
}
def __init__(
self,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
index_label_id_mapping: Optional[Dict[int, Text]] = None,
entity_tag_specs: Optional[List[EntityTagSpec]] = None,
model: Optional[RasaModel] = None,
sparse_feature_sizes: Optional[Dict[Text, Dict[Text, List[int]]]] = None,
) -> None:
"""Declare instance variables with default values."""
if EPOCHS not in config:
rasa.shared.utils.io.raise_warning(
f"Please configure the number of '{EPOCHS}' in your configuration file."
f" We will change the default value of '{EPOCHS}' in the future to 1. "
)
self.component_config = config
self._model_storage = model_storage
self._resource = resource
self._execution_context = execution_context
self._check_config_parameters()
# transform numbers to labels
self.index_label_id_mapping = index_label_id_mapping or {}
self._entity_tag_specs = entity_tag_specs
self.model = model
self.tmp_checkpoint_dir = None
if self.component_config[CHECKPOINT_MODEL]:
self.tmp_checkpoint_dir = Path(rasa.utils.io.create_temporary_directory())
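            # checkpoints go to a temporary directory and are folded into the persisted model in `persist()`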
self._label_data: Optional[RasaModelData] = None
self._data_example: Optional[Dict[Text, Dict[Text, List[FeatureArray]]]] = None
self.split_entities_config = rasa.utils.train_utils.init_split_entities(
self.component_config[SPLIT_ENTITIES_BY_COMMA],
SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
)
self.finetune_mode = self._execution_context.is_finetuning
self._sparse_feature_sizes = sparse_feature_sizes
# init helpers
def _check_masked_lm(self) -> None:
if (
self.component_config[MASKED_LM]
and self.component_config[NUM_TRANSFORMER_LAYERS] == 0
):
raise ValueError(
f"If number of transformer layers is 0, "
f"'{MASKED_LM}' option should be 'False'."
)
def _check_share_hidden_layers_sizes(self) -> None:
if self.component_config.get(SHARE_HIDDEN_LAYERS):
first_hidden_layer_sizes = next(
iter(self.component_config[HIDDEN_LAYERS_SIZES].values())
)
# check that all hidden layer sizes are the same
identical_hidden_layer_sizes = all(
current_hidden_layer_sizes == first_hidden_layer_sizes
for current_hidden_layer_sizes in self.component_config[
HIDDEN_LAYERS_SIZES
].values()
)
if not identical_hidden_layer_sizes:
raise ValueError(
f"If hidden layer weights are shared, "
f"{HIDDEN_LAYERS_SIZES} must coincide."
)
def _check_config_parameters(self) -> None:
self.component_config = train_utils.check_deprecated_options(
self.component_config
)
self._check_masked_lm()
self._check_share_hidden_layers_sizes()
self.component_config = train_utils.update_confidence_type(
self.component_config
)
train_utils.validate_configuration_settings(self.component_config)
self.component_config = train_utils.update_similarity_type(
self.component_config
)
self.component_config = train_utils.update_evaluation_parameters(
self.component_config
)
@classmethod
def create(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
) -> DIETClassifier:
"""Creates a new untrained component (see parent class for full docstring)."""
return cls(config, model_storage, resource, execution_context)
@property
def label_key(self) -> Optional[Text]:
"""Return key if intent classification is activated."""
return LABEL_KEY if self.component_config[INTENT_CLASSIFICATION] else None
@property
def label_sub_key(self) -> Optional[Text]:
"""Return sub key if intent classification is activated."""
return LABEL_SUB_KEY if self.component_config[INTENT_CLASSIFICATION] else None
@staticmethod
def model_class() -> Type[RasaModel]:
return DIET
# training data helpers:
@staticmethod
def _label_id_index_mapping(
training_data: TrainingData, attribute: Text
) -> Dict[Text, int]:
"""Create label_id dictionary."""
distinct_label_ids = {
example.get(attribute) for example in training_data.intent_examples
} - {None}
return {
label_id: idx for idx, label_id in enumerate(sorted(distinct_label_ids))
}
@staticmethod
def _invert_mapping(mapping: Dict) -> Dict:
return {value: key for key, value in mapping.items()}
def _create_entity_tag_specs(
self, training_data: TrainingData
) -> List[EntityTagSpec]:
"""Create entity tag specifications with their respective tag id mappings."""
_tag_specs = []
for tag_name in POSSIBLE_TAGS:
if self.component_config[BILOU_FLAG]:
tag_id_index_mapping = bilou_utils.build_tag_id_dict(
training_data, tag_name
)
else:
tag_id_index_mapping = self._tag_id_index_mapping_for(
tag_name, training_data
)
if tag_id_index_mapping:
_tag_specs.append(
EntityTagSpec(
tag_name=tag_name,
tags_to_ids=tag_id_index_mapping,
ids_to_tags=self._invert_mapping(tag_id_index_mapping),
num_tags=len(tag_id_index_mapping),
)
)
return _tag_specs
@staticmethod
def _tag_id_index_mapping_for(
tag_name: Text, training_data: TrainingData
) -> Optional[Dict[Text, int]]:
"""Create mapping from tag name to id."""
if tag_name == ENTITY_ATTRIBUTE_ROLE:
distinct_tags = training_data.entity_roles
elif tag_name == ENTITY_ATTRIBUTE_GROUP:
distinct_tags = training_data.entity_groups
else:
distinct_tags = training_data.entities
distinct_tags = distinct_tags - {NO_ENTITY_TAG} - {None}
if not distinct_tags:
return None
tag_id_dict = {
tag_id: idx for idx, tag_id in enumerate(sorted(distinct_tags), 1)
}
# NO_ENTITY_TAG corresponds to non-entity which should correspond to 0 index
# needed for correct prediction for padding
tag_id_dict[NO_ENTITY_TAG] = 0
return tag_id_dict
@staticmethod
def _find_example_for_label(
label: Text, examples: List[Message], attribute: Text
) -> Optional[Message]:
for ex in examples:
if ex.get(attribute) == label:
return ex
return None
def _check_labels_features_exist(
self, labels_example: List[Message], attribute: Text
) -> bool:
"""Checks if all labels have features set."""
return all(
label_example.features_present(
attribute, self.component_config[FEATURIZERS]
)
for label_example in labels_example
)
def _extract_features(
self, message: Message, attribute: Text
) -> Dict[Text, Union[scipy.sparse.spmatrix, np.ndarray]]:
(
sparse_sequence_features,
sparse_sentence_features,
) = message.get_sparse_features(attribute, self.component_config[FEATURIZERS])
dense_sequence_features, dense_sentence_features = message.get_dense_features(
attribute, self.component_config[FEATURIZERS]
)
if dense_sequence_features is not None and sparse_sequence_features is not None:
if (
dense_sequence_features.features.shape[0]
!= sparse_sequence_features.features.shape[0]
):
raise ValueError(
f"Sequence dimensions for sparse and dense sequence features "
f"don't coincide in '{message.get(TEXT)}'"
f"for attribute '{attribute}'."
)
if dense_sentence_features is not None and sparse_sentence_features is not None:
if (
dense_sentence_features.features.shape[0]
!= sparse_sentence_features.features.shape[0]
):
raise ValueError(
f"Sequence dimensions for sparse and dense sentence features "
f"don't coincide in '{message.get(TEXT)}'"
f"for attribute '{attribute}'."
)
# If we don't use the transformer and we don't want to do entity recognition,
# to speed up training take only the sentence features as feature vector.
# We would not make use of the sequence anyway in this setup. Carrying over
# those features to the actual training process takes quite some time.
if (
self.component_config[NUM_TRANSFORMER_LAYERS] == 0
and not self.component_config[ENTITY_RECOGNITION]
and attribute not in [INTENT, INTENT_RESPONSE_KEY]
):
sparse_sequence_features = None
dense_sequence_features = None
out = {}
if sparse_sentence_features is not None:
out[f"{SPARSE}_{SENTENCE}"] = sparse_sentence_features.features
if sparse_sequence_features is not None:
out[f"{SPARSE}_{SEQUENCE}"] = sparse_sequence_features.features
if dense_sentence_features is not None:
out[f"{DENSE}_{SENTENCE}"] = dense_sentence_features.features
if dense_sequence_features is not None:
out[f"{DENSE}_{SEQUENCE}"] = dense_sequence_features.features
return out
def _check_input_dimension_consistency(self, model_data: RasaModelData) -> None:
"""Checks if features have same dimensionality if hidden layers are shared."""
if self.component_config.get(SHARE_HIDDEN_LAYERS):
num_text_sentence_features = model_data.number_of_units(TEXT, SENTENCE)
num_label_sentence_features = model_data.number_of_units(LABEL, SENTENCE)
num_text_sequence_features = model_data.number_of_units(TEXT, SEQUENCE)
num_label_sequence_features = model_data.number_of_units(LABEL, SEQUENCE)
if (0 < num_text_sentence_features != num_label_sentence_features > 0) or (
0 < num_text_sequence_features != num_label_sequence_features > 0
):
raise ValueError(
"If embeddings are shared text features and label features "
"must coincide. Check the output dimensions of previous components."
)
def _extract_labels_precomputed_features(
self, label_examples: List[Message], attribute: Text = INTENT
) -> Tuple[List[FeatureArray], List[FeatureArray]]:
"""Collects precomputed encodings."""
features = defaultdict(list)
for e in label_examples:
label_features = self._extract_features(e, attribute)
for feature_key, feature_value in label_features.items():
features[feature_key].append(feature_value)
sequence_features = []
sentence_features = []
for feature_name, feature_value in features.items():
if SEQUENCE in feature_name:
sequence_features.append(
FeatureArray(np.array(feature_value), number_of_dimensions=3)
)
else:
sentence_features.append(
FeatureArray(np.array(feature_value), number_of_dimensions=3)
)
return sequence_features, sentence_features
@staticmethod
def _compute_default_label_features(
labels_example: List[Message],
) -> List[FeatureArray]:
"""Computes one-hot representation for the labels."""
logger.debug("No label features found. Computing default label features.")
eye_matrix = np.eye(len(labels_example), dtype=np.float32)
# add sequence dimension to one-hot labels
return [
FeatureArray(
np.array([np.expand_dims(a, 0) for a in eye_matrix]),
number_of_dimensions=3,
)
]
def _create_label_data(
self,
training_data: TrainingData,
label_id_dict: Dict[Text, int],
attribute: Text,
) -> RasaModelData:
"""Create matrix with label_ids encoded in rows as bag of words.
Find a training example for each label and get the encoded features
from the corresponding Message object.
If the features are already computed, fetch them from the message object
else compute a one hot encoding for the label as the feature vector.
"""
# Collect one example for each label
labels_idx_examples = []
for label_name, idx in label_id_dict.items():
label_example = self._find_example_for_label(
label_name, training_data.intent_examples, attribute
)
labels_idx_examples.append((idx, label_example))
# Sort the list of tuples based on label_idx
labels_idx_examples = sorted(labels_idx_examples, key=lambda x: x[0])
labels_example = [example for (_, example) in labels_idx_examples]
# Collect features, precomputed if they exist, else compute on the fly
if self._check_labels_features_exist(labels_example, attribute):
(
sequence_features,
sentence_features,
) = self._extract_labels_precomputed_features(labels_example, attribute)
else:
sequence_features = None
sentence_features = self._compute_default_label_features(labels_example)
label_data = RasaModelData()
label_data.add_features(LABEL, SEQUENCE, sequence_features)
label_data.add_features(LABEL, SENTENCE, sentence_features)
if label_data.does_feature_not_exist(
LABEL, SENTENCE
) and label_data.does_feature_not_exist(LABEL, SEQUENCE):
raise ValueError(
"No label features are present. Please check your configuration file."
)
label_ids = np.array([idx for (idx, _) in labels_idx_examples])
# explicitly add last dimension to label_ids
# to track correctly dynamic sequences
label_data.add_features(
LABEL_KEY,
LABEL_SUB_KEY,
[FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],
)
label_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)
return label_data
def _use_default_label_features(self, label_ids: np.ndarray) -> List[FeatureArray]:
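        """Looks up the precomputed default (one-hot) features for the given label ids."""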
feature_arrays: List[FeatureArray] = self._label_data.get(LABEL, SENTENCE)
all_label_features = feature_arrays[0]
return [
FeatureArray(
np.array([all_label_features[label_id] for label_id in label_ids]),
number_of_dimensions=all_label_features.number_of_dimensions,
)
]
def _create_model_data(
self,
training_data: List[Message],
label_id_dict: Optional[Dict[Text, int]] = None,
label_attribute: Optional[Text] = None,
training: bool = True,
) -> RasaModelData:
"""Prepare data for training and create a RasaModelData object."""
from rasa.utils.tensorflow import model_data_utils
attributes_to_consider = [TEXT]
if training and self.component_config[INTENT_CLASSIFICATION]:
# we don't have any intent labels during prediction, just add them during
# training
attributes_to_consider.append(label_attribute)
if (
training
and self.component_config[ENTITY_RECOGNITION]
and self._entity_tag_specs
):
# Add entities as labels only during training and only if there was
# training data added for entities with DIET configured to predict entities.
attributes_to_consider.append(ENTITIES)
if training and label_attribute is not None:
# only use those training examples that have the label_attribute set
# during training
training_data = [
example for example in training_data if label_attribute in example.data
]
training_data = [
message
for message in training_data
if message.features_present(
attribute=TEXT, featurizers=self.component_config.get(FEATURIZERS)
)
]
if not training_data:
# no training data are present to train
return RasaModelData()
(
features_for_examples,
sparse_feature_sizes,
) = model_data_utils.featurize_training_examples(
training_data,
attributes_to_consider,
entity_tag_specs=self._entity_tag_specs,
featurizers=self.component_config[FEATURIZERS],
bilou_tagging=self.component_config[BILOU_FLAG],
)
attribute_data, _ = model_data_utils.convert_to_data_format(
features_for_examples, consider_dialogue_dimension=False
)
model_data = RasaModelData(
label_key=self.label_key, label_sub_key=self.label_sub_key
)
model_data.add_data(attribute_data)
model_data.add_lengths(TEXT, SEQUENCE_LENGTH, TEXT, SEQUENCE)
# Current implementation doesn't yet account for updating sparse
# feature sizes of label attributes. That's why we remove them.
sparse_feature_sizes = self._remove_label_sparse_feature_sizes(
sparse_feature_sizes=sparse_feature_sizes, label_attribute=label_attribute
)
model_data.add_sparse_feature_sizes(sparse_feature_sizes)
self._add_label_features(
model_data, training_data, label_attribute, label_id_dict, training
)
# make sure all keys are in the same order during training and prediction
# as we rely on the order of key and sub-key when constructing the actual
# tensors from the model data
model_data.sort()
return model_data
@staticmethod
def _remove_label_sparse_feature_sizes(
sparse_feature_sizes: Dict[Text, Dict[Text, List[int]]],
label_attribute: Optional[Text] = None,
) -> Dict[Text, Dict[Text, List[int]]]:
if label_attribute in sparse_feature_sizes:
del sparse_feature_sizes[label_attribute]
return sparse_feature_sizes
def _add_label_features(
self,
model_data: RasaModelData,
training_data: List[Message],
label_attribute: Text,
label_id_dict: Dict[Text, int],
training: bool = True,
) -> None:
label_ids = []
if training and self.component_config[INTENT_CLASSIFICATION]:
for example in training_data:
if example.get(label_attribute):
label_ids.append(label_id_dict[example.get(label_attribute)])
# explicitly add last dimension to label_ids
# to track correctly dynamic sequences
model_data.add_features(
LABEL_KEY,
LABEL_SUB_KEY,
[FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],
)
if (
label_attribute
and model_data.does_feature_not_exist(label_attribute, SENTENCE)
and model_data.does_feature_not_exist(label_attribute, SEQUENCE)
):
# no label features are present, get default features from _label_data
model_data.add_features(
LABEL, SENTENCE, self._use_default_label_features(np.array(label_ids))
)
# as label_attribute can have different values, e.g. INTENT or RESPONSE,
# copy over the features to the LABEL key to make
# it easier to access the label features inside the model itself
model_data.update_key(label_attribute, SENTENCE, LABEL, SENTENCE)
model_data.update_key(label_attribute, SEQUENCE, LABEL, SEQUENCE)
model_data.update_key(label_attribute, MASK, LABEL, MASK)
model_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)
# train helpers
def preprocess_train_data(self, training_data: TrainingData) -> RasaModelData:
"""Prepares data for training.
Performs sanity checks on training data, extracts encodings for labels.
"""
if self.component_config[BILOU_FLAG]:
bilou_utils.apply_bilou_schema(training_data)
label_id_index_mapping = self._label_id_index_mapping(
training_data, attribute=INTENT
)
if not label_id_index_mapping:
# no labels are present to train
return RasaModelData()
self.index_label_id_mapping = self._invert_mapping(label_id_index_mapping)
self._label_data = self._create_label_data(
training_data, label_id_index_mapping, attribute=INTENT
)
self._entity_tag_specs = self._create_entity_tag_specs(training_data)
label_attribute = (
INTENT if self.component_config[INTENT_CLASSIFICATION] else None
)
model_data = self._create_model_data(
training_data.nlu_examples,
label_id_index_mapping,
label_attribute=label_attribute,
)
self._check_input_dimension_consistency(model_data)
return model_data
@staticmethod
def _check_enough_labels(model_data: RasaModelData) -> bool:
return len(np.unique(model_data.get(LABEL_KEY, LABEL_SUB_KEY))) >= 2
def train(self, training_data: TrainingData) -> Resource:
"""Train the embedding intent classifier on a data set."""
model_data = self.preprocess_train_data(training_data)
if model_data.is_empty():
logger.debug(
f"Cannot train '{self.__class__.__name__}'. No data was provided. "
f"Skipping training of the classifier."
)
return self._resource
if not self.model and self.finetune_mode:
raise rasa.shared.exceptions.InvalidParameterException(
f"{self.__class__.__name__} was instantiated "
f"with `model=None` and `finetune_mode=True`. "
f"This is not a valid combination as the component "
f"needs an already instantiated and trained model "
f"to continue training in finetune mode."
)
if self.component_config.get(INTENT_CLASSIFICATION):
if not self._check_enough_labels(model_data):
logger.error(
f"Cannot train '{self.__class__.__name__}'. "
f"Need at least 2 different intent classes. "
f"Skipping training of classifier."
)
return self._resource
if self.component_config.get(ENTITY_RECOGNITION):
self.check_correct_entity_annotations(training_data)
# keep one example for persisting and loading
self._data_example = model_data.first_data_example()
if not self.finetune_mode:
# No pre-trained model to load from. Create a new instance of the model.
self.model = self._instantiate_model_class(model_data)
self.model.compile(
optimizer=tf.keras.optimizers.Adam(self.component_config[LEARNING_RATE])
)
else:
self.model.adjust_for_incremental_training(
data_example=self._data_example,
new_sparse_feature_sizes=model_data.get_sparse_feature_sizes(),
old_sparse_feature_sizes=self._sparse_feature_sizes,
)
self._sparse_feature_sizes = model_data.get_sparse_feature_sizes()
data_generator, validation_data_generator = train_utils.create_data_generators(
model_data,
self.component_config[BATCH_SIZES],
self.component_config[EPOCHS],
self.component_config[BATCH_STRATEGY],
self.component_config[EVAL_NUM_EXAMPLES],
self.component_config[RANDOM_SEED],
)
callbacks = train_utils.create_common_callbacks(
self.component_config[EPOCHS],
self.component_config[TENSORBOARD_LOG_DIR],
self.component_config[TENSORBOARD_LOG_LEVEL],
self.tmp_checkpoint_dir,
)
self.model.fit(
data_generator,
epochs=self.component_config[EPOCHS],
validation_data=validation_data_generator,
validation_freq=self.component_config[EVAL_NUM_EPOCHS],
callbacks=callbacks,
verbose=False,
shuffle=False, # we use custom shuffle inside data generator
)
self.persist()
return self._resource
# process helpers
def _predict(
self, message: Message
) -> Optional[Dict[Text, Union[tf.Tensor, Dict[Text, tf.Tensor]]]]:
if self.model is None:
logger.debug(
f"There is no trained model for '{self.__class__.__name__}': The "
f"component is either not trained or didn't receive enough training "
f"data."
)
return None
# create session data from message and convert it into a batch of 1
model_data = self._create_model_data([message], training=False)
if model_data.is_empty():
return None
return self.model.run_inference(model_data)
def _predict_label(
self, predict_out: Optional[Dict[Text, tf.Tensor]]
) -> Tuple[Dict[Text, Any], List[Dict[Text, Any]]]:
"""Predicts the intent of the provided message."""
label: Dict[Text, Any] = {"name": None, "confidence": 0.0}
label_ranking = []
if predict_out is None:
return label, label_ranking
message_sim = predict_out["i_scores"]
message_sim = message_sim.flatten() # sim is a matrix
        # if there are no similarity scores, do not predict a label
if message_sim.size == 0:
return label, label_ranking
# rank the confidences
ranking_length = self.component_config[RANKING_LENGTH]
renormalize = (
self.component_config[RENORMALIZE_CONFIDENCES]
and self.component_config[MODEL_CONFIDENCE] == SOFTMAX
)
ranked_label_indices, message_sim = train_utils.rank_and_mask(
message_sim, ranking_length=ranking_length, renormalize=renormalize
)
# construct the label and ranking
casted_message_sim: List[float] = message_sim.tolist() # np.float to float
top_label_idx = ranked_label_indices[0]
label = {
"name": self.index_label_id_mapping[top_label_idx],
"confidence": casted_message_sim[top_label_idx],
}
ranking = [(idx, casted_message_sim[idx]) for idx in ranked_label_indices]
label_ranking = [
{"name": self.index_label_id_mapping[label_idx], "confidence": score}
for label_idx, score in ranking
]
return label, label_ranking
def _predict_entities(
self, predict_out: Optional[Dict[Text, tf.Tensor]], message: Message
) -> List[Dict]:
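        """Extracts entity predictions from the model output and merges them with any entities already on the message."""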
if predict_out is None:
return []
predicted_tags, confidence_values = train_utils.entity_label_to_tags(
predict_out, self._entity_tag_specs, self.component_config[BILOU_FLAG]
)
entities = self.convert_predictions_into_entities(
message.get(TEXT),
message.get(TOKENS_NAMES[TEXT], []),
predicted_tags,
self.split_entities_config,
confidence_values,
)
entities = self.add_extractor_name(entities)
entities = message.get(ENTITIES, []) + entities
return entities
def process(self, messages: List[Message]) -> List[Message]:
"""Augments the message with intents, entities, and diagnostic data."""
for message in messages:
out = self._predict(message)
if self.component_config[INTENT_CLASSIFICATION]:
label, label_ranking = self._predict_label(out)
message.set(INTENT, label, add_to_output=True)
message.set("intent_ranking", label_ranking, add_to_output=True)
if self.component_config[ENTITY_RECOGNITION]:
entities = self._predict_entities(out, message)
message.set(ENTITIES, entities, add_to_output=True)
if out and self._execution_context.should_add_diagnostic_data:
message.add_diagnostic_data(
self._execution_context.node_name, out.get(DIAGNOSTIC_DATA)
)
return messages
def persist(self) -> None:
"""Persist this model into the passed directory."""
if self.model is None:
return None
with self._model_storage.write_to(self._resource) as model_path:
file_name = self.__class__.__name__
tf_model_file = model_path / f"{file_name}.tf_model"
rasa.shared.utils.io.create_directory_for_file(tf_model_file)
if self.component_config[CHECKPOINT_MODEL] and self.tmp_checkpoint_dir:
self.model.load_weights(self.tmp_checkpoint_dir / "checkpoint.tf_model")
# Save an empty file to flag that this model has been
# produced using checkpointing
checkpoint_marker = model_path / f"{file_name}.from_checkpoint.pkl"
checkpoint_marker.touch()
self.model.save(str(tf_model_file))
io_utils.pickle_dump(
model_path / f"{file_name}.data_example.pkl", self._data_example
)
io_utils.pickle_dump(
model_path / f"{file_name}.sparse_feature_sizes.pkl",
self._sparse_feature_sizes,
)
io_utils.pickle_dump(
model_path / f"{file_name}.label_data.pkl", dict(self._label_data.data)
)
io_utils.json_pickle(
model_path / f"{file_name}.index_label_id_mapping.json",
self.index_label_id_mapping,
)
entity_tag_specs = (
[tag_spec._asdict() for tag_spec in self._entity_tag_specs]
if self._entity_tag_specs
else []
)
rasa.shared.utils.io.dump_obj_as_json_to_file(
model_path / f"{file_name}.entity_tag_specs.json", entity_tag_specs
)
@classmethod
def load(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
**kwargs: Any,
) -> DIETClassifier:
"""Loads a policy from the storage (see parent class for full docstring)."""
try:
with model_storage.read_from(resource) as model_path:
return cls._load(
model_path, config, model_storage, resource, execution_context
)
except ValueError:
logger.debug(
f"Failed to load {cls.__class__.__name__} from model storage. Resource "
f"'{resource.name}' doesn't exist."
)
return cls(config, model_storage, resource, execution_context)
@classmethod
def _load(
cls,
model_path: Path,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
) -> "DIETClassifier":
"""Loads the trained model from the provided directory."""
(
index_label_id_mapping,
entity_tag_specs,
label_data,
data_example,
sparse_feature_sizes,
) = cls._load_from_files(model_path)
config = train_utils.update_confidence_type(config)
config = train_utils.update_similarity_type(config)
model = cls._load_model(
entity_tag_specs,
label_data,
config,
data_example,
model_path,
finetune_mode=execution_context.is_finetuning,
)
return cls(
config=config,
model_storage=model_storage,
resource=resource,
execution_context=execution_context,
index_label_id_mapping=index_label_id_mapping,
entity_tag_specs=entity_tag_specs,
model=model,
sparse_feature_sizes=sparse_feature_sizes,
)
@classmethod
def _load_from_files(
cls, model_path: Path
) -> Tuple[
Dict[int, Text],
List[EntityTagSpec],
RasaModelData,
Dict[Text, Dict[Text, List[FeatureArray]]],
Dict[Text, Dict[Text, List[int]]],
]:
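        """Loads all artifacts that `persist` wrote to the model directory."""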
file_name = cls.__name__
data_example = io_utils.pickle_load(
model_path / f"{file_name}.data_example.pkl"
)
label_data = io_utils.pickle_load(model_path / f"{file_name}.label_data.pkl")
label_data = RasaModelData(data=label_data)
sparse_feature_sizes = io_utils.pickle_load(
model_path / f"{file_name}.sparse_feature_sizes.pkl"
)
index_label_id_mapping = io_utils.json_unpickle(
model_path / f"{file_name}.index_label_id_mapping.json"
)
entity_tag_specs = rasa.shared.utils.io.read_json_file(
model_path / f"{file_name}.entity_tag_specs.json"
)
entity_tag_specs = [
EntityTagSpec(
tag_name=tag_spec["tag_name"],
ids_to_tags={
int(key): value for key, value in tag_spec["ids_to_tags"].items()
},
tags_to_ids={
key: int(value) for key, value in tag_spec["tags_to_ids"].items()
},
num_tags=tag_spec["num_tags"],
)
for tag_spec in entity_tag_specs
]
# jsonpickle converts dictionary keys to strings
index_label_id_mapping = {
int(key): value for key, value in index_label_id_mapping.items()
}
return (
index_label_id_mapping,
entity_tag_specs,
label_data,
data_example,
sparse_feature_sizes,
)
@classmethod
def _load_model(
cls,
entity_tag_specs: List[EntityTagSpec],
label_data: RasaModelData,
config: Dict[Text, Any],
data_example: Dict[Text, Dict[Text, List[FeatureArray]]],
model_path: Path,
finetune_mode: bool = False,
) -> "RasaModel":
file_name = cls.__name__
tf_model_file = model_path / f"{file_name}.tf_model"
label_key = LABEL_KEY if config[INTENT_CLASSIFICATION] else None
label_sub_key = LABEL_SUB_KEY if config[INTENT_CLASSIFICATION] else None
model_data_example = RasaModelData(
label_key=label_key, label_sub_key=label_sub_key, data=data_example
)
model = cls._load_model_class(
tf_model_file,
model_data_example,
label_data,
entity_tag_specs,
config,
finetune_mode=finetune_mode,
)
return model
@classmethod
def _load_model_class(
cls,
tf_model_file: Text,
model_data_example: RasaModelData,
label_data: RasaModelData,
entity_tag_specs: List[EntityTagSpec],
config: Dict[Text, Any],
finetune_mode: bool,
) -> "RasaModel":
predict_data_example = RasaModelData(
label_key=model_data_example.label_key,
data={
feature_name: features
for feature_name, features in model_data_example.items()
if TEXT in feature_name
},
)
return cls.model_class().load(
tf_model_file,
model_data_example,
predict_data_example,
data_signature=model_data_example.get_signature(),
label_data=label_data,
entity_tag_specs=entity_tag_specs,
config=copy.deepcopy(config),
finetune_mode=finetune_mode,
)
def _instantiate_model_class(self, model_data: RasaModelData) -> "RasaModel":
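        """Creates a fresh, untrained model instance from the data signature."""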
return self.model_class()(
data_signature=model_data.get_signature(),
label_data=self._label_data,
entity_tag_specs=self._entity_tag_specs,
config=self.component_config,
)
class DIET(TransformerRasaModel):
def __init__(
self,
data_signature: Dict[Text, Dict[Text, List[FeatureSignature]]],
label_data: RasaModelData,
entity_tag_specs: Optional[List[EntityTagSpec]],
config: Dict[Text, Any],
) -> None:
        # Create the entity tag specs before calling super, otherwise building
        # the model will fail.
        self._entity_tag_specs = self._ordered_tag_specs(entity_tag_specs)
        super().__init__("DIET", config, data_signature, label_data)
self.predict_data_signature = {
feature_name: features
for feature_name, features in data_signature.items()
if TEXT in feature_name
}
# tf training
self._create_metrics()
self._update_metrics_to_log()
# needed for efficient prediction
self.all_labels_embed: Optional[tf.Tensor] = None
self._prepare_layers()
@staticmethod
def _ordered_tag_specs(
entity_tag_specs: Optional[List[EntityTagSpec]],
) -> List[EntityTagSpec]:
"""Ensure that order of entity tag specs matches CRF layer order."""
if entity_tag_specs is None:
return []
crf_order = [
ENTITY_ATTRIBUTE_TYPE,
ENTITY_ATTRIBUTE_ROLE,
ENTITY_ATTRIBUTE_GROUP,
]
ordered_tag_spec = []
for tag_name in crf_order:
for tag_spec in entity_tag_specs:
if tag_name == tag_spec.tag_name:
ordered_tag_spec.append(tag_spec)
return ordered_tag_spec
def _check_data(self) -> None:
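        """Checks that the data signature covers every configured task."""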
if TEXT not in self.data_signature:
raise InvalidConfigException(
f"No text features specified. "
f"Cannot train '{self.__class__.__name__}' model."
)
if self.config[INTENT_CLASSIFICATION]:
if LABEL not in self.data_signature:
raise InvalidConfigException(
f"No label features specified. "
f"Cannot train '{self.__class__.__name__}' model."
)
if self.config[SHARE_HIDDEN_LAYERS]:
different_sentence_signatures = False
different_sequence_signatures = False
if (
SENTENCE in self.data_signature[TEXT]
and SENTENCE in self.data_signature[LABEL]
):
different_sentence_signatures = (
self.data_signature[TEXT][SENTENCE]
!= self.data_signature[LABEL][SENTENCE]
)
if (
SEQUENCE in self.data_signature[TEXT]
and SEQUENCE in self.data_signature[LABEL]
):
different_sequence_signatures = (
self.data_signature[TEXT][SEQUENCE]
!= self.data_signature[LABEL][SEQUENCE]
)
if different_sentence_signatures or different_sequence_signatures:
raise ValueError(
"If hidden layer weights are shared, data signatures "
"for text_features and label_features must coincide."
)
if self.config[ENTITY_RECOGNITION] and (
ENTITIES not in self.data_signature
or ENTITY_ATTRIBUTE_TYPE not in self.data_signature[ENTITIES]
):
logger.debug(
f"You specified '{self.__class__.__name__}' to train entities, but "
f"no entities are present in the training data. Skipping training of "
f"entities."
)
self.config[ENTITY_RECOGNITION] = False
def _create_metrics(self) -> None:
        # self.metrics preserves creation order, so create the loss metrics
        # first to have losses reported first
self.mask_loss = tf.keras.metrics.Mean(name="m_loss")
self.intent_loss = tf.keras.metrics.Mean(name="i_loss")
self.entity_loss = tf.keras.metrics.Mean(name="e_loss")
self.entity_group_loss = tf.keras.metrics.Mean(name="g_loss")
self.entity_role_loss = tf.keras.metrics.Mean(name="r_loss")
# create accuracy metrics second to output accuracies second
self.mask_acc = tf.keras.metrics.Mean(name="m_acc")
self.intent_acc = tf.keras.metrics.Mean(name="i_acc")
self.entity_f1 = tf.keras.metrics.Mean(name="e_f1")
self.entity_group_f1 = tf.keras.metrics.Mean(name="g_f1")
self.entity_role_f1 = tf.keras.metrics.Mean(name="r_f1")
def _update_metrics_to_log(self) -> None:
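        """Selects which metrics to report based on the enabled tasks."""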
debug_log_level = logging.getLogger("rasa").level == logging.DEBUG
if self.config[MASKED_LM]:
self.metrics_to_log.append("m_acc")
if debug_log_level:
self.metrics_to_log.append("m_loss")
if self.config[INTENT_CLASSIFICATION]:
self.metrics_to_log.append("i_acc")
if debug_log_level:
self.metrics_to_log.append("i_loss")
if self.config[ENTITY_RECOGNITION]:
for tag_spec in self._entity_tag_specs:
if tag_spec.num_tags != 0:
name = tag_spec.tag_name
self.metrics_to_log.append(f"{name[0]}_f1")
if debug_log_level:
self.metrics_to_log.append(f"{name[0]}_loss")
self._log_metric_info()
def _log_metric_info(self) -> None:
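        """Logs a human-readable description of each reported metric."""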
metric_name = {
"t": "total",
"i": "intent",
"e": "entity",
"m": "mask",
"r": "role",
"g": "group",
}
logger.debug("Following metrics will be logged during training: ")
for metric in self.metrics_to_log:
parts = metric.split("_")
name = f"{metric_name[parts[0]]} {parts[1]}"
logger.debug(f" {metric} ({name})")
def _prepare_layers(self) -> None:
# For user text, prepare layers that combine different feature types, embed
# everything using a transformer and optionally also do masked language
# modeling.
self.text_name = TEXT
self._tf_layers[
f"sequence_layer.{self.text_name}"
] = rasa_layers.RasaSequenceLayer(
self.text_name, self.data_signature[self.text_name], self.config
)
if self.config[MASKED_LM]:
self._prepare_mask_lm_loss(self.text_name)
# Intent labels are treated similarly to user text but without the transformer,
# without masked language modelling, and with no dropout applied to the
# individual features, only to the overall label embedding after all label
# features have been combined.
if self.config[INTENT_CLASSIFICATION]:
self.label_name = TEXT if self.config[SHARE_HIDDEN_LAYERS] else LABEL
# disable input dropout applied to sparse and dense label features
label_config = self.config.copy()
label_config.update(
{SPARSE_INPUT_DROPOUT: False, DENSE_INPUT_DROPOUT: False}
)
self._tf_layers[
f"feature_combining_layer.{self.label_name}"
] = rasa_layers.RasaFeatureCombiningLayer(
self.label_name, self.label_signature[self.label_name], label_config
)
self._prepare_ffnn_layer(
self.label_name,
self.config[HIDDEN_LAYERS_SIZES][self.label_name],
self.config[DROP_RATE],
)
self._prepare_label_classification_layers(predictor_attribute=TEXT)
if self.config[ENTITY_RECOGNITION]:
self._prepare_entity_recognition_layers()
def _prepare_mask_lm_loss(self, name: Text) -> None:
# for embedding predicted tokens at masked positions
self._prepare_embed_layers(f"{name}_lm_mask")
# for embedding the true tokens that got masked
self._prepare_embed_layers(f"{name}_golden_token")
        # The mask loss is an auxiliary loss; disable scaling so that it
        # doesn't overpower the other losses.
self._prepare_dot_product_loss(f"{name}_mask", scale_loss=False)
def _create_bow(
self,
sequence_features: List[Union[tf.Tensor, tf.SparseTensor]],
sentence_features: List[Union[tf.Tensor, tf.SparseTensor]],
sequence_feature_lengths: tf.Tensor,
name: Text,
) -> tf.Tensor:
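        """Combines and pools features into one bag-of-words vector per input."""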
x, _ = self._tf_layers[f"feature_combining_layer.{name}"](
(sequence_features, sentence_features, sequence_feature_lengths),
training=self._training,
)
# convert to bag-of-words by summing along the sequence dimension
x = tf.reduce_sum(x, axis=1)
return self._tf_layers[f"ffnn.{name}"](x, self._training)
def _create_all_labels(self) -> Tuple[tf.Tensor, tf.Tensor]:
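        """Embeds one example of every label for similarity computations."""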
all_label_ids = self.tf_label_data[LABEL_KEY][LABEL_SUB_KEY][0]
sequence_feature_lengths = self._get_sequence_feature_lengths(
self.tf_label_data, LABEL
)
x = self._create_bow(
self.tf_label_data[LABEL][SEQUENCE],
self.tf_label_data[LABEL][SENTENCE],
sequence_feature_lengths,
self.label_name,
)
all_labels_embed = self._tf_layers[f"embed.{LABEL}"](x)
return all_label_ids, all_labels_embed
def _mask_loss(
self,
outputs: tf.Tensor,
inputs: tf.Tensor,
seq_ids: tf.Tensor,
mlm_mask_boolean: tf.Tensor,
name: Text,
    ) -> Tuple[tf.Tensor, tf.Tensor]:
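        """Computes the MLM loss and accuracy over the masked positions."""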
# make sure there is at least one element in the mask
mlm_mask_boolean = tf.cond(
tf.reduce_any(mlm_mask_boolean),
lambda: mlm_mask_boolean,
lambda: tf.scatter_nd([[0, 0, 0]], [True], tf.shape(mlm_mask_boolean)),
)
mlm_mask_boolean = tf.squeeze(mlm_mask_boolean, -1)
# Pick elements that were masked, throwing away the batch & sequence dimension
# and effectively switching from shape (batch_size, sequence_length, units) to
# (num_masked_elements, units).
outputs = tf.boolean_mask(outputs, mlm_mask_boolean)
inputs = tf.boolean_mask(inputs, mlm_mask_boolean)
ids = tf.boolean_mask(seq_ids, mlm_mask_boolean)
tokens_predicted_embed = self._tf_layers[f"embed.{name}_lm_mask"](outputs)
tokens_true_embed = self._tf_layers[f"embed.{name}_golden_token"](inputs)
# To limit the otherwise computationally expensive loss calculation, we
# constrain the label space in MLM (i.e. token space) to only those tokens that
# were masked in this batch. Hence the reduced list of token embeddings
# (tokens_true_embed) and the reduced list of labels (ids) are passed as
# all_labels_embed and all_labels, respectively. In the future, we could be less
# restrictive and construct a slightly bigger label space which could include
# tokens not masked in the current batch too.
return self._tf_layers[f"loss.{name}_mask"](
inputs_embed=tokens_predicted_embed,
labels_embed=tokens_true_embed,
labels=ids,
all_labels_embed=tokens_true_embed,
all_labels=ids,
)
def _calculate_label_loss(
self, text_features: tf.Tensor, label_features: tf.Tensor, label_ids: tf.Tensor
    ) -> Tuple[tf.Tensor, tf.Tensor]:
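        """Computes the intent loss and accuracy against all candidate labels."""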
all_label_ids, all_labels_embed = self._create_all_labels()
text_embed = self._tf_layers[f"embed.{TEXT}"](text_features)
label_embed = self._tf_layers[f"embed.{LABEL}"](label_features)
return self._tf_layers[f"loss.{LABEL}"](
text_embed, label_embed, label_ids, all_labels_embed, all_label_ids
)
def batch_loss(
self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
) -> tf.Tensor:
"""Calculates the loss for the given batch.
Args:
batch_in: The batch.
Returns:
The loss of the given batch.
"""
tf_batch_data = self.batch_to_model_data_format(batch_in, self.data_signature)
sequence_feature_lengths = self._get_sequence_feature_lengths(
tf_batch_data, TEXT
)
(
text_transformed,
text_in,
mask_combined_sequence_sentence,
text_seq_ids,
mlm_mask_boolean_text,
_,
) = self._tf_layers[f"sequence_layer.{self.text_name}"](
(
tf_batch_data[TEXT][SEQUENCE],
tf_batch_data[TEXT][SENTENCE],
sequence_feature_lengths,
),
training=self._training,
)
losses = []
        # Sequence lengths for sentence-level features are always 1, but they
        # can effectively be 0 if sentence-level features aren't present.
sentence_feature_lengths = self._get_sentence_feature_lengths(
tf_batch_data, TEXT
)
combined_sequence_sentence_feature_lengths = (
sequence_feature_lengths + sentence_feature_lengths
)
if self.config[MASKED_LM]:
loss, acc = self._mask_loss(
text_transformed, text_in, text_seq_ids, mlm_mask_boolean_text, TEXT
)
self.mask_loss.update_state(loss)
self.mask_acc.update_state(acc)
losses.append(loss)
if self.config[INTENT_CLASSIFICATION]:
loss = self._batch_loss_intent(
combined_sequence_sentence_feature_lengths,
text_transformed,
tf_batch_data,
)
losses.append(loss)
if self.config[ENTITY_RECOGNITION]:
losses += self._batch_loss_entities(
mask_combined_sequence_sentence,
sequence_feature_lengths,
text_transformed,
tf_batch_data,
)
return tf.math.add_n(losses)
def _batch_loss_intent(
self,
combined_sequence_sentence_feature_lengths_text: tf.Tensor,
text_transformed: tf.Tensor,
tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],
) -> tf.Tensor:
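        """Computes the intent classification loss for a single batch."""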
# get sentence features vector for intent classification
sentence_vector = self._last_token(
text_transformed, combined_sequence_sentence_feature_lengths_text
)
sequence_feature_lengths_label = self._get_sequence_feature_lengths(
tf_batch_data, LABEL
)
label_ids = tf_batch_data[LABEL_KEY][LABEL_SUB_KEY][0]
label = self._create_bow(
tf_batch_data[LABEL][SEQUENCE],
tf_batch_data[LABEL][SENTENCE],
sequence_feature_lengths_label,
self.label_name,
)
loss, acc = self._calculate_label_loss(sentence_vector, label, label_ids)
self._update_label_metrics(loss, acc)
return loss
def _update_label_metrics(self, loss: tf.Tensor, acc: tf.Tensor) -> None:
self.intent_loss.update_state(loss)
self.intent_acc.update_state(acc)
def _batch_loss_entities(
self,
mask_combined_sequence_sentence: tf.Tensor,
sequence_feature_lengths: tf.Tensor,
text_transformed: tf.Tensor,
tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],
) -> List[tf.Tensor]:
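        """Computes CRF losses for the entity type, role, and group tags."""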
losses = []
entity_tags = None
for tag_spec in self._entity_tag_specs:
if tag_spec.num_tags == 0:
continue
tag_ids = tf_batch_data[ENTITIES][tag_spec.tag_name][0]
# add a zero (no entity) for the sentence features to match the shape of
# inputs
tag_ids = tf.pad(tag_ids, [[0, 0], [0, 1], [0, 0]])
loss, f1, _logits = self._calculate_entity_loss(
text_transformed,
tag_ids,
mask_combined_sequence_sentence,
sequence_feature_lengths,
tag_spec.tag_name,
entity_tags,
)
if tag_spec.tag_name == ENTITY_ATTRIBUTE_TYPE:
# use the entity tags as additional input for the role
# and group CRF
entity_tags = tf.one_hot(
tf.cast(tag_ids[:, :, 0], tf.int32), depth=tag_spec.num_tags
)
self._update_entity_metrics(loss, f1, tag_spec.tag_name)
losses.append(loss)
return losses
def _update_entity_metrics(
self, loss: tf.Tensor, f1: tf.Tensor, tag_name: Text
) -> None:
if tag_name == ENTITY_ATTRIBUTE_TYPE:
self.entity_loss.update_state(loss)
self.entity_f1.update_state(f1)
elif tag_name == ENTITY_ATTRIBUTE_GROUP:
self.entity_group_loss.update_state(loss)
self.entity_group_f1.update_state(f1)
elif tag_name == ENTITY_ATTRIBUTE_ROLE:
self.entity_role_loss.update_state(loss)
self.entity_role_f1.update_state(f1)
def prepare_for_predict(self) -> None:
"""Prepares the model for prediction."""
if self.config[INTENT_CLASSIFICATION]:
_, self.all_labels_embed = self._create_all_labels()
def batch_predict(
self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
) -> Dict[Text, tf.Tensor]:
"""Predicts the output of the given batch.
Args:
batch_in: The batch.
Returns:
The output to predict.
"""
tf_batch_data = self.batch_to_model_data_format(
batch_in, self.predict_data_signature
)
sequence_feature_lengths = self._get_sequence_feature_lengths(
tf_batch_data, TEXT
)
sentence_feature_lengths = self._get_sentence_feature_lengths(
tf_batch_data, TEXT
)
text_transformed, _, _, _, _, attention_weights = self._tf_layers[
f"sequence_layer.{self.text_name}"
](
(
tf_batch_data[TEXT][SEQUENCE],
tf_batch_data[TEXT][SENTENCE],
sequence_feature_lengths,
),
training=self._training,
)
predictions = {
DIAGNOSTIC_DATA: {
"attention_weights": attention_weights,
"text_transformed": text_transformed,
}
}
if self.config[INTENT_CLASSIFICATION]:
predictions.update(
self._batch_predict_intents(
sequence_feature_lengths + sentence_feature_lengths,
text_transformed,
)
)
if self.config[ENTITY_RECOGNITION]:
predictions.update(
self._batch_predict_entities(sequence_feature_lengths, text_transformed)
)
return predictions
def _batch_predict_entities(
self, sequence_feature_lengths: tf.Tensor, text_transformed: tf.Tensor
) -> Dict[Text, tf.Tensor]:
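        """Predicts entity tag ids and confidences via the trained CRF layers."""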
predictions: Dict[Text, tf.Tensor] = {}
entity_tags = None
for tag_spec in self._entity_tag_specs:
# skip crf layer if it was not trained
if tag_spec.num_tags == 0:
continue
name = tag_spec.tag_name
_input = text_transformed
if entity_tags is not None:
_tags = self._tf_layers[f"embed.{name}.tags"](entity_tags)
_input = tf.concat([_input, _tags], axis=-1)
_logits = self._tf_layers[f"embed.{name}.logits"](_input)
pred_ids, confidences = self._tf_layers[f"crf.{name}"](
_logits, sequence_feature_lengths
)
predictions[f"e_{name}_ids"] = pred_ids
predictions[f"e_{name}_scores"] = confidences
if name == ENTITY_ATTRIBUTE_TYPE:
# use the entity tags as additional input for the role
# and group CRF
entity_tags = tf.one_hot(
tf.cast(pred_ids, tf.int32), depth=tag_spec.num_tags
)
return predictions
def _batch_predict_intents(
self,
combined_sequence_sentence_feature_lengths: tf.Tensor,
text_transformed: tf.Tensor,
) -> Dict[Text, tf.Tensor]:
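        """Ranks all intent labels by similarity to the input sentence vector."""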
if self.all_labels_embed is None:
raise ValueError(
"The model was not prepared for prediction. "
"Call `prepare_for_predict` first."
)
# get sentence feature vector for intent classification
sentence_vector = self._last_token(
text_transformed, combined_sequence_sentence_feature_lengths
)
sentence_vector_embed = self._tf_layers[f"embed.{TEXT}"](sentence_vector)
_, scores = self._tf_layers[
f"loss.{LABEL}"
].get_similarities_and_confidences_from_embeddings(
sentence_vector_embed[:, tf.newaxis, :],
self.all_labels_embed[tf.newaxis, :, :],
)
return {"i_scores": scores}
| 38.309951
| 88
| 0.625901
|
from __future__ import annotations
import copy
import logging
from collections import defaultdict
from pathlib import Path
from rasa.nlu.featurizers.featurizer import Featurizer
import numpy as np
import scipy.sparse
import tensorflow as tf
from typing import Any, Dict, List, Optional, Text, Tuple, Union, Type
from rasa.engine.graph import ExecutionContext, GraphComponent
from rasa.engine.recipes.default_recipe import DefaultV1Recipe
from rasa.engine.storage.resource import Resource
from rasa.engine.storage.storage import ModelStorage
from rasa.nlu.extractors.extractor import EntityExtractorMixin
from rasa.nlu.classifiers.classifier import IntentClassifier
import rasa.shared.utils.io
import rasa.utils.io as io_utils
import rasa.nlu.utils.bilou_utils as bilou_utils
from rasa.shared.constants import DIAGNOSTIC_DATA
from rasa.nlu.extractors.extractor import EntityTagSpec
from rasa.nlu.classifiers import LABEL_RANKING_LENGTH
from rasa.utils import train_utils
from rasa.utils.tensorflow import rasa_layers
from rasa.utils.tensorflow.models import RasaModel, TransformerRasaModel
from rasa.utils.tensorflow.model_data import (
RasaModelData,
FeatureSignature,
FeatureArray,
)
from rasa.nlu.constants import TOKENS_NAMES, DEFAULT_TRANSFORMER_SIZE
from rasa.shared.nlu.constants import (
SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
TEXT,
INTENT,
INTENT_RESPONSE_KEY,
ENTITIES,
ENTITY_ATTRIBUTE_TYPE,
ENTITY_ATTRIBUTE_GROUP,
ENTITY_ATTRIBUTE_ROLE,
NO_ENTITY_TAG,
SPLIT_ENTITIES_BY_COMMA,
)
from rasa.shared.exceptions import InvalidConfigException
from rasa.shared.nlu.training_data.training_data import TrainingData
from rasa.shared.nlu.training_data.message import Message
from rasa.utils.tensorflow.constants import (
LABEL,
IDS,
HIDDEN_LAYERS_SIZES,
RENORMALIZE_CONFIDENCES,
SHARE_HIDDEN_LAYERS,
TRANSFORMER_SIZE,
NUM_TRANSFORMER_LAYERS,
NUM_HEADS,
BATCH_SIZES,
BATCH_STRATEGY,
EPOCHS,
RANDOM_SEED,
LEARNING_RATE,
RANKING_LENGTH,
LOSS_TYPE,
SIMILARITY_TYPE,
NUM_NEG,
SPARSE_INPUT_DROPOUT,
DENSE_INPUT_DROPOUT,
MASKED_LM,
ENTITY_RECOGNITION,
TENSORBOARD_LOG_DIR,
INTENT_CLASSIFICATION,
EVAL_NUM_EXAMPLES,
EVAL_NUM_EPOCHS,
UNIDIRECTIONAL_ENCODER,
DROP_RATE,
DROP_RATE_ATTENTION,
CONNECTION_DENSITY,
NEGATIVE_MARGIN_SCALE,
REGULARIZATION_CONSTANT,
SCALE_LOSS,
USE_MAX_NEG_SIM,
MAX_NEG_SIM,
MAX_POS_SIM,
EMBEDDING_DIMENSION,
BILOU_FLAG,
KEY_RELATIVE_ATTENTION,
VALUE_RELATIVE_ATTENTION,
MAX_RELATIVE_POSITION,
AUTO,
BALANCED,
CROSS_ENTROPY,
TENSORBOARD_LOG_LEVEL,
CONCAT_DIMENSION,
FEATURIZERS,
CHECKPOINT_MODEL,
SEQUENCE,
SENTENCE,
SEQUENCE_LENGTH,
DENSE_DIMENSION,
MASK,
CONSTRAIN_SIMILARITIES,
MODEL_CONFIDENCE,
SOFTMAX,
)
logger = logging.getLogger(__name__)
SPARSE = "sparse"
DENSE = "dense"
LABEL_KEY = LABEL
LABEL_SUB_KEY = IDS
POSSIBLE_TAGS = [ENTITY_ATTRIBUTE_TYPE, ENTITY_ATTRIBUTE_ROLE, ENTITY_ATTRIBUTE_GROUP]
@DefaultV1Recipe.register(
[
DefaultV1Recipe.ComponentType.INTENT_CLASSIFIER,
DefaultV1Recipe.ComponentType.ENTITY_EXTRACTOR,
],
is_trainable=True,
)
class DIETClassifier(GraphComponent, IntentClassifier, EntityExtractorMixin):
@classmethod
def required_components(cls) -> List[Type]:
return [Featurizer]
@staticmethod
def get_default_config() -> Dict[Text, Any]:
return {
S: {TEXT: [], LABEL: []},
SHARE_HIDDEN_LAYERS: False,
TRANSFORMER_SIZE: DEFAULT_TRANSFORMER_SIZE,
NUM_TRANSFORMER_LAYERS: 2,
NUM_HEADS: 4,
KEY_RELATIVE_ATTENTION: False,
VALUE_RELATIVE_ATTENTION: False,
MAX_RELATIVE_POSITION: 5,
UNIDIRECTIONAL_ENCODER: False,
H_SIZES: [64, 256],
BATCH_STRATEGY: BALANCED,
EPOCHS: 300,
RANDOM_SEED: None,
LEARNING_RATE: 0.001,
DENSE_DIMENSION: {TEXT: 128, LABEL: 20},
CONCAT_DIMENSION: {TEXT: 128, LABEL: 20},
NUM_NEG: 20,
SIMILARITY_TYPE: AUTO,
LOSS_TYPE: CROSS_ENTROPY,
RANKING_LENGTH: LABEL_RANKING_LENGTH,
MAX_POS_SIM: 0.8,
MAX_NEG_SIM: -0.4,
USE_MAX_NEG_SIM: True,
SCALE_LOSS: False,
2,
NEGATIVE_MARGIN_SCALE: 0.8,
DROP_RATE: 0.2,
DROP_RATE_ATTENTION: 0,
CONNECTION_DENSITY: 0.2,
SPARSE_INPUT_DROPOUT: True,
DENSE_INPUT_DROPOUT: True,
_EPOCHS: 20,
EVAL_NUM_EXAMPLES: 0,
ENT_CLASSIFICATION: True,
ENTITY_RECOGNITION: True,
MASKED_LM: False,
BILOU_FLAG: True,
TENSORBOARD_LOG_DIR: None,
TENSORBOARD_LOG_LEVEL: "epoch",
CHECKPOINT_MODEL: False,
FEATURIZERS: [],
SPLIT_ENTITIES_BY_COMMA: True,
# If 'True' applies sigmoid on all similarity terms and adds
# it to the loss function to ensure that similarity values are
# approximately bounded. Used inside cross-entropy loss only.
CONSTRAIN_SIMILARITIES: False,
# Model confidence to be returned during inference. Currently, the only
# possible value is `softmax`.
MODEL_CONFIDENCE: SOFTMAX,
# Determines whether the confidences of the chosen top intents should be
# renormalized so that they sum up to 1. By default, we do not renormalize
# and return the confidences for the top intents as is.
# Note that renormalization only makes sense if confidences are generated
# via `softmax`.
RENORMALIZE_CONFIDENCES: False,
}
def __init__(
self,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
index_label_id_mapping: Optional[Dict[int, Text]] = None,
entity_tag_specs: Optional[List[EntityTagSpec]] = None,
model: Optional[RasaModel] = None,
sparse_feature_sizes: Optional[Dict[Text, Dict[Text, List[int]]]] = None,
) -> None:
if EPOCHS not in config:
rasa.shared.utils.io.raise_warning(
f"Please configure the number of '{EPOCHS}' in your configuration file."
f" We will change the default value of '{EPOCHS}' in the future to 1. "
)
self.component_config = config
self._model_storage = model_storage
self._resource = resource
self._execution_context = execution_context
self._check_config_parameters()
# transform numbers to labels
self.index_label_id_mapping = index_label_id_mapping or {}
self._entity_tag_specs = entity_tag_specs
self.model = model
self.tmp_checkpoint_dir = None
if self.component_config[CHECKPOINT_MODEL]:
self.tmp_checkpoint_dir = Path(rasa.utils.io.create_temporary_directory())
self._label_data: Optional[RasaModelData] = None
self._data_example: Optional[Dict[Text, Dict[Text, List[FeatureArray]]]] = None
self.split_entities_config = rasa.utils.train_utils.init_split_entities(
self.component_config[SPLIT_ENTITIES_BY_COMMA],
SPLIT_ENTITIES_BY_COMMA_DEFAULT_VALUE,
)
self.finetune_mode = self._execution_context.is_finetuning
self._sparse_feature_sizes = sparse_feature_sizes
# init helpers
def _check_masked_lm(self) -> None:
if (
self.component_config[MASKED_LM]
and self.component_config[NUM_TRANSFORMER_LAYERS] == 0
):
raise ValueError(
f"If number of transformer layers is 0, "
f"'{MASKED_LM}' option should be 'False'."
)
def _check_share_hidden_layers_sizes(self) -> None:
if self.component_config.get(SHARE_HIDDEN_LAYERS):
first_hidden_layer_sizes = next(
iter(self.component_config[HIDDEN_LAYERS_SIZES].values())
)
# check that all hidden layer sizes are the same
identical_hidden_layer_sizes = all(
current_hidden_layer_sizes == first_hidden_layer_sizes
for current_hidden_layer_sizes in self.component_config[
HIDDEN_LAYERS_SIZES
].values()
)
if not identical_hidden_layer_sizes:
raise ValueError(
f"If hidden layer weights are shared, "
f"{HIDDEN_LAYERS_SIZES} must coincide."
)
def _check_config_parameters(self) -> None:
self.component_config = train_utils.check_deprecated_options(
self.component_config
)
self._check_masked_lm()
self._check_share_hidden_layers_sizes()
self.component_config = train_utils.update_confidence_type(
self.component_config
)
train_utils.validate_configuration_settings(self.component_config)
self.component_config = train_utils.update_similarity_type(
self.component_config
)
self.component_config = train_utils.update_evaluation_parameters(
self.component_config
)
@classmethod
def create(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
) -> DIETClassifier:
return cls(config, model_storage, resource, execution_context)
@property
def label_key(self) -> Optional[Text]:
return LABEL_KEY if self.component_config[INTENT_CLASSIFICATION] else None
@property
def label_sub_key(self) -> Optional[Text]:
return LABEL_SUB_KEY if self.component_config[INTENT_CLASSIFICATION] else None
@staticmethod
def model_class() -> Type[RasaModel]:
return DIET
# training data helpers:
@staticmethod
def _label_id_index_mapping(
training_data: TrainingData, attribute: Text
) -> Dict[Text, int]:
distinct_label_ids = {
example.get(attribute) for example in training_data.intent_examples
} - {None}
return {
label_id: idx for idx, label_id in enumerate(sorted(distinct_label_ids))
}
@staticmethod
def _invert_mapping(mapping: Dict) -> Dict:
return {value: key for key, value in mapping.items()}
def _create_entity_tag_specs(
self, training_data: TrainingData
) -> List[EntityTagSpec]:
_tag_specs = []
for tag_name in POSSIBLE_TAGS:
if self.component_config[BILOU_FLAG]:
tag_id_index_mapping = bilou_utils.build_tag_id_dict(
training_data, tag_name
)
else:
tag_id_index_mapping = self._tag_id_index_mapping_for(
tag_name, training_data
)
if tag_id_index_mapping:
_tag_specs.append(
EntityTagSpec(
tag_name=tag_name,
tags_to_ids=tag_id_index_mapping,
ids_to_tags=self._invert_mapping(tag_id_index_mapping),
num_tags=len(tag_id_index_mapping),
)
)
return _tag_specs
@staticmethod
def _tag_id_index_mapping_for(
tag_name: Text, training_data: TrainingData
) -> Optional[Dict[Text, int]]:
if tag_name == ENTITY_ATTRIBUTE_ROLE:
distinct_tags = training_data.entity_roles
elif tag_name == ENTITY_ATTRIBUTE_GROUP:
distinct_tags = training_data.entity_groups
else:
distinct_tags = training_data.entities
distinct_tags = distinct_tags - {NO_ENTITY_TAG} - {None}
if not distinct_tags:
return None
tag_id_dict = {
tag_id: idx for idx, tag_id in enumerate(sorted(distinct_tags), 1)
}
# NO_ENTITY_TAG corresponds to non-entity which should correspond to 0 index
# needed for correct prediction for padding
tag_id_dict[NO_ENTITY_TAG] = 0
return tag_id_dict
@staticmethod
def _find_example_for_label(
label: Text, examples: List[Message], attribute: Text
) -> Optional[Message]:
for ex in examples:
if ex.get(attribute) == label:
return ex
return None
def _check_labels_features_exist(
self, labels_example: List[Message], attribute: Text
) -> bool:
return all(
label_example.features_present(
attribute, self.component_config[FEATURIZERS]
)
for label_example in labels_example
)
def _extract_features(
self, message: Message, attribute: Text
) -> Dict[Text, Union[scipy.sparse.spmatrix, np.ndarray]]:
(
sparse_sequence_features,
sparse_sentence_features,
) = message.get_sparse_features(attribute, self.component_config[FEATURIZERS])
dense_sequence_features, dense_sentence_features = message.get_dense_features(
attribute, self.component_config[FEATURIZERS]
)
if dense_sequence_features is not None and sparse_sequence_features is not None:
if (
dense_sequence_features.features.shape[0]
!= sparse_sequence_features.features.shape[0]
):
raise ValueError(
f"Sequence dimensions for sparse and dense sequence features "
f"don't coincide in '{message.get(TEXT)}'"
f"for attribute '{attribute}'."
)
if dense_sentence_features is not None and sparse_sentence_features is not None:
if (
dense_sentence_features.features.shape[0]
!= sparse_sentence_features.features.shape[0]
):
raise ValueError(
f"Sequence dimensions for sparse and dense sentence features "
f"don't coincide in '{message.get(TEXT)}'"
f"for attribute '{attribute}'."
)
# If we don't use the transformer and we don't want to do entity recognition,
# to speed up training take only the sentence features as feature vector.
# We would not make use of the sequence anyway in this setup. Carrying over
# those features to the actual training process takes quite some time.
if (
self.component_config[NUM_TRANSFORMER_LAYERS] == 0
and not self.component_config[ENTITY_RECOGNITION]
and attribute not in [INTENT, INTENT_RESPONSE_KEY]
):
sparse_sequence_features = None
dense_sequence_features = None
out = {}
if sparse_sentence_features is not None:
out[f"{SPARSE}_{SENTENCE}"] = sparse_sentence_features.features
if sparse_sequence_features is not None:
out[f"{SPARSE}_{SEQUENCE}"] = sparse_sequence_features.features
if dense_sentence_features is not None:
out[f"{DENSE}_{SENTENCE}"] = dense_sentence_features.features
if dense_sequence_features is not None:
out[f"{DENSE}_{SEQUENCE}"] = dense_sequence_features.features
return out
def _check_input_dimension_consistency(self, model_data: RasaModelData) -> None:
if self.component_config.get(SHARE_HIDDEN_LAYERS):
num_text_sentence_features = model_data.number_of_units(TEXT, SENTENCE)
num_label_sentence_features = model_data.number_of_units(LABEL, SENTENCE)
num_text_sequence_features = model_data.number_of_units(TEXT, SEQUENCE)
num_label_sequence_features = model_data.number_of_units(LABEL, SEQUENCE)
if (0 < num_text_sentence_features != num_label_sentence_features > 0) or (
0 < num_text_sequence_features != num_label_sequence_features > 0
):
raise ValueError(
"If embeddings are shared text features and label features "
"must coincide. Check the output dimensions of previous components."
)
def _extract_labels_precomputed_features(
self, label_examples: List[Message], attribute: Text = INTENT
) -> Tuple[List[FeatureArray], List[FeatureArray]]:
features = defaultdict(list)
for e in label_examples:
label_features = self._extract_features(e, attribute)
for feature_key, feature_value in label_features.items():
features[feature_key].append(feature_value)
sequence_features = []
sentence_features = []
for feature_name, feature_value in features.items():
if SEQUENCE in feature_name:
sequence_features.append(
FeatureArray(np.array(feature_value), number_of_dimensions=3)
)
else:
sentence_features.append(
FeatureArray(np.array(feature_value), number_of_dimensions=3)
)
return sequence_features, sentence_features
@staticmethod
def _compute_default_label_features(
labels_example: List[Message],
) -> List[FeatureArray]:
logger.debug("No label features found. Computing default label features.")
eye_matrix = np.eye(len(labels_example), dtype=np.float32)
# add sequence dimension to one-hot labels
return [
FeatureArray(
np.array([np.expand_dims(a, 0) for a in eye_matrix]),
number_of_dimensions=3,
)
]
def _create_label_data(
self,
training_data: TrainingData,
label_id_dict: Dict[Text, int],
attribute: Text,
) -> RasaModelData:
# Collect one example for each label
labels_idx_examples = []
for label_name, idx in label_id_dict.items():
label_example = self._find_example_for_label(
label_name, training_data.intent_examples, attribute
)
labels_idx_examples.append((idx, label_example))
# Sort the list of tuples based on label_idx
labels_idx_examples = sorted(labels_idx_examples, key=lambda x: x[0])
labels_example = [example for (_, example) in labels_idx_examples]
# Collect features, precomputed if they exist, else compute on the fly
if self._check_labels_features_exist(labels_example, attribute):
(
sequence_features,
sentence_features,
) = self._extract_labels_precomputed_features(labels_example, attribute)
else:
sequence_features = None
sentence_features = self._compute_default_label_features(labels_example)
label_data = RasaModelData()
label_data.add_features(LABEL, SEQUENCE, sequence_features)
label_data.add_features(LABEL, SENTENCE, sentence_features)
if label_data.does_feature_not_exist(
LABEL, SENTENCE
) and label_data.does_feature_not_exist(LABEL, SEQUENCE):
raise ValueError(
"No label features are present. Please check your configuration file."
)
label_ids = np.array([idx for (idx, _) in labels_idx_examples])
# explicitly add last dimension to label_ids
# to track correctly dynamic sequences
label_data.add_features(
LABEL_KEY,
LABEL_SUB_KEY,
[FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],
)
label_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)
return label_data
def _use_default_label_features(self, label_ids: np.ndarray) -> List[FeatureArray]:
feature_arrays: List[FeatureArray] = self._label_data.get(LABEL, SENTENCE)
all_label_features = feature_arrays[0]
return [
FeatureArray(
np.array([all_label_features[label_id] for label_id in label_ids]),
number_of_dimensions=all_label_features.number_of_dimensions,
)
]
def _create_model_data(
self,
training_data: List[Message],
label_id_dict: Optional[Dict[Text, int]] = None,
label_attribute: Optional[Text] = None,
training: bool = True,
) -> RasaModelData:
from rasa.utils.tensorflow import model_data_utils
attributes_to_consider = [TEXT]
if training and self.component_config[INTENT_CLASSIFICATION]:
# we don't have any intent labels during prediction, just add them during
attributes_to_consider.append(label_attribute)
if (
training
and self.component_config[ENTITY_RECOGNITION]
and self._entity_tag_specs
):
attributes_to_consider.append(ENTITIES)
if training and label_attribute is not None:
training_data = [
example for example in training_data if label_attribute in example.data
]
training_data = [
message
for message in training_data
if message.features_present(
attribute=TEXT, featurizers=self.component_config.get(FEATURIZERS)
)
]
if not training_data:
return RasaModelData()
(
features_for_examples,
sparse_feature_sizes,
) = model_data_utils.featurize_training_examples(
training_data,
attributes_to_consider,
entity_tag_specs=self._entity_tag_specs,
featurizers=self.component_config[FEATURIZERS],
bilou_tagging=self.component_config[BILOU_FLAG],
)
attribute_data, _ = model_data_utils.convert_to_data_format(
features_for_examples, consider_dialogue_dimension=False
)
model_data = RasaModelData(
label_key=self.label_key, label_sub_key=self.label_sub_key
)
model_data.add_data(attribute_data)
model_data.add_lengths(TEXT, SEQUENCE_LENGTH, TEXT, SEQUENCE)
# feature sizes of label attributes. That's why we remove them.
sparse_feature_sizes = self._remove_label_sparse_feature_sizes(
sparse_feature_sizes=sparse_feature_sizes, label_attribute=label_attribute
)
model_data.add_sparse_feature_sizes(sparse_feature_sizes)
self._add_label_features(
model_data, training_data, label_attribute, label_id_dict, training
)
model_data.sort()
return model_data
@staticmethod
def _remove_label_sparse_feature_sizes(
sparse_feature_sizes: Dict[Text, Dict[Text, List[int]]],
label_attribute: Optional[Text] = None,
) -> Dict[Text, Dict[Text, List[int]]]:
if label_attribute in sparse_feature_sizes:
del sparse_feature_sizes[label_attribute]
return sparse_feature_sizes
def _add_label_features(
self,
model_data: RasaModelData,
training_data: List[Message],
label_attribute: Text,
label_id_dict: Dict[Text, int],
training: bool = True,
) -> None:
label_ids = []
if training and self.component_config[INTENT_CLASSIFICATION]:
for example in training_data:
if example.get(label_attribute):
label_ids.append(label_id_dict[example.get(label_attribute)])
model_data.add_features(
LABEL_KEY,
LABEL_SUB_KEY,
[FeatureArray(np.expand_dims(label_ids, -1), number_of_dimensions=2)],
)
if (
label_attribute
and model_data.does_feature_not_exist(label_attribute, SENTENCE)
and model_data.does_feature_not_exist(label_attribute, SEQUENCE)
):
model_data.add_features(
LABEL, SENTENCE, self._use_default_label_features(np.array(label_ids))
)
model_data.update_key(label_attribute, SENTENCE, LABEL, SENTENCE)
model_data.update_key(label_attribute, SEQUENCE, LABEL, SEQUENCE)
model_data.update_key(label_attribute, MASK, LABEL, MASK)
model_data.add_lengths(LABEL, SEQUENCE_LENGTH, LABEL, SEQUENCE)
def preprocess_train_data(self, training_data: TrainingData) -> RasaModelData:
if self.component_config[BILOU_FLAG]:
bilou_utils.apply_bilou_schema(training_data)
label_id_index_mapping = self._label_id_index_mapping(
training_data, attribute=INTENT
)
if not label_id_index_mapping:
return RasaModelData()
self.index_label_id_mapping = self._invert_mapping(label_id_index_mapping)
self._label_data = self._create_label_data(
training_data, label_id_index_mapping, attribute=INTENT
)
self._entity_tag_specs = self._create_entity_tag_specs(training_data)
label_attribute = (
INTENT if self.component_config[INTENT_CLASSIFICATION] else None
)
model_data = self._create_model_data(
training_data.nlu_examples,
label_id_index_mapping,
label_attribute=label_attribute,
)
self._check_input_dimension_consistency(model_data)
return model_data
@staticmethod
def _check_enough_labels(model_data: RasaModelData) -> bool:
return len(np.unique(model_data.get(LABEL_KEY, LABEL_SUB_KEY))) >= 2
def train(self, training_data: TrainingData) -> Resource:
model_data = self.preprocess_train_data(training_data)
if model_data.is_empty():
logger.debug(
f"Cannot train '{self.__class__.__name__}'. No data was provided. "
f"Skipping training of the classifier."
)
return self._resource
if not self.model and self.finetune_mode:
raise rasa.shared.exceptions.InvalidParameterException(
f"{self.__class__.__name__} was instantiated "
f"with `model=None` and `finetune_mode=True`. "
f"This is not a valid combination as the component "
f"needs an already instantiated and trained model "
f"to continue training in finetune mode."
)
if self.component_config.get(INTENT_CLASSIFICATION):
if not self._check_enough_labels(model_data):
logger.error(
f"Cannot train '{self.__class__.__name__}'. "
f"Need at least 2 different intent classes. "
f"Skipping training of classifier."
)
return self._resource
if self.component_config.get(ENTITY_RECOGNITION):
self.check_correct_entity_annotations(training_data)
self._data_example = model_data.first_data_example()
if not self.finetune_mode:
self.model = self._instantiate_model_class(model_data)
self.model.compile(
optimizer=tf.keras.optimizers.Adam(self.component_config[LEARNING_RATE])
)
else:
self.model.adjust_for_incremental_training(
data_example=self._data_example,
new_sparse_feature_sizes=model_data.get_sparse_feature_sizes(),
old_sparse_feature_sizes=self._sparse_feature_sizes,
)
self._sparse_feature_sizes = model_data.get_sparse_feature_sizes()
data_generator, validation_data_generator = train_utils.create_data_generators(
model_data,
self.component_config[BATCH_SIZES],
self.component_config[EPOCHS],
self.component_config[BATCH_STRATEGY],
self.component_config[EVAL_NUM_EXAMPLES],
self.component_config[RANDOM_SEED],
)
callbacks = train_utils.create_common_callbacks(
self.component_config[EPOCHS],
self.component_config[TENSORBOARD_LOG_DIR],
self.component_config[TENSORBOARD_LOG_LEVEL],
self.tmp_checkpoint_dir,
)
self.model.fit(
data_generator,
epochs=self.component_config[EPOCHS],
validation_data=validation_data_generator,
validation_freq=self.component_config[EVAL_NUM_EPOCHS],
callbacks=callbacks,
verbose=False,
shuffle=False,
)
self.persist()
return self._resource
def _predict(
self, message: Message
) -> Optional[Dict[Text, Union[tf.Tensor, Dict[Text, tf.Tensor]]]]:
if self.model is None:
logger.debug(
f"There is no trained model for '{self.__class__.__name__}': The "
f"component is either not trained or didn't receive enough training "
f"data."
)
return None
# create session data from message and convert it into a batch of 1
model_data = self._create_model_data([message], training=False)
if model_data.is_empty():
return None
return self.model.run_inference(model_data)
def _predict_label(
self, predict_out: Optional[Dict[Text, tf.Tensor]]
) -> Tuple[Dict[Text, Any], List[Dict[Text, Any]]]:
label: Dict[Text, Any] = {"name": None, "confidence": 0.0}
label_ranking = []
if predict_out is None:
return label, label_ranking
message_sim = predict_out["i_scores"]
message_sim = message_sim.flatten() # sim is a matrix
# if X contains all zeros do not predict some label
if message_sim.size == 0:
return label, label_ranking
# rank the confidences
ranking_length = self.component_config[RANKING_LENGTH]
renormalize = (
self.component_config[RENORMALIZE_CONFIDENCES]
and self.component_config[MODEL_CONFIDENCE] == SOFTMAX
)
ranked_label_indices, message_sim = train_utils.rank_and_mask(
message_sim, ranking_length=ranking_length, renormalize=renormalize
)
# construct the label and ranking
casted_message_sim: List[float] = message_sim.tolist() # np.float to float
top_label_idx = ranked_label_indices[0]
label = {
"name": self.index_label_id_mapping[top_label_idx],
"confidence": casted_message_sim[top_label_idx],
}
ranking = [(idx, casted_message_sim[idx]) for idx in ranked_label_indices]
label_ranking = [
{"name": self.index_label_id_mapping[label_idx], "confidence": score}
for label_idx, score in ranking
]
return label, label_ranking
def _predict_entities(
self, predict_out: Optional[Dict[Text, tf.Tensor]], message: Message
) -> List[Dict]:
if predict_out is None:
return []
predicted_tags, confidence_values = train_utils.entity_label_to_tags(
predict_out, self._entity_tag_specs, self.component_config[BILOU_FLAG]
)
entities = self.convert_predictions_into_entities(
message.get(TEXT),
message.get(TOKENS_NAMES[TEXT], []),
predicted_tags,
self.split_entities_config,
confidence_values,
)
entities = self.add_extractor_name(entities)
entities = message.get(ENTITIES, []) + entities
return entities
def process(self, messages: List[Message]) -> List[Message]:
for message in messages:
out = self._predict(message)
if self.component_config[INTENT_CLASSIFICATION]:
label, label_ranking = self._predict_label(out)
message.set(INTENT, label, add_to_output=True)
message.set("intent_ranking", label_ranking, add_to_output=True)
if self.component_config[ENTITY_RECOGNITION]:
entities = self._predict_entities(out, message)
message.set(ENTITIES, entities, add_to_output=True)
if out and self._execution_context.should_add_diagnostic_data:
message.add_diagnostic_data(
self._execution_context.node_name, out.get(DIAGNOSTIC_DATA)
)
return messages
def persist(self) -> None:
if self.model is None:
return None
with self._model_storage.write_to(self._resource) as model_path:
file_name = self.__class__.__name__
tf_model_file = model_path / f"{file_name}.tf_model"
rasa.shared.utils.io.create_directory_for_file(tf_model_file)
if self.component_config[CHECKPOINT_MODEL] and self.tmp_checkpoint_dir:
self.model.load_weights(self.tmp_checkpoint_dir / "checkpoint.tf_model")
# Save an empty file to flag that this model has been
# produced using checkpointing
checkpoint_marker = model_path / f"{file_name}.from_checkpoint.pkl"
checkpoint_marker.touch()
self.model.save(str(tf_model_file))
io_utils.pickle_dump(
model_path / f"{file_name}.data_example.pkl", self._data_example
)
io_utils.pickle_dump(
model_path / f"{file_name}.sparse_feature_sizes.pkl",
self._sparse_feature_sizes,
)
io_utils.pickle_dump(
model_path / f"{file_name}.label_data.pkl", dict(self._label_data.data)
)
io_utils.json_pickle(
model_path / f"{file_name}.index_label_id_mapping.json",
self.index_label_id_mapping,
)
entity_tag_specs = (
[tag_spec._asdict() for tag_spec in self._entity_tag_specs]
if self._entity_tag_specs
else []
)
rasa.shared.utils.io.dump_obj_as_json_to_file(
model_path / f"{file_name}.entity_tag_specs.json", entity_tag_specs
)
@classmethod
def load(
cls,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
**kwargs: Any,
) -> DIETClassifier:
try:
with model_storage.read_from(resource) as model_path:
return cls._load(
model_path, config, model_storage, resource, execution_context
)
except ValueError:
logger.debug(
f"Failed to load {cls.__class__.__name__} from model storage. Resource "
f"'{resource.name}' doesn't exist."
)
return cls(config, model_storage, resource, execution_context)
@classmethod
def _load(
cls,
model_path: Path,
config: Dict[Text, Any],
model_storage: ModelStorage,
resource: Resource,
execution_context: ExecutionContext,
) -> "DIETClassifier":
(
index_label_id_mapping,
entity_tag_specs,
label_data,
data_example,
sparse_feature_sizes,
) = cls._load_from_files(model_path)
config = train_utils.update_confidence_type(config)
config = train_utils.update_similarity_type(config)
model = cls._load_model(
entity_tag_specs,
label_data,
config,
data_example,
model_path,
finetune_mode=execution_context.is_finetuning,
)
return cls(
config=config,
model_storage=model_storage,
resource=resource,
execution_context=execution_context,
index_label_id_mapping=index_label_id_mapping,
entity_tag_specs=entity_tag_specs,
model=model,
sparse_feature_sizes=sparse_feature_sizes,
)
@classmethod
def _load_from_files(
cls, model_path: Path
) -> Tuple[
Dict[int, Text],
List[EntityTagSpec],
RasaModelData,
Dict[Text, Dict[Text, List[FeatureArray]]],
Dict[Text, Dict[Text, List[int]]],
]:
file_name = cls.__name__
data_example = io_utils.pickle_load(
model_path / f"{file_name}.data_example.pkl"
)
label_data = io_utils.pickle_load(model_path / f"{file_name}.label_data.pkl")
label_data = RasaModelData(data=label_data)
sparse_feature_sizes = io_utils.pickle_load(
model_path / f"{file_name}.sparse_feature_sizes.pkl"
)
index_label_id_mapping = io_utils.json_unpickle(
model_path / f"{file_name}.index_label_id_mapping.json"
)
entity_tag_specs = rasa.shared.utils.io.read_json_file(
model_path / f"{file_name}.entity_tag_specs.json"
)
entity_tag_specs = [
EntityTagSpec(
tag_name=tag_spec["tag_name"],
ids_to_tags={
int(key): value for key, value in tag_spec["ids_to_tags"].items()
},
tags_to_ids={
key: int(value) for key, value in tag_spec["tags_to_ids"].items()
},
num_tags=tag_spec["num_tags"],
)
for tag_spec in entity_tag_specs
]
index_label_id_mapping = {
int(key): value for key, value in index_label_id_mapping.items()
}
return (
index_label_id_mapping,
entity_tag_specs,
label_data,
data_example,
sparse_feature_sizes,
)
@classmethod
def _load_model(
cls,
entity_tag_specs: List[EntityTagSpec],
label_data: RasaModelData,
config: Dict[Text, Any],
data_example: Dict[Text, Dict[Text, List[FeatureArray]]],
model_path: Path,
finetune_mode: bool = False,
) -> "RasaModel":
file_name = cls.__name__
tf_model_file = model_path / f"{file_name}.tf_model"
label_key = LABEL_KEY if config[INTENT_CLASSIFICATION] else None
label_sub_key = LABEL_SUB_KEY if config[INTENT_CLASSIFICATION] else None
model_data_example = RasaModelData(
label_key=label_key, label_sub_key=label_sub_key, data=data_example
)
model = cls._load_model_class(
tf_model_file,
model_data_example,
label_data,
entity_tag_specs,
config,
finetune_mode=finetune_mode,
)
return model
@classmethod
def _load_model_class(
cls,
tf_model_file: Text,
model_data_example: RasaModelData,
label_data: RasaModelData,
entity_tag_specs: List[EntityTagSpec],
config: Dict[Text, Any],
finetune_mode: bool,
) -> "RasaModel":
predict_data_example = RasaModelData(
label_key=model_data_example.label_key,
data={
feature_name: features
for feature_name, features in model_data_example.items()
if TEXT in feature_name
},
)
return cls.model_class().load(
tf_model_file,
model_data_example,
predict_data_example,
data_signature=model_data_example.get_signature(),
label_data=label_data,
entity_tag_specs=entity_tag_specs,
config=copy.deepcopy(config),
finetune_mode=finetune_mode,
)
def _instantiate_model_class(self, model_data: RasaModelData) -> "RasaModel":
return self.model_class()(
data_signature=model_data.get_signature(),
label_data=self._label_data,
entity_tag_specs=self._entity_tag_specs,
config=self.component_config,
)
class DIET(TransformerRasaModel):
def __init__(
self,
data_signature: Dict[Text, Dict[Text, List[FeatureSignature]]],
label_data: RasaModelData,
entity_tag_specs: Optional[List[EntityTagSpec]],
config: Dict[Text, Any],
) -> None:
super().__init__("DIET", config, data_signature, label_data)
self._entity_tag_specs = self._ordered_tag_specs(entity_tag_specs)
self.predict_data_signature = {
feature_name: features
for feature_name, features in data_signature.items()
if TEXT in feature_name
}
self._create_metrics()
self._update_metrics_to_log()
self.all_labels_embed: Optional[tf.Tensor] = None
self._prepare_layers()
@staticmethod
def _ordered_tag_specs(
entity_tag_specs: Optional[List[EntityTagSpec]],
) -> List[EntityTagSpec]:
if entity_tag_specs is None:
return []
crf_order = [
ENTITY_ATTRIBUTE_TYPE,
ENTITY_ATTRIBUTE_ROLE,
ENTITY_ATTRIBUTE_GROUP,
]
ordered_tag_spec = []
for tag_name in crf_order:
for tag_spec in entity_tag_specs:
if tag_name == tag_spec.tag_name:
ordered_tag_spec.append(tag_spec)
return ordered_tag_spec
def _check_data(self) -> None:
if TEXT not in self.data_signature:
raise InvalidConfigException(
f"No text features specified. "
f"Cannot train '{self.__class__.__name__}' model."
)
if self.config[INTENT_CLASSIFICATION]:
if LABEL not in self.data_signature:
raise InvalidConfigException(
f"No label features specified. "
f"Cannot train '{self.__class__.__name__}' model."
)
if self.config[SHARE_HIDDEN_LAYERS]:
different_sentence_signatures = False
different_sequence_signatures = False
if (
SENTENCE in self.data_signature[TEXT]
and SENTENCE in self.data_signature[LABEL]
):
different_sentence_signatures = (
self.data_signature[TEXT][SENTENCE]
!= self.data_signature[LABEL][SENTENCE]
)
if (
SEQUENCE in self.data_signature[TEXT]
and SEQUENCE in self.data_signature[LABEL]
):
different_sequence_signatures = (
self.data_signature[TEXT][SEQUENCE]
!= self.data_signature[LABEL][SEQUENCE]
)
if different_sentence_signatures or different_sequence_signatures:
raise ValueError(
"If hidden layer weights are shared, data signatures "
"for text_features and label_features must coincide."
)
if self.config[ENTITY_RECOGNITION] and (
ENTITIES not in self.data_signature
or ENTITY_ATTRIBUTE_TYPE not in self.data_signature[ENTITIES]
):
logger.debug(
f"You specified '{self.__class__.__name__}' to train entities, but "
f"no entities are present in the training data. Skipping training of "
f"entities."
)
self.config[ENTITY_RECOGNITION] = False
def _create_metrics(self) -> None:
self.mask_loss = tf.keras.metrics.Mean(name="m_loss")
self.intent_loss = tf.keras.metrics.Mean(name="i_loss")
self.entity_loss = tf.keras.metrics.Mean(name="e_loss")
self.entity_group_loss = tf.keras.metrics.Mean(name="g_loss")
self.entity_role_loss = tf.keras.metrics.Mean(name="r_loss")
self.mask_acc = tf.keras.metrics.Mean(name="m_acc")
self.intent_acc = tf.keras.metrics.Mean(name="i_acc")
self.entity_f1 = tf.keras.metrics.Mean(name="e_f1")
self.entity_group_f1 = tf.keras.metrics.Mean(name="g_f1")
self.entity_role_f1 = tf.keras.metrics.Mean(name="r_f1")
def _update_metrics_to_log(self) -> None:
debug_log_level = logging.getLogger("rasa").level == logging.DEBUG
if self.config[MASKED_LM]:
self.metrics_to_log.append("m_acc")
if debug_log_level:
self.metrics_to_log.append("m_loss")
if self.config[INTENT_CLASSIFICATION]:
self.metrics_to_log.append("i_acc")
if debug_log_level:
self.metrics_to_log.append("i_loss")
if self.config[ENTITY_RECOGNITION]:
for tag_spec in self._entity_tag_specs:
if tag_spec.num_tags != 0:
name = tag_spec.tag_name
self.metrics_to_log.append(f"{name[0]}_f1")
if debug_log_level:
self.metrics_to_log.append(f"{name[0]}_loss")
self._log_metric_info()
def _log_metric_info(self) -> None:
metric_name = {
"t": "total",
"i": "intent",
"e": "entity",
"m": "mask",
"r": "role",
"g": "group",
}
logger.debug("Following metrics will be logged during training: ")
for metric in self.metrics_to_log:
parts = metric.split("_")
name = f"{metric_name[parts[0]]} {parts[1]}"
logger.debug(f" {metric} ({name})")
def _prepare_layers(self) -> None:
self.text_name = TEXT
self._tf_layers[
f"sequence_layer.{self.text_name}"
] = rasa_layers.RasaSequenceLayer(
self.text_name, self.data_signature[self.text_name], self.config
)
if self.config[MASKED_LM]:
self._prepare_mask_lm_loss(self.text_name)
if self.config[INTENT_CLASSIFICATION]:
self.label_name = TEXT if self.config[SHARE_HIDDEN_LAYERS] else LABEL
label_config = self.config.copy()
label_config.update(
{SPARSE_INPUT_DROPOUT: False, DENSE_INPUT_DROPOUT: False}
)
self._tf_layers[
f"feature_combining_layer.{self.label_name}"
] = rasa_layers.RasaFeatureCombiningLayer(
self.label_name, self.label_signature[self.label_name], label_config
)
self._prepare_ffnn_layer(
self.label_name,
self.config[HIDDEN_LAYERS_SIZES][self.label_name],
self.config[DROP_RATE],
)
self._prepare_label_classification_layers(predictor_attribute=TEXT)
if self.config[ENTITY_RECOGNITION]:
self._prepare_entity_recognition_layers()
def _prepare_mask_lm_loss(self, name: Text) -> None:
self._prepare_embed_layers(f"{name}_lm_mask")
self._prepare_embed_layers(f"{name}_golden_token")
self._prepare_dot_product_loss(f"{name}_mask", scale_loss=False)
def _create_bow(
self,
sequence_features: List[Union[tf.Tensor, tf.SparseTensor]],
sentence_features: List[Union[tf.Tensor, tf.SparseTensor]],
sequence_feature_lengths: tf.Tensor,
name: Text,
) -> tf.Tensor:
x, _ = self._tf_layers[f"feature_combining_layer.{name}"](
(sequence_features, sentence_features, sequence_feature_lengths),
training=self._training,
)
# convert to bag-of-words by summing along the sequence dimension
x = tf.reduce_sum(x, axis=1)
return self._tf_layers[f"ffnn.{name}"](x, self._training)
def _create_all_labels(self) -> Tuple[tf.Tensor, tf.Tensor]:
all_label_ids = self.tf_label_data[LABEL_KEY][LABEL_SUB_KEY][0]
sequence_feature_lengths = self._get_sequence_feature_lengths(
self.tf_label_data, LABEL
)
x = self._create_bow(
self.tf_label_data[LABEL][SEQUENCE],
self.tf_label_data[LABEL][SENTENCE],
sequence_feature_lengths,
self.label_name,
)
all_labels_embed = self._tf_layers[f"embed.{LABEL}"](x)
return all_label_ids, all_labels_embed
def _mask_loss(
self,
outputs: tf.Tensor,
inputs: tf.Tensor,
seq_ids: tf.Tensor,
mlm_mask_boolean: tf.Tensor,
name: Text,
) -> tf.Tensor:
# make sure there is at least one element in the mask
mlm_mask_boolean = tf.cond(
tf.reduce_any(mlm_mask_boolean),
lambda: mlm_mask_boolean,
lambda: tf.scatter_nd([[0, 0, 0]], [True], tf.shape(mlm_mask_boolean)),
)
mlm_mask_boolean = tf.squeeze(mlm_mask_boolean, -1)
# Pick elements that were masked, throwing away the batch & sequence dimension
# and effectively switching from shape (batch_size, sequence_length, units) to
# (num_masked_elements, units).
outputs = tf.boolean_mask(outputs, mlm_mask_boolean)
inputs = tf.boolean_mask(inputs, mlm_mask_boolean)
ids = tf.boolean_mask(seq_ids, mlm_mask_boolean)
tokens_predicted_embed = self._tf_layers[f"embed.{name}_lm_mask"](outputs)
tokens_true_embed = self._tf_layers[f"embed.{name}_golden_token"](inputs)
# To limit the otherwise computationally expensive loss calculation, we
# constrain the label space in MLM (i.e. token space) to only those tokens that
# were masked in this batch. Hence the reduced list of token embeddings
# (tokens_true_embed) and the reduced list of labels (ids) are passed as
# all_labels_embed and all_labels, respectively. In the future, we could be less
# restrictive and construct a slightly bigger label space which could include
# tokens not masked in the current batch too.
return self._tf_layers[f"loss.{name}_mask"](
inputs_embed=tokens_predicted_embed,
labels_embed=tokens_true_embed,
labels=ids,
all_labels_embed=tokens_true_embed,
all_labels=ids,
)
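    # Sketch of the masking step above (illustrative values only):
    # tf.boolean_mask drops the batch and sequence dimensions together, e.g.
    #   x = tf.reshape(tf.range(12.0), (2, 3, 2))
    #   m = tf.constant([[True, False, True], [False, True, False]])
    #   tf.boolean_mask(x, m)  # shape (3, 2): one row per masked position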
def _calculate_label_loss(
self, text_features: tf.Tensor, label_features: tf.Tensor, label_ids: tf.Tensor
) -> tf.Tensor:
all_label_ids, all_labels_embed = self._create_all_labels()
text_embed = self._tf_layers[f"embed.{TEXT}"](text_features)
label_embed = self._tf_layers[f"embed.{LABEL}"](label_features)
return self._tf_layers[f"loss.{LABEL}"](
text_embed, label_embed, label_ids, all_labels_embed, all_label_ids
)
def batch_loss(
self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
) -> tf.Tensor:
tf_batch_data = self.batch_to_model_data_format(batch_in, self.data_signature)
sequence_feature_lengths = self._get_sequence_feature_lengths(
tf_batch_data, TEXT
)
(
text_transformed,
text_in,
mask_combined_sequence_sentence,
text_seq_ids,
mlm_mask_boolean_text,
_,
) = self._tf_layers[f"sequence_layer.{self.text_name}"](
(
tf_batch_data[TEXT][SEQUENCE],
tf_batch_data[TEXT][SENTENCE],
sequence_feature_lengths,
),
training=self._training,
)
losses = []
# Lengths of sequences in case of sentence-level features are always 1, but they
# can effectively be 0 if sentence-level features aren't present.
sentence_feature_lengths = self._get_sentence_feature_lengths(
tf_batch_data, TEXT
)
combined_sequence_sentence_feature_lengths = (
sequence_feature_lengths + sentence_feature_lengths
)
if self.config[MASKED_LM]:
loss, acc = self._mask_loss(
text_transformed, text_in, text_seq_ids, mlm_mask_boolean_text, TEXT
)
self.mask_loss.update_state(loss)
self.mask_acc.update_state(acc)
losses.append(loss)
if self.config[INTENT_CLASSIFICATION]:
loss = self._batch_loss_intent(
combined_sequence_sentence_feature_lengths,
text_transformed,
tf_batch_data,
)
losses.append(loss)
if self.config[ENTITY_RECOGNITION]:
losses += self._batch_loss_entities(
mask_combined_sequence_sentence,
sequence_feature_lengths,
text_transformed,
tf_batch_data,
)
return tf.math.add_n(losses)
def _batch_loss_intent(
self,
combined_sequence_sentence_feature_lengths_text: tf.Tensor,
text_transformed: tf.Tensor,
tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],
) -> tf.Tensor:
sentence_vector = self._last_token(
text_transformed, combined_sequence_sentence_feature_lengths_text
)
sequence_feature_lengths_label = self._get_sequence_feature_lengths(
tf_batch_data, LABEL
)
label_ids = tf_batch_data[LABEL_KEY][LABEL_SUB_KEY][0]
label = self._create_bow(
tf_batch_data[LABEL][SEQUENCE],
tf_batch_data[LABEL][SENTENCE],
sequence_feature_lengths_label,
self.label_name,
)
loss, acc = self._calculate_label_loss(sentence_vector, label, label_ids)
self._update_label_metrics(loss, acc)
return loss
def _update_label_metrics(self, loss: tf.Tensor, acc: tf.Tensor) -> None:
self.intent_loss.update_state(loss)
self.intent_acc.update_state(acc)
def _batch_loss_entities(
self,
mask_combined_sequence_sentence: tf.Tensor,
sequence_feature_lengths: tf.Tensor,
text_transformed: tf.Tensor,
tf_batch_data: Dict[Text, Dict[Text, List[tf.Tensor]]],
) -> List[tf.Tensor]:
losses = []
entity_tags = None
for tag_spec in self._entity_tag_specs:
if tag_spec.num_tags == 0:
continue
tag_ids = tf_batch_data[ENTITIES][tag_spec.tag_name][0]
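            # Pad one extra all-zero (i.e. no-entity) step along the sequence
            # axis; presumably this keeps tag_ids aligned with
            # text_transformed, which also covers the sentence-feature slot.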
tag_ids = tf.pad(tag_ids, [[0, 0], [0, 1], [0, 0]])
loss, f1, _logits = self._calculate_entity_loss(
text_transformed,
tag_ids,
mask_combined_sequence_sentence,
sequence_feature_lengths,
tag_spec.tag_name,
entity_tags,
)
if tag_spec.tag_name == ENTITY_ATTRIBUTE_TYPE:
entity_tags = tf.one_hot(
tf.cast(tag_ids[:, :, 0], tf.int32), depth=tag_spec.num_tags
)
self._update_entity_metrics(loss, f1, tag_spec.tag_name)
losses.append(loss)
return losses
def _update_entity_metrics(
self, loss: tf.Tensor, f1: tf.Tensor, tag_name: Text
) -> None:
if tag_name == ENTITY_ATTRIBUTE_TYPE:
self.entity_loss.update_state(loss)
self.entity_f1.update_state(f1)
elif tag_name == ENTITY_ATTRIBUTE_GROUP:
self.entity_group_loss.update_state(loss)
self.entity_group_f1.update_state(f1)
elif tag_name == ENTITY_ATTRIBUTE_ROLE:
self.entity_role_loss.update_state(loss)
self.entity_role_f1.update_state(f1)
def prepare_for_predict(self) -> None:
if self.config[INTENT_CLASSIFICATION]:
_, self.all_labels_embed = self._create_all_labels()
def batch_predict(
self, batch_in: Union[Tuple[tf.Tensor], Tuple[np.ndarray]]
) -> Dict[Text, tf.Tensor]:
tf_batch_data = self.batch_to_model_data_format(
batch_in, self.predict_data_signature
)
sequence_feature_lengths = self._get_sequence_feature_lengths(
tf_batch_data, TEXT
)
sentence_feature_lengths = self._get_sentence_feature_lengths(
tf_batch_data, TEXT
)
text_transformed, _, _, _, _, attention_weights = self._tf_layers[
f"sequence_layer.{self.text_name}"
](
(
tf_batch_data[TEXT][SEQUENCE],
tf_batch_data[TEXT][SENTENCE],
sequence_feature_lengths,
),
training=self._training,
)
predictions = {
DIAGNOSTIC_DATA: {
"attention_weights": attention_weights,
"text_transformed": text_transformed,
}
}
if self.config[INTENT_CLASSIFICATION]:
predictions.update(
self._batch_predict_intents(
sequence_feature_lengths + sentence_feature_lengths,
text_transformed,
)
)
if self.config[ENTITY_RECOGNITION]:
predictions.update(
self._batch_predict_entities(sequence_feature_lengths, text_transformed)
)
return predictions
def _batch_predict_entities(
self, sequence_feature_lengths: tf.Tensor, text_transformed: tf.Tensor
) -> Dict[Text, tf.Tensor]:
predictions: Dict[Text, tf.Tensor] = {}
entity_tags = None
for tag_spec in self._entity_tag_specs:
if tag_spec.num_tags == 0:
continue
name = tag_spec.tag_name
_input = text_transformed
if entity_tags is not None:
_tags = self._tf_layers[f"embed.{name}.tags"](entity_tags)
_input = tf.concat([_input, _tags], axis=-1)
_logits = self._tf_layers[f"embed.{name}.logits"](_input)
pred_ids, confidences = self._tf_layers[f"crf.{name}"](
_logits, sequence_feature_lengths
)
predictions[f"e_{name}_ids"] = pred_ids
predictions[f"e_{name}_scores"] = confidences
if name == ENTITY_ATTRIBUTE_TYPE:
entity_tags = tf.one_hot(
tf.cast(pred_ids, tf.int32), depth=tag_spec.num_tags
)
return predictions
def _batch_predict_intents(
self,
combined_sequence_sentence_feature_lengths: tf.Tensor,
text_transformed: tf.Tensor,
) -> Dict[Text, tf.Tensor]:
if self.all_labels_embed is None:
raise ValueError(
"The model was not prepared for prediction. "
"Call `prepare_for_predict` first."
)
sentence_vector = self._last_token(
text_transformed, combined_sequence_sentence_feature_lengths
)
sentence_vector_embed = self._tf_layers[f"embed.{TEXT}"](sentence_vector)
_, scores = self._tf_layers[
f"loss.{LABEL}"
].get_similarities_and_confidences_from_embeddings(
sentence_vector_embed[:, tf.newaxis, :],
self.all_labels_embed[tf.newaxis, :, :],
)
return {"i_scores": scores}
| true
| true
|
79080353a2b4abcea79550828a093d0dd73b34c5
| 2,964
|
py
|
Python
|
src/imagedata/transports/abstracttransport.py
|
erling6232/imagedata
|
69226b317ff43eb52ed48503582e5770bcb47ec4
|
[
"MIT"
] | 1
|
2021-09-02T07:20:19.000Z
|
2021-09-02T07:20:19.000Z
|
src/imagedata/transports/abstracttransport.py
|
erling6232/imagedata
|
69226b317ff43eb52ed48503582e5770bcb47ec4
|
[
"MIT"
] | 3
|
2018-02-28T09:54:21.000Z
|
2022-03-22T10:05:39.000Z
|
src/imagedata/transports/abstracttransport.py
|
erling6232/imagedata
|
69226b317ff43eb52ed48503582e5770bcb47ec4
|
[
"MIT"
] | null | null | null |
"""Abstract class for image transports.
Defines generic functions.
"""
# Copyright (c) 2018 Erling Andersen, Haukeland University Hospital, Bergen, Norway
from abc import ABCMeta, abstractmethod # , abstractproperty
# import imagedata.transports
class NoOtherInstance(Exception):
pass
class AbstractTransport(object, metaclass=ABCMeta):
"""Abstract base class definition for imagedata transport plugins.
    Plugins must be a subclass of AbstractTransport and
    must define the attributes set in __init__() and
    the following methods:
    open() method
    isfile() method
    walk() method
    close() method
    info() method
"""
plugin_type = 'transport'
def __init__(self, name, description, authors, version, url, schemes):
object.__init__(self)
self.__name = name
self.__description = description
self.__authors = authors
self.__version = version
self.__url = url
self.__schemes = schemes
@property
def name(self):
"""Plugin name
    Single word string describing the transport method.
Typical names: file, dicom, xnat
"""
return self.__name
@property
def description(self):
"""Plugin description
Single line string describing the transport method.
"""
return self.__description
@property
def authors(self):
"""Plugin authors
Multi-line string naming the author(s) of the plugin.
"""
return self.__authors
@property
def version(self):
"""Plugin version
String giving the plugin version.
Version scheme: 1.0.0
"""
return self.__version
@property
def url(self):
"""Plugin URL
URL string to the site of the plugin or the author(s).
"""
return self.__url
@property
def schemes(self):
"""List of transport schemes supported by this plugin.
List of strings.
"""
return self.__schemes
@abstractmethod
def walk(self, top):
"""Generate the file names in a directory tree by walking the tree.
Input:
- top: starting point for walk (str)
Return:
- tuples of (root, dirs, files)
"""
pass
@abstractmethod
def isfile(self, path):
"""Return True if path is an existing regular file.
"""
pass
@abstractmethod
def open(self, path, mode='r'):
"""Extract a member from the archive as a file-like object.
"""
pass
@abstractmethod
def close(self):
"""Close the transport
"""
pass
@abstractmethod
def info(self, path) -> str:
"""Return info describing the object
Args:
path (str): object path
Returns:
description (str): Preferably a one-line string describing the object
"""
pass
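# --- Illustrative only: a minimal concrete transport sketch ----------------
# The class below is NOT part of imagedata; it is a hypothetical example of
# how a plugin could satisfy the abstract interface above, assuming plain
# local-filesystem access. All names here are made up for illustration.
import os

class LocalExampleTransport(AbstractTransport):
    def __init__(self):
        super(LocalExampleTransport, self).__init__(
            name='local', description='Example local-file transport',
            authors='N.N.', version='1.0.0',
            url='https://example.invalid', schemes=['file'])

    def walk(self, top):
        # Plain directory traversal; yields (root, dirs, files) tuples
        for root, dirs, files in os.walk(top):
            yield root, dirs, files

    def isfile(self, path):
        return os.path.isfile(path)

    def open(self, path, mode='r'):
        # Resolves to the builtin open(), not this method
        return open(path, mode)

    def close(self):
        pass  # no persistent connection to release for local files

    def info(self, path) -> str:
        return '%s: %d bytes' % (path, os.path.getsize(path))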
| 23.52381
| 83
| 0.592105
|
from abc import ABCMeta, abstractmethod
class NoOtherInstance(Exception):
pass
class AbstractTransport(object, metaclass=ABCMeta):
plugin_type = 'transport'
def __init__(self, name, description, authors, version, url, schemes):
object.__init__(self)
self.__name = name
self.__description = description
self.__authors = authors
self.__version = version
self.__url = url
self.__schemes = schemes
@property
def name(self):
return self.__name
@property
def description(self):
return self.__description
@property
def authors(self):
return self.__authors
@property
def version(self):
return self.__version
@property
def url(self):
return self.__url
@property
def schemes(self):
return self.__schemes
@abstractmethod
def walk(self, top):
pass
@abstractmethod
def isfile(self, path):
pass
@abstractmethod
def open(self, path, mode='r'):
pass
@abstractmethod
def close(self):
pass
@abstractmethod
def info(self, path) -> str:
pass
| true
| true
|
790803d1ca6e878f6c564a575b45f035b7ac69cb
| 4,789
|
py
|
Python
|
tests/pools/test_wallet_pool_store.py
|
duderino999/ceres-combineharvester
|
f63ab6c4d0e33c3b6550c1f5641f28ab2c68b001
|
[
"Apache-2.0"
] | 39
|
2021-08-04T14:49:27.000Z
|
2022-03-29T16:30:19.000Z
|
tests/pools/test_wallet_pool_store.py
|
rickguo216/ceres-combineharvester
|
e93b26a77b1fc4fe9de80d10f745b09a13f9c288
|
[
"Apache-2.0"
] | 30
|
2021-08-19T22:44:31.000Z
|
2022-03-29T19:09:26.000Z
|
tests/pools/test_wallet_pool_store.py
|
rickguo216/ceres-combineharvester
|
e93b26a77b1fc4fe9de80d10f745b09a13f9c288
|
[
"Apache-2.0"
] | 23
|
2021-08-07T07:33:20.000Z
|
2022-03-27T11:15:00.000Z
|
import asyncio
from pathlib import Path
from secrets import token_bytes
from typing import Optional
import aiosqlite
import pytest
from clvm_tools import binutils
from ceres.types.blockchain_format.coin import Coin
from ceres.types.blockchain_format.program import Program, SerializedProgram
from ceres.types.blockchain_format.sized_bytes import bytes32
from ceres.types.coin_spend import CoinSpend
from ceres.util.db_wrapper import DBWrapper
from ceres.util.ints import uint64
from ceres.wallet.wallet_pool_store import WalletPoolStore
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
def make_child_solution(coin_spend: CoinSpend, new_coin: Optional[Coin] = None) -> CoinSpend:
new_puzzle_hash: bytes32 = token_bytes(32)
solution = "()"
puzzle = f"(q . ((51 0x{new_puzzle_hash.hex()} 1)))"
puzzle_prog = Program.to(binutils.assemble(puzzle))
solution_prog = Program.to(binutils.assemble(solution))
if new_coin is None:
new_coin = coin_spend.additions()[0]
sol: CoinSpend = CoinSpend(
new_coin,
SerializedProgram.from_program(puzzle_prog),
SerializedProgram.from_program(solution_prog),
)
return sol
class TestWalletPoolStore:
@pytest.mark.asyncio
async def test_store(self):
db_filename = Path("wallet_store_test.db")
if db_filename.exists():
db_filename.unlink()
db_connection = await aiosqlite.connect(db_filename)
db_wrapper = DBWrapper(db_connection)
store = await WalletPoolStore.create(db_wrapper)
try:
await db_wrapper.begin_transaction()
coin_0 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
coin_0_alt = Coin(token_bytes(32), token_bytes(32), uint64(12312))
solution_0: CoinSpend = make_child_solution(None, coin_0)
solution_0_alt: CoinSpend = make_child_solution(None, coin_0_alt)
solution_1: CoinSpend = make_child_solution(solution_0)
assert store.get_spends_for_wallet(0) == []
assert store.get_spends_for_wallet(1) == []
await store.add_spend(1, solution_1, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
# Idempotent
await store.add_spend(1, solution_1, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
with pytest.raises(ValueError):
await store.add_spend(1, solution_1, 101)
# Rebuild cache, no longer present
await db_wrapper.rollback_transaction()
await store.rebuild_cache()
assert store.get_spends_for_wallet(1) == []
await store.rebuild_cache()
await store.add_spend(1, solution_1, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
solution_1_alt: CoinSpend = make_child_solution(solution_0_alt)
with pytest.raises(ValueError):
await store.add_spend(1, solution_1_alt, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
solution_2: CoinSpend = make_child_solution(solution_1)
await store.add_spend(1, solution_2, 100)
await store.rebuild_cache()
solution_3: CoinSpend = make_child_solution(solution_2)
await store.add_spend(1, solution_3, 100)
solution_4: CoinSpend = make_child_solution(solution_3)
with pytest.raises(ValueError):
await store.add_spend(1, solution_4, 99)
await store.rebuild_cache()
await store.add_spend(1, solution_4, 101)
await store.rebuild_cache()
await store.rollback(101, 1)
await store.rebuild_cache()
assert store.get_spends_for_wallet(1) == [
(100, solution_1),
(100, solution_2),
(100, solution_3),
(101, solution_4),
]
await store.rebuild_cache()
await store.rollback(100, 1)
await store.rebuild_cache()
assert store.get_spends_for_wallet(1) == [
(100, solution_1),
(100, solution_2),
(100, solution_3),
]
with pytest.raises(ValueError):
await store.add_spend(1, solution_1, 105)
await store.add_spend(1, solution_4, 105)
solution_5: CoinSpend = make_child_solution(solution_4)
await store.add_spend(1, solution_5, 105)
await store.rollback(99, 1)
assert store.get_spends_for_wallet(1) == []
finally:
await db_connection.close()
db_filename.unlink()
| 36.557252
| 93
| 0.640426
|
import asyncio
from pathlib import Path
from secrets import token_bytes
from typing import Optional
import aiosqlite
import pytest
from clvm_tools import binutils
from ceres.types.blockchain_format.coin import Coin
from ceres.types.blockchain_format.program import Program, SerializedProgram
from ceres.types.blockchain_format.sized_bytes import bytes32
from ceres.types.coin_spend import CoinSpend
from ceres.util.db_wrapper import DBWrapper
from ceres.util.ints import uint64
from ceres.wallet.wallet_pool_store import WalletPoolStore
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
def make_child_solution(coin_spend: CoinSpend, new_coin: Optional[Coin] = None) -> CoinSpend:
new_puzzle_hash: bytes32 = token_bytes(32)
solution = "()"
puzzle = f"(q . ((51 0x{new_puzzle_hash.hex()} 1)))"
puzzle_prog = Program.to(binutils.assemble(puzzle))
solution_prog = Program.to(binutils.assemble(solution))
if new_coin is None:
new_coin = coin_spend.additions()[0]
sol: CoinSpend = CoinSpend(
new_coin,
SerializedProgram.from_program(puzzle_prog),
SerializedProgram.from_program(solution_prog),
)
return sol
class TestWalletPoolStore:
@pytest.mark.asyncio
async def test_store(self):
db_filename = Path("wallet_store_test.db")
if db_filename.exists():
db_filename.unlink()
db_connection = await aiosqlite.connect(db_filename)
db_wrapper = DBWrapper(db_connection)
store = await WalletPoolStore.create(db_wrapper)
try:
await db_wrapper.begin_transaction()
coin_0 = Coin(token_bytes(32), token_bytes(32), uint64(12312))
coin_0_alt = Coin(token_bytes(32), token_bytes(32), uint64(12312))
solution_0: CoinSpend = make_child_solution(None, coin_0)
solution_0_alt: CoinSpend = make_child_solution(None, coin_0_alt)
solution_1: CoinSpend = make_child_solution(solution_0)
assert store.get_spends_for_wallet(0) == []
assert store.get_spends_for_wallet(1) == []
await store.add_spend(1, solution_1, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
await store.add_spend(1, solution_1, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
with pytest.raises(ValueError):
await store.add_spend(1, solution_1, 101)
await db_wrapper.rollback_transaction()
await store.rebuild_cache()
assert store.get_spends_for_wallet(1) == []
await store.rebuild_cache()
await store.add_spend(1, solution_1, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
solution_1_alt: CoinSpend = make_child_solution(solution_0_alt)
with pytest.raises(ValueError):
await store.add_spend(1, solution_1_alt, 100)
assert store.get_spends_for_wallet(1) == [(100, solution_1)]
solution_2: CoinSpend = make_child_solution(solution_1)
await store.add_spend(1, solution_2, 100)
await store.rebuild_cache()
solution_3: CoinSpend = make_child_solution(solution_2)
await store.add_spend(1, solution_3, 100)
solution_4: CoinSpend = make_child_solution(solution_3)
with pytest.raises(ValueError):
await store.add_spend(1, solution_4, 99)
await store.rebuild_cache()
await store.add_spend(1, solution_4, 101)
await store.rebuild_cache()
await store.rollback(101, 1)
await store.rebuild_cache()
assert store.get_spends_for_wallet(1) == [
(100, solution_1),
(100, solution_2),
(100, solution_3),
(101, solution_4),
]
await store.rebuild_cache()
await store.rollback(100, 1)
await store.rebuild_cache()
assert store.get_spends_for_wallet(1) == [
(100, solution_1),
(100, solution_2),
(100, solution_3),
]
with pytest.raises(ValueError):
await store.add_spend(1, solution_1, 105)
await store.add_spend(1, solution_4, 105)
solution_5: CoinSpend = make_child_solution(solution_4)
await store.add_spend(1, solution_5, 105)
await store.rollback(99, 1)
assert store.get_spends_for_wallet(1) == []
finally:
await db_connection.close()
db_filename.unlink()
| true
| true
|
7908048c54c17fc631ebe3c58b705e6febe60f67
| 5,593
|
py
|
Python
|
google-cloud-sdk/lib/googlecloudsdk/command_lib/crash_handling.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/lib/googlecloudsdk/command_lib/crash_handling.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/lib/googlecloudsdk/command_lib/crash_handling.py
|
bopopescu/searchparty
|
afdc2805cb1b77bd5ac9fdd1a76217f4841f0ea6
|
[
"Apache-2.0"
] | 3
|
2017-07-27T18:44:13.000Z
|
2020-07-25T17:48:53.000Z
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Error Reporting Handler."""
import sys
import traceback
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.error_reporting import util
from googlecloudsdk.api_lib.util import apis as core_apis
from googlecloudsdk.calliope import backend
from googlecloudsdk.command_lib import error_reporting_util
from googlecloudsdk.core import config
from googlecloudsdk.core import http
from googlecloudsdk.core import log
from googlecloudsdk.core import metrics
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
def _IsInstallationCorruption(err):
"""Determines if the error may be from installation corruption.
Args:
err: Exception err.
Returns:
bool, True if installation error, False otherwise
"""
return (isinstance(err, backend.CommandLoadFailure) and
isinstance(err.root_exception, ImportError))
def _PrintInstallationAction(err, err_string):
"""Prompts installation error action.
Args:
err: Exception err.
err_string: Exception err string.
"""
# This usually indicates installation corruption.
# We do want to suggest `gcloud components reinstall` here (ex. as opposed
# to the similar message in gcloud.py), because there's a good chance it'll
# work (rather than a manual reinstall).
# Don't suggest `gcloud feedback`, because this is probably an
# installation problem.
log.error(
('gcloud failed to load ({0}): {1}\n\n'
'This usually indicates corruption in your gcloud installation or '
'problems with your Python interpreter.\n\n'
'Please verify that the following is the path to a working Python 2.7 '
'executable:\n'
' {2}\n'
'If it is not, please set the CLOUDSDK_PYTHON environment variable to '
'point to a working Python 2.7 executable.\n\n'
'If you are still experiencing problems, please run the following '
'command to reinstall:\n'
' $ gcloud components reinstall\n\n'
'If that command fails, please reinstall the Cloud SDK using the '
'instructions here:\n'
' https://cloud.google.com/sdk/'
).format(err.command, err_string, sys.executable))
CRASH_SERVICE = 'gcloud'
ERROR_SERVICE = 'gcloud-user-error'
CRASH_PROJECT = 'cloud-sdk-errors'
CRASH_API_KEY = 'AIzaSyA45D7bA0Y1vyLmQ_Gl10G149M8jiwwK-s'
def _GetReportingClient():
"""Returns a client that uses an API key for Cloud SDK crash reports.
Returns:
An error reporting client that uses an API key for Cloud SDK crash reports.
"""
client_class = core_apis.GetClientClass(util.API_NAME, util.API_VERSION)
client_instance = client_class(get_credentials=False, http=http.Http())
client_instance.AddGlobalParam('key', CRASH_API_KEY)
return client_instance
def ReportError(err, is_crash):
"""Report the anonymous crash information to the Error Reporting service.
Args:
err: Exception, the error that caused the crash.
is_crash: bool, True if this is a crash, False if it is a user error.
"""
if properties.VALUES.core.disable_usage_reporting.GetBool():
return
  # Note: format_exc() takes no exception argument; it formats the exception
  # currently being handled, so `err` must not be passed here.
  stacktrace = traceback.format_exc()
stacktrace = error_reporting_util.RemovePrivateInformationFromTraceback(
stacktrace)
command = properties.VALUES.metrics.command_name.Get()
cid = metrics.GetCIDIfMetricsEnabled()
client = _GetReportingClient()
reporter = util.ErrorReporting(client)
try:
method_config = client.projects_events.GetMethodConfig('Report')
request = reporter.GenerateReportRequest(
error_message=stacktrace,
service=CRASH_SERVICE if is_crash else ERROR_SERVICE,
version=config.CLOUD_SDK_VERSION, project=CRASH_PROJECT,
request_url=command, user=cid)
http_request = client.projects_events.PrepareHttpRequest(
method_config, request)
metrics.CustomBeacon(http_request.url, http_request.http_method,
http_request.body, http_request.headers)
except apitools_exceptions.Error as e:
log.file_only_logger.error(
'Unable to report crash stacktrace:\n{0}'.format(
console_attr.EncodeForConsole(e)))
def HandleGcloudCrash(err):
"""Checks if installation error occurred, then proceeds with Error Reporting.
Args:
err: Exception err.
"""
err_string = console_attr.EncodeForConsole(err)
log.file_only_logger.exception('BEGIN CRASH STACKTRACE')
if _IsInstallationCorruption(err):
_PrintInstallationAction(err, err_string)
else:
log.error(u'gcloud crashed ({0}): {1}'.format(
getattr(err, 'error_name', type(err).__name__), err_string))
ReportError(err, is_crash=True)
log.err.Print('\nIf you would like to report this issue, please run the '
'following command:')
log.err.Print(' gcloud feedback')
log.err.Print('\nTo check gcloud for common problems, please run the '
'following command:')
log.err.Print(' gcloud info --run-diagnostics')
| 37.039735
| 79
| 0.734311
|
import sys
import traceback
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.error_reporting import util
from googlecloudsdk.api_lib.util import apis as core_apis
from googlecloudsdk.calliope import backend
from googlecloudsdk.command_lib import error_reporting_util
from googlecloudsdk.core import config
from googlecloudsdk.core import http
from googlecloudsdk.core import log
from googlecloudsdk.core import metrics
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_attr
def _IsInstallationCorruption(err):
return (isinstance(err, backend.CommandLoadFailure) and
isinstance(err.root_exception, ImportError))
def _PrintInstallationAction(err, err_string):
# installation problem.
log.error(
('gcloud failed to load ({0}): {1}\n\n'
'This usually indicates corruption in your gcloud installation or '
'problems with your Python interpreter.\n\n'
'Please verify that the following is the path to a working Python 2.7 '
'executable:\n'
' {2}\n'
'If it is not, please set the CLOUDSDK_PYTHON environment variable to '
'point to a working Python 2.7 executable.\n\n'
'If you are still experiencing problems, please run the following '
'command to reinstall:\n'
' $ gcloud components reinstall\n\n'
'If that command fails, please reinstall the Cloud SDK using the '
'instructions here:\n'
' https://cloud.google.com/sdk/'
).format(err.command, err_string, sys.executable))
CRASH_SERVICE = 'gcloud'
ERROR_SERVICE = 'gcloud-user-error'
CRASH_PROJECT = 'cloud-sdk-errors'
CRASH_API_KEY = 'AIzaSyA45D7bA0Y1vyLmQ_Gl10G149M8jiwwK-s'
def _GetReportingClient():
client_class = core_apis.GetClientClass(util.API_NAME, util.API_VERSION)
client_instance = client_class(get_credentials=False, http=http.Http())
client_instance.AddGlobalParam('key', CRASH_API_KEY)
return client_instance
def ReportError(err, is_crash):
if properties.VALUES.core.disable_usage_reporting.GetBool():
return
  stacktrace = traceback.format_exc()
stacktrace = error_reporting_util.RemovePrivateInformationFromTraceback(
stacktrace)
command = properties.VALUES.metrics.command_name.Get()
cid = metrics.GetCIDIfMetricsEnabled()
client = _GetReportingClient()
reporter = util.ErrorReporting(client)
try:
method_config = client.projects_events.GetMethodConfig('Report')
request = reporter.GenerateReportRequest(
error_message=stacktrace,
service=CRASH_SERVICE if is_crash else ERROR_SERVICE,
version=config.CLOUD_SDK_VERSION, project=CRASH_PROJECT,
request_url=command, user=cid)
http_request = client.projects_events.PrepareHttpRequest(
method_config, request)
metrics.CustomBeacon(http_request.url, http_request.http_method,
http_request.body, http_request.headers)
except apitools_exceptions.Error as e:
log.file_only_logger.error(
'Unable to report crash stacktrace:\n{0}'.format(
console_attr.EncodeForConsole(e)))
def HandleGcloudCrash(err):
err_string = console_attr.EncodeForConsole(err)
log.file_only_logger.exception('BEGIN CRASH STACKTRACE')
if _IsInstallationCorruption(err):
_PrintInstallationAction(err, err_string)
else:
log.error(u'gcloud crashed ({0}): {1}'.format(
getattr(err, 'error_name', type(err).__name__), err_string))
ReportError(err, is_crash=True)
log.err.Print('\nIf you would like to report this issue, please run the '
'following command:')
log.err.Print(' gcloud feedback')
log.err.Print('\nTo check gcloud for common problems, please run the '
'following command:')
log.err.Print(' gcloud info --run-diagnostics')
| true
| true
|
790804b88401ab86c218f1bb24640bb1070e042d
| 353,326
|
py
|
Python
|
src/sage/graphs/graph.py
|
cffbots/sage
|
226937dfa9b8b335e873c3c65a796ae1b0924ff2
|
[
"BSL-1.0"
] | null | null | null |
src/sage/graphs/graph.py
|
cffbots/sage
|
226937dfa9b8b335e873c3c65a796ae1b0924ff2
|
[
"BSL-1.0"
] | null | null | null |
src/sage/graphs/graph.py
|
cffbots/sage
|
226937dfa9b8b335e873c3c65a796ae1b0924ff2
|
[
"BSL-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
r"""
Undirected graphs
This module implements functions and operations involving undirected graphs.
{INDEX_OF_METHODS}
AUTHORS:
- Robert L. Miller (2006-10-22): initial version
- William Stein (2006-12-05): Editing
- Robert L. Miller (2007-01-13): refactoring, adjusting for NetworkX-0.33, fixed
plotting bugs (2007-01-23): basic tutorial, edge labels, loops, multiple
edges and arcs (2007-02-07): graph6 and sparse6 formats, matrix input
- Emily Kirkman (2007-02-11): added graph_border option to plot and show
- Robert L. Miller (2007-02-12): vertex color-maps, graph boundaries, graph6
helper functions in Cython
- Robert L. Miller Sage Days 3 (2007-02-17-21): 3d plotting in Tachyon
- Robert L. Miller (2007-02-25): display a partition
- Robert L. Miller (2007-02-28): associate arbitrary objects to vertices, edge
and arc label display (in 2d), edge coloring
- Robert L. Miller (2007-03-21): Automorphism group, isomorphism check,
canonical label
- Robert L. Miller (2007-06-07-09): NetworkX function wrapping
- Michael W. Hansen (2007-06-09): Topological sort generation
- Emily Kirkman, Robert L. Miller Sage Days 4: Finished wrapping NetworkX
- Emily Kirkman (2007-07-21): Genus (including circular planar, all embeddings
and all planar embeddings), all paths, interior paths
- Bobby Moretti (2007-08-12): fixed up plotting of graphs with edge colors
differentiated by label
- Jason Grout (2007-09-25): Added functions, bug fixes, and general enhancements
- Robert L. Miller (Sage Days 7): Edge labeled graph isomorphism
- Tom Boothby (Sage Days 7): Miscellaneous awesomeness
- Tom Boothby (2008-01-09): Added graphviz output
- David Joyner (2009-2): Fixed docstring bug related to GAP.
- Stephen Hartke (2009-07-26): Fixed bug in blocks_and_cut_vertices() that
caused an incorrect result when the vertex 0 was a cut vertex.
- Stephen Hartke (2009-08-22): Fixed bug in blocks_and_cut_vertices() where the
list of cut_vertices is not treated as a set.
- Anders Jonsson (2009-10-10): Counting of spanning trees and out-trees added.
- Nathann Cohen (2009-09) : Cliquer, Connectivity, Flows and everything that
uses Linear Programming and class numerical.MIP
- Nicolas M. Thiery (2010-02): graph layout code refactoring, dot2tex/graphviz
interface
- David Coudert (2012-04) : Reduction rules in vertex_cover.
- Birk Eisermann (2012-06): added recognition of weakly chordal graphs and
long-hole-free / long-antihole-free graphs
- Alexandre P. Zuge (2013-07): added join operation.
- Amritanshu Prasad (2014-08): added clique polynomial
- Julian Rüth (2018-06-21): upgrade to NetworkX 2
- David Coudert (2018-10-07): cleaning
- Amanda Francis, Caitlin Lienkaemper, Kate Collins, Rajat Mittal (2019-03-10):
methods for computing effective resistance
- Amanda Francis, Caitlin Lienkaemper, Kate Collins, Rajat Mittal (2019-03-19):
most_common_neighbors and common_neighbors_matrix added.
- Jean-Florent Raymond (2019-04): is_redundant, is_dominating,
private_neighbors
Graph Format
------------
Supported formats
~~~~~~~~~~~~~~~~~
Sage Graphs can be created from a wide range of inputs. A few examples are
covered here.
- NetworkX dictionary format:
::
sage: d = {0: [1,4,5], 1: [2,6], 2: [3,7], 3: [4,8], 4: [9], \
5: [7, 8], 6: [8,9], 7: [9]}
sage: G = Graph(d); G
Graph on 10 vertices
sage: G.plot().show() # or G.show()
- A NetworkX graph:
::
sage: import networkx
sage: K = networkx.complete_bipartite_graph(12,7)
sage: G = Graph(K)
sage: G.degree()
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 12, 12, 12, 12, 12, 12, 12]
- graph6 or sparse6 format:
::
sage: s = ':I`AKGsaOs`cI]Gb~'
sage: G = Graph(s, sparse=True); G
Looped multi-graph on 10 vertices
sage: G.plot().show() # or G.show()
Note that the ``\`` character is an escape character in Python, and also a
character used by graph6 strings:
::
sage: G = Graph('Ihe\n@GUA')
Traceback (most recent call last):
...
RuntimeError: the string (Ihe) seems corrupt: for n = 10, the string is too short
In Python, the escaped character ``\`` is represented by ``\\``:
::
sage: G = Graph('Ihe\\n@GUA')
sage: G.plot().show() # or G.show()
- adjacency matrix: In an adjacency matrix, each column and each row represent a
vertex. If a 1 shows up in row `i`, column `j`, there is an edge `(i,j)`.
::
sage: M = Matrix([(0,1,0,0,1,1,0,0,0,0),(1,0,1,0,0,0,1,0,0,0), \
(0,1,0,1,0,0,0,1,0,0), (0,0,1,0,1,0,0,0,1,0),(1,0,0,1,0,0,0,0,0,1), \
(1,0,0,0,0,0,0,1,1,0), (0,1,0,0,0,0,0,0,1,1),(0,0,1,0,0,1,0,0,0,1), \
(0,0,0,1,0,1,1,0,0,0), (0,0,0,0,1,0,1,1,0,0)])
sage: M
[0 1 0 0 1 1 0 0 0 0]
[1 0 1 0 0 0 1 0 0 0]
[0 1 0 1 0 0 0 1 0 0]
[0 0 1 0 1 0 0 0 1 0]
[1 0 0 1 0 0 0 0 0 1]
[1 0 0 0 0 0 0 1 1 0]
[0 1 0 0 0 0 0 0 1 1]
[0 0 1 0 0 1 0 0 0 1]
[0 0 0 1 0 1 1 0 0 0]
[0 0 0 0 1 0 1 1 0 0]
sage: G = Graph(M); G
Graph on 10 vertices
sage: G.plot().show() # or G.show()
- incidence matrix: In an incidence matrix, each row represents a vertex and
each column represents an edge.
::
sage: M = Matrix([(-1, 0, 0, 0, 1, 0, 0, 0, 0, 0,-1, 0, 0, 0, 0),
....: ( 1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0,-1, 0, 0, 0),
....: ( 0, 1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0,-1, 0, 0),
....: ( 0, 0, 1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0,-1, 0),
....: ( 0, 0, 0, 1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0,-1),
....: ( 0, 0, 0, 0, 0,-1, 0, 0, 0, 1, 1, 0, 0, 0, 0),
....: ( 0, 0, 0, 0, 0, 0, 0, 1,-1, 0, 0, 1, 0, 0, 0),
....: ( 0, 0, 0, 0, 0, 1,-1, 0, 0, 0, 0, 0, 1, 0, 0),
....: ( 0, 0, 0, 0, 0, 0, 0, 0, 1,-1, 0, 0, 0, 1, 0),
....: ( 0, 0, 0, 0, 0, 0, 1,-1, 0, 0, 0, 0, 0, 0, 1)])
sage: M
[-1 0 0 0 1 0 0 0 0 0 -1 0 0 0 0]
[ 1 -1 0 0 0 0 0 0 0 0 0 -1 0 0 0]
[ 0 1 -1 0 0 0 0 0 0 0 0 0 -1 0 0]
[ 0 0 1 -1 0 0 0 0 0 0 0 0 0 -1 0]
[ 0 0 0 1 -1 0 0 0 0 0 0 0 0 0 -1]
[ 0 0 0 0 0 -1 0 0 0 1 1 0 0 0 0]
[ 0 0 0 0 0 0 0 1 -1 0 0 1 0 0 0]
[ 0 0 0 0 0 1 -1 0 0 0 0 0 1 0 0]
[ 0 0 0 0 0 0 0 0 1 -1 0 0 0 1 0]
[ 0 0 0 0 0 0 1 -1 0 0 0 0 0 0 1]
sage: G = Graph(M); G
Graph on 10 vertices
sage: G.plot().show() # or G.show()
sage: DiGraph(matrix(2,[0,0,-1,1]), format="incidence_matrix")
Traceback (most recent call last):
...
ValueError: there must be two nonzero entries (-1 & 1) per column
- a list of edges::
sage: g = Graph([(1,3),(3,8),(5,2)])
sage: g
Graph on 5 vertices
- an igraph Graph::
sage: import igraph # optional - python_igraph
sage: g = Graph(igraph.Graph([(1,3),(3,2),(0,2)])) # optional - python_igraph
sage: g # optional - python_igraph
Graph on 4 vertices
Generators
----------
Use ``graphs(n)`` to iterate through all non-isomorphic graphs of given size::
sage: for g in graphs(4):
....: print(g.degree_sequence())
[0, 0, 0, 0]
[1, 1, 0, 0]
[2, 1, 1, 0]
[3, 1, 1, 1]
[1, 1, 1, 1]
[2, 2, 1, 1]
[2, 2, 2, 0]
[3, 2, 2, 1]
[2, 2, 2, 2]
[3, 3, 2, 2]
[3, 3, 3, 3]
Similarly ``graphs()`` will iterate through all graphs. The complete graph of 4
vertices is of course the smallest graph with chromatic number bigger than
three::
sage: for g in graphs():
....: if g.chromatic_number() > 3:
....: break
sage: g.is_isomorphic(graphs.CompleteGraph(4))
True
For some commonly used graphs to play with, type::
sage: graphs.[tab] # not tested
and hit {tab}. Most of these graphs come with their own custom plot, so you can
see how people usually visualize these graphs.
::
sage: G = graphs.PetersenGraph()
sage: G.plot().show() # or G.show()
sage: G.degree_histogram()
[0, 0, 0, 10]
sage: G.adjacency_matrix()
[0 1 0 0 1 1 0 0 0 0]
[1 0 1 0 0 0 1 0 0 0]
[0 1 0 1 0 0 0 1 0 0]
[0 0 1 0 1 0 0 0 1 0]
[1 0 0 1 0 0 0 0 0 1]
[1 0 0 0 0 0 0 1 1 0]
[0 1 0 0 0 0 0 0 1 1]
[0 0 1 0 0 1 0 0 0 1]
[0 0 0 1 0 1 1 0 0 0]
[0 0 0 0 1 0 1 1 0 0]
::
sage: S = G.subgraph([0,1,2,3])
sage: S.plot().show() # or S.show()
sage: S.density()
1/2
::
sage: G = GraphQuery(display_cols=['graph6'], num_vertices=7, diameter=5)
sage: L = G.get_graphs_list()
sage: graphs_list.show_graphs(L)
.. _Graph:labels:
Labels
------
Each vertex can have any hashable object as a label. These are things like
strings, numbers, and tuples. Each edge is given a default label of ``None``,
but if specified, edges can have any label at all. Edges between vertices `u`
and `v` are represented typically as ``(u, v, l)``, where ``l`` is the label for
the edge.
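For instance, a short round trip with a single labelled edge (illustrative)::
    sage: G = Graph([(0, 1, 'x')])
    sage: G.edges()
    [(0, 1, 'x')]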
Note that vertex labels themselves cannot be mutable items::
sage: M = Matrix( [[0,0],[0,0]] )
sage: G = Graph({ 0 : { M : None } })
Traceback (most recent call last):
...
TypeError: mutable matrices are unhashable
However, if one wants to define a dictionary, with the same keys and arbitrary
objects for entries, one can make that association::
sage: d = {0 : graphs.DodecahedralGraph(), 1 : graphs.FlowerSnark(), \
2 : graphs.MoebiusKantorGraph(), 3 : graphs.PetersenGraph() }
sage: d[2]
Moebius-Kantor Graph: Graph on 16 vertices
sage: T = graphs.TetrahedralGraph()
sage: T.vertices()
[0, 1, 2, 3]
sage: T.set_vertices(d)
sage: T.get_vertex(1)
Flower Snark: Graph on 20 vertices
Database
--------
There is a database available for searching for graphs that satisfy a certain
set of parameters, including number of vertices and edges, density, maximum and
minimum degree, diameter, radius, and connectivity. To see a list of all search
parameter keywords broken down by their designated table names, type ::
sage: graph_db_info()
{...}
For more details on data types or keyword input, enter ::
sage: GraphQuery? # not tested
The results of a query can be viewed with the show method, or can be viewed
individually by iterating through the results ::
sage: Q = GraphQuery(display_cols=['graph6'],num_vertices=7, diameter=5)
sage: Q.show()
Graph6
--------------------
F?`po
F?gqg
F@?]O
F@OKg
F@R@o
FA_pW
FEOhW
FGC{o
FIAHo
Show each graph as you iterate through the results::
sage: for g in Q:
....: show(g)
Visualization
-------------
To see a graph `G` you are working with, there are three main options. You can
view the graph in two dimensions via matplotlib with ``show()``. ::
sage: G = graphs.RandomGNP(15,.3)
sage: G.show()
And you can view it in three dimensions via jmol with ``show3d()``. ::
sage: G.show3d()
Or it can be rendered with `\LaTeX`. This requires the right additions to a
standard `\mbox{\rm\TeX}` installation. Then standard Sage commands, such as
``view(G)`` will display the graph, or ``latex(G)`` will produce a string
suitable for inclusion in a `\LaTeX` document. More details on this are at the
:mod:`sage.graphs.graph_latex` module. ::
sage: from sage.graphs.graph_latex import check_tkz_graph
sage: check_tkz_graph() # random - depends on TeX installation
sage: latex(G)
\begin{tikzpicture}
...
\end{tikzpicture}
Mutability
----------
Graphs are mutable, and thus unusable as dictionary keys, unless
``data_structure="static_sparse"`` is used::
sage: G = graphs.PetersenGraph()
sage: {G:1}[G]
Traceback (most recent call last):
...
TypeError: This graph is mutable, and thus not hashable. Create an immutable copy by `g.copy(immutable=True)`
sage: G_immutable = Graph(G, immutable=True)
sage: G_immutable == G
True
sage: {G_immutable:1}[G_immutable]
1
Methods
-------
"""
# ****************************************************************************
# Copyright (C) 2006-2007 Robert L. Miller <rlmillster@gmail.com>
# 2018 Julian Rüth <julian.rueth@fsfe.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# https://www.gnu.org/licenses/
# ****************************************************************************
import itertools
from copy import copy
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.rings.integer import Integer
from sage.rings.integer_ring import ZZ
import sage.graphs.generic_graph_pyx as generic_graph_pyx
from sage.graphs.generic_graph import GenericGraph
from sage.graphs.digraph import DiGraph
from sage.graphs.independent_sets import IndependentSets
from sage.misc.rest_index_of_methods import doc_index, gen_thematic_rest_table_index
from sage.graphs.views import EdgesView
from sage.misc.lazy_import import lazy_import
from sage.features import PythonModule
lazy_import('sage.graphs.mcqd', ['mcqd'],
feature=PythonModule('sage.graphs.mcqd', spkg='mcqd'))
from sage.misc.decorators import rename_keyword
class Graph(GenericGraph):
r"""
Undirected graph.
A graph is a set of vertices connected by edges. See the
:wikipedia:`Graph_(mathematics)` for more information. For a collection of
pre-defined graphs, see the :mod:`~sage.graphs.graph_generators` module.
A :class:`Graph` object has many methods whose list can be obtained by
typing ``g.<tab>`` (i.e. hit the 'tab' key) or by reading the documentation
of :mod:`~sage.graphs.graph`, :mod:`~sage.graphs.generic_graph`, and
:mod:`~sage.graphs.digraph`.
INPUT:
By default, a :class:`Graph` object is simple (i.e. neither *loops* nor
*multiple edges*) and unweighted. This can be easily tuned with the appropriate flags
(see below).
- ``data`` -- can be any of the following (see the ``format`` argument):
#. ``Graph()`` -- build a graph on 0 vertices.
#. ``Graph(5)`` -- return an edgeless graph on the 5 vertices 0,...,4.
#. ``Graph([list_of_vertices, list_of_edges])`` -- returns a graph with
given vertices/edges.
To bypass auto-detection, prefer the more explicit
``Graph([V, E], format='vertices_and_edges')``.
#. ``Graph(list_of_edges)`` -- return a graph with a given list of edges
(see documentation of
:meth:`~sage.graphs.generic_graph.GenericGraph.add_edges`).
To bypass auto-detection, prefer the more explicit
``Graph(L, format='list_of_edges')``.
#. ``Graph({1: [2, 3, 4], 3: [4]})`` -- return a graph by associating to
each vertex the list of its neighbors.
To bypass auto-detection, prefer the more explicit
``Graph(D, format='dict_of_lists')``.
#. ``Graph({1: {2: 'a', 3:'b'} ,3:{2:'c'}})`` -- return a graph by
associating a list of neighbors to each vertex and providing its edge
label.
To bypass auto-detection, prefer the more explicit
``Graph(D, format='dict_of_dicts')``.
For graphs with multiple edges, you can provide a list of labels
instead, e.g.: ``Graph({1: {2: ['a1', 'a2'], 3:['b']} ,3:{2:['c']}})``.
#. ``Graph(a_symmetric_matrix)`` -- return a graph with given (weighted)
adjacency matrix (see documentation of
:meth:`~sage.graphs.generic_graph.GenericGraph.adjacency_matrix`).
To bypass auto-detection, prefer the more explicit ``Graph(M,
format='adjacency_matrix')``. To take weights into account, use
``format='weighted_adjacency_matrix'`` instead.
#. ``Graph(a_nonsymmetric_matrix)`` -- return a graph with given incidence
matrix (see documentation of
:meth:`~sage.graphs.generic_graph.GenericGraph.incidence_matrix`).
To bypass auto-detection, prefer the more explicit
``Graph(M, format='incidence_matrix')``.
#. ``Graph([V, f])`` -- return a graph from a vertex set ``V`` and a
*symmetric* function ``f``. The graph contains an edge `u,v` whenever
      ``f(u,v)`` is ``True``. Example: ``Graph([ [1..10], lambda x,y:
abs(x-y).is_square()])``
#. ``Graph(':I`ES@obGkqegW~')`` -- return a graph from a graph6 or sparse6
string (see documentation of :meth:`graph6_string` or
:meth:`sparse6_string`).
#. ``Graph(a_seidel_matrix, format='seidel_adjacency_matrix')`` -- return
a graph with a given Seidel adjacency matrix (see documentation of
:meth:`seidel_adjacency_matrix`).
#. ``Graph(another_graph)`` -- return a graph from a Sage (di)graph,
`pygraphviz <https://pygraphviz.github.io/>`__ graph, `NetworkX
<https://networkx.github.io/>`__ graph, or `igraph
<http://igraph.org/python/>`__ graph.
- ``pos`` -- a positioning dictionary (cf. documentation of
:meth:`~sage.graphs.generic_graph.GenericGraph.layout`). For example, to
draw 4 vertices on a square::
{0: [-1,-1],
1: [ 1,-1],
2: [ 1, 1],
3: [-1, 1]}
- ``name`` -- (must be an explicitly named parameter, i.e.,
  ``name="complete"``) gives the graph a name
- ``loops`` -- boolean (default: ``None``); whether to allow loops (ignored
if data is an instance of the ``Graph`` class)
- ``multiedges`` -- boolean (default: ``None``); whether to allow multiple
edges (ignored if data is an instance of the ``Graph`` class).
- ``weighted`` -- boolean (default: ``None``); whether graph thinks of
itself as weighted or not. See
:meth:`~sage.graphs.generic_graph.GenericGraph.weighted`.
- ``format`` -- if set to ``None`` (default), :class:`Graph` tries to guess
input's format. To avoid this possibly time-consuming step, one of the
following values can be specified (see description above): ``"int"``,
``"graph6"``, ``"sparse6"``, ``"rule"``, ``"list_of_edges"``,
``"dict_of_lists"``, ``"dict_of_dicts"``, ``"adjacency_matrix"``,
``"weighted_adjacency_matrix"``, ``"seidel_adjacency_matrix"``,
``"incidence_matrix"``, ``"NX"``, ``"igraph"``.
- ``sparse`` -- boolean (default: ``True``); ``sparse=True`` is an alias for
``data_structure="sparse"``, and ``sparse=False`` is an alias for
``data_structure="dense"``.
- ``data_structure`` -- one of the following (for more information, see
:mod:`~sage.graphs.base.overview`)
* ``"dense"`` -- selects the :mod:`~sage.graphs.base.dense_graph`
backend.
* ``"sparse"`` -- selects the :mod:`~sage.graphs.base.sparse_graph`
backend.
* ``"static_sparse"`` -- selects the
:mod:`~sage.graphs.base.static_sparse_backend` (this backend is faster
than the sparse backend and smaller in memory, and it is immutable, so
that the resulting graphs can be used as dictionary keys).
- ``immutable`` -- boolean (default: ``False``); whether to create an
  immutable graph. Note that ``immutable=True`` is actually a shortcut for
``data_structure='static_sparse'``. Set to ``False`` by default.
- ``vertex_labels`` -- boolean (default: ``True``); whether to allow any
object as a vertex (slower), or only the integers `0,...,n-1`, where `n`
is the number of vertices.
- ``convert_empty_dict_labels_to_None`` -- this argument sets the default
  edge labels used by NetworkX (empty dictionaries) to be replaced by
  ``None``, the default Sage edge label. It is set to ``True`` if and only
  if a NetworkX graph is given as input.
EXAMPLES:
We illustrate the first seven input formats (the other two involve packages
that are currently not standard in Sage):
#. An integer giving the number of vertices::
sage: g = Graph(5); g
Graph on 5 vertices
sage: g.vertices()
[0, 1, 2, 3, 4]
sage: g.edges()
[]
#. A dictionary of dictionaries::
sage: g = Graph({0:{1:'x',2:'z',3:'a'}, 2:{5:'out'}}); g
Graph on 5 vertices
      The labels ('x', 'z', 'a', 'out') are labels for edges. For example,
      'out' is the label for the edge between 2 and 5. Labels can be used as
      weights, if all the labels share some common parent::
sage: a, b, c, d, e, f = sorted(SymmetricGroup(3)) # optional - sage.groups
sage: Graph({b: {d: 'c', e: 'p'}, c: {d: 'p', e: 'c'}}) # optional - sage.groups
Graph on 4 vertices
#. A dictionary of lists::
sage: g = Graph({0:[1,2,3], 2:[4]}); g
Graph on 5 vertices
#. A list of vertices and a function describing adjacencies. Note that the
list of vertices and the function must be enclosed in a list (i.e., [list
of vertices, function]).
Construct the Paley graph over GF(13).::
sage: g=Graph([GF(13), lambda i,j: i!=j and (i-j).is_square()])
sage: g.vertices()
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
sage: g.adjacency_matrix()
[0 1 0 1 1 0 0 0 0 1 1 0 1]
[1 0 1 0 1 1 0 0 0 0 1 1 0]
[0 1 0 1 0 1 1 0 0 0 0 1 1]
[1 0 1 0 1 0 1 1 0 0 0 0 1]
[1 1 0 1 0 1 0 1 1 0 0 0 0]
[0 1 1 0 1 0 1 0 1 1 0 0 0]
[0 0 1 1 0 1 0 1 0 1 1 0 0]
[0 0 0 1 1 0 1 0 1 0 1 1 0]
[0 0 0 0 1 1 0 1 0 1 0 1 1]
[1 0 0 0 0 1 1 0 1 0 1 0 1]
[1 1 0 0 0 0 1 1 0 1 0 1 0]
[0 1 1 0 0 0 0 1 1 0 1 0 1]
[1 0 1 1 0 0 0 0 1 1 0 1 0]
Construct the line graph of a complete graph.::
sage: g=graphs.CompleteGraph(4)
sage: line_graph=Graph([g.edges(labels=false), \
lambda i,j: len(set(i).intersection(set(j)))>0], \
loops=False)
sage: line_graph.vertices()
[(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
sage: line_graph.adjacency_matrix()
[0 1 1 1 1 0]
[1 0 1 1 0 1]
[1 1 0 0 1 1]
[1 1 0 0 1 1]
[1 0 1 1 0 1]
[0 1 1 1 1 0]
#. A graph6 or sparse6 string: Sage automatically recognizes whether a
string is in graph6 or sparse6 format::
sage: s = ':I`AKGsaOs`cI]Gb~'
sage: Graph(s,sparse=True)
Looped multi-graph on 10 vertices
::
sage: G = Graph('G?????')
sage: G = Graph("G'?G?C")
Traceback (most recent call last):
...
RuntimeError: the string seems corrupt: valid characters are
?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~
sage: G = Graph('G??????')
Traceback (most recent call last):
...
RuntimeError: the string (G??????) seems corrupt: for n = 8, the string is too long
::
sage: G = Graph(":I'AKGsaOs`cI]Gb~")
Traceback (most recent call last):
...
RuntimeError: the string seems corrupt: valid characters are
?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_`abcdefghijklmnopqrstuvwxyz{|}~
There are also list functions to take care of lists of graphs::
sage: s = ':IgMoqoCUOqeb\n:I`AKGsaOs`cI]Gb~\n:I`EDOAEQ?PccSsge\\N\n'
sage: graphs_list.from_sparse6(s)
[Looped multi-graph on 10 vertices, Looped multi-graph on 10 vertices, Looped multi-graph on 10 vertices]
#. A Sage matrix:
Note: If format is not specified, then Sage assumes a symmetric square
matrix is an adjacency matrix, otherwise an incidence matrix.
- an adjacency matrix::
sage: M = graphs.PetersenGraph().am(); M
[0 1 0 0 1 1 0 0 0 0]
[1 0 1 0 0 0 1 0 0 0]
[0 1 0 1 0 0 0 1 0 0]
[0 0 1 0 1 0 0 0 1 0]
[1 0 0 1 0 0 0 0 0 1]
[1 0 0 0 0 0 0 1 1 0]
[0 1 0 0 0 0 0 0 1 1]
[0 0 1 0 0 1 0 0 0 1]
[0 0 0 1 0 1 1 0 0 0]
[0 0 0 0 1 0 1 1 0 0]
sage: Graph(M)
Graph on 10 vertices
::
sage: Graph(matrix([[1,2],[2,4]]),loops=True,sparse=True)
Looped multi-graph on 2 vertices
sage: M = Matrix([[0,1,-1],[1,0,-1/2],[-1,-1/2,0]]); M
[ 0 1 -1]
[ 1 0 -1/2]
[ -1 -1/2 0]
sage: G = Graph(M,sparse=True); G
Graph on 3 vertices
sage: G.weighted()
True
- an incidence matrix::
sage: M = Matrix(6, [-1,0,0,0,1, 1,-1,0,0,0, 0,1,-1,0,0, 0,0,1,-1,0, 0,0,0,1,-1, 0,0,0,0,0]); M
[-1 0 0 0 1]
[ 1 -1 0 0 0]
[ 0 1 -1 0 0]
[ 0 0 1 -1 0]
[ 0 0 0 1 -1]
[ 0 0 0 0 0]
sage: Graph(M)
Graph on 6 vertices
sage: Graph(Matrix([[1],[1],[1]]))
Traceback (most recent call last):
...
ValueError: there must be one or two nonzero entries per column in an incidence matrix, got entries [1, 1, 1] in column 0
sage: Graph(Matrix([[1],[1],[0]]))
Graph on 3 vertices
sage: M = Matrix([[0,1,-1],[1,0,-1],[-1,-1,0]]); M
[ 0 1 -1]
[ 1 0 -1]
[-1 -1 0]
sage: Graph(M,sparse=True)
Graph on 3 vertices
sage: M = Matrix([[0,1,1],[1,0,1],[-1,-1,0]]); M
[ 0 1 1]
[ 1 0 1]
[-1 -1 0]
sage: Graph(M)
Traceback (most recent call last):
...
ValueError: there must be one or two nonzero entries per column in an incidence matrix, got entries [1, 1] in column 2
Check that :trac:`9714` is fixed::
sage: MA = Matrix([[1,2,0], [0,2,0], [0,0,1]])
sage: GA = Graph(MA, format='adjacency_matrix')
sage: MI = GA.incidence_matrix(oriented=False)
sage: MI
[2 1 1 0 0 0]
[0 1 1 2 2 0]
[0 0 0 0 0 2]
sage: Graph(MI).edges(labels=None)
[(0, 0), (0, 1), (0, 1), (1, 1), (1, 1), (2, 2)]
sage: M = Matrix([[1], [-1]]); M
[ 1]
[-1]
sage: Graph(M).edges()
[(0, 1, None)]
#. A Seidel adjacency matrix::
sage: from sage.combinat.matrices.hadamard_matrix import \
....: regular_symmetric_hadamard_matrix_with_constant_diagonal as rshcd
sage: m=rshcd(16,1)- matrix.identity(16)
sage: Graph(m,format="seidel_adjacency_matrix").is_strongly_regular(parameters=True)
(16, 6, 2, 2)
#. List of edges, or labelled edges::
sage: g = Graph([(1,3),(3,8),(5,2)])
sage: g
Graph on 5 vertices
sage: g = Graph([(1,2,"Peace"),(7,-9,"and"),(77,2, "Love")])
sage: g
Graph on 5 vertices
sage: g = Graph([(0, 2, '0'), (0, 2, '1'), (3, 3, '2')], loops=True, multiedges=True)
sage: g.loops()
[(3, 3, '2')]
#. A NetworkX MultiGraph::
sage: import networkx
sage: g = networkx.MultiGraph({0:[1,2,3], 2:[4]})
sage: Graph(g)
Multi-graph on 5 vertices
#. A NetworkX graph::
sage: import networkx
sage: g = networkx.Graph({0:[1,2,3], 2:[4]})
sage: DiGraph(g)
Digraph on 5 vertices
#. An igraph Graph (see also
:meth:`~sage.graphs.generic_graph.GenericGraph.igraph_graph`)::
sage: import igraph # optional - python_igraph
sage: g = igraph.Graph([(0, 1), (0, 2)]) # optional - python_igraph
sage: Graph(g) # optional - python_igraph
Graph on 3 vertices
If ``vertex_labels`` is ``True``, the names of the vertices are given by
the vertex attribute ``'name'``, if available::
sage: g = igraph.Graph([(0,1),(0,2)], vertex_attrs={'name':['a','b','c']}) # optional - python_igraph
sage: Graph(g).vertices() # optional - python_igraph
['a', 'b', 'c']
sage: g = igraph.Graph([(0,1),(0,2)], vertex_attrs={'label':['a','b','c']}) # optional - python_igraph
sage: Graph(g).vertices() # optional - python_igraph
[0, 1, 2]
If the igraph Graph has edge attributes, they are used as edge labels::
sage: g = igraph.Graph([(0,1),(0,2)], edge_attrs={'name':['a','b'], 'weight':[1,3]}) # optional - python_igraph
sage: Graph(g).edges() # optional - python_igraph
[(0, 1, {'name': 'a', 'weight': 1}), (0, 2, {'name': 'b', 'weight': 3})]
When defining an undirected graph from a function ``f``, it is *very*
important that ``f`` be symmetric. If it is not, anything can happen::
sage: f_sym = lambda x,y: abs(x-y) == 1
sage: f_nonsym = lambda x,y: (x-y) == 1
sage: G_sym = Graph([[4,6,1,5,3,7,2,0], f_sym])
sage: G_sym.is_isomorphic(graphs.PathGraph(8))
True
sage: G_nonsym = Graph([[4,6,1,5,3,7,2,0], f_nonsym])
sage: G_nonsym.size()
4
sage: G_nonsym.is_isomorphic(G_sym)
False
By default, graphs are mutable and can thus not be used as a dictionary
key::
sage: G = graphs.PetersenGraph()
sage: {G:1}[G]
Traceback (most recent call last):
...
TypeError: This graph is mutable, and thus not hashable. Create an immutable copy by `g.copy(immutable=True)`
    When providing the optional arguments ``data_structure="static_sparse"`` or
    ``immutable=True`` (both mean the same), an immutable graph results. ::
sage: G_imm = Graph(G, immutable=True)
sage: H_imm = Graph(G, data_structure='static_sparse')
sage: G_imm == H_imm == G
True
sage: {G_imm:1}[H_imm]
1
TESTS::
sage: Graph(4, format="HeyHeyHey")
Traceback (most recent call last):
...
ValueError: Unknown input format 'HeyHeyHey'
sage: Graph(igraph.Graph(directed=True)) # optional - python_igraph
Traceback (most recent call last):
...
ValueError: An *undirected* igraph graph was expected. To build an directed graph, call the DiGraph constructor.
sage: m = matrix([[0, -1], [-1, 0]])
sage: Graph(m, format="seidel_adjacency_matrix")
Graph on 2 vertices
sage: m[0,1] = 1
sage: Graph(m, format="seidel_adjacency_matrix")
Traceback (most recent call last):
...
ValueError: the adjacency matrix of a Seidel graph must be symmetric
sage: m[0,1] = -1; m[1,1] = 1
sage: Graph(m, format="seidel_adjacency_matrix")
Traceback (most recent call last):
...
ValueError: the adjacency matrix of a Seidel graph must have 0s on the main diagonal
From a list of vertices and a list of edges::
sage: G = Graph([[1,2,3], [(1,2)]]); G
Graph on 3 vertices
sage: G.edges()
[(1, 2, None)]
Check that :trac:`27505` is fixed::
sage: Graph(Graph().networkx_graph(), weighted=None, format='NX')
Graph on 0 vertices
"""
_directed = False
def __init__(self, data=None, pos=None, loops=None, format=None,
weighted=None, data_structure="sparse",
vertex_labels=True, name=None,
multiedges=None, convert_empty_dict_labels_to_None=None,
sparse=True, immutable=False):
"""
TESTS::
sage: G = Graph()
sage: loads(dumps(G)) == G
True
sage: a = matrix(2,2,[1,0,0,1])
sage: Graph(a).adjacency_matrix() == a
True
sage: a = matrix(2,2,[2,0,0,1])
sage: Graph(a,sparse=True).adjacency_matrix() == a
True
The positions are copied when the graph is built from another graph ::
sage: g = graphs.PetersenGraph()
sage: h = Graph(g)
sage: g.get_pos() == h.get_pos()
True
The position dictionary is not the input one (:trac:`22424`)::
sage: my_pos = {0:(0,0), 1:(1,1)}
sage: G = Graph([[0,1], [(0,1)]], pos=my_pos)
sage: my_pos == G._pos
True
sage: my_pos is G._pos
False
Or from a DiGraph ::
sage: d = DiGraph(g)
sage: h = Graph(d)
sage: g.get_pos() == h.get_pos()
True
Loops are not counted as multiedges (see :trac:`11693`) and edges are
not counted twice ::
sage: Graph({1:[1]}).num_edges()
1
sage: Graph({1:[2,2]}).num_edges()
2
An empty list or dictionary defines a simple graph
(:trac:`10441` and :trac:`12910`)::
sage: Graph([])
Graph on 0 vertices
sage: Graph({})
Graph on 0 vertices
sage: # not "Multi-graph on 0 vertices"
Verify that the int format works as expected (:trac:`12557`)::
sage: Graph(2).adjacency_matrix()
[0 0]
[0 0]
sage: Graph(3) == Graph(3,format='int')
True
Problem with weighted adjacency matrix (:trac:`13919`)::
sage: B = {0:{1:2,2:5,3:4},1:{2:2,4:7},2:{3:1,4:4,5:3},3:{5:4},4:{5:1,6:5},5:{6:7}}
sage: grafo3 = Graph(B,weighted=True)
sage: matad = grafo3.weighted_adjacency_matrix()
sage: grafo4 = Graph(matad,format = "adjacency_matrix", weighted=True)
sage: grafo4.shortest_path(0,6,by_weight=True)
[0, 1, 2, 5, 4, 6]
Graphs returned when setting ``immutable=False`` are mutable::
sage: g = graphs.PetersenGraph()
sage: g = Graph(g.edges(),immutable=False)
sage: g.add_edge("Hey", "Heyyyyyyy")
And their name is set::
sage: g = graphs.PetersenGraph()
sage: Graph(g, immutable=True)
Petersen graph: Graph on 10 vertices
Check error messages for graphs built from incidence matrices (see
:trac:`18440`)::
sage: Graph(matrix([[-1, 1, 0],[1, 0, 0]]))
Traceback (most recent call last):
...
ValueError: column 1 of the (oriented) incidence matrix contains only one nonzero value
sage: Graph(matrix([[1,1],[1,1],[1,0]]))
Traceback (most recent call last):
...
ValueError: there must be one or two nonzero entries per column in an incidence matrix, got entries [1, 1, 1] in column 0
sage: Graph(matrix([[3,1,1],[0,1,1]]))
Traceback (most recent call last):
...
ValueError: each column of a non-oriented incidence matrix must sum to 2, but column 0 does not
Vertex labels are retained in the graph (:trac:`14708`)::
sage: g = Graph()
sage: g.add_vertex(0)
sage: g.set_vertex(0, 'foo')
sage: g.get_vertices()
{0: 'foo'}
sage: Graph(g).get_vertices()
{0: 'foo'}
"""
GenericGraph.__init__(self)
from sage.structure.element import is_Matrix
if sparse is False:
if data_structure != "sparse":
raise ValueError("The 'sparse' argument is an alias for "
"'data_structure'. Please do not define both.")
data_structure = "dense"
if multiedges or weighted:
if data_structure == "dense":
raise RuntimeError("Multiedge and weighted c_graphs must be sparse.")
if immutable:
data_structure = 'static_sparse'
# If the data structure is static_sparse, we first build a graph
# using the sparse data structure, then re-encode the resulting graph
# as a static sparse graph.
from sage.graphs.base.sparse_graph import SparseGraphBackend
from sage.graphs.base.dense_graph import DenseGraphBackend
if data_structure in ["sparse", "static_sparse"]:
CGB = SparseGraphBackend
elif data_structure == "dense":
CGB = DenseGraphBackend
else:
raise ValueError("data_structure must be equal to 'sparse', "
"'static_sparse' or 'dense'")
self._backend = CGB(0, directed=False)
if format is None and isinstance(data, str):
if data.startswith(">>graph6<<"):
data = data[10:]
format = 'graph6'
elif data.startswith(">>sparse6<<"):
data = data[11:]
format = 'sparse6'
elif data[0] == ':':
format = 'sparse6'
else:
format = 'graph6'
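# For instance, ':Da@en' (a sparse6 string, see sparse6_string below)
# starts with ':', while a graph6 string such as 'IvUqwK@?G' does not.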
if format is None and is_Matrix(data):
if data.is_symmetric():
format = 'adjacency_matrix'
else:
format = 'incidence_matrix'
if format is None and isinstance(data, Graph):
format = 'Graph'
from sage.graphs.all import DiGraph
if format is None and isinstance(data, DiGraph):
data = data.to_undirected()
format = 'Graph'
if (format is None and
isinstance(data, list) and
len(data) >= 2 and
callable(data[1])):
format = 'rule'
if (format is None and
isinstance(data, list) and
len(data) == 2 and
isinstance(data[0], list) and # a list of two lists, the second of
((isinstance(data[1], list) and # which contains iterables (the edges)
(not data[1] or callable(getattr(data[1][0], "__iter__", None)))) or
(isinstance(data[1], EdgesView)))):
format = "vertices_and_edges"
if format is None and isinstance(data, dict):
if not data:
format = 'dict_of_dicts'
else:
val = next(iter(data.values()))
if isinstance(val, (list, EdgesView)):
format = 'dict_of_lists'
elif isinstance(val, dict):
format = 'dict_of_dicts'
if format is None and hasattr(data, 'adj'):
# the input is a networkx (Multi)(Di)Graph
format = 'NX'
if (format is None and
hasattr(data, 'vcount') and
hasattr(data, 'get_edgelist')):
try:
import igraph
except ImportError:
raise ImportError("The data seems to be a igraph object, but "+
"igraph is not installed in Sage. To install "+
"it, run 'sage -i python_igraph'")
if format is None and isinstance(data, igraph.Graph):
format = 'igraph'
if format is None and isinstance(data, (int, Integer)):
format = 'int'
if format is None and data is None:
format = 'int'
data = 0
# Input is a list of edges or an EdgesView
if format is None and isinstance(data, (list, EdgesView)):
format = "list_of_edges"
if weighted is None:
weighted = False
if format is None:
raise ValueError("This input cannot be turned into a graph")
if format == 'weighted_adjacency_matrix':
if weighted is False:
raise ValueError("Format was weighted_adjacency_matrix but weighted was False.")
if weighted is None:
weighted = True
if multiedges is None:
multiedges = False
format = 'adjacency_matrix'
# At this point, 'format' has been set. We build the graph
if format == 'graph6':
if weighted is None:
weighted = False
self.allow_loops(loops if loops else False, check=False)
self.allow_multiple_edges(multiedges if multiedges else False, check=False)
from .graph_input import from_graph6
from_graph6(self, data)
elif format == 'sparse6':
if weighted is None:
weighted = False
self.allow_loops(False if loops is False else True, check=False)
self.allow_multiple_edges(False if multiedges is False else True, check=False)
from .graph_input import from_sparse6
from_sparse6(self, data)
elif format == 'adjacency_matrix':
from .graph_input import from_adjacency_matrix
from_adjacency_matrix(self, data, loops=loops, multiedges=multiedges, weighted=weighted)
elif format == 'incidence_matrix':
from .graph_input import from_incidence_matrix
from_incidence_matrix(self, data, loops=loops, multiedges=multiedges, weighted=weighted)
elif format == 'seidel_adjacency_matrix':
weighted = False
self.allow_loops(False)
self.allow_multiple_edges(False)
from .graph_input import from_seidel_adjacency_matrix
from_seidel_adjacency_matrix(self, data)
elif format == 'Graph':
if loops is None:
loops = data.allows_loops()
if multiedges is None:
multiedges = data.allows_multiple_edges()
if weighted is None:
weighted = data.weighted()
self.allow_loops(loops, check=False)
self.allow_multiple_edges(multiedges, check=False)
if data.get_pos() is not None:
pos = data.get_pos()
self.name(data.name())
self.set_vertices(data.get_vertices())
data._backend.subgraph_given_vertices(self._backend, data)
elif format == 'NX':
from sage.graphs.graph_input import from_networkx_graph
from_networkx_graph(self, data,
weighted=weighted, multiedges=multiedges, loops=loops,
convert_empty_dict_labels_to_None=convert_empty_dict_labels_to_None)
if weighted is None:
weighted = self.allows_multiple_edges()
elif format == 'igraph':
if data.is_directed():
raise ValueError("An *undirected* igraph graph was expected. "+
"To build an directed graph, call the DiGraph "+
"constructor.")
self.add_vertices(range(data.vcount()))
self.add_edges((e.source, e.target, e.attributes()) for e in data.es())
if vertex_labels and 'name' in data.vertex_attributes():
vs = data.vs()
self.relabel({v:vs[v]['name'] for v in self})
elif format == 'rule':
f = data[1]
verts = data[0]
if loops is None:
loops = any(f(v,v) for v in verts)
if weighted is None:
weighted = False
self.allow_loops(loops, check=False)
self.allow_multiple_edges(True if multiedges else False, check=False)
self.add_vertices(verts)
self.add_edges(e for e in itertools.combinations(verts,2) if f(*e))
if loops:
self.add_edges((v,v) for v in verts if f(v,v))
elif format == "vertices_and_edges":
self.allow_multiple_edges(bool(multiedges), check=False)
self.allow_loops(bool(loops), check=False)
self.add_vertices(data[0])
self.add_edges(data[1])
elif format == 'dict_of_dicts':
from .graph_input import from_dict_of_dicts
from_dict_of_dicts(self, data, loops=loops, multiedges=multiedges, weighted=weighted,
convert_empty_dict_labels_to_None = False if convert_empty_dict_labels_to_None is None else convert_empty_dict_labels_to_None)
elif format == 'dict_of_lists':
from .graph_input import from_dict_of_lists
from_dict_of_lists(self, data, loops=loops, multiedges=multiedges, weighted=weighted)
elif format == 'int':
self.allow_loops(loops if loops else False, check=False)
self.allow_multiple_edges(multiedges if multiedges else False, check=False)
if data < 0:
raise ValueError("The number of vertices cannot be strictly negative!")
if data:
self.add_vertices(range(data))
elif format == 'list_of_edges':
self.allow_multiple_edges(True if multiedges else False,
check=False)
self.allow_loops(True if loops else False, check=False)
self.add_edges(data)
else:
raise ValueError("Unknown input format '{}'".format(format))
if weighted is None:
weighted = False
self._weighted = getattr(self, '_weighted', weighted)
self._pos = copy(pos)
if format != 'Graph' or name is not None:
self.name(name)
if data_structure == "static_sparse":
from sage.graphs.base.static_sparse_backend import StaticSparseBackend
ib = StaticSparseBackend(self,
loops = self.allows_loops(),
multiedges = self.allows_multiple_edges())
self._backend = ib
self._immutable = True
### Formats
@doc_index("Basic methods")
def graph6_string(self):
r"""
Return the graph6 representation of the graph as an ASCII string.
This is only valid for simple (no loops, no multiple edges) graphs
on at most `2^{18}-1=262143` vertices.
.. NOTE::
As the graph6 format only handles graphs with vertex set
`\{0,...,n-1\}`, a :meth:`relabelled copy
<sage.graphs.generic_graph.GenericGraph.relabel>` will
be encoded, if necessary.
.. SEEALSO::
* :meth:`~sage.graphs.digraph.DiGraph.dig6_string` --
a similar string format for directed graphs
EXAMPLES::
sage: G = graphs.KrackhardtKiteGraph()
sage: G.graph6_string()
'IvUqwK@?G'
TESTS::
sage: Graph().graph6_string()
'?'
"""
n = self.order()
if n > 262143:
raise ValueError('graph6 format supports graphs on 0 to 262143 vertices only.')
elif self.has_loops() or self.has_multiple_edges():
raise ValueError('graph6 format supports only simple graphs (no loops, no multiple edges)')
else:
return generic_graph_pyx.small_integer_to_graph6(n) + generic_graph_pyx.binary_string_to_graph6(self._bit_vector())
@doc_index("Basic methods")
def sparse6_string(self):
r"""
Return the sparse6 representation of the graph as an ASCII string.
Only valid for undirected graphs on 0 to 262143 vertices, but loops
and multiple edges are permitted.
.. NOTE::
As the sparse6 format only handles graphs whose vertex set is
`\{0,...,n-1\}`, a :meth:`relabelled copy
<sage.graphs.generic_graph.GenericGraph.relabel>` of your graph will
be encoded if necessary.
EXAMPLES::
sage: G = graphs.BullGraph()
sage: G.sparse6_string()
':Da@en'
::
sage: G = Graph(loops=True, multiedges=True, data_structure="sparse")
sage: Graph(':?', data_structure="sparse") == G
True
TESTS::
sage: G = Graph()
sage: G.sparse6_string()
':?'
Check that :trac:`18445` is fixed::
sage: Graph(graphs.KneserGraph(5,2).sparse6_string()).size()
15
Graphs with 1 vertex are correctly handled (:trac:`24923`)::
sage: Graph([(0, 0)], loops=True).sparse6_string()
':@^'
sage: G = Graph(_)
sage: G.order(), G.size()
(1, 1)
sage: Graph([(0, 0), (0, 0)], loops=True, multiedges=True).sparse6_string()
':@N'
sage: H = Graph(_)
sage: H.order(), H.size()
(1, 2)
Sparse6 encoding of canonical graph is unique (:trac:`31026`)::
sage: G = Graph([(0,1),(1,2),(2,3),(3,0),(0,2)])
sage: H = Graph([(0,1),(1,2),(2,3),(3,0),(1,3)])
sage: G == H
False
sage: G.is_isomorphic(H)
True
sage: G.sparse6_string() == H.sparse6_string()
False
sage: G_ = G.canonical_label()
sage: H_ = H.canonical_label()
sage: G_ == H_
True
sage: G_.sparse6_string() == H_.sparse6_string()
True
The method can handle vertices with different types (:trac:`31026`)::
sage: G = Graph([(1, 'a')])
sage: H = Graph(G.sparse6_string())
sage: G.is_isomorphic(H)
True
sage: set(G) == set(H)
False
"""
n = self.order()
if not n:
return ':?'
if n > 262143:
raise ValueError('sparse6 format supports graphs on 0 to 262143 vertices only.')
if n == 1:
s = '0' * self.size()
else:
try:
V = sorted(self)
except TypeError:
V = self
v_to_int = {v:i for i,v in enumerate(V)}
edges = [sorted((v_to_int[u], v_to_int[v])) for u,v in self.edge_iterator(labels=False)]
edges.sort(key=lambda e: (e[1], e[0])) # reverse lexicographic order
# encode bit vector
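# k is the number of bits needed to represent the largest vertex
# index, n - 1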
k = int((ZZ(n) - 1).nbits())
v = 0
i = 0
m = 0
s = ''
while m < len(edges):
if edges[m][1] > v + 1:
sp = generic_graph_pyx.int_to_binary_string(edges[m][1])
sp = '0'*(k-len(sp)) + sp
s += '1' + sp
v = edges[m][1]
elif edges[m][1] == v + 1:
sp = generic_graph_pyx.int_to_binary_string(edges[m][0])
sp = '0'*(k-len(sp)) + sp
s += '1' + sp
v += 1
m += 1
else:
sp = generic_graph_pyx.int_to_binary_string(edges[m][0])
sp = '0'*(k-len(sp)) + sp
s += '0' + sp
m += 1
# encode s as a 6-string, as in R(x), but padding with 1's
# pad on the right to make a multiple of 6
s = s + ( '1' * ((6 - len(s))%6) )
# split into groups of 6, and convert numbers to decimal, adding 63
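# (illustrative: a padded group such as '010111' encodes as
# chr(int('010111', 2) + 63) == chr(86) == 'V')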
six_bits = ''
for i in range(0, len(s), 6):
six_bits += chr( int( s[i:i+6], 2) + 63 )
return ':' + generic_graph_pyx.small_integer_to_graph6(n) + six_bits
### Attributes
@doc_index("Basic methods")
def is_directed(self):
"""
Since the graph is undirected, this method always returns ``False``.
EXAMPLES::
sage: Graph().is_directed()
False
"""
return False
### Properties
@doc_index("Graph properties")
def is_tree(self, certificate=False, output='vertex'):
r"""
Tests if the graph is a tree.
The empty graph is defined to be not a tree.
INPUT:
- ``certificate`` -- boolean (default: ``False``); whether to return a
certificate. The method only returns boolean answers when
``certificate = False`` (default). When it is set to ``True``, it
either answers ``(True, None)`` when the graph is a tree or ``(False,
cycle)`` when it contains a cycle. It returns ``(False, None)`` when
the graph is empty or not connected.
- ``output`` -- either ``'vertex'`` (default) or ``'edge'``; whether the
certificate is given as a list of vertices (``output = 'vertex'``) or
a list of edges (``output = 'edge'``).
When the certificate cycle is given as a list of edges, the edges are
given as `(v_i, v_{i+1}, l)` where `v_1, v_2, \dots, v_n` are the
vertices of the cycle (in their cyclic order).
EXAMPLES::
sage: all(T.is_tree() for T in graphs.trees(15))
True
With certificates::
sage: g = graphs.RandomTree(30)
sage: g.is_tree(certificate=True)
(True, None)
sage: g.add_edge(10,-1)
sage: g.add_edge(11,-1)
sage: isit, cycle = g.is_tree(certificate=True)
sage: isit
False
sage: -1 in cycle
True
One can also ask for the certificate as a list of edges::
sage: g = graphs.CycleGraph(4)
sage: g.is_tree(certificate=True, output='edge')
(False, [(3, 2, None), (2, 1, None), (1, 0, None), (0, 3, None)])
This is useful for graphs with multiple edges::
sage: G = Graph([(1, 2, 'a'), (1, 2, 'b')], multiedges=True)
sage: G.is_tree(certificate=True)
(False, [1, 2])
sage: G.is_tree(certificate=True, output='edge')
(False, [(1, 2, 'a'), (2, 1, 'b')])
TESTS:
:trac:`14434` is fixed::
sage: g = Graph({0:[1,4,5],3:[4,8,9],4:[9],5:[7,8],7:[9]})
sage: _,cycle = g.is_tree(certificate=True)
sage: g.size()
10
sage: g.add_cycle(cycle)
sage: g.size()
10
The empty graph::
sage: graphs.EmptyGraph().is_tree()
False
sage: graphs.EmptyGraph().is_tree(certificate=True)
(False, None)
:trac:`22912` is fixed::
sage: G = Graph([(0,0), (0,1)], loops=True)
sage: G.is_tree(certificate=True)
(False, [0])
sage: G.is_tree(certificate=True, output='edge')
(False, [(0, 0, None)])
"""
if output not in ['vertex', 'edge']:
raise ValueError('output must be either vertex or edge')
if not self.order() or not self.is_connected():
return (False, None) if certificate else False
if certificate:
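# A connected graph is a tree if and only if it has n - 1 edges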
if self.order() == self.size() + 1:
return (True, None)
if self.allows_loops():
L = self.loop_edges() if output == 'edge' else self.loop_vertices()
if L:
return False, L[:1]
if self.has_multiple_edges():
if output == 'vertex':
return (False, list(self.multiple_edges(sort=True)[0][:2]))
edge1, edge2 = self.multiple_edges(sort=True)[:2]
if edge1[0] != edge2[0]:
return (False, [edge1, edge2])
return (False, [edge1, (edge2[1], edge2[0], edge2[2])])
if output == 'edge':
if self.allows_multiple_edges():
def vertices_to_edges(x):
return [(u[0], u[1], self.edge_label(u[0], u[1])[0])
for u in zip(x, x[1:] + [x[0]])]
else:
def vertices_to_edges(x):
return [(u[0], u[1], self.edge_label(u[0], u[1]))
for u in zip(x, x[1:] + [x[0]])]
# This code is a depth-first search that looks for a cycle in the
# graph. We *know* it exists as there are too many edges around.
seen = {}
u = next(self.vertex_iterator())
seen[u] = u
stack = [(u, v) for v in self.neighbor_iterator(u)]
while stack:
u, v = stack.pop()
if v in seen:
continue
for w in self.neighbor_iterator(v):
if u == w:
continue
elif w in seen:
cycle = [w, v]
while u != w:
cycle.append(u)
u = seen[u]
cycle.reverse()
if output == 'vertex':
return (False, cycle)
return (False, vertices_to_edges(cycle))
else:
stack.append((v, w))
seen[v] = u
else:
return self.order() == self.size() + 1
@doc_index("Graph properties")
def is_forest(self, certificate=False, output='vertex'):
"""
Tests if the graph is a forest, i.e. a disjoint union of trees.
INPUT:
- ``certificate`` -- boolean (default: ``False``); whether to return a
certificate. The method only returns boolean answers when
``certificate = False`` (default). When it is set to ``True``, it
either answers ``(True, None)`` when the graph is a forest or
``(False, cycle)`` when it contains a cycle.
- ``output`` -- either ``'vertex'`` (default) or ``'edge'``; whether the
certificate is given as a list of vertices (``output = 'vertex'``) or
a list of edges (``output = 'edge'``).
EXAMPLES::
sage: seven_acre_wood = sum(graphs.trees(7), Graph())
sage: seven_acre_wood.is_forest()
True
With certificates::
sage: g = graphs.RandomTree(30)
sage: g.is_forest(certificate=True)
(True, None)
sage: (2*g + graphs.PetersenGraph() + g).is_forest(certificate=True)
(False, [68, 66, 69, 67, 65])
"""
connected_components = self.connected_components()
number_of_connected_components = len(connected_components)
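# A graph is a forest if and only if n == m + c, where c is the number
# of connected components: each component is then a tree, having one
# edge less than its number of vertices.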
isit = (self.order() ==
self.size() + number_of_connected_components)
if not certificate:
return isit
else:
if isit:
return (True, None)
# The graph contains a cycle, and the user wants to see it.
# No need to copy the graph
if number_of_connected_components == 1:
return self.is_tree(certificate=True, output=output)
# We try to find a cycle in each connected component
for cc in connected_components:
isit, cycle = self.subgraph(cc).is_tree(certificate=True, output=output)
if not isit:
return (False, cycle)
@doc_index("Graph properties")
def is_cactus(self):
"""
Check whether the graph is a cactus graph.
A graph is called a *cactus graph* if it is connected and every pair of
simple cycles has at most one common vertex.
There are other definitions, see the :wikipedia:`Cactus_graph`.
EXAMPLES::
sage: g = Graph({1: [2], 2: [3, 4], 3: [4, 5, 6, 7], 8: [3, 5], 9: [6, 7]})
sage: g.is_cactus()
True
sage: c6 = graphs.CycleGraph(6)
sage: naphthalene = c6 + c6
sage: naphthalene.is_cactus() # Not connected
False
sage: naphthalene.merge_vertices([0, 6])
sage: naphthalene.is_cactus()
True
sage: naphthalene.merge_vertices([1, 7])
sage: naphthalene.is_cactus()
False
TESTS::
sage: all(graphs.PathGraph(i).is_cactus() for i in range(5))
True
sage: Graph('Fli@?').is_cactus()
False
Test a graph that is not outerplanar, see :trac:`24480`::
sage: graphs.Balaban10Cage().is_cactus()
False
"""
self._scream_if_not_simple()
# Special cases
if self.order() < 4:
return True
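# A cactus on n >= 2 vertices has at most 3*(n-1)/2 edges: each block
# is a single edge or a cycle, and a cycle on k vertices has
# k <= 3*(k-1)/2 edges for k >= 3.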
if self.size() > 3 * (self.order() - 1) / 2:
return False
# Every cactus graph is outerplanar
if not self.is_circular_planar():
return False
if not self.is_connected():
return False
# the number of faces is 1 plus the number of blocks of order > 2
B = self.blocks_and_cut_vertices()[0]
return len(self.faces()) == sum(1 for b in B if len(b) > 2) + 1
@doc_index("Graph properties")
def is_biconnected(self):
"""
Test if the graph is biconnected.
A biconnected graph is a connected graph on two or more vertices that is
not broken into disconnected pieces by deleting any single vertex.
.. SEEALSO::
- :meth:`~sage.graphs.generic_graph.GenericGraph.is_connected`
- :meth:`~sage.graphs.generic_graph.GenericGraph.blocks_and_cut_vertices`
- :meth:`~sage.graphs.generic_graph.GenericGraph.blocks_and_cuts_tree`
- :wikipedia:`Biconnected_graph`
EXAMPLES::
sage: G = graphs.PetersenGraph()
sage: G.is_biconnected()
True
sage: G.add_path([0,'a','b'])
sage: G.is_biconnected()
False
sage: G.add_edge('b', 1)
sage: G.is_biconnected()
True
TESTS::
sage: Graph().is_biconnected()
False
sage: Graph(1).is_biconnected()
False
sage: graphs.CompleteGraph(2).is_biconnected()
True
"""
if self.order() < 2 or not self.is_connected():
return False
if self.blocks_and_cut_vertices()[1]:
return False
return True
@doc_index("Graph properties")
def is_block_graph(self):
r"""
Return whether this graph is a block graph.
A block graph is a connected graph in which every biconnected component
(block) is a clique.
.. SEEALSO::
- :wikipedia:`Block_graph` for more details on these graphs
- :meth:`~sage.graphs.graph_generators.GraphGenerators.RandomBlockGraph`
-- generator of random block graphs
- :meth:`~sage.graphs.generic_graph.GenericGraph.blocks_and_cut_vertices`
- :meth:`~sage.graphs.generic_graph.GenericGraph.blocks_and_cuts_tree`
EXAMPLES::
sage: G = graphs.RandomBlockGraph(6, 2, kmax=4)
sage: G.is_block_graph()
True
sage: from sage.graphs.isgci import graph_classes
sage: G in graph_classes.Block
True
sage: graphs.CompleteGraph(4).is_block_graph()
True
sage: graphs.RandomTree(6).is_block_graph()
True
sage: graphs.PetersenGraph().is_block_graph()
False
sage: Graph(4).is_block_graph()
False
"""
if not self.is_connected():
return False
if self.is_clique():
return True
B,C = self.blocks_and_cut_vertices()
return all(self.is_clique(vertices=block) for block in B)
@doc_index("Graph properties")
def is_cograph(self):
"""
Check whether the graph is a cograph.
A cograph is defined recursively: the single-vertex graph is a
cograph, the complement of a cograph is a cograph, and the disjoint
union of two cographs is a cograph. There are many other
characterizations, see the :wikipedia:`Cograph`.
EXAMPLES::
sage: graphs.HouseXGraph().is_cograph()
True
sage: graphs.HouseGraph().is_cograph()
False
.. TODO::
Implement faster recognition algorithm, as for instance
the linear time recognition algorithm using LexBFS proposed
in [Bre2008]_.
TESTS::
sage: [graphs.PathGraph(i).is_cograph() for i in range(6)]
[True, True, True, True, False, False]
sage: graphs.CycleGraph(5).is_cograph() # Self-complemented
False
"""
# A cograph has no 4-vertex path as an induced subgraph.
# We will first try to "decompose" graph by complements and
# split to connected components, and use fairly slow
# subgraph search if that fails.
self._scream_if_not_simple()
if self.order() < 4:
return True
if self.density()*2 > 1:
return self.complement().is_cograph()
if not self.is_connected():
return all(part.is_cograph() for part in self.connected_components_subgraphs())
P4 = Graph({0: [1], 1: [2], 2: [3]})
return self.subgraph_search(P4, induced=True) is None
@doc_index("Graph properties")
def is_apex(self):
r"""
Test if the graph is apex.
A graph is apex if it can be made planar by the removal of a single
vertex. The deleted vertex is called ``an apex`` of the graph, and a
graph may have more than one apex. For instance, in the minimal
nonplanar graphs `K_5` or `K_{3,3}`, every vertex is an apex. The apex
graphs include graphs that are themselves planar, in which case again
every vertex is an apex. The null graph is also counted as an apex graph
even though it has no vertex to remove. If the graph is not connected,
we say that it is apex if it has at most one non-planar connected
component and if that component is apex. See the :wikipedia:`Apex_graph`
for more information.
.. SEEALSO::
- :meth:`~Graph.apex_vertices`
- :meth:`~sage.graphs.generic_graph.GenericGraph.is_planar`
EXAMPLES:
`K_5` and `K_{3,3}` are apex graphs, and each of their vertices is an
apex::
sage: G = graphs.CompleteGraph(5)
sage: G.is_apex()
True
sage: G = graphs.CompleteBipartiteGraph(3,3)
sage: G.is_apex()
True
The Petersen graph is not apex::
sage: G = graphs.PetersenGraph()
sage: G.is_apex()
False
A graph is apex if all its connected components are apex, but at most
one is not planar::
sage: M = graphs.Grid2dGraph(3,3)
sage: K5 = graphs.CompleteGraph(5)
sage: (M+K5).is_apex()
True
sage: (M+K5+K5).is_apex()
False
TESTS:
The null graph is apex::
sage: G = Graph()
sage: G.is_apex()
True
The graph might be mutable or immutable::
sage: G = Graph(M+K5, immutable=True)
sage: G.is_apex()
True
"""
# Easy cases: null graph, subgraphs of K_5 and K_3,3
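# Removing any vertex from a graph on at most 5 vertices leaves at
# most 4 vertices, and any graph on at most 4 vertices is planar.
# Similarly, removing a vertex from a bipartite graph on at most 6
# vertices leaves a bipartite graph on at most 5 vertices, which is
# planar.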
if self.order() <= 5 or (self.order() <= 6 and self.is_bipartite()):
return True
return len(self.apex_vertices(k=1)) > 0
@doc_index("Graph properties")
def apex_vertices(self, k=None):
r"""
Return the list of apex vertices.
A graph is apex if it can be made planar by the removal of a single
vertex. The deleted vertex is called ``an apex`` of the graph, and a
graph may have more than one apex. For instance, in the minimal
nonplanar graphs `K_5` or `K_{3,3}`, every vertex is an apex. The apex
graphs include graphs that are themselves planar, in which case again
every vertex is an apex. The null graph is also counted as an apex graph
even though it has no vertex to remove. If the graph is not connected,
we say that it is apex if it has at most one non-planar connected
component and if that component is apex. See the
:wikipedia:`Apex_graph` for more information.
.. SEEALSO::
- :meth:`~Graph.is_apex`
- :meth:`~sage.graphs.generic_graph.GenericGraph.is_planar`
INPUT:
- ``k`` -- integer (default: ``None``); when set to ``None``, the method
returns the list of all apex of the graph, possibly empty if the graph
is not apex. When set to a positive integer, the method ends as soon
as `k` apex vertices are found.
OUTPUT:
By default, the method returns the list of all apex of the graph. When
parameter ``k`` is set to a positive integer, the returned list is
bounded to `k` apex vertices.
EXAMPLES:
`K_5` and `K_{3,3}` are apex graphs, and each of their vertices is an
apex::
sage: G = graphs.CompleteGraph(5)
sage: G.apex_vertices()
[0, 1, 2, 3, 4]
sage: G = graphs.CompleteBipartiteGraph(3,3)
sage: G.is_apex()
True
sage: G.apex_vertices()
[0, 1, 2, 3, 4, 5]
sage: G.apex_vertices(k=3)
[0, 1, 2]
A `4\times 4`-grid is apex and each of its vertices is an apex. When
adding a universal vertex, the resulting graph is apex and the universal
vertex is the unique apex vertex ::
sage: G = graphs.Grid2dGraph(4,4)
sage: set(G.apex_vertices()) == set(G.vertices())
True
sage: G.add_edges([('universal',v) for v in G])
sage: G.apex_vertices()
['universal']
The Petersen graph is not apex::
sage: G = graphs.PetersenGraph()
sage: G.apex_vertices()
[]
A graph is apex if all its connected components are apex, but at most
one is not planar::
sage: M = graphs.Grid2dGraph(3,3)
sage: K5 = graphs.CompleteGraph(5)
sage: (M+K5).apex_vertices()
[9, 10, 11, 12, 13]
sage: (M+K5+K5).apex_vertices()
[]
Neighbors of an apex of degree 2 are apex::
sage: G = graphs.Grid2dGraph(5,5)
sage: v = (666, 666)
sage: G.add_path([(1, 1), v, (3, 3)])
sage: G.is_planar()
False
sage: G.degree(v)
2
sage: sorted(G.apex_vertices())
[(1, 1), (2, 2), (3, 3), (666, 666)]
TESTS:
The null graph is apex although it has no apex vertex::
sage: G = Graph()
sage: G.apex_vertices()
[]
Parameter ``k`` cannot be a negative integer::
sage: G.apex_vertices(k=-1)
Traceback (most recent call last):
...
ValueError: parameter k must be a non negative integer
The graph might be mutable or immutable::
sage: G = Graph(M+K5, immutable=True)
sage: G.apex_vertices()
[9, 10, 11, 12, 13]
"""
if k is None:
k = self.order()
elif k < 0:
raise ValueError("parameter k must be a non negative integer")
# Easy cases: null graph, subgraphs of K_5 and K_3,3
if self.order() <= 5 or (self.order() <= 6 and self.is_bipartite()):
it = self.vertex_iterator()
return [next(it) for _ in range(k)]
if not self.is_connected():
# We search for its non planar connected components. If it has more
# than one such component, the graph is not apex. It is apex if
# either it has no such component, in which case the graph is
# planar, or if its unique non planar component is apex.
P = [H for H in self.connected_components_subgraphs() if not H.is_planar()]
if not P: # The graph is planar
it = self.vertex_iterator()
return [next(it) for _ in range(k)]
elif len(P) > 1:
return []
else:
# We proceed with the non planar component
if P[0].is_immutable():
H = Graph(P[0].edges(labels=0, sort=False), immutable=False, loops=False, multiedges=False)
else:
H = P[0]
elif self.is_planar():
# A planar graph is apex.
it = self.vertex_iterator()
return [next(it) for _ in range(k)]
else:
# We make a basic copy of the graph since we will modify it
H = Graph(self.edges(labels=0, sort=False), immutable=False, loops=False, multiedges=False)
# General case: basic implementation
#
# Test for each vertex if its removal makes the graph planar.
# Obviously, we don't test vertices of degree one. Furthermore, if a
# vertex of degree 2 is an apex, its neighbors also are. So we start
# with vertices of degree 2.
V = {}
for u in H:
d = H.degree(u)
if d > 1:
if d in V:
V[d].append(u)
else:
V[d] = [u]
apex = set()
for deg in sorted(V):
for u in V[deg]:
if u in apex: # True if neighbor of an apex of degree 2
if deg == 2:
# We ensure that its neighbors are known apex
apex.update(H.neighbor_iterator(u))
if len(apex) >= k:
return list(apex)[:k]
continue
E = H.edges_incident(u, labels=0)
H.delete_vertex(u)
if H.is_planar():
apex.add(u)
if deg == 2:
# The neighbors of an apex of degree 2 also are
apex.update(self.neighbor_iterator(u))
if len(apex) >= k:
return list(apex)[:k]
H.add_edges(E)
return list(apex)
@doc_index("Graph properties")
def is_overfull(self):
r"""
Tests whether the current graph is overfull.
A graph `G` on `n` vertices and `m` edges is said to be overfull if:
- `n` is odd
- It satisfies `2m > (n-1)\Delta(G)`, where `\Delta(G)` denotes the
maximum degree among all vertices in `G`.
An overfull graph must have a chromatic index of `\Delta(G)+1`.
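For instance, the complete graph `K_7` is overfull: `n = 7` is odd,
`m = 21`, `\Delta(G) = 6`, and `2m = 42 > 36 = (n-1)\Delta(G)`.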
EXAMPLES:
A complete graph of order `n > 1` is overfull if and only if `n` is
odd::
sage: graphs.CompleteGraph(6).is_overfull()
False
sage: graphs.CompleteGraph(7).is_overfull()
True
sage: graphs.CompleteGraph(1).is_overfull()
False
The claw graph is not overfull::
sage: from sage.graphs.graph_coloring import edge_coloring
sage: g = graphs.ClawGraph()
sage: g
Claw graph: Graph on 4 vertices
sage: edge_coloring(g, value_only=True)
3
sage: g.is_overfull()
False
The Holt graph is an example of an overfull graph::
sage: G = graphs.HoltGraph()
sage: G.is_overfull()
True
Checking that all complete graphs `K_n` for even `0 \leq n \leq 100`
are not overfull::
sage: def check_overfull_Kn_even(n):
....: i = 0
....: while i <= n:
....: if graphs.CompleteGraph(i).is_overfull():
....: print("A complete graph of even order cannot be overfull.")
....: return
....: i += 2
....: print("Complete graphs of even order up to %s are not overfull." % n)
...
sage: check_overfull_Kn_even(100) # long time
Complete graphs of even order up to 100 are not overfull.
The null graph, i.e. the graph with no vertices, is not overfull::
sage: Graph().is_overfull()
False
sage: graphs.CompleteGraph(0).is_overfull()
False
Checking that all complete graphs `K_n` for odd `1 < n \leq 100`
are overfull::
sage: def check_overfull_Kn_odd(n):
....: i = 3
....: while i <= n:
....: if not graphs.CompleteGraph(i).is_overfull():
....: print("A complete graph of odd order > 1 must be overfull.")
....: return
....: i += 2
....: print("Complete graphs of odd order > 1 up to %s are overfull." % n)
...
sage: check_overfull_Kn_odd(100) # long time
Complete graphs of odd order > 1 up to 100 are overfull.
The Petersen Graph, though, is not overfull while
its chromatic index is `\Delta+1`::
sage: g = graphs.PetersenGraph()
sage: g.is_overfull()
False
sage: from sage.graphs.graph_coloring import edge_coloring
sage: max(g.degree()) + 1 == edge_coloring(g, value_only=True)
True
"""
# # A possible optimized version. But the gain in speed is very little.
# return bool(self._backend.num_verts() & 1) and ( # odd order n
# 2 * self._backend.num_edges(self._directed) > #2m > \Delta(G)*(n-1)
# max(self.degree()) * (self._backend.num_verts() - 1))
# unoptimized version
return (self.order() % 2 == 1) and (
2 * self.size() > max(self.degree()) * (self.order() - 1))
@doc_index("Graph properties")
def is_even_hole_free(self, certificate=False):
r"""
Tests whether ``self`` contains an induced even hole.
A *hole* is an induced cycle of length at least 4. It is said to be even
(resp. odd) if its length is even (resp. odd).
Even-hole-free graphs always contain a bisimplicial vertex, which
ensures that their chromatic number is at most twice their clique number
[ACHRS2008]_.
INPUT:
- ``certificate`` -- boolean (default: ``False``); when ``certificate =
False``, this method only returns ``True`` or ``False``. If
``certificate = True``, the subgraph found is returned instead of
``False``.
EXAMPLES:
Is the Petersen Graph even-hole-free ::
sage: g = graphs.PetersenGraph()
sage: g.is_even_hole_free()
False
As any chordal graph is hole-free, interval graphs behave the same way::
sage: g = graphs.RandomIntervalGraph(20)
sage: g.is_even_hole_free()
True
It is clear, though, that a random bipartite graph which is not a forest
has an even hole::
sage: g = graphs.RandomBipartite(10, 10, .5)
sage: g.is_even_hole_free() and not g.is_forest()
False
We can check the certificate returned is indeed an even cycle::
sage: if not g.is_forest():
....: cycle = g.is_even_hole_free(certificate=True)
....: if cycle.order() % 2 == 1:
....: print("Error !")
....: if not cycle.is_isomorphic(
....: graphs.CycleGraph(cycle.order())):
....: print("Error !")
...
sage: print("Everything is Fine !")
Everything is Fine !
TESTS:
Bug reported in :trac:`9925`, and fixed by :trac:`9420`::
sage: g = Graph(':SiBFGaCEF_@CE`DEGH`CEFGaCDGaCDEHaDEF`CEH`ABCDEF', loops=False, multiedges=False)
sage: g.is_even_hole_free()
False
sage: g.is_even_hole_free(certificate=True)
Subgraph of (): Graph on 4 vertices
Making sure there are no other counter-examples around ::
sage: t = lambda x: (Graph(x).is_forest() or
....: isinstance(Graph(x).is_even_hole_free(certificate=True), Graph))
sage: all( t(graphs.RandomBipartite(10, 10, .5)) for i in range(100) )
True
"""
girth = self.girth()
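# An infinite girth (reported as a value larger than the order) means
# the graph is acyclic, so the search starts at length 4. An even
# girth means the girth cycle itself is an induced even hole.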
if girth > self.order():
start = 4
elif not girth % 2:
if not certificate:
return False
start = girth
else:
start = girth + 1
from sage.graphs.generators.basic import CycleGraph
while start <= self.order():
subgraph = self.subgraph_search(CycleGraph(start), induced=True)
if subgraph is not None:
if certificate:
return subgraph
else:
return False
start += 2
return True
@doc_index("Graph properties")
def is_odd_hole_free(self, certificate=False):
r"""
Tests whether ``self`` contains an induced odd hole.
A *hole* is an induced cycle of length at least 4. It is said to be even
(resp. odd) if its length is even (resp. odd).
It is interesting to notice that while it is polynomial to check whether
a graph has an odd hole or an odd antihole [CCLSV2005]_, it is not known
whether testing for one of these two cases independently is polynomial
too.
INPUT:
- ``certificate`` -- boolean (default: ``False``); when ``certificate =
False``, this method only returns ``True`` or ``False``. If
``certificate = True``, the subgraph found is returned instead of
``False``.
EXAMPLES:
Is the Petersen Graph odd-hole-free ::
sage: g = graphs.PetersenGraph()
sage: g.is_odd_hole_free()
False
Which was to be expected, as its girth is 5 ::
sage: g.girth()
5
We can check the certificate returned is indeed a 5-cycle::
sage: cycle = g.is_odd_hole_free(certificate=True)
sage: cycle.is_isomorphic(graphs.CycleGraph(5))
True
As any chordal graph is hole-free, no interval graph has an odd hole::
sage: g = graphs.RandomIntervalGraph(20)
sage: g.is_odd_hole_free()
True
"""
girth = self.odd_girth()
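# An infinite odd girth means the graph is bipartite and thus has no
# odd cycle at all. An odd girth of 3 only exhibits triangles, which
# are not holes, so the search starts at length 5.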
if girth > self.order():
return True
if girth == 3:
start = 5
else:
if not certificate:
return False
start = girth
from sage.graphs.generators.basic import CycleGraph
while start <= self.order():
subgraph = self.subgraph_search(CycleGraph(start), induced=True)
if subgraph is not None:
if certificate:
return subgraph
else:
return False
start += 2
return True
@doc_index("Graph properties")
def is_triangle_free(self, algorithm='dense_graph', certificate=False):
r"""
Check whether ``self`` is triangle-free.
INPUT:
- ``algorithm`` -- (default: ``'dense_graph'``) specifies the algorithm
to use among:
- ``'matrix'`` -- tests if the trace of the adjacency matrix is
positive.
- ``'bitset'`` -- encodes adjacencies into bitsets and uses fast
bitset operations to test if the input graph contains a
triangle. This method is generally faster than standard matrix
multiplication.
- ``'dense_graph'`` -- use the implementation of
:mod:`sage.graphs.base.static_dense_graph`
- ``certificate`` -- boolean (default: ``False``); whether to return a
triangle if one is found. This parameter is ignored when ``algorithm``
is ``'matrix'``.
EXAMPLES:
The Petersen Graph is triangle-free::
sage: g = graphs.PetersenGraph()
sage: g.is_triangle_free()
True
or a complete bipartite graph::
sage: G = graphs.CompleteBipartiteGraph(5,6)
sage: G.is_triangle_free(algorithm='matrix')
True
sage: G.is_triangle_free(algorithm='bitset')
True
sage: G.is_triangle_free(algorithm='dense_graph')
True
a tripartite graph, though, contains many triangles::
sage: G = (3 * graphs.CompleteGraph(5)).complement()
sage: G.is_triangle_free(algorithm='matrix')
False
sage: G.is_triangle_free(algorithm='bitset')
False
sage: G.is_triangle_free(algorithm='dense_graph')
False
Asking for a certificate::
sage: K4 = graphs.CompleteGraph(4)
sage: K4.is_triangle_free(algorithm='dense_graph', certificate=True)
(False, [0, 1, 2])
sage: K4.is_triangle_free(algorithm='bitset', certificate=True)
(False, [0, 1, 2])
TESTS:
Comparison of algorithms::
sage: for i in range(10): # long time
....: G = graphs.RandomBarabasiAlbert(50,2)
....: bm = G.is_triangle_free(algorithm='matrix')
....: bb = G.is_triangle_free(algorithm='bitset')
....: bd = G.is_triangle_free(algorithm='dense_graph')
....: if bm != bb or bm != bd:
....: print("That's not good!")
Asking for an unknown algorithm::
sage: g.is_triangle_free(algorithm='tip top')
Traceback (most recent call last):
...
ValueError: Algorithm 'tip top' not yet implemented. Please contribute.
Check the empty graph::
sage: graphs.EmptyGraph().is_triangle_free()
True
"""
if algorithm == 'dense_graph':
from sage.graphs.base.static_dense_graph import is_triangle_free
return is_triangle_free(self, certificate=certificate)
if algorithm == 'bitset':
if self.order() < 3:
return (True, []) if certificate else True
from sage.data_structures.bitset import Bitset
N = self.order()
vertex_to_int = {}
B = {}
for i, u in enumerate(self):
vertex_to_int[u] = i
B[u] = Bitset(capacity=N)
# map adjacency to bitsets
for u, v in self.edge_iterator(labels=None):
if u != v:
B[u].add(vertex_to_int[v])
B[v].add(vertex_to_int[u])
# Search for a triangle
for u, v in self.edge_iterator(labels=None):
BB = B[u] & B[v]
if BB:
if certificate:
for w in self.neighbor_iterator(u):
if vertex_to_int[w] in BB:
return False, [u, v, w]
return False
return (True, []) if certificate else True
elif algorithm == 'matrix':
if self.order() < 3:
return True
return (self.adjacency_matrix()**3).trace() == 0
else:
raise ValueError("Algorithm '%s' not yet implemented. Please contribute." %(algorithm))
@doc_index("Graph properties")
def is_split(self):
r"""
Returns ``True`` if the graph is a split graph, ``False`` otherwise.
A graph `G` is said to be a split graph if its vertices `V(G)` can be
partitioned into two sets `K` and `I` such that the vertices of `K`
induce a complete graph, and those of `I` are an independent set.
There is a simple test to check whether a graph is a split graph (see,
for instance, the book "Graph Classes: A Survey" [BLS1999]_, page
203):
Given the degree sequence `d_1 \geq ... \geq d_n` of `G`, a graph is a
split graph if and only if:
.. MATH::
\sum_{i=1}^\omega d_i = \omega(\omega - 1) + \sum_{i=\omega + 1}^n d_i
where `\omega = \max\{i : d_i \geq i - 1\}`.
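For instance, the path `P_3` has degree sequence `2 \geq 1 \geq 1`, so
`\omega = 2` and `d_1 + d_2 = 3 = \omega(\omega - 1) + d_3`: it is
indeed a split graph, one edge inducing the clique and the remaining
vertex forming the independent set.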
EXAMPLES:
Split graphs are, in particular, chordal graphs. Hence, the Petersen
graph cannot be split::
sage: graphs.PetersenGraph().is_split()
False
We can easily build a "random" split graph by creating a complete
graph, and adding vertices only connected to some random vertices of the
clique::
sage: g = graphs.CompleteGraph(10)
sage: sets = Subsets(Set(range(10)))
sage: for i in range(10, 25):
....: g.add_edges([(i,k) for k in sets.random_element()])
sage: g.is_split()
True
Another characterisation of split graphs states that a graph is a split
graph if and only if it does not contain the 4-cycle, the 5-cycle, or
`2K_2` as an induced subgraph. Hence for the above graph we have::
sage: forbidden_subgraphs = [graphs.CycleGraph(4), graphs.CycleGraph(5), 2 * graphs.CompleteGraph(2)]
sage: sum(g.subgraph_search_count(H,induced=True) for H in forbidden_subgraphs)
0
"""
self._scream_if_not_simple()
# our degree sequence is numbered from 0 to n-1, so to avoid
# any mistake, let's fix it :-)
degree_sequence = [0] + sorted(self.degree(), reverse=True)
for i, d in enumerate(degree_sequence):
if d >= i - 1:
omega = i
else:
break
left = sum(degree_sequence[:omega + 1])
right = omega * (omega - 1) + sum(degree_sequence[omega + 1:])
return left == right
@doc_index("Algorithmically hard stuff")
def is_perfect(self, certificate=False):
r"""
Tests whether the graph is perfect.
A graph `G` is said to be perfect if `\chi(H)=\omega(H)` holds for any
induced subgraph `H\subseteq_i G` (and so for `G` itself, too), where
`\chi(H)` represents the chromatic number of `H`, and `\omega(H)` its
clique number. The Strong Perfect Graph Theorem [CRST2006]_ gives
another characterization of perfect graphs:
A graph is perfect if and only if it contains no odd hole (cycle on an
odd number `k` of vertices, `k>3`) nor any odd antihole (complement of a
hole) as an induced subgraph.
INPUT:
- ``certificate`` -- boolean (default: ``False``); whether to return a
certificate.
OUTPUT:
When ``certificate = False``, this function returns a boolean
value. When ``certificate = True``, it returns a subgraph of ``self``
isomorphic to an odd hole or an odd antihole if any, and ``None``
otherwise.
EXAMPLES:
A bipartite graph is always perfect ::
sage: g = graphs.RandomBipartite(8,4,.5)
sage: g.is_perfect()
True
So is the line graph of a bipartite graph::
sage: g = graphs.RandomBipartite(4,3,0.7)
sage: g.line_graph().is_perfect() # long time
True
As well as the Cartesian product of two complete graphs::
sage: g = graphs.CompleteGraph(3).cartesian_product(graphs.CompleteGraph(3))
sage: g.is_perfect()
True
Interval Graphs, which are chordal graphs, too ::
sage: g = graphs.RandomIntervalGraph(7)
sage: g.is_perfect()
True
The Petersen graph, which is triangle-free and has chromatic number 3,
is obviously not perfect::
sage: g = graphs.PetersenGraph()
sage: g.is_perfect()
False
We can obtain an induced 5-cycle as a certificate::
sage: g.is_perfect(certificate=True)
Subgraph of (Petersen graph): Graph on 5 vertices
TESTS:
Check that :trac:`13546` has been fixed::
sage: Graph(':FgGE@I@GxGs', loops=False, multiedges=False).is_perfect()
False
sage: g = Graph({0: [2, 3, 4, 5],
....: 1: [3, 4, 5, 6],
....: 2: [0, 4, 5, 6],
....: 3: [0, 1, 5, 6],
....: 4: [0, 1, 2, 6],
....: 5: [0, 1, 2, 3],
....: 6: [1, 2, 3, 4]})
sage: g.is_perfect()
False
TESTS::
sage: Graph(':Ab').is_perfect()
Traceback (most recent call last):
...
ValueError: This method is only defined for simple graphs, and yours is not one of them !
sage: g = Graph()
sage: g.allow_loops(True)
sage: g.add_edge(0,0)
sage: g.edges()
[(0, 0, None)]
sage: g.is_perfect()
Traceback (most recent call last):
...
ValueError: This method is only defined for simple graphs, and yours is not one of them !
"""
if self.has_multiple_edges() or self.has_loops():
raise ValueError("This method is only defined for simple graphs,"
" and yours is not one of them !")
if self.is_bipartite():
return True if not certificate else None
self_complement = self.complement()
self_complement.remove_loops()
self_complement.remove_multiple_edges()
if self_complement.is_bipartite():
return True if not certificate else None
answer = self.is_odd_hole_free(certificate=certificate)
if not (answer is True):
return answer
return self_complement.is_odd_hole_free(certificate=certificate)
@doc_index("Graph properties")
def is_edge_transitive(self):
r"""
Check if ``self`` is an edge-transitive graph.
A graph is edge-transitive if its automorphism group acts transitively
on its edge set.
Equivalently, if there exists for any pair of edges `uv,u'v'\in E(G)` an
automorphism `\phi` of `G` such that `\phi(uv)=u'v'` (note this does not
necessarily mean that `\phi(u)=u'` and `\phi(v)=v'`).
.. SEEALSO::
- :wikipedia:`Edge-transitive_graph`
- :meth:`~Graph.is_arc_transitive`
- :meth:`~Graph.is_half_transitive`
- :meth:`~Graph.is_semi_symmetric`
EXAMPLES::
sage: P = graphs.PetersenGraph()
sage: P.is_edge_transitive()
True
sage: C = graphs.CubeGraph(3)
sage: C.is_edge_transitive()
True
sage: G = graphs.GrayGraph()
sage: G.is_edge_transitive()
True
sage: P = graphs.PathGraph(4)
sage: P.is_edge_transitive()
False
"""
from sage.libs.gap.libgap import libgap
if not self.size():
return True
A = self.automorphism_group()
e = next(self.edge_iterator(labels=False))
e = [A._domain_to_gap[e[0]], A._domain_to_gap[e[1]]]
e.sort()
return libgap(A).OrbitLength(e, libgap.OnSets) == self.size()
@doc_index("Graph properties")
def is_arc_transitive(self):
r"""
Check if ``self`` is an arc-transitive graph.
A graph is arc-transitive if its automorphism group acts transitively on
its pairs of adjacent vertices.
Equivalently, if there exists for any pair of edges `uv,u'v'\in E(G)` an
automorphism `\phi_1` of `G` such that `\phi_1(u)=u'` and
`\phi_1(v)=v'`, as well as another automorphism `\phi_2` of `G` such
that `\phi_2(u)=v'` and `\phi_2(v)=u'`.
.. SEEALSO::
- :wikipedia:`arc-transitive_graph`
- :meth:`~Graph.is_edge_transitive`
- :meth:`~Graph.is_half_transitive`
- :meth:`~Graph.is_semi_symmetric`
EXAMPLES::
sage: P = graphs.PetersenGraph()
sage: P.is_arc_transitive()
True
sage: G = graphs.GrayGraph()
sage: G.is_arc_transitive()
False
"""
from sage.libs.gap.libgap import libgap
if not self.size():
return True
A = self.automorphism_group()
e = next(self.edge_iterator(labels=False))
e = [A._domain_to_gap[e[0]], A._domain_to_gap[e[1]]]
return libgap(A).OrbitLength(e,libgap.OnTuples) == 2*self.size()
@doc_index("Graph properties")
def is_half_transitive(self):
"""
Check if ``self`` is a half-transitive graph.
A graph is half-transitive if it is both vertex and edge transitive
but not arc-transitive.
.. SEEALSO::
- :wikipedia:`half-transitive_graph`
- :meth:`~Graph.is_edge_transitive`
- :meth:`~Graph.is_arc_transitive`
- :meth:`~Graph.is_semi_symmetric`
EXAMPLES:
The Petersen Graph is not half-transitive::
sage: P = graphs.PetersenGraph()
sage: P.is_half_transitive()
False
The smallest half-transitive graph is the Holt Graph::
sage: H = graphs.HoltGraph()
sage: H.is_half_transitive()
True
"""
# A half-transitive graph always has only vertices of even degree
if any(d % 2 for d in self.degree_iterator()):
return False
return (self.is_edge_transitive() and
self.is_vertex_transitive() and
not self.is_arc_transitive())
@doc_index("Graph properties")
def is_semi_symmetric(self):
"""
Check if ``self`` is semi-symmetric.
A graph is semi-symmetric if it is regular, edge-transitive but not
vertex-transitive.
.. SEEALSO::
- :wikipedia:`Semi-symmetric_graph`
- :meth:`~Graph.is_edge_transitive`
- :meth:`~Graph.is_arc_transitive`
- :meth:`~Graph.is_half_transitive`
EXAMPLES:
The Petersen graph is not semi-symmetric::
sage: P = graphs.PetersenGraph()
sage: P.is_semi_symmetric()
False
The Gray graph is the smallest possible cubic semi-symmetric graph::
sage: G = graphs.GrayGraph()
sage: G.is_semi_symmetric()
True
Another well known semi-symmetric graph is the Ljubljana graph::
sage: L = graphs.LjubljanaGraph()
sage: L.is_semi_symmetric()
True
"""
# A semi-symmetric graph is always bipartite
if not self.is_bipartite():
return False
return (self.is_regular() and
self.is_edge_transitive() and not
self.is_vertex_transitive())
@doc_index("Graph properties")
def is_path(self):
r"""
Check whether ``self`` is a path.
A connected graph of order `n \geq 2` is a path if it is a tree
(see :meth:`is_tree`) with `n-2` vertices of degree 2 and two of
degree 1. By convention, a graph of order 1 without loops is a path,
but the empty graph is not a path.
EXAMPLES::
sage: G = graphs.PathGraph(5)
sage: G.is_path()
True
sage: H = graphs.CycleGraph(5)
sage: H.is_path()
False
sage: D = graphs.PathGraph(5).disjoint_union(graphs.CycleGraph(5))
sage: D.is_path()
False
sage: E = graphs.EmptyGraph()
sage: E.is_path()
False
sage: O = Graph([[1], []])
sage: O.is_path()
True
sage: O.allow_loops(True)
sage: O.add_edge(1, 1)
sage: O.is_path()
False
"""
order = self.order()
if order != self.size() + 1:
return False
if order <= 1:
return order == 1
deg_one_counter = 0
seen_counter = 0
for v in self.depth_first_search(next(self.vertex_iterator())):
seen_counter += 1
deg = self._backend.degree(v, False)
if deg == 1:
deg_one_counter += 1
if deg_one_counter > 2:
return False
elif deg != 2:
return False
return deg_one_counter == 2 and seen_counter == order
@doc_index("Connectivity, orientations, trees")
def degree_constrained_subgraph(self, bounds, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
r"""
Returns a degree-constrained subgraph.
Given a graph `G` and two functions `f, g:V(G)\rightarrow \mathbb Z`
such that `f \leq g`, a degree-constrained subgraph in `G` is
a subgraph `G' \subseteq G` such that for any vertex `v \in G`,
`f(v) \leq d_{G'}(v) \leq g(v)`.
INPUT:
- ``bounds`` -- two possibilities:
- A dictionary whose keys are the vertices, and values a pair of
real values ``(min,max)`` corresponding to the values
`(f(v),g(v))`.
- A function associating to each vertex a pair of
real values ``(min,max)`` corresponding to the values
`(f(v),g(v))`.
- ``solver`` -- string (default: ``None``); specify a Mixed Integer
Linear Programming (MILP) solver to be used. If set to ``None``, the
default one is used. For more information on MILP solvers and which
default solver is used, see the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
- ``integrality_tolerance`` -- float; parameter for use with MILP
solvers over an inexact base ring; see
:meth:`MixedIntegerLinearProgram.get_values`.
OUTPUT:
- When a solution exists, this method outputs the degree-constrained
subgraph as a Graph object.
- When no solution exists, returns ``False``.
.. NOTE::
- This algorithm computes the degree-constrained subgraph of minimum
weight.
- If the graph's edges are weighted, these are taken into account.
- This problem can be solved in polynomial time.
EXAMPLES:
Is there a perfect matching in an even cycle? ::
sage: g = graphs.CycleGraph(6)
sage: bounds = lambda x: [1,1]
sage: m = g.degree_constrained_subgraph(bounds=bounds)
sage: m.size()
3
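A dictionary of bounds can be used instead of a function; a minimal
sketch (not doctested), assuming the cycle ``g`` above::

    bounds = {v: (1, 1) for v in g}
    m = g.degree_constrained_subgraph(bounds=bounds)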
"""
self._scream_if_not_simple()
from sage.numerical.mip import MixedIntegerLinearProgram, MIPSolverException
p = MixedIntegerLinearProgram(maximization=False, solver=solver)
b = p.new_variable(binary=True)
if isinstance(bounds,dict):
f_bounds = lambda x: bounds[x]
else:
f_bounds = bounds
if self.weighted():
from sage.rings.real_mpfr import RR
weight = lambda x: x if x in RR else 1
else:
weight = lambda x: 1
for v in self:
minimum,maximum = f_bounds(v)
p.add_constraint(p.sum(b[frozenset((x,y))]*weight(l) for x,y,l in self.edges_incident(v)),
min=minimum, max=maximum)
p.set_objective(p.sum(b[frozenset((x,y))]*weight(l) for x,y,l in self.edge_iterator()))
try:
p.solve(log=verbose)
except MIPSolverException:
return False
g = copy(self)
b = p.get_values(b, convert=bool, tolerance=integrality_tolerance)
g.delete_edges(e for e in g.edge_iterator(labels=False) if not b[frozenset(e)])
return g
### Orientations
@doc_index("Connectivity, orientations, trees")
def strong_orientation(self):
r"""
Returns a strongly connected orientation of the current graph.
An orientation of an undirected graph is a digraph obtained by giving an
unique direction to each of its edges. An orientation is said to be
strong if there is a directed path between each pair of vertices. See
also the :wikipedia:`Strongly_connected_component`.
If the graph is 2-edge-connected, a strongly connected orientation
can be found in linear time. If the given graph is not 2-connected,
the orientation returned will ensure that each 2-connected component
has a strongly connected orientation.
OUTPUT:
A digraph representing an orientation of the current graph.
.. NOTE::
- This method assumes the graph is connected.
- This algorithm works in O(m).
EXAMPLES:
For a 2-regular graph, a strong orientation gives to each vertex an
out-degree equal to 1::
sage: g = graphs.CycleGraph(5)
sage: g.strong_orientation().out_degree()
[1, 1, 1, 1, 1]
The Petersen graph is 2-edge-connected. It thus has a strongly connected
orientation::
sage: g = graphs.PetersenGraph()
sage: o = g.strong_orientation()
sage: len(o.strongly_connected_components())
1
The same goes for the CubeGraph in any dimension ::
sage: all(len(graphs.CubeGraph(i).strong_orientation().strongly_connected_components()) == 1 for i in range(2,6))
True
A multigraph also has a strong orientation ::
sage: g = Graph([(1,2),(1,2)], multiedges=True)
sage: g.strong_orientation()
Multi-digraph on 2 vertices
"""
from sage.graphs.digraph import DiGraph
d = DiGraph(multiedges=self.allows_multiple_edges())
i = 0
# The algorithm works through a depth-first search. Any edge
# used in the depth-first search is oriented in the direction
# in which it has been used. All the other edges are oriented
# backward
v = next(self.vertex_iterator())
seen = {}
i = 1
# Time at which the vertices have been discovered
seen[v] = i
# indicates the stack of edges to explore
next_ = self.edges_incident(v)
while next_:
e = next_.pop()
# Ignore loops
if e[0] == e[1]:
continue
# We assume e[0] to be a `seen` vertex
e = e if seen.get(e[0], False) is not False else (e[1], e[0], e[2])
# If we discovered a new vertex
if seen.get(e[1], False) is False:
d.add_edge(e)
next_.extend(ee for ee in self.edges_incident(e[1])
if ((e[0],e[1]) != (ee[0],ee[1])) and ((e[0],e[1]) != (ee[1],ee[0])))
i += 1
seen[e[1]] = i
# Else, we orient the edges backward
else:
if seen[e[0]] < seen[e[1]]:
d.add_edge(e[1], e[0], e[2])
else:
d.add_edge(e)
# Case of multiple edges. If another edge has already been inserted, we
# add the new one in the opposite direction.
tmp = None
for e in self.multiple_edges():
if tmp == (e[0], e[1]):
if d.has_edge(e[0], e[1]):
d.add_edge(e[1], e[0], e[2])
else:
d.add_edge(e)
tmp = (e[0], e[1])
return d
@doc_index("Connectivity, orientations, trees")
def minimum_outdegree_orientation(self, use_edge_labels=False, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
r"""
        Return an orientation of ``self`` with the smallest possible maximum
        out-degree.
Given a Graph `G`, it is polynomial to compute an orientation `D` of the
edges of `G` such that the maximum out-degree in `D` is minimized. This
problem, though, is NP-complete in the weighted case [AMOZ2006]_.
INPUT:
- ``use_edge_labels`` -- boolean (default: ``False``)
- When set to ``True``, uses edge labels as weights to compute the
orientation and assumes a weight of `1` when there is no value
available for a given edge.
- When set to ``False`` (default), gives a weight of 1 to all the
edges.
- ``solver`` -- string (default: ``None``); specify a Mixed Integer
Linear Programming (MILP) solver to be used. If set to ``None``, the
default one is used. For more information on MILP solvers and which
default solver is used, see the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
- ``integrality_tolerance`` -- float; parameter for use with MILP
solvers over an inexact base ring; see
:meth:`MixedIntegerLinearProgram.get_values`.
EXAMPLES:
Given a complete bipartite graph `K_{n,m}`, the maximum out-degree of an
optimal orientation is `\left\lceil \frac {nm} {n+m}\right\rceil`::
sage: g = graphs.CompleteBipartiteGraph(3,4)
sage: o = g.minimum_outdegree_orientation()
sage: max(o.out_degree()) == integer_ceil((4*3)/(3+4))
True
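
        For a `d`-regular graph, the optimum is `\lceil d/2 \rceil`: an
        orientation distributes `dn/2` edges over `n` vertices, and this lower
        bound can be attained. For instance, on the `3`-regular Petersen
        graph::

            sage: o = graphs.PetersenGraph().minimum_outdegree_orientation()
            sage: max(o.out_degree())
            2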
"""
self._scream_if_not_simple()
if self.is_directed():
raise ValueError("Cannot compute an orientation of a DiGraph. "+\
"Please convert it to a Graph if you really mean it.")
if use_edge_labels:
from sage.rings.real_mpfr import RR
def weight(e):
l = self.edge_label(e)
return l if l in RR else 1
else:
def weight(e):
return 1
from sage.numerical.mip import MixedIntegerLinearProgram
p = MixedIntegerLinearProgram(maximization=False, solver=solver)
degree = p.new_variable(nonnegative=True)
# The orientation of an edge is boolean and indicates whether the edge
        # uv goes from u to v (equal to 0) or from v to u (equal to 1)
orientation = p.new_variable(binary=True)
# Whether an edge adjacent to a vertex u counts positively or
# negatively. To do so, we first fix an arbitrary extremity per edge uv.
ext = {frozenset(e): e[0] for e in self.edge_iterator(labels=False)}
def outgoing(u, e, variable):
if u == ext[frozenset(e)]:
return variable
else:
return 1 - variable
for u in self:
p.add_constraint(p.sum(weight(e) * outgoing(u, e, orientation[frozenset(e)])
for e in self.edge_iterator(vertices=[u], labels=False))
- degree['max'], max=0)
p.set_objective(degree['max'])
p.solve(log=verbose)
orientation = p.get_values(orientation, convert=bool, tolerance=integrality_tolerance)
# All the edges from self are doubled in O
# ( one in each direction )
from sage.graphs.digraph import DiGraph
O = DiGraph(self)
# Builds the list of edges that should be removed
edges = []
for e in self.edge_iterator(labels=None):
if orientation[frozenset(e)]:
edges.append(e[::-1])
else:
edges.append(e)
O.delete_edges(edges)
return O
@doc_index("Connectivity, orientations, trees")
def bounded_outdegree_orientation(self, bound, solver=None, verbose=False,
*, integrality_tolerance=1e-3):
r"""
        Compute an orientation of ``self`` such that every vertex `v` has
        out-degree at most `b(v)`.
INPUT:
        - ``bound`` -- Maximum bound on the out-degree. Can be of three
          different types:

          * An integer `k`. In this case, computes an orientation whose maximum
            out-degree is at most `k`.
* A dictionary associating to each vertex its associated maximum
out-degree.
* A function associating to each vertex its associated maximum
out-degree.
- ``solver`` -- string (default: ``None``); specify a Mixed Integer
Linear Programming (MILP) solver to be used. If set to ``None``, the
default one is used. For more information on MILP solvers and which
default solver is used, see the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
- ``integrality_tolerance`` -- float; parameter for use with MILP
solvers over an inexact base ring; see
:meth:`MixedIntegerLinearProgram.get_values`.
OUTPUT:
A DiGraph representing the orientation if it exists. A ``ValueError``
exception is raised otherwise.
ALGORITHM:
        The problem is solved through a maximum flow:
Given a graph `G`, we create a ``DiGraph`` `D` defined on `E(G)\cup
V(G)\cup \{s,t\}`. We then link `s` to all of `V(G)` (these edges having
a capacity equal to the bound associated to each element of `V(G)`), and
        all the elements of `E(G)` to `t`. We then link each `v \in V(G)` to
each of its incident edges in `G`. A maximum integer flow of value
`|E(G)|` corresponds to an admissible orientation of `G`. Otherwise,
none exists.
EXAMPLES:
There is always an orientation of a graph `G` such that a vertex `v` has
out-degree at most `\lceil \frac {d(v)} 2 \rceil`::
sage: g = graphs.RandomGNP(40, .4)
sage: b = lambda v: integer_ceil(g.degree(v)/2)
sage: D = g.bounded_outdegree_orientation(b)
sage: all( D.out_degree(v) <= b(v) for v in g )
True
Chvatal's graph, being 4-regular, can be oriented in such a way that its
maximum out-degree is 2::
sage: g = graphs.ChvatalGraph()
sage: D = g.bounded_outdegree_orientation(2)
sage: max(D.out_degree())
2
For any graph `G`, it is possible to compute an orientation such that
the maximum out-degree is at most the maximum average degree of `G`
        divided by 2. Anything less, though, is impossible::
sage: g = graphs.RandomGNP(40, .4)
sage: mad = g.maximum_average_degree()
        Hence this is possible::
sage: d = g.bounded_outdegree_orientation(integer_ceil(mad/2))
While this is not::
sage: try:
....: g.bounded_outdegree_orientation(integer_ceil(mad/2-1))
....: print("Error")
....: except ValueError:
....: pass
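
        A small hand-checkable instance of the flow construction above: in the
        star `K_{1,3}`, orienting every edge towards the center gives
        out-degree at most `1` everywhere, and any valid orientation must give
        some vertex out-degree exactly `1`::

            sage: D = graphs.StarGraph(3).bounded_outdegree_orientation(1)
            sage: max(D.out_degree())
            1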
TESTS:
As previously for random graphs, but more intensively::
sage: for i in range(30): # long time (up to 6s on sage.math, 2012)
....: g = graphs.RandomGNP(40, .4)
....: b = lambda v: integer_ceil(g.degree(v)/2)
....: D = g.bounded_outdegree_orientation(b)
            ....: if not (
            ....:        all( D.out_degree(v) <= b(v) for v in g ) and
            ....:        D.size() == g.size()):
....: print("Something wrong happened")
"""
self._scream_if_not_simple()
from sage.graphs.all import DiGraph
n = self.order()
if not n:
return DiGraph()
vertices = list(self)
vertices_id = {y: x for x,y in enumerate(vertices)}
b = {}
# Checking the input type. We make a dictionary out of it
if isinstance(bound, dict):
b = bound
else:
try:
b = dict(zip(vertices,map(bound, vertices)))
except TypeError:
b = dict(zip(vertices, [bound]*n))
d = DiGraph()
# Adding the edges (s,v) and ((u,v),t)
d.add_edges(('s', vertices_id[v], b[v]) for v in vertices)
d.add_edges(((vertices_id[u], vertices_id[v]), 't', 1)
for u,v in self.edges(labels=None) )
# each v is linked to its incident edges
for u,v in self.edge_iterator(labels=None):
u,v = vertices_id[u], vertices_id[v]
d.add_edge(u, (u,v), 1)
d.add_edge(v, (u,v), 1)
# Solving the maximum flow
value, flow = d.flow('s','t', value_only=False, integer=True,
use_edge_labels=True, solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance)
if value != self.size():
raise ValueError("No orientation exists for the given bound")
D = DiGraph()
D.add_vertices(vertices)
# The flow graph may not contain all the vertices, if they are
# not part of the flow...
for u in [x for x in range(n) if x in flow]:
for uu,vv in flow.neighbors_out(u):
v = vv if vv != u else uu
D.add_edge(vertices[u], vertices[v])
# I do not like when a method destroys the embedding ;-)
D.set_pos(self.get_pos())
return D
@doc_index("Connectivity, orientations, trees")
def orientations(self, data_structure=None, sparse=None):
r"""
Return an iterator over orientations of ``self``.
An *orientation* of an undirected graph is a directed graph such that
every edge is assigned a direction. Hence there are `2^s` oriented
digraphs for a simple graph with `s` edges.
INPUT:
- ``data_structure`` -- one of ``"sparse"``, ``"static_sparse"``, or
``"dense"``; see the documentation of :class:`Graph` or
:class:`DiGraph`; default is the data structure of ``self``
- ``sparse`` -- boolean (default: ``None``); ``sparse=True`` is an alias
for ``data_structure="sparse"``, and ``sparse=False`` is an alias for
``data_structure="dense"``. By default (``None``), guess the most
suitable data structure.
.. WARNING::
            This always considers multiple edges of graphs as distinguishable,
            and hence may yield repeated digraphs.
EXAMPLES::
sage: G = Graph([[1,2,3], [(1, 2, 'a'), (1, 3, 'b')]], format='vertices_and_edges')
sage: it = G.orientations()
sage: D = next(it)
sage: D.edges()
[(1, 2, 'a'), (1, 3, 'b')]
sage: D = next(it)
sage: D.edges()
[(1, 2, 'a'), (3, 1, 'b')]
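
        Since each of the `s` edges can be directed in two ways, a simple graph
        with `s` edges has `2^s` orientations; e.g. a path with two edges has
        four::

            sage: sum(1 for _ in graphs.PathGraph(3).orientations())
            4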
TESTS::
sage: G = Graph()
sage: D = [g for g in G.orientations()]
sage: len(D)
1
sage: D[0]
Digraph on 0 vertices
sage: G = Graph(5)
sage: it = G.orientations()
sage: D = next(it)
sage: D.size()
0
sage: G = Graph([[1,2,'a'], [1,2,'b']], multiedges=True)
sage: len(list(G.orientations()))
4
sage: G = Graph([[1,2], [1,1]], loops=True)
sage: len(list(G.orientations()))
2
sage: G = Graph([[1,2],[2,3]])
sage: next(G.orientations())
Digraph on 3 vertices
sage: G = graphs.PetersenGraph()
sage: next(G.orientations())
An orientation of Petersen graph: Digraph on 10 vertices
An orientation must have the same ground set of vertices as the original
graph (:trac:`24366`)::
sage: G = Graph(1)
sage: next(G.orientations())
Digraph on 1 vertex
"""
if sparse is not None:
if data_structure is not None:
raise ValueError("cannot specify both 'sparse' and 'data_structure'")
data_structure = "sparse" if sparse else "dense"
if data_structure is None:
from sage.graphs.base.dense_graph import DenseGraphBackend
from sage.graphs.base.sparse_graph import SparseGraphBackend
if isinstance(self._backend, DenseGraphBackend):
data_structure = "dense"
elif isinstance(self._backend, SparseGraphBackend):
data_structure = "sparse"
else:
data_structure = "static_sparse"
name = self.name()
if name:
name = 'An orientation of ' + name
if not self.size():
D = DiGraph(data=[self.vertices(), []],
format='vertices_and_edges',
name=name,
pos=self._pos,
multiedges=self.allows_multiple_edges(),
loops=self.allows_loops(),
data_structure=data_structure)
if hasattr(self, '_embedding'):
D._embedding = copy(self._embedding)
yield D
return
E = [[(u,v,label), (v,u,label)] if u != v else [(u,v,label)]
for u,v,label in self.edge_iterator()]
verts = self.vertices()
for edges in itertools.product(*E):
D = DiGraph(data=[verts, edges],
format='vertices_and_edges',
name=name,
pos=self._pos,
multiedges=self.allows_multiple_edges(),
loops=self.allows_loops(),
data_structure=data_structure)
if hasattr(self, '_embedding'):
D._embedding = copy(self._embedding)
yield D
### Coloring
@doc_index("Basic methods")
def bipartite_color(self):
"""
Return a dictionary with vertices as the keys and the color class
as the values.
Fails with an error if the graph is not bipartite.
EXAMPLES::
sage: graphs.CycleGraph(4).bipartite_color()
{0: 1, 1: 0, 2: 1, 3: 0}
sage: graphs.CycleGraph(5).bipartite_color()
Traceback (most recent call last):
...
RuntimeError: Graph is not bipartite.
TESTS::
sage: Graph().bipartite_color()
{}
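
        The returned coloring is a proper `2`-coloring, which we can check
        directly on a larger even cycle::

            sage: G = graphs.CycleGraph(6)
            sage: c = G.bipartite_color()
            sage: all(c[u] != c[v] for u, v in G.edge_iterator(labels=False))
            True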
"""
isit, certificate = self.is_bipartite(certificate=True)
if isit:
return certificate
else:
raise RuntimeError("Graph is not bipartite.")
@doc_index("Basic methods")
def bipartite_sets(self):
r"""
Return `(X,Y)` where `X` and `Y` are the nodes in each bipartite set of
graph `G`.
Fails with an error if graph is not bipartite.
EXAMPLES::
sage: graphs.CycleGraph(4).bipartite_sets()
({0, 2}, {1, 3})
sage: graphs.CycleGraph(5).bipartite_sets()
Traceback (most recent call last):
...
RuntimeError: Graph is not bipartite.
"""
color = self.bipartite_color()
left = set()
right = set()
for u,s in color.items():
if s:
left.add(u)
else:
right.add(u)
return left, right
@doc_index("Coloring")
def chromatic_index(self, solver=None, verbose=0, *, integrality_tolerance=1e-3):
r"""
Return the chromatic index of the graph.
The chromatic index is the minimal number of colors needed to properly
color the edges of the graph.
INPUT:
- ``solver`` -- string (default: ``None``); specify a Mixed Integer
Linear Programming (MILP) solver to be used. If set to ``None``, the
default one is used. For more information on MILP solvers and which
default solver is used, see the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
- ``integrality_tolerance`` -- float; parameter for use with MILP
solvers over an inexact base ring; see
:meth:`MixedIntegerLinearProgram.get_values`.
This method is a frontend for method
:meth:`sage.graphs.graph_coloring.edge_coloring` that uses a mixed
integer-linear programming formulation to compute the chromatic index.
.. SEEALSO::
- :wikipedia:`Edge_coloring` for further details on edge coloring
- :meth:`sage.graphs.graph_coloring.edge_coloring`
- :meth:`~Graph.fractional_chromatic_index`
- :meth:`~Graph.chromatic_number`
EXAMPLES:
        The complete graph `K_n` has chromatic index `n` when `n` is odd and
        `n-1` when `n` is even::
sage: graphs.CompleteGraph(4).chromatic_index()
3
sage: graphs.CompleteGraph(5).chromatic_index()
5
sage: graphs.CompleteGraph(6).chromatic_index()
5
The path `P_n` with `n \geq 2` has chromatic index 2::
sage: graphs.PathGraph(5).chromatic_index()
2
The windmill graph with parameters `k,n` has chromatic index `(k-1)n`::
sage: k,n = 3,4
sage: G = graphs.WindmillGraph(k,n)
sage: G.chromatic_index() == (k-1)*n
True
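
        By König's edge-coloring theorem, a bipartite graph has chromatic index
        equal to its maximum degree::

            sage: g = graphs.CompleteBipartiteGraph(2,3)
            sage: g.chromatic_index() == max(g.degree())
            True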
TESTS:
Graphs without vertices or edges::
sage: Graph().chromatic_index()
0
sage: Graph(2).chromatic_index()
0
"""
if not self.order() or not self.size():
return 0
from sage.graphs.graph_coloring import edge_coloring
return edge_coloring(self, value_only=True, solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance)
@doc_index("Coloring")
def chromatic_number(self, algorithm="DLX", solver=None, verbose=0,
*, integrality_tolerance=1e-3):
r"""
Return the minimal number of colors needed to color the vertices of the
graph.
INPUT:
- ``algorithm`` -- Select an algorithm from the following supported
algorithms:
- If ``algorithm="DLX"`` (default), the chromatic number is computed
using the dancing link algorithm. It is inefficient speedwise to
compute the chromatic number through the dancing link algorithm
because this algorithm computes *all* the possible colorings to
check that one exists.
- If ``algorithm="CP"``, the chromatic number is computed using the
coefficients of the chromatic polynomial. Again, this method is
inefficient in terms of speed and it only useful for small graphs.
- If ``algorithm="MILP"``, the chromatic number is computed using a
mixed integer linear program. The performance of this implementation
is affected by whether optional MILP solvers have been installed
(see the :mod:`MILP module <sage.numerical.mip>`, or Sage's tutorial
on Linear Programming).
- ``solver`` -- string (default: ``None``); specify a Mixed Integer
Linear Programming (MILP) solver to be used. If set to ``None``, the
default one is used. For more information on MILP solvers and which
default solver is used, see the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
- ``integrality_tolerance`` -- float; parameter for use with MILP
solvers over an inexact base ring; see
:meth:`MixedIntegerLinearProgram.get_values`.
.. SEEALSO::
For more functions related to graph coloring, see the module
:mod:`sage.graphs.graph_coloring`.
EXAMPLES::
sage: G = Graph({0: [1, 2, 3], 1: [2]})
sage: G.chromatic_number(algorithm="DLX")
3
sage: G.chromatic_number(algorithm="MILP")
3
sage: G.chromatic_number(algorithm="CP")
3
        A bipartite graph with at least one edge has chromatic number 2::
sage: graphs.RandomBipartite(50,50,0.7).chromatic_number()
2
A complete multipartite graph with k parts has chromatic number `k`::
sage: all(graphs.CompleteMultipartiteGraph([5]*i).chromatic_number() == i for i in range(2,5))
True
        The complete graph has the largest chromatic number of all the graphs
        of order `n`; namely, its chromatic number is `n`::
sage: all(graphs.CompleteGraph(i).chromatic_number() == i for i in range(10))
True
The Kneser graph with parameters `(n, 2)` for `n > 3` has chromatic
number `n-2`::
sage: all(graphs.KneserGraph(i,2).chromatic_number() == i-2 for i in range(4,6))
True
        The Flower Snark graph has chromatic index 4, hence its line graph has
        chromatic number 4::
sage: graphs.FlowerSnark().line_graph().chromatic_number()
4
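
        Odd cycles are the simplest graphs that are not `2`-colorable::

            sage: graphs.CycleGraph(7).chromatic_number()
            3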
TESTS::
sage: G = Graph()
sage: G.chromatic_number(algorithm="DLX")
0
sage: G.chromatic_number(algorithm="MILP")
0
sage: G.chromatic_number(algorithm="CP")
0
sage: G = Graph({0: [1, 2, 3], 1: [2]})
sage: G.chromatic_number(algorithm="foo")
Traceback (most recent call last):
...
ValueError: The 'algorithm' keyword must be set to either 'DLX', 'MILP' or 'CP'.
"""
self._scream_if_not_simple(allow_multiple_edges=True)
# default built-in algorithm; bad performance
if algorithm == "DLX":
from sage.graphs.graph_coloring import chromatic_number
return chromatic_number(self)
# Algorithm with good performance, but requires an optional
# package: choose any of GLPK or CBC.
elif algorithm == "MILP":
from sage.graphs.graph_coloring import vertex_coloring
return vertex_coloring(self, value_only=True, solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance)
# another algorithm with bad performance; only good for small graphs
elif algorithm == "CP":
f = self.chromatic_polynomial()
i = 0
while not f(i):
i += 1
return i
else:
raise ValueError("The 'algorithm' keyword must be set to either 'DLX', 'MILP' or 'CP'.")
@doc_index("Coloring")
def coloring(self, algorithm="DLX", hex_colors=False, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
r"""
Return the first (optimal) proper vertex-coloring found.
INPUT:
- ``algorithm`` -- Select an algorithm from the following supported
algorithms:
- If ``algorithm="DLX"`` (default), the coloring is computed using the
dancing link algorithm.
- If ``algorithm="MILP"``, the coloring is computed using a mixed
integer linear program. The performance of this implementation is
affected by whether optional MILP solvers have been installed (see
the :mod:`MILP module <sage.numerical.mip>`).
- ``hex_colors`` -- boolean (default: ``False``); if ``True``, return a
dictionary which can easily be used for plotting.
- ``solver`` -- string (default: ``None``); specify a Mixed Integer
Linear Programming (MILP) solver to be used. If set to ``None``, the
default one is used. For more information on MILP solvers and which
default solver is used, see the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
- ``integrality_tolerance`` -- float; parameter for use with MILP
solvers over an inexact base ring; see
:meth:`MixedIntegerLinearProgram.get_values`.
.. SEEALSO::
For more functions related to graph coloring, see the
module :mod:`sage.graphs.graph_coloring`.
EXAMPLES::
sage: G = Graph("Fooba")
sage: P = G.coloring(algorithm="MILP")
sage: Q = G.coloring(algorithm="DLX")
sage: def are_equal_colorings(A, B):
....: return Set(map(Set, A)) == Set(map(Set, B))
sage: are_equal_colorings(P, [[1, 2, 3], [0, 5, 6], [4]])
True
sage: are_equal_colorings(P, Q)
True
sage: G.plot(partition=P)
Graphics object consisting of 16 graphics primitives
sage: G.coloring(hex_colors=True, algorithm="MILP")
{'#0000ff': [4], '#00ff00': [0, 6, 5], '#ff0000': [2, 1, 3]}
sage: H = G.coloring(hex_colors=True, algorithm="DLX")
sage: H
{'#0000ff': [4], '#00ff00': [1, 2, 3], '#ff0000': [0, 5, 6]}
sage: G.plot(vertex_colors=H)
Graphics object consisting of 16 graphics primitives
.. PLOT::
g = Graph("Fooba")
sphinx_plot(g.plot(partition=g.coloring()))
TESTS::
sage: G.coloring(algorithm="foo")
Traceback (most recent call last):
...
ValueError: The 'algorithm' keyword must be set to either 'DLX' or 'MILP'.
"""
self._scream_if_not_simple(allow_multiple_edges=True)
if algorithm == "MILP":
from sage.graphs.graph_coloring import vertex_coloring
return vertex_coloring(self, hex_colors=hex_colors, solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance)
elif algorithm == "DLX":
from sage.graphs.graph_coloring import first_coloring
return first_coloring(self, hex_colors=hex_colors)
else:
raise ValueError("The 'algorithm' keyword must be set to either 'DLX' or 'MILP'.")
@doc_index("Coloring")
def chromatic_symmetric_function(self, R=None):
r"""
Return the chromatic symmetric function of ``self``.
        Let `G` be a graph. The chromatic symmetric function `X_G` was described
        in [Sta1995]_; specifically, Theorem 2.5 states that
.. MATH::
X_G = \sum_{F \subseteq E(G)} (-1)^{|F|} p_{\lambda(F)},
where `\lambda(F)` is the partition of the sizes of the connected
components of the subgraph induced by the edges `F` and `p_{\mu}` is the
powersum symmetric function.
INPUT:
- ``R`` -- (optional) the base ring for the symmetric functions;
this uses `\ZZ` by default
EXAMPLES::
sage: s = SymmetricFunctions(ZZ).s()
sage: G = graphs.CycleGraph(5)
sage: XG = G.chromatic_symmetric_function(); XG
p[1, 1, 1, 1, 1] - 5*p[2, 1, 1, 1] + 5*p[2, 2, 1]
+ 5*p[3, 1, 1] - 5*p[3, 2] - 5*p[4, 1] + 4*p[5]
sage: s(XG)
30*s[1, 1, 1, 1, 1] + 10*s[2, 1, 1, 1] + 10*s[2, 2, 1]
Not all graphs have a positive Schur expansion::
sage: G = graphs.ClawGraph()
sage: XG = G.chromatic_symmetric_function(); XG
p[1, 1, 1, 1] - 3*p[2, 1, 1] + 3*p[3, 1] - p[4]
sage: s(XG)
8*s[1, 1, 1, 1] + 5*s[2, 1, 1] - s[2, 2] + s[3, 1]
We show that given a triangle `\{e_1, e_2, e_3\}`, we have
`X_G = X_{G - e_1} + X_{G - e_2} - X_{G - e_1 - e_2}`::
sage: G = Graph([[1,2],[1,3],[2,3]])
sage: XG = G.chromatic_symmetric_function()
sage: G1 = copy(G)
sage: G1.delete_edge([1,2])
sage: XG1 = G1.chromatic_symmetric_function()
sage: G2 = copy(G)
sage: G2.delete_edge([1,3])
sage: XG2 = G2.chromatic_symmetric_function()
sage: G3 = copy(G1)
sage: G3.delete_edge([1,3])
sage: XG3 = G3.chromatic_symmetric_function()
sage: XG == XG1 + XG2 - XG3
True
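
        On a graph with no edges, only the empty edge subset contributes to the
        sum above, so `X_G` reduces to a single power sum::

            sage: Graph(3).chromatic_symmetric_function()
            p[1, 1, 1]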
"""
from sage.combinat.sf.sf import SymmetricFunctions
from sage.combinat.partition import _Partitions
from sage.misc.misc import powerset
if R is None:
R = ZZ
p = SymmetricFunctions(R).p()
ret = p.zero()
for F in powerset(self.edges()):
la = _Partitions(self.subgraph(edges=F).connected_components_sizes())
ret += (-1)**len(F) * p[la]
return ret
@doc_index("Coloring")
def chromatic_quasisymmetric_function(self, t=None, R=None):
r"""
Return the chromatic quasisymmetric function of ``self``.
Let `G` be a graph whose vertex set is totally ordered. The chromatic
quasisymmetric function `X_G(t)` was first described in [SW2012]_. We
use the equivalent definition given in [BC2018]_:
.. MATH::
X_G(t) = \sum_{\sigma=(\sigma_1,\ldots,\sigma_n)}
t^{\operatorname{asc}(\sigma)}
M_{|\sigma_1|,\ldots,|\sigma_n|},
where we sum over all ordered set partitions of the vertex set of `G`
such that each block `\sigma_i` is an independent (i.e., stable) set of
`G`, and where `\operatorname{asc}(\sigma)` denotes the number of edges
`\{u, v\}` of `G` such that `u < v` and `v` appears in a later part of
`\sigma` than `u`.
INPUT:
- ``t`` -- (optional) the parameter `t`; uses the variable `t` in
`\ZZ[t]` by default
- ``R`` -- (optional) the base ring for the quasisymmetric functions;
uses the parent of `t` by default
EXAMPLES::
sage: G = Graph([[1,2,3], [[1,3], [2,3]]])
sage: G.chromatic_quasisymmetric_function()
(2*t^2+2*t+2)*M[1, 1, 1] + M[1, 2] + t^2*M[2, 1]
sage: G = graphs.PathGraph(4)
sage: XG = G.chromatic_quasisymmetric_function(); XG
(t^3+11*t^2+11*t+1)*M[1, 1, 1, 1] + (3*t^2+3*t)*M[1, 1, 2]
+ (3*t^2+3*t)*M[1, 2, 1] + (3*t^2+3*t)*M[2, 1, 1]
+ (t^2+t)*M[2, 2]
sage: XG.to_symmetric_function()
(t^3+11*t^2+11*t+1)*m[1, 1, 1, 1] + (3*t^2+3*t)*m[2, 1, 1]
+ (t^2+t)*m[2, 2]
sage: G = graphs.CompleteGraph(4)
sage: G.chromatic_quasisymmetric_function()
(t^6+3*t^5+5*t^4+6*t^3+5*t^2+3*t+1)*M[1, 1, 1, 1]
Not all chromatic quasisymmetric functions are symmetric::
sage: G = Graph([[1,2], [1,5], [3,4], [3,5]])
sage: G.chromatic_quasisymmetric_function().is_symmetric()
False
We check that at `t = 1`, we recover the usual chromatic symmetric
function::
sage: p = SymmetricFunctions(QQ).p()
sage: G = graphs.CycleGraph(5)
sage: XG = G.chromatic_quasisymmetric_function(t=1); XG
120*M[1, 1, 1, 1, 1] + 30*M[1, 1, 1, 2] + 30*M[1, 1, 2, 1]
+ 30*M[1, 2, 1, 1] + 10*M[1, 2, 2] + 30*M[2, 1, 1, 1]
+ 10*M[2, 1, 2] + 10*M[2, 2, 1]
sage: p(XG.to_symmetric_function())
p[1, 1, 1, 1, 1] - 5*p[2, 1, 1, 1] + 5*p[2, 2, 1]
+ 5*p[3, 1, 1] - 5*p[3, 2] - 5*p[4, 1] + 4*p[5]
sage: G = graphs.ClawGraph()
sage: XG = G.chromatic_quasisymmetric_function(t=1); XG
24*M[1, 1, 1, 1] + 6*M[1, 1, 2] + 6*M[1, 2, 1] + M[1, 3]
+ 6*M[2, 1, 1] + M[3, 1]
sage: p(XG.to_symmetric_function())
p[1, 1, 1, 1] - 3*p[2, 1, 1] + 3*p[3, 1] - p[4]
"""
from sage.combinat.ncsf_qsym.qsym import QuasiSymmetricFunctions
from sage.combinat.set_partition_ordered import OrderedSetPartitions
if t is None:
t = ZZ['t'].gen()
if R is None:
R = t.parent()
M = QuasiSymmetricFunctions(R).M()
ret = M.zero()
V = self.vertices()
def asc(sigma):
stat = 0
for i, s in enumerate(sigma):
for u in s:
stat += sum(1 for p in sigma[i+1:] for v in p
if v > u and self.has_edge(u, v))
return stat
for sigma in OrderedSetPartitions(V):
if any(not self.is_independent_set(s) for s in sigma):
continue
ret += M.term(sigma.to_composition(), t**asc(sigma))
return ret
@doc_index("Leftovers")
def matching(self, value_only=False, algorithm="Edmonds",
use_edge_labels=False, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
r"""
        Return a maximum weighted matching of the graph, represented by the
        list of its edges.
For more information, see the :wikipedia:`Matching_(graph_theory)`.
Given a graph `G` such that each edge `e` has a weight `w_e`, a maximum
matching is a subset `S` of the edges of `G` of maximum weight such that
no two edges of `S` are incident with each other.
As an optimization problem, it can be expressed as:
.. MATH::
\mbox{Maximize : }&\sum_{e\in G.edges()} w_e b_e\\
\mbox{Such that : }&\forall v \in G,
\sum_{(u,v)\in G.edges()} b_{(u,v)}\leq 1\\
&\forall x\in G, b_x\mbox{ is a binary variable}
INPUT:
- ``value_only`` -- boolean (default: ``False``); when set to ``True``,
          only the cardinality (or the weight) of the matching is returned
- ``algorithm`` -- string (default: ``"Edmonds"``)
- ``"Edmonds"`` selects Edmonds' algorithm as implemented in NetworkX
- ``"LP"`` uses a Linear Program formulation of the matching problem
- ``use_edge_labels`` -- boolean (default: ``False``)
- when set to ``True``, computes a weighted matching where each edge
is weighted by its label (if an edge has no label, `1` is assumed)
- when set to ``False``, each edge has weight `1`
- ``solver`` -- string (default: ``None``); specify a Mixed Integer
Linear Programming (MILP) solver to be used. If set to ``None``, the
default one is used. For more information on MILP solvers and which
default solver is used, see the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of verbosity:
set to 0 by default, which means quiet (only useful when ``algorithm
== "LP"``)
- ``integrality_tolerance`` -- float; parameter for use with MILP
solvers over an inexact base ring; see
:meth:`MixedIntegerLinearProgram.get_values`.
OUTPUT:
- When ``value_only=False`` (default), this method returns the list of
edges of a maximum matching of `G`.
- When ``value_only=True``, this method returns the sum of the
weights (default: ``1``) of the edges of a maximum matching of `G`.
The type of the output may vary according to the type of the edge
labels and the algorithm used.
ALGORITHM:
        The problem is solved using Edmonds' algorithm implemented in NetworkX,
or using Linear Programming depending on the value of ``algorithm``.
EXAMPLES:
Maximum matching in a Pappus Graph::
sage: g = graphs.PappusGraph()
sage: g.matching(value_only=True)
9
Same test with the Linear Program formulation::
sage: g = graphs.PappusGraph()
sage: g.matching(algorithm="LP", value_only=True)
9
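
        The complete graph `K_4` has a perfect matching, hence a maximum
        matching of cardinality `2`::

            sage: graphs.CompleteGraph(4).matching(value_only=True)
            2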
.. PLOT::
g = graphs.PappusGraph()
sphinx_plot(g.plot(edge_colors={"red":g.matching()}))
TESTS:
When ``use_edge_labels`` is set to ``False``, with Edmonds' algorithm
and LP formulation::
sage: g = Graph([(0,1,0), (1,2,999), (2,3,-5)])
sage: sorted(g.matching())
[(0, 1, 0), (2, 3, -5)]
sage: sorted(g.matching(algorithm="LP"))
[(0, 1, 0), (2, 3, -5)]
When ``use_edge_labels`` is set to ``True``, with Edmonds' algorithm and
LP formulation::
sage: g = Graph([(0,1,0), (1,2,999), (2,3,-5)])
sage: g.matching(use_edge_labels=True)
[(1, 2, 999)]
sage: g.matching(algorithm="LP", use_edge_labels=True)
[(1, 2, 999)]
With loops and multiedges::
sage: edge_list = [(0,0,5), (0,1,1), (0,2,2), (0,3,3), (1,2,6)
....: , (1,2,3), (1,3,3), (2,3,3)]
sage: g = Graph(edge_list, loops=True, multiedges=True)
sage: g.matching(use_edge_labels=True)
[(1, 2, 6), (0, 3, 3)]
If ``algorithm`` is set to anything different from ``"Edmonds"`` or
``"LP"``, an exception is raised::
sage: g = graphs.PappusGraph()
sage: g.matching(algorithm="somethingdifferent")
Traceback (most recent call last):
...
ValueError: algorithm must be set to either "Edmonds" or "LP"
"""
from sage.rings.real_mpfr import RR
def weight(x):
if x in RR:
return x
else:
return 1
W = {}
L = {}
for u,v,l in self.edge_iterator():
if u is v:
continue
fuv = frozenset((u, v))
if fuv not in L or ( use_edge_labels and W[fuv] < weight(l) ):
L[fuv] = l
if use_edge_labels:
W[fuv] = weight(l)
if algorithm == "Edmonds":
import networkx
g = networkx.Graph()
if use_edge_labels:
for (u, v),w in W.items():
g.add_edge(u, v, weight=w)
else:
for u, v in L:
g.add_edge(u, v)
d = networkx.max_weight_matching(g)
if value_only:
if use_edge_labels:
return sum(W[frozenset(e)] for e in d)
else:
return Integer(len(d))
else:
return [(u, v, L[frozenset((u, v))]) for u, v in d]
elif algorithm == "LP":
g = self
from sage.numerical.mip import MixedIntegerLinearProgram
            # MILP formulation of the maximum (weighted) matching problem
p = MixedIntegerLinearProgram(maximization=True, solver=solver)
b = p.new_variable(binary=True)
if use_edge_labels:
p.set_objective(p.sum(w * b[fe] for fe,w in W.items()))
else:
p.set_objective(p.sum(b[fe] for fe in L))
# for any vertex v, there is at most one edge incident to v in
# the maximum matching
for v in g:
p.add_constraint(p.sum(b[frozenset(e)] for e in self.edge_iterator(vertices=[v], labels=False)
if e[0] != e[1]), max=1)
p.solve(log=verbose)
b = p.get_values(b, convert=bool, tolerance=integrality_tolerance)
if value_only:
if use_edge_labels:
return sum(w for fe, w in W.items() if b[fe])
else:
return Integer(sum(1 for fe in L if b[fe]))
else:
return [(u, v, L[frozenset((u, v))]) for u, v in L if b[frozenset((u, v))]]
else:
raise ValueError('algorithm must be set to either "Edmonds" or "LP"')
@doc_index("Algorithmically hard stuff")
def has_homomorphism_to(self, H, core=False, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
r"""
        Check whether there is a homomorphism from ``self`` to `H`.
A homomorphism from a graph `G` to a graph `H` is a function
`\phi:V(G)\mapsto V(H)` such that for any edge `uv \in E(G)` the pair
`\phi(u)\phi(v)` is an edge of `H`.
Saying that a graph can be `k`-colored is equivalent to saying that it
has a homomorphism to `K_k`, the complete graph on `k` elements.
For more information, see the :wikipedia:`Graph_homomorphism`.
INPUT:
- ``H`` -- the graph to which ``self`` should be sent.
        - ``core`` -- boolean (default: ``False``); whether to minimize the size
          of the mapping's image (see note below).
- ``solver`` -- string (default: ``None``); specify a Mixed Integer
Linear Programming (MILP) solver to be used. If set to ``None``, the
default one is used. For more information on MILP solvers and which
default solver is used, see the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
- ``integrality_tolerance`` -- float; parameter for use with MILP
solvers over an inexact base ring; see
:meth:`MixedIntegerLinearProgram.get_values`.
.. NOTE::
One can compute the core of a graph (with respect to homomorphism)
            with this method::
sage: g = graphs.CycleGraph(10)
sage: mapping = g.has_homomorphism_to(g, core = True)
sage: print("The size of the core is {}".format(len(set(mapping.values()))))
The size of the core is 2
OUTPUT:
        This method returns ``False`` when the homomorphism does not exist, and
        returns the homomorphism otherwise as a dictionary associating to each
        vertex of `G` a vertex of `H`.
EXAMPLES:
Is Petersen's graph 3-colorable::
sage: P = graphs.PetersenGraph()
sage: P.has_homomorphism_to(graphs.CompleteGraph(3)) is not False
True
An odd cycle admits a homomorphism to a smaller odd cycle, but not to an
even cycle::
sage: g = graphs.CycleGraph(9)
sage: g.has_homomorphism_to(graphs.CycleGraph(5)) is not False
True
sage: g.has_homomorphism_to(graphs.CycleGraph(7)) is not False
True
sage: g.has_homomorphism_to(graphs.CycleGraph(4)) is not False
False
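
        A graph is bipartite exactly when it admits a homomorphism to `K_2`, so
        an even cycle maps to an edge::

            sage: g = graphs.CycleGraph(4)
            sage: g.has_homomorphism_to(graphs.CompleteGraph(2)) is not False
            True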
"""
self._scream_if_not_simple()
from sage.numerical.mip import MixedIntegerLinearProgram, MIPSolverException
p = MixedIntegerLinearProgram(solver=solver, maximization=False)
b = p.new_variable(binary=True)
# Each vertex has an image
for ug in self:
p.add_constraint(p.sum(b[ug,uh] for uh in H) == 1)
nonedges = H.complement().edges(labels=False)
for ug,vg in self.edges(labels=False):
# Two adjacent vertices cannot be mapped to the same element
for uh in H:
p.add_constraint(b[ug,uh] + b[vg,uh] <= 1)
            # Two adjacent vertices cannot be mapped to non-adjacent vertices
for uh,vh in nonedges:
p.add_constraint(b[ug,uh] + b[vg,vh] <= 1)
p.add_constraint(b[ug,vh] + b[vg,uh] <= 1)
# Minimize the mapping's size
if core:
# the value of m is one if the corresponding vertex of h is used.
m = p.new_variable(nonnegative=True)
for uh in H:
for ug in self:
p.add_constraint(b[ug,uh] <= m[uh])
p.set_objective(p.sum(m[vh] for vh in H))
try:
p.solve(log=verbose)
except MIPSolverException:
return False
b = p.get_values(b, convert=bool, tolerance=integrality_tolerance)
mapping = dict(x[0] for x in b.items() if x[1])
return mapping
@doc_index("Clique-related methods")
def fractional_clique_number(self, solver='PPL', verbose=0,
check_components=True, check_bipartite=True):
r"""
Return the fractional clique number of the graph.
A fractional clique is a nonnegative weight function on the vertices of
a graph such that the sum of the weights over any independent set is at
most 1. The fractional clique number is the largest total weight of a
fractional clique, which is equal to the fractional chromatic number by
LP-duality.
ALGORITHM:
The fractional clique number is computed via the Linear Program for
fractional chromatic number, see :meth:`fractional_chromatic_number
<sage.graphs.graph_coloring.fractional_chromatic_number>`
INPUT:
- ``solver`` -- (default: ``"PPL"``); specify a Linear Program (LP)
solver to be used. If set to ``None``, the default one is used. For
more information on LP solvers and which default solver is used, see
the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
.. NOTE::
The default solver used here is ``"PPL"`` which provides exact
            results, i.e. a rational number, although this may be slower than
using other solvers.
- ``verbose`` -- integer (default: `0`); sets the level of verbosity of
the LP solver
- ``check_components`` -- boolean (default: ``True``); whether the
method is called on each biconnected component of `G`
- ``check_bipartite`` -- boolean (default: ``True``); whether the graph
is checked for bipartiteness. If the graph is bipartite then we can
avoid creating and solving the LP.
EXAMPLES:
The fractional clique number of a `C_7` is `7/3`::
sage: g = graphs.CycleGraph(7)
sage: g.fractional_clique_number()
7/3
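
        For a complete graph, the optimal weight function is the constant `1`
        (every independent set is a single vertex), so the fractional clique
        number equals the clique number::

            sage: graphs.CompleteGraph(4).fractional_clique_number()
            4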
"""
return self.fractional_chromatic_number(solver=solver, verbose=verbose,
check_components=check_components,
check_bipartite=check_bipartite)
@doc_index("Leftovers")
def maximum_average_degree(self, value_only=True, solver=None, verbose=0):
r"""
Return the Maximum Average Degree (MAD) of the current graph.
The Maximum Average Degree (MAD) of a graph is defined as the average
        degree of its densest subgraph. More formally,
        `Mad(G) = \max_{H\subseteq G} Ad(H)`, where `Ad(G)` denotes the average
        degree of `G`.
This can be computed in polynomial time.
INPUT:
- ``value_only`` -- boolean (default: ``True``);
- If ``value_only=True``, only the numerical value of the `MAD` is
returned.
- Else, the subgraph of `G` realizing the `MAD` is returned.
- ``solver`` -- (default: ``None``); specify a Linear Program (LP)
solver to be used. If set to ``None``, the default one is used. For
more information on LP solvers and which default solver is used, see
the method
:meth:`solve <sage.numerical.mip.MixedIntegerLinearProgram.solve>`
of the class
:class:`MixedIntegerLinearProgram <sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
EXAMPLES:
        In any graph, the `Mad` is at least as large as the average degree::
sage: g = graphs.RandomGNP(20,.3)
sage: mad_g = g.maximum_average_degree()
sage: g.average_degree() <= mad_g
True
        Unlike the average degree, the `Mad` of the disjoint union of two graphs
        is the maximum of the `Mad` of each graph::
sage: h = graphs.RandomGNP(20,.3)
sage: mad_h = h.maximum_average_degree()
sage: (g+h).maximum_average_degree() == max(mad_g, mad_h)
True
The subgraph of a regular graph realizing the maximum average degree is
        always the whole graph::
sage: g = graphs.CompleteGraph(5)
sage: mad_g = g.maximum_average_degree(value_only=False)
sage: g.is_isomorphic(mad_g)
True
        This also works for complete bipartite graphs::
sage: g = graphs.CompleteBipartiteGraph(3,4)
sage: mad_g = g.maximum_average_degree(value_only=False)
sage: g.is_isomorphic(mad_g)
True
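
        Consequently, the `Mad` of a `d`-regular graph is `d`::

            sage: graphs.PetersenGraph().maximum_average_degree()
            3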
"""
self._scream_if_not_simple()
g = self
from sage.numerical.mip import MixedIntegerLinearProgram
p = MixedIntegerLinearProgram(maximization=True, solver=solver)
d = p.new_variable(nonnegative=True)
one = p.new_variable(nonnegative=True)
for u,v in g.edge_iterator(labels=False):
fuv = frozenset((u, v))
p.add_constraint(one[fuv] - 2 * d[u], max=0)
p.add_constraint(one[fuv] - 2 * d[v], max=0)
p.add_constraint(p.sum(d[v] for v in g), max=1)
p.set_objective(p.sum(one[frozenset(uv)]
for uv in g.edge_iterator(labels=False)))
p.solve(log=verbose)
        # Paying attention to numerical error:
        # The zero values could be something like 0.000000000001, so we cannot
        # simply test l > 0. And the non-zero values, though they should be
        # equal to 1/(order of the optimal subgraph), may be a bit lower.
        # Setting the threshold to 1/(10 * order of the whole graph) should
        # be safe :-)
        m = 1/(10 * Integer(g.order()))
d_val = p.get_values(d)
g_mad = g.subgraph(v for v,l in d_val.items() if l > m)
if value_only:
return g_mad.average_degree()
else:
return g_mad
@doc_index("Algorithmically hard stuff")
def independent_set_of_representatives(self, family, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
r"""
Return an independent set of representatives.
        Given a graph `G` and a family `F=\{F_i:i\in [1,...,k]\}` of subsets of
        ``G.vertices()``, an Independent Set of Representatives (ISR) is an
        assignment of a vertex `v_i\in F_i` to each set `F_i` such that
        `v_i \neq v_j` whenever `i<j` (they are representatives) and the set
        `\cup_{i}\{v_i\}` is an independent set in `G`.
It generalizes, for example, graph coloring and graph list coloring.
(See [ABZ2007]_ for more information.)
INPUT:
- ``family`` -- A list of lists defining the family `F` (actually, a
Family of subsets of ``G.vertices()``).
- ``solver`` -- string (default: ``None``); specify a Mixed Integer
Linear Programming (MILP) solver to be used. If set to ``None``, the
default one is used. For more information on MILP solvers and which
default solver is used, see the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
- ``integrality_tolerance`` -- float; parameter for use with MILP
solvers over an inexact base ring; see
:meth:`MixedIntegerLinearProgram.get_values`.
OUTPUT:
- A list whose `i^{\mbox{th}}` element is the representative of the
`i^{\mbox{th}}` element of the ``family`` list. If there is no ISR,
``None`` is returned.
EXAMPLES:
For a bipartite graph missing one edge, the solution is as expected::
sage: g = graphs.CompleteBipartiteGraph(3,3)
sage: g.delete_edge(1,4)
sage: g.independent_set_of_representatives([[0,1,2],[3,4,5]])
[1, 4]
The Petersen Graph is 3-colorable, which can be expressed as an
        independent set of representatives problem: take 3 disjoint copies of
the Petersen Graph, each one representing one color. Then take as a
partition of the set of vertices the family defined by the three copies
of each vertex. The ISR of such a family defines a 3-coloring::
sage: g = 3 * graphs.PetersenGraph()
sage: n = g.order() / 3
sage: f = [[i, i + n, i + 2*n] for i in range(n)]
sage: isr = g.independent_set_of_representatives(f)
sage: c = [integer_floor(i / n) for i in isr]
sage: color_classes = [[], [], []]
sage: for v, i in enumerate(c):
....: color_classes[i].append(v)
sage: for classs in color_classes:
....: g.subgraph(classs).size() == 0
True
True
True
"""
        from sage.numerical.mip import MixedIntegerLinearProgram, MIPSolverException
p = MixedIntegerLinearProgram(solver=solver)
# Boolean variable indicating whether the vertex is the representative
# of some set
vertex_taken = p.new_variable(binary=True)
# Boolean variable in two dimension whose first element is a vertex and
# whose second element is one of the sets given as arguments.
# When true, indicated that the vertex is the representative of the
# corresponding set
classss = p.new_variable(binary=True)
# Associates to the vertices the classes to which they belong
lists = {v: [] for v in self}
for i,f in enumerate(family):
for v in f:
lists[v].append(i)
# a classss has exactly one representative
p.add_constraint(p.sum(classss[v,i] for v in f), max=1, min=1)
# A vertex represents at most one classss (vertex_taken is binary), and
# vertex_taken[v]==1 if v is the representative of some classss
for v in self:
p.add_constraint(p.sum(classss[v,i] for i in lists[v]) - vertex_taken[v], max=0)
# Two adjacent vertices can not both be representatives of a set
for u,v in self.edge_iterator(labels=None):
p.add_constraint(vertex_taken[u] + vertex_taken[v], max=1)
p.set_objective(None)
try:
p.solve(log=verbose)
        except MIPSolverException:
return None
classss = p.get_values(classss, convert=bool, tolerance=integrality_tolerance)
        representatives = []
        for i, f in enumerate(family):
            for v in f:
                if classss[v,i]:
                    representatives.append(v)
                    break
        return representatives
@doc_index("Algorithmically hard stuff")
def minor(self, H, solver=None, verbose=0, *, integrality_tolerance=1e-3):
r"""
Return the vertices of a minor isomorphic to `H` in the current graph.
We say that a graph `G` has a `H`-minor (or that it has a graph
isomorphic to `H` as a minor), if for all `h\in H`, there exist disjoint
sets `S_h \subseteq V(G)` such that once the vertices of each `S_h` have
been merged to create a new graph `G'`, this new graph contains `H` as a
subgraph.
For more information, see the :wikipedia:`Minor_(graph_theory)`.
INPUT:
        - ``H`` -- The minor to look for in the current graph.
- ``solver`` -- string (default: ``None``); specify a Mixed Integer
Linear Programming (MILP) solver to be used. If set to ``None``, the
default one is used. For more information on MILP solvers and which
default solver is used, see the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
- ``integrality_tolerance`` -- float; parameter for use with MILP
solvers over an inexact base ring; see
:meth:`MixedIntegerLinearProgram.get_values`.
OUTPUT:
A dictionary associating to each vertex of `H` the set of vertices in
the current graph representing it.
ALGORITHM:
Mixed Integer Linear Programming
COMPLEXITY:
Theoretically, when `H` is fixed, testing for the existence of a
`H`-minor is polynomial. The known algorithms are highly exponential in
`H`, though.
.. NOTE::
            This function can be expected to be *very* slow, especially when
            the minor does not exist.
EXAMPLES:
Trying to find a minor isomorphic to `K_4` in the `4\times 4` grid::
sage: g = graphs.GridGraph([4,4])
sage: h = graphs.CompleteGraph(4)
sage: L = g.minor(h)
sage: gg = g.subgraph(flatten(L.values(), max_level = 1))
sage: _ = [gg.merge_vertices(l) for l in L.values() if len(l)>1]
sage: gg.is_isomorphic(h)
True
We can also try to prove this way that the Petersen graph is not planar,
as it has a `K_5` minor::
sage: g = graphs.PetersenGraph()
sage: K5_minor = g.minor(graphs.CompleteGraph(5)) # long time
And even a `K_{3,3}` minor::
sage: K33_minor = g.minor(graphs.CompleteBipartiteGraph(3,3)) # long time
(It is much faster to use the linear-time test of planarity in this
situation, though.)
As there is no cycle in a tree, looking for a `K_3` minor is useless.
This function will raise an exception in this case::
sage: g = graphs.RandomGNP(20,.5)
sage: g = g.subgraph(edges = g.min_spanning_tree())
sage: g.is_tree()
True
sage: L = g.minor(graphs.CompleteGraph(3))
Traceback (most recent call last):
...
ValueError: This graph has no minor isomorphic to H !
"""
self._scream_if_not_simple()
H._scream_if_not_simple()
from sage.numerical.mip import MixedIntegerLinearProgram, MIPSolverException
p = MixedIntegerLinearProgram(solver=solver)
# We use frozenset((u, v)) to avoid confusion between (u, v) and (v, u)
# rs = Representative set of a vertex
# for h in H, v in G is such that rs[h,v] == 1 if and only if v
# is a representative of h in self
rs = p.new_variable(binary=True)
for v in self:
p.add_constraint(p.sum(rs[h,v] for h in H), max=1)
# We ensure that the set of representatives of a
# vertex h contains a tree, and thus is connected
# edges represents the edges of the tree
edges = p.new_variable(binary=True)
        # there can be an edge for h between two vertices
        # only if those vertices represent h
for u,v in self.edge_iterator(labels=None):
fuv = frozenset((u, v))
for h in H:
p.add_constraint(edges[h,fuv] - rs[h,u], max=0)
p.add_constraint(edges[h,fuv] - rs[h,v], max=0)
        # The number of edges of the tree in h is exactly the cardinality
        # of its representative set minus 1
for h in H:
p.add_constraint( p.sum(edges[h,frozenset(e)] for e in self.edge_iterator(labels=None))
- p.sum(rs[h,v] for v in self), min=-1, max=-1)
# a tree has no cycle
epsilon = 1/(5*Integer(self.order()))
r_edges = p.new_variable(nonnegative=True)
for h in H:
for u,v in self.edge_iterator(labels=None):
p.add_constraint(r_edges[h,(u,v)] + r_edges[h,(v,u)] - edges[h,frozenset((u,v))], min=0)
for v in self:
p.add_constraint(p.sum(r_edges[h,(u,v)] for u in self.neighbor_iterator(v)), max=1-epsilon)
# Once the representative sets are described, we must ensure
# there are arcs corresponding to those of H between them
h_edges = p.new_variable(nonnegative=True)
for h1, h2 in H.edge_iterator(labels=None):
for v1, v2 in self.edge_iterator(labels=None):
fv1v2 = frozenset((v1, v2))
p.add_constraint(h_edges[(h1,h2),fv1v2] - rs[h2,v2], max=0)
p.add_constraint(h_edges[(h1,h2),fv1v2] - rs[h1,v1], max=0)
p.add_constraint(h_edges[(h2,h1),fv1v2] - rs[h1,v2], max=0)
p.add_constraint(h_edges[(h2,h1),fv1v2] - rs[h2,v1], max=0)
p.add_constraint(p.sum(h_edges[(h1,h2),frozenset(e)] + h_edges[(h2,h1),frozenset(e)]
for e in self.edge_iterator(labels=None)), min=1)
p.set_objective(None)
try:
p.solve(log=verbose)
except MIPSolverException:
raise ValueError("This graph has no minor isomorphic to H !")
rs = p.get_values(rs, convert=bool, tolerance=integrality_tolerance)
rs_dict = {}
for h in H:
rs_dict[h] = [v for v in self if rs[h,v]]
return rs_dict
### Convexity
@doc_index("Algorithmically hard stuff")
def convexity_properties(self):
r"""
Return a ``ConvexityProperties`` object corresponding to ``self``.
This object contains the methods related to convexity in graphs (convex
hull, hull number) and caches useful information so that it becomes
comparatively cheaper to compute the convex hull of many different sets
of the same graph.
.. SEEALSO::
In order to know what can be done through this object, please refer
to module :mod:`sage.graphs.convexity_properties`.
.. NOTE::
            If you want to compute many convex hulls, keep this object in
            memory! When it is created, it builds a table of useful information
            to compute convex hulls. As a result::
sage: g = graphs.PetersenGraph()
sage: g.convexity_properties().hull([1, 3])
[1, 2, 3]
sage: g.convexity_properties().hull([3, 7])
[2, 3, 7]
            Is a terrible waste of computations, while::
sage: g = graphs.PetersenGraph()
sage: CP = g.convexity_properties()
sage: CP.hull([1, 3])
[1, 2, 3]
sage: CP.hull([3, 7])
[2, 3, 7]
Makes perfect sense.
"""
from sage.graphs.convexity_properties import ConvexityProperties
return ConvexityProperties(self)
# Centrality
@doc_index("Distances")
def centrality_degree(self, v=None):
r"""
Return the degree centrality of a vertex.
The degree centrality of a vertex `v` is its degree, divided by
`|V(G)|-1`. For more information, see the :wikipedia:`Centrality`.
INPUT:
        - ``v`` -- a vertex (default: ``None``); set to ``None`` to get a
          dictionary associating each vertex with its centrality degree.
.. SEEALSO::
- :meth:`~sage.graphs.generic_graph.GenericGraph.centrality_closeness`
- :meth:`~sage.graphs.generic_graph.GenericGraph.centrality_betweenness`
EXAMPLES::
sage: (graphs.ChvatalGraph()).centrality_degree()
{0: 4/11, 1: 4/11, 2: 4/11, 3: 4/11, 4: 4/11, 5: 4/11,
6: 4/11, 7: 4/11, 8: 4/11, 9: 4/11, 10: 4/11, 11: 4/11}
sage: D = graphs.DiamondGraph()
sage: D.centrality_degree()
{0: 2/3, 1: 1, 2: 1, 3: 2/3}
sage: D.centrality_degree(v=1)
1
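
        In a path, the middle vertex is adjacent to every other vertex while
        each endpoint reaches only half of them::

            sage: graphs.PathGraph(3).centrality_degree()
            {0: 1/2, 1: 1, 2: 1/2}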
TESTS::
sage: Graph(1).centrality_degree()
Traceback (most recent call last):
...
ValueError: the centrality degree is not defined on graphs with only one vertex
"""
from sage.rings.integer import Integer
n_minus_one = Integer(self.order() - 1)
if n_minus_one == 0:
raise ValueError("the centrality degree is not defined "
"on graphs with only one vertex")
if v is None:
return {v: self.degree(v)/n_minus_one for v in self}
else:
return self.degree(v)/n_minus_one
### Distances
@doc_index("Distances")
def eccentricity(self, v=None, by_weight=False, algorithm=None,
weight_function=None, check_weight=True, dist_dict=None,
with_labels=False):
"""
Return the eccentricity of vertex (or vertices) ``v``.
The eccentricity of a vertex is the maximum distance to any other
vertex.
For more information and examples on how to use input variables, see
:meth:`~GenericGraph.shortest_path_all_pairs`,
:meth:`~GenericGraph.shortest_path_lengths` and
:meth:`~GenericGraph.shortest_paths`
INPUT:
        - ``v`` -- either a single vertex or a list of vertices. If it is not
          specified, then it is taken to be all vertices.
        - ``by_weight`` -- boolean (default: ``False``); if ``True``, edge
          weights are taken into account; if ``False``, all edges have weight 1
- ``algorithm`` -- string (default: ``None``); one of the following
algorithms:
- ``'BFS'`` - the computation is done through a BFS centered on each
vertex successively. Works only if ``by_weight==False``.
          - ``'DHV'`` - the computation is done using the algorithm proposed in
            [Dragan2018]_. Works only if ``self`` has non-negative edge weights
            and either ``v`` is ``None`` or ``v`` contains all vertices of
            ``self``. For more information see method
            :func:`sage.graphs.distances_all_pairs.eccentricity` and
            :func:`sage.graphs.base.boost_graph.eccentricity_DHV`.
          - ``'Floyd-Warshall-Cython'`` - a Cython implementation of the
            Floyd-Warshall algorithm. Works only if ``by_weight==False`` and
            either ``v`` is ``None`` or ``v`` contains all vertices of ``self``.
          - ``'Floyd-Warshall-Python'`` - a Python implementation of the
            Floyd-Warshall algorithm. Works also with weighted graphs, even with
            negative weights (but no negative cycle is allowed). However, either
            ``v`` must be ``None`` or ``v`` must contain all vertices of ``self``.
- ``'Dijkstra_NetworkX'`` - the Dijkstra algorithm, implemented in
NetworkX. It works with weighted graphs, but no negative weight is
allowed.
- ``'Dijkstra_Boost'`` - the Dijkstra algorithm, implemented in Boost
(works only with positive weights).
          - ``'Johnson_Boost'`` - the Johnson algorithm, implemented in
            Boost (works also with negative weights, if there is no negative
            cycle). Works only if either ``v`` is ``None`` or ``v`` contains
            all vertices of ``self``.
- ``'From_Dictionary'`` - uses the (already computed) distances, that
are provided by input variable ``dist_dict``.
- ``None`` (default): Sage chooses the best algorithm:
``'From_Dictionary'`` if ``dist_dict`` is not None, ``'BFS'`` for
unweighted graphs, ``'Dijkstra_Boost'`` if all weights are
positive, ``'Johnson_Boost'`` otherwise.
- ``weight_function`` -- function (default: ``None``); a function that
takes as input an edge ``(u, v, l)`` and outputs its weight. If not
``None``, ``by_weight`` is automatically set to ``True``. If ``None``
and ``by_weight`` is ``True``, we use the edge label ``l`` as a
weight, if ``l`` is not ``None``, else ``1`` as a weight.
- ``check_weight`` -- boolean (default: ``True``); if ``True``, we check
that the ``weight_function`` outputs a number for each edge
- ``dist_dict`` -- a dictionary (default: ``None``); a dict of dicts of
distances (used only if ``algorithm=='From_Dictionary'``)
- ``with_labels`` -- boolean (default: ``False``); whether to return a
list or a dictionary keyed by vertices.
EXAMPLES::
sage: G = graphs.KrackhardtKiteGraph()
sage: G.eccentricity()
[4, 4, 4, 4, 4, 3, 3, 2, 3, 4]
sage: G.vertices()
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
sage: G.eccentricity(7)
2
sage: G.eccentricity([7,8,9])
[2, 3, 4]
sage: G.eccentricity([7,8,9], with_labels=True) == {8: 3, 9: 4, 7: 2}
True
sage: G = Graph( { 0 : [], 1 : [], 2 : [1] } )
sage: G.eccentricity()
[+Infinity, +Infinity, +Infinity]
sage: G = Graph({0:[]})
sage: G.eccentricity(with_labels=True)
{0: 0}
sage: G = Graph({0:[], 1:[]})
sage: G.eccentricity(with_labels=True)
{0: +Infinity, 1: +Infinity}
sage: G = Graph([(0,1,1), (1,2,1), (0,2,3)])
sage: G.eccentricity(algorithm = 'BFS')
[1, 1, 1]
sage: G.eccentricity(algorithm = 'Floyd-Warshall-Cython')
[1, 1, 1]
sage: G.eccentricity(by_weight = True, algorithm = 'Dijkstra_NetworkX')
[2, 1, 2]
sage: G.eccentricity(by_weight = True, algorithm = 'Dijkstra_Boost')
[2, 1, 2]
sage: G.eccentricity(by_weight = True, algorithm = 'Johnson_Boost')
[2, 1, 2]
sage: G.eccentricity(by_weight = True, algorithm = 'Floyd-Warshall-Python')
[2, 1, 2]
sage: G.eccentricity(dist_dict = G.shortest_path_all_pairs(by_weight = True)[0])
[2, 1, 2]
sage: G.eccentricity(by_weight = False, algorithm = 'DHV')
[1, 1, 1]
sage: G.eccentricity(by_weight = True, algorithm = 'DHV')
[2.0, 1.0, 2.0]
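
        The Petersen graph has diameter `2`, so every vertex has eccentricity
        `2`::

            sage: graphs.PetersenGraph().eccentricity(0)
            2
            sage: set(graphs.PetersenGraph().eccentricity())
            {2}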
TESTS:
A non-implemented algorithm::
sage: G.eccentricity(algorithm = 'boh')
Traceback (most recent call last):
...
ValueError: unknown algorithm "boh"
An algorithm that does not work with edge weights::
sage: G.eccentricity(by_weight = True, algorithm = 'BFS')
Traceback (most recent call last):
...
ValueError: algorithm 'BFS' does not work with weights
sage: G.eccentricity(by_weight = True, algorithm = 'Floyd-Warshall-Cython')
Traceback (most recent call last):
...
ValueError: algorithm 'Floyd-Warshall-Cython' does not work with weights
An algorithm that computes the all-pair-shortest-paths when not all
vertices are needed::
sage: G.eccentricity(0, algorithm = 'Floyd-Warshall-Cython')
Traceback (most recent call last):
...
ValueError: algorithm 'Floyd-Warshall-Cython' works only if all eccentricities are needed
sage: G.eccentricity(0, algorithm = 'Floyd-Warshall-Python')
Traceback (most recent call last):
...
ValueError: algorithm 'Floyd-Warshall-Python' works only if all eccentricities are needed
sage: G.eccentricity(0, algorithm = 'Johnson_Boost')
Traceback (most recent call last):
...
ValueError: algorithm 'Johnson_Boost' works only if all eccentricities are needed
sage: G.eccentricity(0, algorithm = 'DHV')
Traceback (most recent call last):
...
ValueError: algorithm 'DHV' works only if all eccentricities are needed
"""
by_weight, weight_function = self._get_weight_function(by_weight=by_weight,
weight_function=weight_function,
check_weight=check_weight)
if algorithm is None:
if dist_dict is not None:
algorithm = 'From_Dictionary'
elif not by_weight:
algorithm = 'BFS'
elif any(float(weight_function(e)) < 0 for e in self.edge_iterator()):
algorithm = 'Johnson_Boost'
if algorithm is None:
algorithm = 'Dijkstra_Boost'
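# To sum up the automatic choice above: 'From_Dictionary' when distances
# are already provided, 'BFS' for unweighted graphs, 'Johnson_Boost' in
# the presence of negative weights, and 'Dijkstra_Boost' otherwise.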
if algorithm in ['BFS', 'Floyd-Warshall-Cython']:
if by_weight:
raise ValueError("algorithm '{}' does not work with weights".format(algorithm))
# We don't want the default weight function
weight_function = None
if v is not None:
if not isinstance(v, list):
v = [v]
v_set = set(v)
if v is None or all(u in v_set for u in self):
if v is None:
v = list(self)
# If we want to use BFS, we use the Cython routine
if algorithm == 'BFS':
from sage.graphs.distances_all_pairs import eccentricity
algo = 'bounds'
if with_labels:
return dict(zip(v, eccentricity(self, algorithm=algo, vertex_list=v)))
else:
return eccentricity(self, algorithm=algo, vertex_list=v)
if algorithm == 'DHV':
if by_weight:
from sage.graphs.base.boost_graph import eccentricity_DHV
if with_labels:
return dict(zip(v, eccentricity_DHV(self, vertex_list=v,
weight_function=weight_function,
check_weight=check_weight)))
else:
return eccentricity_DHV(self, vertex_list=v,
weight_function=weight_function,
check_weight=check_weight)
else:
from sage.graphs.distances_all_pairs import eccentricity
if with_labels:
return dict(zip(v, eccentricity(self, algorithm=algorithm,
vertex_list=v)))
else:
return eccentricity(self, algorithm=algorithm, vertex_list=v)
if algorithm in ['Floyd-Warshall-Python', 'Floyd-Warshall-Cython', 'Johnson_Boost']:
dist_dict = self.shortest_path_all_pairs(by_weight, algorithm,
weight_function,
check_weight)[0]
algorithm = 'From_Dictionary'
elif algorithm in ['Floyd-Warshall-Python', 'Floyd-Warshall-Cython', 'Johnson_Boost', 'DHV']:
raise ValueError("algorithm '" + algorithm + "' works only if all" +
" eccentricities are needed")
ecc = {}
from sage.rings.infinity import Infinity
for u in v:
if algorithm == 'From_Dictionary':
length = dist_dict[u]
else:
# If algorithm is wrong, the error is raised by the
# shortest_path_lengths function
length = self.shortest_path_lengths(u, by_weight=by_weight,
algorithm=algorithm,
weight_function=weight_function,
check_weight=check_weight)
if len(length) != self.num_verts():
ecc[u] = Infinity
else:
ecc[u] = max(length.values())
if with_labels:
return ecc
else:
if len(ecc) == 1:
# return single value
v, = ecc.values()
return v
return [ecc[u] for u in v]
@doc_index("Distances")
def radius(self, by_weight=False, algorithm='DHV', weight_function=None,
check_weight=True):
r"""
Return the radius of the graph.
The radius is defined to be the minimum eccentricity of any vertex,
where the eccentricity is the maximum distance to any other
vertex. For more information and examples on how to use input variables,
see :meth:`~GenericGraph.shortest_paths` and
:meth:`~Graph.eccentricity`
INPUT:
- ``by_weight`` -- boolean (default: ``False``); if ``True``, edge
weights are taken into account; if ``False``, all edges have weight 1
- ``algorithm`` -- string (default: ``'DHV'``).
- ``'DHV'`` - Radius computation is done using the algorithm proposed
in [Dragan2018]_. Works for graphs with non-negative edge weights.
For more information see method
:func:`sage.graphs.distances_all_pairs.radius_DHV` and
:func:`sage.graphs.base.boost_graph.radius_DHV`.
- see method :meth:`eccentricity` for the list of remaining algorithms
- ``weight_function`` -- function (default: ``None``); a function that
takes as input an edge ``(u, v, l)`` and outputs its weight. If not
``None``, ``by_weight`` is automatically set to ``True``. If ``None``
and ``by_weight`` is ``True``, we use the edge label ``l`` as a
weight, if ``l`` is not ``None``, else ``1`` as a weight.
- ``check_weight`` -- boolean (default: ``True``); if ``True``, we check
that the ``weight_function`` outputs a number for each edge
EXAMPLES:
The more symmetric a graph is, the smaller (diameter - radius) is::
sage: G = graphs.BarbellGraph(9, 3)
sage: G.radius()
3
sage: G.diameter()
6
::
sage: G = graphs.OctahedralGraph()
sage: G.radius()
2
sage: G.diameter()
2
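A sanity check: in any connected graph, the radius is at most the
diameter, which in turn is at most twice the radius::
sage: G = graphs.PetersenGraph()
sage: G.radius() <= G.diameter() <= 2*G.radius()
True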
TESTS::
sage: g = Graph()
sage: g.radius()
Traceback (most recent call last):
...
ValueError: radius is not defined for the empty graph
"""
if not self.order():
raise ValueError("radius is not defined for the empty graph")
if not algorithm:
algorithm = 'DHV'
if algorithm == 'DHV':
by_weight, weight_function = self._get_weight_function(by_weight=by_weight,
weight_function=weight_function,
check_weight=check_weight)
if by_weight:
from sage.graphs.base.boost_graph import radius_DHV
return radius_DHV(self, weight_function=weight_function,
check_weight=False)
else:
from sage.graphs.distances_all_pairs import radius_DHV
return radius_DHV(self)
return min(self.eccentricity(v=None, by_weight=by_weight,
weight_function=weight_function,
check_weight=check_weight,
algorithm=algorithm))
@doc_index("Distances")
def diameter(self, by_weight=False, algorithm=None, weight_function=None,
check_weight=True):
r"""
Return the diameter of the graph.
The diameter is defined to be the maximum distance between two vertices.
It is infinite if the graph is not connected.
For more information and examples on how to use input variables, see
:meth:`~GenericGraph.shortest_paths` and
:meth:`~Graph.eccentricity`
INPUT:
- ``by_weight`` -- boolean (default: ``False``); if ``True``, edge
weights are taken into account; if ``False``, all edges have weight 1
- ``algorithm`` -- string (default: ``None``); one of the following
algorithms:
- ``'BFS'``: the computation is done through a BFS centered on each
vertex successively. Works only if ``by_weight==False``.
- ``'Floyd-Warshall-Cython'``: a Cython implementation of the
Floyd-Warshall algorithm. Works only if ``by_weight==False`` and ``v
is None``.
- ``'Floyd-Warshall-Python'``: a Python implementation of the
Floyd-Warshall algorithm. Works also with weighted graphs, even with
negative weights (but no negative cycle is allowed). However, ``v``
must be ``None``.
- ``'Dijkstra_NetworkX'``: the Dijkstra algorithm, implemented in
NetworkX. It works with weighted graphs, but no negative weight is
allowed.
- ``'DHV'`` - diameter computation is done using the algorithm
proposed in [Dragan2018]_. Works only for non-negative edge weights.
For more information see method
:func:`sage.graphs.distances_all_pairs.diameter_DHV` and
:func:`sage.graphs.base.boost_graph.diameter_DHV`.
- ``'standard'``, ``'2sweep'``, ``'multi-sweep'``, ``'iFUB'``:
these algorithms are implemented in
:func:`sage.graphs.distances_all_pairs.diameter`
They work only if ``by_weight==False``. See the function
documentation for more information.
- ``'Dijkstra_Boost'``: the Dijkstra algorithm, implemented in Boost
(works only with positive weights).
- ``'Johnson_Boost'``: the Johnson algorithm, implemented in
Boost (works also with negative weights, if there is no negative
cycle).
- ``None`` (default): Sage chooses the best algorithm: ``'iFUB'`` for
unweighted graphs, ``'DHV'`` for weighted graphs.
- ``weight_function`` -- function (default: ``None``); a function that
takes as input an edge ``(u, v, l)`` and outputs its weight. If not
``None``, ``by_weight`` is automatically set to ``True``. If ``None``
and ``by_weight`` is ``True``, we use the edge label ``l`` as a
weight, if ``l`` is not ``None``, else ``1`` as a weight.
- ``check_weight`` -- boolean (default: ``True``); if ``True``, we check
that the ``weight_function`` outputs a number for each edge
EXAMPLES:
The more symmetric a graph is, the smaller (diameter - radius) is::
sage: G = graphs.BarbellGraph(9, 3)
sage: G.radius()
3
sage: G.diameter()
6
::
sage: G = graphs.OctahedralGraph()
sage: G.radius()
2
sage: G.diameter()
2
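On a path graph, the diameter is the number of vertices minus one::
sage: graphs.PathGraph(5).diameter()
4
sage: graphs.PathGraph(5).diameter(algorithm='standard')
4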
TESTS::
sage: g = Graph()
sage: g.diameter()
Traceback (most recent call last):
...
ValueError: diameter is not defined for the empty graph
sage: g = Graph([(1, 2, {'weight': 1})])
sage: g.diameter(algorithm='iFUB', weight_function=lambda e: e[2]['weight'])
Traceback (most recent call last):
...
ValueError: algorithm 'iFUB' does not work on weighted graphs
"""
if not self.order():
raise ValueError("diameter is not defined for the empty graph")
by_weight, weight_function = self._get_weight_function(by_weight=by_weight,
weight_function=weight_function,
check_weight=check_weight)
if not by_weight:
# We don't want the default weight function
weight_function = None
if algorithm is None:
if by_weight:
algorithm = 'DHV'
else:
algorithm = 'iFUB'
elif algorithm == 'BFS':
algorithm = 'standard'
if algorithm == 'DHV':
if by_weight:
from sage.graphs.base.boost_graph import diameter_DHV
return diameter_DHV(self, weight_function=weight_function,
check_weight=False)
else:
from sage.graphs.distances_all_pairs import diameter
return diameter(self, algorithm=algorithm)
if algorithm in ['standard', '2sweep', 'multi-sweep', 'iFUB']:
if by_weight:
raise ValueError("algorithm '" + algorithm + "' does not work" +
" on weighted graphs")
from sage.graphs.distances_all_pairs import diameter
return diameter(self, algorithm=algorithm)
return max(self.eccentricity(v=list(self), by_weight=by_weight,
weight_function=weight_function,
check_weight=False,
algorithm=algorithm))
@doc_index("Distances")
def center(self, by_weight=False, algorithm=None, weight_function=None,
check_weight=True):
r"""
Return the set of vertices in the center of the graph.
The center is the set of vertices whose eccentricity is equal to the
radius of the graph, i.e., achieving the minimum eccentricity.
For more information and examples on how to use input variables,
see :meth:`~GenericGraph.shortest_paths` and
:meth:`~Graph.eccentricity`
INPUT:
- ``by_weight`` -- boolean (default: ``False``); if ``True``, edge
weights are taken into account; if ``False``, all edges have weight 1
- ``algorithm`` -- string (default: ``None``); see method
:meth:`eccentricity` for the list of available algorithms
- ``weight_function`` -- function (default: ``None``); a function that
takes as input an edge ``(u, v, l)`` and outputs its weight. If not
``None``, ``by_weight`` is automatically set to ``True``. If ``None``
and ``by_weight`` is ``True``, we use the edge label ``l`` as a
weight, if ``l`` is not ``None``, else ``1`` as a weight.
- ``check_weight`` -- boolean (default: ``True``); if ``True``, we check
that the ``weight_function`` outputs a number for each edge
EXAMPLES:
Is Central African Republic in the center of Africa in graph theoretic
sense? Yes::
sage: A = graphs.AfricaMap(continental=True)
sage: sorted(A.center())
['Cameroon', 'Central Africa']
Some other graphs. Center can be the whole graph::
sage: G = graphs.DiamondGraph()
sage: G.center()
[1, 2]
sage: P = graphs.PetersenGraph()
sage: P.subgraph(P.center()) == P
True
sage: S = graphs.StarGraph(19)
sage: S.center()
[0]
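The center of a path with an odd number of vertices is its middle
vertex::
sage: graphs.PathGraph(5).center()
[2]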
TESTS::
sage: G = Graph()
sage: G.center()
[]
sage: G.add_vertex()
0
sage: G.center()
[0]
"""
ecc = self.eccentricity(v=list(self), by_weight=by_weight,
weight_function=weight_function,
algorithm=algorithm,
check_weight=check_weight,
with_labels=True)
try:
r = min(ecc.values())
except Exception:
return []
return [v for v in self if ecc[v] == r]
@doc_index("Distances")
def periphery(self, by_weight=False, algorithm=None, weight_function=None,
check_weight=True):
r"""
Return the set of vertices in the periphery of the graph.
The periphery is the set of vertices whose eccentricity is equal to the
diameter of the graph, i.e., achieving the maximum eccentricity.
For more information and examples on how to use input variables,
see :meth:`~GenericGraph.shortest_paths` and
:meth:`~Graph.eccentricity`
INPUT:
- ``by_weight`` -- boolean (default: ``False``); if ``True``, edge
weights are taken into account; if ``False``, all edges have weight 1
- ``algorithm`` -- string (default: ``None``); see method
:meth:`eccentricity` for the list of available algorithms
- ``weight_function`` -- function (default: ``None``); a function that
takes as input an edge ``(u, v, l)`` and outputs its weight. If not
``None``, ``by_weight`` is automatically set to ``True``. If ``None``
and ``by_weight`` is ``True``, we use the edge label ``l`` as a
weight, if ``l`` is not ``None``, else ``1`` as a weight.
- ``check_weight`` -- boolean (default: ``True``); if ``True``, we check
that the ``weight_function`` outputs a number for each edge
EXAMPLES::
sage: G = graphs.DiamondGraph()
sage: G.periphery()
[0, 3]
sage: P = graphs.PetersenGraph()
sage: P.subgraph(P.periphery()) == P
True
sage: S = graphs.StarGraph(19)
sage: S.periphery()
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
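The periphery of a path graph consists of its two endpoints::
sage: graphs.PathGraph(5).periphery()
[0, 4]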
sage: G = Graph()
sage: G.periphery()
[]
sage: G.add_vertex()
0
sage: G.periphery()
[0]
"""
ecc = self.eccentricity(v=list(self), by_weight=by_weight,
weight_function=weight_function,
algorithm=algorithm,
check_weight=check_weight,
with_labels=True)
try:
d = max(ecc.values())
except Exception:
return []
return [v for v in self if ecc[v] == d]
### Constructors
@doc_index("Basic methods")
def to_directed(self, data_structure=None, sparse=None):
"""
Return a directed version of the graph.
A single edge becomes two edges, one in each direction.
INPUT:
- ``data_structure`` -- one of ``"sparse"``, ``"static_sparse"``, or
``"dense"``. See the documentation of :class:`Graph` or
:class:`DiGraph`.
- ``sparse`` -- boolean (default: ``None``); ``sparse=True`` is an
alias for ``data_structure="sparse"``, and ``sparse=False`` is an
alias for ``data_structure="dense"``.
EXAMPLES::
sage: graphs.PetersenGraph().to_directed()
Petersen graph: Digraph on 10 vertices
TESTS:
Immutable graphs yield immutable graphs::
sage: Graph([[1, 2]], immutable=True).to_directed()._backend
<sage.graphs.base.static_sparse_backend.StaticSparseBackend object at ...>
:trac:`17005`::
sage: Graph([[1,2]], immutable=True).to_directed()
Digraph on 2 vertices
:trac:`22424`::
sage: G1=graphs.RandomGNP(5,0.5)
sage: gp1 = G1.graphplot(save_pos=True)
sage: G2=G1.to_directed()
sage: G2.delete_vertex(0)
sage: G2.add_vertex(5)
sage: gp2 = G2.graphplot()
sage: gp1 = G1.graphplot()
Vertex labels will be retained (:trac:`14708`)::
sage: G = Graph({0: [1, 2], 1: [0]})
sage: G.set_vertex(0, 'foo')
sage: D = G.to_directed()
sage: G.get_vertices()
{0: 'foo', 1: None, 2: None}
sage: D.get_vertices()
{0: 'foo', 1: None, 2: None}
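Each undirected edge yields two opposite arcs, so the size doubles::
sage: G = graphs.PetersenGraph()
sage: G.to_directed().size() == 2 * G.size()
True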
"""
if sparse is not None:
if data_structure is not None:
raise ValueError("The 'sparse' argument is an alias for "
"'data_structure'. Please do not define both.")
data_structure = "sparse" if sparse else "dense"
if data_structure is None:
from sage.graphs.base.dense_graph import DenseGraphBackend
from sage.graphs.base.sparse_graph import SparseGraphBackend
if isinstance(self._backend, DenseGraphBackend):
data_structure = "dense"
elif isinstance(self._backend, SparseGraphBackend):
data_structure = "sparse"
else:
data_structure = "static_sparse"
from sage.graphs.all import DiGraph
D = DiGraph(name=self.name(),
pos=self.get_pos(),
multiedges=self.allows_multiple_edges(),
loops=self.allows_loops(),
data_structure=(data_structure if data_structure != "static_sparse"
else "sparse"))  # we need a mutable copy
D.add_vertices(self.vertex_iterator())
D.set_vertices(self.get_vertices())
for u, v, l in self.edge_iterator():
D.add_edge(u, v, l)
D.add_edge(v, u, l)
if hasattr(self, '_embedding'):
D._embedding = copy(self._embedding)
D._weighted = self._weighted
if data_structure == "static_sparse":
D = D.copy(data_structure=data_structure)
return D
@doc_index("Basic methods")
def to_undirected(self):
"""
Since the graph is already undirected, simply returns a copy of itself.
EXAMPLES::
sage: graphs.PetersenGraph().to_undirected()
Petersen graph: Graph on 10 vertices
"""
return self.copy()
@doc_index("Basic methods")
def join(self, other, labels="pairs", immutable=None):
r"""
Return the join of ``self`` and ``other``.
INPUT:
- ``labels`` -- string (default: ``'pairs'``); if set to ``'pairs'``, each
element `v` in the first graph will be named `(0, v)` and each element
`u` in ``other`` will be named `(1, u)` in the result. If set to
``'integers'``, the elements of the result will be relabeled with
consecutive integers.
- ``immutable`` -- boolean (default: ``None``); whether to create a
mutable/immutable join. ``immutable=None`` (default) means that the
graphs and their join will behave the same way.
.. SEEALSO::
* :meth:`~sage.graphs.generic_graph.GenericGraph.union`
* :meth:`~sage.graphs.generic_graph.GenericGraph.disjoint_union`
EXAMPLES::
sage: G = graphs.CycleGraph(3)
sage: H = Graph(2)
sage: J = G.join(H); J
Cycle graph join : Graph on 5 vertices
sage: J.vertices()
[(0, 0), (0, 1), (0, 2), (1, 0), (1, 1)]
sage: J = G.join(H, labels='integers'); J
Cycle graph join : Graph on 5 vertices
sage: J.vertices()
[0, 1, 2, 3, 4]
sage: J.edges()
[(0, 1, None), (0, 2, None), (0, 3, None), (0, 4, None), (1, 2, None), (1, 3, None), (1, 4, None), (2, 3, None), (2, 4, None)]
::
sage: G = Graph(3)
sage: G.name("Graph on 3 vertices")
sage: H = Graph(2)
sage: H.name("Graph on 2 vertices")
sage: J = G.join(H); J
Graph on 3 vertices join Graph on 2 vertices: Graph on 5 vertices
sage: J.vertices()
[(0, 0), (0, 1), (0, 2), (1, 0), (1, 1)]
sage: J = G.join(H, labels='integers'); J
Graph on 3 vertices join Graph on 2 vertices: Graph on 5 vertices
sage: J.edges()
[(0, 3, None), (0, 4, None), (1, 3, None), (1, 4, None), (2, 3, None), (2, 4, None)]
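The size of a join is the sum of the sizes of the two graphs plus the
product of their orders::
sage: G.join(H).size() == G.size() + H.size() + G.order() * H.order()
True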
"""
G = self.disjoint_union(other, labels=labels, immutable=False)
if labels == "integers":
G.add_edges((u, v) for u in range(self.order())
for v in range(self.order(), self.order() + other.order()))
else:
G.add_edges(((0, u), (1, v)) for u in self for v in other)
G.name('%s join %s' % (self.name(), other.name()))
if immutable is None:
immutable = self.is_immutable() and other.is_immutable()
if immutable:
G = G.copy(immutable=True)
return G
@doc_index("Leftovers")
def seidel_adjacency_matrix(self, vertices=None):
r"""
Return the Seidel adjacency matrix of ``self``.
Returns `J-I-2A`, for `A` the (ordinary) :meth:`adjacency matrix
<sage.graphs.generic_graph.GenericGraph.adjacency_matrix>` of ``self``,
`I` the identity matrix, and `J` the all-1 matrix. It is closely
related to :meth:`twograph`.
The matrix returned is over the integers. If a different ring is
desired, use either the :meth:`sage.matrix.matrix0.Matrix.change_ring`
method or the :func:`matrix` function.
INPUT:
- ``vertices`` -- list of vertices (default: ``None``); the ordering of
the vertices defining how they should appear in the matrix. By
default, the ordering given by
:meth:`~sage.graphs.generic_graph.GenericGraph.vertices` is used.
EXAMPLES::
sage: G = graphs.CycleGraph(5)
sage: G = G.disjoint_union(graphs.CompleteGraph(1))
sage: G.seidel_adjacency_matrix().minpoly()
x^2 - 5
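On a complete graph, `J - I - 2A = I - J`, so all off-diagonal entries
are `-1`::
sage: graphs.CompleteGraph(3).seidel_adjacency_matrix()
[ 0 -1 -1]
[-1  0 -1]
[-1 -1  0]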
"""
return - self.adjacency_matrix(sparse=False, vertices=vertices) \
+ self.complement().adjacency_matrix(sparse=False, vertices=vertices)
@doc_index("Leftovers")
def seidel_switching(self, s, inplace=True):
r"""
Return the Seidel switching of ``self`` w.r.t. subset of vertices ``s``.
Returns the graph obtained by Seidel switching of ``self`` with respect
to the subset of vertices ``s``. This is the graph given by Seidel
adjacency matrix `DSD`, for `S` the Seidel adjacency matrix of ``self``,
and `D` the diagonal matrix with -1s at positions corresponding to
``s``, and 1s elsewhere.
INPUT:
- ``s`` -- a list of vertices of ``self``.
- ``inplace`` -- boolean (default: ``True``); whether to do the
modification inplace, or to return a copy of the graph after
switching.
EXAMPLES::
sage: G = graphs.CycleGraph(5)
sage: G = G.disjoint_union(graphs.CompleteGraph(1))
sage: G.seidel_switching([(0,1),(1,0),(0,0)])
sage: G.seidel_adjacency_matrix().minpoly()
x^2 - 5
sage: G.is_connected()
True
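One can also check the matrix identity `DSD` directly (using the Sage
built-in ``diagonal_matrix``)::
sage: P = graphs.PetersenGraph()
sage: s = [0, 1, 2]
sage: H = P.seidel_switching(s, inplace=False)
sage: D = diagonal_matrix([-1 if v in s else 1 for v in P.vertices()])
sage: H.seidel_adjacency_matrix() == D * P.seidel_adjacency_matrix() * D
True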
TESTS::
sage: H = G.seidel_switching([1,4,5],inplace=False)
sage: G.seidel_switching([1,4,5])
sage: G == H
True
"""
G = self if inplace else copy(self)
boundary = self.edge_boundary(s)
G.add_edges(itertools.product(s, set(self).difference(s)))
G.delete_edges(boundary)
if not inplace:
return G
@doc_index("Leftovers")
def twograph(self):
r"""
Return the two-graph of ``self``
Returns the :class:`two-graph <sage.combinat.designs.twographs.TwoGraph>`
with the triples
`T=\{t \in \binom {V}{3} : \left| \binom {t}{2} \cap E \right| \text{odd} \}`
where `V` and `E` are vertices and edges of ``self``, respectively.
EXAMPLES::
sage: p=graphs.PetersenGraph()
sage: p.twograph()
Incidence structure with 10 points and 60 blocks
sage: p=graphs.chang_graphs()
sage: T8 = graphs.CompleteGraph(8).line_graph()
sage: C = T8.seidel_switching([(0,1,None),(2,3,None),(4,5,None),(6,7,None)],inplace=False)
sage: T8.twograph() == C.twograph()
True
sage: T8.is_isomorphic(C)
False
TESTS::
sage: from sage.combinat.designs.twographs import TwoGraph
sage: p=graphs.PetersenGraph().twograph()
sage: TwoGraph(p, check=True)
Incidence structure with 10 points and 60 blocks
.. SEEALSO::
- :meth:`~sage.combinat.designs.twographs.TwoGraph.descendant` --
computes the descendant graph of the two-graph of self at a vertex
- :func:`~sage.combinat.designs.twographs.twograph_descendant`
-- ditto, but much faster.
"""
from sage.combinat.designs.twographs import TwoGraph
G = self.relabel(range(self.order()), inplace=False)
T = []
# Triangles
for x,y,z in G.subgraph_search_iterator(Graph({1:[2,3], 2:[3]})):
if x < y and y < z:
T.append([x, y, z])
# Triples with just one edge
for x,y,z in G.subgraph_search_iterator(Graph({1:[2], 3:[]}), induced=True):
if x < y:
T.append([x, y, z])
T = TwoGraph(T)
T.relabel({i: v for i,v in enumerate(self.vertices())})
return T
### Visualization
@doc_index("Basic methods")
def write_to_eps(self, filename, **options):
r"""
Write a plot of the graph to ``filename`` in ``eps`` format.
INPUT:
- ``filename`` -- a string
- ``**options`` -- same layout options as :meth:`.layout`
EXAMPLES::
sage: P = graphs.PetersenGraph()
sage: P.write_to_eps(tmp_filename(ext='.eps'))
It is relatively simple to include this file in a LaTeX document.
``\usepackage{graphics}`` must appear in the preamble, and
``\includegraphics{filename}`` will include the file. To compile the
document to ``pdf`` with ``pdflatex`` or ``xelatex`` the file needs
first to be converted to ``pdf``, for example with ``ps2pdf filename.eps
filename.pdf``.
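For instance, a minimal LaTeX document including a plot saved (here, by
assumption) as ``graph.eps`` could look like::
\documentclass{article}
\usepackage{graphics}
\begin{document}
\includegraphics{graph}
\end{document}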
"""
from sage.graphs.print_graphs import print_graph_eps
pos = self.layout(**options)
[xmin, xmax, ymin, ymax] = self._layout_bounding_box(pos)
for v in pos:
pos[v] = (1.8*(pos[v][0] - xmin)/(xmax - xmin) - 0.9, 1.8*(pos[v][1] - ymin)/(ymax - ymin) - 0.9)
if filename[-4:] != '.eps':
filename += '.eps'
with open(filename, 'w') as f:
f.write(print_graph_eps(self.vertices(), self.edge_iterator(), pos))
@doc_index("Algorithmically hard stuff")
def topological_minor(self, H, vertices=False, paths=False, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
r"""
Return a topological `H`-minor from ``self`` if one exists.
We say that a graph `G` has a topological `H`-minor (or that it has a
graph isomorphic to `H` as a topological minor), if `G` contains a
subdivision of a graph isomorphic to `H` (i.e. obtained from `H`
through arbitrary subdivision of its edges) as a subgraph.
For more information, see the :wikipedia:`Minor_(graph_theory)`.
INPUT:
- ``H`` -- The topological minor to find in the current graph.
- ``solver`` -- string (default: ``None``); specify a Mixed Integer
Linear Programming (MILP) solver to be used. If set to ``None``, the
default one is used. For more information on MILP solvers and which
default solver is used, see the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
- ``integrality_tolerance`` -- float; parameter for use with MILP
solvers over an inexact base ring; see
:meth:`MixedIntegerLinearProgram.get_values`.
OUTPUT:
The topological `H`-minor found is returned as a subgraph `M` of
``self``, such that the vertex `v` of `M` that represents a vertex `h\in
H` has ``h`` as a label (see :meth:`get_vertex
<sage.graphs.generic_graph.GenericGraph.get_vertex>` and
:meth:`set_vertex <sage.graphs.generic_graph.GenericGraph.set_vertex>`),
and such that every edge of `M` has as a label the edge of `H` it
(partially) represents.
If no topological minor is found, this method returns ``False``.
ALGORITHM:
Mixed Integer Linear Programming.
COMPLEXITY:
Theoretically, when `H` is fixed, testing for the existence of a
topological `H`-minor is polynomial. The known algorithms are highly
exponential in `H`, though.
.. NOTE::
This function can be expected to be *very* slow, especially where
the topological minor does not exist.
(CPLEX seems to be *much* more efficient than GLPK on this kind of
problem.)
EXAMPLES:
Petersen's graph has a topological `K_4`-minor::
sage: g = graphs.PetersenGraph()
sage: g.topological_minor(graphs.CompleteGraph(4))
Subgraph of (Petersen graph): Graph on ...
And a topological `K_{3,3}`-minor::
sage: g.topological_minor(graphs.CompleteBipartiteGraph(3,3))
Subgraph of (Petersen graph): Graph on ...
And of course, a tree has no topological `C_3`-minor::
sage: g = graphs.RandomGNP(15,.3)
sage: g = g.subgraph(edges = g.min_spanning_tree())
sage: g.topological_minor(graphs.CycleGraph(3))
False
"""
self._scream_if_not_simple()
H._scream_if_not_simple()
# Useful alias ...
G = self
from sage.numerical.mip import MixedIntegerLinearProgram, MIPSolverException
p = MixedIntegerLinearProgram(solver=solver)
# This is an existence problem
p.set_objective(None)
#########################
# Vertex representative #
#########################
#
# v_repr[h,g] = 1 if vertex h from H is represented by vertex
# g from G, 0 otherwise
v_repr = p.new_variable(binary=True)
# Exactly one representative per vertex of H
for h in H:
p.add_constraint(p.sum(v_repr[h,g] for g in G), min=1, max=1)
# A vertex of G can only represent one vertex of H
for g in G:
p.add_constraint(p.sum(v_repr[h,g] for h in H), max=1)
#####################
# Is representative #
#####################
#
# is_repr[v] = 1 if v represents some vertex of H
is_repr = p.new_variable(binary=True)
for g in G:
for h in H:
p.add_constraint(v_repr[h,g] - is_repr[g], max=0)
#####################################
# Paths between the representatives #
#####################################
#
# For any edge (h1,h2) in H, we have a corresponding path in G
# between the representatives of h1 and h2. Which means there is
# a flow of intensity 1 from one to the other.
# We are then writing a flow problem for each edge of H.
#
# The variable flow[(h1,h2),(g1,g2)] indicates the amount of
# flow on the edge (g1,g2) representing the edge (h1,h2).
flow = p.new_variable(binary=True)
# These helper functions return the flow of commodity C entering or
# leaving vertex v, and the balance between the two
def flow_in(C, v):
return p.sum(flow[C,(v,u)] for u in G.neighbor_iterator(v))
def flow_out(C, v):
return p.sum(flow[C,(u,v)] for u in G.neighbor_iterator(v))
def flow_balance(C, v):
return flow_in(C,v) - flow_out(C,v)
for h1,h2 in H.edge_iterator(labels=False):
for v in G:
# The flow balance depends on whether the vertex v is a
# representative of h1 or h2 in G, or a representative of none
p.add_constraint(flow_balance((h1,h2),v) == v_repr[h1,v] - v_repr[h2,v])
#############################
# Internal vertex of a path #
#############################
#
# is_internal[C,g] = 1 if vertex g of G is located on the
# path representing the edge (= commodity) C
is_internal = p.new_variable(binary=True)
# When is a vertex internal for a commodity?
for C in H.edge_iterator(labels=False):
for g in G:
p.add_constraint(flow_in(C,g) + flow_out(C,g) - is_internal[C,g], max=1)
###########################
# Two paths do not cross! #
###########################
# A vertex can be internal for at most one commodity, and for none
# at all if it is a representative
for g in G:
p.add_constraint(p.sum(is_internal[C,g] for C in H.edge_iterator(labels=False))
+ is_repr[g], max=1)
# (The following inequalities are not necessary, but they seem to
# help: the solvers find the answer quicker when they are added.)
# The flow on one edge can go in only one direction. Besides, it can
# belong to at most one commodity and has a maximum intensity of 1.
for g1,g2 in G.edge_iterator(labels=None):
p.add_constraint( p.sum(flow[C,(g1,g2)] for C in H.edge_iterator(labels=False))
+ p.sum(flow[C,(g2,g1)] for C in H.edge_iterator(labels=False)),
max=1)
# Now we can solve the problem itself!
try:
p.solve(log=verbose)
except MIPSolverException:
return False
minor = G.subgraph(immutable=False)
is_repr = p.get_values(is_repr, convert=bool, tolerance=integrality_tolerance)
v_repr = p.get_values(v_repr, convert=bool, tolerance=integrality_tolerance)
flow = p.get_values(flow, convert=bool, tolerance=integrality_tolerance)
for u,v in minor.edge_iterator(labels=False):
used = False
for C in H.edge_iterator(labels=False):
if flow[C,(u,v)] or flow[C,(v,u)]:
used = True
minor.set_edge_label(u, v, C)
break
if not used:
minor.delete_edge(u, v)
minor.delete_vertices(v for v in minor if minor.degree(v) == 0)
for g in minor:
if is_repr[g]:
for h in H:
if v_repr[h,g]:
minor.set_vertex(g, h)
break
return minor
### Cliques
@doc_index("Clique-related methods")
def cliques_maximal(self, algorithm="native"):
"""
Return the list of all maximal cliques.
Each clique is represented by a list of vertices. A clique is an induced
complete subgraph, and a maximal clique is one not contained in a larger
one.
INPUT:
- ``algorithm`` -- can be set to ``"native"`` (default) to use Sage's
own implementation, or to ``"NetworkX"`` to use NetworkX's
implementation of the Bron and Kerbosch Algorithm [BK1973]_.
.. NOTE::
This method sorts its output before returning it. If you prefer to
save the extra time, you can call
:class:`sage.graphs.independent_sets.IndependentSets` directly.
.. NOTE::
Sage's implementation of the enumeration of *maximal* independent
sets is not much faster than NetworkX's (expect a 2x speedup), which
is surprising as it is written in Cython. This being said, the
algorithm from NetworkX appears to be slightly different from this
one, and that would be a good thing to explore if one wants to
improve the implementation.
ALGORITHM:
This function is based on NetworkX's implementation of the Bron and
Kerbosch Algorithm [BK1973]_.
EXAMPLES::
sage: graphs.ChvatalGraph().cliques_maximal()
[[0, 1], [0, 4], [0, 6], [0, 9], [1, 2], [1, 5], [1, 7], [2, 3],
[2, 6], [2, 8], [3, 4], [3, 7], [3, 9], [4, 5], [4, 8], [5, 10],
[5, 11], [6, 10], [6, 11], [7, 8], [7, 11], [8, 10], [9, 10], [9, 11]]
sage: G = Graph({0:[1,2,3], 1:[2], 3:[0,1]})
sage: G.show(figsize=[2, 2])
sage: G.cliques_maximal()
[[0, 1, 2], [0, 1, 3]]
sage: C = graphs.PetersenGraph()
sage: C.cliques_maximal()
[[0, 1], [0, 4], [0, 5], [1, 2], [1, 6], [2, 3], [2, 7], [3, 4],
[3, 8], [4, 9], [5, 7], [5, 8], [6, 8], [6, 9], [7, 9]]
sage: C = Graph('DJ{')
sage: C.cliques_maximal()
[[0, 4], [1, 2, 3, 4]]
Comparing the two implementations::
sage: g = graphs.RandomGNP(20,.7)
sage: s1 = Set(map(Set, g.cliques_maximal(algorithm="NetworkX")))
sage: s2 = Set(map(Set, g.cliques_maximal(algorithm="native")))
sage: s1 == s2
True
"""
if algorithm == "native":
from sage.graphs.independent_sets import IndependentSets
return list(IndependentSets(self, maximal=True, complement=True))
elif algorithm == "NetworkX":
import networkx
return list(networkx.find_cliques(self.networkx_graph()))
else:
raise ValueError("Algorithm must be equal to 'native' or to 'NetworkX'.")
@doc_index("Clique-related methods")
def clique_maximum(self, algorithm="Cliquer", solver=None, verbose=0,
*, integrality_tolerance=1e-3):
"""
Return the vertex set of a maximum order complete subgraph.
INPUT:
- ``algorithm`` -- the algorithm to be used :
- If ``algorithm = "Cliquer"`` (default), wraps the C program
Cliquer [NO2003]_.
- If ``algorithm = "MILP"``, the problem is solved through a Mixed
Integer Linear Program.
(see :class:`~sage.numerical.mip.MixedIntegerLinearProgram`)
- If ``algorithm = "mcqd"``, uses the MCQD solver
(`<http://www.sicmm.org/~konc/maxclique/>`_). Note that the MCQD
package must be installed.
- ``solver`` -- string (default: ``None``); specify a Mixed Integer
Linear Programming (MILP) solver to be used. If set to ``None``, the
default one is used. For more information on MILP solvers and which
default solver is used, see the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
- ``integrality_tolerance`` -- float; parameter for use with MILP
solvers over an inexact base ring; see
:meth:`MixedIntegerLinearProgram.get_values`.
Parameters ``solver`` and ``verbose`` are used only when
``algorithm="MILP"``.
.. NOTE::
Currently only implemented for undirected graphs. Use ``to_undirected``
to convert a digraph to an undirected graph.
ALGORITHM:
This function is based on Cliquer [NO2003]_.
EXAMPLES:
Using Cliquer (default)::
sage: C = graphs.PetersenGraph()
sage: C.clique_maximum()
[7, 9]
sage: C = Graph('DJ{')
sage: C.clique_maximum()
[1, 2, 3, 4]
Through a Linear Program::
sage: len(C.clique_maximum(algorithm="MILP"))
4
TESTS:
Wrong algorithm::
sage: C.clique_maximum(algorithm="BFS")
Traceback (most recent call last):
...
NotImplementedError: Only 'MILP', 'Cliquer' and 'mcqd' are supported.
"""
self._scream_if_not_simple(allow_multiple_edges=True)
if algorithm == "Cliquer":
from sage.graphs.cliquer import max_clique
return max_clique(self)
elif algorithm == "MILP":
return self.complement().independent_set(algorithm=algorithm, solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance)
elif algorithm == "mcqd":
return mcqd(self)
else:
raise NotImplementedError("Only 'MILP', 'Cliquer' and 'mcqd' are supported.")
@doc_index("Clique-related methods")
def clique_number(self, algorithm="Cliquer", cliques=None, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
r"""
Return the order of the largest clique of the graph.
This is also called the clique number.
.. NOTE::
Currently only implemented for undirected graphs. Use ``to_undirected``
to convert a digraph to an undirected graph.
INPUT:
- ``algorithm`` -- the algorithm to be used :
- If ``algorithm = "Cliquer"``, wraps the C program Cliquer
[NO2003]_.
- If ``algorithm = "networkx"``, uses the NetworkX's implementation of
the Bron and Kerbosch Algorithm [BK1973]_.
- If ``algorithm = "MILP"``, the problem is solved through a Mixed
Integer Linear Program.
(see :class:`~sage.numerical.mip.MixedIntegerLinearProgram`)
- If ``algorithm = "mcqd"``, uses the MCQD solver
(`<http://insilab.org/maxclique/>`_). Note that the MCQD
package must be installed.
- ``cliques`` -- an optional list of cliques that can be input if
already computed. Ignored unless ``algorithm=="networkx"``.
- ``solver`` -- string (default: ``None``); specify a Mixed Integer
Linear Programming (MILP) solver to be used. If set to ``None``, the
default one is used. For more information on MILP solvers and which
default solver is used, see the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
- ``integrality_tolerance`` -- float; parameter for use with MILP
solvers over an inexact base ring; see
:meth:`MixedIntegerLinearProgram.get_values`.
ALGORITHM:
This function is based on Cliquer [NO2003]_ and [BK1973]_.
EXAMPLES::
sage: C = Graph('DJ{')
sage: C.clique_number()
4
sage: G = Graph({0:[1,2,3], 1:[2], 3:[0,1]})
sage: G.show(figsize=[2,2])
sage: G.clique_number()
3
By definition the clique number of a complete graph is its order::
sage: all(graphs.CompleteGraph(i).clique_number() == i for i in range(1,15))
True
A non-empty graph without edges has a clique number of 1::
sage: all((i*graphs.CompleteGraph(1)).clique_number() == 1 for i in range(1,15))
True
A complete multipartite graph with k parts has clique number k::
sage: all((i*graphs.CompleteMultipartiteGraph(i*[5])).clique_number() == i for i in range(1,6))
True
TESTS::
sage: g = graphs.PetersenGraph()
sage: g.clique_number(algorithm="MILP")
2
sage: for i in range(10): # optional - mcqd
....: g = graphs.RandomGNP(15,.5) # optional - mcqd
....: if g.clique_number() != g.clique_number(algorithm="mcqd"): # optional - mcqd
....: print("This is dead wrong !") # optional - mcqd
"""
self._scream_if_not_simple(allow_loops=False)
if algorithm == "Cliquer":
from sage.graphs.cliquer import clique_number
return clique_number(self)
elif algorithm == "networkx":
import networkx
return networkx.graph_clique_number(self.networkx_graph(), cliques)
elif algorithm == "MILP":
return len(self.complement().independent_set(algorithm=algorithm, solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance))
elif algorithm == "mcqd":
return len(mcqd(self))
else:
raise NotImplementedError("Only 'networkx', 'MILP', 'Cliquer' and 'mcqd' are supported.")
@doc_index("Clique-related methods")
def cliques_number_of(self, vertices=None, cliques=None):
"""
Return a dictionary of the number of maximal cliques containing each
vertex, keyed by vertex.
A single value is returned if only one vertex is given as input.
.. NOTE::
Currently only implemented for undirected graphs. Use ``to_undirected``
to convert a digraph to an undirected graph.
INPUT:
- ``vertices`` -- the vertices to inspect (default is entire graph)
- ``cliques`` -- list of cliques (if already computed)
EXAMPLES::
sage: C = Graph('DJ{')
sage: C.cliques_number_of()
{0: 1, 1: 1, 2: 1, 3: 1, 4: 2}
sage: E = C.cliques_maximal()
sage: E
[[0, 4], [1, 2, 3, 4]]
sage: C.cliques_number_of(cliques=E)
{0: 1, 1: 1, 2: 1, 3: 1, 4: 2}
sage: F = graphs.Grid2dGraph(2,3)
sage: F.cliques_number_of()
{(0, 0): 2, (0, 1): 3, (0, 2): 2, (1, 0): 2, (1, 1): 3, (1, 2): 2}
sage: F.cliques_number_of(vertices=[(0, 1), (1, 2)])
{(0, 1): 3, (1, 2): 2}
sage: G = Graph({0:[1,2,3], 1:[2], 3:[0,1]})
sage: G.show(figsize=[2,2])
sage: G.cliques_number_of()
{0: 2, 1: 2, 2: 1, 3: 1}
"""
import networkx
return networkx.number_of_cliques(self.networkx_graph(), vertices, cliques)
@doc_index("Clique-related methods")
def cliques_get_max_clique_graph(self):
"""
Return the clique graph.
Vertices of the result are the maximal cliques of the graph, and edges
of the result are between maximal cliques with common members in the
original graph.
For more information, see the :wikipedia:`Clique_graph`.
.. NOTE::
Currently only implemented for undirected graphs. Use ``to_undirected``
to convert a digraph to an undirected graph.
EXAMPLES::
sage: (graphs.ChvatalGraph()).cliques_get_max_clique_graph()
Graph on 24 vertices
sage: ((graphs.ChvatalGraph()).cliques_get_max_clique_graph()).show(figsize=[2,2], vertex_size=20, vertex_labels=False)
sage: G = Graph({0:[1,2,3], 1:[2], 3:[0,1]})
sage: G.show(figsize=[2,2])
sage: G.cliques_get_max_clique_graph()
Graph on 2 vertices
sage: (G.cliques_get_max_clique_graph()).show(figsize=[2,2])
"""
import networkx
return Graph(networkx.make_max_clique_graph(self.networkx_graph(), create_using=networkx.MultiGraph()),
multiedges=False)
@doc_index("Clique-related methods")
def cliques_get_clique_bipartite(self, **kwds):
"""
Return a bipartite graph constructed such that maximal cliques are the
right vertices and the left vertices are retained from the given
graph. A left vertex and a right vertex are adjacent if the left
(graph) vertex belongs to the clique represented by the right vertex.
.. NOTE::
Currently only implemented for undirected graphs. Use ``to_undirected``
to convert a digraph to an undirected graph.
EXAMPLES::
sage: (graphs.ChvatalGraph()).cliques_get_clique_bipartite()
Bipartite graph on 36 vertices
sage: ((graphs.ChvatalGraph()).cliques_get_clique_bipartite()).show(figsize=[2,2], vertex_size=20, vertex_labels=False)
sage: G = Graph({0:[1,2,3], 1:[2], 3:[0,1]})
sage: G.show(figsize=[2,2])
sage: G.cliques_get_clique_bipartite()
Bipartite graph on 6 vertices
sage: (G.cliques_get_clique_bipartite()).show(figsize=[2,2])
"""
from .bipartite_graph import BipartiteGraph
import networkx
return BipartiteGraph(networkx.make_clique_bipartite(self.networkx_graph(), **kwds))
@doc_index("Algorithmically hard stuff")
@rename_keyword(deprecation=32238, verbosity='verbose')
def independent_set(self, algorithm="Cliquer", value_only=False, reduction_rules=True,
solver=None, verbose=0, *, integrality_tolerance=1e-3):
r"""
Return a maximum independent set.
An independent set of a graph is a set of pairwise non-adjacent
vertices. A maximum independent set is an independent set of maximum
cardinality. It induces an empty subgraph.
Equivalently, an independent set is defined as the complement of a
vertex cover.
For more information, see the
:wikipedia:`Independent_set_(graph_theory)` and the
:wikipedia:`Vertex_cover`.
INPUT:
- ``algorithm`` -- the algorithm to be used
* If ``algorithm = "Cliquer"`` (default), the problem is solved
using Cliquer [NO2003]_.
(see the :mod:`Cliquer modules <sage.graphs.cliquer>`)
* If ``algorithm = "MILP"``, the problem is solved through a Mixed
Integer Linear Program.
(see :class:`~sage.numerical.mip.MixedIntegerLinearProgram`)
* If ``algorithm = "mcqd"``, uses the MCQD solver
(`<http://www.sicmm.org/~konc/maxclique/>`_). Note that the MCQD
package must be installed.
- ``value_only`` -- boolean (default: ``False``); if set to ``True``,
only the size of a maximum independent set is returned. Otherwise,
a maximum independent set is returned as a list of vertices.
- ``reduction_rules`` -- (default: ``True``); specify if the reduction
rules from kernelization must be applied as pre-processing or not.
See [ACFLSS04]_ for more details. Note that depending on the instance,
it might be faster to disable reduction rules.
- ``solver`` -- string (default: ``None``); specify a Mixed Integer
Linear Programming (MILP) solver to be used. If set to ``None``, the
default one is used. For more information on MILP solvers and which
default solver is used, see the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
- ``integrality_tolerance`` -- float; parameter for use with MILP
solvers over an inexact base ring; see
:meth:`MixedIntegerLinearProgram.get_values`.
.. NOTE::
While Cliquer/MCQD are usually (and by far) the most efficient
implementations, the MILP formulation sometimes proves faster on
very "symmetrical" graphs.
EXAMPLES:
Using Cliquer::
sage: C = graphs.PetersenGraph()
sage: C.independent_set()
[0, 3, 6, 7]
As a linear program::
sage: C = graphs.PetersenGraph()
sage: len(C.independent_set(algorithm="MILP"))
4
.. PLOT::
g = graphs.PetersenGraph()
sphinx_plot(g.plot(partition=[g.independent_set()]))
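By complementarity with vertex covers, the two sizes always add up to
the order of the graph::
sage: C = graphs.PetersenGraph()
sage: len(C.independent_set()) + C.vertex_cover(value_only=True) == C.order()
True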
"""
my_cover = self.vertex_cover(algorithm=algorithm, value_only=value_only,
reduction_rules=reduction_rules,
solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance)
if value_only:
return self.order() - my_cover
else:
my_cover = set(my_cover)
return [u for u in self if u not in my_cover]
@doc_index("Algorithmically hard stuff")
@rename_keyword(deprecation=32238, verbosity='verbose')
def vertex_cover(self, algorithm="Cliquer", value_only=False,
reduction_rules=True, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
r"""
Return a minimum vertex cover of ``self``, represented by a set of vertices.
A minimum vertex cover of a graph is a set `S` of vertices such that
each edge is incident to at least one element of `S`, and such that `S`
is of minimum cardinality. For more information, see the
:wikipedia:`Vertex_cover`.
Equivalently, a vertex cover is defined as the complement of an
independent set.
As an optimization problem, it can be expressed as follows:
.. MATH::
\mbox{Minimize : }&\sum_{v\in G} b_v\\
\mbox{Such that : }&\forall (u,v) \in G.edges(), b_u+b_v\geq 1\\
&\forall x\in G, b_x\mbox{ is a binary variable}
INPUT:
- ``algorithm`` -- string (default: ``"Cliquer"``); indicating which
algorithm to use. It can be one of the following values.
- ``"Cliquer"`` will compute a minimum vertex cover using the Cliquer
package.
- ``"MILP"`` will compute a minimum vertex cover through a mixed
integer linear program.
- ``"mcqd"`` will use the MCQD solver
(`<http://www.sicmm.org/~konc/maxclique/>`_). Note that the MCQD
package must be installed.
- ``value_only`` -- boolean (default: ``False``); if set to ``True``,
only the size of a minimum vertex cover is returned. Otherwise,
a minimum vertex cover is returned as a list of vertices.
- ``reduction_rules`` -- (default: ``True``); specify if the reduction
rules from kernelization must be applied as pre-processing or not.
See [ACFLSS04]_ for more details. Note that depending on the instance,
it might be faster to disable reduction rules.
- ``solver`` -- string (default: ``None``); specify a Mixed Integer
Linear Programming (MILP) solver to be used. If set to ``None``, the
default one is used. For more information on MILP solvers and which
default solver is used, see the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
- ``integrality_tolerance`` -- float; parameter for use with MILP
solvers over an inexact base ring; see
:meth:`MixedIntegerLinearProgram.get_values`.
EXAMPLES:
On the Pappus graph::
sage: g = graphs.PappusGraph()
sage: g.vertex_cover(value_only=True)
9
.. PLOT::
g = graphs.PappusGraph()
sphinx_plot(g.plot(partition=[g.vertex_cover()]))
TESTS:
The two algorithms should return the same result::
sage: g = graphs.RandomGNP(10, .5)
sage: vc1 = g.vertex_cover(algorithm="MILP")
sage: vc2 = g.vertex_cover(algorithm="Cliquer")
sage: len(vc1) == len(vc2)
True
The cardinality of the vertex cover is unchanged when reduction rules
are used. First for trees::
sage: for i in range(20):
....: g = graphs.RandomTree(20)
....: vc1_set = g.vertex_cover()
....: vc1 = len(vc1_set)
....: vc2 = g.vertex_cover(value_only=True, reduction_rules=False)
....: if vc1 != vc2:
....: print("Error :", vc1, vc2)
....: print("With reduction rules :", vc1)
....: print("Without reduction rules :", vc2)
....: break
....: g.delete_vertices(vc1_set)
....: if g.size():
....: print("This thing is not a vertex cover !")
Then for random GNP graphs::
sage: for i in range(20):
....: g = graphs.RandomGNP(50, 0.08)
....: vc1_set = g.vertex_cover()
....: vc1 = len(vc1_set)
....: vc2 = g.vertex_cover(value_only=True, reduction_rules=False)
....: if vc1 != vc2:
....: print("Error :", vc1, vc2)
....: print("With reduction rules :", vc1)
....: print("Without reduction rules :", vc2)
....: break
....: g.delete_vertices(vc1_set)
....: if g.size():
....: print("This thing is not a vertex cover !")
Testing mcqd::
sage: graphs.PetersenGraph().vertex_cover(algorithm="mcqd", value_only=True) # optional - mcqd
6
Given a wrong algorithm::
sage: graphs.PetersenGraph().vertex_cover(algorithm="guess")
Traceback (most recent call last):
...
ValueError: the algorithm must be "Cliquer", "MILP" or "mcqd"
Ticket :trac:`24287` is fixed::
sage: G = Graph([(0,1)]*5 + [(1,2)]*2, multiedges=True)
sage: G.vertex_cover(reduction_rules=True, algorithm='MILP')
[1]
sage: G.vertex_cover(reduction_rules=False)
[1]
Ticket :trac:`25988` is fixed::
sage: B = BipartiteGraph(graphs.CycleGraph(6))
sage: B.vertex_cover(algorithm='Cliquer', reduction_rules=True)
[1, 3, 5]
"""
self._scream_if_not_simple(allow_multiple_edges=True)
g = self
ppset = []
folded_vertices = []
###################
# Reduction rules #
###################
if reduction_rules:
# We apply simple reduction rules that allow us to identify vertices
# that belong to an optimal vertex cover
# We first take a copy of the graph without multiple edges, if any.
g = Graph(data=self.edges(), format='list_of_edges',
multiedges=self.allows_multiple_edges())
g.allow_multiple_edges(False)
degree_at_most_two = {u for u in g if g.degree(u) <= 2}
while degree_at_most_two:
u = degree_at_most_two.pop()
du = g.degree(u)
if not du:
# RULE 1: isolated vertices are not part of the cover. We
# simply remove them from the graph. The degree of such
# vertices may have been reduced to 0 while applying other
# reduction rules
g.delete_vertex(u)
elif du == 1:
# RULE 2: If a vertex u has degree 1, we select its neighbor
# v and remove both u and v from g.
v = next(g.neighbor_iterator(u))
ppset.append(v)
g.delete_vertex(u)
for w in g.neighbor_iterator(v):
if g.degree(w) <= 3:
# The degree of w will be at most two after the
# deletion of v
degree_at_most_two.add(w)
g.delete_vertex(v)
degree_at_most_two.discard(v)
elif du == 2:
v,w = g.neighbors(u)
if g.has_edge(v, w):
# RULE 3: If the neighbors v and w of a degree 2 vertex
# u are adjacent, then we select both v and w and remove
# u, v, and w from g.
ppset.append(v)
ppset.append(w)
g.delete_vertex(u)
neigh = set(g.neighbors(v) + g.neighbors(w)).difference([v, w])
g.delete_vertex(v)
g.delete_vertex(w)
for z in neigh:
if g.degree(z) <= 2:
degree_at_most_two.add(z)
else:
# RULE 4, folded vertices: If the neighbors v and w of a
# degree 2 vertex u are not adjacent, then we contract the
# edges (u, v), (u, w). Then, if the solution contains u,
# we replace it with v and w. Otherwise, we leave u in the
# solution.
neigh = set(g.neighbors(v) + g.neighbors(w)).difference([u, v, w])
g.delete_vertex(v)
g.delete_vertex(w)
for z in neigh:
g.add_edge(u,z)
folded_vertices.append((u, v, w))
if g.degree(u) <= 2:
degree_at_most_two.add(u)
degree_at_most_two.discard(v)
degree_at_most_two.discard(w)
# RULE 5:
# TODO: add extra reduction rules
##################
# Main Algorithm #
##################
if not g.order():
# Reduction rules were sufficient to get the solution
size_cover_g = 0
cover_g = set()
elif algorithm == "Cliquer" or algorithm == "mcqd":
if g.has_multiple_edges() and not reduction_rules:
g = copy(g)
g.allow_multiple_edges(False)
independent = g.complement().clique_maximum(algorithm=algorithm)
if value_only:
size_cover_g = g.order() - len(independent)
else:
cover_g = set(uu for uu in g if uu not in independent)
elif algorithm == "MILP":
from sage.numerical.mip import MixedIntegerLinearProgram
p = MixedIntegerLinearProgram(maximization=False, solver=solver)
b = p.new_variable(binary=True)
# minimizes the number of vertices in the set
p.set_objective(p.sum(b[v] for v in g))
# an edge contains at least one vertex of the minimum vertex cover
for u,v in g.edge_iterator(labels=None):
p.add_constraint(b[u] + b[v], min=1)
p.solve(log=verbose)
b = p.get_values(b, convert=bool, tolerance=integrality_tolerance)
if value_only:
size_cover_g = sum(1 for v in g if b[v])
else:
cover_g = set(v for v in g if b[v])
else:
raise ValueError('the algorithm must be "Cliquer", "MILP" or "mcqd"')
#########################
# Returning the results #
#########################
# We finally reconstruct the solution according to the reduction rules
if value_only:
return len(ppset) + len(folded_vertices) + size_cover_g
else:
# RULES 2 and 3:
cover_g.update(ppset)
# RULE 4:
folded_vertices.reverse()
for u,v,w in folded_vertices:
if u in cover_g:
cover_g.discard(u)
cover_g.add(v)
cover_g.add(w)
else:
cover_g.add(u)
return list(cover_g)
@doc_index("Connectivity, orientations, trees")
def ear_decomposition(self):
r"""
Return an Ear decomposition of the graph.
An ear of an undirected graph `G` is a path `P` where the two endpoints
of the path may coincide (i.e., form a cycle), but where otherwise no
repetition of edges or vertices is allowed, so every internal vertex of
`P` has degree two in `P`.
An ear decomposition of an undirected graph `G` is a partition of its
set of edges into a sequence of ears, such that the one or two endpoints
of each ear belong to earlier ears in the sequence and such that the
internal vertices of each ear do not belong to any earlier ear.
For more information, see the :wikipedia:`Ear_decomposition`.
This method implements the linear time algorithm presented in
[Sch2013]_.
OUTPUT:
- A nested list representing the cycles and chains of the ear
decomposition of the graph.
EXAMPLES:
Ear decomposition of an outer planar graph of order 13::
sage: g = Graph('LlCG{O@?GBOMW?')
sage: g.ear_decomposition()
[[0, 3, 2, 1, 0],
[0, 7, 4, 3],
[0, 11, 9, 8, 7],
[1, 12, 2],
[3, 6, 5, 4],
[4, 6],
[7, 10, 8],
[7, 11],
[8, 11]]
Ear decomposition of a biconnected graph::
sage: g = graphs.CycleGraph(4)
sage: g.ear_decomposition()
[[0, 3, 2, 1, 0]]
Ear decomposition of a connected but not biconnected graph::
sage: G = Graph()
sage: G.add_cycle([0,1,2])
sage: G.add_edge(0,3)
sage: G.add_cycle([3,4,5,6])
sage: G.ear_decomposition()
[[0, 2, 1, 0], [3, 6, 5, 4, 3]]
The ear decomposition of a multigraph with loops is the same as the ear
decomposition of the underlying simple graph::
sage: g = graphs.BullGraph()
sage: g.allow_multiple_edges(True)
sage: g.add_edges(g.edges())
sage: g.allow_loops(True)
sage: u = g.random_vertex()
sage: g.add_edge(u, u)
sage: g
Bull graph: Looped multi-graph on 5 vertices
sage: h = g.to_simple()
sage: g.ear_decomposition() == h.ear_decomposition()
True
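Since the ears partition the edge set, their edge counts (one less than
the number of listed vertices, for each ear) add up to the size of the
graph::
sage: g = graphs.PetersenGraph()
sage: sum(len(ear) - 1 for ear in g.ear_decomposition()) == g.size()
True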
TESTS::
sage: g=Graph()
sage: g
Graph on 0 vertices
sage: g.ear_decomposition()
Traceback (most recent call last):
...
ValueError: ear decomposition is defined for graphs of order at least 3
"""
# Ear decomposition is not defined for graphs of order < 3.
if self.order() < 3:
raise ValueError("ear decomposition is defined for graphs of order at least 3")
# List to store the order in which dfs visits vertices.
dfs_order = []
# Set of vertices seen during the DFS traversal of the graph.
seen = set()
# Set of vertices already traversed while extracting chains from the
# DFS tree.
traversed = set()
# Dictionary to store parent vertex of all the visited vertices.
# Initialized for the first vertex to be visited.
parent = {next(self.vertex_iterator()): None}
# Dictionary mapping each vertex to its visit time in the DFS traversal.
value = {}
# List to store all the chains and cycles of the input graph G.
chains = []
# DFS(): performs a depth first search on the input graph G and
# stores the DFS tree in the parent dictionary.
def DFS(v):
"""
Depth first search step from vertex v.
"""
# Mark v as visited and record its position in the DFS order
seen.add(v)
dfs_order.append(v)
# Traverse through all the neighbors of v
for u in self.neighbor_iterator(v):
# if any neighbor is not visited, enter
if u not in seen:
# Set the parent of u in DFS tree as v and continue
# exploration
parent[u] = v
DFS(u)
# traverse(): use a non-tree edge (an edge of G - T) to find a cycle
# or a chain by walking up the DFS tree.
def traverse(start, pointer):
# Make the first end of non-tree edge visited
traversed.add(start)
chain = [start]
# Walk up the DFS tree from pointer, appending each vertex to the
# chain, until an already traversed vertex is reached
while True:
chain.append(pointer)
if pointer in traversed:
break
traversed.add(pointer)
pointer = parent[pointer]
chains.append(chain)
# Perform ear decomposition on each connected component of input graph.
for v in self:
if v not in seen:
# Start a depth first search from v
DFS(v)
value = {u:i for i,u in enumerate(dfs_order)}
# Traverse all the non-tree edges, in DFS order
for u in dfs_order:
for neighbor in self.neighbor_iterator(u):
if value[u] < value[neighbor] and u != parent[neighbor]:
traverse(u, neighbor)
dfs_order = []
return chains
@doc_index("Clique-related methods")
def cliques_vertex_clique_number(self, algorithm="cliquer", vertices=None,
cliques=None):
"""
Return a dictionary of sizes of the largest maximal cliques containing
each vertex, keyed by vertex.
If ``vertices`` is a single vertex, a single value is returned.
.. NOTE::
Currently only implemented for undirected graphs. Use to_undirected
to convert a digraph to an undirected graph.
INPUT:
- ``algorithm`` -- either ``cliquer`` or ``networkx``
- ``cliquer`` -- This wraps the C program Cliquer [NO2003]_.
- ``networkx`` -- This function is based on NetworkX's implementation
of the Bron and Kerbosch Algorithm [BK1973]_.
- ``vertices`` -- the vertices to inspect (default is entire graph).
Ignored unless ``algorithm=='networkx'``.
- ``cliques`` -- list of cliques (if already computed). Ignored unless
``algorithm=='networkx'``.
EXAMPLES::
sage: C = Graph('DJ{')
sage: C.cliques_vertex_clique_number()
{0: 2, 1: 4, 2: 4, 3: 4, 4: 4}
sage: E = C.cliques_maximal()
sage: E
[[0, 4], [1, 2, 3, 4]]
sage: C.cliques_vertex_clique_number(cliques=E,algorithm="networkx")
{0: 2, 1: 4, 2: 4, 3: 4, 4: 4}
sage: F = graphs.Grid2dGraph(2,3)
sage: F.cliques_vertex_clique_number(algorithm="networkx")
{(0, 0): 2, (0, 1): 2, (0, 2): 2, (1, 0): 2, (1, 1): 2, (1, 2): 2}
sage: F.cliques_vertex_clique_number(vertices=[(0, 1), (1, 2)])
{(0, 1): 2, (1, 2): 2}
sage: G = Graph({0:[1,2,3], 1:[2], 3:[0,1]})
sage: G.show(figsize=[2,2])
sage: G.cliques_vertex_clique_number()
{0: 3, 1: 3, 2: 3, 3: 3}
"""
if algorithm == "cliquer":
from sage.graphs.cliquer import clique_number
if vertices is None:
vertices = self
value = {}
for v in vertices:
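# The largest clique containing v consists of v together with a maximum
# clique of the subgraph induced by the neighbors of v, hence the 1 +.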
value[v] = 1 + clique_number(self.subgraph(self.neighbors(v)))
return value
elif algorithm == "networkx":
import networkx
return networkx.node_clique_number(self.networkx_graph(), vertices, cliques)
else:
raise NotImplementedError("Only 'networkx' and 'cliquer' are supported.")
@doc_index("Clique-related methods")
def cliques_containing_vertex(self, vertices=None, cliques=None):
"""
Return the cliques containing each vertex, represented as a dictionary
of lists of lists, keyed by vertex.
If ``vertices`` is a single vertex, a single list is returned.
.. NOTE::
Currently only implemented for undirected graphs. Use to_undirected
to convert a digraph to an undirected graph.
INPUT:
- ``vertices`` -- the vertices to inspect (default is entire graph)
- ``cliques`` -- list of cliques (if already computed)
EXAMPLES::
sage: C = Graph('DJ{')
sage: C.cliques_containing_vertex()
{0: [[4, 0]], 1: [[4, 1, 2, 3]], 2: [[4, 1, 2, 3]], 3: [[4, 1, 2, 3]], 4: [[4, 0], [4, 1, 2, 3]]}
sage: E = C.cliques_maximal()
sage: E
[[0, 4], [1, 2, 3, 4]]
sage: C.cliques_containing_vertex(cliques=E)
{0: [[0, 4]], 1: [[1, 2, 3, 4]], 2: [[1, 2, 3, 4]], 3: [[1, 2, 3, 4]], 4: [[0, 4], [1, 2, 3, 4]]}
sage: G = Graph({0:[1,2,3], 1:[2], 3:[0,1]})
sage: G.show(figsize=[2,2])
sage: G.cliques_containing_vertex()
{0: [[0, 1, 2], [0, 1, 3]], 1: [[0, 1, 2], [0, 1, 3]], 2: [[0, 1, 2]], 3: [[0, 1, 3]]}
Since each maximal clique of a 2-dimensional grid graph corresponds to an
edge, the number of cliques containing a given vertex equals its degree::
sage: F = graphs.Grid2dGraph(2,3)
sage: d = F.cliques_containing_vertex()
sage: all(F.degree(u) == len(cliques) for u,cliques in d.items())
True
sage: d = F.cliques_containing_vertex(vertices=[(0, 1)])
sage: list(d)
[(0, 1)]
sage: sorted(sorted(x for x in L) for L in d[(0, 1)])
[[(0, 0), (0, 1)], [(0, 1), (0, 2)], [(0, 1), (1, 1)]]
"""
import networkx
return networkx.cliques_containing_node(self.networkx_graph(), vertices, cliques)
@doc_index("Clique-related methods")
def clique_complex(self):
"""
Return the clique complex of self.
This is the largest simplicial complex on the vertices of self whose
1-skeleton is self.
This only makes sense for undirected simple graphs.
EXAMPLES::
sage: g = Graph({0:[1,2],1:[2],4:[]})
sage: g.clique_complex()
Simplicial complex with vertex set (0, 1, 2, 4) and facets {(4,), (0, 1, 2)}
sage: h = Graph({0:[1,2,3,4],1:[2,3,4],2:[3]})
sage: x = h.clique_complex()
sage: x
Simplicial complex with vertex set (0, 1, 2, 3, 4) and facets {(0, 1, 4), (0, 1, 2, 3)}
sage: i = x.graph()
sage: i==h
True
sage: x==i.clique_complex()
True
"""
if self.is_directed() or self.has_loops() or self.has_multiple_edges():
raise ValueError("Self must be an undirected simple graph to have a clique complex.")
import sage.topology.simplicial_complex
C = sage.topology.simplicial_complex.SimplicialComplex(self.cliques_maximal(), maximality_check=True)
C._graph = self
return C
@doc_index("Clique-related methods")
def clique_polynomial(self, t=None):
r"""
Return the clique polynomial of self.
This is the polynomial where the coefficient of `t^n` is the number of
cliques in the graph with `n` vertices. The constant term of the clique
polynomial is always taken to be one.
EXAMPLES::
sage: g = Graph()
sage: g.clique_polynomial()
1
sage: g = Graph({0:[1]})
sage: g.clique_polynomial()
t^2 + 2*t + 1
sage: g = graphs.CycleGraph(4)
sage: g.clique_polynomial()
4*t^2 + 4*t + 1
"""
if t is None:
R = PolynomialRing(ZZ, 't')
t = R.gen()
number_of = [0]*(self.order() + 1)
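# The cliques of self are exactly the independent sets of its complement,
# so we enumerate them via IndependentSets with complement=True.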
for x in IndependentSets(self, complement=True):
number_of[len(x)] += 1
return sum(coeff*t**i for i,coeff in enumerate(number_of) if coeff)
### Miscellaneous
@doc_index("Leftovers")
def cores(self, k=None, with_labels=False):
r"""
Return the core number for each vertex in an ordered list.
(for cores in the sense of graph homomorphisms, see the
:meth:`Graph.has_homomorphism_to` method)
DEFINITIONS:
* *K-cores* in graph theory were introduced by Seidman in 1983 and by
Bollobas in 1984 as a method of (destructively) simplifying graph
topology to aid in analysis and visualization. They have been more
recently defined as the following by Batagelj et al:
*Given a graph `G` with vertices set `V` and edges set `E`, the
`k`-core of `G` is the graph obtained from `G` by recursively removing
the vertices with degree less than `k`, for as long as there are any.*
This operation can be useful to filter or to study some properties of
the graph. For instance, when you compute the 2-core of a graph `G`, you
remove all the vertices lying in a tree part of the graph (a tree being a
connected graph with no cycles). See the :wikipedia:`K-core`.
[PSW1996]_ defines a `k`-core of `G` as the largest subgraph (it is
unique) of `G` with minimum degree at least `k`.
* Core number of a vertex
The core number of a vertex `v` is the largest integer `k` such that
`v` belongs to the `k`-core of `G`.
* Degeneracy
The *degeneracy* of a graph `G`, usually denoted `\delta^*(G)`, is the
smallest integer `k` such that the graph `G` can be reduced to the
empty graph by iteratively removing vertices of degree `\leq k`.
Equivalently, `\delta^*(G)=k` if `k` is the smallest integer such that
the `k`-core of `G` is empty.
IMPLEMENTATION:
This implementation is based on the NetworkX implementation of the
algorithm described in [BZ2003]_.
INPUT:
- ``k`` -- integer (default: ``None``);
* If ``k = None`` (default), returns the core number for each vertex.
* If ``k`` is an integer, returns a pair ``(ordering, core)``, where
``core`` is the list of vertices in the `k`-core of ``self``, and
``ordering`` is an elimination order for the other vertices such
that each vertex is of degree strictly less than `k` when it is to
be eliminated from the graph.
- ``with_labels`` -- boolean (default: ``False``); when set to
``False``, and ``k = None``, the method returns a list whose `i`-th
element is the core number of the `i`-th vertex. When set to ``True``,
the method returns a dictionary whose keys are vertices, and whose
values are the corresponding core numbers.
.. SEEALSO::
* Graph cores is also a notion related to graph homomorphisms. For
this second meaning, see :meth:`Graph.has_homomorphism_to`.
EXAMPLES::
sage: (graphs.FruchtGraph()).cores()
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
sage: (graphs.FruchtGraph()).cores(with_labels=True)
{0: 3, 1: 3, 2: 3, 3: 3, 4: 3, 5: 3, 6: 3, 7: 3, 8: 3, 9: 3, 10: 3, 11: 3}
sage: set_random_seed(0)
sage: a = random_matrix(ZZ, 20, x=2, sparse=True, density=.1)
sage: b = Graph(20)
sage: b.add_edges(a.nonzero_positions(), loops=False)
sage: cores = b.cores(with_labels=True); cores
{0: 3, 1: 3, 2: 3, 3: 3, 4: 2, 5: 2, 6: 3, 7: 1, 8: 3, 9: 3, 10: 3, 11: 3, 12: 3, 13: 3, 14: 2, 15: 3, 16: 3, 17: 3, 18: 3, 19: 3}
sage: [v for v,c in cores.items() if c >= 2] # the vertices in the 2-core
[0, 1, 2, 3, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
Checking the 2-core of a random lobster is indeed the empty set::
sage: g = graphs.RandomLobster(20, .5, .5)
sage: ordering, core = g.cores(2)
sage: len(core) == 0
True
"""
self._scream_if_not_simple()
# compute the degrees of each vertex
degrees = self.degree(labels=True)
# Sort vertices by degree. Store in a list and keep track of where a
# specific degree starts (effectively, the list is sorted by bins).
verts = sorted(degrees.keys(), key=lambda x: degrees[x])
bin_boundaries = [0]
curr_degree = 0
for i,v in enumerate(verts):
if degrees[v] > curr_degree:
bin_boundaries.extend([i] * (degrees[v] - curr_degree))
curr_degree = degrees[v]
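# Example: degrees [1, 1, 2, 2] give bin_boundaries [0, 0, 2], i.e.,
# bin_boundaries[d] is the position in verts of the first vertex with
# degree at least d.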
vert_pos = {v: pos for pos,v in enumerate(verts)}
# Set up initial guesses for core and lists of neighbors.
core = degrees
nbrs = {v: set(self.neighbors(v)) for v in self}
# form vertex core building up from smallest
for v in verts:
# Vertices are sorted by increasing degree, so once the current vertex
# has core value at least k, so do all remaining ones; if k is not None
# we can return our answer
if k is not None and core[v] >= k:
return verts[:vert_pos[v]], verts[vert_pos[v]:]
for u in nbrs[v]:
if core[u] > core[v]:
nbrs[u].remove(v)
# Cleverly move u to the end of the next smallest bin (i.e.,
# subtract one from the degree of u). We do this by swapping
# u with the first vertex in the bin that contains u, then
# incrementing the bin boundary for the bin that contains u.
pos = vert_pos[u]
bin_start = bin_boundaries[core[u]]
vert_pos[u] = bin_start
vert_pos[verts[bin_start]] = pos
verts[bin_start],verts[pos] = verts[pos],verts[bin_start]
bin_boundaries[core[u]] += 1
core[u] -= 1
if k is not None:
return verts, []
if with_labels:
return core
else:
return list(core.values())
@doc_index("Leftovers")
def modular_decomposition(self, algorithm='habib', style='tuple'):
r"""
Return the modular decomposition of the current graph.
A module of an undirected graph is a subset of vertices such that every
vertex outside the module is either connected to all members of the
module or to none of them. Every graph that has a nontrivial module can
be partitioned into modules, and the increasingly fine partitions into
modules form a tree. The ``modular_decomposition`` function returns
that tree.
INPUT:
- ``algorithm`` -- string (default: ``'habib'``); specifies the
algorithm to use among:
- ``'tedder'`` -- linear time algorithm of [TCHP2008]_
- ``'habib'`` -- `O(n^3)` algorithm of [HM1979]_. This algorithm is
much simpler and so possibly less prone to errors.
- ``style`` -- string (default: ``'tuple'``); specifies the output
format:
- ``'tuple'`` -- as nested tuples.
- ``'tree'`` -- as :class:`~sage.combinat.rooted_tree.LabelledRootedTree`.
OUTPUT:
A pair of two values (recursively encoding the decomposition):
* The type of the current module:
* ``"PARALLEL"``
* ``"PRIME"``
* ``"SERIES"``
* The list of submodules (as list of pairs ``(type, list)``,
recursively...) or the vertex's name if the module is a singleton.
Crash course on modular decomposition:
A module `M` of a graph `G` is a proper subset of its vertices such
that for all `u \in V(G)-M, v,w\in M` the relation `u \sim v
\Leftrightarrow u \sim w` holds, where `\sim` denotes the adjacency
relation in `G`. Equivalently, `M \subset V(G)` is a module if all its
vertices have the same adjacency relations with each vertex outside of
the module (vertex by vertex).
Hence, for a set like a module, it is very easy to encode the
information of the adjacencies between the vertices inside and outside
the module -- we can actually add a new vertex `v_M` to our graph
representing our module `M`, and let `v_M` be adjacent to `u\in V(G)-M`
if and only if some `v\in M` (and hence all the vertices contained in
the module) is adjacent to `u`. We can now independently (and
recursively) study the structure of our module `M` and the new graph
`G-M+\{v_M\}`, without any loss of information.
Here are two very simple modules:
* A connected component `C` (or the union of some --but not all-- of
them) of a disconnected graph `G`, for instance, is a module, as no
vertex of `C` has a neighbor outside of it.
* An anticomponent `C` (or the union of some --but not all-- of them) of
a non-anticonnected graph `G`, for the same reason (it is just the
complement of the previous graph!).
These modules being of special interest, the disjoint union of graphs is
called a Parallel composition, and the complement of a disjoint union is
called a Series composition. A graph whose only modules are singletons
is called Prime.
For more information on modular decomposition, in particular for an
explanation of the terms "Parallel," "Prime" and "Series," see the
:wikipedia:`Modular_decomposition`.
You may also be interested in the survey from Michel Habib and
Christophe Paul entitled "A survey on Algorithmic aspects of modular
decomposition" [HP2010]_.
EXAMPLES:
The Bull Graph is prime::
sage: graphs.BullGraph().modular_decomposition()
(PRIME, [1, 2, 0, 3, 4])
The Petersen Graph too::
sage: graphs.PetersenGraph().modular_decomposition()
(PRIME, [1, 4, 5, 0, 2, 6, 3, 7, 8, 9])
This clique on 5 vertices with 2 pendant edges, though, has a more
interesting decomposition::
sage: g = graphs.CompleteGraph(5)
sage: g.add_edge(0,5)
sage: g.add_edge(0,6)
sage: g.modular_decomposition(algorithm='habib')
(SERIES, [(PARALLEL, [(SERIES, [1, 2, 3, 4]), 5, 6]), 0])
We get an equivalent tree when we use the algorithm of [TCHP2008]_::
sage: g.modular_decomposition(algorithm='tedder')
(SERIES, [(PARALLEL, [(SERIES, [4, 3, 2, 1]), 5, 6]), 0])
We can choose output to be a
:class:`~sage.combinat.rooted_tree.LabelledRootedTree`::
sage: g.modular_decomposition(style='tree')
SERIES[0[], PARALLEL[5[], 6[], SERIES[1[], 2[], 3[], 4[]]]]
sage: ascii_art(g.modular_decomposition(style='tree'))
__SERIES
/ /
0 ___PARALLEL
/ / /
5 6 __SERIES
/ / / /
1 2 3 4
ALGORITHM:
When ``algorithm='tedder'`` this function uses the Python implementation
of the algorithm published by Marc Tedder, Derek Corneil, Michel Habib
and Christophe Paul [TCHP2008]_. When ``algorithm='habib'`` it uses the
algorithm of M. Habib and M. Maurer [HM1979]_.
.. SEEALSO::
- :meth:`is_prime` -- Tests whether a graph is prime.
- :class:`~sage.combinat.rooted_tree.LabelledRootedTree`.
TESTS:
Empty graph::
sage: graphs.EmptyGraph().modular_decomposition(algorithm='habib')
()
sage: graphs.EmptyGraph().modular_decomposition(algorithm='tedder')
()
sage: graphs.EmptyGraph().modular_decomposition(algorithm='habib', style='tree')
None[]
sage: graphs.EmptyGraph().modular_decomposition(algorithm='tedder', style='tree')
None[]
Singleton Vertex::
sage: Graph(1).modular_decomposition(algorithm='habib')
(PRIME, [0])
sage: Graph(1).modular_decomposition(algorithm='tedder')
(PRIME, [0])
sage: Graph(1).modular_decomposition(algorithm='habib', style='tree')
PRIME[0[]]
sage: Graph(1).modular_decomposition(algorithm='tedder', style='tree')
PRIME[0[]]
Vertices may be arbitrary --- check that :trac:`24898` is fixed::
sage: md = Graph({(1,2):[(2,3)],(2,3):[(1,2)]}).modular_decomposition()
sage: md[0]
SERIES
sage: sorted(md[1])
[(1, 2), (2, 3)]
Unknown algorithm::
sage: graphs.PathGraph(2).modular_decomposition(algorithm='abc')
Traceback (most recent call last):
...
ValueError: algorithm must be 'habib' or 'tedder'
Unknown style::
sage: graphs.PathGraph(2).modular_decomposition(style='xyz')
Traceback (most recent call last):
...
ValueError: style must be 'tuple' or 'tree'
"""
from sage.graphs.graph_decompositions.modular_decomposition import (modular_decomposition,
NodeType,
habib_maurer_algorithm,
create_prime_node,
create_normal_node)
self._scream_if_not_simple()
if not self.order():
D = None
elif self.order() == 1:
D = create_prime_node()
D.children.append(create_normal_node(self.vertices()[0]))
else:
if algorithm == 'habib':
D = habib_maurer_algorithm(self)
elif algorithm == 'tedder':
D = modular_decomposition(self)
else:
raise ValueError("algorithm must be 'habib' or 'tedder'")
if style == 'tuple':
if D is None:
return tuple()
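# Convert the internal decomposition tree to nested tuples: a NORMAL
# leaf becomes the vertex it carries, any other node becomes a pair
# (node type, list of relabeled children).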
def relabel(x):
if x.node_type == NodeType.NORMAL:
return x.children[0]
else:
return x.node_type, [relabel(y) for y in x.children]
return relabel(D)
elif style == 'tree':
from sage.combinat.rooted_tree import LabelledRootedTree
if D is None:
return LabelledRootedTree([])
def to_tree(x):
if x.node_type == NodeType.NORMAL:
return LabelledRootedTree([], label=x.children[0])
else:
return LabelledRootedTree([to_tree(y) for y in x.children], label=x.node_type)
return to_tree(D)
else:
raise ValueError("style must be 'tuple' or 'tree'")
@doc_index("Graph properties")
def is_polyhedral(self):
"""
Check whether the graph is the graph of a polyhedron.
By a theorem of Steinitz (Satz 43, p. 77 of [St1922]_), graphs of
three-dimensional polyhedra are exactly the simple 3-vertex-connected
planar graphs.
EXAMPLES::
sage: C = graphs.CubeGraph(3)
sage: C.is_polyhedral()
True
sage: K33=graphs.CompleteBipartiteGraph(3, 3)
sage: K33.is_polyhedral()
False
sage: graphs.CycleGraph(17).is_polyhedral()
False
sage: [i for i in range(9) if graphs.CompleteGraph(i).is_polyhedral()]
[4]
.. SEEALSO::
* :meth:`~sage.graphs.generic_graph.GenericGraph.vertex_connectivity`
* :meth:`~sage.graphs.generic_graph.GenericGraph.is_planar`
* :meth:`is_circumscribable`
* :meth:`is_inscribable`
* :wikipedia:`Polyhedral_graph`
TESTS::
sage: G = Graph([[1, 2, 3, 4], [[1, 2], [1,1]]], loops=True)
sage: G.is_polyhedral()
False
sage: G = Graph([[1, 2, 3], [[1, 2], [3, 1], [1, 2], [2, 3]]], multiedges=True)
sage: G.is_polyhedral()
False
"""
return (not self.has_loops()
and not self.has_multiple_edges()
and self.vertex_connectivity(k=3)
and self.is_planar())
@doc_index("Graph properties")
def is_circumscribable(self, solver="ppl", verbose=0):
"""
Test whether the graph is the graph of a circumscribed polyhedron.
A polyhedron is circumscribed if all of its facets are tangent to a
sphere. By a theorem of Rivin ([HRS1993]_), this can be checked by
solving a linear program that assigns weights between 0 and 1/2 on each
edge of the polyhedron, so that the weights on any face add to exactly
one and the weights on any non-facial cycle add to more than one. The
polyhedron can be circumscribed if and only if such an assignment exists.
INPUT:
- ``solver`` -- (default: ``"ppl"``); specify a Linear Program (LP)
solver to be used. If set to ``None``, the default one is used. For
more information on LP solvers and which default solver is used, see
the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
EXAMPLES::
sage: C = graphs.CubeGraph(3)
sage: C.is_circumscribable()
True
sage: O = graphs.OctahedralGraph()
sage: O.is_circumscribable()
True
sage: TT = polytopes.truncated_tetrahedron().graph()
sage: TT.is_circumscribable()
False
Stellating a face of the octahedral graph yields a graph that is not
circumscribable::
sage: f = set(flatten(choice(O.faces())))
sage: O.add_edges([[6, i] for i in f])
sage: O.is_circumscribable()
False
.. SEEALSO::
* :meth:`is_polyhedral`
* :meth:`is_inscribable`
TESTS::
sage: G = graphs.CompleteGraph(5)
sage: G.is_circumscribable()
Traceback (most recent call last):
...
NotImplementedError: this method only works for polyhedral graphs
.. TODO::
Allow the use of other, inexact but faster solvers.
"""
if not self.is_polyhedral():
raise NotImplementedError('this method only works for polyhedral graphs')
from sage.numerical.mip import MixedIntegerLinearProgram
from sage.numerical.mip import MIPSolverException
# For a description of the algorithm see paper by Rivin and:
# https://www.ics.uci.edu/~eppstein/junkyard/uninscribable/
# In order to simulate strict inequalities in the following LP, we
# introduce a variable c[0] and maximize it. If it is positive, then
# the LP has a solution, such that all inequalities are strict
# after removing the auxiliary variable c[0].
M = MixedIntegerLinearProgram(maximization=True, solver=solver)
e_var = M.new_variable(nonnegative=True)
c = M.new_variable()
M.set_min(c[0], -1)
M.set_max(c[0], 1)
M.set_objective(c[0])
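# The two constraints below force c[0] <= e_var[fe] <= 1/2 - c[0], so a
# solution with positive objective value c[0] has all edge weights
# strictly between 0 and 1/2.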
for e in self.edge_iterator(labels=0):
fe = frozenset(e)
M.set_max(e_var[fe], ZZ(1)/ZZ(2))
M.add_constraint(e_var[fe] - c[0], min=0)
M.add_constraint(e_var[fe] + c[0], max=ZZ(1)/ZZ(2))
# The faces are completely determined by the graph structure:
# for polyhedral graph, there is only one way to choose the faces.
# We add an equality constraint for each face.
efaces = self.faces()
vfaces = set(frozenset([e[0] for e in face]) for face in efaces)
for edges in efaces:
M.add_constraint(M.sum(e_var[frozenset(e)] for e in edges) == 1)
# In order to generate all simple cycles of G, which are not faces,
# we use the "all_simple_cycles" method of directed graphs, generating
# each cycle twice (once in each direction). The set below makes sure
# that only one direction gives rise to an (in)equality
D = self.to_directed()
inequality_constraints = set()
for cycle in D.all_simple_cycles():
if len(cycle) > 3:
scycle = frozenset(cycle)
if scycle not in vfaces:
edges = (frozenset((cycle[i], cycle[i+1])) for i in range(len(cycle)-1))
inequality_constraints.add(frozenset(edges))
for ieq in inequality_constraints:
M.add_constraint(M.sum(e_var[fe] for fe in ieq) - c[0] >= 1)
try:
solution = M.solve(log=verbose)
except MIPSolverException as msg:
if str(msg) == "PPL : There is no feasible solution":
return False
# Re-raise unexpected solver errors instead of falling through to the
# return below with `solution` undefined
raise
return solution > 0
@doc_index("Graph properties")
def is_inscribable(self, solver="ppl", verbose=0):
"""
Test whether the graph is the graph of an inscribed polyhedron.
A polyhedron is inscribed if all of its vertices are on a sphere.
This is dual to the notion of circumscribed polyhedron: a polyhedron is
inscribed if and only if its polar dual is circumscribed and hence a
graph is inscribable if and only if its planar dual is circumscribable.
INPUT:
- ``solver`` -- (default: ``"ppl"``); specify a Linear Program (LP)
solver to be used. If set to ``None``, the default one is used. For
more information on LP solvers and which default solver is used, see
the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
EXAMPLES::
sage: H = graphs.HerschelGraph()
sage: H.is_inscribable() # long time (> 1 sec)
False
sage: H.planar_dual().is_inscribable() # long time (> 1 sec)
True
sage: C = graphs.CubeGraph(3)
sage: C.is_inscribable()
True
Cutting off a vertex from the cube yields an uninscribable graph::
sage: C = graphs.CubeGraph(3)
sage: v = next(C.vertex_iterator())
sage: triangle = [_ + v for _ in C.neighbors(v)]
sage: C.add_edges(Combinations(triangle, 2))
sage: C.add_edges(zip(triangle, C.neighbors(v)))
sage: C.delete_vertex(v)
sage: C.is_inscribable()
False
Breaking a face of the cube yields an uninscribable graph::
sage: C = graphs.CubeGraph(3)
sage: face = choice(C.faces())
sage: C.add_edge([face[0][0], face[2][0]])
sage: C.is_inscribable()
False
.. SEEALSO::
* :meth:`is_polyhedral`
* :meth:`is_circumscribable`
TESTS::
sage: G = graphs.CompleteBipartiteGraph(3,3)
sage: G.is_inscribable()
Traceback (most recent call last):
...
NotImplementedError: this method only works for polyhedral graphs
"""
if not self.is_polyhedral():
raise NotImplementedError('this method only works for polyhedral graphs')
return self.planar_dual().is_circumscribable(solver=solver, verbose=verbose)
@doc_index("Graph properties")
def is_prime(self, algorithm='habib'):
r"""
Test whether the current graph is prime.
INPUT:
- ``algorithm`` -- (default: ``'habib'``) specifies the algorithm to
use among:
- ``'tedder'`` -- Use the linear time algorithm of [TCHP2008]_.
- ``'habib'`` -- Use the `O(n^3)` algorithm of [HM1979]_. This is
probably slower, but is much simpler and so possibly less error
prone.
A graph is prime if all its modules are trivial (i.e. empty, all of the
graph or singletons) -- see :meth:`modular_decomposition`.
EXAMPLES:
The Petersen Graph and the Bull Graph are both prime::
sage: graphs.PetersenGraph().is_prime()
True
sage: graphs.BullGraph().is_prime()
True
Though quite obviously, the disjoint union of them is not::
sage: (graphs.PetersenGraph() + graphs.BullGraph()).is_prime()
False
TESTS::
sage: graphs.EmptyGraph().is_prime()
True
"""
from sage.graphs.graph_decompositions.modular_decomposition import NodeType
if self.order() <= 1:
return True
D = self.modular_decomposition(algorithm=algorithm)
return D[0] == NodeType.PRIME and len(D[1]) == self.order()
def _gomory_hu_tree(self, vertices, algorithm=None):
r"""
Return a Gomory-Hu tree associated to self.
This function is the private counterpart of ``gomory_hu_tree()``, with
the difference that it has an optional argument needed for recursive
computations and is not meant to be set by the user.
See the documentation of ``gomory_hu_tree()`` for more information.
INPUT:
- ``vertices`` -- a set of "real" vertices, as opposed to the fakes one
introduced during the computations. This variable is useful for the
algorithm and for recursion purposes.
- ``algorithm`` -- select the algorithm used by the :meth:`edge_cut`
method. Refer to its documentation for allowed values and default
behaviour.
EXAMPLES:
This function is actually tested in ``gomory_hu_tree()``, this example
is only present to have a doctest coverage of 100%::
sage: g = graphs.PetersenGraph()
sage: t = g._gomory_hu_tree(frozenset(g.vertices()))
"""
self._scream_if_not_simple()
# Small case, not really a problem ;-)
if len(vertices) == 1:
g = Graph()
g.add_vertices(vertices)
return g
# Take any two vertices (u,v)
it = iter(vertices)
u,v = next(it),next(it)
# Compute a uv min-edge-cut.
#
# The graph is split into U,V with u \in U and v\in V.
flow,edges,[U,V] = self.edge_cut(u, v, use_edge_labels=True, vertices=True, algorithm=algorithm)
# One graph for each part of the previous one
gU,gV = self.subgraph(U, immutable=False), self.subgraph(V, immutable=False)
# A fake vertex fU (resp. fV) to represent U (resp. V)
fU = frozenset(U)
fV = frozenset(V)
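# Frozensets are used as fake vertices: they are hashable, in practice
# do not collide with the "real" vertices, and are filtered out of the
# recursive calls by intersecting with `vertices`.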
# Each edge (uu,vv) with uu \in U and vv\in V yields:
# - an edge (uu,fV) in gU
# - an edge (vv,fU) in gV
#
# If the same edge is added several times their capacities add up.
from sage.rings.real_mpfr import RR
for uu,vv,capacity in edges:
capacity = capacity if capacity in RR else 1
# Assume uu is in gU
if uu in V:
uu,vv = vv,uu
# Create the new edges if necessary
if not gU.has_edge(uu, fV):
gU.add_edge(uu, fV, 0)
if not gV.has_edge(vv, fU):
gV.add_edge(vv, fU, 0)
# update the capacities
gU.set_edge_label(uu, fV, gU.edge_label(uu, fV) + capacity)
gV.set_edge_label(vv, fU, gV.edge_label(vv, fU) + capacity)
# Recursion on each side
gU_tree = gU._gomory_hu_tree(vertices & frozenset(gU), algorithm=algorithm)
gV_tree = gV._gomory_hu_tree(vertices & frozenset(gV), algorithm=algorithm)
# Union of the two partial trees
g = gU_tree.union(gV_tree)
# An edge to connect them, with the appropriate label
g.add_edge(u, v, flow)
return g
@doc_index("Connectivity, orientations, trees")
def gomory_hu_tree(self, algorithm=None):
r"""
Return a Gomory-Hu tree of self.
Given a tree `T` with labeled edges representing capacities, it is very
easy to determine the maximum flow between any pair of vertices:
it is the minimal label on the edges of the unique path between them.
Given a graph `G`, a Gomory-Hu tree `T` of `G` is a tree with the same
set of vertices, and such that the maximum flow between any two vertices
is the same in `G` as in `T`. See the :wikipedia:`Gomory–Hu_tree`. Note
that, in general, a graph admits more than one Gomory-Hu tree.
See also 15.4 (Gomory-Hu trees) from [Sch2003]_.
INPUT:
- ``algorithm`` -- select the algorithm used by the :meth:`edge_cut`
method. Refer to its documentation for allowed values and default
behaviour.
OUTPUT:
A graph with labeled edges
EXAMPLES:
Taking the Petersen graph::
sage: g = graphs.PetersenGraph()
sage: t = g.gomory_hu_tree()
Obviously, this graph is a tree::
sage: t.is_tree()
True
Note that if the original graph is not connected, then the Gomory-Hu
tree is in fact a forest::
sage: (2*g).gomory_hu_tree().is_forest()
True
sage: (2*g).gomory_hu_tree().is_connected()
False
On the other hand, such a tree loses none of the initial graph's
connectivity information::
sage: all(t.flow(u,v) == g.flow(u,v) for u,v in Subsets(g.vertices(), 2))
True
Just to make sure, we can check that the same is true for two vertices
in a random graph::
sage: g = graphs.RandomGNP(20,.3)
sage: t = g.gomory_hu_tree()
sage: g.flow(0,1) == t.flow(0,1)
True
And also the min cut::
sage: g.edge_connectivity() == min(t.edge_labels()) or not g.is_connected()
True
TESTS:
:trac:`16475`::
sage: G = graphs.PetersenGraph()
sage: for u,v in G.edge_iterator(labels=False):
....: G.set_edge_label(u, v, 1)
sage: for u, v in [(0, 1), (0, 4), (0, 5), (1, 2), (1, 6), (3, 4), (5, 7), (5, 8)]:
....: G.set_edge_label(u, v, 2)
sage: T = G.gomory_hu_tree()
sage: from itertools import combinations
sage: for u,v in combinations(G,2):
....: assert T.flow(u,v,use_edge_labels=True) == G.flow(u,v,use_edge_labels=True)
sage: graphs.EmptyGraph().gomory_hu_tree()
Graph on 0 vertices
"""
if not self.order():
return Graph()
if not self.is_connected():
g = Graph()
for cc in self.connected_components_subgraphs():
g = g.union(cc._gomory_hu_tree(frozenset(cc.vertex_iterator()), algorithm=algorithm))
else:
g = self._gomory_hu_tree(frozenset(self.vertex_iterator()), algorithm=algorithm)
if self.get_pos() is not None:
g.set_pos(dict(self.get_pos()))
return g
@doc_index("Leftovers")
def two_factor_petersen(self, solver=None, verbose=0, *, integrality_tolerance=1e-3):
r"""
Return a decomposition of the graph into 2-factors.
Petersen's 2-factor decomposition theorem asserts that any `2r`-regular
graph `G` can be decomposed into 2-factors. Equivalently, it means that
the edges of any `2r`-regular graph can be partitioned into `r` sets
`C_1,\dots,C_r` such that for all `i`, the set `C_i` is a disjoint union
of cycles (a 2-regular graph).
As any graph of maximal degree `\Delta` can be completed into a regular
graph of degree `2\lceil\frac\Delta 2\rceil`, this result also means
that the edges of any graph of maximal degree `\Delta` can be partitioned
into `r=\lceil\frac\Delta 2\rceil` sets `C_1,\dots,C_r` such that for all
`i`, the set `C_i` is a graph of maximal degree `2` (a disjoint union of
paths and cycles).
INPUT:
- ``solver`` -- string (default: ``None``); specify a Mixed Integer
Linear Programming (MILP) solver to be used. If set to ``None``, the
default one is used. For more information on MILP solvers and which
default solver is used, see the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of
verbosity. Set to 0 by default, which means quiet.
- ``integrality_tolerance`` -- float; parameter for use with MILP
solvers over an inexact base ring; see
:meth:`MixedIntegerLinearProgram.get_values`.
EXAMPLES:
The Complete Graph on `7` vertices is a `6`-regular graph, so it can be
edge-partitioned into `2`-regular graphs::
sage: g = graphs.CompleteGraph(7)
sage: classes = g.two_factor_petersen()
sage: for c in classes:
....: gg = Graph()
....: gg.add_edges(c)
....: print(max(gg.degree())<=2)
True
True
True
sage: Set(set(classes[0]) | set(classes[1]) | set(classes[2])).cardinality() == g.size()
True
::
sage: g = graphs.CirculantGraph(24, [7, 11])
sage: cl = g.two_factor_petersen()
sage: g.plot(edge_colors={'black':cl[0], 'red':cl[1]})
Graphics object consisting of 73 graphics primitives
"""
self._scream_if_not_simple()
d = self.eulerian_orientation()
# This new graph is bipartite and built the following way:
#
# To each vertex v of the digraph are associated two vertices,
# a sink (-1,v) and a source (1,v)
# Any edge (u,v) in the digraph is then added as ((-1,u),(1,v))
g = Graph()
g.add_edges(((-1, u), (1, v)) for u, v in d.edge_iterator(labels=None))
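# For a 2r-regular input graph, the Eulerian orientation gives each
# vertex in- and out-degree r, so g is an r-regular bipartite graph; a
# proper edge coloring of g with r colors then corresponds to the r
# 2-factors.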
# This new bipartite graph is now edge_colored
from sage.graphs.graph_coloring import edge_coloring
classes = edge_coloring(g, solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance)
# The edges in the classes are of the form ((-1,u),(1,v))
# and have to be translated back to (u,v)
classes_b = []
for c in classes:
classes_b.append([(u,v) for ((uu,u),(vv,v)) in c])
return classes_b
@doc_index("Leftovers")
def kirchhoff_symanzik_polynomial(self, name='t'):
r"""
Return the Kirchhoff-Symanzik polynomial of a graph.
This is a polynomial in variables `t_e` (each of them representing an
edge of the graph `G`) defined as a sum over all spanning trees:
.. MATH::
\Psi_G(t) = \sum_{\substack{T\subseteq E \\ T \text{ a spanning tree}}} \prod_{e \not\in T} t_e
This is also called the first Symanzik polynomial or the Kirchhoff
polynomial.
INPUT:
- ``name`` -- name of the variables (default: ``'t'``)
OUTPUT:
- a polynomial with integer coefficients
ALGORITHM:
This is computed here using a determinant, as explained in Section
3.1 of [Mar2009a]_.
As an intermediate step, one computes a cycle basis `\mathcal C` of
`G` and a rectangular `|\mathcal C| \times |E(G)|` matrix with
entries in `\{-1,0,1\}`, which describes which edges belong to which
cycle of `\mathcal C` and their respective orientations.
More precisely, after fixing an arbitrary orientation for each edge
`e\in E(G)` and each cycle `C\in\mathcal C`, one gets a sign for
every incident pair (edge, cycle), which is `1` if the orientations
coincide and `-1` otherwise.
EXAMPLES:
For the cycle of length 5::
sage: G = graphs.CycleGraph(5)
sage: G.kirchhoff_symanzik_polynomial()
t0 + t1 + t2 + t3 + t4
One can use another letter for variables::
sage: G.kirchhoff_symanzik_polynomial(name='u')
u0 + u1 + u2 + u3 + u4
For the 'coffee bean' graph::
sage: G = Graph([(0,1,'a'),(0,1,'b'),(0,1,'c')], multiedges=True)
sage: G.kirchhoff_symanzik_polynomial()
t0*t1 + t0*t2 + t1*t2
For the 'parachute' graph::
sage: G = Graph([(0,2,'a'),(0,2,'b'),(0,1,'c'),(1,2,'d')], multiedges=True)
sage: G.kirchhoff_symanzik_polynomial()
t0*t1 + t0*t2 + t1*t2 + t1*t3 + t2*t3
For the complete graph with 4 vertices::
sage: G = graphs.CompleteGraph(4)
sage: G.kirchhoff_symanzik_polynomial()
t0*t1*t3 + t0*t2*t3 + t1*t2*t3 + t0*t1*t4 + t0*t2*t4 + t1*t2*t4
+ t1*t3*t4 + t2*t3*t4 + t0*t1*t5 + t0*t2*t5 + t1*t2*t5 + t0*t3*t5
+ t2*t3*t5 + t0*t4*t5 + t1*t4*t5 + t3*t4*t5
REFERENCES:
[Bro2011]_
"""
from sage.matrix.constructor import matrix
from sage.rings.integer_ring import ZZ
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
# The order of the vertices in each tuple matters, so use a list
edges = list(self.edges(sort=False))
cycles = self.cycle_basis(output='edge')
edge2int = {e: j for j, e in enumerate(edges)}
circuit_mtrx = matrix(ZZ, self.size(), len(cycles))
for i, cycle in enumerate(cycles):
for edge in cycle:
if edge in edges:
circuit_mtrx[edge2int[edge], i] = +1
else:
circuit_mtrx[edge2int[(edge[1], edge[0], edge[2])], i] = -1
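# Following Section 3.1 of [Mar2009a]_, the polynomial is the
# determinant of C^t * D * C, where C is the cycle/edge incidence matrix
# built above and D is the diagonal matrix of edge variables.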
D = matrix.diagonal(PolynomialRing(ZZ, name, self.size()).gens())
return (circuit_mtrx.transpose() * D * circuit_mtrx).determinant()
@doc_index("Leftovers")
def magnitude_function(self):
r"""
Return the magnitude function of the graph as a rational function.
This is defined as the sum of all coefficients in the inverse of the
matrix `Z` whose coefficient `Z_{i,j}`, indexed by a pair of vertices
`(i,j)`, is `q^{d(i,j)}`, where `d` is the distance function in the graph.
By convention, if the distance from `i` to `j` is infinite (for two
vertices not path connected) then `Z_{i,j}=0`.
The value of the magnitude function at `q=0` is the cardinality of the
graph. The magnitude function of a disjoint union is the sum of the
magnitude functions of the connected components. The magnitude function
of a Cartesian product is the product of the magnitude functions of the
factors.
EXAMPLES::
sage: g = Graph({1:[], 2:[]})
sage: g.magnitude_function()
2
sage: g = graphs.CycleGraph(4)
sage: g.magnitude_function()
4/(q^2 + 2*q + 1)
sage: g = graphs.CycleGraph(5)
sage: m = g.magnitude_function(); m
5/(2*q^2 + 2*q + 1)
One can expand the magnitude as a power series in `q` as follows::
sage: q = QQ[['q']].gen()
sage: m(q)
5 - 10*q + 10*q^2 - 20*q^4 + 40*q^5 - 40*q^6 + ...
One can also use the substitution `q = exp(-t)` to obtain the magnitude
function as a function of `t`::
sage: g = graphs.CycleGraph(6)
sage: m = g.magnitude_function()
sage: t = var('t') # optional - sage.symbolic
sage: m(exp(-t)) # optional - sage.symbolic
6/(2*e^(-t) + 2*e^(-2*t) + e^(-3*t) + 1)
TESTS::
sage: g = Graph()
sage: g.magnitude_function()
0
sage: g = Graph({1:[]})
sage: g.magnitude_function()
1
sage: g = graphs.PathGraph(4)
sage: g.magnitude_function()
(-2*q + 4)/(q + 1)
REFERENCES:
.. [Lein] Tom Leinster, *The magnitude of metric spaces*.
Doc. Math. 18 (2013), 857-905.
"""
from sage.matrix.constructor import matrix
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.graphs.distances_all_pairs import distances_all_pairs
ring = PolynomialRing(ZZ, 'q')
q = ring.gen()
N = self.order()
if not N:
return ring.zero()
dist = distances_all_pairs(self)
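# Z[i, j] = q^d(i, j); pairs of vertices in distinct connected
# components get 0, as their distance is +Infinity and thus not in ZZ.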
vertices = list(self)
Z = matrix(ring, N, N, ring.zero())
for i in range(N):
Z[i, i] = ring.one()
for i in range(N):
for j in range(i):
dij = dist[vertices[i]][vertices[j]]
if dij in ZZ:
Z[i, j] = Z[j, i] = q ** dij
else:
Z[i, j] = Z[j, i] = ring.zero()
return sum(sum(u) for u in ~Z)
@doc_index("Leftovers")
def ihara_zeta_function_inverse(self):
"""
Compute the inverse of the Ihara zeta function of the graph.
This is a polynomial in one variable with integer coefficients. The
Ihara zeta function itself is the inverse of this polynomial.
See the :wikipedia:`Ihara zeta function` for more information.
ALGORITHM:
This is computed here as the (reversed) characteristic polynomial of a
square matrix of size twice the number of edges, related to the
adjacency matrix of the line graph, see for example Proposition 9 in
[SS2008]_ and Def. 4.1 in [Ter2011]_.
The graph is first replaced by its 2-core, as this does not change the
Ihara zeta function.
EXAMPLES::
sage: G = graphs.CompleteGraph(4)
sage: factor(G.ihara_zeta_function_inverse())
(2*t - 1) * (t + 1)^2 * (t - 1)^3 * (2*t^2 + t + 1)^3
sage: G = graphs.CompleteGraph(5)
sage: factor(G.ihara_zeta_function_inverse())
(-1) * (3*t - 1) * (t + 1)^5 * (t - 1)^6 * (3*t^2 + t + 1)^4
sage: G = graphs.PetersenGraph()
sage: factor(G.ihara_zeta_function_inverse())
(-1) * (2*t - 1) * (t + 1)^5 * (t - 1)^6 * (2*t^2 + 2*t + 1)^4
* (2*t^2 - t + 1)^5
sage: G = graphs.RandomTree(10)
sage: G.ihara_zeta_function_inverse()
1
REFERENCES:
[HST2001]_
"""
from sage.matrix.constructor import matrix
H = self.subgraph(vertices=self.cores(k=2)[1])
E = list(H.edges(sort=False))
m = len(E)
# compute (Hashimoto) edge matrix T
T = matrix(ZZ, 2 * m, 2 * m, 0)
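# Oriented-edge indexing: 2*i is edge E[i] traversed from E[i][0] to
# E[i][1], and 2*i + 1 is its reverse. T[a, b] = 1 when oriented edge a
# ends where oriented edge b starts, immediate backtracking excluded.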
for i in range(m):
for j in range(m):
if i != j:
if E[i][1] == E[j][0]: # same orientation
T[2 * i, 2 * j] = 1
T[2 * j + 1, 2 * i + 1] = 1
elif E[i][1] == E[j][1]: # opposite orientation (towards)
T[2 * i, 2 * j + 1] = 1
T[2 * j, 2 * i + 1] = 1
elif E[i][0] == E[j][0]: # opposite orientation (away)
T[2 * i + 1, 2 * j] = 1
T[2 * j + 1, 2 * i] = 1
return T.charpoly('t').reverse()
@doc_index("Leftovers")
def perfect_matchings(self, labels=False):
r"""
Return an iterator over all perfect matchings of the graph.
ALGORITHM:
Choose a vertex `v`; for each neighbor `u` of `v`, add the edge `(u, v)`
to the matching and recurse on the graph with both endpoints removed.
INPUT:
- ``labels`` -- boolean (default: ``False``); when ``True``, the edges
in each perfect matching are triples (containing the label as the
third element), otherwise the edges are pairs.
.. SEEALSO::
:meth:`matching`
EXAMPLES::
sage: G=graphs.GridGraph([2,3])
sage: for m in G.perfect_matchings():
....: print(sorted(m))
[((0, 0), (0, 1)), ((0, 2), (1, 2)), ((1, 0), (1, 1))]
[((0, 0), (1, 0)), ((0, 1), (0, 2)), ((1, 1), (1, 2))]
[((0, 0), (1, 0)), ((0, 1), (1, 1)), ((0, 2), (1, 2))]
sage: G = graphs.CompleteGraph(4)
sage: for m in G.perfect_matchings(labels=True):
....: print(sorted(m))
[(0, 1, None), (2, 3, None)]
[(0, 2, None), (1, 3, None)]
[(0, 3, None), (1, 2, None)]
sage: G = Graph([[1,-1,'a'], [2,-2, 'b'], [1,-2,'x'], [2,-1,'y']])
sage: sorted(sorted(m) for m in G.perfect_matchings(labels=True))
[[(-2, 1, 'x'), (-1, 2, 'y')], [(-2, 2, 'b'), (-1, 1, 'a')]]
sage: G = graphs.CompleteGraph(8)
sage: mpc = G.matching_polynomial().coefficients(sparse=False)[0]
sage: len(list(G.perfect_matchings())) == mpc
True
sage: G = graphs.PetersenGraph().copy(immutable=True)
sage: [sorted(m) for m in G.perfect_matchings()]
[[(0, 1), (2, 3), (4, 9), (5, 7), (6, 8)],
[(0, 1), (2, 7), (3, 4), (5, 8), (6, 9)],
[(0, 4), (1, 2), (3, 8), (5, 7), (6, 9)],
[(0, 4), (1, 6), (2, 3), (5, 8), (7, 9)],
[(0, 5), (1, 2), (3, 4), (6, 8), (7, 9)],
[(0, 5), (1, 6), (2, 7), (3, 8), (4, 9)]]
sage: list(Graph().perfect_matchings())
[[]]
sage: G = graphs.CompleteGraph(5)
sage: list(G.perfect_matchings())
[]
"""
if not self:
yield []
return
if self.order() % 2 or any(len(cc) % 2 for cc in self.connected_components()):
return
def rec(G):
"""
Iterator over all perfect matchings of a simple graph `G`.
"""
if not G:
yield []
return
if G.order() % 2 == 0:
v = next(G.vertex_iterator())
Nv = list(G.neighbor_iterator(v))
G.delete_vertex(v)
for u in Nv:
Nu = list(G.neighbor_iterator(u))
G.delete_vertex(u)
for partial_matching in rec(G):
partial_matching.append((u, v))
yield partial_matching
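# Backtrack: restore u and its incident edges before trying the next
# neighbor of v; v itself is restored after the loop.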
G.add_vertex(u)
G.add_edges((u, nu) for nu in Nu)
G.add_vertex(v)
G.add_edges((v, nv) for nv in Nv)
# We create a mutable copy of the graph and remove its loops, if any
G = self.copy(immutable=False)
G.allow_loops(False)
# We create a mapping from frozen unlabeled edges to (labeled) edges.
# This eases, for instance, the manipulation of multiedges (if any)
edges = {}
for e in G.edges(labels=labels):
f = frozenset(e[:2])
if f in edges:
edges[f].append(e)
else:
edges[f] = [e]
# We now get rid of multiple edges, if any
G.allow_multiple_edges(False)
# For each unlabeled matching, we yield all its possible labelings
for m in rec(G):
for pm in itertools.product(*[edges[frozenset(e)] for e in m]):
yield pm
@doc_index("Leftovers")
def has_perfect_matching(self, algorithm="Edmonds", solver=None, verbose=0,
*, integrality_tolerance=1e-3):
r"""
Return whether this graph has a perfect matching.
INPUT:
- ``algorithm`` -- string (default: ``"Edmonds"``)
- ``"Edmonds"`` uses Edmonds' algorithm as implemented in NetworkX to
find a matching of maximal cardinality, then check whether this
cardinality is half the number of vertices of the graph.
- ``"LP_matching"`` uses a Linear Program to find a matching of
maximal cardinality, then check whether this cardinality is half the
number of vertices of the graph.
- ``"LP"`` uses a Linear Program formulation of the perfect matching
problem: put a binary variable ``b[e]`` on each edge `e`, and for
each vertex `v`, require that the sum of the values of the edges
incident to `v` is 1.
- ``solver`` -- string (default: ``None``); specify a Mixed Integer
Linear Programming (MILP) solver to be used. If set to ``None``, the
default one is used. For more information on MILP solvers and which
default solver is used, see the method :meth:`solve
<sage.numerical.mip.MixedIntegerLinearProgram.solve>` of the class
:class:`MixedIntegerLinearProgram
<sage.numerical.mip.MixedIntegerLinearProgram>`.
- ``verbose`` -- integer (default: ``0``); sets the level of verbosity:
set to 0 by default, which means quiet (only useful when
``algorithm == "LP_matching"`` or ``algorithm == "LP"``)
- ``integrality_tolerance`` -- float; parameter for use with MILP
solvers over an inexact base ring; see
:meth:`MixedIntegerLinearProgram.get_values`.
OUTPUT:
A boolean.
EXAMPLES::
sage: graphs.PetersenGraph().has_perfect_matching()
True
sage: graphs.WheelGraph(6).has_perfect_matching()
True
sage: graphs.WheelGraph(5).has_perfect_matching()
False
sage: graphs.PetersenGraph().has_perfect_matching(algorithm="LP_matching")
True
sage: graphs.WheelGraph(6).has_perfect_matching(algorithm="LP_matching")
True
sage: graphs.WheelGraph(5).has_perfect_matching(algorithm="LP_matching")
False
sage: graphs.PetersenGraph().has_perfect_matching(algorithm="LP_matching")
True
sage: graphs.WheelGraph(6).has_perfect_matching(algorithm="LP_matching")
True
sage: graphs.WheelGraph(5).has_perfect_matching(algorithm="LP_matching")
False
TESTS::
sage: G = graphs.EmptyGraph()
sage: all(G.has_perfect_matching(algorithm=algo) for algo in ['Edmonds', 'LP_matching', 'LP'])
True
Be careful with isolated vertices::
sage: G = graphs.PetersenGraph()
sage: G.add_vertex(11)
sage: any(G.has_perfect_matching(algorithm=algo) for algo in ['Edmonds', 'LP_matching', 'LP'])
False
"""
if self.order() % 2:
return False
if algorithm == "Edmonds":
return len(self) == 2*self.matching(value_only=True,
use_edge_labels=False,
algorithm="Edmonds")
elif algorithm == "LP_matching":
return len(self) == 2*self.matching(value_only=True,
use_edge_labels=False,
algorithm="LP",
solver=solver,
verbose=verbose,
integrality_tolerance=integrality_tolerance)
elif algorithm == "LP":
from sage.numerical.mip import MixedIntegerLinearProgram, MIPSolverException
p = MixedIntegerLinearProgram(solver=solver)
b = p.new_variable(binary=True)
for v in self:
edges = self.edges_incident(v, labels=False)
if not edges:
return False
p.add_constraint(p.sum(b[frozenset(e)] for e in edges) == 1)
try:
p.solve(log=verbose)
return True
except MIPSolverException:
return False
else:
raise ValueError('algorithm must be set to "Edmonds", "LP_matching" or "LP"')
@doc_index("Leftovers")
def effective_resistance(self, i, j):
r"""
Return the effective resistance between nodes `i` and `j`.
The resistance distance between vertices `i` and `j` of a simple
connected graph `G` is defined as the effective resistance between the
two vertices on an electrical network constructed from `G` replacing
each edge of the graph by a unit (1 ohm) resistor.
See the :wikipedia:`Resistance_distance` for more information.
INPUT:
- ``i``, ``j`` -- vertices of the graph
OUTPUT: rational number denoting resistance between nodes `i` and `j`
EXAMPLES:
Effective resistances in a straight linear 2-tree on 6 vertices ::
sage: G = Graph([(0,1),(0,2),(1,2),(1,3),(3,5),(2,4),(2,3),(3,4),(4,5)])
sage: G.effective_resistance(0,1)
34/55
sage: G.effective_resistance(0,3)
49/55
sage: G.effective_resistance(1,4)
9/11
sage: G.effective_resistance(0,5)
15/11
Effective resistances in a fan on 6 vertices ::
sage: H = Graph([(0,1),(0,2),(0,3),(0,4),(0,5),(0,6),(1,2),(2,3),(3,4),(4,5)])
sage: H.effective_resistance(1,5)
6/5
sage: H.effective_resistance(1,3)
49/55
.. SEEALSO::
* :meth:`effective_resistance_matrix` --
a similar method giving a matrix full of all effective
resistances between all nodes
* :meth:`least_effective_resistance` --
gives node pairs with least effective resistances
* See :wikipedia:`Resistance_distance` for more details.
TESTS::
sage: G = graphs.CompleteGraph(4)
sage: all(G.effective_resistance(u, v) == 1/2 for u,v in G.edge_iterator(labels=False))
True
sage: Graph(1).effective_resistance(0,0)
0
sage: G = Graph([(0,1),(1,2)])
sage: G.effective_resistance(0,2)
2
sage: G = Graph([(0,1),(1,2),(2,0)])
sage: G.effective_resistance(0,2)
2/3
sage: G = Graph([(0,1),(0,2),(0,3),(0,4),(0,5),(1,2),(2,3),(3,4),(4,5),(5,1)])
sage: r = G.effective_resistance(0,3)
sage: r == fibonacci(2*(5-3)+1)*fibonacci(2*3-1)/fibonacci(2*5)
True
"""
from sage.matrix.constructor import matrix
if i not in self:
raise ValueError("vertex ({0}) is not a vertex of the graph".format(repr(i)))
elif j not in self:
raise ValueError("vertex ({0}) is not a vertex of the graph".format(repr(j)))
if i == j:
return 0
self._scream_if_not_simple()
if not self.is_connected():
raise ValueError('the Graph is not a connected graph')
vert = list(self)
i1 = vert.index(i)
i2 = vert.index(j)
n = self.order()
L = self.laplacian_matrix(vertices=vert)
M = L.pseudoinverse()
Id = matrix.identity(n)
sigma = matrix(Id[i1] - Id[i2])
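# The effective resistance is the quadratic form
# (e_i - e_j)^t L^+ (e_i - e_j) in the pseudoinverse of the Laplacian.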
diff = sigma * M * sigma.transpose()
return diff[0, 0]
@doc_index("Leftovers")
def effective_resistance_matrix(self, vertices=None, nonedgesonly=True):
r"""
Return a matrix whose (`i`, `j`) entry gives the effective resistance
between vertices `i` and `j`.
The resistance distance between vertices `i` and `j` of a simple
connected graph `G` is defined as the effective resistance between the
two vertices on an electrical network constructed from `G` replacing
each edge of the graph by a unit (1 ohm) resistor.
INPUT:
- ``nonedgesonly`` -- boolean (default: ``True``); if ``True``, assign
zero resistance to pairs of adjacent vertices.
- ``vertices`` -- list (default: ``None``); the ordering of the
vertices defining how they should appear in the matrix. By default,
the ordering given by :meth:`GenericGraph.vertices` is used.
OUTPUT: matrix
EXAMPLES:
The effective resistance matrix for a straight linear 2-tree counting
only non-adjacent vertex pairs ::
sage: G = Graph([(0,1),(0,2),(1,2),(1,3),(3,5),(2,4),(2,3),(3,4),(4,5)])
sage: G.effective_resistance_matrix()
[ 0 0 0 49/55 59/55 15/11]
[ 0 0 0 0 9/11 59/55]
[ 0 0 0 0 0 49/55]
[49/55 0 0 0 0 0]
[59/55 9/11 0 0 0 0]
[15/11 59/55 49/55 0 0 0]
The same effective resistance matrix, this time including adjacent
vertices ::
sage: G.effective_resistance_matrix(nonedgesonly=False)
[ 0 34/55 34/55 49/55 59/55 15/11]
[34/55 0 26/55 31/55 9/11 59/55]
[34/55 26/55 0 5/11 31/55 49/55]
[49/55 31/55 5/11 0 26/55 34/55]
[59/55 9/11 31/55 26/55 0 34/55]
[15/11 59/55 49/55 34/55 34/55 0]
This example illustrates the effective resistance matrix for a fan on 6
vertices counting only non-adjacent vertex pairs ::
sage: H = Graph([(0,1),(0,2),(0,3),(0,4),(0,5),(0,6),(1,2),(2,3),(3,4),(4,5)])
sage: H.effective_resistance_matrix()
[ 0 0 0 0 0 0 0]
[ 0 0 0 49/55 56/55 6/5 89/55]
[ 0 0 0 0 4/5 56/55 81/55]
[ 0 49/55 0 0 0 49/55 16/11]
[ 0 56/55 4/5 0 0 0 81/55]
[ 0 6/5 56/55 49/55 0 0 89/55]
[ 0 89/55 81/55 16/11 81/55 89/55 0]
.. SEEALSO::
* :meth:`least_effective_resistance` --
gives node pairs with least effective resistances
* :meth:`effective_resistance` --
computes effective resistance for a single node pair
* See :wikipedia:`Resistance_Distance` for more details.
TESTS::
sage: graphs.CompleteGraph(4).effective_resistance_matrix()
[0 0 0 0]
[0 0 0 0]
[0 0 0 0]
[0 0 0 0]
sage: G = Graph(multiedges=True, sparse=True)
sage: G.add_edges([(0, 1)] * 3)
sage: G.effective_resistance_matrix()
Traceback (most recent call last):
...
ValueError: This method is not known to work on graphs with
multiedges. Perhaps this method can be updated to handle them, but
in the meantime if you want to use it please disallow multiedges
using allow_multiple_edges().
sage: graphs.CompleteGraph(4).effective_resistance_matrix(nonedgesonly=False)
[ 0 1/2 1/2 1/2]
[1/2 0 1/2 1/2]
[1/2 1/2 0 1/2]
[1/2 1/2 1/2 0]
sage: Graph(1).effective_resistance_matrix()
[0]
sage: Graph().effective_resistance_matrix()
Traceback (most recent call last):
...
ValueError: unable to compute effective resistance for an empty Graph object
sage: G = Graph([(0,1),(1,2),(2,3),(3,0),(0,2)])
sage: G.effective_resistance_matrix()
[0 0 0 0]
[0 0 0 1]
[0 0 0 0]
[0 1 0 0]
sage: G = Graph([(0,1),(0,2),(0,3),(0,4),(0,5),(1,2),(2,3),(3,4),(4,5),(5,1)])
sage: r = G.effective_resistance_matrix(nonedgesonly=False)[0,3]
sage: r == fibonacci(2*(5-3)+1)*fibonacci(2*3-1)/fibonacci(2*5)
True
"""
from sage.matrix.constructor import matrix
from sage.rings.rational_field import QQ
n = self.order()
if not n:
raise ValueError('unable to compute effective resistance for an empty Graph object')
if vertices is None:
vertices = self.vertices()
self._scream_if_not_simple()
if not self.is_connected():
raise ValueError('the Graph is not a connected graph')
L = self.laplacian_matrix(vertices=vertices)
M = L.pseudoinverse()
d = matrix(M.diagonal()).transpose()
onesvec = matrix(QQ, n, 1, lambda i, j: 1)
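# Entrywise, S[a, b] = M[a, a] + M[b, b] - 2*M[a, b], the resistance
# distance written in terms of the Laplacian pseudoinverse M.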
S = d * onesvec.transpose() + onesvec * d.transpose() - 2 * M
onesmat = matrix(QQ, n, n, lambda i, j: 1)
if nonedgesonly:
B = onesmat - self.adjacency_matrix(vertices=vertices) - matrix.identity(n)
S = S.elementwise_product(B)
return S
@doc_index("Leftovers")
def least_effective_resistance(self, nonedgesonly=True):
r"""
Return a list of pairs of nodes with the least effective resistance.
The resistance distance between vertices `i` and `j` of a simple
connected graph `G` is defined as the effective resistance between the
two vertices on an electrical network constructed from `G` replacing
each edge of the graph by a unit (1 ohm) resistor.
INPUT:
- ``nonedgesonly`` -- boolean (default: ``True``); if ``True``, assign
zero resistance to pairs of adjacent vertices
OUTPUT: list
EXAMPLES:
Pairs of non-adjacent nodes with least effective resistance in a
straight linear 2-tree on 6 vertices::
sage: G = Graph([(0,1),(0,2),(1,2),(1,3),(3,5),(2,4),(2,3),(3,4),(4,5)])
sage: G.least_effective_resistance()
[(1, 4)]
Pairs of (adjacent or non-adjacent) nodes with least effective
resistance in a straight linear 2-tree on 6 vertices ::
sage: G.least_effective_resistance(nonedgesonly = False)
[(2, 3)]
Pairs of non-adjacent nodes with least effective resistance in a fan on
6 vertices counting only non-adjacent vertex pairs ::
sage: H = Graph([(0,1),(0,2),(0,3),(0,4),(0,5),(0,6),(1,2),(2,3),(3,4),(4,5)])
sage: H.least_effective_resistance()
[(2, 4)]
.. SEEALSO::
* :meth:`effective_resistance_matrix` --
a similar method giving a matrix full of all effective
resistances
* :meth:`effective_resistance` --
computes effective resistance for a single node pair
* See :wikipedia:`Resistance_distance` for more details.
TESTS::
sage: graphs.CompleteGraph(4).least_effective_resistance()
[]
sage: graphs.CompleteGraph(4).least_effective_resistance(nonedgesonly=False)
[(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
sage: Graph(1).least_effective_resistance()
[]
sage: G = Graph([(0,1),(1,2),(2,3),(3,0),(0,2)])
sage: G.least_effective_resistance()
[(1, 3)]
"""
n = self.order()
if not n:
raise ValueError('unable to compute least resistance on empty Graph')
self._scream_if_not_simple()
if not self.is_connected():
raise ValueError('the Graph is not a connected graph')
if nonedgesonly and self.is_clique():
return []
verts = list(self)
verttoidx = {u: i for i, u in enumerate(verts)}
S = self.effective_resistance_matrix(vertices=verts, nonedgesonly=nonedgesonly)
if nonedgesonly:
edges = self.complement().edges(labels=False)
else:
edges = [(verts[i], verts[j]) for i in range(n) for j in range(i + 1, n)]
rmin = min(S[(verttoidx[e[0]], verttoidx[e[1]])] for e in edges)
return [e for e in edges if S[(verttoidx[e[0]], verttoidx[e[1]])] == rmin]
@doc_index("Leftovers")
def common_neighbors_matrix(self, vertices=None, nonedgesonly=True):
r"""
Return a matrix of the numbers of common neighbors between each pair of
vertices: the `(i, j)` entry gives the number of common neighbors
between vertices `i` and `j`.
This method is only valid for simple (no loops, no multiple edges)
graphs.
INPUT:
- ``nonedgesonly`` -- boolean (default: ``True``); if ``True``, assigns
`0` value to adjacent vertices.
- ``vertices`` -- list (default: ``None``); the ordering of the
vertices defining how they should appear in the matrix. By default,
the ordering given by :meth:`GenericGraph.vertices` is used.
OUTPUT: matrix
EXAMPLES:
The common neighbors matrix for a straight linear 2-tree counting
only non-adjacent vertex pairs ::
sage: G1 = Graph()
sage: G1.add_edges([(0,1),(0,2),(1,2),(1,3),(3,5),(2,4),(2,3),(3,4),(4,5)])
sage: G1.common_neighbors_matrix(nonedgesonly = True)
[0 0 0 2 1 0]
[0 0 0 0 2 1]
[0 0 0 0 0 2]
[2 0 0 0 0 0]
[1 2 0 0 0 0]
[0 1 2 0 0 0]
We now show the common neighbors matrix which includes adjacent
vertices ::
sage: G1.common_neighbors_matrix(nonedgesonly = False)
[0 1 1 2 1 0]
[1 0 2 1 2 1]
[1 2 0 2 1 2]
[2 1 2 0 2 1]
[1 2 1 2 0 1]
[0 1 2 1 1 0]
The common neighbors matrix for a fan on 6 vertices counting only
non-adjacent vertex pairs ::
sage: H = Graph([(0,1),(0,2),(0,3),(0,4),(0,5),(0,6),(1,2),(2,3),(3,4),(4,5)])
sage: H.common_neighbors_matrix()
[0 0 0 0 0 0 0]
[0 0 0 2 1 1 1]
[0 0 0 0 2 1 1]
[0 2 0 0 0 2 1]
[0 1 2 0 0 0 1]
[0 1 1 2 0 0 1]
[0 1 1 1 1 1 0]
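Internally, the matrix is the square of the adjacency matrix with the
diagonal zeroed out (and, when ``nonedgesonly=True``, entries of adjacent
pairs zeroed as well); an illustrative consistency check ::
sage: G = graphs.CycleGraph(4)
sage: A = G.adjacency_matrix()
sage: M = G.common_neighbors_matrix(nonedgesonly=False)
sage: all(M[i, j] == (A**2)[i, j] for i in range(4) for j in range(4) if i != j)
True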
It is an error to input anything other than a simple graph::
sage: G = Graph([(0,0)],loops=True)
sage: G.common_neighbors_matrix()
Traceback (most recent call last):
...
ValueError: This method is not known to work on graphs with loops.
Perhaps this method can be updated to handle them, but in the
meantime if you want to use it please disallow loops using
allow_loops().
.. SEEALSO::
* :meth:`most_common_neighbors` --
returns node pairs with most shared neighbors
TESTS::
sage: G = graphs.CompleteGraph(4)
sage: M = G.common_neighbors_matrix()
sage: M.is_zero()
True
sage: Graph(1).common_neighbors_matrix()
[0]
sage: Graph().common_neighbors_matrix()
[]
sage: G = Graph([(0,1),(1,2),(2,3),(3,0),(0,2)])
sage: G.common_neighbors_matrix()
[0 0 0 0]
[0 0 0 2]
[0 0 0 0]
[0 2 0 0]
"""
self._scream_if_not_simple()
if vertices is None:
vertices = self.vertices()
A = self.adjacency_matrix(vertices=vertices)
M = A**2
for v in range(self.order()):
M[v, v] = 0
if nonedgesonly:
for w in range(v + 1, self.order()):
if A[v, w]:
M[v, w] = M[w, v] = 0
return M
@doc_index("Leftovers")
def most_common_neighbors(self, nonedgesonly=True):
r"""
Return vertex pairs with maximal number of common neighbors.
This method is only valid for simple (no loops, no multiple edges)
graphs with order `\geq 2`.
INPUT:
- ``nonedgesonly`` -- boolean (default: ``True``); if ``True``, assigns
`0` value to adjacent vertices.
OUTPUT: list of pairs of vertices
EXAMPLES:
The maximum common neighbor (non-adjacent) pairs for a straight
linear 2-tree ::
sage: G1 = Graph([(0,1),(0,2),(1,2),(1,3),(3,5),(2,4),(2,3),(3,4),(4,5)])
sage: G1.most_common_neighbors()
[(0, 3), (1, 4), (2, 5)]
If we also include adjacent pairs ::
sage: G1.most_common_neighbors(nonedgesonly=False)
[(0, 3), (1, 2), (1, 4), (2, 3), (2, 5), (3, 4)]
The pairs with the most common neighbors in a fan on 6 vertices,
counting only non-adjacent vertex pairs ::
sage: H = Graph([(0,1),(0,2),(0,3),(0,4),(0,5),(0,6),(1,2),(2,3),(3,4),(4,5)])
sage: H.most_common_neighbors()
[(1, 3), (2, 4), (3, 5)]
.. SEEALSO::
* :meth:`common_neighbors_matrix` --
a similar method giving a matrix of number of common neighbors
TESTS::
sage: G = graphs.CompleteGraph(4)
sage: G.most_common_neighbors()
[]
sage: G.most_common_neighbors(nonedgesonly=False)
[(0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)]
sage: Graph(1).most_common_neighbors()
Traceback (most recent call last):
...
ValueError: this method is defined for graphs with at least 2 vertices
sage: Graph().most_common_neighbors()
Traceback (most recent call last):
...
ValueError: this method is defined for graphs with at least 2 vertices
sage: G = Graph([(0,1),(1,2),(2,3),(3,0),(0,2)])
sage: G.most_common_neighbors()
[(1, 3)]
sage: G.most_common_neighbors(nonedgesonly=False)
[(0, 2), (1, 3)]
"""
self._scream_if_not_simple()
if self.num_verts() < 2:
raise ValueError('this method is defined for graphs with at least 2 vertices')
verts = list(self)
M = self.common_neighbors_matrix(vertices=verts, nonedgesonly=nonedgesonly)
output = []
coefficients = M.coefficients()
if coefficients:
maximum = max(coefficients)
for v in range(self.num_verts()):
for w in range(v + 1, self.num_verts()):
if M[v, w] == maximum:
output.append((verts[v], verts[w]))
return output
@doc_index("Leftovers")
def arboricity(self, certificate=False):
r"""
Return the arboricity of the graph and an optional certificate.
The arboricity is the minimum number of forests needed to cover all
edges of the graph.
See :wikipedia:`Arboricity`
INPUT:
- ``certificate`` -- boolean (default: ``False``); whether to return
a certificate.
OUTPUT:
When ``certificate = True``, the function returns `(a, F)`
where `a` is the arboricity and `F` is a list of `a` edge-disjoint
forests that partition the edge set of the graph. The forests are
represented as subgraphs of the original graph.
If ``certificate = False``, the function returns just an integer
indicating the arboricity.
ALGORITHM:
Represent the graph as a graphic matroid, then apply the matroid
:meth:`~sage.matroids.matroid.Matroid.partition` algorithm from the
matroids module.
EXAMPLES::
sage: G = graphs.PetersenGraph()
sage: a,F = G.arboricity(True)
sage: a
2
sage: all([f.is_forest() for f in F])
True
sage: len(set.union(*[set(f.edges()) for f in F])) == G.size()
True
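The arboricity of `K_4` matches the Nash-Williams bound
`\lceil m/(n-1) \rceil = \lceil 6/3 \rceil = 2` (a small worked example)::
sage: graphs.CompleteGraph(4).arboricity()
2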
TESTS::
sage: g = Graph()
sage: g.arboricity(True)
(0, [])
"""
from sage.matroids.constructor import Matroid
P = Matroid(self).partition()
if certificate:
return (len(P), [self.subgraph(edges=forest) for forest in P])
else:
return len(P)
@doc_index("Graph properties")
def is_antipodal(self):
r"""
Check whether this graph is antipodal.
A graph `G` of diameter `d` is said to be antipodal if its distance-`d`
graph is a disjoint union of cliques.
EXAMPLES::
sage: G = graphs.JohnsonGraph(10, 5)
sage: G.is_antipodal()
True
sage: H = G.folded_graph()
sage: H.is_antipodal()
False
REFERENCES:
See [BCN1989]_ p. 438 or [Sam2012]_ for this definition of antipodal
graphs.
TESTS::
sage: G = graphs.PetersenGraph()
sage: G.is_antipodal()
False
sage: G = graphs.HammingGraph(7, 2)
sage: G.is_antipodal()
True
sage: G = Graph([(0,1), (2, 3)])
sage: G.is_antipodal()
False
sage: G = Graph(4)
sage: G.is_antipodal()
True
sage: graphs.CompleteGraph(5).is_antipodal()
True
sage: G = Graph()
sage: G.is_antipodal()
Traceback (most recent call last):
...
ValueError: diameter is not defined for the empty graph
sage: G = Graph(1)
sage: G.is_antipodal()
True
"""
G = self.antipodal_graph()
vertexSet = set(G)
while vertexSet:
v = vertexSet.pop()
# all neighbours of v should be in the same clique as v
clique = set(G.neighbor_iterator(v, closed=True))
for u in clique:
if set(G.neighbor_iterator(u, closed=True)) != clique:
return False
vertexSet.difference_update(clique)
return True
@doc_index("Leftovers")
def folded_graph(self, check=False):
r"""
Return the antipodal fold of this graph.
Given an antipodal graph `G` let `G_d` be its distance-`d` graph.
Then the folded graph of `G` has a vertex for each maximal clique
of `G_d` and two cliques are adjacent if there is an edge in `G`
connecting the two.
.. SEEALSO::
:meth:`sage.graphs.graph.Graph.is_antipodal`
INPUT:
- ``check`` -- boolean (default: ``False``); whether to check if the
graph is antipodal. If ``check`` is ``True`` and the graph is not
antipodal, then return ``False``.
OUTPUT:
This function returns a new graph and ``self`` is not touched.
.. NOTE::
The input is expected to be an antipodal graph.
You can check that a graph is antipodal using
:meth:`sage.graphs.graph.Graph.is_antipodal`.
EXAMPLES::
sage: G = graphs.JohnsonGraph(10, 5)
sage: H = G.folded_graph(); H
Folded Johnson graph with parameters 10,5: Graph on 126 vertices
sage: Gd = G.distance_graph(G.diameter())
sage: all(i == 1 for i in Gd.degree())
True
sage: H.is_distance_regular(True)
([25, 16, None], [None, 1, 4])
This method doesn't check if the graph is antipodal::
sage: G = graphs.PetersenGraph()
sage: G.is_antipodal()
False
sage: G.folded_graph() # some garbage
Folded Petersen graph: Graph on 2 vertices
sage: G.folded_graph(check=True)
False
REFERENCES:
See [BCN1989]_ p. 438 or [Sam2012]_ for this definition of folded graph.
TESTS::
sage: G = Graph(5)
sage: G.folded_graph()
Folded Graph: Graph on 1 vertex
sage: G = graphs.CompleteGraph(5)
sage: G.folded_graph()
Folded Complete graph: Graph on 1 vertex
sage: G = Graph()
sage: G.folded_graph()
Traceback (most recent call last):
...
ValueError: diameter is not defined for the empty graph
sage: G = Graph(1)
sage: G.folded_graph()
Folded Graph: Graph on 1 vertex
"""
G = self.antipodal_graph()
vertices = set(G)
newVertices = []
while vertices:
v = vertices.pop()
clique = frozenset(G.neighbor_iterator(v, closed=True))
if check:
for u in clique:
if frozenset(G.neighbor_iterator(u, closed=True)) != clique:
return False
newVertices.append(clique)
vertices.difference_update(clique)
# now newVertices is a map {0, ..., numCliques-1} -> antipodal classes
numCliques = len(newVertices)
edges = []
for i, j in itertools.combinations(range(numCliques), 2):
if any(self.has_edge(u, v) for u, v in
itertools.product(newVertices[i], newVertices[j])):
edges.append((i, j))
H = Graph([range(numCliques), edges], format='vertices_and_edges')
name = self.name() if self.name() != "" else "Graph"
H.name(f"Folded {name}")
return H
@doc_index("Leftovers")
def antipodal_graph(self):
r"""
Return the antipodal graph of ``self``.
The antipodal graph of a graph `G` has the same vertex set as `G` and
two vertices are adjacent if their distance in `G` is equal to the
diameter of `G`.
OUTPUT:
A new graph. ``self`` is not touched.
EXAMPLES::
sage: G = graphs.JohnsonGraph(10, 5)
sage: G.antipodal_graph()
Antipodal graph of Johnson graph with parameters 10,5: Graph on 252 vertices
sage: G = graphs.HammingGraph(8, 2)
sage: G.antipodal_graph()
Antipodal graph of Hamming Graph with parameters 8,2: Graph on 256 vertices
The antipodal graph of a disconnected graph is its complement::
sage: G = Graph(5)
sage: H = G.antipodal_graph()
sage: H.is_isomorphic(G.complement())
True
TESTS::
sage: G = Graph([(0, 1), (2, 3)])
sage: H = G.antipodal_graph()
sage: H.is_isomorphic(Graph([(0, 2), (0, 3), (1, 2), (1, 3)]))
True
sage: G = Graph()
sage: G.antipodal_graph()
Traceback (most recent call last):
...
ValueError: diameter is not defined for the empty graph
sage: G = Graph(1)
sage: G.antipodal_graph()
Antipodal graph of Graph: Looped graph on 1 vertex
"""
H = self.distance_graph(self.diameter())
name = self.name() if self.name() != "" else "Graph"
H.name(f"Antipodal graph of {name}")
return H
@doc_index("Basic methods")
def bipartite_double(self, extended=False):
r"""
Return the (extended) bipartite double of this graph.
The bipartite double of a graph `G` has vertex set
`\{ (v,0), (v,1) : v \in G\}` and for any edge `(u, v)` in `G`
it has edges `((u,0),(v,1))` and `((u,1),(v,0))`.
Note that this is the tensor product of `G` with `K_2`.
The extended bipartite double of `G` is the bipartite double of
`G` together with all the edges `((v,0),(v,1))` for every vertex `v`.
INPUT:
- ``extended`` -- boolean (default: ``False``); whether to return the
extended bipartite double, or only the bipartite double (default)
OUTPUT:
A graph; ``self`` is left untouched.
EXAMPLES::
sage: G = graphs.PetersenGraph()
sage: H = G.bipartite_double()
sage: G == graphs.PetersenGraph() # G is left invariant
True
sage: H.order() == 2 * G.order()
True
sage: H.size() == 2 * G.size()
True
sage: H.is_bipartite()
True
sage: H.bipartite_sets() == (set([(v, 0) for v in G]),
....: set([(v, 1) for v in G]))
True
sage: H.is_isomorphic(G.tensor_product(graphs.CompleteGraph(2)))
True
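A small example one can check by hand: the bipartite double of `K_2` is a
perfect matching on four vertices, and its extended bipartite double is
the 4-cycle ::
sage: K2 = graphs.CompleteGraph(2)
sage: K2.bipartite_double().is_isomorphic(Graph([(0, 1), (2, 3)]))
True
sage: K2.bipartite_double(extended=True).is_isomorphic(graphs.CycleGraph(4))
True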
Behaviour with disconnected graphs::
sage: G1 = graphs.PetersenGraph()
sage: G2 = graphs.HoffmanGraph()
sage: G = G1.disjoint_union(G2)
sage: H = G.bipartite_double()
sage: H1 = G1.bipartite_double()
sage: H2 = G2.bipartite_double()
sage: H.is_isomorphic(H1.disjoint_union(H2))
True
.. SEEALSO::
:wikipedia:`Bipartite_double_cover`,
`WolframAlpha Bipartite Double
<https://mathworld.wolfram.com/BipartiteDoubleGraph.html>`_,
[VDKT2016]_ p. 20 for the extended bipartite double.
TESTS::
sage: G = graphs.PetersenGraph()
sage: H = G.bipartite_double(True)
sage: G == graphs.PetersenGraph() # G is left invariant
True
sage: H.order() == 2 * G.order()
True
sage: H.size() == 2 * G.size() + G.order()
True
sage: H.is_bipartite()
True
sage: H.bipartite_sets() == (set([(v, 0) for v in G]),
....: set([(v, 1) for v in G]))
True
sage: H.is_isomorphic(G.tensor_product(graphs.CompleteGraph(2)))
False
Test edge cases::
sage: G = Graph()
sage: H = G.bipartite_double()
sage: H.size() + H.order()
0
sage: H = G.bipartite_double(True)
sage: H.size() + H.order()
0
sage: G = Graph(1)
sage: H = G.bipartite_double()
sage: H.size() == 0 and H.order() == 2
True
sage: H = G.bipartite_double(True)
sage: H.is_isomorphic(Graph([(0, 1)]))
True
"""
G = self.tensor_product(Graph([(0, 1)]))
if extended:
G.add_edges(((v, 0), (v, 1)) for v in self)
prefix = "Extended " if extended else ""
G.name("%sBipartite Double of %s"%(prefix, self.name()))
return G
# Aliases to functions defined in other modules
from sage.graphs.weakly_chordal import is_long_hole_free, is_long_antihole_free, is_weakly_chordal
from sage.graphs.asteroidal_triples import is_asteroidal_triple_free
from sage.graphs.chrompoly import chromatic_polynomial
from sage.graphs.graph_decompositions.rankwidth import rank_decomposition
from sage.graphs.graph_decompositions.tree_decomposition import treewidth
from sage.graphs.graph_decompositions.vertex_separation import pathwidth
from sage.graphs.graph_decompositions.tree_decomposition import treelength
from sage.graphs.graph_decompositions.clique_separators import atoms_and_clique_separators
from sage.graphs.matchpoly import matching_polynomial
from sage.graphs.cliquer import all_max_clique as cliques_maximum
from sage.graphs.cliquer import all_cliques
from sage.graphs.spanning_tree import random_spanning_tree
from sage.graphs.spanning_tree import spanning_trees
from sage.graphs.graph_decompositions.graph_products import is_cartesian_product
from sage.graphs.distances_all_pairs import is_distance_regular
from sage.graphs.base.static_dense_graph import is_strongly_regular
from sage.graphs.line_graph import is_line_graph
from sage.graphs.tutte_polynomial import tutte_polynomial
from sage.graphs.lovasz_theta import lovasz_theta
from sage.graphs.partial_cube import is_partial_cube
from sage.graphs.orientations import strong_orientations_iterator, random_orientation
from sage.graphs.connectivity import bridges, cleave, spqr_tree
from sage.graphs.connectivity import is_triconnected
from sage.graphs.comparability import is_comparability
from sage.graphs.comparability import is_permutation
from sage.graphs.convexity_properties import geodetic_closure
from sage.graphs.domination import is_dominating
from sage.graphs.domination import is_redundant
from sage.graphs.domination import private_neighbors
from sage.graphs.domination import minimal_dominating_sets
from sage.graphs.traversals import (lex_M, maximum_cardinality_search,
maximum_cardinality_search_M)
from sage.graphs.isoperimetric_inequalities import cheeger_constant, edge_isoperimetric_number, vertex_isoperimetric_number
from sage.graphs.graph_coloring import fractional_chromatic_number
from sage.graphs.graph_coloring import fractional_chromatic_index
_additional_categories = {
"is_long_hole_free" : "Graph properties",
"is_long_antihole_free" : "Graph properties",
"is_weakly_chordal" : "Graph properties",
"is_asteroidal_triple_free" : "Graph properties",
"chromatic_polynomial" : "Coloring",
"rank_decomposition" : "Algorithmically hard stuff",
"treewidth" : "Algorithmically hard stuff",
"pathwidth" : "Algorithmically hard stuff",
"treelength" : "Algorithmically hard stuff",
"matching_polynomial" : "Algorithmically hard stuff",
"all_max_clique" : "Clique-related methods",
"cliques_maximum" : "Clique-related methods",
"all_cliques" : "Clique-related methods",
"atoms_and_clique_separators" : "Clique-related methods",
"random_spanning_tree" : "Connectivity, orientations, trees",
"spanning_trees" : "Connectivity, orientations, trees",
"is_cartesian_product" : "Graph properties",
"is_distance_regular" : "Graph properties",
"is_strongly_regular" : "Graph properties",
"is_line_graph" : "Graph properties",
"is_partial_cube" : "Graph properties",
"is_comparability" : "Graph properties",
"is_permutation" : "Graph properties",
"tutte_polynomial" : "Algorithmically hard stuff",
"lovasz_theta" : "Leftovers",
"strong_orientations_iterator" : "Connectivity, orientations, trees",
"random_orientation" : "Connectivity, orientations, trees",
"bridges" : "Connectivity, orientations, trees",
"cleave" : "Connectivity, orientations, trees",
"spqr_tree" : "Connectivity, orientations, trees",
"is_triconnected" : "Connectivity, orientations, trees",
"is_dominating" : "Domination",
"is_redundant" : "Domination",
"private_neighbors" : "Domination",
"minimal_dominating_sets" : "Domination",
"lex_M" : "Traversals",
"maximum_cardinality_search" : "Traversals",
"maximum_cardinality_search_M" : "Traversals",
"cheeger_constant" : "Expansion properties",
"edge_isoperimetric_number" : "Expansion properties",
"vertex_isoperimetric_number" : "Expansion properties",
"fractional_chromatic_number" : "Coloring",
"fractional_chromatic_index" : "Coloring",
"geodetic_closure" : "Leftovers"
}
__doc__ = __doc__.replace("{INDEX_OF_METHODS}",gen_thematic_rest_table_index(Graph,_additional_categories))
import itertools
from copy import copy
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.rings.integer import Integer
from sage.rings.integer_ring import ZZ
import sage.graphs.generic_graph_pyx as generic_graph_pyx
from sage.graphs.generic_graph import GenericGraph
from sage.graphs.digraph import DiGraph
from sage.graphs.independent_sets import IndependentSets
from sage.misc.rest_index_of_methods import doc_index, gen_thematic_rest_table_index
from sage.graphs.views import EdgesView
from sage.misc.lazy_import import lazy_import
from sage.features import PythonModule
lazy_import('sage.graphs.mcqd', ['mcqd'],
feature=PythonModule('sage.graphs.mcqd', spkg='mcqd'))
from sage.misc.decorators import rename_keyword
class Graph(GenericGraph):
_directed = False
def __init__(self, data=None, pos=None, loops=None, format=None,
weighted=None, data_structure="sparse",
vertex_labels=True, name=None,
multiedges=None, convert_empty_dict_labels_to_None=None,
sparse=True, immutable=False):
GenericGraph.__init__(self)
from sage.structure.element import is_Matrix
if sparse is False:
if data_structure != "sparse":
raise ValueError("The 'sparse' argument is an alias for "
"'data_structure'. Please do not define both.")
data_structure = "dense"
if multiedges or weighted:
if data_structure == "dense":
raise RuntimeError("Multiedge and weighted c_graphs must be sparse.")
if immutable:
data_structure = 'static_sparse'
from sage.graphs.base.sparse_graph import SparseGraphBackend
from sage.graphs.base.dense_graph import DenseGraphBackend
if data_structure in ["sparse", "static_sparse"]:
CGB = SparseGraphBackend
elif data_structure == "dense":
CGB = DenseGraphBackend
else:
raise ValueError("data_structure must be equal to 'sparse', "
"'static_sparse' or 'dense'")
self._backend = CGB(0, directed=False)
if format is None and isinstance(data, str):
if data.startswith(">>graph6<<"):
data = data[10:]
format = 'graph6'
elif data.startswith(">>sparse6<<"):
data = data[11:]
format = 'sparse6'
elif data[0] == ':':
format = 'sparse6'
else:
format = 'graph6'
if format is None and is_Matrix(data):
if data.is_symmetric():
format = 'adjacency_matrix'
else:
format = 'incidence_matrix'
if format is None and isinstance(data, Graph):
format = 'Graph'
from sage.graphs.all import DiGraph
if format is None and isinstance(data, DiGraph):
data = data.to_undirected()
format = 'Graph'
if (format is None and
isinstance(data, list) and
len(data) >= 2 and
callable(data[1])):
format = 'rule'
if (format is None and
isinstance(data, list) and
len(data) == 2 and
isinstance(data[0], list) and
((isinstance(data[1], list) and
(not data[1] or callable(getattr(data[1][0], "__iter__", None)))) or
(isinstance(data[1], EdgesView)))):
format = "vertices_and_edges"
if format is None and isinstance(data, dict):
if not data:
format = 'dict_of_dicts'
else:
val = next(iter(data.values()))
if isinstance(val, (list, EdgesView)):
format = 'dict_of_lists'
elif isinstance(val, dict):
format = 'dict_of_dicts'
if format is None and hasattr(data, 'adj'):
format = 'NX'
if (format is None and
hasattr(data, 'vcount') and
hasattr(data, 'get_edgelist')):
try:
import igraph
except ImportError:
raise ImportError("The data seems to be an igraph object, but "+
"igraph is not installed in Sage. To install "+
"it, run 'sage -i python_igraph'")
if format is None and isinstance(data, igraph.Graph):
format = 'igraph'
if format is None and isinstance(data, (int, Integer)):
format = 'int'
if format is None and data is None:
format = 'int'
data = 0
if format is None and isinstance(data, (list, EdgesView)):
format = "list_of_edges"
if weighted is None:
weighted = False
if format is None:
raise ValueError("This input cannot be turned into a graph")
if format == 'weighted_adjacency_matrix':
if weighted is False:
raise ValueError("Format was weighted_adjacency_matrix but weighted was False.")
if weighted is None:
weighted = True
if multiedges is None:
multiedges = False
format = 'adjacency_matrix'
if format == 'graph6':
if weighted is None:
weighted = False
self.allow_loops(loops if loops else False, check=False)
self.allow_multiple_edges(multiedges if multiedges else False, check=False)
from .graph_input import from_graph6
from_graph6(self, data)
elif format == 'sparse6':
if weighted is None:
weighted = False
self.allow_loops(False if loops is False else True, check=False)
self.allow_multiple_edges(False if multiedges is False else True, check=False)
from .graph_input import from_sparse6
from_sparse6(self, data)
elif format == 'adjacency_matrix':
from .graph_input import from_adjacency_matrix
from_adjacency_matrix(self, data, loops=loops, multiedges=multiedges, weighted=weighted)
elif format == 'incidence_matrix':
from .graph_input import from_incidence_matrix
from_incidence_matrix(self, data, loops=loops, multiedges=multiedges, weighted=weighted)
elif format == 'seidel_adjacency_matrix':
weighted = False
self.allow_loops(False)
self.allow_multiple_edges(False)
from .graph_input import from_seidel_adjacency_matrix
from_seidel_adjacency_matrix(self, data)
elif format == 'Graph':
if loops is None:
loops = data.allows_loops()
if multiedges is None:
multiedges = data.allows_multiple_edges()
if weighted is None:
weighted = data.weighted()
self.allow_loops(loops, check=False)
self.allow_multiple_edges(multiedges, check=False)
if data.get_pos() is not None:
pos = data.get_pos()
self.name(data.name())
self.set_vertices(data.get_vertices())
data._backend.subgraph_given_vertices(self._backend, data)
elif format == 'NX':
from sage.graphs.graph_input import from_networkx_graph
from_networkx_graph(self, data,
weighted=weighted, multiedges=multiedges, loops=loops,
convert_empty_dict_labels_to_None=convert_empty_dict_labels_to_None)
if weighted is None:
weighted = self.allows_multiple_edges()
elif format == 'igraph':
if data.is_directed():
raise ValueError("An *undirected* igraph graph was expected. "+
"To build an directed graph, call the DiGraph "+
"constructor.")
self.add_vertices(range(data.vcount()))
self.add_edges((e.source, e.target, e.attributes()) for e in data.es())
if vertex_labels and 'name' in data.vertex_attributes():
vs = data.vs()
self.relabel({v:vs[v]['name'] for v in self})
elif format == 'rule':
f = data[1]
verts = data[0]
if loops is None:
loops = any(f(v,v) for v in verts)
if weighted is None:
weighted = False
self.allow_loops(loops, check=False)
self.allow_multiple_edges(True if multiedges else False, check=False)
self.add_vertices(verts)
self.add_edges(e for e in itertools.combinations(verts,2) if f(*e))
if loops:
self.add_edges((v,v) for v in verts if f(v,v))
elif format == "vertices_and_edges":
self.allow_multiple_edges(bool(multiedges), check=False)
self.allow_loops(bool(loops), check=False)
self.add_vertices(data[0])
self.add_edges(data[1])
elif format == 'dict_of_dicts':
from .graph_input import from_dict_of_dicts
from_dict_of_dicts(self, data, loops=loops, multiedges=multiedges, weighted=weighted,
convert_empty_dict_labels_to_None = False if convert_empty_dict_labels_to_None is None else convert_empty_dict_labels_to_None)
elif format == 'dict_of_lists':
from .graph_input import from_dict_of_lists
from_dict_of_lists(self, data, loops=loops, multiedges=multiedges, weighted=weighted)
elif format == 'int':
self.allow_loops(loops if loops else False, check=False)
self.allow_multiple_edges(multiedges if multiedges else False, check=False)
if data < 0:
raise ValueError("The number of vertices cannot be strictly negative!")
if data:
self.add_vertices(range(data))
elif format == 'list_of_edges':
self.allow_multiple_edges(True if multiedges else False,
check=False)
self.allow_loops(True if loops else False, check=False)
self.add_edges(data)
else:
raise ValueError("Unknown input format '{}'".format(format))
if weighted is None:
weighted = False
self._weighted = getattr(self, '_weighted', weighted)
self._pos = copy(pos)
if format != 'Graph' or name is not None:
self.name(name)
if data_structure == "static_sparse":
from sage.graphs.base.static_sparse_backend import StaticSparseBackend
ib = StaticSparseBackend(self,
loops = self.allows_loops(),
multiedges = self.allows_multiple_edges())
self._backend = ib
self._immutable = True
asic methods")
def graph6_string(self):
n = self.order()
if n > 262143:
raise ValueError('graph6 format supports graphs on 0 to 262143 vertices only.')
elif self.has_loops() or self.has_multiple_edges():
raise ValueError('graph6 format supports only simple graphs (no loops, no multiple edges)')
else:
return generic_graph_pyx.small_integer_to_graph6(n) + generic_graph_pyx.binary_string_to_graph6(self._bit_vector())
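# Illustrative worked example (added sketch, not in the original file):
# for K_4 the order encodes as chr(63 + 4) == 'C' and the six
# upper-triangle bits 111111 encode as chr(63 + 63) == '~':
#   sage: graphs.CompleteGraph(4).graph6_string()
#   'C~'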
@doc_index("Basic methods")
def sparse6_string(self):
n = self.order()
if not n:
return ':?'
if n > 262143:
raise ValueError('sparse6 format supports graphs on 0 to 262143 vertices only.')
if n == 1:
s = '0' * self.size()
else:
try:
V = sorted(self)
except TypeError:
V = self
v_to_int = {v:i for i,v in enumerate(V)}
edges = [sorted((v_to_int[u], v_to_int[v])) for u,v in self.edge_iterator(labels=False)]
edges.sort(key=lambda e: (e[1], e[0]))
k = int((ZZ(n) - 1).nbits())
v = 0
i = 0
m = 0
s = ''
while m < len(edges):
if edges[m][1] > v + 1:
sp = generic_graph_pyx.int_to_binary_string(edges[m][1])
sp = '0'*(k-len(sp)) + sp
s += '1' + sp
v = edges[m][1]
elif edges[m][1] == v + 1:
sp = generic_graph_pyx.int_to_binary_string(edges[m][0])
sp = '0'*(k-len(sp)) + sp
s += '1' + sp
v += 1
m += 1
else:
sp = generic_graph_pyx.int_to_binary_string(edges[m][0])
sp = '0'*(k-len(sp)) + sp
s += '0' + sp
m += 1
# pad on the right to make a multiple of 6
s = s + ( '1' * ((6 - len(s))%6) )
# split into groups of 6, and convert numbers to decimal, adding 63
six_bits = ''
for i in range(0, len(s), 6):
six_bits += chr( int( s[i:i+6], 2) + 63 )
return ':' + generic_graph_pyx.small_integer_to_graph6(n) + six_bits
### Attributes
@doc_index("Basic methods")
def is_directed(self):
return False
### Properties
@doc_index("Graph properties")
def is_tree(self, certificate=False, output='vertex'):
if output not in ['vertex', 'edge']:
raise ValueError('output must be either vertex or edge')
if not self.order() or not self.is_connected():
return (False, None) if certificate else False
if certificate:
if self.order() == self.size() + 1:
return (True, None)
if self.allows_loops():
L = self.loop_edges() if output == 'edge' else self.loop_vertices()
if L:
return False, L[:1]
if self.has_multiple_edges():
if output == 'vertex':
return (False, list(self.multiple_edges(sort=True)[0][:2]))
edge1, edge2 = self.multiple_edges(sort=True)[:2]
if edge1[0] != edge2[0]:
return (False, [edge1, edge2])
return (False, [edge1, (edge2[1], edge2[0], edge2[2])])
if output == 'edge':
if self.allows_multiple_edges():
def vertices_to_edges(x):
return [(u[0], u[1], self.edge_label(u[0], u[1])[0])
for u in zip(x, x[1:] + [x[0]])]
else:
def vertices_to_edges(x):
return [(u[0], u[1], self.edge_label(u[0], u[1]))
for u in zip(x, x[1:] + [x[0]])]
# This code is a depth-first search that looks for a cycle in the
# graph. We *know* it exists as there are too many edges around.
seen = {}
u = next(self.vertex_iterator())
seen[u] = u
stack = [(u, v) for v in self.neighbor_iterator(u)]
while stack:
u, v = stack.pop()
if v in seen:
continue
for w in self.neighbor_iterator(v):
if u == w:
continue
elif w in seen:
cycle = [w, v]
while u != w:
cycle.append(u)
u = seen[u]
cycle.reverse()
if output == 'vertex':
return (False, cycle)
return (False, vertices_to_edges(cycle))
else:
stack.append((v, w))
seen[v] = u
else:
return self.order() == self.size() + 1
@doc_index("Graph properties")
def is_forest(self, certificate=False, output='vertex'):
connected_components = self.connected_components()
number_of_connected_components = len(connected_components)
isit = (self.order() ==
self.size() + number_of_connected_components)
if not certificate:
return isit
else:
if isit:
return (True, None)
# The graph contains a cycle, and the user wants to see it.
# No need to copy the graph
if number_of_connected_components == 1:
return self.is_tree(certificate=True, output=output)
# We try to find a cycle in each connected component
for cc in connected_components:
isit, cycle = self.subgraph(cc).is_tree(certificate=True, output=output)
if not isit:
return (False, cycle)
@doc_index("Graph properties")
def is_cactus(self):
self._scream_if_not_simple()
# Special cases
if self.order() < 4:
return True
if self.size() > 3 * (self.order() - 1) / 2:
return False
# Every cactus graph is outerplanar
if not self.is_circular_planar():
return False
if not self.is_connected():
return False
# the number of faces is 1 plus the number of blocks of order > 2
B = self.blocks_and_cut_vertices()[0]
return len(self.faces()) == sum(1 for b in B if len(b) > 2) + 1
@doc_index("Graph properties")
def is_biconnected(self):
if self.order() < 2 or not self.is_connected():
return False
if self.blocks_and_cut_vertices()[1]:
return False
return True
@doc_index("Graph properties")
def is_block_graph(self):
if not self.is_connected():
return False
if self.is_clique():
return True
B,C = self.blocks_and_cut_vertices()
return all(self.is_clique(vertices=block) for block in B)
@doc_index("Graph properties")
def is_cograph(self):
# A cograph has no 4-vertex path as an induced subgraph.
# We will first try to "decompose" graph by complements and
# split to connected components, and use fairly slow
# subgraph search if that fails.
self._scream_if_not_simple()
if self.order() < 4:
return True
if self.density()*2 > 1:
return self.complement().is_cograph()
if not self.is_connected():
return all(part.is_cograph() for part in self.connected_components_subgraphs())
P4 = Graph({0: [1], 1: [2], 2: [3]})
return self.subgraph_search(P4, induced=True) is None
@doc_index("Graph properties")
def is_apex(self):
# Easy cases: null graph, subgraphs of K_5 and K_3,3
if self.order() <= 5 or ( self.order() <= 6 and self.is_bipartite() ):
return True
return len(self.apex_vertices(k=1)) > 0
@doc_index("Graph properties")
def apex_vertices(self, k=None):
if k is None:
k = self.order()
elif k < 0:
raise ValueError("parameter k must be a non negative integer")
# Easy cases: null graph, subgraphs of K_5 and K_3,3
if self.order() <= 5 or (self.order() <= 6 and self.is_bipartite()):
it = self.vertex_iterator()
return [next(it) for _ in range(k)]
if not self.is_connected():
# We search for its non planar connected components. If it has more
# than one such component, the graph is not apex. It is apex if
# either it has no such component, in which case the graph is
# planar, or if its unique non planar component is apex.
P = [H for H in self.connected_components_subgraphs() if not H.is_planar()]
if not P: # The graph is planar
it = self.vertex_iterator()
return [next(it) for _ in range(k)]
elif len(P) > 1:
return []
else:
# We proceed with the non planar component
if P[0].is_immutable():
H = Graph(P[0].edges(labels=0, sort=False), immutable=False, loops=False, multiedges=False)
else:
H = P[0]
elif self.is_planar():
# A planar graph is apex.
it = self.vertex_iterator()
return [next(it) for _ in range(k)]
else:
# We make a basic copy of the graph since we will modify it
H = Graph(self.edges(labels=0, sort=False), immutable=False, loops=False, multiedges=False)
# General case: basic implementation
#
# Test for each vertex if its removal makes the graph planar.
# Obviously, we don't test vertices of degree one. Furthermore, if a
# vertex of degree 2 is an apex, then so are its two neighbors; hence
# the vertices are processed by increasing degree.
V = {}
for u in H:
d = H.degree(u)
if d > 1:
if d in V:
V[d].append(u)
else:
V[d] = [u]
apex = set()
for deg in sorted(V):
for u in V[deg]:
if u in apex:
if deg == 2:
apex.update(H.neighbor_iterator(u))
if len(apex) >= k:
return list(apex)[:k]
continue
E = H.edges_incident(u, labels=0)
H.delete_vertex(u)
if H.is_planar():
apex.add(u)
if deg == 2:
apex.update(self.neighbor_iterator(u))
if len(apex) >= k:
return list(apex)[:k]
H.add_edges(E)
return list(apex)
@doc_index("Graph properties")
def is_overfull(self):
return (self.order() % 2 == 1) and (2 * self.size() > max(self.degree()) * (self.order() - 1))
@doc_index("Graph properties")
def is_even_hole_free(self, certificate=False):
girth = self.girth()
if girth > self.order():
start = 4
elif not girth % 2:
if not certificate:
return False
start = girth
else:
start = girth + 1
from sage.graphs.generators.basic import CycleGraph
while start <= self.order():
subgraph = self.subgraph_search(CycleGraph(start), induced=True)
if subgraph is not None:
if certificate:
return subgraph
else:
return False
start += 2
return True
@doc_index("Graph properties")
def is_odd_hole_free(self, certificate=False):
girth = self.odd_girth()
if girth > self.order():
return True
if girth == 3:
start = 5
else:
if not certificate:
return False
start = girth
from sage.graphs.generators.basic import CycleGraph
while start <= self.order():
subgraph = self.subgraph_search(CycleGraph(start), induced=True)
if subgraph is not None:
if certificate:
return subgraph
else:
return False
start += 2
return True
@doc_index("Graph properties")
def is_triangle_free(self, algorithm='dense_graph', certificate=False):
if algorithm == 'dense_graph':
from sage.graphs.base.static_dense_graph import is_triangle_free
return is_triangle_free(self, certificate=certificate)
if algorithm == 'bitset':
if self.order() < 3:
return (True, []) if certificate else True
from sage.data_structures.bitset import Bitset
N = self.order()
vertex_to_int = {}
B = {}
for i, u in enumerate(self):
vertex_to_int[u] = i
B[u] = Bitset(capacity=N)
for u, v in self.edge_iterator(labels=None):
if u != v:
B[u].add(vertex_to_int[v])
B[v].add(vertex_to_int[u])
for u, v in self.edge_iterator(labels=None):
BB = B[u] & B[v]
if BB:
if certificate:
for w in self.neighbor_iterator(u):
if vertex_to_int[w] in BB:
return False, [u, v, w]
return False
return (True, []) if certificate else True
elif algorithm == 'matrix':
if self.order() < 3:
return True
return (self.adjacency_matrix()**3).trace() == 0
else:
raise ValueError("Algorithm '%s' not yet implemented. Please contribute." %(algorithm))
@doc_index("Graph properties")
def is_split(self):
self._scream_if_not_simple()
degree_sequence = [0] + sorted(self.degree(), reverse=True)
for i, d in enumerate(degree_sequence):
if d >= i - 1:
omega = i
else:
break
left = sum(degree_sequence[:omega + 1])
right = omega * (omega - 1) + sum(degree_sequence[omega + 1:])
return left == right
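# Illustrative check of the degree-sequence criterion above: a star is
# split (its center is a clique, the leaves an independent set), while
# C_4 admits no partition into a clique and an independent set:
#   sage: graphs.StarGraph(3).is_split()
#   True
#   sage: graphs.CycleGraph(4).is_split()
#   False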
@doc_index("Algorithmically hard stuff")
def is_perfect(self, certificate=False):
if self.has_multiple_edges() or self.has_loops():
raise ValueError("This method is only defined for simple graphs,"
" and yours is not one of them !")
if self.is_bipartite():
return True if not certificate else None
self_complement = self.complement()
self_complement.remove_loops()
self_complement.remove_multiple_edges()
if self_complement.is_bipartite():
return True if not certificate else None
answer = self.is_odd_hole_free(certificate=certificate)
if not (answer is True):
return answer
return self_complement.is_odd_hole_free(certificate=certificate)
@doc_index("Graph properties")
def is_edge_transitive(self):
from sage.libs.gap.libgap import libgap
if not self.size():
return True
A = self.automorphism_group()
e = next(self.edge_iterator(labels=False))
e = [A._domain_to_gap[e[0]], A._domain_to_gap[e[1]]]
e.sort()
return libgap(A).OrbitLength(e, libgap.OnSets) == self.size()
@doc_index("Graph properties")
def is_arc_transitive(self):
from sage.libs.gap.libgap import libgap
if not self.size():
return True
A = self.automorphism_group()
e = next(self.edge_iterator(labels=False))
e = [A._domain_to_gap[e[0]], A._domain_to_gap[e[1]]]
return libgap(A).OrbitLength(e,libgap.OnTuples) == 2*self.size()
@doc_index("Graph properties")
def is_half_transitive(self):
# A half-transitive graph always has only vertices of even degree
if any(d % 2 for d in self.degree_iterator()):
return False
return (self.is_edge_transitive() and
self.is_vertex_transitive() and
not self.is_arc_transitive())
@doc_index("Graph properties")
def is_semi_symmetric(self):
# A semi-symmetric graph is always bipartite
if not self.is_bipartite():
return False
return (self.is_regular() and
self.is_edge_transitive() and not
self.is_vertex_transitive())
@doc_index("Graph properties")
def is_path(self):
order = self.order()
if order != self.size() + 1:
return False
if order <= 1:
return order == 1
deg_one_counter = 0
seen_counter = 0
for v in self.depth_first_search(next(self.vertex_iterator())):
seen_counter += 1
deg = self._backend.degree(v, False)
if deg == 1:
deg_one_counter += 1
if deg_one_counter > 2:
return False
elif deg != 2:
return False
return deg_one_counter == 2 and seen_counter == order
@doc_index("Connectivity, orientations, trees")
def degree_constrained_subgraph(self, bounds, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
self._scream_if_not_simple()
from sage.numerical.mip import MixedIntegerLinearProgram, MIPSolverException
p = MixedIntegerLinearProgram(maximization=False, solver=solver)
b = p.new_variable(binary=True)
if isinstance(bounds,dict):
f_bounds = lambda x: bounds[x]
else:
f_bounds = bounds
if self.weighted():
from sage.rings.real_mpfr import RR
weight = lambda x: x if x in RR else 1
else:
weight = lambda x: 1
for v in self:
minimum,maximum = f_bounds(v)
p.add_constraint(p.sum(b[frozenset((x,y))]*weight(l) for x,y,l in self.edges_incident(v)),
min=minimum, max=maximum)
p.set_objective(p.sum(b[frozenset((x,y))]*weight(l) for x,y,l in self.edge_iterator()))
try:
p.solve(log=verbose)
except MIPSolverException:
return False
g = copy(self)
b = p.get_values(b, convert=bool, tolerance=integrality_tolerance)
g.delete_edges(e for e in g.edge_iterator(labels=False) if not b[frozenset(e)])
return g
### Orientations
@doc_index("Connectivity, orientations, trees")
def strong_orientation(self):
from sage.graphs.digraph import DiGraph
d = DiGraph(multiedges=self.allows_multiple_edges())
i = 0
# The algorithm works through a depth-first search. Any edge
# used in the depth-first search is oriented in the direction
# in which it has been used. All the other edges are oriented
# backward
v = next(self.vertex_iterator())
seen = {}
i = 1
# Time at which the vertices have been discovered
seen[v] = i
# indicates the stack of edges to explore
next_ = self.edges_incident(v)
while next_:
e = next_.pop()
# Ignore loops
if e[0] == e[1]:
continue
# We assume e[0] to be a `seen` vertex
e = e if seen.get(e[0], False) is not False else (e[1], e[0], e[2])
# If we discovered a new vertex
if seen.get(e[1], False) is False:
d.add_edge(e)
next_.extend(ee for ee in self.edges_incident(e[1])
if ((e[0],e[1]) != (ee[0],ee[1])) and ((e[0],e[1]) != (ee[1],ee[0])))
i += 1
seen[e[1]] = i
# Else, we orient the edges backward
else:
if seen[e[0]] < seen[e[1]]:
d.add_edge(e[1], e[0], e[2])
else:
d.add_edge(e)
# Case of multiple edges. If another edge has already been inserted, we
# add the new one in the opposite direction.
tmp = None
for e in self.multiple_edges():
if tmp == (e[0], e[1]):
if d.has_edge(e[0], e[1]):
d.add_edge(e[1], e[0], e[2])
else:
d.add_edge(e)
tmp = (e[0], e[1])
return d
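# Illustrative check (by Robbins' theorem a connected bridgeless graph
# always admits a strong orientation):
#   sage: graphs.CycleGraph(4).strong_orientation().is_strongly_connected()
#   True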
@doc_index("Connectivity, orientations, trees")
def minimum_outdegree_orientation(self, use_edge_labels=False, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
self._scream_if_not_simple()
if self.is_directed():
raise ValueError("Cannot compute an orientation of a DiGraph. "+\
"Please convert it to a Graph if you really mean it.")
if use_edge_labels:
from sage.rings.real_mpfr import RR
def weight(e):
l = self.edge_label(e)
return l if l in RR else 1
else:
def weight(e):
return 1
from sage.numerical.mip import MixedIntegerLinearProgram
p = MixedIntegerLinearProgram(maximization=False, solver=solver)
degree = p.new_variable(nonnegative=True)
# The orientation of an edge is boolean and indicates whether the edge
# uv goes from u to v ( equal to 0 ) or from v to u ( equal to 1)
orientation = p.new_variable(binary=True)
# Whether an edge adjacent to a vertex u counts positively or
# negatively. To do so, we first fix an arbitrary extremity per edge uv.
ext = {frozenset(e): e[0] for e in self.edge_iterator(labels=False)}
def outgoing(u, e, variable):
if u == ext[frozenset(e)]:
return variable
else:
return 1 - variable
for u in self:
p.add_constraint(p.sum(weight(e) * outgoing(u, e, orientation[frozenset(e)])
for e in self.edge_iterator(vertices=[u], labels=False))
- degree['max'], max=0)
p.set_objective(degree['max'])
p.solve(log=verbose)
orientation = p.get_values(orientation, convert=bool, tolerance=integrality_tolerance)
# All the edges from self are doubled in O
# ( one in each direction )
from sage.graphs.digraph import DiGraph
O = DiGraph(self)
# Builds the list of edges that should be removed
edges = []
for e in self.edge_iterator(labels=None):
if orientation[frozenset(e)]:
edges.append(e[::-1])
else:
edges.append(e)
O.delete_edges(edges)
return O
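# Illustrative check (assumes a MILP solver is available): orienting the
# triangle cyclically gives every vertex out-degree 1, which is optimal:
#   sage: graphs.CompleteGraph(3).minimum_outdegree_orientation().out_degree()
#   [1, 1, 1]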
@doc_index("Connectivity, orientations, trees")
def bounded_outdegree_orientation(self, bound, solver=None, verbose=False,
*, integrality_tolerance=1e-3):
self._scream_if_not_simple()
from sage.graphs.all import DiGraph
n = self.order()
if not n:
return DiGraph()
vertices = list(self)
vertices_id = {y: x for x,y in enumerate(vertices)}
b = {}
# Checking the input type. We make a dictionary out of it
if isinstance(bound, dict):
b = bound
else:
try:
b = dict(zip(vertices,map(bound, vertices)))
except TypeError:
b = dict(zip(vertices, [bound]*n))
d = DiGraph()
# Adding the edges (s,v) and ((u,v),t)
d.add_edges(('s', vertices_id[v], b[v]) for v in vertices)
d.add_edges(((vertices_id[u], vertices_id[v]), 't', 1)
for u,v in self.edges(labels=None) )
# each v is linked to its incident edges
for u,v in self.edge_iterator(labels=None):
u,v = vertices_id[u], vertices_id[v]
d.add_edge(u, (u,v), 1)
d.add_edge(v, (u,v), 1)
# Solving the maximum flow
value, flow = d.flow('s','t', value_only=False, integer=True,
use_edge_labels=True, solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance)
if value != self.size():
raise ValueError("No orientation exists for the given bound")
D = DiGraph()
D.add_vertices(vertices)
# The flow graph may not contain all the vertices, if they are
# not part of the flow...
for u in [x for x in range(n) if x in flow]:
for uu,vv in flow.neighbors_out(u):
v = vv if vv != u else uu
D.add_edge(vertices[u], vertices[v])
# I do not like when a method destroys the embedding ;-)
D.set_pos(self.get_pos())
return D
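# Illustrative check: with bound 1 the 4-cycle must be oriented
# cyclically (4 edges, 4 vertices, each out-degree at most 1):
#   sage: D = graphs.CycleGraph(4).bounded_outdegree_orientation(1)
#   sage: D.out_degree()
#   [1, 1, 1, 1]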
@doc_index("Connectivity, orientations, trees")
def orientations(self, data_structure=None, sparse=None):
if sparse is not None:
if data_structure is not None:
raise ValueError("cannot specify both 'sparse' and 'data_structure'")
data_structure = "sparse" if sparse else "dense"
if data_structure is None:
from sage.graphs.base.dense_graph import DenseGraphBackend
from sage.graphs.base.sparse_graph import SparseGraphBackend
if isinstance(self._backend, DenseGraphBackend):
data_structure = "dense"
elif isinstance(self._backend, SparseGraphBackend):
data_structure = "sparse"
else:
data_structure = "static_sparse"
name = self.name()
if name:
name = 'An orientation of ' + name
if not self.size():
D = DiGraph(data=[self.vertices(), []],
format='vertices_and_edges',
name=name,
pos=self._pos,
multiedges=self.allows_multiple_edges(),
loops=self.allows_loops(),
data_structure=data_structure)
if hasattr(self, '_embedding'):
D._embedding = copy(self._embedding)
yield D
return
E = [[(u,v,label), (v,u,label)] if u != v else [(u,v,label)]
for u,v,label in self.edge_iterator()]
verts = self.vertices()
for edges in itertools.product(*E):
D = DiGraph(data=[verts, edges],
format='vertices_and_edges',
name=name,
pos=self._pos,
multiedges=self.allows_multiple_edges(),
loops=self.allows_loops(),
data_structure=data_structure)
if hasattr(self, '_embedding'):
D._embedding = copy(self._embedding)
yield D
### Coloring
@doc_index("Basic methods")
def bipartite_color(self):
isit, certificate = self.is_bipartite(certificate=True)
if isit:
return certificate
else:
raise RuntimeError("Graph is not bipartite.")
@doc_index("Basic methods")
def bipartite_sets(self):
color = self.bipartite_color()
left = set()
right = set()
for u,s in color.items():
if s:
left.add(u)
else:
right.add(u)
return left, right
@doc_index("Coloring")
def chromatic_index(self, solver=None, verbose=0, *, integrality_tolerance=1e-3):
if not self.order() or not self.size():
return 0
from sage.graphs.graph_coloring import edge_coloring
return edge_coloring(self, value_only=True, solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance)
@doc_index("Coloring")
def chromatic_number(self, algorithm="DLX", solver=None, verbose=0,
*, integrality_tolerance=1e-3):
self._scream_if_not_simple(allow_multiple_edges=True)
# default built-in algorithm; bad performance
if algorithm == "DLX":
from sage.graphs.graph_coloring import chromatic_number
return chromatic_number(self)
# Algorithm with good performance, but requires an optional
# package: choose any of GLPK or CBC.
elif algorithm == "MILP":
from sage.graphs.graph_coloring import vertex_coloring
return vertex_coloring(self, value_only=True, solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance)
# another algorithm with bad performance; only good for small graphs
elif algorithm == "CP":
f = self.chromatic_polynomial()
i = 0
while not f(i):
i += 1
return i
else:
raise ValueError("The 'algorithm' keyword must be set to either 'DLX', 'MILP' or 'CP'.")
@doc_index("Coloring")
def coloring(self, algorithm="DLX", hex_colors=False, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
self._scream_if_not_simple(allow_multiple_edges=True)
if algorithm == "MILP":
from sage.graphs.graph_coloring import vertex_coloring
return vertex_coloring(self, hex_colors=hex_colors, solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance)
elif algorithm == "DLX":
from sage.graphs.graph_coloring import first_coloring
return first_coloring(self, hex_colors=hex_colors)
else:
raise ValueError("The 'algorithm' keyword must be set to either 'DLX' or 'MILP'.")
@doc_index("Coloring")
def chromatic_symmetric_function(self, R=None):
from sage.combinat.sf.sf import SymmetricFunctions
from sage.combinat.partition import _Partitions
from sage.misc.misc import powerset
if R is None:
R = ZZ
p = SymmetricFunctions(R).p()
ret = p.zero()
for F in powerset(self.edges()):
la = _Partitions(self.subgraph(edges=F).connected_components_sizes())
ret += (-1)**len(F) * p[la]
return ret
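# Illustrative check of the power-sum expansion computed above: a single
# edge has only two edge subsets, giving X = p[1, 1] - p[2]:
#   sage: Graph([(0, 1)]).chromatic_symmetric_function()
#   p[1, 1] - p[2]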
@doc_index("Coloring")
def chromatic_quasisymmetric_function(self, t=None, R=None):
from sage.combinat.ncsf_qsym.qsym import QuasiSymmetricFunctions
from sage.combinat.set_partition_ordered import OrderedSetPartitions
if t is None:
t = ZZ['t'].gen()
if R is None:
R = t.parent()
M = QuasiSymmetricFunctions(R).M()
ret = M.zero()
V = self.vertices()
def asc(sigma):
stat = 0
for i, s in enumerate(sigma):
for u in s:
stat += sum(1 for p in sigma[i+1:] for v in p
if v > u and self.has_edge(u, v))
return stat
for sigma in OrderedSetPartitions(V):
if any(not self.is_independent_set(s) for s in sigma):
continue
ret += M.term(sigma.to_composition(), t**asc(sigma))
return ret
@doc_index("Leftovers")
def matching(self, value_only=False, algorithm="Edmonds",
use_edge_labels=False, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
from sage.rings.real_mpfr import RR
def weight(x):
if x in RR:
return x
else:
return 1
W = {}
L = {}
for u,v,l in self.edge_iterator():
if u is v:
continue
fuv = frozenset((u, v))
if fuv not in L or ( use_edge_labels and W[fuv] < weight(l) ):
L[fuv] = l
if use_edge_labels:
W[fuv] = weight(l)
if algorithm == "Edmonds":
import networkx
g = networkx.Graph()
if use_edge_labels:
for (u, v),w in W.items():
g.add_edge(u, v, weight=w)
else:
for u, v in L:
g.add_edge(u, v)
d = networkx.max_weight_matching(g)
if value_only:
if use_edge_labels:
return sum(W[frozenset(e)] for e in d)
else:
return Integer(len(d))
else:
return [(u, v, L[frozenset((u, v))]) for u, v in d]
elif algorithm == "LP":
g = self
from sage.numerical.mip import MixedIntegerLinearProgram
# returns the weight of an edge considering it may not be
# weighted ...
p = MixedIntegerLinearProgram(maximization=True, solver=solver)
b = p.new_variable(binary=True)
if use_edge_labels:
p.set_objective(p.sum(w * b[fe] for fe,w in W.items()))
else:
p.set_objective(p.sum(b[fe] for fe in L))
# for any vertex v, there is at most one edge incident to v in
# the maximum matching
for v in g:
p.add_constraint(p.sum(b[frozenset(e)] for e in self.edge_iterator(vertices=[v], labels=False)
if e[0] != e[1]), max=1)
p.solve(log=verbose)
b = p.get_values(b, convert=bool, tolerance=integrality_tolerance)
if value_only:
if use_edge_labels:
return sum(w for fe, w in W.items() if b[fe])
else:
return Integer(sum(1 for fe in L if b[fe]))
else:
return [(u, v, L[frozenset((u, v))]) for u, v in L if b[frozenset((u, v))]]
else:
raise ValueError('algorithm must be set to either "Edmonds" or "LP"')
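# Illustrative check: a maximum matching of K_4 uses two edges:
#   sage: graphs.CompleteGraph(4).matching(value_only=True)
#   2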
@doc_index("Algorithmically hard stuff")
def has_homomorphism_to(self, H, core=False, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
self._scream_if_not_simple()
from sage.numerical.mip import MixedIntegerLinearProgram, MIPSolverException
p = MixedIntegerLinearProgram(solver=solver, maximization=False)
b = p.new_variable(binary=True)
# Each vertex has an image
for ug in self:
p.add_constraint(p.sum(b[ug,uh] for uh in H) == 1)
nonedges = H.complement().edges(labels=False)
for ug,vg in self.edges(labels=False):
# Two adjacent vertices cannot be mapped to the same element
for uh in H:
p.add_constraint(b[ug,uh] + b[vg,uh] <= 1)
# Two adjacent vertices cannot be mapped to no adjacent vertices
for uh,vh in nonedges:
p.add_constraint(b[ug,uh] + b[vg,vh] <= 1)
p.add_constraint(b[ug,vh] + b[vg,uh] <= 1)
# Minimize the mapping's size
if core:
m = p.new_variable(nonnegative=True)
for uh in H:
for ug in self:
p.add_constraint(b[ug,uh] <= m[uh])
p.set_objective(p.sum(m[vh] for vh in H))
try:
p.solve(log=verbose)
except MIPSolverException:
return False
b = p.get_values(b, convert=bool, tolerance=integrality_tolerance)
mapping = dict(x[0] for x in b.items() if x[1])
return mapping
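# Illustrative check (assumes a MILP solver): a homomorphism to K_2
# exists exactly when the graph is bipartite:
#   sage: bool(graphs.CycleGraph(4).has_homomorphism_to(graphs.CompleteGraph(2)))
#   True
#   sage: graphs.CycleGraph(5).has_homomorphism_to(graphs.CompleteGraph(2))
#   False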
@doc_index("Clique-related methods")
def fractional_clique_number(self, solver='PPL', verbose=0,
check_components=True, check_bipartite=True):
return self.fractional_chromatic_number(solver=solver, verbose=verbose,
check_components=check_components,
check_bipartite=check_bipartite)
@doc_index("Leftovers")
def maximum_average_degree(self, value_only=True, solver=None, verbose=0):
self._scream_if_not_simple()
g = self
from sage.numerical.mip import MixedIntegerLinearProgram
p = MixedIntegerLinearProgram(maximization=True, solver=solver)
d = p.new_variable(nonnegative=True)
one = p.new_variable(nonnegative=True)
for u,v in g.edge_iterator(labels=False):
fuv = frozenset((u, v))
p.add_constraint(one[fuv] - 2 * d[u], max=0)
p.add_constraint(one[fuv] - 2 * d[v], max=0)
p.add_constraint(p.sum(d[v] for v in g), max=1)
p.set_objective(p.sum(one[frozenset(uv)]
for uv in g.edge_iterator(labels=False)))
p.solve(log=verbose)
m = 1/(10 *Integer(g.order()))
d_val = p.get_values(d)
g_mad = g.subgraph(v for v,l in d_val.items() if l > m)
if value_only:
return g_mad.average_degree()
else:
return g_mad
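# Illustrative check (assumes an LP solver): for the path P_4 the densest
# subgraph is the whole path, so the maximum average degree is 6/4 = 3/2:
#   sage: graphs.PathGraph(4).maximum_average_degree()
#   3/2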
@doc_index("Algorithmically hard stuff")
def independent_set_of_representatives(self, family, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
from sage.numerical.mip import MixedIntegerLinearProgram
p = MixedIntegerLinearProgram(solver=solver)
vertex_taken = p.new_variable(binary=True)
classss = p.new_variable(binary=True)
lists = {v: [] for v in self}
for i,f in enumerate(family):
for v in f:
lists[v].append(i)
p.add_constraint(p.sum(classss[v,i] for v in f), max=1, min=1)
for v in self:
p.add_constraint(p.sum(classss[v,i] for i in lists[v]) - vertex_taken[v], max=0)
for u,v in self.edge_iterator(labels=None):
p.add_constraint(vertex_taken[u] + vertex_taken[v], max=1)
p.set_objective(None)
try:
p.solve(log=verbose)
except Exception:
return None
classss = p.get_values(classss, convert=bool, tolerance=integrality_tolerance)
repr = []
for i,f in enumerate(family):
for v in f:
if classss[v,i]:
repr.append(v)
break
return repr
@doc_index("Algorithmically hard stuff")
def minor(self, H, solver=None, verbose=0, *, integrality_tolerance=1e-3):
self._scream_if_not_simple()
H._scream_if_not_simple()
from sage.numerical.mip import MixedIntegerLinearProgram, MIPSolverException
p = MixedIntegerLinearProgram(solver=solver)
rs = p.new_variable(binary=True)
for v in self:
p.add_constraint(p.sum(rs[h,v] for h in H), max=1)
edges = p.new_variable(binary=True)
for u,v in self.edge_iterator(labels=None):
fuv = frozenset((u, v))
for h in H:
p.add_constraint(edges[h,fuv] - rs[h,u], max=0)
p.add_constraint(edges[h,fuv] - rs[h,v], max=0)
for h in H:
p.add_constraint( p.sum(edges[h,frozenset(e)] for e in self.edge_iterator(labels=None))
- p.sum(rs[h,v] for v in self), min=-1, max=-1)
epsilon = 1/(5*Integer(self.order()))
r_edges = p.new_variable(nonnegative=True)
for h in H:
for u,v in self.edge_iterator(labels=None):
p.add_constraint(r_edges[h,(u,v)] + r_edges[h,(v,u)] - edges[h,frozenset((u,v))], min=0)
for v in self:
p.add_constraint(p.sum(r_edges[h,(u,v)] for u in self.neighbor_iterator(v)), max=1-epsilon)
h_edges = p.new_variable(nonnegative=True)
for h1, h2 in H.edge_iterator(labels=None):
for v1, v2 in self.edge_iterator(labels=None):
fv1v2 = frozenset((v1, v2))
p.add_constraint(h_edges[(h1,h2),fv1v2] - rs[h2,v2], max=0)
p.add_constraint(h_edges[(h1,h2),fv1v2] - rs[h1,v1], max=0)
p.add_constraint(h_edges[(h2,h1),fv1v2] - rs[h1,v2], max=0)
p.add_constraint(h_edges[(h2,h1),fv1v2] - rs[h2,v1], max=0)
p.add_constraint(p.sum(h_edges[(h1,h2),frozenset(e)] + h_edges[(h2,h1),frozenset(e)]
for e in self.edge_iterator(labels=None)), min=1)
p.set_objective(None)
try:
p.solve(log=verbose)
except MIPSolverException:
raise ValueError("This graph has no minor isomorphic to H !")
rs = p.get_values(rs, convert=bool, tolerance=integrality_tolerance)
rs_dict = {}
for h in H:
rs_dict[h] = [v for v in self if rs[h,v]]
return rs_dict
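# Illustrative check (assumes a MILP solver; can be slow): contracting
# the five spokes of the Petersen graph yields K_5, so a K_5-minor exists
# and the result maps each vertex of H to its branch set:
#   sage: m = graphs.PetersenGraph().minor(graphs.CompleteGraph(5))  # long time
#   sage: len(m)  # long time
#   5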
ithmically hard stuff")
def convexity_properties(self):
from sage.graphs.convexity_properties import ConvexityProperties
return ConvexityProperties(self)
@doc_index("Distances")
def centrality_degree(self, v=None):
from sage.rings.integer import Integer
n_minus_one = Integer(self.order() - 1)
if n_minus_one == 0:
raise ValueError("the centrality degree is not defined "
"on graphs with only one vertex")
if v is None:
return {v: self.degree(v)/n_minus_one for v in self}
else:
return self.degree(v)/n_minus_one
nces")
def eccentricity(self, v=None, by_weight=False, algorithm=None,
weight_function=None, check_weight=True, dist_dict=None,
with_labels=False):
by_weight, weight_function = self._get_weight_function(by_weight=by_weight,
weight_function=weight_function,
check_weight=check_weight)
if algorithm is None:
if dist_dict is not None:
algorithm = 'From_Dictionary'
elif not by_weight:
algorithm = 'BFS'
elif any(float(weight_function(e)) < 0 for e in self.edge_iterator()):
algorithm = 'Johnson_Boost'
if algorithm is None:
algorithm = 'Dijkstra_Boost'
if algorithm in ['BFS', 'Floyd-Warshall-Cython']:
if by_weight:
raise ValueError("algorithm '{}' does not work with weights".format(algorithm))
weight_function = None
if v is not None:
if not isinstance(v, list):
v = [v]
v_set = set(v)
if v is None or all(u in v_set for u in self):
if v is None:
v = list(self)
# If we want to use BFS, we use the Cython routine
if algorithm == 'BFS':
from sage.graphs.distances_all_pairs import eccentricity
algo = 'bounds'
if with_labels:
return dict(zip(v, eccentricity(self, algorithm=algo, vertex_list=v)))
else:
return eccentricity(self, algorithm=algo,vertex_list=v)
if algorithm == 'DHV':
if by_weight:
from sage.graphs.base.boost_graph import eccentricity_DHV
if with_labels:
return dict(zip(v, eccentricity_DHV(self, vertex_list=v,
weight_function=weight_function,
check_weight=check_weight)))
else:
return eccentricity_DHV(self, vertex_list=v,
weight_function=weight_function,
check_weight=check_weight)
else:
from sage.graphs.distances_all_pairs import eccentricity
if with_labels:
return dict(zip(v, eccentricity(self, algorithm=algorithm,
vertex_list=v)))
else:
return eccentricity(self, algorithm=algorithm, vertex_list=v)
            if algorithm in ['Floyd-Warshall-Python', 'Floyd-Warshall-Cython', 'Johnson_Boost']:
                dist_dict = self.shortest_path_all_pairs(by_weight, algorithm,
                                                         weight_function,
                                                         check_weight)[0]
                algorithm = 'From_Dictionary'
        elif algorithm in ['Floyd-Warshall-Python', 'Floyd-Warshall-Cython', 'Johnson_Boost', 'DHV']:
            raise ValueError("algorithm '" + algorithm + "' works only if all"
                             " eccentricities are needed")
ecc = {}
from sage.rings.infinity import Infinity
for u in v:
if algorithm == 'From_Dictionary':
length = dist_dict[u]
else:
# If algorithm is wrong, the error is raised by the
# shortest_path_lengths function
length = self.shortest_path_lengths(u, by_weight=by_weight,
algorithm=algorithm,
weight_function=weight_function,
check_weight=check_weight)
if len(length) != self.num_verts():
ecc[u] = Infinity
else:
ecc[u] = max(length.values())
if with_labels:
return ecc
else:
if len(ecc) == 1:
# return single value
v, = ecc.values()
return v
return [ecc[u] for u in v]
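    # Usage sketch (hypothetical session): on the path P_4 the endpoints have
    # eccentricity 3 and the two middle vertices 2.
    #   sage: graphs.PathGraph(4).eccentricity(with_labels=True)
    #   {0: 3, 1: 2, 2: 2, 3: 3}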
@doc_index("Distances")
def radius(self, by_weight=False, algorithm='DHV', weight_function=None,
check_weight=True):
if not self.order():
raise ValueError("radius is not defined for the empty graph")
if not algorithm:
algorithm = 'DHV'
if algorithm == 'DHV':
by_weight, weight_function = self._get_weight_function(by_weight=by_weight,
weight_function=weight_function,
check_weight=check_weight)
if by_weight:
from sage.graphs.base.boost_graph import radius_DHV
return radius_DHV(self, weight_function=weight_function,
check_weight=False)
else:
from sage.graphs.distances_all_pairs import radius_DHV
return radius_DHV(self)
return min(self.eccentricity(v=None, by_weight=by_weight,
weight_function=weight_function,
check_weight=check_weight,
algorithm=algorithm))
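    # Usage sketch (hypothetical session): the radius is the smallest
    # eccentricity, so for P_4 it is 2.
    #   sage: graphs.PathGraph(4).radius()
    #   2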
@doc_index("Distances")
def diameter(self, by_weight=False, algorithm=None, weight_function=None,
check_weight=True):
if not self.order():
raise ValueError("diameter is not defined for the empty graph")
by_weight, weight_function = self._get_weight_function(by_weight=by_weight,
weight_function=weight_function,
check_weight=check_weight)
if not by_weight:
# We don't want the default weight function
weight_function = None
        if algorithm is None:
            if by_weight:
                algorithm = 'DHV'
            else:
                algorithm = 'iFUB'
elif algorithm == 'BFS':
algorithm = 'standard'
if algorithm == 'DHV':
if by_weight:
from sage.graphs.base.boost_graph import diameter_DHV
return diameter_DHV(self, weight_function=weight_function,
check_weight=False)
else:
from sage.graphs.distances_all_pairs import diameter
return diameter(self, algorithm=algorithm)
if algorithm in ['standard', '2sweep', 'multi-sweep', 'iFUB']:
if by_weight:
raise ValueError("algorithm '" + algorithm + "' does not work" +
" on weighted graphs")
from sage.graphs.distances_all_pairs import diameter
return diameter(self, algorithm=algorithm)
return max(self.eccentricity(v=list(self), by_weight=by_weight,
weight_function=weight_function,
check_weight=False,
algorithm=algorithm))
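    # Usage sketch (hypothetical session): the diameter is the largest
    # eccentricity, so for P_4 it is 3; with unit weights the weighted code
    # path should agree (possibly returning the value as a float).
    #   sage: g = graphs.PathGraph(4)
    #   sage: g.diameter()
    #   3
    #   sage: g.diameter(by_weight=True, weight_function=lambda e: 1)
    #   3.0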
@doc_index("Distances")
def center(self, by_weight=False, algorithm=None, weight_function=None,
check_weight=True):
ecc = self.eccentricity(v=list(self), by_weight=by_weight,
weight_function=weight_function,
algorithm=algorithm,
check_weight=check_weight,
with_labels=True)
try:
r = min(ecc.values())
except Exception:
return []
return [v for v in self if ecc[v] == r]
@doc_index("Distances")
def periphery(self, by_weight=False, algorithm=None, weight_function=None,
check_weight=True):
ecc = self.eccentricity(v=list(self), by_weight=by_weight,
weight_function=weight_function,
algorithm=algorithm,
check_weight=check_weight,
with_labels=True)
try:
d = max(ecc.values())
except Exception:
return []
return [v for v in self if ecc[v] == d]
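    # Usage sketch (hypothetical session): for P_4 the center is the two
    # middle vertices and the periphery the two endpoints.
    #   sage: g = graphs.PathGraph(4)
    #   sage: g.center(), g.periphery()
    #   ([1, 2], [0, 3])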
ds")
def to_directed(self, data_structure=None, sparse=None):
if sparse is not None:
if data_structure is not None:
raise ValueError("The 'sparse' argument is an alias for "
"'data_structure'. Please do not define both.")
data_structure = "sparse" if sparse else "dense"
if data_structure is None:
from sage.graphs.base.dense_graph import DenseGraphBackend
from sage.graphs.base.sparse_graph import SparseGraphBackend
if isinstance(self._backend, DenseGraphBackend):
data_structure = "dense"
elif isinstance(self._backend, SparseGraphBackend):
data_structure = "sparse"
else:
data_structure = "static_sparse"
from sage.graphs.all import DiGraph
        D = DiGraph(name=self.name(),
                    pos=self.get_pos(),
                    multiedges=self.allows_multiple_edges(),
                    loops=self.allows_loops(),
                    data_structure=(data_structure if data_structure != "static_sparse"
                                    else "sparse"))
        D.add_vertices(self.vertex_iterator())
        D.set_vertices(self.get_vertices())
        for u, v, l in self.edge_iterator():
            D.add_edge(u, v, l)
            D.add_edge(v, u, l)
if hasattr(self, '_embedding'):
D._embedding = copy(self._embedding)
D._weighted = self._weighted
if data_structure == "static_sparse":
D = D.copy(data_structure=data_structure)
return D
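    # Usage sketch (hypothetical session): every undirected edge becomes a
    # pair of opposite arcs, doubling the number of edges.
    #   sage: g = graphs.CycleGraph(4)
    #   sage: g.to_directed().size() == 2 * g.size()
    #   True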
@doc_index("Basic methods")
def to_undirected(self):
return self.copy()
@doc_index("Basic methods")
def join(self, other, labels="pairs", immutable=None):
G = self.disjoint_union(other, labels=labels, immutable=False)
if labels == "integers":
G.add_edges((u, v) for u in range(self.order())
for v in range(self.order(), self.order() + other.order()))
else:
G.add_edges(((0, u), (1, v)) for u in self for v in other)
        G.name('%s join %s' % (self.name(), other.name()))
if immutable is None:
immutable = self.is_immutable() and other.is_immutable()
if immutable:
G = G.copy(immutable=True)
return G
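    # Usage sketch (hypothetical session): joining two edgeless graphs on 2
    # and 3 vertices yields the complete bipartite graph K_{2,3}, hence
    # 2 * 3 = 6 edges.
    #   sage: Graph(2).join(Graph(3), labels="integers").size()
    #   6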
@doc_index("Leftovers")
def seidel_adjacency_matrix(self, vertices=None):
return - self.adjacency_matrix(sparse=False, vertices=vertices) \
+ self.complement().adjacency_matrix(sparse=False, vertices=vertices)
@doc_index("Leftovers")
def seidel_switching(self, s, inplace=True):
G = self if inplace else copy(self)
boundary = self.edge_boundary(s)
G.add_edges(itertools.product(s, set(self).difference(s)))
G.delete_edges(boundary)
if not inplace:
return G
@doc_index("Leftovers")
def twograph(self):
from sage.combinat.designs.twographs import TwoGraph
G = self.relabel(range(self.order()), inplace=False)
T = []
        for x, y, z in G.subgraph_search_iterator(Graph({1: [2, 3], 2: [3]})):
            if x < y < z:
                T.append([x, y, z])
        for x, y, z in G.subgraph_search_iterator(Graph({1: [2], 3: []}), induced=True):
            if x < y:
                T.append([x, y, z])
T = TwoGraph(T)
T.relabel({i: v for i,v in enumerate(self.vertices())})
return T
")
def write_to_eps(self, filename, **options):
from sage.graphs.print_graphs import print_graph_eps
pos = self.layout(**options)
[xmin, xmax, ymin, ymax] = self._layout_bounding_box(pos)
for v in pos:
pos[v] = (1.8*(pos[v][0] - xmin)/(xmax - xmin) - 0.9, 1.8*(pos[v][1] - ymin)/(ymax - ymin) - 0.9)
        if not filename.endswith('.eps'):
            filename += '.eps'
        with open(filename, 'w') as f:
            f.write(print_graph_eps(self.vertices(), self.edge_iterator(), pos))
@doc_index("Algorithmically hard stuff")
def topological_minor(self, H, vertices=False, paths=False, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
self._scream_if_not_simple()
H._scream_if_not_simple()
G = self
from sage.numerical.mip import MixedIntegerLinearProgram, MIPSolverException
p = MixedIntegerLinearProgram(solver=solver)
p.set_objective(None)
    @doc_index("Clique-related methods")
    def clique_maximum(self, algorithm="Cliquer", solver=None, verbose=0,
                       *, integrality_tolerance=1e-3):
self._scream_if_not_simple(allow_multiple_edges=True)
if algorithm == "Cliquer":
from sage.graphs.cliquer import max_clique
return max_clique(self)
elif algorithm == "MILP":
return self.complement().independent_set(algorithm=algorithm, solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance)
elif algorithm == "mcqd":
return mcqd(self)
else:
raise NotImplementedError("Only 'MILP', 'Cliquer' and 'mcqd' are supported.")
@doc_index("Clique-related methods")
def clique_number(self, algorithm="Cliquer", cliques=None, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
self._scream_if_not_simple(allow_loops=False)
if algorithm == "Cliquer":
from sage.graphs.cliquer import clique_number
return clique_number(self)
elif algorithm == "networkx":
import networkx
return networkx.graph_clique_number(self.networkx_graph(), cliques)
elif algorithm == "MILP":
return len(self.complement().independent_set(algorithm=algorithm, solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance))
elif algorithm == "mcqd":
return len(mcqd(self))
else:
raise NotImplementedError("Only 'networkx' 'MILP' 'Cliquer' and 'mcqd' are supported.")
@doc_index("Clique-related methods")
def cliques_number_of(self, vertices=None, cliques=None):
import networkx
return networkx.number_of_cliques(self.networkx_graph(), vertices, cliques)
@doc_index("Clique-related methods")
def cliques_get_max_clique_graph(self):
import networkx
return Graph(networkx.make_max_clique_graph(self.networkx_graph(), create_using=networkx.MultiGraph()),
multiedges=False)
@doc_index("Clique-related methods")
def cliques_get_clique_bipartite(self, **kwds):
from .bipartite_graph import BipartiteGraph
import networkx
return BipartiteGraph(networkx.make_clique_bipartite(self.networkx_graph(), **kwds))
@doc_index("Algorithmically hard stuff")
@rename_keyword(deprecation=32238, verbosity='verbose')
def independent_set(self, algorithm="Cliquer", value_only=False, reduction_rules=True,
solver=None, verbose=0, *, integrality_tolerance=1e-3):
my_cover = self.vertex_cover(algorithm=algorithm, value_only=value_only,
reduction_rules=reduction_rules,
solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance)
if value_only:
return self.order() - my_cover
else:
my_cover = set(my_cover)
return [u for u in self if u not in my_cover]
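    # Usage sketch (hypothetical session): a maximum independent set is the
    # complement of a minimum vertex cover; for the cycle C_6 it has size 3.
    #   sage: len(graphs.CycleGraph(6).independent_set())
    #   3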
@doc_index("Algorithmically hard stuff")
@rename_keyword(deprecation=32238, verbosity='verbose')
def vertex_cover(self, algorithm="Cliquer", value_only=False,
reduction_rules=True, solver=None, verbose=0,
*, integrality_tolerance=1e-3):
self._scream_if_not_simple(allow_multiple_edges=True)
        g = self
        ppset = []
        folded_vertices = []
        # the reduction rules iteratively strip vertices of degree at most two
        if reduction_rules:
            g = copy(self)
            degree_at_most_two = {u for u in g if g.degree(u) <= 2}
        else:
            degree_at_most_two = set()
        while degree_at_most_two:
u = degree_at_most_two.pop()
du = g.degree(u)
if not du:
g.delete_vertex(u)
elif du == 1:
v = next(g.neighbor_iterator(u))
ppset.append(v)
g.delete_vertex(u)
for w in g.neighbor_iterator(v):
if g.degree(w) <= 3:
degree_at_most_two.add(w)
g.delete_vertex(v)
degree_at_most_two.discard(v)
elif du == 2:
v,w = g.neighbors(u)
if g.has_edge(v, w):
ppset.append(v)
ppset.append(w)
g.delete_vertex(u)
neigh = set(g.neighbors(v) + g.neighbors(w)).difference([v, w])
g.delete_vertex(v)
g.delete_vertex(w)
for z in neigh:
if g.degree(z) <= 2:
degree_at_most_two.add(z)
else:
neigh = set(g.neighbors(v) + g.neighbors(w)).difference([u, v, w])
g.delete_vertex(v)
g.delete_vertex(w)
for z in neigh:
g.add_edge(u,z)
folded_vertices.append((u, v, w))
if g.degree(u) <= 2:
degree_at_most_two.add(u)
degree_at_most_two.discard(v)
degree_at_most_two.discard(w)
        if not g.order():
            # the reduction rules already produced a complete solution
            size_cover_g = 0
            cover_g = set()
        elif algorithm == "Cliquer" or algorithm == "mcqd":
            if g.has_multiple_edges() and not reduction_rules:
                g = copy(g)
                g.allow_multiple_edges(False)
independent = g.complement().clique_maximum(algorithm=algorithm)
if value_only:
size_cover_g = g.order() - len(independent)
else:
cover_g = set(uu for uu in g if uu not in independent)
elif algorithm == "MILP":
from sage.numerical.mip import MixedIntegerLinearProgram
p = MixedIntegerLinearProgram(maximization=False, solver=solver)
b = p.new_variable(binary=True)
p.set_objective(p.sum(b[v] for v in g))
for u,v in g.edge_iterator(labels=None):
p.add_constraint(b[u] + b[v], min=1)
p.solve(log=verbose)
b = p.get_values(b, convert=bool, tolerance=integrality_tolerance)
if value_only:
size_cover_g = sum(1 for v in g if b[v])
else:
cover_g = set(v for v in g if b[v])
else:
raise ValueError('the algorithm must be "Cliquer", "MILP" or "mcqd"')
        # undo the reduction rules to recover a solution for the original graph
        if value_only:
            return size_cover_g + len(ppset) + len(folded_vertices)
        cover_g.update(ppset)
        for u, v, w in reversed(folded_vertices):
            if u in cover_g:
                cover_g.discard(u)
                cover_g.add(v)
                cover_g.add(w)
            else:
                cover_g.add(u)
        return list(cover_g)
    @doc_index("Connectivity, orientations, trees")
    def ear_decomposition(self):
        if self.order() < 3:
            raise ValueError("ear decomposition is defined for graphs of order at least 3")
dfs_order = []
seen = set()
traversed = set()
parent = {next(self.vertex_iterator()): None}
value = {}
chains = []
def DFS(v):
seen.add(v)
dfs_order.append(v)
for u in self.neighbor_iterator(v):
if u not in seen:
parent[u] = v
DFS(u)
def traverse(start, pointer):
traversed.add(start)
chain = [start]
while True:
chain.append(pointer)
if pointer in traversed:
break
traversed.add(pointer)
pointer = parent[pointer]
chains.append(chain)
        for v in self:
            if v not in seen:
                DFS(v)
                value = {u: i for i, u in enumerate(dfs_order)}
                for u in dfs_order:
                    for neighbor in self.neighbor_iterator(u):
                        if value[u] < value[neighbor] and u != parent[neighbor]:
                            traverse(u, neighbor)
                dfs_order = []
        return chains
@doc_index("Clique-related methods")
def cliques_vertex_clique_number(self, algorithm="cliquer", vertices=None,
cliques=None):
if algorithm == "cliquer":
from sage.graphs.cliquer import clique_number
if vertices is None:
vertices = self
value = {}
for v in vertices:
value[v] = 1 + clique_number(self.subgraph(self.neighbors(v)))
return value
elif algorithm == "networkx":
import networkx
return networkx.node_clique_number(self.networkx_graph(), vertices, cliques)
else:
raise NotImplementedError("Only 'networkx' and 'cliquer' are supported.")
@doc_index("Clique-related methods")
def cliques_containing_vertex(self, vertices=None, cliques=None):
import networkx
return networkx.cliques_containing_node(self.networkx_graph(), vertices, cliques)
@doc_index("Clique-related methods")
def clique_complex(self):
if self.is_directed() or self.has_loops() or self.has_multiple_edges():
raise ValueError("Self must be an undirected simple graph to have a clique complex.")
import sage.topology.simplicial_complex
C = sage.topology.simplicial_complex.SimplicialComplex(self.cliques_maximal(), maximality_check=True)
C._graph = self
return C
@doc_index("Clique-related methods")
def clique_polynomial(self, t=None):
if t is None:
R = PolynomialRing(ZZ, 't')
t = R.gen()
number_of = [0]*(self.order() + 1)
for x in IndependentSets(self, complement=True):
number_of[len(x)] += 1
return sum(coeff*t**i for i,coeff in enumerate(number_of) if coeff)
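    # Usage sketch (hypothetical session): P_3 has one empty clique, three
    # vertices and two edges, so its clique polynomial is 2*t^2 + 3*t + 1.
    #   sage: graphs.PathGraph(3).clique_polynomial()
    #   2*t^2 + 3*t + 1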
def cores(self, k=None, with_labels=False):
self._scream_if_not_simple()
degrees = self.degree(labels=True)
verts = sorted(degrees.keys(), key=lambda x: degrees[x])
bin_boundaries = [0]
curr_degree = 0
for i,v in enumerate(verts):
if degrees[v] > curr_degree:
bin_boundaries.extend([i] * (degrees[v] - curr_degree))
curr_degree = degrees[v]
vert_pos = {v: pos for pos,v in enumerate(verts)}
core = degrees
nbrs = {v: set(self.neighbors(v)) for v in self}
for v in verts:
if k is not None and core[v] >= k:
return verts[:vert_pos[v]], verts[vert_pos[v]:]
for u in nbrs[v]:
if core[u] > core[v]:
nbrs[u].remove(v)
pos = vert_pos[u]
bin_start = bin_boundaries[core[u]]
vert_pos[u] = bin_start
vert_pos[verts[bin_start]] = pos
verts[bin_start],verts[pos] = verts[pos],verts[bin_start]
bin_boundaries[core[u]] += 1
core[u] -= 1
if k is not None:
return verts, []
if with_labels:
return core
else:
return list(core.values())
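    # Usage sketch (hypothetical session): in K_4 every vertex survives in
    # the 3-core, so all core numbers equal 3.
    #   sage: graphs.CompleteGraph(4).cores()
    #   [3, 3, 3, 3]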
@doc_index("Leftovers")
def modular_decomposition(self, algorithm='habib', style='tuple'):
from sage.graphs.graph_decompositions.modular_decomposition import (modular_decomposition,
NodeType,
habib_maurer_algorithm,
create_prime_node,
create_normal_node)
self._scream_if_not_simple()
if not self.order():
D = None
elif self.order() == 1:
D = create_prime_node()
D.children.append(create_normal_node(self.vertices()[0]))
else:
if algorithm == 'habib':
D = habib_maurer_algorithm(self)
elif algorithm == 'tedder':
D = modular_decomposition(self)
else:
raise ValueError("algorithm must be 'habib' or 'tedder'")
if style == 'tuple':
if D is None:
return tuple()
def relabel(x):
if x.node_type == NodeType.NORMAL:
return x.children[0]
else:
return x.node_type, [relabel(y) for y in x.children]
return relabel(D)
elif style == 'tree':
from sage.combinat.rooted_tree import LabelledRootedTree
if D is None:
return LabelledRootedTree([])
def to_tree(x):
if x.node_type == NodeType.NORMAL:
return LabelledRootedTree([], label=x.children[0])
else:
return LabelledRootedTree([to_tree(y) for y in x.children], label=x.node_type)
return to_tree(D)
else:
raise ValueError("style must be 'tuple' or 'tree'")
@doc_index("Graph properties")
def is_polyhedral(self):
return (not self.has_loops()
and not self.has_multiple_edges()
and self.vertex_connectivity(k=3)
and self.is_planar())
@doc_index("Graph properties")
def is_circumscribable(self, solver="ppl", verbose=0):
if not self.is_polyhedral():
raise NotImplementedError('this method only works for polyhedral graphs')
from sage.numerical.mip import MixedIntegerLinearProgram
from sage.numerical.mip import MIPSolverException
M = MixedIntegerLinearProgram(maximization=True, solver=solver)
e_var = M.new_variable(nonnegative=True)
c = M.new_variable()
M.set_min(c[0], -1)
M.set_max(c[0], 1)
M.set_objective(c[0])
for e in self.edge_iterator(labels=0):
fe = frozenset(e)
M.set_max(e_var[fe], ZZ(1)/ZZ(2))
M.add_constraint(e_var[fe] - c[0], min=0)
M.add_constraint(e_var[fe] + c[0], max=ZZ(1)/ZZ(2))
efaces = self.faces()
vfaces = set(frozenset([e[0] for e in face]) for face in efaces)
for edges in efaces:
M.add_constraint(M.sum(e_var[frozenset(e)] for e in edges) == 1)
D = self.to_directed()
inequality_constraints = set()
for cycle in D.all_simple_cycles():
if len(cycle) > 3:
scycle = frozenset(cycle)
if scycle not in vfaces:
edges = (frozenset((cycle[i], cycle[i+1])) for i in range(len(cycle)-1))
inequality_constraints.add(frozenset(edges))
for ieq in inequality_constraints:
M.add_constraint(M.sum(e_var[fe] for fe in ieq) - c[0] >= 1)
try:
solution = M.solve(log=verbose)
except MIPSolverException as msg:
if str(msg) == "PPL : There is no feasible solution":
return False
return solution > 0
@doc_index("Graph properties")
def is_inscribable(self, solver="ppl", verbose=0):
if not self.is_polyhedral():
raise NotImplementedError('this method only works for polyhedral graphs')
return self.planar_dual().is_circumscribable(solver=solver, verbose=verbose)
@doc_index("Graph properties")
def is_prime(self, algorithm='habib'):
from sage.graphs.graph_decompositions.modular_decomposition import NodeType
if self.order() <= 1:
return True
D = self.modular_decomposition(algorithm=algorithm)
return D[0] == NodeType.PRIME and len(D[1]) == self.order()
def _gomory_hu_tree(self, vertices, algorithm=None):
self._scream_if_not_simple()
if len(vertices) == 1:
g = Graph()
g.add_vertices(vertices)
return g
it = iter(vertices)
        u, v = next(it), next(it)
        flow, edges, [U, V] = self.edge_cut(u, v, use_edge_labels=True, vertices=True, algorithm=algorithm)
        gU, gV = self.subgraph(U, immutable=False), self.subgraph(V, immutable=False)
        fU = frozenset(U)
        fV = frozenset(V)
        from sage.rings.real_mpfr import RR
        for uu, vv, capacity in edges:
            capacity = capacity if capacity in RR else 1
            if uu in V:
                uu, vv = vv, uu
if not gU.has_edge(uu, fV):
gU.add_edge(uu, fV, 0)
if not gV.has_edge(vv, fU):
gV.add_edge(vv, fU, 0)
gU.set_edge_label(uu, fV, gU.edge_label(uu, fV) + capacity)
gV.set_edge_label(vv, fU, gV.edge_label(vv, fU) + capacity)
gU_tree = gU._gomory_hu_tree(vertices & frozenset(gU), algorithm=algorithm)
gV_tree = gV._gomory_hu_tree(vertices & frozenset(gV), algorithm=algorithm)
g = gU_tree.union(gV_tree)
g.add_edge(u, v, flow)
return g
@doc_index("Connectivity, orientations, trees")
def gomory_hu_tree(self, algorithm=None):
if not self.order():
return Graph()
if not self.is_connected():
g = Graph()
for cc in self.connected_components_subgraphs():
g = g.union(cc._gomory_hu_tree(frozenset(cc.vertex_iterator()), algorithm=algorithm))
else:
g = self._gomory_hu_tree(frozenset(self.vertex_iterator()), algorithm=algorithm)
if self.get_pos() is not None:
g.set_pos(dict(self.get_pos()))
return g
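    # Usage sketch (hypothetical session): a Gomory-Hu tree of K_4 is a tree
    # on the same vertices whose edge labels encode all-pairs minimum cuts;
    # in K_4 every minimum cut has value 3.
    #   sage: T = graphs.CompleteGraph(4).gomory_hu_tree()
    #   sage: T.is_tree(), min(T.edge_labels())
    #   (True, 3)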
@doc_index("Leftovers")
def two_factor_petersen(self, solver=None, verbose=0, *, integrality_tolerance=1e-3):
self._scream_if_not_simple()
d = self.eulerian_orientation()
g = Graph()
g.add_edges(((-1, u), (1, v)) for u, v in d.edge_iterator(labels=None))
from sage.graphs.graph_coloring import edge_coloring
classes = edge_coloring(g, solver=solver, verbose=verbose,
integrality_tolerance=integrality_tolerance)
classes_b = []
for c in classes:
classes_b.append([(u,v) for ((uu,u),(vv,v)) in c])
return classes_b
@doc_index("Leftovers")
def kirchhoff_symanzik_polynomial(self, name='t'):
from sage.matrix.constructor import matrix
from sage.rings.integer_ring import ZZ
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
edges = list(self.edges(sort=False))
cycles = self.cycle_basis(output='edge')
edge2int = {e: j for j, e in enumerate(edges)}
circuit_mtrx = matrix(ZZ, self.size(), len(cycles))
for i, cycle in enumerate(cycles):
for edge in cycle:
if edge in edges:
circuit_mtrx[edge2int[edge], i] = +1
else:
circuit_mtrx[edge2int[(edge[1], edge[0], edge[2])], i] = -1
D = matrix.diagonal(PolynomialRing(ZZ, name, self.size()).gens())
return (circuit_mtrx.transpose() * D * circuit_mtrx).determinant()
@doc_index("Leftovers")
def magnitude_function(self):
from sage.matrix.constructor import matrix
from sage.rings.polynomial.polynomial_ring_constructor import PolynomialRing
from sage.graphs.distances_all_pairs import distances_all_pairs
ring = PolynomialRing(ZZ, 'q')
q = ring.gen()
N = self.order()
if not N:
return ring.zero()
dist = distances_all_pairs(self)
vertices = list(self)
Z = matrix(ring, N, N, ring.zero())
for i in range(N):
Z[i, i] = ring.one()
for i in range(N):
for j in range(i):
dij = dist[vertices[i]][vertices[j]]
if dij in ZZ:
Z[i, j] = Z[j, i] = q ** dij
else:
Z[i, j] = Z[j, i] = ring.zero()
return sum(sum(u) for u in ~Z)
@doc_index("Leftovers")
def ihara_zeta_function_inverse(self):
from sage.matrix.constructor import matrix
H = self.subgraph(vertices=self.cores(k=2)[1])
E = list(H.edges(sort=False))
m = len(E)
T = matrix(ZZ, 2 * m, 2 * m, 0)
for i in range(m):
for j in range(m):
if i != j:
if E[i][1] == E[j][0]:
T[2 * i, 2 * j] = 1
T[2 * j + 1, 2 * i + 1] = 1
elif E[i][1] == E[j][1]:
T[2 * i, 2 * j + 1] = 1
T[2 * j, 2 * i + 1] = 1
elif E[i][0] == E[j][0]:
T[2 * i + 1, 2 * j] = 1
T[2 * j + 1, 2 * i] = 1
return T.charpoly('t').reverse()
@doc_index("Leftovers")
def perfect_matchings(self, labels=False):
if not self:
yield []
return
if self.order() % 2 or any(len(cc) % 2 for cc in self.connected_components()):
return
def rec(G):
if not G:
yield []
return
if G.order() % 2 == 0:
v = next(G.vertex_iterator())
Nv = list(G.neighbor_iterator(v))
G.delete_vertex(v)
for u in Nv:
Nu = list(G.neighbor_iterator(u))
G.delete_vertex(u)
for partial_matching in rec(G):
partial_matching.append((u, v))
yield partial_matching
G.add_vertex(u)
G.add_edges((u, nu) for nu in Nu)
G.add_vertex(v)
G.add_edges((v, nv) for nv in Nv)
G = self.copy(immutable=False)
G.allow_loops(False)
edges = {}
for e in G.edges(labels=labels):
f = frozenset(e[:2])
if f in edges:
edges[f].append(e)
else:
edges[f] = [e]
G.allow_multiple_edges(False)
for m in rec(G):
for pm in itertools.product(*[edges[frozenset(e)] for e in m]):
yield pm
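    # Usage sketch (hypothetical session): the 4-cycle has exactly two
    # perfect matchings, each consisting of two opposite edges.
    #   sage: sorted(len(m) for m in graphs.CycleGraph(4).perfect_matchings())
    #   [2, 2]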
@doc_index("Leftovers")
def has_perfect_matching(self, algorithm="Edmonds", solver=None, verbose=0,
*, integrality_tolerance=1e-3):
if self.order() % 2:
return False
if algorithm == "Edmonds":
return len(self) == 2*self.matching(value_only=True,
use_edge_labels=False,
algorithm="Edmonds")
elif algorithm == "LP_matching":
return len(self) == 2*self.matching(value_only=True,
use_edge_labels=False,
algorithm="LP",
solver=solver,
verbose=verbose,
integrality_tolerance=integrality_tolerance)
elif algorithm == "LP":
from sage.numerical.mip import MixedIntegerLinearProgram, MIPSolverException
p = MixedIntegerLinearProgram(solver=solver)
b = p.new_variable(binary=True)
for v in self:
edges = self.edges_incident(v, labels=False)
if not edges:
return False
p.add_constraint(p.sum(b[frozenset(e)] for e in edges) == 1)
try:
p.solve(log=verbose)
return True
except MIPSolverException:
return False
else:
raise ValueError('algorithm must be set to "Edmonds", "LP_matching" or "LP"')
@doc_index("Leftovers")
def effective_resistance(self, i, j):
from sage.matrix.constructor import matrix
if i not in self:
raise ValueError("vertex ({0}) is not a vertex of the graph".format(repr(i)))
elif j not in self:
raise ValueError("vertex ({0}) is not a vertex of the graph".format(repr(j)))
        if i == j:
return 0
self._scream_if_not_simple()
if not self.is_connected():
raise ValueError('the Graph is not a connected graph')
vert = list(self)
i1 = vert.index(i)
i2 = vert.index(j)
n = self.order()
L = self.laplacian_matrix(vertices=vert)
M = L.pseudoinverse()
Id = matrix.identity(n)
sigma = matrix(Id[i1] - Id[i2])
diff = sigma * M * sigma.transpose()
return diff[0, 0]
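    # Usage sketch (hypothetical session): across one edge of C_4 the
    # effective resistance is one unit resistor in parallel with three in
    # series, i.e. (1*3)/(1+3) = 3/4.
    #   sage: graphs.CycleGraph(4).effective_resistance(0, 1)
    #   3/4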
@doc_index("Leftovers")
def effective_resistance_matrix(self, vertices=None, nonedgesonly=True):
from sage.matrix.constructor import matrix
from sage.rings.rational_field import QQ
n = self.order()
if not n:
raise ValueError('unable to compute effective resistance for an empty Graph object')
if vertices is None:
vertices = self.vertices()
self._scream_if_not_simple()
if not self.is_connected():
raise ValueError('the Graph is not a connected graph')
L = self.laplacian_matrix(vertices=vertices)
M = L.pseudoinverse()
d = matrix(M.diagonal()).transpose()
onesvec = matrix(QQ, n, 1, lambda i, j: 1)
S = d * onesvec.transpose() + onesvec * d.transpose() - 2 * M
onesmat = matrix(QQ, n, n, lambda i, j: 1)
if nonedgesonly:
B = onesmat - self.adjacency_matrix(vertices=vertices) - matrix.identity(n)
S = S.elementwise_product(B)
return S
@doc_index("Leftovers")
def least_effective_resistance(self, nonedgesonly=True):
n = self.order()
if not n:
raise ValueError('unable to compute least resistance on empty Graph')
self._scream_if_not_simple()
if not self.is_connected():
raise ValueError('the Graph is not a connected graph')
if nonedgesonly and self.is_clique():
return []
verts = list(self)
verttoidx = {u: i for i, u in enumerate(verts)}
S = self.effective_resistance_matrix(vertices=verts, nonedgesonly=nonedgesonly)
if nonedgesonly:
edges = self.complement().edges(labels=False)
else:
edges = [(verts[i], verts[j]) for i in range(n) for j in range(i + 1, n)]
rmin = min(S[(verttoidx[e[0]], verttoidx[e[1]])] for e in edges)
return [e for e in edges if S[(verttoidx[e[0]], verttoidx[e[1]])] == rmin]
@doc_index("Leftovers")
def common_neighbors_matrix(self, vertices=None, nonedgesonly=True):
self._scream_if_not_simple()
if vertices is None:
vertices = self.vertices()
A = self.adjacency_matrix(vertices=vertices)
M = A**2
for v in range(self.order()):
M[v, v] = 0
if nonedgesonly:
for w in range(v + 1, self.order()):
if A[v, w]:
M[v, w] = M[w, v] = 0
return M
@doc_index("Leftovers")
def most_common_neighbors(self, nonedgesonly=True):
self._scream_if_not_simple()
if self.num_verts() < 2:
raise ValueError('this method is defined for graphs with at least 2 vertices')
verts = list(self)
M = self.common_neighbors_matrix(vertices=verts, nonedgesonly=nonedgesonly)
output = []
coefficients = M.coefficients()
if coefficients:
maximum = max(coefficients)
for v in range(self.num_verts()):
for w in range(v + 1, self.num_verts()):
if M[v, w] == maximum:
output.append((verts[v], verts[w]))
return output
@doc_index("Leftovers")
def arboricity(self, certificate=False):
from sage.matroids.constructor import Matroid
P = Matroid(self).partition()
if certificate:
return (len(P), [self.subgraph(edges=forest) for forest in P])
else:
return len(P)
@doc_index("Graph properties")
def is_antipodal(self):
G = self.antipodal_graph()
vertexSet = set(G)
while vertexSet:
v = vertexSet.pop()
clique = set(G.neighbor_iterator(v, closed=True))
for u in clique:
if set(G.neighbor_iterator(u, closed=True)) != clique:
return False
vertexSet.difference_update(clique)
return True
@doc_index("Leftovers")
def folded_graph(self, check=False):
G = self.antipodal_graph()
vertices = set(G)
newVertices = []
while vertices:
v = vertices.pop()
clique = frozenset(G.neighbor_iterator(v, closed=True))
if check:
for u in clique:
if frozenset(G.neighbor_iterator(u, closed=True)) != clique:
return False
newVertices.append(clique)
vertices.difference_update(clique)
numCliques = len(newVertices)
edges = []
for i, j in itertools.combinations(range(numCliques), 2):
if any(self.has_edge(u, v) for u, v in
itertools.product(newVertices[i], newVertices[j])):
edges.append((i, j))
H = Graph([range(numCliques), edges], format='vertices_and_edges')
name = self.name() if self.name() != "" else "Graph"
H.name(f"Folded {name}")
return H
@doc_index("Leftovers")
def antipodal_graph(self):
H = self.distance_graph(self.diameter())
name = self.name() if self.name() != "" else "Graph"
H.name(f"Antipodal graph of {name}")
return H
@doc_index("Basic methods")
def bipartite_double(self, extended=False):
G = self.tensor_product(Graph([(0, 1)]))
if extended:
G.add_edges(((v, 0), (v, 1)) for v in self)
prefix = "Extended " if extended else ""
G.name("%sBipartite Double of %s"%(prefix, self.name()))
return G
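    # Usage sketch (hypothetical session): the bipartite double of an odd
    # cycle is a single cycle of twice the length.
    #   sage: graphs.CycleGraph(5).bipartite_double().is_isomorphic(graphs.CycleGraph(10))
    #   True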
from sage.graphs.weakly_chordal import is_long_hole_free, is_long_antihole_free, is_weakly_chordal
from sage.graphs.asteroidal_triples import is_asteroidal_triple_free
from sage.graphs.chrompoly import chromatic_polynomial
from sage.graphs.graph_decompositions.rankwidth import rank_decomposition
from sage.graphs.graph_decompositions.tree_decomposition import treewidth
from sage.graphs.graph_decompositions.vertex_separation import pathwidth
from sage.graphs.graph_decompositions.tree_decomposition import treelength
from sage.graphs.graph_decompositions.clique_separators import atoms_and_clique_separators
from sage.graphs.matchpoly import matching_polynomial
from sage.graphs.cliquer import all_max_clique as cliques_maximum
from sage.graphs.cliquer import all_cliques
from sage.graphs.spanning_tree import random_spanning_tree
from sage.graphs.spanning_tree import spanning_trees
from sage.graphs.graph_decompositions.graph_products import is_cartesian_product
from sage.graphs.distances_all_pairs import is_distance_regular
from sage.graphs.base.static_dense_graph import is_strongly_regular
from sage.graphs.line_graph import is_line_graph
from sage.graphs.tutte_polynomial import tutte_polynomial
from sage.graphs.lovasz_theta import lovasz_theta
from sage.graphs.partial_cube import is_partial_cube
from sage.graphs.orientations import strong_orientations_iterator, random_orientation
from sage.graphs.connectivity import bridges, cleave, spqr_tree
from sage.graphs.connectivity import is_triconnected
from sage.graphs.comparability import is_comparability
from sage.graphs.comparability import is_permutation
from sage.graphs.convexity_properties import geodetic_closure
from sage.graphs.domination import is_dominating
from sage.graphs.domination import is_redundant
from sage.graphs.domination import private_neighbors
from sage.graphs.domination import minimal_dominating_sets
from sage.graphs.traversals import (lex_M, maximum_cardinality_search,
maximum_cardinality_search_M)
from sage.graphs.isoperimetric_inequalities import cheeger_constant, edge_isoperimetric_number, vertex_isoperimetric_number
from sage.graphs.graph_coloring import fractional_chromatic_number
from sage.graphs.graph_coloring import fractional_chromatic_index
_additional_categories = {
"is_long_hole_free" : "Graph properties",
"is_long_antihole_free" : "Graph properties",
"is_weakly_chordal" : "Graph properties",
"is_asteroidal_triple_free" : "Graph properties",
"chromatic_polynomial" : "Coloring",
"rank_decomposition" : "Algorithmically hard stuff",
"treewidth" : "Algorithmically hard stuff",
"pathwidth" : "Algorithmically hard stuff",
"treelength" : "Algorithmically hard stuff",
"matching_polynomial" : "Algorithmically hard stuff",
"all_max_clique" : "Clique-related methods",
"cliques_maximum" : "Clique-related methods",
"all_cliques" : "Clique-related methods",
"atoms_and_clique_separators" : "Clique-related methods",
"random_spanning_tree" : "Connectivity, orientations, trees",
"spanning_trees" : "Connectivity, orientations, trees",
"is_cartesian_product" : "Graph properties",
"is_distance_regular" : "Graph properties",
"is_strongly_regular" : "Graph properties",
"is_line_graph" : "Graph properties",
"is_partial_cube" : "Graph properties",
"is_comparability" : "Graph properties",
"is_permutation" : "Graph properties",
"tutte_polynomial" : "Algorithmically hard stuff",
"lovasz_theta" : "Leftovers",
"strong_orientations_iterator" : "Connectivity, orientations, trees",
"random_orientation" : "Connectivity, orientations, trees",
"bridges" : "Connectivity, orientations, trees",
"cleave" : "Connectivity, orientations, trees",
"spqr_tree" : "Connectivity, orientations, trees",
"is_triconnected" : "Connectivity, orientations, trees",
"is_dominating" : "Domination",
"is_redundant" : "Domination",
"private_neighbors" : "Domination",
"minimal_dominating_sets" : "Domination",
"lex_M" : "Traversals",
"maximum_cardinality_search" : "Traversals",
"maximum_cardinality_search_M" : "Traversals",
"cheeger_constant" : "Expansion properties",
"edge_isoperimetric_number" : "Expansion properties",
"vertex_isoperimetric_number" : "Expansion properties",
"fractional_chromatic_number" : "Coloring",
"fractional_chromatic_index" : "Coloring",
"geodetic_closure" : "Leftovers"
}
__doc__ = __doc__.replace("{INDEX_OF_METHODS}",gen_thematic_rest_table_index(Graph,_additional_categories))
| true
| true
|
790804c1eadbae957866f5c47caf26a4baebcb69
| 2,111
|
py
|
Python
|
integration_tests/test_update_ranges.py
|
FlexiGroBots-H2020/datacube-ows
|
8e3e1343582c00ae46b498247ac98d8e98bd000f
|
[
"Apache-2.0"
] | 4
|
2017-11-02T04:22:30.000Z
|
2018-05-01T14:16:23.000Z
|
integration_tests/test_update_ranges.py
|
FlexiGroBots-H2020/datacube-ows
|
8e3e1343582c00ae46b498247ac98d8e98bd000f
|
[
"Apache-2.0"
] | 33
|
2018-05-23T01:32:06.000Z
|
2018-11-05T01:07:09.000Z
|
integration_tests/test_update_ranges.py
|
FlexiGroBots-H2020/datacube-ows
|
8e3e1343582c00ae46b498247ac98d8e98bd000f
|
[
"Apache-2.0"
] | 7
|
2017-10-09T00:09:44.000Z
|
2018-07-27T00:41:19.000Z
|
# This file is part of datacube-ows, part of the Open Data Cube project.
# See https://opendatacube.org for more information.
#
# Copyright (c) 2017-2021 OWS Contributors
# SPDX-License-Identifier: Apache-2.0
"""Test update ranges on DB using Click testing
https://click.palletsprojects.com/en/7.x/testing/
"""
from datacube_ows.update_ranges_impl import main
def test_updates_ranges_schema(runner, role_name):
result = runner.invoke(main, ["--schema", "--role", role_name])
assert "Cannot find SQL resource" not in result.output
assert result.exit_code == 0
def test_update_ranges_views(runner):
result = runner.invoke(main, ["--views"])
assert "Cannot find SQL resource" not in result.output
assert result.exit_code == 0
def test_update_version(runner):
result = runner.invoke(main, ["--version"])
assert "Open Data Cube Open Web Services (datacube-ows) version" in result.output
assert result.exit_code == 0
def test_update_ranges_product(runner, product_name):
result = runner.invoke(main, [product_name])
assert "ERROR" not in result.output
assert result.exit_code == 0
def test_update_ranges_bad_product(runner, product_name):
result = runner.invoke(main, ["not_a_real_product_name"])
assert "not_a_real_product_name" in result.output
assert "Unrecognised product name" in result.output
assert result.exit_code == 1
def test_update_ranges(runner):
result = runner.invoke(main)
assert "ERROR" not in result.output
assert result.exit_code == 0
def test_update_ranges_misuse_cases(runner, role_name, product_name):
result = runner.invoke(main, ["--schema"])
assert "Sorry" in result.output
assert result.exit_code == 1
result = runner.invoke(main, ["--role", role_name])
assert "Sorry" in result.output
assert result.exit_code == 1
result = runner.invoke(main, ["--views", product_name])
assert "Sorry" in result.output
assert result.exit_code == 1
result = runner.invoke(main, ["--schema", product_name])
assert "Sorry" in result.output
assert result.exit_code == 1
| 32.476923
| 85
| 0.721933
|
from datacube_ows.update_ranges_impl import main
def test_updates_ranges_schema(runner, role_name):
result = runner.invoke(main, ["--schema", "--role", role_name])
assert "Cannot find SQL resource" not in result.output
assert result.exit_code == 0
def test_update_ranges_views(runner):
result = runner.invoke(main, ["--views"])
assert "Cannot find SQL resource" not in result.output
assert result.exit_code == 0
def test_update_version(runner):
result = runner.invoke(main, ["--version"])
assert "Open Data Cube Open Web Services (datacube-ows) version" in result.output
assert result.exit_code == 0
def test_update_ranges_product(runner, product_name):
result = runner.invoke(main, [product_name])
assert "ERROR" not in result.output
assert result.exit_code == 0
def test_update_ranges_bad_product(runner, product_name):
result = runner.invoke(main, ["not_a_real_product_name"])
assert "not_a_real_product_name" in result.output
assert "Unrecognised product name" in result.output
assert result.exit_code == 1
def test_update_ranges(runner):
result = runner.invoke(main)
assert "ERROR" not in result.output
assert result.exit_code == 0
def test_update_ranges_misuse_cases(runner, role_name, product_name):
result = runner.invoke(main, ["--schema"])
assert "Sorry" in result.output
assert result.exit_code == 1
result = runner.invoke(main, ["--role", role_name])
assert "Sorry" in result.output
assert result.exit_code == 1
result = runner.invoke(main, ["--views", product_name])
assert "Sorry" in result.output
assert result.exit_code == 1
result = runner.invoke(main, ["--schema", product_name])
assert "Sorry" in result.output
assert result.exit_code == 1
| true
| true
|
790804e9fda8749313f013440a5d152a18eb296b
| 397
|
py
|
Python
|
tests/test_calvestbr.py
|
IsaacHiguchi/calvestbr
|
ebf702e9e67299c822a6cc21cad60b247446fcfa
|
[
"MIT"
] | null | null | null |
tests/test_calvestbr.py
|
IsaacHiguchi/calvestbr
|
ebf702e9e67299c822a6cc21cad60b247446fcfa
|
[
"MIT"
] | null | null | null |
tests/test_calvestbr.py
|
IsaacHiguchi/calvestbr
|
ebf702e9e67299c822a6cc21cad60b247446fcfa
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Tests for `calvestbr` package."""
import unittest
from calvestbr import calvestbr
class TestCalvestbr(unittest.TestCase):
"""Tests for `calvestbr` package."""
def setUp(self):
"""Set up test fixtures, if any."""
def tearDown(self):
"""Tear down test fixtures, if any."""
def test_000_something(self):
"""Test something."""
| 18.045455
| 46
| 0.632242
|
import unittest
from calvestbr import calvestbr
class TestCalvestbr(unittest.TestCase):
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_000_something(self):
        pass
| true
| true
|
790804f388e72af2f2b67453edb3a003c8e8aa74
| 576
|
py
|
Python
|
test/rules/test_fires_child.py
|
rileyhazard/SmartVA-Analyze-1
|
0573eeff27d03f54e7506db4f1631c0cd9f54bbb
|
[
"MIT"
] | 4
|
2019-01-23T12:57:47.000Z
|
2020-04-18T17:13:08.000Z
|
test/rules/test_fires_child.py
|
rileyhazard/SmartVA-Analyze-1
|
0573eeff27d03f54e7506db4f1631c0cd9f54bbb
|
[
"MIT"
] | 4
|
2019-01-09T22:10:07.000Z
|
2022-02-16T04:57:06.000Z
|
test/rules/test_fires_child.py
|
rileyhazard/SmartVA-Analyze-1
|
0573eeff27d03f54e7506db4f1631c0cd9f54bbb
|
[
"MIT"
] | 11
|
2018-12-11T22:01:13.000Z
|
2022-01-07T11:38:02.000Z
|
from smartva.rules import fires_child as fires
from smartva.data.constants import *
VA = Child
def test_pass():
row = {
VA.BURN: YES,
VA.INJURY_DAYS: 0,
}
assert fires.logic_rule(row) is True
def test_fail_fires():
row = {
VA.BURN: NO,
VA.INJURY_DAYS: 0,
}
assert fires.logic_rule(row) is False
def test_fail_days():
row = {
VA.BURN: YES,
VA.INJURY_DAYS: 31,
}
assert fires.logic_rule(row) is False
def test_fail_no_data():
row = {}
assert fires.logic_rule(row) is False
| 15.157895
| 46
| 0.604167
|
from smartva.rules import fires_child as fires
from smartva.data.constants import *
VA = Child
def test_pass():
row = {
VA.BURN: YES,
VA.INJURY_DAYS: 0,
}
assert fires.logic_rule(row) is True
def test_fail_fires():
row = {
VA.BURN: NO,
VA.INJURY_DAYS: 0,
}
assert fires.logic_rule(row) is False
def test_fail_days():
row = {
VA.BURN: YES,
VA.INJURY_DAYS: 31,
}
assert fires.logic_rule(row) is False
def test_fail_no_data():
row = {}
assert fires.logic_rule(row) is False
| true
| true
|
79080530baf43a4ccb2acf223fe275c811cda025
| 8,136
|
py
|
Python
|
myvenv/lib/python3.5/site-packages/psycopg2/pool.py
|
tuvapp/tuvappcom
|
5ca2be19f4b0c86a1d4a9553711a4da9d3f32841
|
[
"MIT"
] | 1
|
2019-01-10T16:43:38.000Z
|
2019-01-10T16:43:38.000Z
|
myvenv/lib/python3.5/site-packages/psycopg2/pool.py
|
tuvapp/tuvappcom
|
5ca2be19f4b0c86a1d4a9553711a4da9d3f32841
|
[
"MIT"
] | 7
|
2020-06-05T18:33:09.000Z
|
2021-09-20T23:07:52.000Z
|
myvenv/lib/python3.5/site-packages/psycopg2/pool.py
|
tuvapp/tuvappcom
|
5ca2be19f4b0c86a1d4a9553711a4da9d3f32841
|
[
"MIT"
] | null | null | null |
"""Connection pooling for psycopg2
This module implements thread-safe (and not) connection pools.
"""
# psycopg/pool.py - pooling code for psycopg
#
# Copyright (C) 2003-2010 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import psycopg2
import psycopg2.extensions as _ext
class PoolError(psycopg2.Error):
pass
class AbstractConnectionPool(object):
"""Generic key-based pooling code."""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the connection pool.
New 'minconn' connections are created immediately calling 'connfunc'
with given parameters. The connection pool will support a maximum of
about 'maxconn' connections.
"""
self.minconn = int(minconn)
self.maxconn = int(maxconn)
self.closed = False
self._args = args
self._kwargs = kwargs
self._pool = []
self._used = {}
self._rused = {} # id(conn) -> key map
self._keys = 0
for i in range(self.minconn):
self._connect()
def _connect(self, key=None):
"""Create a new connection and assign it to 'key' if not None."""
conn = psycopg2.connect(*self._args, **self._kwargs)
if key is not None:
self._used[key] = conn
self._rused[id(conn)] = key
else:
self._pool.append(conn)
return conn
def _getkey(self):
"""Return a new unique key."""
self._keys += 1
return self._keys
def _getconn(self, key=None):
"""Get a free connection and assign it to 'key' if not None."""
if self.closed: raise PoolError("connection pool is closed")
if key is None: key = self._getkey()
if key in self._used:
return self._used[key]
if self._pool:
self._used[key] = conn = self._pool.pop()
self._rused[id(conn)] = key
return conn
else:
if len(self._used) == self.maxconn:
raise PoolError("connection pool exhausted")
return self._connect(key)
def _putconn(self, conn, key=None, close=False):
"""Put away a connection."""
if self.closed: raise PoolError("connection pool is closed")
if key is None: key = self._rused.get(id(conn))
if not key:
raise PoolError("trying to put unkeyed connection")
if len(self._pool) < self.minconn and not close:
# Return the connection into a consistent state before putting
# it back into the pool
if not conn.closed:
status = conn.get_transaction_status()
if status == _ext.TRANSACTION_STATUS_UNKNOWN:
# server connection lost
conn.close()
elif status != _ext.TRANSACTION_STATUS_IDLE:
# connection in error or in transaction
conn.rollback()
self._pool.append(conn)
else:
# regular idle connection
self._pool.append(conn)
# If the connection is closed, we just discard it.
else:
conn.close()
# here we check for the presence of key because it can happen that a
# thread tries to put back a connection after a call to close
if not self.closed or key in self._used:
del self._used[key]
del self._rused[id(conn)]
def _closeall(self):
"""Close all connections.
Note that this can lead to some code fail badly when trying to use
an already closed connection. If you call .closeall() make sure
your code can deal with it.
"""
if self.closed: raise PoolError("connection pool is closed")
for conn in self._pool + list(self._used.values()):
try:
conn.close()
except:
pass
self.closed = True
class SimpleConnectionPool(AbstractConnectionPool):
"""A connection pool that can't be shared across different threads."""
getconn = AbstractConnectionPool._getconn
putconn = AbstractConnectionPool._putconn
closeall = AbstractConnectionPool._closeall
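# Usage sketch for SimpleConnectionPool (hypothetical connection values, not
# taken from this module): acquire a connection, use it, and return it so it
# can be reused by later getconn() calls.
#     pool = SimpleConnectionPool(1, 5, dbname="mydb", user="me")
#     conn = pool.getconn()
#     try:
#         cur = conn.cursor()
#         cur.execute("SELECT 1")
#         conn.commit()
#     finally:
#         pool.putconn(conn)
#     pool.closeall()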
class ThreadedConnectionPool(AbstractConnectionPool):
"""A connection pool that works with the threading module."""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the threading lock."""
import threading
AbstractConnectionPool.__init__(
self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
def getconn(self, key=None):
"""Get a free connection and assign it to 'key' if not None."""
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release()
def putconn(self, conn=None, key=None, close=False):
"""Put away an unused connection."""
self._lock.acquire()
try:
self._putconn(conn, key, close)
finally:
self._lock.release()
def closeall(self):
"""Close all connections (even the one currently in use.)"""
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release()
class PersistentConnectionPool(AbstractConnectionPool):
"""A pool that assigns persistent connections to different threads.
Note that this connection pool generates by itself the required keys
using the current thread id. This means that until a thread puts away
a connection it will always get the same connection object by successive
`!getconn()` calls. This also means that a thread can't use more than one
single connection from the pool.
"""
def __init__(self, minconn, maxconn, *args, **kwargs):
"""Initialize the threading lock."""
import warnings
warnings.warn("deprecated: use ZPsycopgDA.pool implementation",
DeprecationWarning)
import threading
AbstractConnectionPool.__init__(
self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
        # we'll need the thread module to determine thread ids, so we
# import it here and copy it in an instance variable
import _thread as _thread # work around for 2to3 bug - see ticket #348
self.__thread = _thread
def getconn(self):
"""Generate thread id and return a connection."""
key = self.__thread.get_ident()
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release()
def putconn(self, conn=None, close=False):
"""Put away an unused connection."""
key = self.__thread.get_ident()
self._lock.acquire()
try:
if not conn: conn = self._used[key]
self._putconn(conn, key, close)
finally:
self._lock.release()
def closeall(self):
"""Close all connections (even the one currently in use.)"""
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release()
| 34.474576
| 78
| 0.615044
|
import psycopg2
import psycopg2.extensions as _ext
class PoolError(psycopg2.Error):
pass
class AbstractConnectionPool(object):
def __init__(self, minconn, maxconn, *args, **kwargs):
self.minconn = int(minconn)
self.maxconn = int(maxconn)
self.closed = False
self._args = args
self._kwargs = kwargs
self._pool = []
self._used = {}
self._rused = {}
self._keys = 0
for i in range(self.minconn):
self._connect()
def _connect(self, key=None):
conn = psycopg2.connect(*self._args, **self._kwargs)
if key is not None:
self._used[key] = conn
self._rused[id(conn)] = key
else:
self._pool.append(conn)
return conn
def _getkey(self):
self._keys += 1
return self._keys
def _getconn(self, key=None):
if self.closed: raise PoolError("connection pool is closed")
if key is None: key = self._getkey()
if key in self._used:
return self._used[key]
if self._pool:
self._used[key] = conn = self._pool.pop()
self._rused[id(conn)] = key
return conn
else:
if len(self._used) == self.maxconn:
raise PoolError("connection pool exhausted")
return self._connect(key)
def _putconn(self, conn, key=None, close=False):
if self.closed: raise PoolError("connection pool is closed")
if key is None: key = self._rused.get(id(conn))
if not key:
raise PoolError("trying to put unkeyed connection")
if len(self._pool) < self.minconn and not close:
if not conn.closed:
status = conn.get_transaction_status()
if status == _ext.TRANSACTION_STATUS_UNKNOWN:
conn.close()
elif status != _ext.TRANSACTION_STATUS_IDLE:
conn.rollback()
self._pool.append(conn)
else:
self._pool.append(conn)
else:
conn.close()
if not self.closed or key in self._used:
del self._used[key]
del self._rused[id(conn)]
def _closeall(self):
if self.closed: raise PoolError("connection pool is closed")
for conn in self._pool + list(self._used.values()):
try:
conn.close()
except:
pass
self.closed = True
class SimpleConnectionPool(AbstractConnectionPool):
getconn = AbstractConnectionPool._getconn
putconn = AbstractConnectionPool._putconn
closeall = AbstractConnectionPool._closeall
class ThreadedConnectionPool(AbstractConnectionPool):
def __init__(self, minconn, maxconn, *args, **kwargs):
import threading
AbstractConnectionPool.__init__(
self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
def getconn(self, key=None):
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release()
def putconn(self, conn=None, key=None, close=False):
self._lock.acquire()
try:
self._putconn(conn, key, close)
finally:
self._lock.release()
def closeall(self):
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release()
class PersistentConnectionPool(AbstractConnectionPool):
def __init__(self, minconn, maxconn, *args, **kwargs):
import warnings
warnings.warn("deprecated: use ZPsycopgDA.pool implementation",
DeprecationWarning)
import threading
AbstractConnectionPool.__init__(
self, minconn, maxconn, *args, **kwargs)
self._lock = threading.Lock()
# import it here and copy it in an instance variable
import _thread as _thread # work around for 2to3 bug - see ticket #348
self.__thread = _thread
def getconn(self):
key = self.__thread.get_ident()
self._lock.acquire()
try:
return self._getconn(key)
finally:
self._lock.release()
def putconn(self, conn=None, close=False):
key = self.__thread.get_ident()
self._lock.acquire()
try:
if not conn: conn = self._used[key]
self._putconn(conn, key, close)
finally:
self._lock.release()
def closeall(self):
self._lock.acquire()
try:
self._closeall()
finally:
self._lock.release()
| true
| true
|
79080633244efcc19454c598305cafbf94d51929
| 35,911
|
py
|
Python
|
vertica_python/vertica/connection.py
|
uber/vertica-python
|
bd28d2dc473a017daa92933f7864bab7346f8b14
|
[
"Apache-2.0"
] | 183
|
2015-01-20T14:57:22.000Z
|
2018-08-09T21:13:19.000Z
|
vertica_python/vertica/connection.py
|
uber/vertica-python
|
bd28d2dc473a017daa92933f7864bab7346f8b14
|
[
"Apache-2.0"
] | 139
|
2015-01-09T18:37:53.000Z
|
2018-08-13T07:09:26.000Z
|
vertica_python/vertica/connection.py
|
uber/vertica-python
|
bd28d2dc473a017daa92933f7864bab7346f8b14
|
[
"Apache-2.0"
] | 110
|
2015-03-02T15:46:11.000Z
|
2018-07-27T15:50:29.000Z
|
# Copyright (c) 2018-2022 Micro Focus or one of its affiliates.
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) 2013-2017 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import print_function, division, absolute_import
import base64
import logging
import socket
import ssl
import getpass
import uuid
from struct import unpack
from collections import deque, namedtuple
import random
# noinspection PyCompatibility,PyUnresolvedReferences
from six import raise_from, string_types, integer_types, PY2
if PY2:
from urlparse import urlparse, parse_qs
else:
from urllib.parse import urlparse, parse_qs
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Dict, Literal, Optional, Type, Union
from typing_extensions import Self
import vertica_python
from .. import errors
from ..vertica import messages
from ..vertica.cursor import Cursor
from ..vertica.messages.message import BackendMessage, FrontendMessage
from ..vertica.messages.frontend_messages import CancelRequest
from ..vertica.log import VerticaLogging
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 5433
DEFAULT_PASSWORD = ''
DEFAULT_AUTOCOMMIT = False
DEFAULT_BACKUP_SERVER_NODE = []
DEFAULT_KRB_SERVICE_NAME = 'vertica'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_PATH = 'vertica_python.log'
try:
DEFAULT_USER = getpass.getuser()
except Exception as e:
DEFAULT_USER = None
print("WARN: Cannot get the login user name: {}".format(str(e)))
def connect(**kwargs):
# type: (Any) -> Connection
"""Opens a new connection to a Vertica database."""
return Connection(kwargs)
def parse_dsn(dsn):
"""Parse connection string into a dictionary of keywords and values.
Connection string format:
vertica://<user>:<password>@<host>:<port>/<database>?k1=v1&k2=v2&...
"""
url = urlparse(dsn)
if url.scheme != 'vertica':
raise ValueError("Only vertica:// scheme is supported.")
# Ignore blank/invalid values
result = {k: v for k, v in (
('host', url.hostname),
('port', url.port),
('user', url.username),
('password', url.password),
('database', url.path[1:])) if v
}
for key, values in parse_qs(url.query, keep_blank_values=True).items():
# Try to get the last non-blank value in the list of values for each key
for i in reversed(range(len(values))):
value = values[i]
if value != '':
break
if value == '' and key != 'log_path':
# blank values are to be ignored
continue
elif key == 'backup_server_node':
continue
elif key in ('connection_load_balance', 'use_prepared_statements',
'disable_copy_local', 'ssl', 'autocommit'):
lower = value.lower()
if lower in ('true', 'on', '1'):
result[key] = True
elif lower in ('false', 'off', '0'):
result[key] = False
elif key == 'connection_timeout':
result[key] = float(value)
elif key == 'log_level' and value.isdigit():
result[key] = int(value)
else:
result[key] = value
return result
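# Sketch of parse_dsn on a hypothetical connection string (illustrative
# values only):
#     parse_dsn('vertica://alice:pw@db.example.com:5433/mydb?connection_timeout=5.0')
#     # -> {'host': 'db.example.com', 'port': 5433, 'user': 'alice',
#     #     'password': 'pw', 'database': 'mydb', 'connection_timeout': 5.0}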
_AddressEntry = namedtuple('_AddressEntry', ['host', 'resolved', 'data'])
class _AddressList(object):
def __init__(self, host, port, backup_nodes, logger):
"""Creates a new deque with the primary host first, followed by any backup hosts"""
self._logger = logger
# Items in address_deque are _AddressEntry values.
# host is the original hostname/ip, used by SSL option check_hostname
# - when resolved is False, data is port
# - when resolved is True, data is the 5-tuple from socket.getaddrinfo
# This allows for lazy resolution. Seek peek() for more.
self.address_deque = deque()
# load primary host into address_deque
self._append(host, port)
# load backup nodes into address_deque
if not isinstance(backup_nodes, list):
err_msg = 'Connection option "backup_server_node" must be a list'
self._logger.error(err_msg)
raise TypeError(err_msg)
# Each item in backup_nodes should be either
# a host name or IP address string (using default port) or
# a (host, port) tuple
for node in backup_nodes:
if isinstance(node, string_types):
self._append(node, DEFAULT_PORT)
elif isinstance(node, tuple) and len(node) == 2:
self._append(node[0], node[1])
else:
err_msg = ('Each item of connection option "backup_server_node"'
' must be a host string or a (host, port) tuple')
self._logger.error(err_msg)
raise TypeError(err_msg)
self._logger.debug('Address list: {0}'.format(list(self.address_deque)))
def _append(self, host, port):
if not isinstance(host, string_types):
err_msg = 'Host must be a string: invalid value: {0}'.format(host)
self._logger.error(err_msg)
raise TypeError(err_msg)
if not isinstance(port, (string_types, integer_types)):
err_msg = 'Port must be an integer or a string: invalid value: {0}'.format(port)
self._logger.error(err_msg)
raise TypeError(err_msg)
elif isinstance(port, string_types):
try:
port = int(port)
except ValueError as e:
err_msg = 'Port "{0}" is not a valid string: {1}'.format(port, e)
self._logger.error(err_msg)
raise ValueError(err_msg)
if port < 0 or port > 65535:
err_msg = 'Invalid port number: {0}'.format(port)
self._logger.error(err_msg)
raise ValueError(err_msg)
self.address_deque.append(_AddressEntry(host=host, resolved=False, data=port))
def push(self, host, port):
self.address_deque.appendleft(_AddressEntry(host=host, resolved=False, data=port))
def pop(self):
self.address_deque.popleft()
def peek(self):
# do lazy DNS resolution, returning the leftmost socket.getaddrinfo result
if len(self.address_deque) == 0:
return None
while len(self.address_deque) > 0:
self._logger.debug('Peek at address list: {0}'.format(list(self.address_deque)))
entry = self.address_deque[0]
if entry.resolved:
                # Return a resolved addrinfo 5-tuple
return entry.data
else:
# DNS resolve a single host name to multiple IP addresses
self.pop()
# keep host and port info for adding address entry to deque once it has been resolved
host, port = entry.host, entry.data
try:
resolved_hosts = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
except Exception as e:
self._logger.warning('Error resolving host "{0}" on port {1}: {2}'.format(host, port, e))
continue
# add resolved addrinfo (AF_INET and AF_INET6 only) to deque
random.shuffle(resolved_hosts)
for addrinfo in resolved_hosts:
if addrinfo[0] in (socket.AF_INET, socket.AF_INET6):
self.address_deque.appendleft(_AddressEntry(
host=host, resolved=True, data=addrinfo))
return None
def peek_host(self):
        # Return the host of the leftmost address entry
self._logger.debug('Peek host at address list: {0}'.format(list(self.address_deque)))
if len(self.address_deque) == 0:
return None
return self.address_deque[0].host
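# Illustrative sketch: building an address list with one backup node. The
# host names are placeholders; DNS resolution is deferred until peek().
def _example_address_list():
    logger = logging.getLogger('address_list_demo')
    addrs = _AddressList('primary.example.com', 5433,
                         [('backup.example.com', 5433)], logger)
    return addrs.peek_host()  # 'primary.example.com', no DNS lookup yet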
def _generate_session_label():
return '{type}-{version}-{id}'.format(
type='vertica-python',
version=vertica_python.__version__,
id=uuid.uuid1()
)
class Connection(object):
def __init__(self, options=None):
# type: (Optional[Dict[str, Any]]) -> None
self.parameters = {}
self.session_id = None
self.backend_pid = None
self.backend_key = None
self.transaction_status = None
self.socket = None
self.socket_as_file = None
options = options or {}
self.options = parse_dsn(options['dsn']) if 'dsn' in options else {}
self.options.update({key: value for key, value in options.items() \
if key == 'log_path' or (key != 'dsn' and value is not None)})
# Set up connection logger
logger_name = 'vertica_{0}_{1}'.format(id(self), str(uuid.uuid4())) # must be a unique value
self._logger = logging.getLogger(logger_name)
if 'log_level' not in self.options and 'log_path' not in self.options:
# logger is disabled by default
self._logger.disabled = True
else:
self.options.setdefault('log_level', DEFAULT_LOG_LEVEL)
self.options.setdefault('log_path', DEFAULT_LOG_PATH)
VerticaLogging.setup_logging(logger_name, self.options['log_path'],
self.options['log_level'], id(self))
self.options.setdefault('host', DEFAULT_HOST)
self.options.setdefault('port', DEFAULT_PORT)
if 'user' not in self.options:
if DEFAULT_USER:
self.options['user'] = DEFAULT_USER
else:
msg = 'Connection option "user" is required'
self._logger.error(msg)
raise KeyError(msg)
self.options.setdefault('database', self.options['user'])
self.options.setdefault('password', DEFAULT_PASSWORD)
self.options.setdefault('autocommit', DEFAULT_AUTOCOMMIT)
self.options.setdefault('session_label', _generate_session_label())
self.options.setdefault('backup_server_node', DEFAULT_BACKUP_SERVER_NODE)
self.options.setdefault('kerberos_service_name', DEFAULT_KRB_SERVICE_NAME)
# Kerberos authentication hostname defaults to the host value here so
# the correct value cannot be overwritten by load balancing or failover
self.options.setdefault('kerberos_host_name', self.options['host'])
self.address_list = _AddressList(self.options['host'], self.options['port'],
self.options['backup_server_node'], self._logger)
# we only support one cursor per connection
self.options.setdefault('unicode_error', None)
self._cursor = Cursor(self, self._logger, cursor_type=None,
unicode_error=self.options['unicode_error'])
# knob for using server-side prepared statements
self.options.setdefault('use_prepared_statements', False)
self._logger.debug('Connection prepared statements is {}'.format(
'enabled' if self.options['use_prepared_statements'] else 'disabled'))
# knob for disabling COPY LOCAL operations
self.options.setdefault('disable_copy_local', False)
self._logger.debug('COPY LOCAL operation is {}'.format(
'disabled' if self.options['disable_copy_local'] else 'enabled'))
self._logger.info('Connecting as user "{}" to database "{}" on host "{}" with port {}'.format(
self.options['user'], self.options['database'],
self.options['host'], self.options['port']))
self.startup_connection()
# Initially, for a new session, autocommit is off
if self.options['autocommit']:
self.autocommit = True
self._logger.info('Connection is ready')
#############################################
# supporting `with` statements
#############################################
def __enter__(self):
# type: () -> Self
return self
def __exit__(self, type_, value, traceback):
self.close()
#############################################
# dbapi methods
#############################################
def close(self):
self._logger.info('Close the connection')
try:
self.write(messages.Terminate())
finally:
self.close_socket()
def commit(self):
if self.closed():
raise errors.ConnectionError('Connection is closed')
cur = self.cursor()
cur.execute('COMMIT;')
def rollback(self):
if self.closed():
raise errors.ConnectionError('Connection is closed')
cur = self.cursor()
cur.execute('ROLLBACK;')
def cursor(self, cursor_type=None):
# type: (Self, Optional[Union[Literal['list', 'dict'], Type[list[Any]], Type[dict[Any, Any]]]]) -> Cursor
if self.closed():
raise errors.ConnectionError('Connection is closed')
if self._cursor.closed():
self._cursor._closed = False
        # Allow the caller to change the cursor type when reusing the cursor.
self._cursor.cursor_type = cursor_type
return self._cursor
#############################################
# non-dbapi methods
#############################################
@property
def autocommit(self):
"""Read the connection's AUTOCOMMIT setting from cache"""
return self.parameters.get('auto_commit', 'off') == 'on'
@autocommit.setter
def autocommit(self, value):
"""Change the connection's AUTOCOMMIT setting"""
if self.autocommit is value:
return
val = 'on' if value else 'off'
cur = self.cursor()
cur.execute('SET SESSION AUTOCOMMIT TO {}'.format(val), use_prepared_statements=False)
cur.fetchall() # check for errors and update the cache
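    # Illustrative sketch (assumes an open connection `conn`): assigning the
    # property issues SET SESSION AUTOCOMMIT on the server, and the cached
    # parameter is refreshed by the resulting ParameterStatus message.
    #
    #     conn.autocommit = True    # sends: SET SESSION AUTOCOMMIT TO on
    #     assert conn.autocommit    # read back from the parameter cache
    #     conn.autocommit = False   # sends: SET SESSION AUTOCOMMIT TO off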
def cancel(self):
"""Cancel the current database operation. This can be called from a
different thread than the one currently executing a database operation.
"""
if self.closed():
raise errors.ConnectionError('Connection is closed')
self._logger.info('Canceling the current database operation')
# Must create a new socket connection to the server
temp_socket = self.establish_socket_connection(self.address_list)
self.write(CancelRequest(self.backend_pid, self.backend_key), temp_socket)
temp_socket.close()
self._logger.info('Cancel request issued')
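    # Illustrative sketch (assumes an open connection `conn` executing a long
    # query in the main thread): cancel() opens its own socket, so it is safe
    # to call from another thread, e.g.
    #
    #     import threading
    #     threading.Timer(5.0, conn.cancel).start()  # cancel after ~5 seconds
    #     cur = conn.cursor()
    #     cur.execute(long_running_sql)  # the server then aborts this query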
def opened(self):
return (self.socket is not None
and self.backend_pid is not None
and self.transaction_status is not None)
def closed(self):
return not self.opened()
def __str__(self):
safe_options = {key: value for key, value in self.options.items() if key != 'password'}
s1 = "<Vertica.Connection:{0} parameters={1} backend_pid={2}, ".format(
id(self), self.parameters, self.backend_pid)
s2 = "backend_key={0}, transaction_status={1}, socket={2}, options={3}>".format(
self.backend_key, self.transaction_status, self.socket, safe_options)
return ''.join([s1, s2])
#############################################
# internal
#############################################
def reset_values(self):
self.parameters = {}
self.session_id = None
self.backend_pid = None
self.backend_key = None
self.transaction_status = None
self.socket = None
self.socket_as_file = None
self.address_list = _AddressList(self.options['host'], self.options['port'],
self.options['backup_server_node'], self._logger)
def _socket(self):
if self.socket:
return self.socket
# the initial establishment of the client connection
raw_socket = self.establish_socket_connection(self.address_list)
# enable load balancing
load_balance_options = self.options.get('connection_load_balance')
self._logger.debug('Connection load balance option is {0}'.format(
'enabled' if load_balance_options else 'disabled'))
if load_balance_options:
raw_socket = self.balance_load(raw_socket)
# enable SSL
ssl_options = self.options.get('ssl')
self._logger.debug('SSL option is {0}'.format('enabled' if ssl_options else 'disabled'))
if ssl_options:
raw_socket = self.enable_ssl(raw_socket, ssl_options)
self.socket = raw_socket
return self.socket
def _socket_as_file(self):
if self.socket_as_file is None:
self.socket_as_file = self._socket().makefile('rb')
return self.socket_as_file
def create_socket(self, family):
"""Create a TCP socket object"""
raw_socket = socket.socket(family, socket.SOCK_STREAM)
raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
connection_timeout = self.options.get('connection_timeout')
if connection_timeout is not None:
self._logger.debug('Set socket connection timeout: {0}'.format(connection_timeout))
raw_socket.settimeout(connection_timeout)
return raw_socket
def balance_load(self, raw_socket):
# Send load balance request and read server response
self._logger.debug('=> %s', messages.LoadBalanceRequest())
raw_socket.sendall(messages.LoadBalanceRequest().get_message())
response = raw_socket.recv(1)
if response == b'Y':
size = unpack('!I', raw_socket.recv(4))[0]
if size < 4:
err_msg = "Bad message size: {0}".format(size)
self._logger.error(err_msg)
raise errors.MessageError(err_msg)
res = BackendMessage.from_type(type_=response, data=raw_socket.recv(size - 4))
self._logger.debug('<= %s', res)
host = res.get_host()
port = res.get_port()
self._logger.info('Load balancing to host "{0}" on port {1}'.format(host, port))
peer = raw_socket.getpeername()
socket_host, socket_port = peer[0], peer[1]
if host == socket_host and port == socket_port:
self._logger.info('Already connecting to host "{0}" on port {1}. Ignore load balancing.'.format(host, port))
return raw_socket
# Push the new host onto the address list before connecting again. Note that this
# will leave the originally-specified host as the first failover possibility.
self.address_list.push(host, port)
raw_socket.close()
raw_socket = self.establish_socket_connection(self.address_list)
else:
self._logger.debug('<= LoadBalanceResponse: %s', response)
self._logger.warning("Load balancing requested but not supported by server")
return raw_socket
def enable_ssl(self, raw_socket, ssl_options):
# Send SSL request and read server response
self._logger.debug('=> %s', messages.SslRequest())
raw_socket.sendall(messages.SslRequest().get_message())
response = raw_socket.recv(1)
self._logger.debug('<= SslResponse: %s', response)
if response == b'S':
self._logger.info('Enabling SSL')
try:
if isinstance(ssl_options, ssl.SSLContext):
server_host = self.address_list.peek_host()
if server_host is None: # This should not happen
msg = 'Cannot get the connected server host while enabling SSL'
self._logger.error(msg)
raise errors.ConnectionError(msg)
raw_socket = ssl_options.wrap_socket(raw_socket, server_hostname=server_host)
else:
raw_socket = ssl.wrap_socket(raw_socket)
except ssl.CertificateError as e:
raise_from(errors.ConnectionError(str(e)), e)
except ssl.SSLError as e:
raise_from(errors.ConnectionError(str(e)), e)
else:
err_msg = "SSL requested but not supported by server"
self._logger.error(err_msg)
raise errors.SSLNotSupported(err_msg)
return raw_socket
def establish_socket_connection(self, address_list):
"""Given a list of database node addresses, establish the socket
connection to the database server. Return a connected socket object.
"""
addrinfo = address_list.peek()
raw_socket = None
last_exception = None
# Failover: loop to try all addresses
while addrinfo:
(family, socktype, proto, canonname, sockaddr) = addrinfo
last_exception = None
# _AddressList filters all addrs to AF_INET and AF_INET6, which both
# have host and port as values 0, 1 in the sockaddr tuple.
host = sockaddr[0]
port = sockaddr[1]
self._logger.info('Establishing connection to host "{0}" on port {1}'.format(host, port))
try:
raw_socket = self.create_socket(family)
raw_socket.connect(sockaddr)
break
except Exception as e:
self._logger.info('Failed to connect to host "{0}" on port {1}: {2}'.format(host, port, e))
last_exception = e
address_list.pop()
addrinfo = address_list.peek()
raw_socket.close()
# all of the addresses failed
if raw_socket is None or last_exception:
err_msg = 'Failed to establish a connection to the primary server or any backup address.'
self._logger.error(err_msg)
raise errors.ConnectionError(err_msg)
return raw_socket
def ssl(self):
return self.socket is not None and isinstance(self.socket, ssl.SSLSocket)
def write(self, message, vsocket=None):
if not isinstance(message, FrontendMessage):
raise TypeError("invalid message: ({0})".format(message))
if vsocket is None:
vsocket = self._socket()
self._logger.debug('=> %s', message)
try:
for data in message.fetch_message():
size = 8192 # Max msg size, consistent with how the server works
pos = 0
while pos < len(data):
sent = vsocket.send(data[pos : pos + size])
if sent == 0:
raise errors.ConnectionError("Couldn't send message: Socket connection broken")
pos += sent
except Exception as e:
self.close_socket()
self._logger.error(str(e))
if isinstance(e, IOError):
raise_from(errors.ConnectionError(str(e)), e)
else:
raise
def close_socket(self):
try:
if self.socket is not None:
self._socket().close()
if self.socket_as_file is not None:
self._socket_as_file().close()
finally:
self.reset_values()
def reset_connection(self):
self.close()
self.startup_connection()
def is_asynchronous_message(self, message):
# Check if it is an asynchronous response message
# Note: ErrorResponse is a subclass of NoticeResponse
return (isinstance(message, messages.ParameterStatus) or
(isinstance(message, messages.NoticeResponse) and
not isinstance(message, messages.ErrorResponse)))
def handle_asynchronous_message(self, message):
if isinstance(message, messages.ParameterStatus):
if message.name == 'protocol_version':
message.value = int(message.value)
self.parameters[message.name] = message.value
elif (isinstance(message, messages.NoticeResponse) and
not isinstance(message, messages.ErrorResponse)):
if getattr(self, 'notice_handler', None) is not None:
self.notice_handler(message)
else:
self._logger.warning(message.error_message())
def read_string(self):
s = bytearray()
while True:
char = self.read_bytes(1)
if char == b'\x00':
break
s.extend(char)
return s
def read_message(self):
while True:
try:
type_ = self.read_bytes(1)
size = unpack('!I', self.read_bytes(4))[0]
if size < 4:
raise errors.MessageError("Bad message size: {0}".format(size))
if type_ == messages.WriteFile.message_id:
                    # The whole WriteFile message may not be read here. Instead,
                    # only the file name and file length are read, because the
                    # message could be too large to read all at once.
f = self.read_string()
filename = f.decode('utf-8')
file_length = unpack('!I', self.read_bytes(4))[0]
size -= 4 + len(f) + 1 + 4
if size != file_length:
raise errors.MessageError("Bad message size: {0}".format(size))
if filename == '':
# If there is no filename, then this is really RETURNREJECTED data, not a rejected file
if file_length % 8 != 0:
raise errors.MessageError("Bad RETURNREJECTED data size: {0}".format(file_length))
data = self.read_bytes(file_length)
message = messages.WriteFile(filename, file_length, data)
else:
# The rest of the message is read later with write_to_disk()
message = messages.WriteFile(filename, file_length)
else:
message = BackendMessage.from_type(type_, self.read_bytes(size - 4))
self._logger.debug('<= %s', message)
self.handle_asynchronous_message(message)
# handle transaction status
if isinstance(message, messages.ReadyForQuery):
self.transaction_status = message.transaction_status
except (SystemError, IOError) as e:
self.close_socket()
# noinspection PyTypeChecker
self._logger.error(e)
raise_from(errors.ConnectionError(str(e)), e)
if not self.is_asynchronous_message(message):
break
return message
def read_expected_message(self, expected_types, error_handler=None):
# Reads a message and does some basic error handling.
# expected_types must be a class (e.g. messages.BindComplete) or a tuple of classes
message = self.read_message()
if isinstance(message, expected_types):
return message
elif isinstance(message, messages.ErrorResponse):
if error_handler is not None:
error_handler(message)
else:
raise errors.DatabaseError(message.error_message())
else:
msg = 'Received unexpected message type: {}. '.format(type(message).__name__)
if isinstance(expected_types, tuple):
msg += 'Expected types: {}'.format(", ".join([t.__name__ for t in expected_types]))
else:
msg += 'Expected type: {}'.format(expected_types.__name__)
self._logger.error(msg)
raise errors.MessageError(msg)
def read_bytes(self, n):
if n == 1:
result = self._socket_as_file().read(1)
if not result:
raise errors.ConnectionError("Connection closed by Vertica")
return result
else:
buf = b""
to_read = n
while to_read > 0:
data = self._socket_as_file().read(to_read)
received = len(data)
if received == 0:
raise errors.ConnectionError("Connection closed by Vertica")
buf += data
to_read -= received
return buf
def send_GSS_response_and_receive_challenge(self, response):
# Send the GSS response data to the vertica server
token = base64.b64decode(response)
self.write(messages.Password(token, messages.Authentication.GSS))
# Receive the challenge from the vertica server
message = self.read_expected_message(messages.Authentication)
if message.code != messages.Authentication.GSS_CONTINUE:
msg = ('Received unexpected message type: Authentication(type={}).'
' Expected type: Authentication(type={})'.format(
message.code, messages.Authentication.GSS_CONTINUE))
self._logger.error(msg)
raise errors.MessageError(msg)
return message.auth_data
def make_GSS_authentication(self):
try:
import kerberos
except ImportError as e:
raise errors.ConnectionError("{}\nCannot make a Kerberos "
"authentication because no Kerberos package is installed. "
"Get it with 'pip install kerberos'.".format(str(e)))
# Set GSS flags
gssflag = (kerberos.GSS_C_DELEG_FLAG | kerberos.GSS_C_MUTUAL_FLAG |
kerberos.GSS_C_SEQUENCE_FLAG | kerberos.GSS_C_REPLAY_FLAG)
# Generate the GSS-style service principal name
service_principal = "{}@{}".format(self.options['kerberos_service_name'],
self.options['kerberos_host_name'])
# Initializes a context object with a service principal
self._logger.info('Initializing a context for GSSAPI client-side '
'authentication with service principal {}'.format(service_principal))
try:
result, context = kerberos.authGSSClientInit(service_principal, gssflags=gssflag)
except kerberos.GSSError as err:
msg = "GSSAPI initialization error: {}".format(str(err))
self._logger.error(msg)
raise errors.KerberosError(msg)
if result != kerberos.AUTH_GSS_COMPLETE:
msg = ('Failed to initialize a context for GSSAPI client-side '
'authentication with service principal {}'.format(service_principal))
self._logger.error(msg)
raise errors.KerberosError(msg)
# Processes GSSAPI client-side steps
try:
challenge = b''
while True:
self._logger.info('Processing a single GSSAPI client-side step')
challenge = base64.b64encode(challenge).decode("utf-8")
result = kerberos.authGSSClientStep(context, challenge)
if result == kerberos.AUTH_GSS_COMPLETE:
self._logger.info('Result: GSSAPI step complete')
break
elif result == kerberos.AUTH_GSS_CONTINUE:
self._logger.info('Result: GSSAPI step continuation')
# Get the response from the last successful GSSAPI client-side step
response = kerberos.authGSSClientResponse(context)
challenge = self.send_GSS_response_and_receive_challenge(response)
else:
msg = "GSSAPI client-side step error status {}".format(result)
self._logger.error(msg)
raise errors.KerberosError(msg)
except kerberos.GSSError as err:
msg = "GSSAPI client-side step error: {}".format(str(err))
self._logger.error(msg)
raise errors.KerberosError(msg)
def startup_connection(self):
user = self.options['user']
database = self.options['database']
session_label = self.options['session_label']
os_user_name = DEFAULT_USER if DEFAULT_USER else ''
password = self.options['password']
self.write(messages.Startup(user, database, session_label, os_user_name))
while True:
message = self.read_message()
if isinstance(message, messages.Authentication):
if message.code == messages.Authentication.OK:
self._logger.info("User {} successfully authenticated"
.format(self.options['user']))
elif message.code == messages.Authentication.CHANGE_PASSWORD:
msg = "The password for user {} has expired".format(self.options['user'])
self._logger.error(msg)
raise errors.ConnectionError(msg)
elif message.code == messages.Authentication.PASSWORD_GRACE:
self._logger.warning('The password for user {} will expire soon.'
' Please consider changing it.'.format(self.options['user']))
elif message.code == messages.Authentication.GSS:
self.make_GSS_authentication()
else:
self.write(messages.Password(password, message.code,
{'user': user,
'salt': getattr(message, 'salt', None),
'usersalt': getattr(message, 'usersalt', None)}))
elif isinstance(message, messages.BackendKeyData):
self.backend_pid = message.pid
self.backend_key = message.key
elif isinstance(message, messages.ReadyForQuery):
break
elif isinstance(message, messages.ErrorResponse):
self._logger.error(message.error_message())
raise errors.ConnectionError(message.error_message())
else:
msg = "Received unexpected startup message: {0}".format(message)
self._logger.error(msg)
raise errors.MessageError(msg)
| 42.904421
| 124
| 0.599649
|
from __future__ import print_function, division, absolute_import
import base64
import logging
import socket
import ssl
import getpass
import uuid
from struct import unpack
from collections import deque, namedtuple
import random
from six import raise_from, string_types, integer_types, PY2
if PY2:
from urlparse import urlparse, parse_qs
else:
from urllib.parse import urlparse, parse_qs
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from typing import Any, Dict, Literal, Optional, Type, Union
from typing_extensions import Self
import vertica_python
from .. import errors
from ..vertica import messages
from ..vertica.cursor import Cursor
from ..vertica.messages.message import BackendMessage, FrontendMessage
from ..vertica.messages.frontend_messages import CancelRequest
from ..vertica.log import VerticaLogging
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 5433
DEFAULT_PASSWORD = ''
DEFAULT_AUTOCOMMIT = False
DEFAULT_BACKUP_SERVER_NODE = []
DEFAULT_KRB_SERVICE_NAME = 'vertica'
DEFAULT_LOG_LEVEL = logging.WARNING
DEFAULT_LOG_PATH = 'vertica_python.log'
try:
DEFAULT_USER = getpass.getuser()
except Exception as e:
DEFAULT_USER = None
print("WARN: Cannot get the login user name: {}".format(str(e)))
def connect(**kwargs):
return Connection(kwargs)
def parse_dsn(dsn):
url = urlparse(dsn)
if url.scheme != 'vertica':
raise ValueError("Only vertica:// scheme is supported.")
result = {k: v for k, v in (
('host', url.hostname),
('port', url.port),
('user', url.username),
('password', url.password),
('database', url.path[1:])) if v
}
for key, values in parse_qs(url.query, keep_blank_values=True).items():
for i in reversed(range(len(values))):
value = values[i]
if value != '':
break
if value == '' and key != 'log_path':
continue
elif key == 'backup_server_node':
continue
elif key in ('connection_load_balance', 'use_prepared_statements',
'disable_copy_local', 'ssl', 'autocommit'):
lower = value.lower()
if lower in ('true', 'on', '1'):
result[key] = True
elif lower in ('false', 'off', '0'):
result[key] = False
elif key == 'connection_timeout':
result[key] = float(value)
elif key == 'log_level' and value.isdigit():
result[key] = int(value)
else:
result[key] = value
return result
_AddressEntry = namedtuple('_AddressEntry', ['host', 'resolved', 'data'])
class _AddressList(object):
def __init__(self, host, port, backup_nodes, logger):
self._logger = logger
self.address_deque = deque()
self._append(host, port)
if not isinstance(backup_nodes, list):
err_msg = 'Connection option "backup_server_node" must be a list'
self._logger.error(err_msg)
raise TypeError(err_msg)
for node in backup_nodes:
if isinstance(node, string_types):
self._append(node, DEFAULT_PORT)
elif isinstance(node, tuple) and len(node) == 2:
self._append(node[0], node[1])
else:
err_msg = ('Each item of connection option "backup_server_node"'
' must be a host string or a (host, port) tuple')
self._logger.error(err_msg)
raise TypeError(err_msg)
self._logger.debug('Address list: {0}'.format(list(self.address_deque)))
def _append(self, host, port):
if not isinstance(host, string_types):
err_msg = 'Host must be a string: invalid value: {0}'.format(host)
self._logger.error(err_msg)
raise TypeError(err_msg)
if not isinstance(port, (string_types, integer_types)):
err_msg = 'Port must be an integer or a string: invalid value: {0}'.format(port)
self._logger.error(err_msg)
raise TypeError(err_msg)
elif isinstance(port, string_types):
try:
port = int(port)
except ValueError as e:
err_msg = 'Port "{0}" is not a valid string: {1}'.format(port, e)
self._logger.error(err_msg)
raise ValueError(err_msg)
if port < 0 or port > 65535:
err_msg = 'Invalid port number: {0}'.format(port)
self._logger.error(err_msg)
raise ValueError(err_msg)
self.address_deque.append(_AddressEntry(host=host, resolved=False, data=port))
def push(self, host, port):
self.address_deque.appendleft(_AddressEntry(host=host, resolved=False, data=port))
def pop(self):
self.address_deque.popleft()
def peek(self):
if len(self.address_deque) == 0:
return None
while len(self.address_deque) > 0:
self._logger.debug('Peek at address list: {0}'.format(list(self.address_deque)))
entry = self.address_deque[0]
if entry.resolved:
return entry.data
else:
self.pop()
host, port = entry.host, entry.data
try:
resolved_hosts = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)
except Exception as e:
self._logger.warning('Error resolving host "{0}" on port {1}: {2}'.format(host, port, e))
continue
random.shuffle(resolved_hosts)
for addrinfo in resolved_hosts:
if addrinfo[0] in (socket.AF_INET, socket.AF_INET6):
self.address_deque.appendleft(_AddressEntry(
host=host, resolved=True, data=addrinfo))
return None
def peek_host(self):
self._logger.debug('Peek host at address list: {0}'.format(list(self.address_deque)))
if len(self.address_deque) == 0:
return None
return self.address_deque[0].host
def _generate_session_label():
return '{type}-{version}-{id}'.format(
type='vertica-python',
version=vertica_python.__version__,
id=uuid.uuid1()
)
class Connection(object):
def __init__(self, options=None):
self.parameters = {}
self.session_id = None
self.backend_pid = None
self.backend_key = None
self.transaction_status = None
self.socket = None
self.socket_as_file = None
options = options or {}
self.options = parse_dsn(options['dsn']) if 'dsn' in options else {}
self.options.update({key: value for key, value in options.items() \
if key == 'log_path' or (key != 'dsn' and value is not None)})
logger_name = 'vertica_{0}_{1}'.format(id(self), str(uuid.uuid4()))
self._logger = logging.getLogger(logger_name)
if 'log_level' not in self.options and 'log_path' not in self.options:
self._logger.disabled = True
else:
self.options.setdefault('log_level', DEFAULT_LOG_LEVEL)
self.options.setdefault('log_path', DEFAULT_LOG_PATH)
VerticaLogging.setup_logging(logger_name, self.options['log_path'],
self.options['log_level'], id(self))
self.options.setdefault('host', DEFAULT_HOST)
self.options.setdefault('port', DEFAULT_PORT)
if 'user' not in self.options:
if DEFAULT_USER:
self.options['user'] = DEFAULT_USER
else:
msg = 'Connection option "user" is required'
self._logger.error(msg)
raise KeyError(msg)
self.options.setdefault('database', self.options['user'])
self.options.setdefault('password', DEFAULT_PASSWORD)
self.options.setdefault('autocommit', DEFAULT_AUTOCOMMIT)
self.options.setdefault('session_label', _generate_session_label())
self.options.setdefault('backup_server_node', DEFAULT_BACKUP_SERVER_NODE)
self.options.setdefault('kerberos_service_name', DEFAULT_KRB_SERVICE_NAME)
self.options.setdefault('kerberos_host_name', self.options['host'])
self.address_list = _AddressList(self.options['host'], self.options['port'],
self.options['backup_server_node'], self._logger)
self.options.setdefault('unicode_error', None)
self._cursor = Cursor(self, self._logger, cursor_type=None,
unicode_error=self.options['unicode_error'])
self.options.setdefault('use_prepared_statements', False)
self._logger.debug('Connection prepared statements is {}'.format(
'enabled' if self.options['use_prepared_statements'] else 'disabled'))
self.options.setdefault('disable_copy_local', False)
self._logger.debug('COPY LOCAL operation is {}'.format(
'disabled' if self.options['disable_copy_local'] else 'enabled'))
self._logger.info('Connecting as user "{}" to database "{}" on host "{}" with port {}'.format(
self.options['user'], self.options['database'],
self.options['host'], self.options['port']))
self.startup_connection()
if self.options['autocommit']:
self.autocommit = True
self._logger.info('Connection is ready')
host "{0}" on port {1}: {2}'.format(host, port, e))
last_exception = e
address_list.pop()
addrinfo = address_list.peek()
raw_socket.close()
if raw_socket is None or last_exception:
err_msg = 'Failed to establish a connection to the primary server or any backup address.'
self._logger.error(err_msg)
raise errors.ConnectionError(err_msg)
return raw_socket
def ssl(self):
return self.socket is not None and isinstance(self.socket, ssl.SSLSocket)
def write(self, message, vsocket=None):
if not isinstance(message, FrontendMessage):
raise TypeError("invalid message: ({0})".format(message))
if vsocket is None:
vsocket = self._socket()
self._logger.debug('=> %s', message)
try:
for data in message.fetch_message():
size = 8192
pos = 0
while pos < len(data):
sent = vsocket.send(data[pos : pos + size])
if sent == 0:
raise errors.ConnectionError("Couldn't send message: Socket connection broken")
pos += sent
except Exception as e:
self.close_socket()
self._logger.error(str(e))
if isinstance(e, IOError):
raise_from(errors.ConnectionError(str(e)), e)
else:
raise
def close_socket(self):
try:
if self.socket is not None:
self._socket().close()
if self.socket_as_file is not None:
self._socket_as_file().close()
finally:
self.reset_values()
def reset_connection(self):
self.close()
self.startup_connection()
def is_asynchronous_message(self, message):
# Check if it is an asynchronous response message
# Note: ErrorResponse is a subclass of NoticeResponse
return (isinstance(message, messages.ParameterStatus) or
(isinstance(message, messages.NoticeResponse) and
not isinstance(message, messages.ErrorResponse)))
def handle_asynchronous_message(self, message):
if isinstance(message, messages.ParameterStatus):
if message.name == 'protocol_version':
message.value = int(message.value)
self.parameters[message.name] = message.value
elif (isinstance(message, messages.NoticeResponse) and
not isinstance(message, messages.ErrorResponse)):
if getattr(self, 'notice_handler', None) is not None:
self.notice_handler(message)
else:
self._logger.warning(message.error_message())
def read_string(self):
s = bytearray()
while True:
char = self.read_bytes(1)
if char == b'\x00':
break
s.extend(char)
return s
def read_message(self):
while True:
try:
type_ = self.read_bytes(1)
size = unpack('!I', self.read_bytes(4))[0]
if size < 4:
raise errors.MessageError("Bad message size: {0}".format(size))
if type_ == messages.WriteFile.message_id:
                    # The whole WriteFile message may not be read here. Instead,
                    # only the file name and file length are read, because the
                    # message could be too large to read all at once.
f = self.read_string()
filename = f.decode('utf-8')
file_length = unpack('!I', self.read_bytes(4))[0]
size -= 4 + len(f) + 1 + 4
if size != file_length:
raise errors.MessageError("Bad message size: {0}".format(size))
if filename == '':
# If there is no filename, then this is really RETURNREJECTED data, not a rejected file
if file_length % 8 != 0:
raise errors.MessageError("Bad RETURNREJECTED data size: {0}".format(file_length))
data = self.read_bytes(file_length)
message = messages.WriteFile(filename, file_length, data)
else:
# The rest of the message is read later with write_to_disk()
message = messages.WriteFile(filename, file_length)
else:
message = BackendMessage.from_type(type_, self.read_bytes(size - 4))
self._logger.debug('<= %s', message)
self.handle_asynchronous_message(message)
# handle transaction status
if isinstance(message, messages.ReadyForQuery):
self.transaction_status = message.transaction_status
except (SystemError, IOError) as e:
self.close_socket()
# noinspection PyTypeChecker
self._logger.error(e)
raise_from(errors.ConnectionError(str(e)), e)
if not self.is_asynchronous_message(message):
break
return message
def read_expected_message(self, expected_types, error_handler=None):
# Reads a message and does some basic error handling.
# expected_types must be a class (e.g. messages.BindComplete) or a tuple of classes
message = self.read_message()
if isinstance(message, expected_types):
return message
elif isinstance(message, messages.ErrorResponse):
if error_handler is not None:
error_handler(message)
else:
raise errors.DatabaseError(message.error_message())
else:
msg = 'Received unexpected message type: {}. '.format(type(message).__name__)
if isinstance(expected_types, tuple):
msg += 'Expected types: {}'.format(", ".join([t.__name__ for t in expected_types]))
else:
msg += 'Expected type: {}'.format(expected_types.__name__)
self._logger.error(msg)
raise errors.MessageError(msg)
def read_bytes(self, n):
if n == 1:
result = self._socket_as_file().read(1)
if not result:
raise errors.ConnectionError("Connection closed by Vertica")
return result
else:
buf = b""
to_read = n
while to_read > 0:
data = self._socket_as_file().read(to_read)
received = len(data)
if received == 0:
raise errors.ConnectionError("Connection closed by Vertica")
buf += data
to_read -= received
return buf
def send_GSS_response_and_receive_challenge(self, response):
# Send the GSS response data to the vertica server
token = base64.b64decode(response)
self.write(messages.Password(token, messages.Authentication.GSS))
# Receive the challenge from the vertica server
message = self.read_expected_message(messages.Authentication)
if message.code != messages.Authentication.GSS_CONTINUE:
msg = ('Received unexpected message type: Authentication(type={}).'
' Expected type: Authentication(type={})'.format(
message.code, messages.Authentication.GSS_CONTINUE))
self._logger.error(msg)
raise errors.MessageError(msg)
return message.auth_data
def make_GSS_authentication(self):
try:
import kerberos
except ImportError as e:
raise errors.ConnectionError("{}\nCannot make a Kerberos "
"authentication because no Kerberos package is installed. "
"Get it with 'pip install kerberos'.".format(str(e)))
# Set GSS flags
gssflag = (kerberos.GSS_C_DELEG_FLAG | kerberos.GSS_C_MUTUAL_FLAG |
kerberos.GSS_C_SEQUENCE_FLAG | kerberos.GSS_C_REPLAY_FLAG)
# Generate the GSS-style service principal name
service_principal = "{}@{}".format(self.options['kerberos_service_name'],
self.options['kerberos_host_name'])
# Initializes a context object with a service principal
self._logger.info('Initializing a context for GSSAPI client-side '
'authentication with service principal {}'.format(service_principal))
try:
result, context = kerberos.authGSSClientInit(service_principal, gssflags=gssflag)
except kerberos.GSSError as err:
msg = "GSSAPI initialization error: {}".format(str(err))
self._logger.error(msg)
raise errors.KerberosError(msg)
if result != kerberos.AUTH_GSS_COMPLETE:
msg = ('Failed to initialize a context for GSSAPI client-side '
'authentication with service principal {}'.format(service_principal))
self._logger.error(msg)
raise errors.KerberosError(msg)
# Processes GSSAPI client-side steps
try:
challenge = b''
while True:
self._logger.info('Processing a single GSSAPI client-side step')
challenge = base64.b64encode(challenge).decode("utf-8")
result = kerberos.authGSSClientStep(context, challenge)
if result == kerberos.AUTH_GSS_COMPLETE:
self._logger.info('Result: GSSAPI step complete')
break
elif result == kerberos.AUTH_GSS_CONTINUE:
self._logger.info('Result: GSSAPI step continuation')
# Get the response from the last successful GSSAPI client-side step
response = kerberos.authGSSClientResponse(context)
challenge = self.send_GSS_response_and_receive_challenge(response)
else:
msg = "GSSAPI client-side step error status {}".format(result)
self._logger.error(msg)
raise errors.KerberosError(msg)
except kerberos.GSSError as err:
msg = "GSSAPI client-side step error: {}".format(str(err))
self._logger.error(msg)
raise errors.KerberosError(msg)
def startup_connection(self):
user = self.options['user']
database = self.options['database']
session_label = self.options['session_label']
os_user_name = DEFAULT_USER if DEFAULT_USER else ''
password = self.options['password']
self.write(messages.Startup(user, database, session_label, os_user_name))
while True:
message = self.read_message()
if isinstance(message, messages.Authentication):
if message.code == messages.Authentication.OK:
self._logger.info("User {} successfully authenticated"
.format(self.options['user']))
elif message.code == messages.Authentication.CHANGE_PASSWORD:
msg = "The password for user {} has expired".format(self.options['user'])
self._logger.error(msg)
raise errors.ConnectionError(msg)
elif message.code == messages.Authentication.PASSWORD_GRACE:
self._logger.warning('The password for user {} will expire soon.'
' Please consider changing it.'.format(self.options['user']))
elif message.code == messages.Authentication.GSS:
self.make_GSS_authentication()
else:
self.write(messages.Password(password, message.code,
{'user': user,
'salt': getattr(message, 'salt', None),
'usersalt': getattr(message, 'usersalt', None)}))
elif isinstance(message, messages.BackendKeyData):
self.backend_pid = message.pid
self.backend_key = message.key
elif isinstance(message, messages.ReadyForQuery):
break
elif isinstance(message, messages.ErrorResponse):
self._logger.error(message.error_message())
raise errors.ConnectionError(message.error_message())
else:
msg = "Received unexpected startup message: {0}".format(message)
self._logger.error(msg)
raise errors.MessageError(msg)
| true
| true
|
790806fc7e64af17b5a7f763354c486df50043d9
| 6,142
|
py
|
Python
|
src/dev/arm/css/Scmi.py
|
fei-shan/gem5-experiment
|
70781db30d42b1fe50e495bd04f7755a4b0e0e59
|
[
"BSD-3-Clause"
] | 2
|
2021-01-15T17:32:18.000Z
|
2021-12-21T02:53:58.000Z
|
src/dev/arm/css/Scmi.py
|
fei-shan/gem5-experiment
|
70781db30d42b1fe50e495bd04f7755a4b0e0e59
|
[
"BSD-3-Clause"
] | 3
|
2021-03-26T20:33:59.000Z
|
2022-01-24T22:54:03.000Z
|
src/dev/arm/css/Scmi.py
|
fei-shan/gem5-experiment
|
70781db30d42b1fe50e495bd04f7755a4b0e0e59
|
[
"BSD-3-Clause"
] | 3
|
2021-03-27T16:36:19.000Z
|
2022-03-28T18:32:57.000Z
|
# Copyright (c) 2020 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from m5.params import *
from m5.proxy import *
from m5.objects.Scp import Scp
from m5.objects.Doorbell import Doorbell
from m5.util.fdthelper import *
from m5.SimObject import SimObject
class ScmiChannel(SimObject):
"""
Unidirectional channel
"""
type = 'ScmiChannel'
cxx_header = "dev/arm/css/scmi_platform.hh"
cxx_class = "SCMI::VirtualChannel"
shmem_range = Param.AddrRange(
"Virtual channel's shared memory address range")
phys_id = Param.Unsigned(4,
"Physical slot of the channel")
virt_id = Param.Unsigned(0,
"Virtual slot of the channel (within the physical)")
doorbell = Param.Doorbell(
"This is the doorbell used to notify the SCMI platform")
def __init__(self, shmem, *args, **kwargs):
super(ScmiChannel, self).__init__(**kwargs)
def shmemGenerator(state):
shmem_node = FdtNode("scp-shmem@%x" % 0)
shmem_node.appendCompatible(["arm,scmi-shmem"])
shmem_node.append(FdtPropertyWords("reg",
state.addrCells(0) +
state.sizeCells(0x200)) )
#shmem_node.appendPhandle(self._parent.unproxy(self).channel)
shmem_node.appendPhandle("scmi_virt" + str(self.virt_id))
return shmem_node
self._shmem = shmem
self._shmem.addSubnodeGenerator(shmemGenerator)
class ScmiAgentChannel(ScmiChannel):
"""
    This is an Agent-to-Platform channel (the agent is the initiator)
"""
type = 'ScmiAgentChannel'
cxx_header = "dev/arm/css/scmi_platform.hh"
cxx_class = "SCMI::AgentChannel"
class ScmiPlatformChannel(ScmiChannel):
"""
    This is a Platform-to-Agent channel (the platform is the initiator)
"""
type = 'ScmiPlatformChannel'
cxx_header = "dev/arm/css/scmi_platform.hh"
cxx_class = "SCMI::PlatformChannel"
class ScmiCommunication(SimObject):
"""
    The SCMI Communication class models bidirectional communication
    between the SCMI platform and an agent. As such it has a
    ScmiAgentChannel and a ScmiPlatformChannel object as members.
"""
type = 'ScmiCommunication'
cxx_header = "dev/arm/css/scmi_platform.hh"
cxx_class = "SCMI::Communication"
agent_channel = Param.ScmiAgentChannel(
"Agent to Platform channel")
platform_channel = Param.ScmiPlatformChannel(
"Platform to Agent channel")
class ScmiPlatform(Scp):
type = 'ScmiPlatform'
cxx_header = "dev/arm/css/scmi_platform.hh"
cxx_class = "SCMI::Platform"
comms = VectorParam.ScmiCommunication([],
"SCMI Communications")
agents = VectorParam.String([ "OSPM" ],
"Vector of SCMI agents (names) in the system")
sys = Param.System(Parent.any, "System object parameter")
dma = MasterPort("DMA port")
# Protocol params
base_vendor = Param.String("arm",
"Return string for the Base protocol DISCOVER_VENDOR command")
base_subvendor = Param.String("gem5",
"Return string for the Base protocol DISCOVER_SUBVENDOR command")
base_impl_version = Param.Unsigned(0,
"Return value for the Base protocol "
"DISCOVER_IMPLEMENTATION_VERSION command")
def generateDeviceTree(self, state):
scmi_node = self.generateScmiNode(state)
fw_node = FdtNode("firmware")
fw_node.append(scmi_node)
yield fw_node
def generateScmiNode(self, state):
node = FdtNode("scmi")
node.appendCompatible(["arm,scmi"])
mbox_phandle = state.phandle(self._parent.unproxy(self).mailbox)
shmem_phandles = []
for comm in self.unproxy(self).comms:
shmem_phandles.append(state.phandle(
"scmi_virt" + str(comm.agent_channel.virt_id)))
shmem_phandles.append(state.phandle(
"scmi_virt" + str(comm.platform_channel.virt_id)))
phys_channel = 1 # HP-NonSecure
node.append(FdtPropertyWords("mboxes", [ mbox_phandle, phys_channel ]))
node.append(FdtPropertyWords("shmem", shmem_phandles))
return node
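# Illustrative configuration sketch (assumptions, not part of this file):
# wiring one agent/platform channel pair into a ScmiCommunication. The shmem
# node, address ranges, and doorbells are placeholders a real gem5 platform
# config would provide.
#
#     comm = ScmiCommunication(
#         agent_channel=ScmiAgentChannel(shmem,
#             shmem_range=AddrRange(0x2e000000, size=0x200),
#             virt_id=0, doorbell=agent_doorbell),
#         platform_channel=ScmiPlatformChannel(shmem,
#             shmem_range=AddrRange(0x2e000200, size=0x200),
#             virt_id=1, doorbell=platform_doorbell))
#     platform = ScmiPlatform(comms=[comm], agents=['OSPM'])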
| 40.143791
| 79
| 0.712146
|
from m5.params import *
from m5.proxy import *
from m5.objects.Scp import Scp
from m5.objects.Doorbell import Doorbell
from m5.util.fdthelper import *
from m5.SimObject import SimObject
class ScmiChannel(SimObject):
type = 'ScmiChannel'
cxx_header = "dev/arm/css/scmi_platform.hh"
cxx_class = "SCMI::VirtualChannel"
shmem_range = Param.AddrRange(
"Virtual channel's shared memory address range")
phys_id = Param.Unsigned(4,
"Physical slot of the channel")
virt_id = Param.Unsigned(0,
"Virtual slot of the channel (within the physical)")
doorbell = Param.Doorbell(
"This is the doorbell used to notify the SCMI platform")
def __init__(self, shmem, *args, **kwargs):
super(ScmiChannel, self).__init__(**kwargs)
def shmemGenerator(state):
shmem_node = FdtNode("scp-shmem@%x" % 0)
shmem_node.appendCompatible(["arm,scmi-shmem"])
shmem_node.append(FdtPropertyWords("reg",
state.addrCells(0) +
state.sizeCells(0x200)) )
#shmem_node.appendPhandle(self._parent.unproxy(self).channel)
shmem_node.appendPhandle("scmi_virt" + str(self.virt_id))
return shmem_node
self._shmem = shmem
self._shmem.addSubnodeGenerator(shmemGenerator)
class ScmiAgentChannel(ScmiChannel):
type = 'ScmiAgentChannel'
cxx_header = "dev/arm/css/scmi_platform.hh"
cxx_class = "SCMI::AgentChannel"
class ScmiPlatformChannel(ScmiChannel):
type = 'ScmiPlatformChannel'
cxx_header = "dev/arm/css/scmi_platform.hh"
cxx_class = "SCMI::PlatformChannel"
class ScmiCommunication(SimObject):
type = 'ScmiCommunication'
cxx_header = "dev/arm/css/scmi_platform.hh"
cxx_class = "SCMI::Communication"
agent_channel = Param.ScmiAgentChannel(
"Agent to Platform channel")
platform_channel = Param.ScmiPlatformChannel(
"Platform to Agent channel")
class ScmiPlatform(Scp):
type = 'ScmiPlatform'
cxx_header = "dev/arm/css/scmi_platform.hh"
cxx_class = "SCMI::Platform"
comms = VectorParam.ScmiCommunication([],
"SCMI Communications")
agents = VectorParam.String([ "OSPM" ],
"Vector of SCMI agents (names) in the system")
sys = Param.System(Parent.any, "System object parameter")
dma = MasterPort("DMA port")
# Protocol params
base_vendor = Param.String("arm",
"Return string for the Base protocol DISCOVER_VENDOR command")
base_subvendor = Param.String("gem5",
"Return string for the Base protocol DISCOVER_SUBVENDOR command")
base_impl_version = Param.Unsigned(0,
"Return value for the Base protocol "
"DISCOVER_IMPLEMENTATION_VERSION command")
def generateDeviceTree(self, state):
scmi_node = self.generateScmiNode(state)
fw_node = FdtNode("firmware")
fw_node.append(scmi_node)
yield fw_node
def generateScmiNode(self, state):
node = FdtNode("scmi")
node.appendCompatible(["arm,scmi"])
mbox_phandle = state.phandle(self._parent.unproxy(self).mailbox)
shmem_phandles = []
for comm in self.unproxy(self).comms:
shmem_phandles.append(state.phandle(
"scmi_virt" + str(comm.agent_channel.virt_id)))
shmem_phandles.append(state.phandle(
"scmi_virt" + str(comm.platform_channel.virt_id)))
phys_channel = 1 # HP-NonSecure
node.append(FdtPropertyWords("mboxes", [ mbox_phandle, phys_channel ]))
node.append(FdtPropertyWords("shmem", shmem_phandles))
return node
| true
| true
|
790809721ce85f4566e4aa149b960fd755db4dae
| 6,842
|
py
|
Python
|
indico/core/cache.py
|
errikos/indico
|
72b75d63a896e5defb8e9acf64fe147748c7ccce
|
[
"MIT"
] | null | null | null |
indico/core/cache.py
|
errikos/indico
|
72b75d63a896e5defb8e9acf64fe147748c7ccce
|
[
"MIT"
] | null | null | null |
indico/core/cache.py
|
errikos/indico
|
72b75d63a896e5defb8e9acf64fe147748c7ccce
|
[
"MIT"
] | null | null | null |
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask_caching import Cache
from flask_caching.backends.rediscache import RedisCache
from flask_caching.backends.simplecache import SimpleCache
from redis import RedisError
from indico.core.logger import Logger
_logger = Logger.get('cache')
class CachedNone:
__slots__ = ()
@classmethod
def wrap(cls, value):
return cls() if value is None else value
@classmethod
def unwrap(cls, value, default=None):
if value is None:
return default
elif isinstance(value, cls):
return None
else:
return value
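def _cached_none_demo():
    # Illustrative sketch: wrap() turns None into a CachedNone sentinel so a
    # stored None can be told apart from a cache miss on unwrap().
    stored = CachedNone.wrap(None)             # sentinel object, not None
    assert CachedNone.unwrap(stored) is None   # cached None -> None
    assert CachedNone.unwrap(None, 'miss') == 'miss'  # miss -> default
    assert CachedNone.unwrap(42) == 42         # real values pass through
    return stored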
class IndicoCacheMixin:
def get(self, key, default=None):
return CachedNone.unwrap(super().get(key), default)
def get_many(self, *keys, default=None):
return [CachedNone.unwrap(val, default) for val in super().get_many(*keys)]
def get_dict(self, *keys, default=None):
return dict(zip(keys, self.get_many(*keys, default=default)))
class IndicoRedisCache(IndicoCacheMixin, RedisCache):
"""
This is similar to the original RedisCache from Flask-Caching, but it
allows specifying a default value when retrieving cache data and
distinguishing between a cached ``None`` value and a cache miss.
"""
def dump_object(self, value):
        # We are not overriding the `load_object` counterpart of this method
        # on purpose, because we need access to the wrapped value in `get`
        # and `get_many`.
return super().dump_object(CachedNone.wrap(value))
class IndicoSimpleCache(IndicoCacheMixin, SimpleCache):
"""
This is similar to the original SimpleCache from Flask-Caching, but it
allows specifying a default value when retrieving cache data and
distinguishing between a cached ``None`` value and a cache miss.
"""
def set(self, key, value, timeout=None):
return super().set(key, CachedNone.wrap(value), timeout=timeout)
def add(self, key, value, timeout=None):
return super().add(key, CachedNone.wrap(value), timeout=timeout)
def make_indico_simple_cache(app, config, args, kwargs):
return IndicoSimpleCache(*args, **kwargs)
def make_indico_redis_cache(app, config, args, kwargs):
from redis import from_url as redis_from_url
key_prefix = config.get('CACHE_KEY_PREFIX')
if key_prefix:
kwargs['key_prefix'] = key_prefix
kwargs['host'] = redis_from_url(config['CACHE_REDIS_URL'], socket_timeout=1)
return IndicoRedisCache(*args, **kwargs)
class ScopedCache:
def __init__(self, cache, scope):
self.cache = cache
self.scope = scope
def _scoped(self, key):
return f'{self.scope}/{key}'
def get(self, key, default=None):
return self.cache.get(self._scoped(key), default=default)
def set(self, key, value, timeout=None):
self.cache.set(self._scoped(key), value, timeout=timeout)
def add(self, key, value, timeout=None):
self.cache.add(self._scoped(key), value, timeout=timeout)
def delete(self, key):
self.cache.delete(self._scoped(key))
def delete_many(self, *keys):
keys = [self._scoped(key) for key in keys]
self.cache.delete_many(*keys)
def clear(self):
raise NotImplementedError('Clearing scoped caches is not supported')
def get_dict(self, *keys, default=None):
return dict(zip(keys, self.get_many(*keys, default=default)))
def get_many(self, *keys, default=None):
keys = [self._scoped(key) for key in keys]
return self.cache.get_many(*keys, default=default)
def set_many(self, mapping, timeout=None):
mapping = {self._scoped(key): value for key, value in mapping.items()}
self.cache.set_many(mapping, timeout=timeout)
def __repr__(self):
return f'<ScopedCache: {self.scope}>'
class IndicoCache(Cache):
"""
    This is basically the Cache class from Flask-Caching, but it silences all
    exceptions that happen during a cache operation, since cache failures
    should not take down the whole page.
    While this cache can in principle support many different backends, we only
    consider Redis and (for unit tests) a simple dict-based cache. This allows
    us to be more specific in catching exceptions, since the Redis cache has
    exactly one base exception.
"""
def get(self, key, default=None):
try:
return super().get(key, default)
except RedisError:
_logger.exception('get(%r) failed', key)
return default
def set(self, key, value, timeout=None):
try:
super().set(key, value, timeout=timeout)
except RedisError:
_logger.exception('set(%r) failed', key)
def add(self, key, value, timeout=None):
try:
super().add(key, value, timeout=timeout)
except RedisError:
_logger.exception('add(%r) failed', key)
def delete(self, key):
try:
super().delete(key)
except RedisError:
_logger.exception('delete(%r) failed', key)
def delete_many(self, *keys):
try:
super().delete_many(*keys)
except RedisError:
_logger.exception('delete_many(%s) failed', ', '.join(map(repr, keys)))
def clear(self):
try:
super().clear()
except RedisError:
_logger.exception('clear() failed')
def get_many(self, *keys, default=None):
try:
return super().get_many(*keys, default=default)
except RedisError:
logkeys = ', '.join(map(repr, keys))
_logger.exception('get_many(%s) failed', logkeys)
return [default] * len(keys)
def set_many(self, mapping, timeout=None):
try:
super().set_many(mapping, timeout=timeout)
except RedisError:
_logger.exception('set_many(%r) failed', mapping)
def get_dict(self, *keys, default=None):
try:
return super().get_dict(*keys, default=default)
except RedisError:
logkeys = ', '.join(map(repr, keys))
_logger.exception('get_dict(%s) failed', logkeys)
return dict(zip(keys, [default] * len(keys)))
def make_scoped_cache(scope):
"""Create a new scoped cache.
In most cases the global cache should not be used directly but rather
with a scope depending on the module a cache is used for. This is
especially important when passing user-provided data as the cache key
to prevent reading other unrelated cache keys.
"""
return ScopedCache(cache, scope)
cache = IndicoCache()
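# Illustrative usage sketch: keys are namespaced per scope, so identical key
# names in different modules do not collide. The scope and key below are
# made-up examples.
def _scoped_cache_demo():
    demo_cache = make_scoped_cache('demo')
    demo_cache.set('greeting', 'hello', timeout=300)  # stored as 'demo/greeting'
    return demo_cache.get('greeting', default='<miss>')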
| 31.971963
| 83
| 0.653464
|
from flask_caching import Cache
from flask_caching.backends.rediscache import RedisCache
from flask_caching.backends.simplecache import SimpleCache
from redis import RedisError
from indico.core.logger import Logger
_logger = Logger.get('cache')
class CachedNone:
__slots__ = ()
@classmethod
def wrap(cls, value):
return cls() if value is None else value
@classmethod
def unwrap(cls, value, default=None):
if value is None:
return default
elif isinstance(value, cls):
return None
else:
return value
class IndicoCacheMixin:
def get(self, key, default=None):
return CachedNone.unwrap(super().get(key), default)
def get_many(self, *keys, default=None):
return [CachedNone.unwrap(val, default) for val in super().get_many(*keys)]
def get_dict(self, *keys, default=None):
return dict(zip(keys, self.get_many(*keys, default=default)))
class IndicoRedisCache(IndicoCacheMixin, RedisCache):
def dump_object(self, value):
return super().dump_object(CachedNone.wrap(value))
class IndicoSimpleCache(IndicoCacheMixin, SimpleCache):
def set(self, key, value, timeout=None):
return super().set(key, CachedNone.wrap(value), timeout=timeout)
def add(self, key, value, timeout=None):
return super().add(key, CachedNone.wrap(value), timeout=timeout)
def make_indico_simple_cache(app, config, args, kwargs):
return IndicoSimpleCache(*args, **kwargs)
def make_indico_redis_cache(app, config, args, kwargs):
from redis import from_url as redis_from_url
key_prefix = config.get('CACHE_KEY_PREFIX')
if key_prefix:
kwargs['key_prefix'] = key_prefix
kwargs['host'] = redis_from_url(config['CACHE_REDIS_URL'], socket_timeout=1)
return IndicoRedisCache(*args, **kwargs)
class ScopedCache:
def __init__(self, cache, scope):
self.cache = cache
self.scope = scope
def _scoped(self, key):
return f'{self.scope}/{key}'
def get(self, key, default=None):
return self.cache.get(self._scoped(key), default=default)
def set(self, key, value, timeout=None):
self.cache.set(self._scoped(key), value, timeout=timeout)
def add(self, key, value, timeout=None):
self.cache.add(self._scoped(key), value, timeout=timeout)
def delete(self, key):
self.cache.delete(self._scoped(key))
def delete_many(self, *keys):
keys = [self._scoped(key) for key in keys]
self.cache.delete_many(*keys)
def clear(self):
raise NotImplementedError('Clearing scoped caches is not supported')
def get_dict(self, *keys, default=None):
return dict(zip(keys, self.get_many(*keys, default=default)))
def get_many(self, *keys, default=None):
keys = [self._scoped(key) for key in keys]
return self.cache.get_many(*keys, default=default)
def set_many(self, mapping, timeout=None):
mapping = {self._scoped(key): value for key, value in mapping.items()}
self.cache.set_many(mapping, timeout=timeout)
def __repr__(self):
return f'<ScopedCache: {self.scope}>'
class IndicoCache(Cache):
def get(self, key, default=None):
try:
return super().get(key, default)
except RedisError:
_logger.exception('get(%r) failed', key)
return default
def set(self, key, value, timeout=None):
try:
super().set(key, value, timeout=timeout)
except RedisError:
_logger.exception('set(%r) failed', key)
def add(self, key, value, timeout=None):
try:
super().add(key, value, timeout=timeout)
except RedisError:
_logger.exception('add(%r) failed', key)
def delete(self, key):
try:
super().delete(key)
except RedisError:
_logger.exception('delete(%r) failed', key)
def delete_many(self, *keys):
try:
super().delete_many(*keys)
except RedisError:
_logger.exception('delete_many(%s) failed', ', '.join(map(repr, keys)))
def clear(self):
try:
super().clear()
except RedisError:
_logger.exception('clear() failed')
def get_many(self, *keys, default=None):
try:
return super().get_many(*keys, default=default)
except RedisError:
logkeys = ', '.join(map(repr, keys))
_logger.exception('get_many(%s) failed', logkeys)
return [default] * len(keys)
def set_many(self, mapping, timeout=None):
try:
super().set_many(mapping, timeout=timeout)
except RedisError:
_logger.exception('set_many(%r) failed', mapping)
def get_dict(self, *keys, default=None):
try:
return super().get_dict(*keys, default=default)
except RedisError:
logkeys = ', '.join(map(repr, keys))
_logger.exception('get_dict(%s) failed', logkeys)
return dict(zip(keys, [default] * len(keys)))
def make_scoped_cache(scope):
return ScopedCache(cache, scope)
cache = IndicoCache()
| true
| true
|
79080ab85c70df8806700fcfe98355dc711038da
| 2,184
|
py
|
Python
|
lib/surface/service_management/operations/describe.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/service_management/operations/describe.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/service_management/operations/describe.py
|
bshaffer/google-cloud-sdk
|
f587382fd112f238c0d6d5ca3dab8f52d2b5c5f9
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*- #
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""service-management operations describe command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.endpoints import common_flags
_ERROR = ('The `service-management operations describe` command has been '
'replaced by `endpoints operations describe` and '
'`services operations describe`.')
@base.Deprecate(is_removed=True, error=_ERROR)
class Describe(base.DescribeCommand):
"""Describes an operation resource for a given operation name."""
@staticmethod
def Args(parser):
"""Args is called by calliope to gather arguments for this command.
Args:
parser: An argparse parser that you can use to add arguments that go
on the command line after this command. Positional arguments are
allowed.
"""
common_flags.operation_flag(suffix='to describe').AddToParser(parser)
parser.display_info.AddFormat(
':(metadata.startTime.date(format="%Y-%m-%d %H:%M:%S %Z", tz=LOCAL)) '
'[transforms] default')
parser.add_argument(
'--full',
action='store_true',
default=False,
help=('Print the entire operation resource, which could be large. '
'By default, a summary will be printed instead.'))
def Run(self, args):
"""Stubs 'service-management operations describe'.
Args:
args: argparse.Namespace, The arguments that this command was invoked
with.
"""
pass
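For context, a self-contained sketch of what a removal-style deprecation
decorator like the one applied above effectively does; this is not calliope's
real implementation, and every name below is invented for illustration:

# Hypothetical stand-in for base.Deprecate(is_removed=True, error=...):
# the decorated command's Run is replaced so invoking it raises the error.
def deprecate(is_removed, error):
    def wrap(cls):
        if is_removed:
            def _removed_run(self, args):
                raise RuntimeError(error)
            cls.Run = _removed_run
        return cls
    return wrap

@deprecate(is_removed=True, error='use `endpoints operations describe` instead')
class OldDescribe(object):
    def Run(self, args):
        pass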
| 33.6
| 78
| 0.708333
|
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.endpoints import common_flags
_ERROR = ('The `service-management operations describe` command has been '
'replaced by `endpoints operations describe` and '
'`services operations describe`.')
@base.Deprecate(is_removed=True, error=_ERROR)
class Describe(base.DescribeCommand):
@staticmethod
def Args(parser):
common_flags.operation_flag(suffix='to describe').AddToParser(parser)
parser.display_info.AddFormat(
':(metadata.startTime.date(format="%Y-%m-%d %H:%M:%S %Z", tz=LOCAL)) '
'[transforms] default')
parser.add_argument(
'--full',
action='store_true',
default=False,
help=('Print the entire operation resource, which could be large. '
'By default, a summary will be printed instead.'))
def Run(self, args):
pass
| true
| true
|
79080c9dea72eb3be5c8bd55f7e41768a8ebb07d
| 11,319
|
py
|
Python
|
packages/python/plotly/plotly/validators/volume/_colorbar.py
|
adehad/plotly.py
|
bca292530c400c61e8b7f8a6571262a9dde43ee3
|
[
"MIT"
] | 7
|
2021-09-29T09:46:36.000Z
|
2022-03-24T08:30:41.000Z
|
packages/python/plotly/plotly/validators/volume/_colorbar.py
|
adehad/plotly.py
|
bca292530c400c61e8b7f8a6571262a9dde43ee3
|
[
"MIT"
] | 1
|
2021-09-30T16:56:21.000Z
|
2021-10-15T09:14:12.000Z
|
packages/python/plotly/plotly/validators/volume/_colorbar.py
|
adehad/plotly.py
|
bca292530c400c61e8b7f8a6571262a9dde43ee3
|
[
"MIT"
] | 1
|
2021-09-29T22:34:05.000Z
|
2021-09-29T22:34:05.000Z
|
import _plotly_utils.basevalidators
class ColorbarValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="colorbar", parent_name="volume", **kwargs):
super(ColorbarValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "ColorBar"),
data_docs=kwargs.pop(
"data_docs",
"""
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
                Sets the width (in px) of the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
                Sets the length of the color bar. This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
                or in "pixels". Use `len` to set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this
number. This only has an effect when
`tickformat` is "SI" or "B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
                Sets the thickness of the color bar. This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
                Sets the color bar's tick label font.
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-time-
format#locale_format We add one item to d3's
date formatter: "%{n}f" for fractional seconds
with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f"
would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.volume.
colorbar.Tickformatstop` instances or dicts
with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.volume.colorbar.tickformatstopdefaults), sets
the default property values to use for elements
of volume.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of
the axis. The default value for inside tick
labels is *hide past domain*. In other cases
the default is *hide past div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
                for ticktext.
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
                for tickvals.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.volume.colorbar.Ti
tle` instance or dict with compatible
properties
titlefont
Deprecated: Please use
volume.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's
font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use
volume.colorbar.title.side instead. Determines
the location of color bar's title with respect
to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
                Sets this color bar's vertical position anchor.
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
""",
),
**kwargs
)
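As an illustration (assuming plotly and numpy are installed; the data and
colorbar settings below are invented), the validator above governs the
`colorbar` attribute of a volume trace, including the linear tick placement
described under `tickmode`/`tick0`/`dtick`:

import numpy as np
import plotly.graph_objects as go

# Hypothetical usage sketch exercising a few documented colorbar attributes:
# tickmode='linear' places ticks at tick0, tick0 + dtick, tick0 + 2*dtick, ...
X, Y, Z = np.mgrid[0:1:8j, 0:1:8j, 0:1:8j]
fig = go.Figure(go.Volume(
    x=X.ravel(), y=Y.ravel(), z=Z.ravel(),
    value=(X * Y * Z).ravel(),
    colorbar=dict(title=dict(text='density'), len=0.8,
                  tickmode='linear', tick0=0, dtick=0.2),
))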
| 47.359833
| 79
| 0.526372
|
import _plotly_utils.basevalidators
class ColorbarValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="colorbar", parent_name="volume", **kwargs):
super(ColorbarValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "ColorBar"),
data_docs=kwargs.pop(
"data_docs",
"""
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
                Sets the width (in px) of the border enclosing
this color bar.
dtick
Sets the step in-between ticks on this axis.
Use with `tick0`. Must be a positive number, or
special strings available to "log" and "date"
axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick
number. For example, to set a tick mark at 1,
10, 100, 1000, ... set dtick to 1. To set tick
marks at 1, 100, 10000, ... set dtick to 2. To
set tick marks at 1, 5, 25, 125, 625, 3125, ...
set dtick to log_10(5), or 0.69897000433. "log"
has several special values; "L<f>", where `f`
is a positive number, gives ticks linearly
spaced in value (but not position). For example
`tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10
plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is
ignored for "D1" and "D2". If the axis `type`
is "date", then you must convert the time to
milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to
86400000.0. "date" also has special values
"M<n>" gives ticks spaced by a number of
months. `n` must be a positive integer. To set
ticks on the 15th of every third month, set
`tick0` to "2000-01-15" and `dtick` to "M3". To
set ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick
exponents. For example, consider the number
1,000,000,000. If "none", it appears as
1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If
"SI", 1G. If "B", 1B.
len
                Sets the length of the color bar. This measure
excludes the padding of both ends. That is, the
color bar length is this length minus the
padding on both ends.
lenmode
Determines whether this color bar's length
(i.e. the measure in the color variation
direction) is set in units of plot "fraction"
                or in "pixels". Use `len` to set the value.
minexponent
Hide SI prefix for 10^n if |n| is below this
number. This only has an effect when
`tickformat` is "SI" or "B".
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks
will be chosen automatically to be less than or
equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of
the first tick is shown. If "last", only the
exponent of the last tick is shown. If "none",
no exponents appear.
showticklabels
Determines whether or not the tick labels are
drawn.
showtickprefix
If "all", all tick labels are displayed with a
prefix. If "first", only the first tick is
displayed with a prefix. If "last", only the
last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
                Sets the thickness of the color bar. This
measure excludes the size of the padding, ticks
and labels.
thicknessmode
Determines whether this color bar's thickness
(i.e. the measure in the constant color
direction) is set in units of plot "fraction"
or in "pixels". Use `thickness` to set the
value.
tick0
Sets the placement of the first tick on this
axis. Use with `dtick`. If the axis `type` is
"log", then you must take the log of your
starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when
`dtick`=*L<f>* (see `dtick` for more info). If
the axis `type` is "date", it should be a date
string, like date data. If the axis `type` is
"category", it should be a number, using the
scale where each category is assigned a serial
number from zero in the order it appears.
tickangle
Sets the angle of the tick labels with respect
to the horizontal. For example, a `tickangle`
of -90 draws the tick labels vertically.
tickcolor
Sets the tick color.
tickfont
                Sets the color bar's tick label font.
tickformat
Sets the tick label formatting rule using d3
formatting mini-languages which are very
similar to those in Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format
And for dates see:
https://github.com/d3/d3-time-
format#locale_format We add one item to d3's
date formatter: "%{n}f" for fractional seconds
with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f"
would display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.volume.
colorbar.Tickformatstop` instances or dicts
with compatible properties
tickformatstopdefaults
When used in a template (as layout.template.dat
a.volume.colorbar.tickformatstopdefaults), sets
the default property values to use for elements
of volume.colorbar.tickformatstops
ticklabeloverflow
Determines how we handle tick labels that would
overflow either the graph div or the domain of
the axis. The default value for inside tick
labels is *hide past domain*. In other cases
the default is *hide past div*.
ticklabelposition
Determines where tick labels are drawn.
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto",
the number of ticks is set via `nticks`. If
"linear", the placement of the ticks is
determined by a starting position `tick0` and a
tick step `dtick` ("linear" is the default
value if `tick0` and `dtick` are provided). If
"array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`.
("array" is the default value if `tickvals` is
provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If
"", this axis' ticks are not drawn. If
"outside" ("inside"), this axis' are drawn
outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position
via `tickvals`. Only has an effect if
`tickmode` is set to "array". Used with
`tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud
                for ticktext.
tickvals
Sets the values at which ticks on this axis
appear. Only has an effect if `tickmode` is set
to "array". Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud
                for tickvals.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.volume.colorbar.Ti
tle` instance or dict with compatible
properties
titlefont
Deprecated: Please use
volume.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's
font used to be set by the now deprecated
`titlefont` attribute.
titleside
Deprecated: Please use
volume.colorbar.title.side instead. Determines
the location of color bar's title with respect
to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position
anchor. This anchor binds the `x` position to
the "left", "center" or "right" of the color
bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
                Sets this color bar's vertical position anchor.
This anchor binds the `y` position to the
"top", "middle" or "bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
""",
),
**kwargs
)
| true
| true
|
79080d194f59b7ebce045ab3e3d262ca948d9391
| 22,561
|
py
|
Python
|
tensorflow/contrib/linalg/python/ops/linear_operator_kronecker.py
|
tucaiyong/tensorflow
|
3cc3c87f375f1bc292bd58db4928b810ac888bc6
|
[
"Apache-2.0"
] | 5
|
2018-09-22T20:16:46.000Z
|
2022-02-28T10:35:19.000Z
|
tensorflow/contrib/linalg/python/ops/linear_operator_kronecker.py
|
tucaiyong/tensorflow
|
3cc3c87f375f1bc292bd58db4928b810ac888bc6
|
[
"Apache-2.0"
] | 10
|
2018-02-04T18:41:52.000Z
|
2018-05-02T09:00:46.000Z
|
tensorflow/contrib/linalg/python/ops/linear_operator_kronecker.py
|
tucaiyong/tensorflow
|
3cc3c87f375f1bc292bd58db4928b810ac888bc6
|
[
"Apache-2.0"
] | 4
|
2018-01-17T14:22:49.000Z
|
2018-02-27T15:06:41.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Construct the Kronecker product of one or more `LinearOperators`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
def _vec(x):
"""Stacks column of matrix to form a single column."""
return array_ops.reshape(
array_ops.matrix_transpose(x),
array_ops.concat(
[array_ops.shape(x)[:-2], [-1]], axis=0))
def _unvec_by(y, num_col):
"""Unstack vector to form a matrix, with a specified amount of columns."""
return array_ops.matrix_transpose(
array_ops.reshape(
y,
array_ops.concat(
[array_ops.shape(y)[:-1], [num_col, -1]], axis=0)))
def _rotate_last_dim(x, rotate_right=False):
"""Rotate the last dimension either left or right."""
ndims = array_ops.rank(x)
if rotate_right:
transpose_perm = array_ops.concat(
[[ndims - 1], math_ops.range(0, ndims - 1)], axis=0)
else:
transpose_perm = array_ops.concat(
[math_ops.range(1, ndims), [0]], axis=0)
return array_ops.transpose(x, transpose_perm)
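For intuition about the column-stacking convention these helpers implement, a
NumPy mirror of `_vec` (invented example values; not part of the original
file):

import numpy as np

# Hypothetical NumPy equivalent of _vec above: transpose, then flatten, so
# the matrix's columns end up stacked under each other.
def np_vec(x):
    return np.swapaxes(x, -1, -2).reshape(x.shape[:-2] + (-1,))

x = np.arange(6).reshape(2, 3)   # [[0, 1, 2], [3, 4, 5]]
print(np_vec(x))                 # -> [0 3 1 4 2 5]: columns stacked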
class LinearOperatorKronecker(linear_operator.LinearOperator):
"""Kronecker product between two `LinearOperators`.
This operator composes one or more linear operators `[op1,...,opJ]`,
building a new `LinearOperator` representing the Kronecker product:
`op1 x op2 x .. opJ` (we omit parentheses as the Kronecker product is
associative).
  If `opj` has shape `batch_shape_j + [M_j, N_j]`, then the composed operator
will have shape equal to `broadcast_batch_shape + [prod M_j, prod N_j]`,
where the product is over all operators.
```python
# Create a 4 x 4 linear operator composed of two 2 x 2 operators.
operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])
operator_2 = LinearOperatorFullMatrix([[1., 0.], [2., 1.]])
operator = LinearOperatorKronecker([operator_1, operator_2])
operator.to_dense()
  ==> [[1., 0., 2., 0.],
       [2., 1., 4., 2.],
       [3., 0., 4., 0.],
       [6., 3., 8., 4.]]
operator.shape
==> [4, 4]
operator.log_abs_determinant()
==> scalar Tensor
x = ... Shape [4, 2] Tensor
operator.matmul(x)
==> Shape [4, 2] Tensor
# Create a [2, 3] batch of 4 x 5 linear operators.
matrix_45 = tf.random_normal(shape=[2, 3, 4, 5])
  operator_45 = LinearOperatorFullMatrix(matrix_45)
# Create a [2, 3] batch of 5 x 6 linear operators.
matrix_56 = tf.random_normal(shape=[2, 3, 5, 6])
operator_56 = LinearOperatorFullMatrix(matrix_56)
# Compose to create a [2, 3] batch of 20 x 30 operators.
operator_large = LinearOperatorKronecker([operator_45, operator_56])
  # Create a shape [2, 3, 30, 2] vector.
  x = tf.random_normal(shape=[2, 3, 30, 2])
  operator_large.matmul(x)
  ==> Shape [2, 3, 20, 2] Tensor
```
#### Performance
The performance of `LinearOperatorKronecker` on any operation is equal to
the sum of the individual operators' operations.
#### Matrix property hints
This `LinearOperator` is initialized with boolean flags of the form `is_X`,
for `X = non_singular, self_adjoint, positive_definite, square`.
These have the following meaning:
* If `is_X == True`, callers should expect the operator to have the
property `X`. This is a promise that should be fulfilled, but is *not* a
runtime assert. For example, finite floating point precision may result
in these promises being violated.
* If `is_X == False`, callers should expect the operator to not have `X`.
* If `is_X == None` (the default), callers should have no expectation either
way.
"""
def __init__(self,
operators,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
r"""Initialize a `LinearOperatorKronecker`.
`LinearOperatorKronecker` is initialized with a list of operators
`[op_1,...,op_J]`.
Args:
operators: Iterable of `LinearOperator` objects, each with
the same `dtype` and composable shape, representing the Kronecker
factors.
is_non_singular: Expect that this operator is non-singular.
is_self_adjoint: Expect that this operator is equal to its hermitian
transpose.
is_positive_definite: Expect that this operator is positive definite,
meaning the quadratic form `x^H A x` has positive real part for all
nonzero `x`. Note that we do not require the operator to be
self-adjoint to be positive-definite. See:
https://en.wikipedia.org/wiki/Positive-definite_matrix\
#Extension_for_non_symmetric_matrices
is_square: Expect that this operator acts like square [batch] matrices.
name: A name for this `LinearOperator`. Default is the individual
operators names joined with `_x_`.
Raises:
TypeError: If all operators do not have the same `dtype`.
ValueError: If `operators` is empty.
"""
# Validate operators.
check_ops.assert_proper_iterable(operators)
operators = list(operators)
if not operators:
raise ValueError(
"Expected a list of >=1 operators. Found: %s" % operators)
self._operators = operators
# Validate dtype.
dtype = operators[0].dtype
for operator in operators:
if operator.dtype != dtype:
name_type = (str((o.name, o.dtype)) for o in operators)
raise TypeError(
"Expected all operators to have the same dtype. Found %s"
% " ".join(name_type))
# Auto-set and check hints.
    # A Kronecker product is invertible if and only if all factors are
# invertible.
if all(operator.is_non_singular for operator in operators):
if is_non_singular is False:
raise ValueError(
"The Kronecker product of non-singular operators is always "
"non-singular.")
is_non_singular = True
if all(operator.is_self_adjoint for operator in operators):
if is_self_adjoint is False:
raise ValueError(
"The Kronecker product of self-adjoint operators is always "
"self-adjoint.")
is_self_adjoint = True
# The eigenvalues of a Kronecker product are equal to the products of eigen
# values of the corresponding factors.
if all(operator.is_positive_definite for operator in operators):
if is_positive_definite is False:
raise ValueError("The Kronecker product of positive-definite operators "
"is always positive-definite.")
is_positive_definite = True
# Initialization.
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
if name is None:
name = operators[0].name
for operator in operators[1:]:
name += "_x_" + operator.name
with ops.name_scope(name, values=graph_parents):
super(LinearOperatorKronecker, self).__init__(
dtype=dtype,
graph_parents=graph_parents,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
@property
def operators(self):
return self._operators
def _shape(self):
# Get final matrix shape.
domain_dimension = self.operators[0].domain_dimension
for operator in self.operators[1:]:
domain_dimension *= operator.domain_dimension
range_dimension = self.operators[0].range_dimension
for operator in self.operators[1:]:
range_dimension *= operator.range_dimension
matrix_shape = tensor_shape.TensorShape([
range_dimension, domain_dimension])
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0].batch_shape
for operator in self.operators[1:]:
batch_shape = common_shapes.broadcast_shape(
batch_shape, operator.batch_shape)
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
domain_dimension = self.operators[0].domain_dimension_tensor()
for operator in self.operators[1:]:
domain_dimension *= operator.domain_dimension_tensor()
range_dimension = self.operators[0].range_dimension_tensor()
for operator in self.operators[1:]:
range_dimension *= operator.range_dimension_tensor()
matrix_shape = [range_dimension, domain_dimension]
# Get broadcast batch shape.
# broadcast_shape checks for compatibility.
batch_shape = self.operators[0].batch_shape_tensor()
for operator in self.operators[1:]:
batch_shape = array_ops.broadcast_dynamic_shape(
batch_shape, operator.batch_shape_tensor())
return array_ops.concat((batch_shape, matrix_shape), 0)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
# Here we heavily rely on Roth's column Lemma [1]:
# (A x B) * vec X = vec BXA^T,
# where vec stacks all the columns of the matrix under each other. In our
# case, x represents a batch of vec X (i.e. we think of x as a batch of
# column vectors, rather than a matrix). Each member of the batch can be
# reshaped to a matrix (hence we get a batch of matrices).
# We can iteratively apply this lemma by noting that if B is a Kronecker
# product, then we can apply the lemma again.
# [1] W. E. Roth, "On direct product matrices,"
# Bulletin of the American Mathematical Society, vol. 40, pp. 461-468,
# 1934
# Efficiency
# Naively doing the Kronecker product, by calculating the dense matrix and
    # applying it can take cubic time in the size of domain_dimension
# (assuming a square matrix). The other issue is that calculating the dense
# matrix can be prohibitively expensive, in that it can take a large amount
# of memory.
#
# This implementation avoids this memory blow up by only computing matmuls
# with the factors. In this way, we don't have to realize the dense matrix.
# In terms of complexity, if we have Kronecker Factors of size:
# (n1, n1), (n2, n2), (n3, n3), ... (nJ, nJ), with N = \prod n_i, and we
# have as input a [N, M] matrix, the naive approach would take O(N^2 M).
# With this approach (ignoring reshaping of tensors and transposes for now),
# the time complexity can be O(M * (\sum n_i) * N). There is also the
# benefit of batched multiplication (In this example, the batch size is
# roughly M * N) so this can be much faster. However, not factored in are
# the costs of the several transposing of tensors, which can affect cache
# behavior.
# Below we document the shape manipulation for adjoint=False,
# adjoint_arg=False, but the general case of different adjoints is still
# handled.
if adjoint_arg:
x = linalg.adjoint(x)
# Always add a batch dimension to enable broadcasting to work.
batch_shape = array_ops.concat(
[array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0)
x += array_ops.zeros(batch_shape, dtype=x.dtype.base_dtype)
    # x has shape [B, R, C], where B represents some number of batch dimensions,
# R represents the number of rows, and C represents the number of columns.
# In order to apply Roth's column lemma, we need to operate on a batch of
# column vectors, so we reshape into a batch of column vectors. We put it
# at the front to ensure that broadcasting between operators to the batch
# dimensions B still works.
output = _rotate_last_dim(x, rotate_right=True)
# Also expand the shape to be [A, C, B, R]. The first dimension will be
# used to accumulate dimensions from each operator matmul.
output = output[array_ops.newaxis, ...]
# In this loop, A is going to refer to the value of the accumulated
# dimension. A = 1 at the start, and will end up being self.range_dimension.
# V will refer to the last dimension. V = R at the start, and will end up
# being 1 in the end.
for operator in self.operators[:-1]:
# Reshape output from [A, C, B, V] to be
# [A, C, B, V / op.domain_dimension, op.domain_dimension]
if adjoint:
operator_dimension = operator.range_dimension_tensor()
else:
operator_dimension = operator.domain_dimension_tensor()
output = _unvec_by(output, operator_dimension)
# We are computing (XA^T) = (AX^T)^T.
# output has [A, C, B, V / op.domain_dimension, op.domain_dimension],
# which is being converted to:
# [A, C, B, V / op.domain_dimension, op.range_dimension]
output = array_ops.matrix_transpose(output)
output = operator.matmul(output, adjoint=adjoint, adjoint_arg=False)
output = array_ops.matrix_transpose(output)
# Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=True)
# After the loop, we will have
# A = self.range_dimension / op[-1].range_dimension
# V = op[-1].domain_dimension
# We convert that using matvec to get:
# [A, C, B, op[-1].range_dimension]
output = self.operators[-1].matvec(output, adjoint=adjoint)
# Rearrange shape to be [B1, ... Bn, self.range_dimension, C]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=False)
if x.shape.is_fully_defined():
column_dim = x.shape[-1]
broadcast_batch_shape = common_shapes.broadcast_shape(
x.shape[:-2], self.batch_shape)
if adjoint:
matrix_dimensions = [self.domain_dimension, column_dim]
else:
matrix_dimensions = [self.range_dimension, column_dim]
output.set_shape(broadcast_batch_shape.concatenate(
matrix_dimensions))
return output
def _determinant(self):
# Note that we have |X1 x X2| = |X1| ** n * |X2| ** m, where X1 is an m x m
# matrix, and X2 is an n x n matrix. We can iteratively apply this property
# to get the determinant of |X1 x X2 x X3 ...|. If T is the product of the
# domain dimension of all operators, then we have:
# |X1 x X2 x X3 ...| =
# |X1| ** (T / m) * |X2 x X3 ... | ** m =
# |X1| ** (T / m) * |X2| ** (m * (T / m) / n) * ... =
# |X1| ** (T / m) * |X2| ** (T / n) * | X3 x X4... | ** (m * n)
# And by doing induction we have product(|X_i| ** (T / dim(X_i))).
total = self.domain_dimension_tensor()
determinant = 1.
for operator in self.operators:
determinant *= operator.determinant() ** math_ops.cast(
total / operator.domain_dimension_tensor(),
dtype=operator.dtype)
return determinant
def _log_abs_determinant(self):
# This will be sum((total / dim(x_i)) * log |X_i|)
total = self.domain_dimension_tensor()
log_abs_det = 0.
for operator in self.operators:
log_abs_det += operator.log_abs_determinant() * math_ops.cast(
total / operator.domain_dimension_tensor(),
dtype=operator.dtype)
return log_abs_det
def _trace(self):
# tr(A x B) = tr(A) * tr(B)
trace = 1.
for operator in self.operators:
trace *= operator.trace()
return trace
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
# Here we follow the same use of Roth's column lemma as in `matmul`, with
# the key difference that we replace all `matmul` instances with `solve`.
# This follows from the property that inv(A x B) = inv(A) x inv(B).
# Below we document the shape manipulation for adjoint=False,
# adjoint_arg=False, but the general case of different adjoints is still
# handled.
if adjoint_arg:
rhs = linalg.adjoint(rhs)
# Always add a batch dimension to enable broadcasting to work.
batch_shape = array_ops.concat(
[array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0)
rhs += array_ops.zeros(batch_shape, dtype=rhs.dtype.base_dtype)
    # rhs has shape [B, R, C], where B represents some number of batch
# dimensions,
# R represents the number of rows, and C represents the number of columns.
# In order to apply Roth's column lemma, we need to operate on a batch of
# column vectors, so we reshape into a batch of column vectors. We put it
# at the front to ensure that broadcasting between operators to the batch
# dimensions B still works.
output = _rotate_last_dim(rhs, rotate_right=True)
# Also expand the shape to be [A, C, B, R]. The first dimension will be
# used to accumulate dimensions from each operator matmul.
output = output[array_ops.newaxis, ...]
# In this loop, A is going to refer to the value of the accumulated
# dimension. A = 1 at the start, and will end up being self.range_dimension.
# V will refer to the last dimension. V = R at the start, and will end up
# being 1 in the end.
for operator in self.operators[:-1]:
# Reshape output from [A, C, B, V] to be
# [A, C, B, V / op.domain_dimension, op.domain_dimension]
if adjoint:
operator_dimension = operator.range_dimension_tensor()
else:
operator_dimension = operator.domain_dimension_tensor()
output = _unvec_by(output, operator_dimension)
# We are computing (XA^-1^T) = (A^-1 X^T)^T.
# output has [A, C, B, V / op.domain_dimension, op.domain_dimension],
# which is being converted to:
# [A, C, B, V / op.domain_dimension, op.range_dimension]
output = array_ops.matrix_transpose(output)
output = operator.solve(output, adjoint=adjoint, adjoint_arg=False)
output = array_ops.matrix_transpose(output)
# Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=True)
# After the loop, we will have
# A = self.range_dimension / op[-1].range_dimension
# V = op[-1].domain_dimension
# We convert that using matvec to get:
# [A, C, B, op[-1].range_dimension]
output = self.operators[-1].solvevec(output, adjoint=adjoint)
# Rearrange shape to be [B1, ... Bn, self.range_dimension, C]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=False)
if rhs.shape.is_fully_defined():
column_dim = rhs.shape[-1]
broadcast_batch_shape = common_shapes.broadcast_shape(
rhs.shape[:-2], self.batch_shape)
if adjoint:
matrix_dimensions = [self.domain_dimension, column_dim]
else:
matrix_dimensions = [self.range_dimension, column_dim]
output.set_shape(broadcast_batch_shape.concatenate(
matrix_dimensions))
return output
def _diag_part(self):
diag_part = self.operators[0].diag_part()
for operator in self.operators[1:]:
diag_part = diag_part[..., :, array_ops.newaxis]
op_diag_part = operator.diag_part()[..., array_ops.newaxis, :]
diag_part *= op_diag_part
diag_part = array_ops.reshape(
diag_part,
shape=array_ops.concat(
[array_ops.shape(diag_part)[:-2], [-1]], axis=0))
if self.range_dimension > self.domain_dimension:
diag_dimension = self.domain_dimension
else:
diag_dimension = self.range_dimension
diag_part.set_shape(
self.batch_shape.concatenate(diag_dimension))
return diag_part
def _to_dense(self):
product = self.operators[0].to_dense()
for operator in self.operators[1:]:
# Product has shape [B, R1, 1, C1].
product = product[
..., :, array_ops.newaxis, :, array_ops.newaxis]
# Operator has shape [B, 1, R2, 1, C2].
op_to_mul = operator.to_dense()[
..., array_ops.newaxis, :, array_ops.newaxis, :]
# This is now [B, R1, R2, C1, C2].
product *= op_to_mul
# Now merge together dimensions to get [B, R1 * R2, C1 * C2].
product = array_ops.reshape(
product,
shape=array_ops.concat(
[array_ops.shape(product)[:-4],
[array_ops.shape(product)[-4] * array_ops.shape(product)[-3],
array_ops.shape(product)[-2] * array_ops.shape(product)[-1]]
], axis=0))
product.set_shape(self.shape)
return product
def _assert_non_singular(self):
if all(operator.is_square for operator in self.operators):
asserts = [operator.assert_non_singular() for operator in self.operators]
return control_flow_ops.group(asserts)
else:
raise errors.InvalidArgumentError(
node_def=None, op=None, message="All Kronecker factors must be "
"square for the product to be invertible.")
def _assert_self_adjoint(self):
if all(operator.is_square for operator in self.operators):
asserts = [operator.assert_self_adjoint() for operator in self.operators]
return control_flow_ops.group(asserts)
else:
raise errors.InvalidArgumentError(
node_def=None, op=None, message="All Kronecker factors must be "
"square for the product to be self adjoint.")
| 40.215686
| 80
| 0.673685
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.linalg import linalg_impl as linalg
from tensorflow.python.ops.linalg import linear_operator
def _vec(x):
return array_ops.reshape(
array_ops.matrix_transpose(x),
array_ops.concat(
[array_ops.shape(x)[:-2], [-1]], axis=0))
def _unvec_by(y, num_col):
return array_ops.matrix_transpose(
array_ops.reshape(
y,
array_ops.concat(
[array_ops.shape(y)[:-1], [num_col, -1]], axis=0)))
def _rotate_last_dim(x, rotate_right=False):
ndims = array_ops.rank(x)
if rotate_right:
transpose_perm = array_ops.concat(
[[ndims - 1], math_ops.range(0, ndims - 1)], axis=0)
else:
transpose_perm = array_ops.concat(
[math_ops.range(1, ndims), [0]], axis=0)
return array_ops.transpose(x, transpose_perm)
class LinearOperatorKronecker(linear_operator.LinearOperator):
def __init__(self,
operators,
is_non_singular=None,
is_self_adjoint=None,
is_positive_definite=None,
is_square=None,
name=None):
check_ops.assert_proper_iterable(operators)
operators = list(operators)
if not operators:
raise ValueError(
"Expected a list of >=1 operators. Found: %s" % operators)
self._operators = operators
dtype = operators[0].dtype
for operator in operators:
if operator.dtype != dtype:
name_type = (str((o.name, o.dtype)) for o in operators)
raise TypeError(
"Expected all operators to have the same dtype. Found %s"
% " ".join(name_type))
if all(operator.is_non_singular for operator in operators):
if is_non_singular is False:
raise ValueError(
"The Kronecker product of non-singular operators is always "
"non-singular.")
is_non_singular = True
if all(operator.is_self_adjoint for operator in operators):
if is_self_adjoint is False:
raise ValueError(
"The Kronecker product of self-adjoint operators is always "
"self-adjoint.")
is_self_adjoint = True
if all(operator.is_positive_definite for operator in operators):
if is_positive_definite is False:
raise ValueError("The Kronecker product of positive-definite operators "
"is always positive-definite.")
is_positive_definite = True
graph_parents = []
for operator in operators:
graph_parents.extend(operator.graph_parents)
if name is None:
name = operators[0].name
for operator in operators[1:]:
name += "_x_" + operator.name
with ops.name_scope(name, values=graph_parents):
super(LinearOperatorKronecker, self).__init__(
dtype=dtype,
graph_parents=graph_parents,
is_non_singular=is_non_singular,
is_self_adjoint=is_self_adjoint,
is_positive_definite=is_positive_definite,
is_square=is_square,
name=name)
@property
def operators(self):
return self._operators
def _shape(self):
domain_dimension = self.operators[0].domain_dimension
for operator in self.operators[1:]:
domain_dimension *= operator.domain_dimension
range_dimension = self.operators[0].range_dimension
for operator in self.operators[1:]:
range_dimension *= operator.range_dimension
matrix_shape = tensor_shape.TensorShape([
range_dimension, domain_dimension])
batch_shape = self.operators[0].batch_shape
for operator in self.operators[1:]:
batch_shape = common_shapes.broadcast_shape(
batch_shape, operator.batch_shape)
return batch_shape.concatenate(matrix_shape)
def _shape_tensor(self):
domain_dimension = self.operators[0].domain_dimension_tensor()
for operator in self.operators[1:]:
domain_dimension *= operator.domain_dimension_tensor()
range_dimension = self.operators[0].range_dimension_tensor()
for operator in self.operators[1:]:
range_dimension *= operator.range_dimension_tensor()
matrix_shape = [range_dimension, domain_dimension]
batch_shape = self.operators[0].batch_shape_tensor()
for operator in self.operators[1:]:
batch_shape = array_ops.broadcast_dynamic_shape(
batch_shape, operator.batch_shape_tensor())
return array_ops.concat((batch_shape, matrix_shape), 0)
def _matmul(self, x, adjoint=False, adjoint_arg=False):
# (A x B) * vec X = vec BXA^T,
# where vec stacks all the columns of the matrix under each other. In our
# case, x represents a batch of vec X (i.e. we think of x as a batch of
# column vectors, rather than a matrix). Each member of the batch can be
# reshaped to a matrix (hence we get a batch of matrices).
# We can iteratively apply this lemma by noting that if B is a Kronecker
# product, then we can apply the lemma again.
# [1] W. E. Roth, "On direct product matrices,"
# Bulletin of the American Mathematical Society, vol. 40, pp. 461-468,
# 1934
# Efficiency
# Naively doing the Kronecker product, by calculating the dense matrix and
    # applying it can take cubic time in the size of domain_dimension
# (assuming a square matrix). The other issue is that calculating the dense
# matrix can be prohibitively expensive, in that it can take a large amount
# of memory.
#
# This implementation avoids this memory blow up by only computing matmuls
# with the factors. In this way, we don't have to realize the dense matrix.
if adjoint_arg:
x = linalg.adjoint(x)
batch_shape = array_ops.concat(
[array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0)
x += array_ops.zeros(batch_shape, dtype=x.dtype.base_dtype)
# column vectors, so we reshape into a batch of column vectors. We put it
# at the front to ensure that broadcasting between operators to the batch
# dimensions B still works.
output = _rotate_last_dim(x, rotate_right=True)
# Also expand the shape to be [A, C, B, R]. The first dimension will be
# used to accumulate dimensions from each operator matmul.
output = output[array_ops.newaxis, ...]
# In this loop, A is going to refer to the value of the accumulated
# dimension. A = 1 at the start, and will end up being self.range_dimension.
# V will refer to the last dimension. V = R at the start, and will end up
# being 1 in the end.
for operator in self.operators[:-1]:
# Reshape output from [A, C, B, V] to be
# [A, C, B, V / op.domain_dimension, op.domain_dimension]
if adjoint:
operator_dimension = operator.range_dimension_tensor()
else:
operator_dimension = operator.domain_dimension_tensor()
output = _unvec_by(output, operator_dimension)
# We are computing (XA^T) = (AX^T)^T.
# output has [A, C, B, V / op.domain_dimension, op.domain_dimension],
# which is being converted to:
# [A, C, B, V / op.domain_dimension, op.range_dimension]
output = array_ops.matrix_transpose(output)
output = operator.matmul(output, adjoint=adjoint, adjoint_arg=False)
output = array_ops.matrix_transpose(output)
# Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=True)
# After the loop, we will have
# A = self.range_dimension / op[-1].range_dimension
# V = op[-1].domain_dimension
# We convert that using matvec to get:
# [A, C, B, op[-1].range_dimension]
output = self.operators[-1].matvec(output, adjoint=adjoint)
# Rearrange shape to be [B1, ... Bn, self.range_dimension, C]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=False)
if x.shape.is_fully_defined():
column_dim = x.shape[-1]
broadcast_batch_shape = common_shapes.broadcast_shape(
x.shape[:-2], self.batch_shape)
if adjoint:
matrix_dimensions = [self.domain_dimension, column_dim]
else:
matrix_dimensions = [self.range_dimension, column_dim]
output.set_shape(broadcast_batch_shape.concatenate(
matrix_dimensions))
return output
def _determinant(self):
# Note that we have |X1 x X2| = |X1| ** n * |X2| ** m, where X1 is an m x m
# matrix, and X2 is an n x n matrix. We can iteratively apply this property
# to get the determinant of |X1 x X2 x X3 ...|. If T is the product of the
# domain dimension of all operators, then we have:
# |X1 x X2 x X3 ...| =
# |X1| ** (T / m) * |X2 x X3 ... | ** m =
# |X1| ** (T / m) * |X2| ** (m * (T / m) / n) * ... =
# |X1| ** (T / m) * |X2| ** (T / n) * | X3 x X4... | ** (m * n)
# And by doing induction we have product(|X_i| ** (T / dim(X_i))).
total = self.domain_dimension_tensor()
determinant = 1.
for operator in self.operators:
determinant *= operator.determinant() ** math_ops.cast(
total / operator.domain_dimension_tensor(),
dtype=operator.dtype)
return determinant
def _log_abs_determinant(self):
# This will be sum((total / dim(x_i)) * log |X_i|)
total = self.domain_dimension_tensor()
log_abs_det = 0.
for operator in self.operators:
log_abs_det += operator.log_abs_determinant() * math_ops.cast(
total / operator.domain_dimension_tensor(),
dtype=operator.dtype)
return log_abs_det
def _trace(self):
# tr(A x B) = tr(A) * tr(B)
trace = 1.
for operator in self.operators:
trace *= operator.trace()
return trace
def _solve(self, rhs, adjoint=False, adjoint_arg=False):
# Here we follow the same use of Roth's column lemma as in `matmul`, with
if adjoint_arg:
rhs = linalg.adjoint(rhs)
batch_shape = array_ops.concat(
[array_ops.ones_like(self.batch_shape_tensor()), [1, 1]], 0)
rhs += array_ops.zeros(batch_shape, dtype=rhs.dtype.base_dtype)
# column vectors, so we reshape into a batch of column vectors. We put it
# at the front to ensure that broadcasting between operators to the batch
# dimensions B still works.
output = _rotate_last_dim(rhs, rotate_right=True)
# Also expand the shape to be [A, C, B, R]. The first dimension will be
# used to accumulate dimensions from each operator matmul.
output = output[array_ops.newaxis, ...]
# In this loop, A is going to refer to the value of the accumulated
# dimension. A = 1 at the start, and will end up being self.range_dimension.
# V will refer to the last dimension. V = R at the start, and will end up
# being 1 in the end.
for operator in self.operators[:-1]:
# Reshape output from [A, C, B, V] to be
# [A, C, B, V / op.domain_dimension, op.domain_dimension]
if adjoint:
operator_dimension = operator.range_dimension_tensor()
else:
operator_dimension = operator.domain_dimension_tensor()
output = _unvec_by(output, operator_dimension)
# We are computing (XA^-1^T) = (A^-1 X^T)^T.
# output has [A, C, B, V / op.domain_dimension, op.domain_dimension],
# which is being converted to:
# [A, C, B, V / op.domain_dimension, op.range_dimension]
output = array_ops.matrix_transpose(output)
output = operator.solve(output, adjoint=adjoint, adjoint_arg=False)
output = array_ops.matrix_transpose(output)
# Rearrange it to [A * op.range_dimension, C, B, V / op.domain_dimension]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=True)
# After the loop, we will have
# A = self.range_dimension / op[-1].range_dimension
# V = op[-1].domain_dimension
# We convert that using matvec to get:
# [A, C, B, op[-1].range_dimension]
output = self.operators[-1].solvevec(output, adjoint=adjoint)
# Rearrange shape to be [B1, ... Bn, self.range_dimension, C]
output = _rotate_last_dim(output, rotate_right=False)
output = _vec(output)
output = _rotate_last_dim(output, rotate_right=False)
if rhs.shape.is_fully_defined():
column_dim = rhs.shape[-1]
broadcast_batch_shape = common_shapes.broadcast_shape(
rhs.shape[:-2], self.batch_shape)
if adjoint:
matrix_dimensions = [self.domain_dimension, column_dim]
else:
matrix_dimensions = [self.range_dimension, column_dim]
output.set_shape(broadcast_batch_shape.concatenate(
matrix_dimensions))
return output
def _diag_part(self):
diag_part = self.operators[0].diag_part()
for operator in self.operators[1:]:
diag_part = diag_part[..., :, array_ops.newaxis]
op_diag_part = operator.diag_part()[..., array_ops.newaxis, :]
diag_part *= op_diag_part
diag_part = array_ops.reshape(
diag_part,
shape=array_ops.concat(
[array_ops.shape(diag_part)[:-2], [-1]], axis=0))
if self.range_dimension > self.domain_dimension:
diag_dimension = self.domain_dimension
else:
diag_dimension = self.range_dimension
diag_part.set_shape(
self.batch_shape.concatenate(diag_dimension))
return diag_part
def _to_dense(self):
product = self.operators[0].to_dense()
for operator in self.operators[1:]:
# Product has shape [B, R1, 1, C1].
product = product[
..., :, array_ops.newaxis, :, array_ops.newaxis]
# Operator has shape [B, 1, R2, 1, C2].
op_to_mul = operator.to_dense()[
..., array_ops.newaxis, :, array_ops.newaxis, :]
# This is now [B, R1, R2, C1, C2].
product *= op_to_mul
# Now merge together dimensions to get [B, R1 * R2, C1 * C2].
product = array_ops.reshape(
product,
shape=array_ops.concat(
[array_ops.shape(product)[:-4],
[array_ops.shape(product)[-4] * array_ops.shape(product)[-3],
array_ops.shape(product)[-2] * array_ops.shape(product)[-1]]
], axis=0))
product.set_shape(self.shape)
return product
def _assert_non_singular(self):
if all(operator.is_square for operator in self.operators):
asserts = [operator.assert_non_singular() for operator in self.operators]
return control_flow_ops.group(asserts)
else:
raise errors.InvalidArgumentError(
node_def=None, op=None, message="All Kronecker factors must be "
"square for the product to be invertible.")
def _assert_self_adjoint(self):
if all(operator.is_square for operator in self.operators):
asserts = [operator.assert_self_adjoint() for operator in self.operators]
return control_flow_ops.group(asserts)
else:
raise errors.InvalidArgumentError(
node_def=None, op=None, message="All Kronecker factors must be "
"square for the product to be self adjoint.")
| true
| true
|
79080d455b977e90b4d287d5c1fbf40379286d55
| 1,431
|
py
|
Python
|
dajare_detector/featurize/make_decide_kana_feature.py
|
vaaaaanquish/dajare-detector
|
e8f2d6c861dc0e03b6bc38ba64463bf95376f949
|
[
"MIT"
] | 14
|
2020-12-11T01:42:53.000Z
|
2021-06-22T06:14:03.000Z
|
dajare_detector/featurize/make_decide_kana_feature.py
|
vaaaaanquish/dajare-detector
|
e8f2d6c861dc0e03b6bc38ba64463bf95376f949
|
[
"MIT"
] | null | null | null |
dajare_detector/featurize/make_decide_kana_feature.py
|
vaaaaanquish/dajare-detector
|
e8f2d6c861dc0e03b6bc38ba64463bf95376f949
|
[
"MIT"
] | null | null | null |
from logging import getLogger
import gokart
import luigi
import swifter # noqa
from dajare_detector.utils.base_task import DajareTask
from dajare_detector.preprocessing.make_kana_pattern import MakeKanaPattern
from dajare_detector.preprocessing.make_splited_pattern import MakeSplitedPattern
from dajare_detector.preprocessing.decide_kana_pattern import DecideKanaPattern
from dajare_detector.preprocessing.normalize_kana_pattern import NormalizeKanaPattern
logger = getLogger(__name__)
class MakeDecideKanaFeature(DajareTask):
"""カタカナの繰り返しが発生したか"""
target = gokart.TaskInstanceParameter()
split_window_size = luigi.IntParameter()
def requires(self):
kana_task = NormalizeKanaPattern(target=MakeKanaPattern(
target=self.target))
split_task = MakeSplitedPattern(
target=kana_task, split_window_size=self.split_window_size)
return DecideKanaPattern(split_pattern_target=split_task,
kana_pattern_target=kana_task,
split_window_size=self.split_window_size)
def run(self):
df = self.load_data_frame().reset_index(drop=True)
df[f'decide_kana_{self.split_window_size}'] = df[
'decide_kana_flag_list'].swifter.apply(lambda x: 1
if any(x) else 0)
self.dump(df[['_id', f'decide_kana_{self.split_window_size}']])
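# Sketch of launching this task through gokart's builder (assuming a recent
# gokart with gokart.build). The upstream source task, its columns, and
# split_window_size=3 are hypothetical stand-ins for whatever actually feeds
# MakeKanaPattern in this repo.
import pandas as pd

class _DummyDajareSource(gokart.TaskOnKart):
    def run(self):
        self.dump(pd.DataFrame({'_id': [0, 1], 'text': ['...', '...']}))

df = gokart.build(MakeDecideKanaFeature(target=_DummyDajareSource(),
                                        split_window_size=3))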
| 39.75
| 85
| 0.715584
|
from logging import getLogger
import gokart
import luigi
import swifter
from dajare_detector.utils.base_task import DajareTask
from dajare_detector.preprocessing.make_kana_pattern import MakeKanaPattern
from dajare_detector.preprocessing.make_splited_pattern import MakeSplitedPattern
from dajare_detector.preprocessing.decide_kana_pattern import DecideKanaPattern
from dajare_detector.preprocessing.normalize_kana_pattern import NormalizeKanaPattern
logger = getLogger(__name__)
class MakeDecideKanaFeature(DajareTask):
target = gokart.TaskInstanceParameter()
split_window_size = luigi.IntParameter()
def requires(self):
kana_task = NormalizeKanaPattern(target=MakeKanaPattern(
target=self.target))
split_task = MakeSplitedPattern(
target=kana_task, split_window_size=self.split_window_size)
return DecideKanaPattern(split_pattern_target=split_task,
kana_pattern_target=kana_task,
split_window_size=self.split_window_size)
def run(self):
df = self.load_data_frame().reset_index(drop=True)
df[f'decide_kana_{self.split_window_size}'] = df[
'decide_kana_flag_list'].swifter.apply(lambda x: 1
if any(x) else 0)
self.dump(df[['_id', f'decide_kana_{self.split_window_size}']])
| true
| true
|
79080dd0aea28217f017a0122279bc4a555f92ba
| 7,766
|
py
|
Python
|
analysis/opensimulator-stats-analyzer/src/osta/osta.py
|
second-life/opensimulator-tools
|
0a0bee66dee0fc93fd0b2dd5043675dc9ec305f1
|
[
"BSD-3-Clause-Clear"
] | 11
|
2016-01-05T14:25:18.000Z
|
2022-01-08T07:45:09.000Z
|
analysis/opensimulator-stats-analyzer/src/osta/osta.py
|
ConnectionMaster/opensimulator-tools
|
0a0bee66dee0fc93fd0b2dd5043675dc9ec305f1
|
[
"BSD-3-Clause-Clear"
] | 1
|
2021-05-30T07:54:55.000Z
|
2021-12-26T02:26:51.000Z
|
analysis/opensimulator-stats-analyzer/src/osta/osta.py
|
ConnectionMaster/opensimulator-tools
|
0a0bee66dee0fc93fd0b2dd5043675dc9ec305f1
|
[
"BSD-3-Clause-Clear"
] | 14
|
2016-04-13T01:15:54.000Z
|
2021-01-07T19:50:14.000Z
|
import argparse
import collections
import fnmatch
import os.path
import pprint
import re
import sys
#######################
### OSimStatsHelper ###
#######################
class OSimStatsHelper:
"""Takes a list of stats and returns a stat containing their summation by each sample."""
@staticmethod
def sumStats(stats):
totalStat = {
'abs' : { 'units' : stats[0]['abs']['units'] },
'category' : stats[0]['category'],
'container' : "Total",
'name' : stats[0]['name'],
'fullName' : ".".join((stats[0]['category'], "Total", stats[0]['name']))
}
totalStat['abs']['values'] = OSimStatsHelper.sumStatsToValues(stats, 'abs')
#print "Summing %s" % (totalStat['name'])
if 'delta' in stats[0]:
totalStat['delta'] = { 'units' : stats[0]['delta']['units'] }
totalStat['delta']['values'] = OSimStatsHelper.sumStatsToValues(stats, 'delta')
return totalStat
@staticmethod
def sumStatsToValues(stats, type):
totals = []
for stat in stats:
values = stat[type]['values']
for i in range(0, len(values)):
if i + 1 > len(totals):
totals.append(values[i])
else:
totals[i] += values[i]
return totals
@staticmethod
def splitStatsFullName(fullName):
return statNamePartsRe.match(fullName).groups();
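# Quick illustration of the per-sample summation with two hand-made stat dicts;
# the values and names are invented, but the dict shape matches what load() builds.
cpu_a = {'abs': {'units': 'ms', 'values': [1.0, 2.0]}, 'category': 'server',
         'container': 'A', 'name': 'FrameTime', 'fullName': 'server.A.FrameTime'}
cpu_b = {'abs': {'units': 'ms', 'values': [3.0, 4.0]}, 'category': 'server',
         'container': 'B', 'name': 'FrameTime', 'fullName': 'server.B.FrameTime'}
total = OSimStatsHelper.sumStats([cpu_a, cpu_b])
assert total['abs']['values'] == [4.0, 6.0] and total['container'] == 'Total'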
#lineRe = re.compile("(.* .*) - (.*) : (\d+)[ ,]([^:]*)")
#lineRe = re.compile("(.* .*) - (.*) : (?P<abs>[\d\.-]+)(?: (?:\D+))?(?P<delta>[\d\.-]+)?")
lineRe = re.compile("(.* .*) - (.*) : (?P<abs>[^,]+)(?:, )?(?P<delta>[^,]+)?")
statsReportStartRe = re.compile(" - \*\*\* STATS REPORT AT")
statNamePartsRe = re.compile("^(.*?)\.(.*)\.(.*?)$");
valueRe = re.compile("([^ %/]+)(.*)")
#######################
### OSimStatsCorpus ###
#######################
class OSimStatsCorpus:
_data = {}
_samplesCount = 0
@property
def data(self):
return self._data
def __init__(self):
self.clear()
def __len__(self):
return self._samplesCount
@staticmethod
def parseValue(rawValue, valueRe):
valueMatch = valueRe.match(rawValue)
return float(valueMatch.group(1)), valueMatch.group(2)
def getStat(self, statFullName):
"""
Get a statistic given its full name.
FIXME: Does not allow one to interrogate a given set yet.
"""
if self._data is None:
    return None
(category, container, name) = OSimStatsHelper.splitStatsFullName(statFullName)
for set in self._data.values():
    if category in set and container in set[category] and name in set[category][container]:
        return set[category][container][name]
else:
    return None
def getStats(self, setGlob = "*", selectGlob = "*"):
"""
Returns a dictionary of stats where fullName => stat.
If glob is specified then this is used to match stats using their full name
If no stats are found then an empty dictionary is returned.
"""
if selectGlob == None:
selectGlob = "*"
if setGlob == None:
setGlob = "*"
matchingStats = collections.OrderedDict()
for setName, set in self._data.items():
if fnmatch.fnmatch(setName, setGlob):
for category, containers in set.items():
for container, stats in containers.items():
for statName, stat in stats.items():
if fnmatch.fnmatch(stat['fullName'], selectGlob):
matchingStats[stat['fullName']] = stat
return matchingStats
def clear(self):
"""Clear out any existing dataset."""
self._data = {}
self._samplesCount = 0
def load(self, path):
"""Load OpenSimulator stats log data from the given path and merge into any existing data."""
# Set structure
# category : {
# container : {
# stat : {
# 'abs' : { 'values' : [], 'units' : "" },
# 'delta' : { 'values' : [], 'units' : "" }
# 'name' : string
# 'fullName' : string
# 'category' : string
# 'container' : string
# }
# delta may not be present
with open(path) as f:
setName = os.path.splitext(os.path.basename(path))[0]
print "Loading set %s" % (setName)
if not setName in self._data:
self._data[setName] = {}
set = self.data[setName]
for line in f:
match = lineRe.match(line)
if match != None:
statFullName = match.group(2)
#(category, container, name) = statFullName.split(".")
(category, container, name) = OSimStatsHelper.splitStatsFullName(statFullName);
rawValue = match.group("abs")
#print match.lastindex
#print rawValue
value = OSimStatsCorpus.parseValue(rawValue, valueRe)
if not category in set:
set[category] = collections.OrderedDict()
if not container in set[category]:
set[category][container] = collections.OrderedDict()
if not name in set[category][container]:
entry = {
'abs' : { 'values' : [], 'units' : value[1] },
'category' : category,
'container' : container,
'fullName' : statFullName,
'name' : name
}
set[category][container][name] = entry
stat = set[category][container][name]
stat['abs']['values'].append(value[0])
# Handle delta value if present
if match.group("delta"):
rawValue = match.group("delta")
value = OSimStatsCorpus.parseValue(rawValue, valueRe)
if not 'delta' in stat:
stat['delta'] = { 'values' : [], 'units' : value[1] }
stat['delta']['values'].append(value[0])
else:
match = statsReportStartRe.search(line)
if (match != None):
self._samplesCount += 1
else:
print "Ignoring [%s]" % (line)
| 38.068627
| 109
| 0.425573
|
import argparse
import collections
import fnmatch
import os.path
import pprint
import re
import sys
totalStat['abs']['values'] = OSimStatsHelper.sumStatsToValues(stats, 'abs')
if 'delta' in stats[0]:
totalStat['delta'] = { 'units' : stats[0]['delta']['units'] }
totalStat['delta']['values'] = OSimStatsHelper.sumStatsToValues(stats, 'delta')
return totalStat
@staticmethod
def sumStatsToValues(stats, type):
totals = []
for stat in stats:
values = stat[type]['values']
for i in range(0, len(values)):
if i + 1 > len(totals):
totals.append(values[i])
else:
totals[i] += values[i]
return totals
@staticmethod
def splitStatsFullName(fullName):
return statNamePartsRe.match(fullName).groups();
lineRe = re.compile("(.* .*) - (.*) : (?P<abs>[^,]+)(?:, )?(?P<delta>[^,]+)?")
statsReportStartRe = re.compile(" - \*\*\* STATS REPORT AT")
statNamePartsRe = re.compile("^(.*?)\.(.*)\.(.*?)$");
valueRe = re.compile("([^ %/]+)(.*)")
def getStat(self, statFullName):
    """
    Get a statistic given its full name.
    FIXME: Does not allow one to interrogate a given set yet.
"""
if self._data is None:
    return None
(category, container, name) = OSimStatsHelper.splitStatsFullName(statFullName)
for set in self._data.values():
    if category in set and container in set[category] and name in set[category][container]:
        return set[category][container][name]
else:
    return None
def getStats(self, setGlob = "*", selectGlob = "*"):
"""
Returns a dictionary of stats where fullName => stat.
If glob is specified then this is used to match stats using their full name
If no stats are found then an empty dictionary is returned.
"""
if selectGlob == None:
selectGlob = "*"
if setGlob == None:
setGlob = "*"
matchingStats = collections.OrderedDict()
for setName, set in self._data.items():
if fnmatch.fnmatch(setName, setGlob):
for category, containers in set.items():
for container, stats in containers.items():
for statName, stat in stats.items():
if fnmatch.fnmatch(stat['fullName'], selectGlob):
matchingStats[stat['fullName']] = stat
return matchingStats
def clear(self):
"""Clear out any existing dataset."""
self._data = {}
self._samplesCount = 0
def load(self, path):
"""Load OpenSimulator stats log data from the given path and merge into any existing data."""
with open(path) as f:
setName = os.path.splitext(os.path.basename(path))[0]
print "Loading set %s" % (setName)
if not setName in self._data:
self._data[setName] = {}
set = self.data[setName]
for line in f:
match = lineRe.match(line)
if match != None:
statFullName = match.group(2)
(category, container, name) = OSimStatsHelper.splitStatsFullName(statFullName);
rawValue = match.group("abs")
value = OSimStatsCorpus.parseValue(rawValue, valueRe)
if not category in set:
set[category] = collections.OrderedDict()
if not container in set[category]:
set[category][container] = collections.OrderedDict()
if not name in set[category][container]:
entry = {
'abs' : { 'values' : [], 'units' : value[1] },
'category' : category,
'container' : container,
'fullName' : statFullName,
'name' : name
}
set[category][container][name] = entry
stat = set[category][container][name]
stat['abs']['values'].append(value[0])
if match.group("delta"):
rawValue = match.group("delta")
value = OSimStatsCorpus.parseValue(rawValue, valueRe)
if not 'delta' in stat:
stat['delta'] = { 'values' : [], 'units' : value[1] }
stat['delta']['values'].append(value[0])
else:
match = statsReportStartRe.search(line)
if (match != None):
self._samplesCount += 1
else:
print "Ignoring [%s]" % (line)
| false
| true
|
79080edfb9a52d85dbd60d7c0e19866dcde15e5c
| 2,446
|
py
|
Python
|
traffic_monitor/services/detectors/detector_cvlib.py
|
mcdomx/monitor
|
55082a3ea985224b819e4e2b7e13f44e70ac0b74
|
[
"MIT"
] | 1
|
2020-09-23T14:36:30.000Z
|
2020-09-23T14:36:30.000Z
|
traffic_monitor/services/detectors/detector_cvlib.py
|
mcdomx/monitor
|
55082a3ea985224b819e4e2b7e13f44e70ac0b74
|
[
"MIT"
] | 3
|
2021-09-08T02:32:20.000Z
|
2022-03-12T00:49:29.000Z
|
traffic_monitor/services/detectors/detector_cvlib.py
|
mcdomx/monitor
|
55082a3ea985224b819e4e2b7e13f44e70ac0b74
|
[
"MIT"
] | null | null | null |
import logging
import numpy as np
from cvlib.object_detection import populate_class_labels, draw_bbox, detect_common_objects
from traffic_monitor.services.detectors.detector_abstract import DetectorAbstract
logger = logging.getLogger('detector')
class DetectorCVlib(DetectorAbstract):
"""
Implementation of DetectorAbstract backed by cvlib's OpenCV-based
common-object detection.
https://github.com/arunponnusamy/cvlib
Yolov4 cfg and weights are available at: https://github.com/AlexeyAB/darknet
Supports models:
yolov3-tiny
yolov3
Requires that the .cfg and .weights files are in ~/.cvlib/object_detection/yolo/yolov3
"""
def __init__(self, monitor_config: dict):
DetectorAbstract.__init__(self, monitor_config)
self.detector_name: str = monitor_config.get('detector_name')
self.detector_model: str = monitor_config.get('detector_model')
self.detector_confidence: float = monitor_config.get('detector_confidence')
# note that colors in cvlib uses BGR not RGB colors
self.bgr_colors = np.float64([monitor_config.get('class_colors').get(o)[::-1] for o in populate_class_labels()])
def set_detector_value(self, kwargs_list: list):
""" Only allow changes to confidence or the model """
try:
for kwargs in kwargs_list:
field = kwargs.get('field')
value = kwargs.get('value')
if field in ['detector_confidence', 'detector_model']:
logger.info(f"{self.detector_name}: setting value: {field}: {value}")
self.monitor_config[field] = value
except Exception as e:
logger.error(f"{self.__class__.__name__}: Error setting value: {e}")
def detect(self, frame: np.array) -> (np.array, list):
# colors is a list of BGR values in a list ([[#b,#g,#r],[#b,#g,#r], ... ])
try:
bbox, labels, conf = detect_common_objects(frame, confidence=self.detector_confidence, model=self.detector_model)
frame = draw_bbox(img=frame, bbox=bbox, labels=labels, confidence=conf, write_conf=False, colors=self.bgr_colors)
return frame, labels
except Exception as e:
logger.error(f"{self.__class__.__name__} Exception: {e}")
@classmethod
def get_trained_objects(cls) -> list:
return populate_class_labels()
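# Usage sketch. monitor_config mirrors the keys read above; the color map and
# image path are made up, and DetectorAbstract may expect additional keys.
import cv2

config = {
    'detector_name': 'cvlib_detector',
    'detector_model': 'yolov3-tiny',
    'detector_confidence': 0.5,
    # one RGB triple per trained class label (reversed to BGR in __init__)
    'class_colors': {c: [0, 255, 0] for c in DetectorCVlib.get_trained_objects()},
}
detector = DetectorCVlib(config)
annotated_frame, labels = detector.detect(cv2.imread('street.jpg'))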
| 41.457627
| 125
| 0.674571
|
import logging
import numpy as np
from cvlib.object_detection import populate_class_labels, draw_bbox, detect_common_objects
from traffic_monitor.services.detectors.detector_abstract import DetectorAbstract
logger = logging.getLogger('detector')
class DetectorCVlib(DetectorAbstract):
def __init__(self, monitor_config: dict):
DetectorAbstract.__init__(self, monitor_config)
self.detector_name: str = monitor_config.get('detector_name')
self.detector_model: str = monitor_config.get('detector_model')
self.detector_confidence: float = monitor_config.get('detector_confidence')
self.bgr_colors = np.float64([monitor_config.get('class_colors').get(o)[::-1] for o in populate_class_labels()])
def set_detector_value(self, kwargs_list: list):
try:
for kwargs in kwargs_list:
field = kwargs.get('field')
value = kwargs.get('value')
if field in ['detector_confidence', 'detector_model']:
logger.info(f"{self.detector_name}: setting value: {field}: {value}")
self.monitor_config[field] = value
except Exception as e:
logger.error(f"{self.__class__.__name__}: Error setting value: {e}")
def detect(self, frame: np.array) -> (np.array, list):
try:
    bbox, labels, conf = detect_common_objects(frame, confidence=self.detector_confidence, model=self.detector_model)
frame = draw_bbox(img=frame, bbox=bbox, labels=labels, confidence=conf, write_conf=False, colors=self.bgr_colors)
return frame, labels
except Exception as e:
logger.error(f"{self.__class__.__name__} Exception: {e}")
@classmethod
def get_trained_objects(cls) -> list:
return populate_class_labels()
| true
| true
|
79080f4461a4c72524ec43c35bb46daf03bb2d9a
| 523
|
py
|
Python
|
manual/unicos/src-groups/script/is_sundanese_unicos.py
|
Tikubonn/unico
|
c76de5309f8a3a6fda3110e463b7e9718ea530e3
|
[
"MIT"
] | null | null | null |
manual/unicos/src-groups/script/is_sundanese_unicos.py
|
Tikubonn/unico
|
c76de5309f8a3a6fda3110e463b7e9718ea530e3
|
[
"MIT"
] | null | null | null |
manual/unicos/src-groups/script/is_sundanese_unicos.py
|
Tikubonn/unico
|
c76de5309f8a3a6fda3110e463b7e9718ea530e3
|
[
"MIT"
] | null | null | null |
import json
from lib import node
from lib.generator import predicate_function
from lib.generator import predicate_function_declaration
with open("json/sundanese.json", "r") as stream:
data = json.load(stream)
nd = node.RootNode()
for dat in data:
nd.extend(dat, True)
with open("dist/is_sundanese_unicos.h", "w") as stream:
predicate_function_declaration.write("is_sundanese_unicos", stream)
with open("dist/is_sundanese_unicos.c", "w") as stream:
predicate_function.write("is_sundanese_unicos", nd, stream)
| 26.15
| 69
| 0.772467
|
import json
from lib import node
from lib.generator import predicate_function
from lib.generator import predicate_function_declaration
with open("json/sundanese.json", "r") as stream:
data = json.load(stream)
nd = node.RootNode()
for dat in data:
nd.extend(dat, True)
with open("dist/is_sundanese_unicos.h", "w") as stream:
predicate_function_declaration.write("is_sundanese_unicos", stream)
with open("dist/is_sundanese_unicos.c", "w") as stream:
predicate_function.write("is_sundanese_unicos", nd, stream)
| true
| true
|
79080f8192b6248770f4f2ca0ce09d129cf8bebf
| 2,933
|
py
|
Python
|
tests/core/test_visualization.py
|
n01deas/rasa
|
79f0feeb02919142eb06b8c52da5632f1c25c251
|
[
"Apache-2.0"
] | 5
|
2019-06-06T08:59:15.000Z
|
2020-01-19T10:56:45.000Z
|
tests/core/test_visualization.py
|
RakibulAsheeque/rasa
|
7d3804cd081c73d78ab5e973f95a55845eed1e89
|
[
"Apache-2.0"
] | 21
|
2019-12-16T17:37:54.000Z
|
2020-07-06T06:19:04.000Z
|
tests/core/test_visualization.py
|
RakibulAsheeque/rasa
|
7d3804cd081c73d78ab5e973f95a55845eed1e89
|
[
"Apache-2.0"
] | 4
|
2019-05-19T21:19:32.000Z
|
2021-01-06T14:26:37.000Z
|
from rasa.core.events import ActionExecuted, SlotSet, UserUttered
from rasa.core.training import visualization
def test_style_transfer():
r = visualization._transfer_style({"class": "dashed great"}, {"class": "myclass"})
assert r["class"] == "myclass dashed"
def test_style_transfer_empty():
r = visualization._transfer_style({"class": "dashed great"}, {"something": "else"})
assert r["class"] == "dashed"
def test_common_action_prefix():
this = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
ActionExecuted("amazing"),
# until this point they are the same
SlotSet("my_slot", "a"),
ActionExecuted("a"),
ActionExecuted("after_a"),
]
other = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
ActionExecuted("amazing"),
# until this point they are the same
SlotSet("my_slot", "b"),
ActionExecuted("b"),
ActionExecuted("after_b"),
]
num_common = visualization._length_of_common_action_prefix(this, other)
assert num_common == 3
def test_common_action_prefix_equal():
this = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
ActionExecuted("amazing"),
]
other = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
ActionExecuted("amazing"),
]
num_common = visualization._length_of_common_action_prefix(this, other)
assert num_common == 3
def test_common_action_prefix_unequal():
this = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
]
other = [
ActionExecuted("greet"),
ActionExecuted("action_listen"),
UserUttered("hey"),
]
num_common = visualization._length_of_common_action_prefix(this, other)
assert num_common == 0
async def test_graph_persistence(default_domain, tmpdir):
from os.path import isfile
from networkx.drawing import nx_pydot
from rasa.core.training.dsl import StoryFileReader
from rasa.core.interpreter import RegexInterpreter
story_steps = await StoryFileReader.read_from_file(
"data/test_stories/stories.md", default_domain, interpreter=RegexInterpreter()
)
out_file = tmpdir.join("graph.html").strpath
generated_graph = await visualization.visualize_stories(
story_steps,
default_domain,
output_file=out_file,
max_history=3,
should_merge_nodes=False,
)
generated_graph = nx_pydot.to_pydot(generated_graph)
assert isfile(out_file)
with open(out_file, "r") as graph_file:
content = graph_file.read()
assert "isClient = true" in content
assert "graph = `{}`".format(generated_graph.to_string()) in content
| 28.754902
| 87
| 0.65837
|
from rasa.core.events import ActionExecuted, SlotSet, UserUttered
from rasa.core.training import visualization
def test_style_transfer():
r = visualization._transfer_style({"class": "dashed great"}, {"class": "myclass"})
assert r["class"] == "myclass dashed"
def test_style_transfer_empty():
r = visualization._transfer_style({"class": "dashed great"}, {"something": "else"})
assert r["class"] == "dashed"
def test_common_action_prefix():
this = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
ActionExecuted("amazing"),
SlotSet("my_slot", "a"),
ActionExecuted("a"),
ActionExecuted("after_a"),
]
other = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
ActionExecuted("amazing"),
SlotSet("my_slot", "b"),
ActionExecuted("b"),
ActionExecuted("after_b"),
]
num_common = visualization._length_of_common_action_prefix(this, other)
assert num_common == 3
def test_common_action_prefix_equal():
this = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
ActionExecuted("amazing"),
]
other = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
ActionExecuted("amazing"),
]
num_common = visualization._length_of_common_action_prefix(this, other)
assert num_common == 3
def test_common_action_prefix_unequal():
this = [
ActionExecuted("action_listen"),
ActionExecuted("greet"),
UserUttered("hey"),
]
other = [
ActionExecuted("greet"),
ActionExecuted("action_listen"),
UserUttered("hey"),
]
num_common = visualization._length_of_common_action_prefix(this, other)
assert num_common == 0
async def test_graph_persistence(default_domain, tmpdir):
from os.path import isfile
from networkx.drawing import nx_pydot
from rasa.core.training.dsl import StoryFileReader
from rasa.core.interpreter import RegexInterpreter
story_steps = await StoryFileReader.read_from_file(
"data/test_stories/stories.md", default_domain, interpreter=RegexInterpreter()
)
out_file = tmpdir.join("graph.html").strpath
generated_graph = await visualization.visualize_stories(
story_steps,
default_domain,
output_file=out_file,
max_history=3,
should_merge_nodes=False,
)
generated_graph = nx_pydot.to_pydot(generated_graph)
assert isfile(out_file)
with open(out_file, "r") as graph_file:
content = graph_file.read()
assert "isClient = true" in content
assert "graph = `{}`".format(generated_graph.to_string()) in content
| true
| true
|
79080f9e4ddf161342b46cc0970ccca738e03a30
| 17,390
|
py
|
Python
|
openmdao/test_suite/components/cycle_comps.py
|
hwangjt/blue
|
609defbe476c86a4a2eddd12977b47e649ea7f50
|
[
"Apache-2.0"
] | null | null | null |
openmdao/test_suite/components/cycle_comps.py
|
hwangjt/blue
|
609defbe476c86a4a2eddd12977b47e649ea7f50
|
[
"Apache-2.0"
] | null | null | null |
openmdao/test_suite/components/cycle_comps.py
|
hwangjt/blue
|
609defbe476c86a4a2eddd12977b47e649ea7f50
|
[
"Apache-2.0"
] | null | null | null |
"""Components for use in `CycleGroup`. For details, see `CycleGroup`."""
from __future__ import division, print_function
from six.moves import range
import numpy as np
import scipy.sparse as sparse
import unittest
from openmdao.core.explicitcomponent import ExplicitComponent
PSI = 1.
_vec_terms = {}
def _compute_vector_terms(system_size):
# A try/except here is much faster than an `if key in ...` check when the key
# is present, which it will be on every call after the first.
try:
return _vec_terms[system_size]
except KeyError:
u = np.zeros(system_size)
u[[0, -1]] = np.sqrt(2)/2
v = np.zeros(system_size)
v[1:-1] = 1 / np.sqrt(system_size - 2)
cross_terms = np.outer(v, u) - np.outer(u, v)
same_terms = np.outer(u, u) + np.outer(v, v)
_vec_terms[system_size] = u, v, cross_terms, same_terms
return u, v, cross_terms, same_terms
def _compute_A(system_size, theta):
u, v, cross_terms, same_terms = _compute_vector_terms(system_size)
return (np.eye(system_size)
+ np.sin(theta) * cross_terms
+ (np.cos(theta) - 1) * same_terms)
def _compute_dA(system_size, theta):
u, v, cross_terms, same_terms = _compute_vector_terms(system_size)
return np.cos(theta) * cross_terms - np.sin(theta) * same_terms
def array_idx(i, var_size):
return slice(i * var_size, (i + 1) * var_size)
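# Numerical sanity check (not part of the test suite): A(theta) built above is a
# Givens-style rotation by theta in the plane spanned by u and v (identity
# elsewhere), and dA is its derivative in theta.
if __name__ == '__main__':
    n, theta = 5, 0.3
    A = _compute_A(n, theta)
    assert np.allclose(A.dot(A.T), np.eye(n))                          # orthogonal
    assert np.allclose(_compute_A(n, 0.1).dot(_compute_A(n, 0.2)), A)  # angles add
    eps = 1e-6
    fd = (_compute_A(n, theta + eps) - _compute_A(n, theta - eps)) / (2 * eps)
    assert np.allclose(_compute_dA(n, theta), fd, atol=1e-8)           # dA = dA/dtheta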
class ExplicitCycleComp(ExplicitComponent):
def _inputs_to_vector(self, inputs):
var_shape = self.metadata['var_shape']
num_var = self.metadata['num_var']
size = np.prod(var_shape)
x = np.zeros(num_var * size)
for i in range(num_var):
x_i = inputs[self._cycle_names['x'].format(i)].flat
x[size * i:size * (i + 1)] = x_i
return x
def _vector_to_outputs(self, vec, outputs):
var_shape = self.metadata['var_shape']
num_var = self.metadata['num_var']
size = np.prod(var_shape)
for i in range(num_var):
y_i = vec[size * i:size * (i + 1)].reshape(var_shape)
outputs[self._cycle_names['y'].format(i)] = y_i
def __str__(self):
return 'Explicit Cycle Component'
def initialize(self):
self.metadata.declare('jacobian_type', default='matvec',
values=['matvec', 'dense', 'sparse-coo', 'sparse-csr',
'sparse-csc'],
desc='method of assembling derivatives')
self.metadata.declare('partial_type', default='array',
values=['array', 'sparse', 'aij'],
desc='type of partial derivatives')
self.metadata.declare('num_var', type_=int, default=1,
desc='Number of variables per component')
self.metadata.declare('var_shape', type_=tuple, default=(3,),
desc='Shape of each variable')
self.metadata.declare('index', type_=int,
desc='Index of the component. Used for testing implicit connections')
self.metadata.declare('connection_type', type_=str, default='explicit',
values=['explicit', 'implicit'],
desc='How to connect variables.')
self.metadata.declare('finite_difference', default=False,
type_=bool,
desc='If the derivatives should be finite differenced.')
self.metadata.declare('num_comp', type_=int, default=2,
desc='Total number of components')
self.angle_param = 'theta'
self._cycle_names = {}
def _init_parameterized(self):
self.num_var = self.metadata['num_var']
self.var_shape = self.metadata['var_shape']
self.size = self.num_var * np.prod(self.var_shape)
if self.metadata['jacobian_type'] == 'matvec':
self.compute_jacvec_product = self.jacvec_product
if self.metadata['connection_type'] == 'implicit':
idx = self.metadata['index']
self._cycle_names['x'] = 'x_{}_{{}}'.format(idx)
self._cycle_names['y'] = 'x_{}_{{}}'.format(idx + 1)
self._cycle_names['theta'] = 'theta_{}'.format(idx)
self._cycle_names['theta_out'] = 'theta_{}'.format(idx + 1)
num_var = self.metadata['num_var']
self._cycle_promotes_in = [self._cycle_names['x'].format(i) for i in range(num_var)]
self._cycle_promotes_out = [self._cycle_names['y'].format(i) for i in range(num_var)]
self._cycle_promotes_in.append(self._cycle_names['theta'])
self._cycle_promotes_out.append(self._cycle_names['theta_out'])
else:
self._cycle_names['x'] = 'x_{}'
self._cycle_names['y'] = 'y_{}'
self._cycle_names['theta'] = 'theta'
self._cycle_names['theta_out'] = 'theta_out'
self._cycle_promotes_in = self._cycle_promotes_out = []
def setup(self):
for i in range(self.num_var):
self.add_input(self._cycle_names['x'].format(i), shape=self.var_shape)
self.add_output(self._cycle_names['y'].format(i), shape=self.var_shape)
self.add_input(self._cycle_names['theta'], val=1.)
self.add_output(self._cycle_names['theta_out'], shape=(1,))
# Setup partials
pd_type = self.metadata['partial_type']
if self.metadata['finite_difference']:
if self.metadata['jacobian_type'] == 'matvec':
raise unittest.SkipTest('not testing FD and matvec')
if pd_type != 'array':
raise unittest.SkipTest('only dense FD supported')
self.declare_partials('*', '*', method='fd')
elif self.metadata['jacobian_type'] != 'matvec' and pd_type != 'array':
num_var = self.num_var
var_shape = self.var_shape
var_size = np.prod(var_shape)
A = np.ones((self.size, self.size))
dA_x = np.ones((self.size, 1))
dtheta = np.array([[1.]])
angle_param = self._cycle_names[self.angle_param]
# if our subjacs are not dense, we must assign values here that
# match their type (data values don't matter, only structure).
# Otherwise, we assume they are dense and we'll get an error later
# when we assign a subjac with a type that doesn't match.
for out_idx in range(num_var):
out_var = self._cycle_names['y'].format(out_idx)
for in_idx in range(num_var):
in_var = self._cycle_names['x'].format(in_idx)
Aij = A[array_idx(out_idx, var_size), array_idx(in_idx, var_size)]
self.declare_partials(out_var, in_var,
**self._array2kwargs(Aij, pd_type))
self.declare_partials(out_var, angle_param,
**self._array2kwargs(dA_x[array_idx(out_idx, var_size)],
pd_type))
self.declare_partials(self._cycle_names['theta_out'], self._cycle_names['theta'],
**self._array2kwargs(dtheta, pd_type))
else:
# Declare everything
self.declare_partials(of='*', wrt='*')
def compute(self, inputs, outputs):
theta = inputs[self._cycle_names['theta']]
A = _compute_A(self.size, theta)
x = self._inputs_to_vector(inputs)
y = A.dot(x)
self._vector_to_outputs(y, outputs)
outputs[self._cycle_names['theta_out']] = theta
def jacvec_product(self, inputs, d_inputs, d_outputs, mode):
angle_param = self._cycle_names[self.angle_param]
x = self._inputs_to_vector(inputs)
angle = inputs[angle_param]
A = _compute_A(self.size, angle)
dA = _compute_dA(self.size, angle)
var_shape = self.metadata['var_shape']
var_size = np.prod(var_shape)
num_var = self.metadata['num_var']
x_name = self._cycle_names['x']
y_name = self._cycle_names['y']
theta_name = self._cycle_names['theta']
theta_out_name = self._cycle_names['theta_out']
if mode == 'fwd':
for j in range(num_var):
x_j = x_name.format(j)
if x_j in d_inputs:
dx = d_inputs[x_j].flat[:]
for i in range(num_var):
y_i = y_name.format(i)
if y_i in d_outputs:
Aij = A[array_idx(i, var_size), array_idx(j, var_size)]
d_outputs[y_i] += Aij.dot(dx).reshape(var_shape)
if theta_name in d_inputs and theta_out_name in d_outputs:
dtheta = d_inputs[theta_name]
d_outputs[theta_out_name] += dtheta
if angle_param in d_inputs:
dangle = d_inputs[angle_param]
dy_dangle = (dA.dot(x)) * dangle
for i in range(num_var):
y_i = y_name.format(i)
if y_i in d_outputs:
d_outputs[y_i] += dy_dangle[array_idx(i, var_size)].reshape(var_shape)
elif mode == 'rev':
for i in range(num_var):
y_i = y_name.format(i)
if y_i in d_outputs:
dy_i = d_outputs[y_i].flat[:]
for j in range(num_var):
x_j = x_name.format(j)
if x_j in d_inputs:
Aij = A[array_idx(i, var_size), array_idx(j, var_size)]
d_inputs[x_j] += Aij.T.dot(dy_i).reshape(var_shape)
if angle_param in d_inputs:
dAij = dA[array_idx(i, var_size), array_idx(j, var_size)]
x_j_vec = inputs[x_j].flat[:]
d_inputs[angle_param] += x_j_vec.T.dot(dAij.T.dot(dy_i))
if theta_out_name in d_outputs and theta_name in d_inputs:
dtheta_out = d_outputs[theta_out_name]
d_inputs[theta_name] += dtheta_out
def make_jacobian_entry(self, A, pd_type):
if pd_type == 'aij':
return self.make_sub_jacobian(A, pd_type)[0]
return self.make_sub_jacobian(A, pd_type)
def make_sub_jacobian(self, A, pd_type):
if pd_type == 'array':
return A
if pd_type == 'sparse':
return sparse.csr_matrix(A)
if pd_type == 'aij':
data = []
rows = []
cols = []
A = np.atleast_2d(A)
for i in range(A.shape[0]):
for j in range(A.shape[1]):
if np.abs(A[i, j]) > 1e-15:
data.append(A[i, j])
rows.append(i)
cols.append(j)
return [np.array(data), np.array(rows), np.array(cols)]
raise ValueError('Unknown partial_type: {}'.format(pd_type))
def _array2kwargs(self, arr, pd_type):
jac = self.make_sub_jacobian(arr, pd_type)
if pd_type == 'aij':
return {'val': jac[0], 'rows': jac[1], 'cols': jac[2]}
else:
return {'val': jac}
def compute_partials(self, inputs, partials):
if self.metadata['jacobian_type'] != 'matvec' and not self.metadata['finite_difference']:
angle_param = self._cycle_names[self.angle_param]
angle = inputs[angle_param]
num_var = self.num_var
var_shape = self.var_shape
var_size = np.prod(var_shape)
x = self._inputs_to_vector(inputs)
size = self.size
A = _compute_A(size, angle)
dA = _compute_dA(size, angle)
dA_x = np.atleast_2d(dA.dot(x)).T
pd_type = self.metadata['partial_type']
dtheta = np.array([[1.]])
y_name = self._cycle_names['y']
x_name = self._cycle_names['x']
for out_idx in range(num_var):
out_var = y_name.format(out_idx)
for in_idx in range(num_var):
in_var = x_name.format(in_idx)
Aij = A[array_idx(out_idx, var_size), array_idx(in_idx, var_size)]
J_y_x = self.make_jacobian_entry(Aij, pd_type)
J_y_angle = self.make_jacobian_entry(dA_x[array_idx(out_idx, var_size)],
pd_type)
partials[out_var, in_var] = J_y_x
partials[out_var, angle_param] = J_y_angle
theta_out = self._cycle_names['theta_out']
theta = self._cycle_names['theta']
partials[theta_out, theta] = self.make_jacobian_entry(dtheta, pd_type)
class ExplicitFirstComp(ExplicitCycleComp):
def __str__(self):
return 'Explicit Cycle Component - First'
def setup(self):
self.add_input('psi', val=1.)
self.angle_param = 'psi'
self._cycle_names['psi'] = 'psi'
super(ExplicitFirstComp, self).setup()
def compute(self, inputs, outputs):
theta = inputs[self._cycle_names['theta']]
psi = inputs[self._cycle_names['psi']]
A = _compute_A(self.size, psi)
y = A.dot(np.ones(self.size))
self._vector_to_outputs(y, outputs)
outputs[self._cycle_names['theta_out']] = theta
class ExplicitLastComp(ExplicitFirstComp):
def __str__(self):
return 'Explicit Cycle Component - Last'
def setup(self):
super(ExplicitLastComp, self).setup()
self.add_output('x_norm2', shape=(1,))
self._n = 1
# Setup partials
pd_type = self.metadata['partial_type']
if self.metadata['jacobian_type'] != 'matvec' and pd_type != 'array':
x = np.ones(self.var_shape)
for i in range(self.metadata['num_var']):
in_var = self._cycle_names['x'].format(i)
self.declare_partials('x_norm2', in_var,
**self._array2kwargs(x.flatten(), pd_type))
self.declare_partials(self._cycle_names['theta_out'], self._cycle_names['psi'],
**self._array2kwargs(np.array([1.]), pd_type))
def compute(self, inputs, outputs):
theta = inputs[self._cycle_names['theta']]
psi = inputs[self._cycle_names['psi']]
k = self.metadata['num_comp']
x = self._inputs_to_vector(inputs)
outputs['x_norm2'] = 0.5*np.dot(x,x)
# theta_out carries half the error, relative to the correct angle, that theta does.
outputs[self._cycle_names['theta_out']] = theta / 2 + (self._n * 2 * np.pi - psi) / (2 * k - 2)
def compute_partials(self, inputs, partials):
if self.metadata['jacobian_type'] != 'matvec' and not self.metadata['finite_difference']:
pd_type = self.metadata['partial_type']
for i in range(self.metadata['num_var']):
in_var = self._cycle_names['x'].format(i)
partials['x_norm2', in_var] = self.make_jacobian_entry(inputs[in_var].flat[:],
pd_type)
k = self.metadata['num_comp']
theta_out = self._cycle_names['theta_out']
theta = self._cycle_names['theta']
partials[theta_out, theta] = self.make_jacobian_entry(np.array([.5]), pd_type)
partials[theta_out, self._cycle_names['psi']] = \
self.make_jacobian_entry(np.array([-1/(2*k-2)]), pd_type)
def jacvec_product(self, inputs, d_inputs, d_outputs, mode):
if self.metadata['jacobian_type'] == 'matvec':
k = self.metadata['num_comp']
num_var = self.metadata['num_var']
theta_out = self._cycle_names['theta_out']
theta = self._cycle_names['theta']
psi = self._cycle_names['psi']
if mode == 'fwd':
if theta_out in d_outputs:
if theta in d_inputs:
d_outputs[theta_out] += 0.5 * d_inputs[theta]
if psi in d_inputs:
d_outputs[theta_out] += -d_inputs[psi] / (2 * k - 2)
for i in range(num_var):
in_var = self._cycle_names['x'].format(i)
if in_var in d_inputs and 'x_norm2' in d_outputs:
d_outputs['x_norm2'] += np.dot(inputs[in_var].flat, d_inputs[in_var].flat)
elif mode == 'rev':
if 'x_norm2' in d_outputs:
dxnorm = d_outputs['x_norm2']
for i in range(num_var):
x_i_name = self._cycle_names['x'].format(i)
if x_i_name in d_inputs:
d_inputs[x_i_name] += inputs[x_i_name] * dxnorm
if theta_out in d_outputs:
dtheta_out = d_outputs[theta_out]
if theta in d_inputs:
d_inputs[theta] += .5*dtheta_out
if psi in d_inputs:
d_inputs[psi] += -dtheta_out/(2*k-2)
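# The halving claim above in numbers: at theta* = (2*pi*n - psi)/(k - 1) the
# ExplicitLastComp update is a fixed point, and each evaluation halves the error.
# Constants here are arbitrary.
if __name__ == '__main__':
    k, n, psi = 4, 1, 1.0
    theta_star = (2 * np.pi * n - psi) / (k - 1)
    theta = theta_star + 0.8
    for _ in range(5):
        new = theta / 2 + (n * 2 * np.pi - psi) / (2 * k - 2)
        assert np.isclose(new - theta_star, (theta - theta_star) / 2)
        theta = new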
| 41.802885
| 103
| 0.552444
|
from __future__ import division, print_function
from six.moves import range
import numpy as np
import scipy.sparse as sparse
import unittest
from openmdao.core.explicitcomponent import ExplicitComponent
PSI = 1.
_vec_terms = {}
def _compute_vector_terms(system_size):
try:
return _vec_terms[system_size]
except KeyError:
u = np.zeros(system_size)
u[[0, -1]] = np.sqrt(2)/2
v = np.zeros(system_size)
v[1:-1] = 1 / np.sqrt(system_size - 2)
cross_terms = np.outer(v, u) - np.outer(u, v)
same_terms = np.outer(u, u) + np.outer(v, v)
_vec_terms[system_size] = u, v, cross_terms, same_terms
return u, v, cross_terms, same_terms
def _compute_A(system_size, theta):
u, v, cross_terms, same_terms = _compute_vector_terms(system_size)
return (np.eye(system_size)
+ np.sin(theta) * cross_terms
+ (np.cos(theta) - 1) * same_terms)
def _compute_dA(system_size, theta):
u, v, cross_terms, same_terms = _compute_vector_terms(system_size)
return np.cos(theta) * cross_terms - np.sin(theta) * same_terms
def array_idx(i, var_size):
return slice(i * var_size, (i + 1) * var_size)
class ExplicitCycleComp(ExplicitComponent):
def _inputs_to_vector(self, inputs):
var_shape = self.metadata['var_shape']
num_var = self.metadata['num_var']
size = np.prod(var_shape)
x = np.zeros(num_var * size)
for i in range(num_var):
x_i = inputs[self._cycle_names['x'].format(i)].flat
x[size * i:size * (i + 1)] = x_i
return x
def _vector_to_outputs(self, vec, outputs):
var_shape = self.metadata['var_shape']
num_var = self.metadata['num_var']
size = np.prod(var_shape)
for i in range(num_var):
y_i = vec[size * i:size * (i + 1)].reshape(var_shape)
outputs[self._cycle_names['y'].format(i)] = y_i
def __str__(self):
return 'Explicit Cycle Component'
def initialize(self):
self.metadata.declare('jacobian_type', default='matvec',
values=['matvec', 'dense', 'sparse-coo', 'sparse-csr',
'sparse-csc'],
desc='method of assembling derivatives')
self.metadata.declare('partial_type', default='array',
values=['array', 'sparse', 'aij'],
desc='type of partial derivatives')
self.metadata.declare('num_var', type_=int, default=1,
desc='Number of variables per component')
self.metadata.declare('var_shape', type_=tuple, default=(3,),
desc='Shape of each variable')
self.metadata.declare('index', type_=int,
desc='Index of the component. Used for testing implicit connections')
self.metadata.declare('connection_type', type_=str, default='explicit',
values=['explicit', 'implicit'],
desc='How to connect variables.')
self.metadata.declare('finite_difference', default=False,
type_=bool,
desc='If the derivatives should be finite differenced.')
self.metadata.declare('num_comp', type_=int, default=2,
desc='Total number of components')
self.angle_param = 'theta'
self._cycle_names = {}
def _init_parameterized(self):
self.num_var = self.metadata['num_var']
self.var_shape = self.metadata['var_shape']
self.size = self.num_var * np.prod(self.var_shape)
if self.metadata['jacobian_type'] == 'matvec':
self.compute_jacvec_product = self.jacvec_product
if self.metadata['connection_type'] == 'implicit':
idx = self.metadata['index']
self._cycle_names['x'] = 'x_{}_{{}}'.format(idx)
self._cycle_names['y'] = 'x_{}_{{}}'.format(idx + 1)
self._cycle_names['theta'] = 'theta_{}'.format(idx)
self._cycle_names['theta_out'] = 'theta_{}'.format(idx + 1)
num_var = self.metadata['num_var']
self._cycle_promotes_in = [self._cycle_names['x'].format(i) for i in range(num_var)]
self._cycle_promotes_out = [self._cycle_names['y'].format(i) for i in range(num_var)]
self._cycle_promotes_in.append(self._cycle_names['theta'])
self._cycle_promotes_out.append(self._cycle_names['theta_out'])
else:
self._cycle_names['x'] = 'x_{}'
self._cycle_names['y'] = 'y_{}'
self._cycle_names['theta'] = 'theta'
self._cycle_names['theta_out'] = 'theta_out'
self._cycle_promotes_in = self._cycle_promotes_out = []
def setup(self):
for i in range(self.num_var):
self.add_input(self._cycle_names['x'].format(i), shape=self.var_shape)
self.add_output(self._cycle_names['y'].format(i), shape=self.var_shape)
self.add_input(self._cycle_names['theta'], val=1.)
self.add_output(self._cycle_names['theta_out'], shape=(1,))
pd_type = self.metadata['partial_type']
if self.metadata['finite_difference']:
if self.metadata['jacobian_type'] == 'matvec':
raise unittest.SkipTest('not testing FD and matvec')
if pd_type != 'array':
raise unittest.SkipTest('only dense FD supported')
self.declare_partials('*', '*', method='fd')
elif self.metadata['jacobian_type'] != 'matvec' and pd_type != 'array':
num_var = self.num_var
var_shape = self.var_shape
var_size = np.prod(var_shape)
A = np.ones((self.size, self.size))
dA_x = np.ones((self.size, 1))
dtheta = np.array([[1.]])
angle_param = self._cycle_names[self.angle_param]
# Otherwise, we assume they are dense and we'll get an error later
for out_idx in range(num_var):
out_var = self._cycle_names['y'].format(out_idx)
for in_idx in range(num_var):
in_var = self._cycle_names['x'].format(in_idx)
Aij = A[array_idx(out_idx, var_size), array_idx(in_idx, var_size)]
self.declare_partials(out_var, in_var,
**self._array2kwargs(Aij, pd_type))
self.declare_partials(out_var, angle_param,
**self._array2kwargs(dA_x[array_idx(out_idx, var_size)],
pd_type))
self.declare_partials(self._cycle_names['theta_out'], self._cycle_names['theta'],
**self._array2kwargs(dtheta, pd_type))
else:
# Declare everything
self.declare_partials(of='*', wrt='*')
def compute(self, inputs, outputs):
theta = inputs[self._cycle_names['theta']]
A = _compute_A(self.size, theta)
x = self._inputs_to_vector(inputs)
y = A.dot(x)
self._vector_to_outputs(y, outputs)
outputs[self._cycle_names['theta_out']] = theta
def jacvec_product(self, inputs, d_inputs, d_outputs, mode):
angle_param = self._cycle_names[self.angle_param]
x = self._inputs_to_vector(inputs)
angle = inputs[angle_param]
A = _compute_A(self.size, angle)
dA = _compute_dA(self.size, angle)
var_shape = self.metadata['var_shape']
var_size = np.prod(var_shape)
num_var = self.metadata['num_var']
x_name = self._cycle_names['x']
y_name = self._cycle_names['y']
theta_name = self._cycle_names['theta']
theta_out_name = self._cycle_names['theta_out']
if mode == 'fwd':
for j in range(num_var):
x_j = x_name.format(j)
if x_j in d_inputs:
dx = d_inputs[x_j].flat[:]
for i in range(num_var):
y_i = y_name.format(i)
if y_i in d_outputs:
Aij = A[array_idx(i, var_size), array_idx(j, var_size)]
d_outputs[y_i] += Aij.dot(dx).reshape(var_shape)
if theta_name in d_inputs and theta_out_name in d_outputs:
dtheta = d_inputs[theta_name]
d_outputs[theta_out_name] += dtheta
if angle_param in d_inputs:
dangle = d_inputs[angle_param]
dy_dangle = (dA.dot(x)) * dangle
for i in range(num_var):
y_i = y_name.format(i)
if y_i in d_outputs:
d_outputs[y_i] += dy_dangle[array_idx(i, var_size)].reshape(var_shape)
elif mode == 'rev':
for i in range(num_var):
y_i = y_name.format(i)
if y_i in d_outputs:
dy_i = d_outputs[y_i].flat[:]
for j in range(num_var):
x_j = x_name.format(j)
if x_j in d_inputs:
Aij = A[array_idx(i, var_size), array_idx(j, var_size)]
d_inputs[x_j] += Aij.T.dot(dy_i).reshape(var_shape)
if angle_param in d_inputs:
dAij = dA[array_idx(i, var_size), array_idx(j, var_size)]
x_j_vec = inputs[x_j].flat[:]
d_inputs[angle_param] += x_j_vec.T.dot(dAij.T.dot(dy_i))
if theta_out_name in d_outputs and theta_name in d_inputs:
dtheta_out = d_outputs[theta_out_name]
d_inputs[theta_name] += dtheta_out
def make_jacobian_entry(self, A, pd_type):
if pd_type == 'aij':
return self.make_sub_jacobian(A, pd_type)[0]
return self.make_sub_jacobian(A, pd_type)
def make_sub_jacobian(self, A, pd_type):
if pd_type == 'array':
return A
if pd_type == 'sparse':
return sparse.csr_matrix(A)
if pd_type == 'aij':
data = []
rows = []
cols = []
A = np.atleast_2d(A)
for i in range(A.shape[0]):
for j in range(A.shape[1]):
if np.abs(A[i, j]) > 1e-15:
data.append(A[i, j])
rows.append(i)
cols.append(j)
return [np.array(data), np.array(rows), np.array(cols)]
raise ValueError('Unknown partial_type: {}'.format(pd_type))
def _array2kwargs(self, arr, pd_type):
jac = self.make_sub_jacobian(arr, pd_type)
if pd_type == 'aij':
return {'val': jac[0], 'rows': jac[1], 'cols': jac[2]}
else:
return {'val': jac}
def compute_partials(self, inputs, partials):
if self.metadata['jacobian_type'] != 'matvec' and not self.metadata['finite_difference']:
angle_param = self._cycle_names[self.angle_param]
angle = inputs[angle_param]
num_var = self.num_var
var_shape = self.var_shape
var_size = np.prod(var_shape)
x = self._inputs_to_vector(inputs)
size = self.size
A = _compute_A(size, angle)
dA = _compute_dA(size, angle)
dA_x = np.atleast_2d(dA.dot(x)).T
pd_type = self.metadata['partial_type']
dtheta = np.array([[1.]])
y_name = self._cycle_names['y']
x_name = self._cycle_names['x']
for out_idx in range(num_var):
out_var = y_name.format(out_idx)
for in_idx in range(num_var):
in_var = x_name.format(in_idx)
Aij = A[array_idx(out_idx, var_size), array_idx(in_idx, var_size)]
J_y_x = self.make_jacobian_entry(Aij, pd_type)
J_y_angle = self.make_jacobian_entry(dA_x[array_idx(out_idx, var_size)],
pd_type)
partials[out_var, in_var] = J_y_x
partials[out_var, angle_param] = J_y_angle
theta_out = self._cycle_names['theta_out']
theta = self._cycle_names['theta']
partials[theta_out, theta] = self.make_jacobian_entry(dtheta, pd_type)
class ExplicitFirstComp(ExplicitCycleComp):
def __str__(self):
return 'Explicit Cycle Component - First'
def setup(self):
self.add_input('psi', val=1.)
self.angle_param = 'psi'
self._cycle_names['psi'] = 'psi'
super(ExplicitFirstComp, self).setup()
def compute(self, inputs, outputs):
theta = inputs[self._cycle_names['theta']]
psi = inputs[self._cycle_names['psi']]
A = _compute_A(self.size, psi)
y = A.dot(np.ones(self.size))
self._vector_to_outputs(y, outputs)
outputs[self._cycle_names['theta_out']] = theta
class ExplicitLastComp(ExplicitFirstComp):
def __str__(self):
return 'Explicit Cycle Component - Last'
def setup(self):
super(ExplicitLastComp, self).setup()
self.add_output('x_norm2', shape=(1,))
self._n = 1
# Setup partials
pd_type = self.metadata['partial_type']
if self.metadata['jacobian_type'] != 'matvec' and pd_type != 'array':
x = np.ones(self.var_shape)
for i in range(self.metadata['num_var']):
in_var = self._cycle_names['x'].format(i)
self.declare_partials('x_norm2', in_var,
**self._array2kwargs(x.flatten(), pd_type))
self.declare_partials(self._cycle_names['theta_out'], self._cycle_names['psi'],
**self._array2kwargs(np.array([1.]), pd_type))
def compute(self, inputs, outputs):
theta = inputs[self._cycle_names['theta']]
psi = inputs[self._cycle_names['psi']]
k = self.metadata['num_comp']
x = self._inputs_to_vector(inputs)
outputs['x_norm2'] = 0.5*np.dot(x,x)
# theta_out carries half the error, relative to the correct angle, that theta does.
outputs[self._cycle_names['theta_out']] = theta / 2 + (self._n * 2 * np.pi - psi) / (2 * k - 2)
def compute_partials(self, inputs, partials):
if self.metadata['jacobian_type'] != 'matvec' and not self.metadata['finite_difference']:
pd_type = self.metadata['partial_type']
for i in range(self.metadata['num_var']):
in_var = self._cycle_names['x'].format(i)
partials['x_norm2', in_var] = self.make_jacobian_entry(inputs[in_var].flat[:],
pd_type)
k = self.metadata['num_comp']
theta_out = self._cycle_names['theta_out']
theta = self._cycle_names['theta']
partials[theta_out, theta] = self.make_jacobian_entry(np.array([.5]), pd_type)
partials[theta_out, self._cycle_names['psi']] = \
self.make_jacobian_entry(np.array([-1/(2*k-2)]), pd_type)
def jacvec_product(self, inputs, d_inputs, d_outputs, mode):
if self.metadata['jacobian_type'] == 'matvec':
k = self.metadata['num_comp']
num_var = self.metadata['num_var']
theta_out = self._cycle_names['theta_out']
theta = self._cycle_names['theta']
psi = self._cycle_names['psi']
if mode == 'fwd':
if theta_out in d_outputs:
if theta in d_inputs:
d_outputs[theta_out] += 0.5 * d_inputs[theta]
if psi in d_inputs:
d_outputs[theta_out] += -d_inputs[psi] / (2 * k - 2)
for i in range(num_var):
in_var = self._cycle_names['x'].format(i)
if in_var in d_inputs and 'x_norm2' in d_outputs:
d_outputs['x_norm2'] += np.dot(inputs[in_var].flat, d_inputs[in_var].flat)
elif mode == 'rev':
if 'x_norm2' in d_outputs:
dxnorm = d_outputs['x_norm2']
for i in range(num_var):
x_i_name = self._cycle_names['x'].format(i)
if x_i_name in d_inputs:
d_inputs[x_i_name] += inputs[x_i_name] * dxnorm
if theta_out in d_outputs:
dtheta_out = d_outputs[theta_out]
if theta in d_inputs:
d_inputs[theta] += .5*dtheta_out
if psi in d_inputs:
d_inputs[psi] += -dtheta_out/(2*k-2)
| true
| true
|
790810188028addd63ebdc7a6d382463b54b3059
| 10,719
|
py
|
Python
|
ab3dmot.py
|
johnwlambert/argoverse_cbgs_kf_tracker
|
9268cb6dd9844f80eb107a0cc5e77e880d3b3e76
|
[
"BSD-Source-Code"
] | 27
|
2020-04-24T07:45:20.000Z
|
2022-03-08T09:17:34.000Z
|
ab3dmot.py
|
johnwlambert/argoverse_cbgs_kf_tracker
|
9268cb6dd9844f80eb107a0cc5e77e880d3b3e76
|
[
"BSD-Source-Code"
] | 4
|
2020-07-16T07:15:12.000Z
|
2022-02-17T01:24:56.000Z
|
ab3dmot.py
|
johnwlambert/argoverse_cbgs_kf_tracker
|
9268cb6dd9844f80eb107a0cc5e77e880d3b3e76
|
[
"BSD-Source-Code"
] | 22
|
2020-05-21T07:35:03.000Z
|
2021-12-24T05:24:17.000Z
|
#!/usr/bin/env python3
from filterpy.kalman import KalmanFilter
import matplotlib.pyplot as plt
import numpy as np
import pdb
from sklearn.utils.linear_assignment_ import linear_assignment
import sys
import time
from transform_utils import convert_3dbox_to_8corner
from iou_utils import compute_iou_2d_bboxes
class KalmanBoxTracker(object):
"""
This class represents the internal state of individual tracked objects observed as bbox.
"""
count = 0
def __init__(self, bbox3D, info):
"""
Initialises a tracker using initial bounding box.
"""
#define constant velocity model
self.kf = KalmanFilter(dim_x=10, dim_z=7)
self.kf.F = np.array([[1,0,0,0,0,0,0,1,0,0], # state transition matrix
[0,1,0,0,0,0,0,0,1,0],
[0,0,1,0,0,0,0,0,0,1],
[0,0,0,1,0,0,0,0,0,0],
[0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,1,0,0,0,0],
[0,0,0,0,0,0,1,0,0,0],
[0,0,0,0,0,0,0,1,0,0],
[0,0,0,0,0,0,0,0,1,0],
[0,0,0,0,0,0,0,0,0,1]])
self.kf.H = np.array([[1,0,0,0,0,0,0,0,0,0], # measurement function,
[0,1,0,0,0,0,0,0,0,0],
[0,0,1,0,0,0,0,0,0,0],
[0,0,0,1,0,0,0,0,0,0],
[0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,1,0,0,0,0],
[0,0,0,0,0,0,1,0,0,0]])
# with angular velocity
# self.kf = KalmanFilter(dim_x=11, dim_z=7)
# self.kf.F = np.array([[1,0,0,0,0,0,0,1,0,0,0], # state transition matrix
# [0,1,0,0,0,0,0,0,1,0,0],
# [0,0,1,0,0,0,0,0,0,1,0],
# [0,0,0,1,0,0,0,0,0,0,1],
# [0,0,0,0,1,0,0,0,0,0,0],
# [0,0,0,0,0,1,0,0,0,0,0],
# [0,0,0,0,0,0,1,0,0,0,0],
# [0,0,0,0,0,0,0,1,0,0,0],
# [0,0,0,0,0,0,0,0,1,0,0],
# [0,0,0,0,0,0,0,0,0,1,0],
# [0,0,0,0,0,0,0,0,0,0,1]])
# self.kf.H = np.array([[1,0,0,0,0,0,0,0,0,0,0], # measurement function,
# [0,1,0,0,0,0,0,0,0,0,0],
# [0,0,1,0,0,0,0,0,0,0,0],
# [0,0,0,1,0,0,0,0,0,0,0],
# [0,0,0,0,1,0,0,0,0,0,0],
# [0,0,0,0,0,1,0,0,0,0,0],
# [0,0,0,0,0,0,1,0,0,0,0]])
# self.kf.R[0:,0:] *= 10. # measurement uncertainty
self.kf.P[7:,7:] *= 1000. #state uncertainty, give high uncertainty to the unobservable initial velocities, covariance matrix
self.kf.P *= 10.
# self.kf.Q[-1,-1] *= 0.01 # process uncertainty
self.kf.Q[7:,7:] *= 0.01
self.kf.x[:7] = bbox3D.reshape((7, 1))
self.time_since_update = 0
self.id = KalmanBoxTracker.count
KalmanBoxTracker.count += 1
self.history = []
self.hits = 1 # number of total hits including the first detection
self.hit_streak = 1 # number of continuing hit considering the first detection
self.first_continuing_hit = 1
self.still_first = True
self.age = 0
self.info = info # other info
def update(self, bbox3D, info):
"""
Updates the state vector with observed bbox.
"""
self.time_since_update = 0
self.history = []
self.hits += 1
self.hit_streak += 1 # number of continuing hit
if self.still_first:
self.first_continuing_hit += 1 # number of continuing hits within the first streak
######################### orientation correction
if self.kf.x[3] >= np.pi: self.kf.x[3] -= np.pi * 2 # make the theta still in the range
if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2
new_theta = bbox3D[3]
if new_theta >= np.pi: new_theta -= np.pi * 2 # make the theta still in the range
if new_theta < -np.pi: new_theta += np.pi * 2
bbox3D[3] = new_theta
predicted_theta = self.kf.x[3]
if abs(new_theta - predicted_theta) > np.pi / 2.0 and abs(new_theta - predicted_theta) < np.pi * 3 / 2.0: # the two heading estimates differ by an obtuse angle, so flip the prediction by pi
self.kf.x[3] += np.pi
if self.kf.x[3] > np.pi: self.kf.x[3] -= np.pi * 2 # make the theta still in the range
if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2
# now the angle is acute: < 90 or > 270, convert the case of > 270 to < 90
if abs(new_theta - self.kf.x[3]) >= np.pi * 3 / 2.0:
if new_theta > 0: self.kf.x[3] += np.pi * 2
else: self.kf.x[3] -= np.pi * 2
#########################
self.kf.update(bbox3D)
if self.kf.x[3] >= np.pi: self.kf.x[3] -= np.pi * 2 # make the theta still in the range
if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2
self.info = info
def predict(self):
"""
Advances the state vector and returns the predicted bounding box estimate.
"""
self.kf.predict()
if self.kf.x[3] >= np.pi: self.kf.x[3] -= np.pi * 2
if self.kf.x[3] < -np.pi: self.kf.x[3] += np.pi * 2
self.age += 1
if(self.time_since_update>0):
self.hit_streak = 0
self.still_first = False
self.time_since_update += 1
self.history.append(self.kf.x)
return self.history[-1]
def get_state(self):
"""
Returns the current bounding box estimate.
"""
return self.kf.x[:7].reshape((7, ))
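# Minimal single-track predict/update cycle; the boxes and the info payload
# (here just a detection score) are fabricated. State layout is
# [x, y, z, theta, l, w, h] plus three velocities.
if __name__ == '__main__':
    trk = KalmanBoxTracker(np.array([10.0, 2.0, 0.5, 0.1, 4.0, 1.8, 1.5]),
                           info=np.array([0.9]))
    trk.predict()                                          # roll the state forward
    trk.update(np.array([10.6, 2.1, 0.5, 0.12, 4.0, 1.8, 1.5]),
               info=np.array([0.95]))                      # fuse the new detection
    print(trk.get_state(), trk.hits, trk.time_since_update)  # filtered box, 2, 0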
def associate_detections_to_trackers(detections,trackers,iou_threshold=0.1):
# def associate_detections_to_trackers(detections,trackers,iou_threshold=0.01): # ablation study
# def associate_detections_to_trackers(detections,trackers,iou_threshold=0.25):
"""
Assigns detections to tracked object (both represented as bounding boxes)
detections: N x 8 x 3
trackers: M x 8 x 3
Returns 3 lists of matches, unmatched_detections and unmatched_trackers
"""
if(len(trackers)==0):
return np.empty((0,2),dtype=int), np.arange(len(detections)), np.empty((0,8,3),dtype=int)
iou_matrix = np.zeros((len(detections),len(trackers)),dtype=np.float32)
for d,det in enumerate(detections):
for t,trk in enumerate(trackers):
#print(f'On d={d}, t={t}')
#iou_matrix[d,t] = iou3d(det,trk)[1] # try 2d iou instead # det: 8 x 3, trk: 8 x 3
iou_matrix[d,t] = compute_iou_2d_bboxes(det, trk)
matched_indices = linear_assignment(-iou_matrix) # hungarian algorithm
unmatched_detections = []
for d,det in enumerate(detections):
if(d not in matched_indices[:,0]):
unmatched_detections.append(d)
unmatched_trackers = []
for t,trk in enumerate(trackers):
if(t not in matched_indices[:,1]):
unmatched_trackers.append(t)
#print(iou_matrix)
#filter out matched with low IOU
matches = []
for m in matched_indices:
if(iou_matrix[m[0],m[1]]<iou_threshold):
unmatched_detections.append(m[0])
unmatched_trackers.append(m[1])
else:
matches.append(m.reshape(1,2))
if(len(matches)==0):
matches = np.empty((0,2),dtype=int)
else:
matches = np.concatenate(matches,axis=0)
return matches, np.array(unmatched_detections), np.array(unmatched_trackers)
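# The assignment step is standard Hungarian matching on a negated IoU matrix.
# The same matching with SciPy's maintained replacement for sklearn's removed
# linear_assignment helper, on a toy IoU matrix (values invented): negate IoU so
# the minimizer maximizes overlap, then threshold exactly as above.
if __name__ == '__main__':
    from scipy.optimize import linear_sum_assignment
    iou = np.array([[0.70, 0.05, 0.00],      # rows: detections, cols: trackers
                    [0.10, 0.60, 0.02],
                    [0.00, 0.08, 0.03]])
    rows, cols = linear_sum_assignment(-iou)
    matches = [(d, t) for d, t in zip(rows, cols) if iou[d, t] >= 0.1]
    print(matches)                            # [(0, 0), (1, 1)]; det 2 unmatched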
class AB3DMOT(object):
def __init__(self,max_age=2,min_hits=3): # max_age keeps a track alive for up to 2 frames without a matching detection, effectively interpolating through short misses
# def __init__(self,max_age=3,min_hits=3): # ablation study
# def __init__(self,max_age=1,min_hits=3):
# def __init__(self,max_age=2,min_hits=1):
# def __init__(self,max_age=2,min_hits=5):
"""
"""
self.max_age = max_age
self.min_hits = min_hits
self.trackers = []
self.frame_count = 0
# self.reorder = [3, 4, 5, 6, 2, 1, 0]
# self.reorder_back = [6, 5, 4, 0, 1, 2, 3]
def update(self,dets_all):
"""
Params:
dets_all: dict
dets - a numpy array of detections in the format [[x,y,z,theta,l,w,h],[x,y,z,theta,l,w,h],...]
info: a array of other info for each det
Requires: this method must be called once for each frame even with empty detections.
Returns a similar array, where the last column is the object ID.
NOTE: The number of objects returned may differ from the number of detections provided.
"""
dets, info = dets_all['dets'], dets_all['info'] # dets: N x 7, float numpy array
# dets = dets[:, self.reorder]
self.frame_count += 1
trks = np.zeros((len(self.trackers),7)) # N x 7 , #get predicted locations from existing trackers.
to_del = []
ret = []
for t,trk in enumerate(trks):
pos = self.trackers[t].predict().reshape((-1, 1))
trk[:] = [pos[0], pos[1], pos[2], pos[3], pos[4], pos[5], pos[6]]
if(np.any(np.isnan(pos))):
to_del.append(t)
trks = np.ma.compress_rows(np.ma.masked_invalid(trks))
for t in reversed(to_del):
self.trackers.pop(t)
dets_8corner = [convert_3dbox_to_8corner(det_tmp) for det_tmp in dets]
if len(dets_8corner) > 0: dets_8corner = np.stack(dets_8corner, axis=0)
else: dets_8corner = []
trks_8corner = [convert_3dbox_to_8corner(trk_tmp) for trk_tmp in trks]
if len(trks_8corner) > 0: trks_8corner = np.stack(trks_8corner, axis=0)
matched, unmatched_dets, unmatched_trks = associate_detections_to_trackers(dets_8corner, trks_8corner)
#update matched trackers with assigned detections
for t,trk in enumerate(self.trackers):
if t not in unmatched_trks:
d = matched[np.where(matched[:,1]==t)[0],0] # a list of index
trk.update(dets[d,:][0], info[d, :][0])
#create and initialise new trackers for unmatched detections
for i in unmatched_dets: # a scalar of index
trk = KalmanBoxTracker(dets[i,:], info[i, :])
self.trackers.append(trk)
i = len(self.trackers)
for trk in reversed(self.trackers):
d = trk.get_state() # bbox location
# d = d[self.reorder_back]
if((trk.time_since_update < self.max_age) and (trk.hits >= self.min_hits or self.frame_count <= self.min_hits)):
ret.append(np.concatenate((d, [trk.id+1], trk.info)).reshape(1,-1)) # +1 as MOT benchmark requires positive
i -= 1
#remove dead tracklet
if(trk.time_since_update >= self.max_age):
self.trackers.pop(i)
if(len(ret)>0):
return np.concatenate(ret) # x, y, z, theta, l, w, h, ID, other info, confidence
return np.empty((0,15))
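# --- Illustrative driver (added; not part of the original AB3DMOT source). ---
# A minimal smoke test under assumed shapes: dets is N x 7 ([x,y,z,theta,l,w,h])
# and info is N x 7 (dummy columns here), so each returned row is 15 wide,
# matching the np.empty((0,15)) fallback above.
def _demo_tracker():
    np.random.seed(0)
    mot_tracker = AB3DMOT()
    for frame in range(5):
        dets = np.random.rand(3, 7) * np.array([10, 10, 10, np.pi, 4, 2, 2])
        info = np.zeros((3, 7))  # placeholder for score/class columns
        tracks = mot_tracker.update({'dets': dets, 'info': info})
        print(frame, tracks.shape)  # columns: x,y,z,theta,l,w,h,ID,info...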
| 38.836957
| 160
| 0.566844
|
| true
| true
|
7908102323cd6d3ecb96ace4c798612538fe5146
| 3,242
|
py
|
Python
|
worktickets.py
|
benhg/work-tickets
|
dda344084736f9446cb6a1a49406754861aca19a
|
[
"MIT"
] | 1
|
2017-11-23T01:39:07.000Z
|
2017-11-23T01:39:07.000Z
|
worktickets.py
|
benhg/work-tickets
|
dda344084736f9446cb6a1a49406754861aca19a
|
[
"MIT"
] | null | null | null |
worktickets.py
|
benhg/work-tickets
|
dda344084736f9446cb6a1a49406754861aca19a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import json
import argparse
import datetime
class TicketManager:
ticketfile = '/Users/ben/ticketing/tickets.json'
    def __init__(self, ticketfile: str = '/Users/ben/Google Drive/code/ticketing/tickets.json') -> None:
        self.ticketfile = ticketfile
self.read_tickets()
    def read_tickets(self) -> None:
        with open(self.ticketfile) as f:
            self.tickets = json.load(f)
    def write_tickets(self) -> None:
        with open(self.ticketfile, "w") as f:
            json.dump(self.tickets, f, indent=4)
def create_ticket(self, title="", desc="", dest="", due="", pri=0, completed=False):
ticket = {"title": title,
"desc": desc,
"for": dest,
"time_in": datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
"time_out": due,
"nice": pri,
"completed": completed
}
self.tickets[title] = ticket
self.write_tickets()
self.read_tickets()
def update_ticket(self, title, new_completed):
self.tickets[title]["completed"] = new_completed
self.write_tickets()
self.read_tickets()
def show_all_tickets(self):
for ticket in self.tickets.values():
print("""TICKET NAME: {}
\tTICKET DESCRIPTION: {}
\tTICKET CREATED: {}
\tTICKET DUE: {}
\tTICKET FOR: {}
\tTICKET DONE: {}
\tTICKET PRIORITY: {}
""".format(ticket['title'], ticket['desc'], ticket['time_in'], ticket['time_out'],
ticket['for'], ticket['completed'], ticket['nice']))
    def show_unfinished(self):
flag = False
for ticket in self.tickets.values():
if not ticket['completed']:
flag = True
print("""TICKET NAME: {}
\tTICKET DESCRIPTION: {}
\tTICKET CREATED: {}
\tTICKET DUE: {}
\tTICKET FOR: {}
\tTICKET PRIORITY: {}
""".format(ticket['title'], ticket['desc'], ticket['time_in'], ticket['time_out'],
ticket['for'], ticket['nice']))
if not flag:
print("No Unfinished Tasks!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Manage work tickets.')
parser.add_argument("--mode", action="store", dest="mode", default='ls')
parser.add_argument("--title", action="store", dest="title")
parser.add_argument("--desc", action="store", dest="desc")
parser.add_argument("--for", action="store", dest="dest")
parser.add_argument("--due", action="store", dest="time_out")
parser.add_argument("--pri", action="store", dest="nice")
parser.add_argument("--done", action="store_true",
dest="completed", default=False)
args = parser.parse_args()
tm = TicketManager("tickets.json")
if args.mode == "ls":
        tm.show_unfinished()
elif args.mode == "ls2":
tm.show_all_tickets()
elif args.mode == "new" or args.mode == "add":
tm.create_ticket(title=args.title, desc=args.desc, dest=args.dest,
due=args.time_out, pri=args.nice, completed=args.completed)
print("New Task '{}' Added".format(args.title))
elif args.mode == "up":
tm.update_ticket(args.title, args.completed)
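# Example invocations (illustrative only; flag names come from the parser above):
#   python3 worktickets.py --mode add --title "fix printer" --desc "3rd floor" --for IT --due "2017-12-01 09:00" --pri 2
#   python3 worktickets.py --mode ls                                  # list unfinished tickets
#   python3 worktickets.py --mode up --title "fix printer" --done     # mark done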
| 35.23913
| 110
| 0.588834
|
| true
| true
|
7908104c7294138f801d289a8830574b608a1b70
| 3,354
|
py
|
Python
|
tint/peer.py
|
bmuller/tint
|
e74a3e4c46f71dfcb2574920467ad791d29de6fe
|
[
"MIT"
] | 1
|
2015-02-18T18:33:44.000Z
|
2015-02-18T18:33:44.000Z
|
tint/peer.py
|
8468/tint
|
e74a3e4c46f71dfcb2574920467ad791d29de6fe
|
[
"MIT"
] | null | null | null |
tint/peer.py
|
8468/tint
|
e74a3e4c46f71dfcb2574920467ad791d29de6fe
|
[
"MIT"
] | null | null | null |
from tint.ssl.context import PFSContextFactory
from tint.log import Logger
from tint.protocols.tintp import ConnectionPool
from tint.protocols.tintp import TintProtocolFactory
from tint.friends import FriendsList
class Peer(object):
def __init__(self, keyStore, storage, resolver):
self.keyStore = keyStore
self.storage = storage
self.contextFactory = PFSContextFactory(self.keyStore)
self.pool = ConnectionPool(resolver, self.contextFactory, self.keyStore, self.storage)
self.protocolFactory = TintProtocolFactory(self.pool)
self.friends = FriendsList(self.storage, self.keyStore, resolver)
self.log = Logger(system=self)
def getKeyId(self):
"""
Get the keyId used by this peer (this peer's identifier).
This is stored in the key store.
"""
return self.keyStore.getKeyId()
def getPublicKey(self):
"""
        Get the public key used by this peer.
        This is stored in the key store.
"""
return self.keyStore.getPublicKey()
def set(self, hostKeyId, storagePath, storageValue):
"""
Set a value on a host.
@param hostKeyId: The key id for the destination host to set the
given key. This could be the local host, in which case the hostKey
will be the same as this C{Peer}'s keyStore keyId.
@param storagePath: The path to the key to set. For instance, this
could be something like /chat/<somekey>/inbox.
@param storageValue: The value to set.
"""
if hostKeyId == self.getKeyId():
return self.storage.set(hostKeyId, storagePath, storageValue)
return self.pool.send(hostKeyId, 'set', storagePath, storageValue)
def get(self, hostKeyId, storagePath):
"""
Get a value from a host.
@param hostKeyId: The key id for the destination host to get the
given key. This could be the local host, in which case the hostKey
will be the same as this C{Peer}'s keyStore keyId.
@param storagePath: The path to the key to get. For instance, this
could be something like /chat/<somekey>/inbox.
"""
if hostKeyId == self.getKeyId():
self.log.debug("getting storagePath %s on self" % storagePath)
return self.storage.get(hostKeyId, storagePath)
self.log.debug("getting storagePath %s on %s" % (storagePath, hostKeyId))
return self.pool.send(hostKeyId, 'get', storagePath)
def push(self, hostKeyId, storagePath, storageValue):
"""
        Given a key, create a new key at <key>/<id> with the given value, where <id>
is an auto-incrementing integer value starting at 0.
"""
if hostKeyId == self.getKeyId():
return self.storage.push(hostKeyId, storagePath, storageValue)
return self.pool.send(hostKeyId, 'push', storagePath, storageValue)
def ls(self, hostKeyId, storagePath, offset, length):
"""
        Given a key, get all child keys (with the given offset and length). Length cannot
be more than 1000.
"""
if hostKeyId == self.getKeyId():
return self.storage.ls(hostKeyId, storagePath, offset, length)
return self.pool.send(hostKeyId, 'ls', storagePath, offset, length)
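# Usage sketch (illustrative; keyStore, storage and resolver are built
# elsewhere in tint, and remote calls presumably return Twisted Deferreds):
#   peer = Peer(keyStore, storage, resolver)
#   peer.set(peer.getKeyId(), '/chat/inbox', 'hello')   # handled locally
#   d = peer.get(someRemoteKeyId, '/chat/inbox')        # routed through the pool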
| 39
| 94
| 0.651163
|
| true
| true
|
790810ef3cb25b64b82b046ed9c5a60d5c9d539f
| 463
|
py
|
Python
|
provision/onboarding/onboard_namespaces.py
|
hamshif/dags
|
6daf6313d35824b58efa7f61f90e30a169946532
|
[
"Apache-2.0"
] | null | null | null |
provision/onboarding/onboard_namespaces.py
|
hamshif/dags
|
6daf6313d35824b58efa7f61f90e30a169946532
|
[
"Apache-2.0"
] | null | null | null |
provision/onboarding/onboard_namespaces.py
|
hamshif/dags
|
6daf6313d35824b58efa7f61f90e30a169946532
|
[
"Apache-2.0"
] | null | null | null |
from data_common.config.configurer import get_conf
from data_common.provision.gs_buckets import confirm_bucket
def init_namespace_poc():
conf = get_conf()
project_id = conf.cloud.gcp.project
namespaces = conf.namespaces
    for namespace in namespaces:
print(f'namespace: {namespace}')
bucket = confirm_bucket(
bucket_name=namespace,
project_id=project_id
)
print(bucket.name)
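# Illustrative conf shape assumed by init_namespace_poc (hypothetical values):
#   conf.cloud.gcp.project -> 'my-gcp-project'
#   conf.namespaces        -> {'team-a': {...}, 'team-b': {...}}
# Calling init_namespace_poc() then confirms one GCS bucket per namespace key.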
| 22.047619
| 59
| 0.678186
|
| true
| true
|
79081130720823c2bd9ee0cb306dd1540e6b5886
| 401
|
py
|
Python
|
etnapy/__init__.py
|
Astropilot/etnapy
|
6b97f4deca095a820e420b59fc0eaaadd054d771
|
[
"MIT"
] | null | null | null |
etnapy/__init__.py
|
Astropilot/etnapy
|
6b97f4deca095a820e420b59fc0eaaadd054d771
|
[
"MIT"
] | null | null | null |
etnapy/__init__.py
|
Astropilot/etnapy
|
6b97f4deca095a820e420b59fc0eaaadd054d771
|
[
"MIT"
] | null | null | null |
"""
ETNA School API Wrapper
~~~~~~~~~~~~~~~~~~~~~~~
A python wrapper to help make python3 apps/bots using the ETNA API.
:copyright: (c) 2019 Yohann MARTIN
:license: MIT, see LICENSE for more details.
"""
__title__ = 'etnapy'
__author__ = 'Yohann MARTIN'
__license__ = 'MIT'
__version__ = "1.0.0"
from .user import User
from .promo import Promo
from .trophy import Trophy
from .etnapy import Intra
| 20.05
| 67
| 0.698254
|
| true
| true
|
79081164975c8271a4a95835b01770ed32e72d4f
| 14,437
|
py
|
Python
|
segmentation_models_pytorch/encoders/zerocenter.py
|
vinnamkim/segmentation_models.pytorch
|
f967ded34df6fb536e8e8cba9b6491ae63b939f5
|
[
"MIT"
] | null | null | null |
segmentation_models_pytorch/encoders/zerocenter.py
|
vinnamkim/segmentation_models.pytorch
|
f967ded34df6fb536e8e8cba9b6491ae63b939f5
|
[
"MIT"
] | null | null | null |
segmentation_models_pytorch/encoders/zerocenter.py
|
vinnamkim/segmentation_models.pytorch
|
f967ded34df6fb536e8e8cba9b6491ae63b939f5
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
#from .utils import load_state_dict_from_url
from .utils import zerocenter
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = zerocenter(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
out = zerocenter(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = zerocenter(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = zerocenter(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
out = zerocenter(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = zerocenter(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
# if pretrained:
# state_dict = load_state_dict_from_url(model_urls[arch],
# progress=progress)
# model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
class ZeroCenterEncoder(ResNet):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.pretrained = False
del self.fc
def forward(self, x):
x0 = self.conv1(x)
x0 = self.bn1(x0)
x0 = self.relu(x0)
x1 = self.maxpool(x0)
x1 = zerocenter(x1)
x1 = self.layer1(x1)
x2 = self.layer2(x1)
x3 = self.layer3(x2)
x4 = self.layer4(x3)
return [x4, x3, x2, x1, x0]
def load_state_dict(self, state_dict, **kwargs):
state_dict.pop('fc.bias')
state_dict.pop('fc.weight')
super().load_state_dict(state_dict, **kwargs)
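# Illustrative smoke test (added): build a ResNet-18-shaped ZeroCenterEncoder
# and print the five feature maps it returns, deepest first. The 224x224 input
# is an assumption; any spatial size divisible by 32 works.
def _demo_encoder():
    enc = ZeroCenterEncoder(BasicBlock, [2, 2, 2, 2])
    x = torch.randn(1, 3, 224, 224)
    feats = enc(x)  # [x4, x3, x2, x1, x0]
    for f in feats:
        print(tuple(f.shape))  # (1,512,7,7), (1,256,14,14), ..., (1,64,112,112)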
| 38.396277
| 107
| 0.626377
|
| true
| true
|
79081216f75f33759b780ec16eb23dc9dac30bc1
| 7,185
|
py
|
Python
|
backend/shiny_lake_28693/settings.py
|
crowdbotics-apps/shiny-lake-28693
|
be8eac9d53473f5251f4e1a091caf4cd54beb62e
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
backend/shiny_lake_28693/settings.py
|
crowdbotics-apps/shiny-lake-28693
|
be8eac9d53473f5251f4e1a091caf4cd54beb62e
|
[
"FTL",
"AML",
"RSA-MD"
] | 20
|
2021-07-10T18:43:17.000Z
|
2021-07-10T18:43:19.000Z
|
backend/shiny_lake_28693/settings.py
|
crowdbotics-apps/shiny-lake-28693
|
be8eac9d53473f5251f4e1a091caf4cd54beb62e
|
[
"FTL",
"AML",
"RSA-MD"
] | null | null | null |
"""
Django settings for shiny_lake_28693 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
from modules.manifest import get_modules
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
MODULES_APPS = get_modules()
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS + MODULES_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'shiny_lake_28693.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'web_build')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'shiny_lake_28693.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), os.path.join(BASE_DIR, 'web_build/static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
| 28.971774
| 112
| 0.730689
|
| true
| true
|
79081276f4271dbca464776a1324868d791594cf
| 26,254
|
py
|
Python
|
Common/ComputationalGeometry/Testing/Python/TestParametricFunctions.py
|
biddisco/VTK
|
80fa7c3a767ce306586a596a6c6f3518a34e2f11
|
[
"BSD-3-Clause"
] | 1
|
2021-10-13T01:57:14.000Z
|
2021-10-13T01:57:14.000Z
|
Common/ComputationalGeometry/Testing/Python/TestParametricFunctions.py
|
heartvalve/VTK
|
b90a7749fc1491d53aadce5fb460f69713b14837
|
[
"BSD-3-Clause"
] | null | null | null |
Common/ComputationalGeometry/Testing/Python/TestParametricFunctions.py
|
heartvalve/VTK
|
b90a7749fc1491d53aadce5fb460f69713b14837
|
[
"BSD-3-Clause"
] | 5
|
2015-10-09T04:12:29.000Z
|
2021-12-15T16:57:11.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# ------------------------------------------------------------
# Purpose: Test the parametric functions.
# ------------------------------------------------------------
class TestParametricFunctions(vtk.test.Testing.vtkTest):
def testParametricFunctions(self):
# ------------------------------------------------------------
# Get a texture
# ------------------------------------------------------------
textureReader = vtk.vtkJPEGReader()
textureReader.SetFileName(VTK_DATA_ROOT + "/Data/beach.jpg")
texture = vtk.vtkTexture()
texture.SetInputConnection(textureReader.GetOutputPort())
# ------------------------------------------------------------
# For each parametric surface:
# 1) Create it
# 2) Assign mappers and actors
# 3) Position this object
        # 4) Add a label
# ------------------------------------------------------------
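        # A helper along these lines could fold steps 1-4 into one call; shown
        # here as an illustrative sketch only, since the test keeps its
        # explicit per-surface blocks below:
        #   def add_surface(fn, pos, label, scalar_range=None, texture=None):
        #       src = vtk.vtkParametricFunctionSource()
        #       src.SetParametricFunction(fn)
        #       mapper = vtk.vtkPolyDataMapper()
        #       mapper.SetInputConnection(src.GetOutputPort())
        #       actor = vtk.vtkActor(); actor.SetMapper(mapper); actor.SetPosition(*pos)
        #       return actor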
# ------------------------------------------------------------
# Create a torus
# ------------------------------------------------------------
torus = vtk.vtkParametricTorus()
torusSource = vtk.vtkParametricFunctionSource()
torusSource.SetParametricFunction(torus)
torusSource.SetScalarModeToPhase()
torusMapper = vtk.vtkPolyDataMapper()
torusMapper.SetInputConnection(torusSource.GetOutputPort())
torusMapper.SetScalarRange(0, 360)
torusActor = vtk.vtkActor()
torusActor.SetMapper(torusMapper)
torusActor.SetPosition(0, 12, 0)
torusTextMapper = vtk.vtkTextMapper()
torusTextMapper.SetInput("Torus")
torusTextMapper.GetTextProperty().SetJustificationToCentered()
torusTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
torusTextMapper.GetTextProperty().SetColor(1, 0, 0)
torusTextMapper.GetTextProperty().SetFontSize(14)
torusTextActor = vtk.vtkActor2D()
torusTextActor.SetMapper(torusTextMapper)
torusTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
torusTextActor.GetPositionCoordinate().SetValue(0, 9.5, 0)
# ------------------------------------------------------------
# Create a klein bottle
# ------------------------------------------------------------
klein = vtk.vtkParametricKlein()
kleinSource = vtk.vtkParametricFunctionSource()
kleinSource.SetParametricFunction(klein)
kleinSource.SetScalarModeToU0V0()
kleinMapper = vtk.vtkPolyDataMapper()
kleinMapper.SetInputConnection(kleinSource.GetOutputPort())
kleinMapper.SetScalarRange(0, 3)
kleinActor = vtk.vtkActor()
kleinActor.SetMapper(kleinMapper)
kleinActor.SetPosition(8, 10.5, 0)
kleinTextMapper = vtk.vtkTextMapper()
kleinTextMapper.SetInput("Klein")
kleinTextMapper.GetTextProperty().SetJustificationToCentered()
kleinTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
kleinTextMapper.GetTextProperty().SetColor(1, 0, 0)
kleinTextMapper.GetTextProperty().SetFontSize(14)
kleinTextActor = vtk.vtkActor2D()
kleinTextActor.SetMapper(kleinTextMapper)
kleinTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
kleinTextActor.GetPositionCoordinate().SetValue(8, 9.5, 0)
# ------------------------------------------------------------
# Create a Figure-8 Klein
# ------------------------------------------------------------
klein2 = vtk.vtkParametricFigure8Klein()
klein2Source = vtk.vtkParametricFunctionSource()
klein2Source.SetParametricFunction(klein2)
klein2Source.GenerateTextureCoordinatesOn()
klein2Mapper = vtk.vtkPolyDataMapper()
klein2Mapper.SetInputConnection(klein2Source.GetOutputPort())
klein2Mapper.SetScalarRange(0, 3)
klein2Actor = vtk.vtkActor()
klein2Actor.SetMapper(klein2Mapper)
klein2Actor.SetPosition(16, 12, 0)
klein2Actor.SetTexture(texture)
fig8KleinTextMapper = vtk.vtkTextMapper()
fig8KleinTextMapper.SetInput("Fig-8.Klein")
fig8KleinTextMapper.GetTextProperty().SetJustificationToCentered()
fig8KleinTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
fig8KleinTextMapper.GetTextProperty().SetColor(1, 0, 0)
fig8KleinTextMapper.GetTextProperty().SetFontSize(14)
fig8KleinTextActor = vtk.vtkActor2D()
fig8KleinTextActor.SetMapper(fig8KleinTextMapper)
fig8KleinTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
fig8KleinTextActor.GetPositionCoordinate().SetValue(16, 9.5, 0)
# ------------------------------------------------------------
# Create a mobius strip
# ------------------------------------------------------------
mobius = vtk.vtkParametricMobius()
mobiusSource = vtk.vtkParametricFunctionSource()
mobiusSource.SetParametricFunction(mobius)
mobiusSource.GenerateTextureCoordinatesOn()
mobiusMapper = vtk.vtkPolyDataMapper()
mobiusMapper.SetInputConnection(mobiusSource.GetOutputPort())
mobiusActor = vtk.vtkActor()
mobiusActor.SetMapper(mobiusMapper)
mobiusActor.RotateX(45)
mobiusActor.SetPosition(24, 12, 0)
mobiusActor.SetTexture(texture)
mobiusTextMapper = vtk.vtkTextMapper()
mobiusTextMapper.SetInput("Mobius")
mobiusTextMapper.GetTextProperty().SetJustificationToCentered()
mobiusTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
mobiusTextMapper.GetTextProperty().SetColor(1, 0, 0)
mobiusTextMapper.GetTextProperty().SetFontSize(14)
mobiusTextActor = vtk.vtkActor2D()
mobiusTextActor.SetMapper(mobiusTextMapper)
mobiusTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
mobiusTextActor.GetPositionCoordinate().SetValue(24, 9.5, 0)
# ------------------------------------------------------------
# Create a super toroid
# ------------------------------------------------------------
toroid = vtk.vtkParametricSuperToroid()
toroid.SetN1(2)
toroid.SetN2(3)
toroidSource = vtk.vtkParametricFunctionSource()
toroidSource.SetParametricFunction(toroid)
toroidSource.SetScalarModeToU()
toroidMapper = vtk.vtkPolyDataMapper()
toroidMapper.SetInputConnection(toroidSource.GetOutputPort())
toroidMapper.SetScalarRange(0, 6.28)
toroidActor = vtk.vtkActor()
toroidActor.SetMapper(toroidMapper)
toroidActor.SetPosition(0, 4, 0)
superToroidTextMapper = vtk.vtkTextMapper()
superToroidTextMapper.SetInput("Super.Toroid")
superToroidTextMapper.GetTextProperty().SetJustificationToCentered()
superToroidTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
superToroidTextMapper.GetTextProperty().SetColor(1, 0, 0)
superToroidTextMapper.GetTextProperty().SetFontSize(14)
superToroidTextActor = vtk.vtkActor2D()
superToroidTextActor.SetMapper(superToroidTextMapper)
superToroidTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
superToroidTextActor.GetPositionCoordinate().SetValue(0, 1.5, 0)
# ------------------------------------------------------------
# Create a super ellipsoid
# ------------------------------------------------------------
superEllipsoid = vtk.vtkParametricSuperEllipsoid()
superEllipsoid.SetXRadius(1.25)
superEllipsoid.SetYRadius(1.5)
superEllipsoid.SetZRadius(1.0)
superEllipsoid.SetN1(1.1)
superEllipsoid.SetN2(1.75)
superEllipsoidSource = vtk.vtkParametricFunctionSource()
superEllipsoidSource.SetParametricFunction(superEllipsoid)
superEllipsoidSource.SetScalarModeToV()
superEllipsoidMapper = vtk.vtkPolyDataMapper()
superEllipsoidMapper.SetInputConnection(superEllipsoidSource.GetOutputPort())
superEllipsoidMapper.SetScalarRange(0, 3.14)
superEllipsoidActor = vtk.vtkActor()
superEllipsoidActor.SetMapper(superEllipsoidMapper)
superEllipsoidActor.SetPosition(8, 4, 0)
superEllipsoidTextMapper = vtk.vtkTextMapper()
superEllipsoidTextMapper.SetInput("Super.Ellipsoid")
superEllipsoidTextMapper.GetTextProperty().SetJustificationToCentered()
superEllipsoidTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
superEllipsoidTextMapper.GetTextProperty().SetColor(1, 0, 0)
superEllipsoidTextMapper.GetTextProperty().SetFontSize(14)
superEllipsoidTextActor = vtk.vtkActor2D()
superEllipsoidTextActor.SetMapper(superEllipsoidTextMapper)
superEllipsoidTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
superEllipsoidTextActor.GetPositionCoordinate().SetValue(8, 1.5, 0)
# ------------------------------------------------------------
# Create an open 1D spline
# ------------------------------------------------------------
splinePoints = [
[0.50380158308139134, -0.60679315105396936, -0.37248976406291578],
[-0.4354646054261665, -0.85362339758017258, -0.84844312996065385],
[0.2163147512899315, -0.39797507012168643, -0.76700353518454523],
[0.97158415334838644, -0.58513467367046257, -0.35846037946569753],
[-0.64359767997804918, -0.94620739107309249, -0.90762176546623086],
[-0.39901219094126117, -0.1978931497772658, 0.0098316934936828471],
[-0.75872745167404765, 0.067719714281950116, 0.165237936733867],
[-0.84599731389712418, -0.67685466896596114, 0.10357868909071133],
[0.84702754758625654, -0.0080077177882230677, -0.58571286666473044],
[-0.076150034124101484, 0.14637647622561856, 0.1494359239700418] ]
inputPoints = vtk.vtkPoints()
for i in range(0, 10):
inputPoints.InsertPoint(i, splinePoints[i])
spline = vtk.vtkParametricSpline()
spline.SetPoints(inputPoints)
spline.ClosedOff()
splineSource = vtk.vtkParametricFunctionSource()
splineSource.SetParametricFunction(spline)
splineMapper = vtk.vtkPolyDataMapper()
splineMapper.SetInputConnection(splineSource.GetOutputPort())
splineActor = vtk.vtkActor()
splineActor.SetMapper(splineMapper)
splineActor.SetPosition(16, 4, 0)
splineActor.GetProperty().SetColor(0, 0, 0)
splineTextMapper = vtk.vtkTextMapper()
splineTextMapper.SetInput("Open.Spline")
splineTextMapper.GetTextProperty().SetJustificationToCentered()
splineTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
splineTextMapper.GetTextProperty().SetColor(1, 0, 0)
splineTextMapper.GetTextProperty().SetFontSize(14)
splineTextActor = vtk.vtkActor2D()
splineTextActor.SetMapper(splineTextMapper)
splineTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
splineTextActor.GetPositionCoordinate().SetValue(16, 1.5, 0)
# ------------------------------------------------------------
# Create a closed 1D spline
# ------------------------------------------------------------
spline2 = vtk.vtkParametricSpline()
spline2.SetPoints(inputPoints)
spline2.ClosedOn()
spline2Source = vtk.vtkParametricFunctionSource()
spline2Source.SetParametricFunction(spline2)
spline2Mapper = vtk.vtkPolyDataMapper()
spline2Mapper.SetInputConnection(spline2Source.GetOutputPort())
spline2Actor = vtk.vtkActor()
spline2Actor.SetMapper(spline2Mapper)
spline2Actor.SetPosition(24, 4, 0)
spline2Actor.GetProperty().SetColor(0, 0, 0)
spline2TextMapper = vtk.vtkTextMapper()
spline2TextMapper.SetInput("Closed.Spline")
spline2TextMapper.GetTextProperty().SetJustificationToCentered()
spline2TextMapper.GetTextProperty().SetVerticalJustificationToCentered()
spline2TextMapper.GetTextProperty().SetColor(1, 0, 0)
spline2TextMapper.GetTextProperty().SetFontSize(14)
spline2TextActor = vtk.vtkActor2D()
spline2TextActor.SetMapper(spline2TextMapper)
spline2TextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
spline2TextActor.GetPositionCoordinate().SetValue(24, 1.5, 0)
# ------------------------------------------------------------
# Create a spiral conic
# ------------------------------------------------------------
sconic = vtk.vtkParametricConicSpiral()
sconic.SetA(0.8)
sconic.SetB(2.5)
sconic.SetC(0.4)
sconicSource = vtk.vtkParametricFunctionSource()
sconicSource.SetParametricFunction(sconic)
sconicSource.SetScalarModeToDistance()
sconicMapper = vtk.vtkPolyDataMapper()
sconicMapper.SetInputConnection(sconicSource.GetOutputPort())
sconicActor = vtk.vtkActor()
sconicActor.SetMapper(sconicMapper)
sconicMapper.SetScalarRange(0, 9)
sconicActor.SetPosition(0, -4, 0)
sconicActor.SetScale(1.2, 1.2, 1.2)
sconicTextMapper = vtk.vtkTextMapper()
sconicTextMapper.SetInput("Spiral.Conic")
sconicTextMapper.GetTextProperty().SetJustificationToCentered()
sconicTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
sconicTextMapper.GetTextProperty().SetColor(1, 0, 0)
sconicTextMapper.GetTextProperty().SetFontSize(14)
sconicTextActor = vtk.vtkActor2D()
sconicTextActor.SetMapper(sconicTextMapper)
sconicTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
sconicTextActor.GetPositionCoordinate().SetValue(0, -6.5, 0)
# ------------------------------------------------------------
# Create Boy's surface
# ------------------------------------------------------------
boy = vtk.vtkParametricBoy()
boySource = vtk.vtkParametricFunctionSource()
boySource.SetParametricFunction(boy)
boySource.SetScalarModeToModulus()
boyMapper = vtk.vtkPolyDataMapper()
boyMapper.SetInputConnection(boySource.GetOutputPort())
boyMapper.SetScalarRange(0, 2)
boyActor = vtk.vtkActor()
boyActor.SetMapper(boyMapper)
boyActor.SetPosition(8, -4, 0)
boyActor.SetScale(1.5, 1.5, 1.5)
boyTextMapper = vtk.vtkTextMapper()
boyTextMapper.SetInput("Boy")
boyTextMapper.GetTextProperty().SetJustificationToCentered()
boyTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
boyTextMapper.GetTextProperty().SetColor(1, 0, 0)
boyTextMapper.GetTextProperty().SetFontSize(14)
boyTextActor = vtk.vtkActor2D()
boyTextActor.SetMapper(boyTextMapper)
boyTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
boyTextActor.GetPositionCoordinate().SetValue(8, -6.5, 0)
# ------------------------------------------------------------
# Create a cross cap
# ------------------------------------------------------------
crossCap = vtk.vtkParametricCrossCap()
crossCapSource = vtk.vtkParametricFunctionSource()
crossCapSource.SetParametricFunction(crossCap)
crossCapSource.SetScalarModeToY()
crossCapMapper = vtk.vtkPolyDataMapper()
crossCapMapper.SetInputConnection(crossCapSource.GetOutputPort())
crossCapActor = vtk.vtkActor()
crossCapActor.SetMapper(crossCapMapper)
crossCapActor.RotateX(65)
crossCapActor.SetPosition(16, -4, 0)
crossCapActor.SetScale(1.5, 1.5, 1.5)
crossCapTextMapper = vtk.vtkTextMapper()
crossCapTextMapper.SetInput("Cross.Cap")
crossCapTextMapper.GetTextProperty().SetJustificationToCentered()
crossCapTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
crossCapTextMapper.GetTextProperty().SetColor(1, 0, 0)
crossCapTextMapper.GetTextProperty().SetFontSize(14)
crossCapTextActor = vtk.vtkActor2D()
crossCapTextActor.SetMapper(crossCapTextMapper)
crossCapTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
crossCapTextActor.GetPositionCoordinate().SetValue(16, -6.5, 0)
# ------------------------------------------------------------
# Create Dini's surface
# ------------------------------------------------------------
dini = vtk.vtkParametricDini()
diniSource = vtk.vtkParametricFunctionSource()
diniSource.SetScalarModeToDistance()
diniSource.SetParametricFunction(dini)
diniMapper = vtk.vtkPolyDataMapper()
diniMapper.SetInputConnection(diniSource.GetOutputPort())
diniActor = vtk.vtkActor()
diniActor.SetMapper(diniMapper)
diniActor.RotateX(-90)
diniActor.SetPosition(24, -3, 0)
diniActor.SetScale(1.5, 1.5, 0.5)
diniTextMapper = vtk.vtkTextMapper()
diniTextMapper.SetInput("Dini")
diniTextMapper.GetTextProperty().SetJustificationToCentered()
diniTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
diniTextMapper.GetTextProperty().SetColor(1, 0, 0)
diniTextMapper.GetTextProperty().SetFontSize(14)
diniTextActor = vtk.vtkActor2D()
diniTextActor.SetMapper(diniTextMapper)
diniTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
diniTextActor.GetPositionCoordinate().SetValue(24, -6.5, 0)
# ------------------------------------------------------------
# Create Enneper's surface
# ------------------------------------------------------------
enneper = vtk.vtkParametricEnneper()
enneperSource = vtk.vtkParametricFunctionSource()
enneperSource.SetParametricFunction(enneper)
enneperSource.SetScalarModeToQuadrant()
enneperMapper = vtk.vtkPolyDataMapper()
enneperMapper.SetInputConnection(enneperSource.GetOutputPort())
enneperMapper.SetScalarRange(1, 4)
enneperActor = vtk.vtkActor()
enneperActor.SetMapper(enneperMapper)
enneperActor.SetPosition(0, -12, 0)
enneperActor.SetScale(0.25, 0.25, 0.25)
enneperTextMapper = vtk.vtkTextMapper()
enneperTextMapper.SetInput("Enneper")
enneperTextMapper.GetTextProperty().SetJustificationToCentered()
enneperTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
enneperTextMapper.GetTextProperty().SetColor(1, 0, 0)
enneperTextMapper.GetTextProperty().SetFontSize(14)
enneperTextActor = vtk.vtkActor2D()
enneperTextActor.SetMapper(enneperTextMapper)
enneperTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
enneperTextActor.GetPositionCoordinate().SetValue(0, -14.5, 0)
# ------------------------------------------------------------
# Create an ellipsoidal surface
# ------------------------------------------------------------
ellipsoid = vtk.vtkParametricEllipsoid()
ellipsoid.SetXRadius(1)
ellipsoid.SetYRadius(0.75)
ellipsoid.SetZRadius(0.5)
ellipsoidSource = vtk.vtkParametricFunctionSource()
ellipsoidSource.SetParametricFunction(ellipsoid)
ellipsoidSource.SetScalarModeToZ()
ellipsoidMapper = vtk.vtkPolyDataMapper()
ellipsoidMapper.SetInputConnection(ellipsoidSource.GetOutputPort())
ellipsoidMapper.SetScalarRange(-0.5, 0.5)
ellipsoidActor = vtk.vtkActor()
ellipsoidActor.SetMapper(ellipsoidMapper)
ellipsoidActor.SetPosition(8, -12, 0)
ellipsoidActor.SetScale(1.5, 1.5, 1.5)
ellipsoidTextMapper = vtk.vtkTextMapper()
ellipsoidTextMapper.SetInput("Ellipsoid")
ellipsoidTextMapper.GetTextProperty().SetJustificationToCentered()
ellipsoidTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
ellipsoidTextMapper.GetTextProperty().SetColor(1, 0, 0)
ellipsoidTextMapper.GetTextProperty().SetFontSize(14)
ellipsoidTextActor = vtk.vtkActor2D()
ellipsoidTextActor.SetMapper(ellipsoidTextMapper)
ellipsoidTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
ellipsoidTextActor.GetPositionCoordinate().SetValue(8, -14.5, 0)
# ------------------------------------------------------------
        # Create a surface with random hills on it.
# Note that for testing, we will disable the
# random generation of the surfaces. This is
# because random number generators do not
# return the same result on different operating
# systems.
# ------------------------------------------------------------
randomHills = vtk.vtkParametricRandomHills()
randomHills.AllowRandomGenerationOff()
randomHills.GenerateTheHills()
randomHillsSource = vtk.vtkParametricFunctionSource()
randomHillsSource.SetParametricFunction(randomHills)
randomHillsSource.GenerateTextureCoordinatesOn()
randomHillsMapper = vtk.vtkPolyDataMapper()
randomHillsMapper.SetInputConnection(randomHillsSource.GetOutputPort())
randomHillsActor = vtk.vtkActor()
randomHillsActor.SetMapper(randomHillsMapper)
randomHillsActor.SetPosition(16, -14, 0)
randomHillsActor.SetScale(0.2, 0.2, 0.2)
randomHillsActor.SetTexture(texture)
randomHillsTextMapper = vtk.vtkTextMapper()
randomHillsTextMapper.SetInput("Random.Hills")
randomHillsTextMapper.GetTextProperty().SetJustificationToCentered()
randomHillsTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
randomHillsTextMapper.GetTextProperty().SetColor(1, 0, 0)
randomHillsTextMapper.GetTextProperty().SetFontSize(14)
randomHillsTextActor = vtk.vtkActor2D()
randomHillsTextActor.SetMapper(randomHillsTextMapper)
randomHillsTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
randomHillsTextActor.GetPositionCoordinate().SetValue(16, -14.5, 0)
# ------------------------------------------------------------
        # Create Steiner's Roman Surface.
# ------------------------------------------------------------
roman = vtk.vtkParametricRoman()
roman.SetRadius(1.5)
romanSource = vtk.vtkParametricFunctionSource()
romanSource.SetParametricFunction(roman)
romanSource.SetScalarModeToX()
romanMapper = vtk.vtkPolyDataMapper()
romanMapper.SetInputConnection(romanSource.GetOutputPort())
romanActor = vtk.vtkActor()
romanActor.SetMapper(romanMapper)
romanActor.SetPosition(24, -12, 0)
romanTextMapper = vtk.vtkTextMapper()
romanTextMapper.SetInput("Roman")
romanTextMapper.GetTextProperty().SetJustificationToCentered()
romanTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
romanTextMapper.GetTextProperty().SetColor(1, 0, 0)
romanTextMapper.GetTextProperty().SetFontSize(14)
romanTextActor = vtk.vtkActor2D()
romanTextActor.SetMapper(romanTextMapper)
romanTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
romanTextActor.GetPositionCoordinate().SetValue(24, -14.5, 0)
# ------------------------------------------------------------
# Create the RenderWindow, Renderer and both Actors
# ------------------------------------------------------------
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# add actors
ren.AddViewProp(torusActor)
ren.AddViewProp(kleinActor)
ren.AddViewProp(klein2Actor)
ren.AddViewProp(toroidActor)
ren.AddViewProp(superEllipsoidActor)
ren.AddViewProp(mobiusActor)
ren.AddViewProp(splineActor)
ren.AddViewProp(spline2Actor)
ren.AddViewProp(sconicActor)
ren.AddViewProp(boyActor)
ren.AddViewProp(crossCapActor)
ren.AddViewProp(diniActor)
ren.AddViewProp(enneperActor)
ren.AddViewProp(ellipsoidActor)
ren.AddViewProp(randomHillsActor)
ren.AddViewProp(romanActor)
        # add text actors
ren.AddViewProp(torusTextActor)
ren.AddViewProp(kleinTextActor)
ren.AddViewProp(fig8KleinTextActor)
ren.AddViewProp(mobiusTextActor)
ren.AddViewProp(superToroidTextActor)
ren.AddViewProp(superEllipsoidTextActor)
ren.AddViewProp(splineTextActor)
ren.AddViewProp(spline2TextActor)
ren.AddViewProp(sconicTextActor)
ren.AddViewProp(boyTextActor)
ren.AddViewProp(crossCapTextActor)
ren.AddViewProp(diniTextActor)
ren.AddViewProp(enneperTextActor)
ren.AddViewProp(ellipsoidTextActor)
ren.AddViewProp(randomHillsTextActor)
ren.AddViewProp(romanTextActor)
ren.SetBackground(0.7, 0.8, 1)
renWin.SetSize(500, 500)
ren.ResetCamera()
ren.GetActiveCamera().Zoom(1.3)
iren.Initialize()
renWin.Render()
img_file = "TestParametricFunctions.png"
# NOTE: this test has a companion .tcl test. The threshold set
# here should be the same as the threshold in the .tcl
# test. Both tests should produce exactly the same results.
vtk.test.Testing.compareImage(iren.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=10)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestParametricFunctions, 'test')])
| 46.221831
| 119
| 0.634418
|
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class TestParametricFunctions(vtk.test.Testing.vtkTest):
def testParametricFunctions(self):
textureReader = vtk.vtkJPEGReader()
textureReader.SetFileName(VTK_DATA_ROOT + "/Data/beach.jpg")
texture = vtk.vtkTexture()
texture.SetInputConnection(textureReader.GetOutputPort())
torus = vtk.vtkParametricTorus()
torusSource = vtk.vtkParametricFunctionSource()
torusSource.SetParametricFunction(torus)
torusSource.SetScalarModeToPhase()
torusMapper = vtk.vtkPolyDataMapper()
torusMapper.SetInputConnection(torusSource.GetOutputPort())
torusMapper.SetScalarRange(0, 360)
torusActor = vtk.vtkActor()
torusActor.SetMapper(torusMapper)
torusActor.SetPosition(0, 12, 0)
torusTextMapper = vtk.vtkTextMapper()
torusTextMapper.SetInput("Torus")
torusTextMapper.GetTextProperty().SetJustificationToCentered()
torusTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
torusTextMapper.GetTextProperty().SetColor(1, 0, 0)
torusTextMapper.GetTextProperty().SetFontSize(14)
torusTextActor = vtk.vtkActor2D()
torusTextActor.SetMapper(torusTextMapper)
torusTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
torusTextActor.GetPositionCoordinate().SetValue(0, 9.5, 0)
klein = vtk.vtkParametricKlein()
kleinSource = vtk.vtkParametricFunctionSource()
kleinSource.SetParametricFunction(klein)
kleinSource.SetScalarModeToU0V0()
kleinMapper = vtk.vtkPolyDataMapper()
kleinMapper.SetInputConnection(kleinSource.GetOutputPort())
kleinMapper.SetScalarRange(0, 3)
kleinActor = vtk.vtkActor()
kleinActor.SetMapper(kleinMapper)
kleinActor.SetPosition(8, 10.5, 0)
kleinTextMapper = vtk.vtkTextMapper()
kleinTextMapper.SetInput("Klein")
kleinTextMapper.GetTextProperty().SetJustificationToCentered()
kleinTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
kleinTextMapper.GetTextProperty().SetColor(1, 0, 0)
kleinTextMapper.GetTextProperty().SetFontSize(14)
kleinTextActor = vtk.vtkActor2D()
kleinTextActor.SetMapper(kleinTextMapper)
kleinTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
kleinTextActor.GetPositionCoordinate().SetValue(8, 9.5, 0)
klein2 = vtk.vtkParametricFigure8Klein()
klein2Source = vtk.vtkParametricFunctionSource()
klein2Source.SetParametricFunction(klein2)
klein2Source.GenerateTextureCoordinatesOn()
klein2Mapper = vtk.vtkPolyDataMapper()
klein2Mapper.SetInputConnection(klein2Source.GetOutputPort())
klein2Mapper.SetScalarRange(0, 3)
klein2Actor = vtk.vtkActor()
klein2Actor.SetMapper(klein2Mapper)
klein2Actor.SetPosition(16, 12, 0)
klein2Actor.SetTexture(texture)
fig8KleinTextMapper = vtk.vtkTextMapper()
fig8KleinTextMapper.SetInput("Fig-8.Klein")
fig8KleinTextMapper.GetTextProperty().SetJustificationToCentered()
fig8KleinTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
fig8KleinTextMapper.GetTextProperty().SetColor(1, 0, 0)
fig8KleinTextMapper.GetTextProperty().SetFontSize(14)
fig8KleinTextActor = vtk.vtkActor2D()
fig8KleinTextActor.SetMapper(fig8KleinTextMapper)
fig8KleinTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
fig8KleinTextActor.GetPositionCoordinate().SetValue(16, 9.5, 0)
mobius = vtk.vtkParametricMobius()
mobiusSource = vtk.vtkParametricFunctionSource()
mobiusSource.SetParametricFunction(mobius)
mobiusSource.GenerateTextureCoordinatesOn()
mobiusMapper = vtk.vtkPolyDataMapper()
mobiusMapper.SetInputConnection(mobiusSource.GetOutputPort())
mobiusActor = vtk.vtkActor()
mobiusActor.SetMapper(mobiusMapper)
mobiusActor.RotateX(45)
mobiusActor.SetPosition(24, 12, 0)
mobiusActor.SetTexture(texture)
mobiusTextMapper = vtk.vtkTextMapper()
mobiusTextMapper.SetInput("Mobius")
mobiusTextMapper.GetTextProperty().SetJustificationToCentered()
mobiusTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
mobiusTextMapper.GetTextProperty().SetColor(1, 0, 0)
mobiusTextMapper.GetTextProperty().SetFontSize(14)
mobiusTextActor = vtk.vtkActor2D()
mobiusTextActor.SetMapper(mobiusTextMapper)
mobiusTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
mobiusTextActor.GetPositionCoordinate().SetValue(24, 9.5, 0)
toroid = vtk.vtkParametricSuperToroid()
toroid.SetN1(2)
toroid.SetN2(3)
toroidSource = vtk.vtkParametricFunctionSource()
toroidSource.SetParametricFunction(toroid)
toroidSource.SetScalarModeToU()
toroidMapper = vtk.vtkPolyDataMapper()
toroidMapper.SetInputConnection(toroidSource.GetOutputPort())
toroidMapper.SetScalarRange(0, 6.28)
toroidActor = vtk.vtkActor()
toroidActor.SetMapper(toroidMapper)
toroidActor.SetPosition(0, 4, 0)
superToroidTextMapper = vtk.vtkTextMapper()
superToroidTextMapper.SetInput("Super.Toroid")
superToroidTextMapper.GetTextProperty().SetJustificationToCentered()
superToroidTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
superToroidTextMapper.GetTextProperty().SetColor(1, 0, 0)
superToroidTextMapper.GetTextProperty().SetFontSize(14)
superToroidTextActor = vtk.vtkActor2D()
superToroidTextActor.SetMapper(superToroidTextMapper)
superToroidTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
superToroidTextActor.GetPositionCoordinate().SetValue(0, 1.5, 0)
superEllipsoid = vtk.vtkParametricSuperEllipsoid()
superEllipsoid.SetXRadius(1.25)
superEllipsoid.SetYRadius(1.5)
superEllipsoid.SetZRadius(1.0)
superEllipsoid.SetN1(1.1)
superEllipsoid.SetN2(1.75)
superEllipsoidSource = vtk.vtkParametricFunctionSource()
superEllipsoidSource.SetParametricFunction(superEllipsoid)
superEllipsoidSource.SetScalarModeToV()
superEllipsoidMapper = vtk.vtkPolyDataMapper()
superEllipsoidMapper.SetInputConnection(superEllipsoidSource.GetOutputPort())
superEllipsoidMapper.SetScalarRange(0, 3.14)
superEllipsoidActor = vtk.vtkActor()
superEllipsoidActor.SetMapper(superEllipsoidMapper)
superEllipsoidActor.SetPosition(8, 4, 0)
superEllipsoidTextMapper = vtk.vtkTextMapper()
superEllipsoidTextMapper.SetInput("Super.Ellipsoid")
superEllipsoidTextMapper.GetTextProperty().SetJustificationToCentered()
superEllipsoidTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
superEllipsoidTextMapper.GetTextProperty().SetColor(1, 0, 0)
superEllipsoidTextMapper.GetTextProperty().SetFontSize(14)
superEllipsoidTextActor = vtk.vtkActor2D()
superEllipsoidTextActor.SetMapper(superEllipsoidTextMapper)
superEllipsoidTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
superEllipsoidTextActor.GetPositionCoordinate().SetValue(8, 1.5, 0)
splinePoints = [
[0.50380158308139134, -0.60679315105396936, -0.37248976406291578],
[-0.4354646054261665, -0.85362339758017258, -0.84844312996065385],
[0.2163147512899315, -0.39797507012168643, -0.76700353518454523],
[0.97158415334838644, -0.58513467367046257, -0.35846037946569753],
[-0.64359767997804918, -0.94620739107309249, -0.90762176546623086],
[-0.39901219094126117, -0.1978931497772658, 0.0098316934936828471],
[-0.75872745167404765, 0.067719714281950116, 0.165237936733867],
[-0.84599731389712418, -0.67685466896596114, 0.10357868909071133],
[0.84702754758625654, -0.0080077177882230677, -0.58571286666473044],
[-0.076150034124101484, 0.14637647622561856, 0.1494359239700418] ]
inputPoints = vtk.vtkPoints()
for i in range(0, 10):
inputPoints.InsertPoint(i, splinePoints[i])
spline = vtk.vtkParametricSpline()
spline.SetPoints(inputPoints)
spline.ClosedOff()
splineSource = vtk.vtkParametricFunctionSource()
splineSource.SetParametricFunction(spline)
splineMapper = vtk.vtkPolyDataMapper()
splineMapper.SetInputConnection(splineSource.GetOutputPort())
splineActor = vtk.vtkActor()
splineActor.SetMapper(splineMapper)
splineActor.SetPosition(16, 4, 0)
splineActor.GetProperty().SetColor(0, 0, 0)
splineTextMapper = vtk.vtkTextMapper()
splineTextMapper.SetInput("Open.Spline")
splineTextMapper.GetTextProperty().SetJustificationToCentered()
splineTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
splineTextMapper.GetTextProperty().SetColor(1, 0, 0)
splineTextMapper.GetTextProperty().SetFontSize(14)
splineTextActor = vtk.vtkActor2D()
splineTextActor.SetMapper(splineTextMapper)
splineTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
splineTextActor.GetPositionCoordinate().SetValue(16, 1.5, 0)
spline2 = vtk.vtkParametricSpline()
spline2.SetPoints(inputPoints)
spline2.ClosedOn()
spline2Source = vtk.vtkParametricFunctionSource()
spline2Source.SetParametricFunction(spline2)
spline2Mapper = vtk.vtkPolyDataMapper()
spline2Mapper.SetInputConnection(spline2Source.GetOutputPort())
spline2Actor = vtk.vtkActor()
spline2Actor.SetMapper(spline2Mapper)
spline2Actor.SetPosition(24, 4, 0)
spline2Actor.GetProperty().SetColor(0, 0, 0)
spline2TextMapper = vtk.vtkTextMapper()
spline2TextMapper.SetInput("Closed.Spline")
spline2TextMapper.GetTextProperty().SetJustificationToCentered()
spline2TextMapper.GetTextProperty().SetVerticalJustificationToCentered()
spline2TextMapper.GetTextProperty().SetColor(1, 0, 0)
spline2TextMapper.GetTextProperty().SetFontSize(14)
spline2TextActor = vtk.vtkActor2D()
spline2TextActor.SetMapper(spline2TextMapper)
spline2TextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
spline2TextActor.GetPositionCoordinate().SetValue(24, 1.5, 0)
sconic = vtk.vtkParametricConicSpiral()
sconic.SetA(0.8)
sconic.SetB(2.5)
sconic.SetC(0.4)
sconicSource = vtk.vtkParametricFunctionSource()
sconicSource.SetParametricFunction(sconic)
sconicSource.SetScalarModeToDistance()
sconicMapper = vtk.vtkPolyDataMapper()
sconicMapper.SetInputConnection(sconicSource.GetOutputPort())
sconicActor = vtk.vtkActor()
sconicActor.SetMapper(sconicMapper)
sconicMapper.SetScalarRange(0, 9)
sconicActor.SetPosition(0, -4, 0)
sconicActor.SetScale(1.2, 1.2, 1.2)
sconicTextMapper = vtk.vtkTextMapper()
sconicTextMapper.SetInput("Spiral.Conic")
sconicTextMapper.GetTextProperty().SetJustificationToCentered()
sconicTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
sconicTextMapper.GetTextProperty().SetColor(1, 0, 0)
sconicTextMapper.GetTextProperty().SetFontSize(14)
sconicTextActor = vtk.vtkActor2D()
sconicTextActor.SetMapper(sconicTextMapper)
sconicTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
sconicTextActor.GetPositionCoordinate().SetValue(0, -6.5, 0)
boy = vtk.vtkParametricBoy()
boySource = vtk.vtkParametricFunctionSource()
boySource.SetParametricFunction(boy)
boySource.SetScalarModeToModulus()
boyMapper = vtk.vtkPolyDataMapper()
boyMapper.SetInputConnection(boySource.GetOutputPort())
boyMapper.SetScalarRange(0, 2)
boyActor = vtk.vtkActor()
boyActor.SetMapper(boyMapper)
boyActor.SetPosition(8, -4, 0)
boyActor.SetScale(1.5, 1.5, 1.5)
boyTextMapper = vtk.vtkTextMapper()
boyTextMapper.SetInput("Boy")
boyTextMapper.GetTextProperty().SetJustificationToCentered()
boyTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
boyTextMapper.GetTextProperty().SetColor(1, 0, 0)
boyTextMapper.GetTextProperty().SetFontSize(14)
boyTextActor = vtk.vtkActor2D()
boyTextActor.SetMapper(boyTextMapper)
boyTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
boyTextActor.GetPositionCoordinate().SetValue(8, -6.5, 0)
crossCap = vtk.vtkParametricCrossCap()
crossCapSource = vtk.vtkParametricFunctionSource()
crossCapSource.SetParametricFunction(crossCap)
crossCapSource.SetScalarModeToY()
crossCapMapper = vtk.vtkPolyDataMapper()
crossCapMapper.SetInputConnection(crossCapSource.GetOutputPort())
crossCapActor = vtk.vtkActor()
crossCapActor.SetMapper(crossCapMapper)
crossCapActor.RotateX(65)
crossCapActor.SetPosition(16, -4, 0)
crossCapActor.SetScale(1.5, 1.5, 1.5)
crossCapTextMapper = vtk.vtkTextMapper()
crossCapTextMapper.SetInput("Cross.Cap")
crossCapTextMapper.GetTextProperty().SetJustificationToCentered()
crossCapTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
crossCapTextMapper.GetTextProperty().SetColor(1, 0, 0)
crossCapTextMapper.GetTextProperty().SetFontSize(14)
crossCapTextActor = vtk.vtkActor2D()
crossCapTextActor.SetMapper(crossCapTextMapper)
crossCapTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
crossCapTextActor.GetPositionCoordinate().SetValue(16, -6.5, 0)
dini = vtk.vtkParametricDini()
diniSource = vtk.vtkParametricFunctionSource()
diniSource.SetScalarModeToDistance()
diniSource.SetParametricFunction(dini)
diniMapper = vtk.vtkPolyDataMapper()
diniMapper.SetInputConnection(diniSource.GetOutputPort())
diniActor = vtk.vtkActor()
diniActor.SetMapper(diniMapper)
diniActor.RotateX(-90)
diniActor.SetPosition(24, -3, 0)
diniActor.SetScale(1.5, 1.5, 0.5)
diniTextMapper = vtk.vtkTextMapper()
diniTextMapper.SetInput("Dini")
diniTextMapper.GetTextProperty().SetJustificationToCentered()
diniTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
diniTextMapper.GetTextProperty().SetColor(1, 0, 0)
diniTextMapper.GetTextProperty().SetFontSize(14)
diniTextActor = vtk.vtkActor2D()
diniTextActor.SetMapper(diniTextMapper)
diniTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
diniTextActor.GetPositionCoordinate().SetValue(24, -6.5, 0)
enneper = vtk.vtkParametricEnneper()
enneperSource = vtk.vtkParametricFunctionSource()
enneperSource.SetParametricFunction(enneper)
enneperSource.SetScalarModeToQuadrant()
enneperMapper = vtk.vtkPolyDataMapper()
enneperMapper.SetInputConnection(enneperSource.GetOutputPort())
enneperMapper.SetScalarRange(1, 4)
enneperActor = vtk.vtkActor()
enneperActor.SetMapper(enneperMapper)
enneperActor.SetPosition(0, -12, 0)
enneperActor.SetScale(0.25, 0.25, 0.25)
enneperTextMapper = vtk.vtkTextMapper()
enneperTextMapper.SetInput("Enneper")
enneperTextMapper.GetTextProperty().SetJustificationToCentered()
enneperTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
enneperTextMapper.GetTextProperty().SetColor(1, 0, 0)
enneperTextMapper.GetTextProperty().SetFontSize(14)
enneperTextActor = vtk.vtkActor2D()
enneperTextActor.SetMapper(enneperTextMapper)
enneperTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
enneperTextActor.GetPositionCoordinate().SetValue(0, -14.5, 0)
ellipsoid = vtk.vtkParametricEllipsoid()
ellipsoid.SetXRadius(1)
ellipsoid.SetYRadius(0.75)
ellipsoid.SetZRadius(0.5)
ellipsoidSource = vtk.vtkParametricFunctionSource()
ellipsoidSource.SetParametricFunction(ellipsoid)
ellipsoidSource.SetScalarModeToZ()
ellipsoidMapper = vtk.vtkPolyDataMapper()
ellipsoidMapper.SetInputConnection(ellipsoidSource.GetOutputPort())
ellipsoidMapper.SetScalarRange(-0.5, 0.5)
ellipsoidActor = vtk.vtkActor()
ellipsoidActor.SetMapper(ellipsoidMapper)
ellipsoidActor.SetPosition(8, -12, 0)
ellipsoidActor.SetScale(1.5, 1.5, 1.5)
ellipsoidTextMapper = vtk.vtkTextMapper()
ellipsoidTextMapper.SetInput("Ellipsoid")
ellipsoidTextMapper.GetTextProperty().SetJustificationToCentered()
ellipsoidTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
ellipsoidTextMapper.GetTextProperty().SetColor(1, 0, 0)
ellipsoidTextMapper.GetTextProperty().SetFontSize(14)
ellipsoidTextActor = vtk.vtkActor2D()
ellipsoidTextActor.SetMapper(ellipsoidTextMapper)
ellipsoidTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
ellipsoidTextActor.GetPositionCoordinate().SetValue(8, -14.5, 0)
randomHills = vtk.vtkParametricRandomHills()
randomHills.AllowRandomGenerationOff()
randomHills.GenerateTheHills()
randomHillsSource = vtk.vtkParametricFunctionSource()
randomHillsSource.SetParametricFunction(randomHills)
randomHillsSource.GenerateTextureCoordinatesOn()
randomHillsMapper = vtk.vtkPolyDataMapper()
randomHillsMapper.SetInputConnection(randomHillsSource.GetOutputPort())
randomHillsActor = vtk.vtkActor()
randomHillsActor.SetMapper(randomHillsMapper)
randomHillsActor.SetPosition(16, -14, 0)
randomHillsActor.SetScale(0.2, 0.2, 0.2)
randomHillsActor.SetTexture(texture)
randomHillsTextMapper = vtk.vtkTextMapper()
randomHillsTextMapper.SetInput("Random.Hills")
randomHillsTextMapper.GetTextProperty().SetJustificationToCentered()
randomHillsTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
randomHillsTextMapper.GetTextProperty().SetColor(1, 0, 0)
randomHillsTextMapper.GetTextProperty().SetFontSize(14)
randomHillsTextActor = vtk.vtkActor2D()
randomHillsTextActor.SetMapper(randomHillsTextMapper)
randomHillsTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
randomHillsTextActor.GetPositionCoordinate().SetValue(16, -14.5, 0)
roman = vtk.vtkParametricRoman()
roman.SetRadius(1.5)
romanSource = vtk.vtkParametricFunctionSource()
romanSource.SetParametricFunction(roman)
romanSource.SetScalarModeToX()
romanMapper = vtk.vtkPolyDataMapper()
romanMapper.SetInputConnection(romanSource.GetOutputPort())
romanActor = vtk.vtkActor()
romanActor.SetMapper(romanMapper)
romanActor.SetPosition(24, -12, 0)
romanTextMapper = vtk.vtkTextMapper()
romanTextMapper.SetInput("Roman")
romanTextMapper.GetTextProperty().SetJustificationToCentered()
romanTextMapper.GetTextProperty().SetVerticalJustificationToCentered()
romanTextMapper.GetTextProperty().SetColor(1, 0, 0)
romanTextMapper.GetTextProperty().SetFontSize(14)
romanTextActor = vtk.vtkActor2D()
romanTextActor.SetMapper(romanTextMapper)
romanTextActor.GetPositionCoordinate().SetCoordinateSystemToWorld()
romanTextActor.GetPositionCoordinate().SetValue(24, -14.5, 0)
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.AddViewProp(torusActor)
ren.AddViewProp(kleinActor)
ren.AddViewProp(klein2Actor)
ren.AddViewProp(toroidActor)
ren.AddViewProp(superEllipsoidActor)
ren.AddViewProp(mobiusActor)
ren.AddViewProp(splineActor)
ren.AddViewProp(spline2Actor)
ren.AddViewProp(sconicActor)
ren.AddViewProp(boyActor)
ren.AddViewProp(crossCapActor)
ren.AddViewProp(diniActor)
ren.AddViewProp(enneperActor)
ren.AddViewProp(ellipsoidActor)
ren.AddViewProp(randomHillsActor)
ren.AddViewProp(romanActor)
ren.AddViewProp(torusTextActor)
ren.AddViewProp(kleinTextActor)
ren.AddViewProp(fig8KleinTextActor)
ren.AddViewProp(mobiusTextActor)
ren.AddViewProp(superToroidTextActor)
ren.AddViewProp(superEllipsoidTextActor)
ren.AddViewProp(splineTextActor)
ren.AddViewProp(spline2TextActor)
ren.AddViewProp(sconicTextActor)
ren.AddViewProp(boyTextActor)
ren.AddViewProp(crossCapTextActor)
ren.AddViewProp(diniTextActor)
ren.AddViewProp(enneperTextActor)
ren.AddViewProp(ellipsoidTextActor)
ren.AddViewProp(randomHillsTextActor)
ren.AddViewProp(romanTextActor)
ren.SetBackground(0.7, 0.8, 1)
renWin.SetSize(500, 500)
ren.ResetCamera()
ren.GetActiveCamera().Zoom(1.3)
iren.Initialize()
renWin.Render()
img_file = "TestParametricFunctions.png"
vtk.test.Testing.compareImage(iren.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=10)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestParametricFunctions, 'test')])
| true
| true
|
790814a2bac30de6a65438a4a744bdc0de13f2d6
| 1,727
|
py
|
Python
|
perfrunner/tests/fio.py
|
bochun/perfrunner
|
e215c73240381cf82fddc40856f560369c9b75a8
|
[
"Apache-2.0"
] | 18
|
2015-10-28T23:12:07.000Z
|
2022-01-04T14:23:37.000Z
|
perfrunner/tests/fio.py
|
bochun/perfrunner
|
e215c73240381cf82fddc40856f560369c9b75a8
|
[
"Apache-2.0"
] | 11
|
2019-03-19T12:02:31.000Z
|
2022-02-11T03:39:44.000Z
|
perfrunner/tests/fio.py
|
bochun/perfrunner
|
e215c73240381cf82fddc40856f560369c9b75a8
|
[
"Apache-2.0"
] | 39
|
2015-06-07T09:17:16.000Z
|
2022-03-06T20:32:01.000Z
|
from collections import defaultdict
import requests
from logger import logger
from perfrunner.helpers.misc import pretty_dict
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.tests import PerfTest
class FIOTest(PerfTest):
TRACKER = 'fio.sc.couchbase.com'
TEMPLATE = {
'group': '{}, random mixed reads and writes, IOPS',
'metric': None,
'value': None,
}
def __init__(self, cluster_spec, test_config, verbose):
self.cluster_spec = cluster_spec
self.test_config = test_config
self.remote = RemoteHelper(cluster_spec, verbose)
def __exit__(self, *args, **kwargs):
pass
@staticmethod
def _parse(results):
"""Parse the test output.
See also https://github.com/axboe/fio/blob/master/HOWTO
"""
stats = defaultdict(int)
for host, output in results.items():
for job in output.split():
stats[host] += int(job.split(';')[7]) # reads
stats[host] += int(job.split(';')[48]) # writes
return stats
def _post(self, data):
data = pretty_dict(data)
logger.info('Posting: {}'.format(data))
requests.post('http://{}/api/v1/benchmarks'.format(self.TRACKER),
data=data)
def _report_kpi(self, stats):
for host, iops in stats.items():
data = self.TEMPLATE.copy()
data['group'] = data['group'].format(self.cluster_spec.name.title())
data['metric'] = host
data['value'] = iops
self._post(data)
def run(self):
stats = self.remote.fio(self.test_config.fio['config'])
self._report_kpi(self._parse(stats))
| 28.783333
| 80
| 0.600463
|
from collections import defaultdict
import requests
from logger import logger
from perfrunner.helpers.misc import pretty_dict
from perfrunner.helpers.remote import RemoteHelper
from perfrunner.tests import PerfTest
class FIOTest(PerfTest):
TRACKER = 'fio.sc.couchbase.com'
TEMPLATE = {
'group': '{}, random mixed reads and writes, IOPS',
'metric': None,
'value': None,
}
def __init__(self, cluster_spec, test_config, verbose):
self.cluster_spec = cluster_spec
self.test_config = test_config
self.remote = RemoteHelper(cluster_spec, verbose)
def __exit__(self, *args, **kwargs):
pass
@staticmethod
def _parse(results):
stats = defaultdict(int)
for host, output in results.items():
for job in output.split():
stats[host] += int(job.split(';')[7])
stats[host] += int(job.split(';')[48])
return stats
def _post(self, data):
data = pretty_dict(data)
logger.info('Posting: {}'.format(data))
requests.post('http://{}/api/v1/benchmarks'.format(self.TRACKER),
data=data)
def _report_kpi(self, stats):
for host, iops in stats.items():
data = self.TEMPLATE.copy()
data['group'] = data['group'].format(self.cluster_spec.name.title())
data['metric'] = host
data['value'] = iops
self._post(data)
def run(self):
stats = self.remote.fio(self.test_config.fio['config'])
self._report_kpi(self._parse(stats))
| true
| true
|
790818afe12e30c308b59599db9c9a02a9c4f36c
| 7,317
|
py
|
Python
|
checkov/terraform/plan_runner.py
|
BenjaDiaz/checkov
|
c53e32f1654e4ee771abf2001b3cb7df16752f6e
|
[
"Apache-2.0"
] | 1
|
2022-02-20T21:20:39.000Z
|
2022-02-20T21:20:39.000Z
|
checkov/terraform/plan_runner.py
|
BenjaDiaz/checkov
|
c53e32f1654e4ee771abf2001b3cb7df16752f6e
|
[
"Apache-2.0"
] | 3
|
2022-03-07T20:37:31.000Z
|
2022-03-21T20:20:14.000Z
|
checkov/terraform/plan_runner.py
|
BenjaDiaz/checkov
|
c53e32f1654e4ee771abf2001b3cb7df16752f6e
|
[
"Apache-2.0"
] | null | null | null |
import json
import logging
import os
from typing import Optional, List
from checkov.common.checks_infra.registry import get_graph_checks_registry
from checkov.common.graph.graph_builder.graph_components.attribute_names import CustomAttributes
from checkov.common.output.record import Record
from checkov.common.output.report import Report, CheckType
from checkov.common.runners.base_runner import filter_ignored_paths
from checkov.runner_filter import RunnerFilter
from checkov.terraform.checks.resource.registry import resource_registry
from checkov.terraform.context_parsers.registry import parser_registry
from checkov.terraform.plan_parser import parse_tf_plan
from checkov.terraform.runner import Runner as TerraformRunner, merge_reports
class Runner(TerraformRunner):
check_type = CheckType.TERRAFORM_PLAN
def __init__(self):
super().__init__()
self.template_lines = {}
self.graph_registry = get_graph_checks_registry(super().check_type)
block_type_registries = {
'resource': resource_registry,
}
def run(
self,
root_folder: Optional[str] = None,
external_checks_dir: Optional[List[str]] = None,
files: Optional[List[str]] = None,
runner_filter: RunnerFilter = RunnerFilter(),
collect_skip_comments: bool = True
) -> Report:
report = Report(self.check_type)
self.tf_definitions = {}
parsing_errors = {}
if external_checks_dir:
for directory in external_checks_dir:
resource_registry.load_external_checks(directory)
self.graph_registry.load_external_checks(directory)
if root_folder:
files = [] if not files else files
for root, d_names, f_names in os.walk(root_folder):
filter_ignored_paths(root, d_names, runner_filter.excluded_paths)
filter_ignored_paths(root, f_names, runner_filter.excluded_paths)
for file in f_names:
file_ending = os.path.splitext(file)[1]
if file_ending == '.json':
try:
with open(f'{root}/{file}') as f:
content = json.load(f)
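                                # Only Terraform plan files are collected: a
                                # plan JSON is recognized by its top-level
                                # 'terraform_version' key.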
if isinstance(content, dict) and content.get('terraform_version'):
files.append(os.path.join(root, file))
except Exception as e:
logging.debug(f'Failed to load json file {root}/{file}, skipping')
logging.debug('Failure message:')
logging.debug(e, stack_info=True)
if files:
files = [os.path.abspath(file) for file in files]
for file in files:
if file.endswith(".json"):
tf_definitions, template_lines = parse_tf_plan(file)
if not tf_definitions:
continue
self.tf_definitions = tf_definitions
self.template_lines = template_lines
self.check_tf_definition(report, runner_filter)
else:
                    logging.debug(f'Failed to load {file} as it is not a .json file, skipping')
report.add_parsing_errors(parsing_errors.keys())
if self.tf_definitions:
graph = self.graph_manager.build_graph_from_definitions(self.tf_definitions, render_variables=False)
self.graph_manager.save_graph(graph)
graph_report = self.get_graph_checks_report(root_folder, runner_filter)
merge_reports(report, graph_report)
return report
def get_entity_context_and_evaluations(self, entity):
raw_context = self.get_entity_context(entity[CustomAttributes.BLOCK_NAME].split("."), entity[CustomAttributes.FILE_PATH])
raw_context['definition_path'] = entity[CustomAttributes.BLOCK_NAME].split('.')
return raw_context, None
def check_tf_definition(self, report, runner_filter):
for full_file_path, definition in self.tf_definitions.items():
scanned_file = f"/{os.path.relpath(full_file_path)}"
logging.debug(f"Scanning file: {scanned_file}")
for block_type in definition.keys():
if block_type in self.block_type_registries.keys():
self.run_block(definition[block_type], full_file_path, report, scanned_file,
block_type, runner_filter)
def run_block(self, entities, full_file_path, report, scanned_file, block_type, runner_filter=None):
registry = self.block_type_registries[block_type]
if registry:
for entity in entities:
context_parser = parser_registry.context_parsers[block_type]
definition_path = context_parser.get_entity_context_path(entity)
entity_id = ".".join(definition_path)
                # An entity can exist only once per directory, and likewise only once per file
entity_context = self.get_entity_context(definition_path, full_file_path)
entity_lines_range = [entity_context.get('start_line'), entity_context.get('end_line')]
entity_code_lines = entity_context.get('code_lines')
entity_address = entity_context.get('address')
results = registry.scan(scanned_file, entity, [], runner_filter)
for check, check_result in results.items():
record = Record(check_id=check.id, bc_check_id=check.bc_id, check_name=check.name, check_result=check_result,
code_block=entity_code_lines, file_path=scanned_file,
file_line_range=entity_lines_range,
resource=entity_id, resource_address=entity_address, evaluations=None,
check_class=check.__class__.__module__, file_abs_path=full_file_path)
record.set_guideline(check.guideline)
report.add_record(record=record)
def get_entity_context(self, definition_path, full_file_path):
entity_context = {}
if full_file_path not in self.tf_definitions:
logging.debug(f'Tried to look up file {full_file_path} in TF plan entity definitions, but it does not exist')
return entity_context
for resource in self.tf_definitions.get(full_file_path, {}).get('resource', []):
resource_type = definition_path[0]
if resource_type in resource.keys():
resource_name = definition_path[1]
if resource_name in resource[resource_type].keys():
                    resource_definition = resource[resource_type][resource_name]
                    entity_context['start_line'] = resource_definition['start_line'][0]
                    entity_context['end_line'] = resource_definition['end_line'][0]
                    entity_context['code_lines'] = self.template_lines[
                        entity_context['start_line']:entity_context['end_line']]
                    entity_context['address'] = resource_definition['__address__']
return entity_context
return entity_context
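# A minimal usage sketch (hypothetical path; assumes a plan JSON produced by
# `terraform show -json tfplan.binary > tfplan.json`):
#   runner = Runner()
#   report = runner.run(files=["/path/to/tfplan.json"])
#   print(len(report.passed_checks), len(report.failed_checks))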
| 50.116438
| 129
| 0.632636
|
import json
import logging
import os
from typing import Optional, List
from checkov.common.checks_infra.registry import get_graph_checks_registry
from checkov.common.graph.graph_builder.graph_components.attribute_names import CustomAttributes
from checkov.common.output.record import Record
from checkov.common.output.report import Report, CheckType
from checkov.common.runners.base_runner import filter_ignored_paths
from checkov.runner_filter import RunnerFilter
from checkov.terraform.checks.resource.registry import resource_registry
from checkov.terraform.context_parsers.registry import parser_registry
from checkov.terraform.plan_parser import parse_tf_plan
from checkov.terraform.runner import Runner as TerraformRunner, merge_reports
class Runner(TerraformRunner):
check_type = CheckType.TERRAFORM_PLAN
def __init__(self):
super().__init__()
self.template_lines = {}
self.graph_registry = get_graph_checks_registry(super().check_type)
block_type_registries = {
'resource': resource_registry,
}
def run(
self,
root_folder: Optional[str] = None,
external_checks_dir: Optional[List[str]] = None,
files: Optional[List[str]] = None,
runner_filter: RunnerFilter = RunnerFilter(),
collect_skip_comments: bool = True
) -> Report:
report = Report(self.check_type)
self.tf_definitions = {}
parsing_errors = {}
if external_checks_dir:
for directory in external_checks_dir:
resource_registry.load_external_checks(directory)
self.graph_registry.load_external_checks(directory)
if root_folder:
files = [] if not files else files
for root, d_names, f_names in os.walk(root_folder):
filter_ignored_paths(root, d_names, runner_filter.excluded_paths)
filter_ignored_paths(root, f_names, runner_filter.excluded_paths)
for file in f_names:
file_ending = os.path.splitext(file)[1]
if file_ending == '.json':
try:
with open(f'{root}/{file}') as f:
content = json.load(f)
if isinstance(content, dict) and content.get('terraform_version'):
files.append(os.path.join(root, file))
except Exception as e:
logging.debug(f'Failed to load json file {root}/{file}, skipping')
logging.debug('Failure message:')
logging.debug(e, stack_info=True)
if files:
files = [os.path.abspath(file) for file in files]
for file in files:
if file.endswith(".json"):
tf_definitions, template_lines = parse_tf_plan(file)
if not tf_definitions:
continue
self.tf_definitions = tf_definitions
self.template_lines = template_lines
self.check_tf_definition(report, runner_filter)
else:
                    logging.debug(f'Failed to load {file} as it is not a .json file, skipping')
report.add_parsing_errors(parsing_errors.keys())
if self.tf_definitions:
graph = self.graph_manager.build_graph_from_definitions(self.tf_definitions, render_variables=False)
self.graph_manager.save_graph(graph)
graph_report = self.get_graph_checks_report(root_folder, runner_filter)
merge_reports(report, graph_report)
return report
def get_entity_context_and_evaluations(self, entity):
raw_context = self.get_entity_context(entity[CustomAttributes.BLOCK_NAME].split("."), entity[CustomAttributes.FILE_PATH])
raw_context['definition_path'] = entity[CustomAttributes.BLOCK_NAME].split('.')
return raw_context, None
def check_tf_definition(self, report, runner_filter):
for full_file_path, definition in self.tf_definitions.items():
scanned_file = f"/{os.path.relpath(full_file_path)}"
logging.debug(f"Scanning file: {scanned_file}")
for block_type in definition.keys():
if block_type in self.block_type_registries.keys():
self.run_block(definition[block_type], full_file_path, report, scanned_file,
block_type, runner_filter)
def run_block(self, entities, full_file_path, report, scanned_file, block_type, runner_filter=None):
registry = self.block_type_registries[block_type]
if registry:
for entity in entities:
context_parser = parser_registry.context_parsers[block_type]
definition_path = context_parser.get_entity_context_path(entity)
entity_id = ".".join(definition_path)
entity_context = self.get_entity_context(definition_path, full_file_path)
entity_lines_range = [entity_context.get('start_line'), entity_context.get('end_line')]
entity_code_lines = entity_context.get('code_lines')
entity_address = entity_context.get('address')
results = registry.scan(scanned_file, entity, [], runner_filter)
for check, check_result in results.items():
record = Record(check_id=check.id, bc_check_id=check.bc_id, check_name=check.name, check_result=check_result,
code_block=entity_code_lines, file_path=scanned_file,
file_line_range=entity_lines_range,
resource=entity_id, resource_address=entity_address, evaluations=None,
check_class=check.__class__.__module__, file_abs_path=full_file_path)
record.set_guideline(check.guideline)
report.add_record(record=record)
def get_entity_context(self, definition_path, full_file_path):
entity_context = {}
if full_file_path not in self.tf_definitions:
logging.debug(f'Tried to look up file {full_file_path} in TF plan entity definitions, but it does not exist')
return entity_context
for resource in self.tf_definitions.get(full_file_path, {}).get('resource', []):
resource_type = definition_path[0]
if resource_type in resource.keys():
resource_name = definition_path[1]
if resource_name in resource[resource_type].keys():
                    resource_definition = resource[resource_type][resource_name]
                    entity_context['start_line'] = resource_definition['start_line'][0]
                    entity_context['end_line'] = resource_definition['end_line'][0]
                    entity_context['code_lines'] = self.template_lines[
                        entity_context['start_line']:entity_context['end_line']]
                    entity_context['address'] = resource_definition['__address__']
return entity_context
return entity_context
| true
| true
|
7908194a286eaf38694ca25eb254c4b4b6db95fd
| 1,829
|
py
|
Python
|
CNN/code/filter_visualiton.py
|
Zeng-WH/ML2020
|
f467a6260cd782968696950ef74f3780933cdcdd
|
[
"MIT"
] | 2
|
2020-11-26T14:46:18.000Z
|
2021-02-06T06:25:43.000Z
|
CNN/code/filter_visualiton.py
|
Zeng-WH/ML2020
|
f467a6260cd782968696950ef74f3780933cdcdd
|
[
"MIT"
] | null | null | null |
CNN/code/filter_visualiton.py
|
Zeng-WH/ML2020
|
f467a6260cd782968696950ef74f3780933cdcdd
|
[
"MIT"
] | null | null | null |
import os
import sys
import argparse
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import pickle
def normalize(image):
return (image - image.min()) / (image.max() - image.min())
layer_activations = None
def filter_explanation(x, model, cnnid, filterid, iteration=100, lr=1):
    # x: the input image to be optimized
    # cnnid, filterid: select which filter of which CNN layer to visualize
model.eval()
def hook(model, input, output):
global layer_activations
layer_activations = output
hook_handle = model.cnn[cnnid].register_forward_hook(hook)
    # Once the cnnid-th CNN layer has run forward, the hook is called first,
    # and only then does the forward pass continue to the next layer
    # Filter activation: first observe the activation map of x at the chosen filter
model(x.cuda())
    # Actually perform the forward pass
filter_activations = layer_activations[:, filterid, :, :].detach().cpu()
    # Extract the activation map of the filter selected by the filterid argument
x = x.cuda()
x.requires_grad_()
optimizer = Adam([x], lr=lr)
    # Use gradients and the optimizer to gradually modify the input image
    # so that the filter activation keeps growing
for iter in range(iteration):
optimizer.zero_grad()
model(x)
objective = -layer_activations[:, filterid, :, :].sum()
        # Probe how tiny changes to the image affect the activation;
        # the minus sign turns the minimization into a maximization
objective.backward()
optimizer.step()
        # Modify the input image to maximize the filter activation
filter_visualization = x.detach().cpu().squeeze()[0]
    # The image modification is done; all that remains is plotting,
    # so detach it and move it to a CPU tensor
hook_handle.remove()
    # Once a hook is registered on the model it stays registered; if more hooks
    # keep getting added, each forward pass does more and more work,
    # so the hook has to be removed here
return filter_activations, filter_visualization
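# A minimal usage sketch (hypothetical names; assumes `model` is a trained CNN
# exposing a `cnn` nn.Sequential attribute, and `x` is a [1, C, H, W] image
# tensor on which gradients may be taken):
#   activations, visualization = filter_explanation(x, model, cnnid=5,
#                                                   filterid=0, iteration=100, lr=0.1)
#   plt.imshow(normalize(visualization), cmap='gray')
#   plt.show()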
| 28.578125
| 77
| 0.697649
|
import os
import sys
import argparse
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import pickle
def normalize(image):
return (image - image.min()) / (image.max() - image.min())
layer_activations = None
def filter_explanation(x, model, cnnid, filterid, iteration=100, lr=1):
model.eval()
def hook(model, input, output):
global layer_activations
layer_activations = output
hook_handle = model.cnn[cnnid].register_forward_hook(hook)
model(x.cuda())
filter_activations = layer_activations[:, filterid, :, :].detach().cpu()
x = x.cuda()
x.requires_grad_()
optimizer = Adam([x], lr=lr)
for iter in range(iteration):
optimizer.zero_grad()
model(x)
objective = -layer_activations[:, filterid, :, :].sum()
objective.backward()
optimizer.step()
filter_visualization = x.detach().cpu().squeeze()[0]
hook_handle.remove()
return filter_activations, filter_visualization
| true
| true
|
790819ac744c6466b16fd5e4429510da065b78dc
| 3,550
|
py
|
Python
|
bsm/audit_record.py
|
haginara/openbsm-python
|
54d3127f69e1af216344540d46696aad12e95c12
|
[
"MIT"
] | 3
|
2019-11-18T18:32:21.000Z
|
2021-05-12T18:23:00.000Z
|
bsm/audit_record.py
|
haginara/openbsm-python
|
54d3127f69e1af216344540d46696aad12e95c12
|
[
"MIT"
] | null | null | null |
bsm/audit_record.py
|
haginara/openbsm-python
|
54d3127f69e1af216344540d46696aad12e95c12
|
[
"MIT"
] | null | null | null |
# Token type identifiers.
AUT_INVALID = 0x00
AUT_OTHER_FILE32 = 0x11
AUT_OHEADER = 0x12
AUT_TRAILER = 0x13
AUT_HEADER32 = 0x14
AUT_HEADER32_EX = 0x15
AUT_DATA = 0x21
AUT_IPC = 0x22
AUT_PATH = 0x23
AUT_SUBJECT32 = 0x24
AUT_XATPATH = 0x25
AUT_PROCESS32 = 0x26
AUT_RETURN32 = 0x27
AUT_TEXT = 0x28
AUT_OPAQUE = 0x29
AUT_IN_ADDR = 0x2A
AUT_IP = 0x2B
AUT_IPORT = 0x2C
AUT_ARG32 = 0x2D
AUT_SOCKET = 0x2E
AUT_SEQ = 0x2F
AUT_ACL = 0x30
AUT_ATTR = 0x31
AUT_IPC_PERM = 0x32
AUT_LABEL = 0x33
AUT_GROUPS = 0x34
AUT_ACE = 0x35
AUT_PRIV = 0x38
AUT_UPRIV = 0x39
AUT_LIAISON = 0x3A
AUT_NEWGROUPS = 0x3B
AUT_EXEC_ARGS = 0x3C
AUT_EXEC_ENV = 0x3D
AUT_ATTR32 = 0x3E
AUT_UNAUTH = 0x3F
AUT_XATOM = 0x40
AUT_XOBJ = 0x41
AUT_XPROTO = 0x42
AUT_XSELECT = 0x43
AUT_XCOLORMAP = 0x44
AUT_XCURSOR = 0x45
AUT_XFONT = 0x46
AUT_XGC = 0x47
AUT_XPIXMAP = 0x48
AUT_XPROPERTY = 0x49
AUT_XWINDOW = 0x4A
AUT_XCLIENT = 0x4B
AUT_CMD = 0x51
AUT_EXIT = 0x52
AUT_ZONENAME = 0x60
AUT_HOST = 0x70
AUT_ARG64 = 0x71
AUT_RETURN64 = 0x72
AUT_ATTR64 = 0x73
AUT_HEADER64 = 0x74
AUT_SUBJECT64 = 0x75
AUT_PROCESS64 = 0x77
AUT_OTHER_FILE64 = 0x78
AUT_HEADER64_EX = 0x79
AUT_SUBJECT32_EX = 0x7A
AUT_PROCESS32_EX = 0x7B
AUT_SUBJECT64_EX = 0x7C
AUT_PROCESS64_EX = 0x7D
AUT_IN_ADDR_EX = 0x7E
AUT_SOCKET_EX = 0x7F
#
# Pre-64-bit BSM, 32-bit tokens weren't explicitly named as '32'. We have
# compatibility defines.
AUT_HEADER = AUT_HEADER32
AUT_ARG = AUT_ARG32
AUT_RETURN = AUT_RETURN32
AUT_SUBJECT = AUT_SUBJECT32
AUT_PROCESS = AUT_PROCESS32
AUT_OTHER_FILE = AUT_OTHER_FILE32
#
# *
# The values for the following token ids are not defined by BSM.
#
# XXXRW: Not sure how to handle these in OpenBSM yet, but I'll give them
# names more consistent with Sun's BSM. These originally came from Apple's
# BSM.
AUT_SOCKINET32 = 0x80 # XXX
AUT_SOCKINET128 = 0x81 # XXX
AUT_SOCKUNIX = 0x82 # XXX
_AUT_RIGHTS = 0x83 # XXX FreeBSD
AUT_ARG_UUID = 0x84 # UUID of argument object
AUT_RETURN_UUID = 0x85 # UUID of returned object
#
# Apple specific tokens
AUT_IDENTITY = 0xED
AUT_KRB5_PRINCIPA = 0xEE
AUT_CERT_HAHSH = 0xEF
# print values for the arbitrary token
AUP_BINARY = 0
AUP_OCTAL = 1
AUP_DECIMAL = 2
AUP_HEX = 3
AUP_STRING = 4
#
# data-types for the arbitrary token
AUR_BYTE = 0
AUR_CHAR = AUR_BYTE
AUR_SHORT = 1
AUR_INT32 = 2
AUR_INT = AUR_INT32
AUR_INT64 = 3
#
# ... and their sizes
AUR_BYTE_SIZE = 1 # sizeof(u_char)
AUR_CHAR_SIZE = AUR_BYTE_SIZE
AUR_SHORT_SIZE = 2 # sizeof(uint16_t)
AUR_INT32_SIZE = 4 # sizeof(uint32_t)
AUR_INT_SIZE = AUR_INT32_SIZE
AUR_INT64_SIZE = 8 # sizeof(uint64_t)
AUR_BYTE_FORMAT = "B"
AUR_CHAR_FORMAT = "c"
AUR_SHORT_FORMAT = "H"
AUR_INT32_FORMAT = "I"
AUR_INT_FORMAT = AUR_INT32_FORMAT
AUR_INT64_FORMAT = "Q"
#
# Modifiers for the header token
PAD_NOTATTR = 0x4000 # nonattributable event
PAD_FAILURE = 0x8000 # fail audit event
#
AUDIT_MAX_GROUPS = 16
#
# *
# A number of BSM versions are floating around and defined. Here are
# constants for them. OpenBSM uses the same token types, etc, used in the
# Solaris BSM version, but has a separate version number in order to
# identify a potentially different event identifier name space.
AUDIT_HEADER_VERSION_OLDDARWIN = 1  # In retrospect, a mistake.
AUDIT_HEADER_VERSION_SOLARIS = 2
AUDIT_HEADER_VERSION_TSOL25 = 3
AUDIT_HEADER_VERSION_TSOL = 4
AUDIT_HEADER_VERSION_OPENBSM10 = 10
AUDIT_HEADER_VERSION_OPENBSM11 = 11
AUDIT_HEADER_VERSION_OPENBSM = AUDIT_HEADER_VERSION_OPENBSM11
#
AUT_TRAILER_MAGIC = 0xB105
#
AUT_HEADERS = [AUT_HEADER32, AUT_HEADER32_EX, AUT_HEADER64, AUT_HEADER64_EX]
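# Illustrative helper (not part of the original constants file): AUT_HEADERS
# collects every header token id, so deciding whether a token byte opens a new
# BSM record is a simple membership test.
def is_record_start(token_id):
    """Return True when token_id is one of the BSM header token types."""
    return token_id in AUT_HEADERS
# e.g. is_record_start(AUT_HEADER32) -> True; is_record_start(AUT_TRAILER) -> False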
| 23.825503
| 76
| 0.773803
|
AUT_INVALID = 0x00
AUT_OTHER_FILE32 = 0x11
AUT_OHEADER = 0x12
AUT_TRAILER = 0x13
AUT_HEADER32 = 0x14
AUT_HEADER32_EX = 0x15
AUT_DATA = 0x21
AUT_IPC = 0x22
AUT_PATH = 0x23
AUT_SUBJECT32 = 0x24
AUT_XATPATH = 0x25
AUT_PROCESS32 = 0x26
AUT_RETURN32 = 0x27
AUT_TEXT = 0x28
AUT_OPAQUE = 0x29
AUT_IN_ADDR = 0x2A
AUT_IP = 0x2B
AUT_IPORT = 0x2C
AUT_ARG32 = 0x2D
AUT_SOCKET = 0x2E
AUT_SEQ = 0x2F
AUT_ACL = 0x30
AUT_ATTR = 0x31
AUT_IPC_PERM = 0x32
AUT_LABEL = 0x33
AUT_GROUPS = 0x34
AUT_ACE = 0x35
AUT_PRIV = 0x38
AUT_UPRIV = 0x39
AUT_LIAISON = 0x3A
AUT_NEWGROUPS = 0x3B
AUT_EXEC_ARGS = 0x3C
AUT_EXEC_ENV = 0x3D
AUT_ATTR32 = 0x3E
AUT_UNAUTH = 0x3F
AUT_XATOM = 0x40
AUT_XOBJ = 0x41
AUT_XPROTO = 0x42
AUT_XSELECT = 0x43
AUT_XCOLORMAP = 0x44
AUT_XCURSOR = 0x45
AUT_XFONT = 0x46
AUT_XGC = 0x47
AUT_XPIXMAP = 0x48
AUT_XPROPERTY = 0x49
AUT_XWINDOW = 0x4A
AUT_XCLIENT = 0x4B
AUT_CMD = 0x51
AUT_EXIT = 0x52
AUT_ZONENAME = 0x60
AUT_HOST = 0x70
AUT_ARG64 = 0x71
AUT_RETURN64 = 0x72
AUT_ATTR64 = 0x73
AUT_HEADER64 = 0x74
AUT_SUBJECT64 = 0x75
AUT_PROCESS64 = 0x77
AUT_OTHER_FILE64 = 0x78
AUT_HEADER64_EX = 0x79
AUT_SUBJECT32_EX = 0x7A
AUT_PROCESS32_EX = 0x7B
AUT_SUBJECT64_EX = 0x7C
AUT_PROCESS64_EX = 0x7D
AUT_IN_ADDR_EX = 0x7E
AUT_SOCKET_EX = 0x7F
# compatibility defines.
AUT_HEADER = AUT_HEADER32
AUT_ARG = AUT_ARG32
AUT_RETURN = AUT_RETURN32
AUT_SUBJECT = AUT_SUBJECT32
AUT_PROCESS = AUT_PROCESS32
AUT_OTHER_FILE = AUT_OTHER_FILE32
#
# *
# The values for the following token ids are not defined by BSM.
#
# XXXRW: Not sure how to handle these in OpenBSM yet, but I'll give them
AUT_SOCKINET32 = 0x80
AUT_SOCKINET128 = 0x81
AUT_SOCKUNIX = 0x82
_AUT_RIGHTS = 0x83
AUT_ARG_UUID = 0x84
AUT_RETURN_UUID = 0x85
AUT_IDENTITY = 0xED
AUT_KRB5_PRINCIPA = 0xEE
AUT_CERT_HAHSH = 0xEF
AUP_BINARY = 0
AUP_OCTAL = 1
AUP_DECIMAL = 2
AUP_HEX = 3
AUP_STRING = 4
AUR_BYTE = 0
AUR_CHAR = AUR_BYTE
AUR_SHORT = 1
AUR_INT32 = 2
AUR_INT = AUR_INT32
AUR_INT64 = 3
AUR_BYTE_SIZE = 1
AUR_CHAR_SIZE = AUR_BYTE_SIZE
AUR_SHORT_SIZE = 2
AUR_INT32_SIZE = 4
AUR_INT_SIZE = AUR_INT32_SIZE
AUR_INT64_SIZE = 8
AUR_BYTE_FORMAT = "B"
AUR_CHAR_FORMAT = "c"
AUR_SHORT_FORMAT = "H"
AUR_INT32_FORMAT = "I"
AUR_INT_FORMAT = AUR_INT32_FORMAT
AUR_INT64_FORMAT = "Q"
PAD_NOTATTR = 0x4000
PAD_FAILURE = 0x8000
AUDIT_MAX_GROUPS = 16
AUDIT_HEADER_VERSION_OLDDARWIN = 1
AUDIT_HEADER_VERSION_SOLARIS = 2
AUDIT_HEADER_VERSION_TSOL25 = 3
AUDIT_HEADER_VERSION_TSOL = 4
AUDIT_HEADER_VERSION_OPENBSM10 = 10
AUDIT_HEADER_VERSION_OPENBSM11 = 11
AUDIT_HEADER_VERSION_OPENBSM = AUDIT_HEADER_VERSION_OPENBSM11
AUT_TRAILER_MAGIC = 0xB105
AUT_HEADERS = [AUT_HEADER32, AUT_HEADER32_EX, AUT_HEADER64, AUT_HEADER64_EX]
| true
| true
|
79081a406dcde59514362123ec47547ee87dcc3b
| 2,692
|
py
|
Python
|
scripts/SCZ_RNAseq/syn4590909/rank_individual_genes.py
|
omarmaddouri/GCNCC_cross_validated
|
89576ad2c8459f065604656fd38a786d042f09e0
|
[
"MIT"
] | 1
|
2022-03-12T13:34:34.000Z
|
2022-03-12T13:34:34.000Z
|
scripts/SCZ_RNAseq/syn4590909/rank_individual_genes.py
|
omarmaddouri/GCNCC_cross_validated
|
89576ad2c8459f065604656fd38a786d042f09e0
|
[
"MIT"
] | 3
|
2022-02-09T23:28:07.000Z
|
2022-02-11T19:08:53.000Z
|
scripts/SCZ_RNAseq/syn4590909/rank_individual_genes.py
|
omarmaddouri/GCNCC_cross_validated
|
89576ad2c8459f065604656fd38a786d042f09e0
|
[
"MIT"
] | null | null | null |
import sys
from os.path import dirname, abspath
sys.path.append(dirname(dirname(abspath(__file__))))
from SCZ_RNAseq.syn4590909.utils import *
path="../../data/SCZ_RNAseq/output/syn4590909/"
dataset="PPI"
features = np.genfromtxt("{}{}.GE_Features.txt".format(path, dataset), dtype=np.dtype(np.float32))
labels = get_clinical_status_syn4590909()
clusters = open("{}{}.clusters_individual_gene.txt".format(path, dataset), encoding="utf-8")
total_clusters = get_top_clusters_without_network(path, dataset, features, labels, clusters)
print("The complete set of clusters that passed the minimal threshold is \n {}".format(total_clusters))
with open("{}{}.top_features_individual_gene.txt".format(path, dataset), "w", newline='', encoding="utf-8") as f:
w_top_clusters = csv.writer(f, delimiter ='\t')
w_top_clusters.writerow(total_clusters)
clust = []
nb_columns = len(labels)
baseline_accuracy = 0
eps = 0.01  # minimum accuracy improvement (1%) required to keep a new cluster
tmp_Data = object
for i in range(len(total_clusters)):
clust.append(total_clusters[i])
nb_rows = len(clust)
Data = np.zeros((nb_rows, nb_columns), dtype=object)
    if i > 0:  # if a temporary Data matrix exists, copy every row except the last
        for j in range(nb_rows-1):
            Data[j, :] = tmp_Data[j, :]
    # only the score of the newly added cluster needs to be computed
Data[-1, :] = prepare_activity_score_feature_vector(features, labels, clust[nb_rows-1], clusters)
accuracy = logistic_regression_classification_aggregate_activity_scores(np.transpose(Data), labels)
if( accuracy < baseline_accuracy + eps ):
clust = clust[:-1]
tmp_Data = Data
tmp_Data = np.delete(tmp_Data, tmp_Data.shape[0]-1, axis=0)
print("SFS: feature {}/{} checked and rejected".format(i, len(total_clusters)-1))
else:
baseline_accuracy = accuracy
tmp_Data = Data
print("SFS: feature {}/{} checked and retained".format(i, len(total_clusters)-1))
print("The set of clusters to be used in classification is \n {}".format(clust))
with open("{}{}.final_features_individual_gene.txt".format(path, dataset), "w", newline='', encoding="utf-8") as f:
w_final_clusters = csv.writer(f, delimiter ='\t')
w_final_clusters.writerow(clust)
print("Logistic regression accuracy: {}".format(accuracy))
#accuracy = LDA_classification_aggregate_activity_scores(np.transpose(Data), labels)
#print("LDA accuracy: {}".format(accuracy))
#accuracy = SVM_classification_aggregate_activity_scores(np.transpose(Data), labels)
#print("SVM(Linear Kernel) accuracy: {}".format(accuracy))
clusters.close()
| 42.0625
| 115
| 0.69948
|
import sys
from os.path import dirname, abspath
sys.path.append(dirname(dirname(abspath(__file__))))
from SCZ_RNAseq.syn4590909.utils import *
path="../../data/SCZ_RNAseq/output/syn4590909/"
dataset="PPI"
features = np.genfromtxt("{}{}.GE_Features.txt".format(path, dataset), dtype=np.dtype(np.float32))
labels = get_clinical_status_syn4590909()
clusters = open("{}{}.clusters_individual_gene.txt".format(path, dataset), encoding="utf-8")
total_clusters = get_top_clusters_without_network(path, dataset, features, labels, clusters)
print("The complete set of clusters that passed the minimal threshold is \n {}".format(total_clusters))
with open("{}{}.top_features_individual_gene.txt".format(path, dataset), "w", newline='', encoding="utf-8") as f:
w_top_clusters = csv.writer(f, delimiter ='\t')
w_top_clusters.writerow(total_clusters)
clust = []
nb_columns = len(labels)
baseline_accuracy = 0
eps = 0.01
tmp_Data = object
for i in range(len(total_clusters)):
clust.append(total_clusters[i])
nb_rows = len(clust)
Data = np.zeros((nb_rows, nb_columns), dtype=object)
    if i > 0:
for j in range(nb_rows-1):
Data[j, :] = tmp_Data[j, :]
Data[-1, :] = prepare_activity_score_feature_vector(features, labels, clust[nb_rows-1], clusters)
accuracy = logistic_regression_classification_aggregate_activity_scores(np.transpose(Data), labels)
if( accuracy < baseline_accuracy + eps ):
clust = clust[:-1]
tmp_Data = Data
tmp_Data = np.delete(tmp_Data, tmp_Data.shape[0]-1, axis=0)
print("SFS: feature {}/{} checked and rejected".format(i, len(total_clusters)-1))
else:
baseline_accuracy = accuracy
tmp_Data = Data
print("SFS: feature {}/{} checked and retained".format(i, len(total_clusters)-1))
print("The set of clusters to be used in classification is \n {}".format(clust))
with open("{}{}.final_features_individual_gene.txt".format(path, dataset), "w", newline='', encoding="utf-8") as f:
w_final_clusters = csv.writer(f, delimiter ='\t')
w_final_clusters.writerow(clust)
print("Logistic regression accuracy: {}".format(accuracy))
clusters.close()
| true
| true
|
79081ab4bea93de238d2c9a4070d4985b1833fb3
| 4,304
|
py
|
Python
|
libica/openapi/libgds/models/folder_update_request.py
|
umccr-illumina/libica
|
916d27eea499f29bee590268b84208effb0cc576
|
[
"MIT"
] | null | null | null |
libica/openapi/libgds/models/folder_update_request.py
|
umccr-illumina/libica
|
916d27eea499f29bee590268b84208effb0cc576
|
[
"MIT"
] | 4
|
2021-11-15T10:47:51.000Z
|
2022-02-22T04:43:20.000Z
|
libica/openapi/libgds/models/folder_update_request.py
|
umccr-illumina/libica
|
916d27eea499f29bee590268b84208effb0cc576
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Genomic Data Store Service
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from libica.openapi.libgds.configuration import Configuration
class FolderUpdateRequest(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'metadata': 'object',
'acl': 'list[str]'
}
attribute_map = {
'metadata': 'metadata',
'acl': 'acl'
}
def __init__(self, metadata=None, acl=None, local_vars_configuration=None): # noqa: E501
"""FolderUpdateRequest - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._metadata = None
self._acl = None
self.discriminator = None
if metadata is not None:
self.metadata = metadata
if acl is not None:
self.acl = acl
@property
def metadata(self):
"""Gets the metadata of this FolderUpdateRequest. # noqa: E501
Metadata about this folder and its contents # noqa: E501
:return: The metadata of this FolderUpdateRequest. # noqa: E501
:rtype: object
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this FolderUpdateRequest.
Metadata about this folder and its contents # noqa: E501
:param metadata: The metadata of this FolderUpdateRequest. # noqa: E501
:type: object
"""
self._metadata = metadata
@property
def acl(self):
"""Gets the acl of this FolderUpdateRequest. # noqa: E501
Optional array to replace the acl on the resource. # noqa: E501
:return: The acl of this FolderUpdateRequest. # noqa: E501
:rtype: list[str]
"""
return self._acl
@acl.setter
def acl(self, acl):
"""Sets the acl of this FolderUpdateRequest.
Optional array to replace the acl on the resource. # noqa: E501
:param acl: The acl of this FolderUpdateRequest. # noqa: E501
:type: list[str]
"""
self._acl = acl
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FolderUpdateRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, FolderUpdateRequest):
return True
return self.to_dict() != other.to_dict()
| 28.503311
| 124
| 0.58434
|
import pprint
import re
import six
from libica.openapi.libgds.configuration import Configuration
class FolderUpdateRequest(object):
openapi_types = {
'metadata': 'object',
'acl': 'list[str]'
}
attribute_map = {
'metadata': 'metadata',
'acl': 'acl'
}
def __init__(self, metadata=None, acl=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._metadata = None
self._acl = None
self.discriminator = None
if metadata is not None:
self.metadata = metadata
if acl is not None:
self.acl = acl
@property
def metadata(self):
return self._metadata
@metadata.setter
def metadata(self, metadata):
self._metadata = metadata
@property
def acl(self):
return self._acl
@acl.setter
def acl(self, acl):
self._acl = acl
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, FolderUpdateRequest):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
if not isinstance(other, FolderUpdateRequest):
return True
return self.to_dict() != other.to_dict()
| true
| true
|
79081ad7c255e11da14b60003ca7937ed73bd33a
| 5,841
|
py
|
Python
|
ccws/base.py
|
applezjm/testsub
|
051348bb852d8e3cefe764a6315f53da66cd413e
|
[
"MIT"
] | null | null | null |
ccws/base.py
|
applezjm/testsub
|
051348bb852d8e3cefe764a6315f53da66cd413e
|
[
"MIT"
] | null | null | null |
ccws/base.py
|
applezjm/testsub
|
051348bb852d8e3cefe764a6315f53da66cd413e
|
[
"MIT"
] | null | null | null |
# coding=utf-8
import websocket
import datetime
import csv
import time
import logging
import redis
import json
import copy
import pytz
from hftcoin.mdagent.ccws.configs import REDIS_HOST
from hftcoin.mdagent.ccws.configs import TIMEZONE
from hftcoin.mdagent.ccws.configs import ExConfigs
from hftcoin.mdagent.ccws.configs import HOME_PATH
class Exchange(object):
ExchangeId = ''
WebSocketConnection = None
RedisConnection = None
def __init__(self):
self.Logger = logging.getLogger(self.ExchangeId)
[self.ExConfig, self._WebSocketAddress] = ExConfigs[self.ExchangeId]
self.Config = {}
def set_market(self, currency, mode):
self.Config = self.ExConfig[currency][mode]
self.Logger = logging.getLogger('%s.%s.%s' % (self.ExchangeId, currency, mode))
def run_websocketapp(self, **kwargs):
self.Logger.info('Begin Connection')
url = self._WebSocketAddress + kwargs.pop('url_append', '')
on_error = kwargs.pop('on_error', self.on_error)
on_close = kwargs.pop('on_close', self.on_close)
on_message = kwargs.pop('on_message', self.on_message)
self.WebSocketConnection = websocket.WebSocketApp(
url,
on_error=on_error,
on_close=on_close,
on_message=on_message,
**kwargs,
)
while True:
try:
self.WebSocketConnection.run_forever()
except Exception as e:
self.Logger.exception(e)
def on_message(self, _ws, msg):
ts = int(time.time()*1000)
rdk = self.Config['RedisCollectKey']
# self.Logger.debug(msg)
self.RedisConnection.lpush(rdk, json.dumps([ts, msg]))
def on_error(self, _ws, error):
self.Logger.exception(error)
def on_close(self, _ws):
self.Logger.info('Connection closed.')
def connect_redis(self):
try:
self.RedisConnection = redis.StrictRedis(host=REDIS_HOST)
self.RedisConnection.ping()
except Exception as e:
self.Logger.exception(e)
def write_data_csv(self):
self.connect_redis()
[fn, rdk] = [self.Config.get(item) for item in ['FileName', 'RedisOutputKey']]
error_count = 100
while True:
try:
if self.RedisConnection.llen(rdk) > 0:
data = json.loads(self.RedisConnection.rpop(rdk).decode('utf8'))
# data[1] is timestamp
dt = datetime.datetime.fromtimestamp(data[1] / 1000, TIMEZONE)
calendar_path = '%4d/%02d/%02d' % (dt.year, dt.month, dt.day)
with open('%s/%s/%s' % (HOME_PATH, calendar_path, fn), 'a+') as csvFile:
csvwriter = csv.writer(csvFile)
csvwriter.writerow(data)
else:
time.sleep(60)
except RuntimeWarning:
break
except Exception as e:
self.Logger.exception(e)
error_count -= 1
if error_count < 0:
break
def collect_data(self):
pass
def process_data(self):
self.connect_redis()
getattr(self, self.Config.get('DataHandler', object))()
def _check_price_eq(self, p1, p2):
        # prices within half a tick are treated as equal, tolerating floating-point error
return abs(p1-p2) < self.Config['TickSize']/2
def _binary_search(self, find, list1, low, high):
while low <= high:
mid = int((low + high) / 2)
if self._check_price_eq(list1[mid][0], find):
return [mid, 'True']
elif list1[mid][0] > find:
high = mid - 1
else:
low = mid + 1
return [low, 'False']
def _update_order_book(self, bids, asks, side, price, remaining):
if side in ['bid', 'buy']:
book = bids
cut = int(99*(len(book)-1)/100)
else:
book = asks
cut = int((len(book)-1)/100)
if price < book[cut][0]:
res = self._binary_search(price, book, 0, cut-1)
else:
res = self._binary_search(price, book, cut, len(book)-1)
if res[1] == 'True':
if remaining < self.Config['AmountMin']:
del book[res[0]]
else:
book[res[0]][1] = remaining
else:
if remaining >= self.Config['AmountMin']:
book.insert(res[0], [price, remaining])
def check_data_validation(self, book):
length = int(len(book)/2)
for i in range(0, length - 2, 2):
if book[i] <= book[i + 2]:
return False
for i in range(length, 2 * length - 2, 2):
if book[i] >= book[i + 2]:
return False
for i in range(1, 2 * length, 2):
if book[i] < self.Config['AmountMin']:
return False
if book[0] > book[length]:
return False
return True
@staticmethod
def _cut_order_book(bids, asks, depth):
if len(bids) >= depth:
book = bids[-depth:]
book.reverse()
else:
book = copy.deepcopy(bids)
book.reverse()
book += [['None', 'None']] * (depth - len(bids))
if len(asks) >= depth:
book += asks[:depth]
else:
book += asks + [['None', 'None']] * (depth - len(asks))
book = [x[0:2] for x in book]
return sum(book, [])
@staticmethod
def fmt_date(ts):
return datetime.datetime.fromtimestamp(ts / 1000, TIMEZONE).strftime('%Y-%m-%d %H:%M:%S.%f %z')
@staticmethod
def date_from_str(ts):
return pytz.utc.localize(datetime.datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S.%fZ'))
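# Minimal sketch (not from the original module) of the order-book helpers.
# A tiny subclass supplies just the Config values those methods read; every
# number below is made up for illustration.
class _DemoExchange(Exchange):
    ExchangeId = 'demo'
    def __init__(self):  # bypass Exchange.__init__, which needs live ExConfigs
        self.Config = {'TickSize': 0.01, 'AmountMin': 0.001}
# ex = _DemoExchange()
# bids = [[99.0, 1.0], [100.0, 2.0]]    # ascending price, best bid last
# asks = [[101.0, 1.5], [102.0, 0.7]]   # ascending price, best ask first
# ex._update_order_book(bids, asks, 'buy', 100.0, 5.0)   # resizes the 100.0 level
# ex._update_order_book(bids, asks, 'sell', 101.0, 0.0)  # below AmountMin: deletes it
# Exchange._cut_order_book(bids, asks, 2)                # flattens both sides to one row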
| 32.631285
| 103
| 0.546995
|
import websocket
import datetime
import csv
import time
import logging
import redis
import json
import copy
import pytz
from hftcoin.mdagent.ccws.configs import REDIS_HOST
from hftcoin.mdagent.ccws.configs import TIMEZONE
from hftcoin.mdagent.ccws.configs import ExConfigs
from hftcoin.mdagent.ccws.configs import HOME_PATH
class Exchange(object):
ExchangeId = ''
WebSocketConnection = None
RedisConnection = None
def __init__(self):
self.Logger = logging.getLogger(self.ExchangeId)
[self.ExConfig, self._WebSocketAddress] = ExConfigs[self.ExchangeId]
self.Config = {}
def set_market(self, currency, mode):
self.Config = self.ExConfig[currency][mode]
self.Logger = logging.getLogger('%s.%s.%s' % (self.ExchangeId, currency, mode))
def run_websocketapp(self, **kwargs):
self.Logger.info('Begin Connection')
url = self._WebSocketAddress + kwargs.pop('url_append', '')
on_error = kwargs.pop('on_error', self.on_error)
on_close = kwargs.pop('on_close', self.on_close)
on_message = kwargs.pop('on_message', self.on_message)
self.WebSocketConnection = websocket.WebSocketApp(
url,
on_error=on_error,
on_close=on_close,
on_message=on_message,
**kwargs,
)
while True:
try:
self.WebSocketConnection.run_forever()
except Exception as e:
self.Logger.exception(e)
def on_message(self, _ws, msg):
ts = int(time.time()*1000)
rdk = self.Config['RedisCollectKey']
self.RedisConnection.lpush(rdk, json.dumps([ts, msg]))
def on_error(self, _ws, error):
self.Logger.exception(error)
def on_close(self, _ws):
self.Logger.info('Connection closed.')
def connect_redis(self):
try:
self.RedisConnection = redis.StrictRedis(host=REDIS_HOST)
self.RedisConnection.ping()
except Exception as e:
self.Logger.exception(e)
def write_data_csv(self):
self.connect_redis()
[fn, rdk] = [self.Config.get(item) for item in ['FileName', 'RedisOutputKey']]
error_count = 100
while True:
try:
if self.RedisConnection.llen(rdk) > 0:
data = json.loads(self.RedisConnection.rpop(rdk).decode('utf8'))
dt = datetime.datetime.fromtimestamp(data[1] / 1000, TIMEZONE)
calendar_path = '%4d/%02d/%02d' % (dt.year, dt.month, dt.day)
with open('%s/%s/%s' % (HOME_PATH, calendar_path, fn), 'a+') as csvFile:
csvwriter = csv.writer(csvFile)
csvwriter.writerow(data)
else:
time.sleep(60)
except RuntimeWarning:
break
except Exception as e:
self.Logger.exception(e)
error_count -= 1
if error_count < 0:
break
def collect_data(self):
pass
def process_data(self):
self.connect_redis()
getattr(self, self.Config.get('DataHandler', object))()
def _check_price_eq(self, p1, p2):
return abs(p1-p2) < self.Config['TickSize']/2
def _binary_search(self, find, list1, low, high):
while low <= high:
mid = int((low + high) / 2)
if self._check_price_eq(list1[mid][0], find):
return [mid, 'True']
elif list1[mid][0] > find:
high = mid - 1
else:
low = mid + 1
return [low, 'False']
def _update_order_book(self, bids, asks, side, price, remaining):
if side in ['bid', 'buy']:
book = bids
cut = int(99*(len(book)-1)/100)
else:
book = asks
cut = int((len(book)-1)/100)
if price < book[cut][0]:
res = self._binary_search(price, book, 0, cut-1)
else:
res = self._binary_search(price, book, cut, len(book)-1)
if res[1] == 'True':
if remaining < self.Config['AmountMin']:
del book[res[0]]
else:
book[res[0]][1] = remaining
else:
if remaining >= self.Config['AmountMin']:
book.insert(res[0], [price, remaining])
def check_data_validation(self, book):
length = int(len(book)/2)
for i in range(0, length - 2, 2):
if book[i] <= book[i + 2]:
return False
for i in range(length, 2 * length - 2, 2):
if book[i] >= book[i + 2]:
return False
for i in range(1, 2 * length, 2):
if book[i] < self.Config['AmountMin']:
return False
if book[0] > book[length]:
return False
return True
@staticmethod
def _cut_order_book(bids, asks, depth):
if len(bids) >= depth:
book = bids[-depth:]
book.reverse()
else:
book = copy.deepcopy(bids)
book.reverse()
book += [['None', 'None']] * (depth - len(bids))
if len(asks) >= depth:
book += asks[:depth]
else:
book += asks + [['None', 'None']] * (depth - len(asks))
book = [x[0:2] for x in book]
return sum(book, [])
@staticmethod
def fmt_date(ts):
return datetime.datetime.fromtimestamp(ts / 1000, TIMEZONE).strftime('%Y-%m-%d %H:%M:%S.%f %z')
@staticmethod
def date_from_str(ts):
return pytz.utc.localize(datetime.datetime.strptime(ts, '%Y-%m-%dT%H:%M:%S.%fZ'))
| true
| true
|
79081d4f882fb3db9f68d5af7078fe845f869a13
| 12,493
|
py
|
Python
|
src/interface_py/h2o4gpu/solvers/factorization.py
|
aaron8tang/h2o4gpu
|
602275375cb0dfb4acd070a8c86c3ded0bef1156
|
[
"Apache-2.0"
] | null | null | null |
src/interface_py/h2o4gpu/solvers/factorization.py
|
aaron8tang/h2o4gpu
|
602275375cb0dfb4acd070a8c86c3ded0bef1156
|
[
"Apache-2.0"
] | null | null | null |
src/interface_py/h2o4gpu/solvers/factorization.py
|
aaron8tang/h2o4gpu
|
602275375cb0dfb4acd070a8c86c3ded0bef1156
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
# pylint: disable=fixme, line-too-long
"""
Matrix factorization solver.
:copyright: 2017-2019 H2O.ai, Inc.
:license: Apache License Version 2.0 (see LICENSE for details)
"""
import numpy as np
import scipy
import scipy.sparse
def _get_sparse_matrixes(X):
    '''Create csc, csr and coo copies of a sparse matrix given in any of those three formats
Arguments:
X {array-like, csc, csr or coo sparse matrix}
Returns:
csc, csr, coo
'''
X_coo = X_csc = X_csr = None
if scipy.sparse.isspmatrix_coo(X):
X_coo = X
X_csr = X_coo.tocsr(True)
X_csc = X_coo.tocsc(True)
elif scipy.sparse.isspmatrix_csr(X):
X_csr = X
        X_csc = X_csr.tocsc(True)
        X_coo = X_csr.tocoo(True)
elif scipy.sparse.isspmatrix_csc(X):
X_csc = X
X_csr = X_csc.tocsr(True)
X_coo = X_csc.tocoo(True)
else:
assert False, "only coo, csc and csr sparse matrixes are supported"
return X_csc, X_csr, X_coo
class FactorizationH2O(object):
'''Matrix Factorization on GPU with Alternating Least Square (ALS) algorithm.
Factors a sparse rating matrix X (m by n, with N_z non-zero elements)
into a m-by-f and a f-by-n matrices.
Parameters
----------
f int
decomposition size
lambda_ float
lambda regularization
max_iter int, default: 100
number of training iterations
double_precision bool, default: False
use double precision, not yet supported
thetaT {array-like} shape (n, f), default: None
initial theta matrix
XT {array-like} shape (m, f), default: None
initial XT matrix
random_state int, default: 1234
Attributes
----------
XT {array-like} shape (m, f)
XT matrix contains user's features
thetaT {array-like} shape (n, f)
transposed theta matrix, item's features
Warnings
--------
Matrixes ``XT`` and ``thetaT`` may contain nan elements. This is because in some datasets,
there are users or items with no ratings in training set. That results in solutions of
a system of linear equations becomes nan. Such elements can be easily removed with numpy
functions like numpy.nan_to_num, but existence of them may be useful for troubleshooting
purposes.
'''
def __init__(self, f, lambda_, max_iter=100, double_precision=False, thetaT=None, XT=None, random_state=1234):
assert not double_precision, 'double precision is not yet supported'
assert f % 10 == 0, 'f has to be a multiple of 10'
self.f = f
self.lambda_ = lambda_
self.double_precision = double_precision
self.dtype = np.float64 if self.double_precision else np.float32
self.thetaT = thetaT
self.XT = XT
self.max_iter = max_iter
self.random_state = random_state
def _load_lib(self):
from ..libs.lib_utils import GPUlib
gpu_lib = GPUlib().get(1)
return gpu_lib
def fit(self, X, y=None, X_test=None, X_BATCHES=1, THETA_BATCHES=1, early_stopping_rounds=None, verbose=False, scores=None):
#pylint: disable=unused-argument
'''Learn model from rating matrix X.
Parameters
----------
X {array-like, sparse matrix}, shape (m, n)
Data matrix to be decomposed.
y None
Ignored
X_test {array-like, coo sparse matrix}, shape (m, n)
Data matrix for cross validation.
X_BATCHES int, default: 1
Batches to split XT, increase this parameter in case out of memory error.
THETA_BATCHES int, default: 1
Batches to split theta, increase this parameter in case out of memory error.
early_stopping_rounds int, default: None
Activates early stopping. Cross validation error needs to decrease
at least every <early_stopping_rounds> round(s) to continue training. Requires <X_test>.
Returns the model from the last iteration (not the best one). If early stopping occurs,
the model will have three additional fields: best_cv_score, best_train_score and best_iteration.
verbose bool, default: False
Prints training and validation score(if applicable) on each iteration.
scores {list}
List of tuples with train, cv score for every iteration.
Returns
-------
self : returns an instance of self.
'''
csc_X, csr_X, coo_X = _get_sparse_matrixes(X)
if early_stopping_rounds is not None:
assert X_test is not None, 'X_test is mandatory with early stopping'
        coo_X_test = None
        if X_test is not None:
assert scipy.sparse.isspmatrix_coo(
X_test), 'X_test must be a coo sparse scipy matrix'
assert X.shape == X_test.shape
assert X_test.dtype == self.dtype
assert X.dtype == self.dtype
coo_X_test = X_test
lib = self._load_lib()
if self.double_precision:
make_data = lib.make_factorization_data_double
run_step = lib.run_factorization_step_double
factorization_score = lib.factorization_score_double
copy_fecatorization_result = lib.copy_fecatorization_result_double
free_data = lib.free_data_double
else:
make_data = lib.make_factorization_data_float
run_step = lib.run_factorization_step_float
factorization_score = lib.factorization_score_float
copy_fecatorization_result = lib.copy_fecatorization_result_float
free_data = lib.free_data_float
m = coo_X.shape[0]
n = coo_X.shape[1]
nnz = csc_X.nnz
if coo_X_test is None:
nnz_test = 0
else:
nnz_test = coo_X_test.nnz
rs = np.random.RandomState(self.random_state)
if self.thetaT is None:
self.thetaT = rs.rand(n, self.f).astype(self.dtype)
else:
assert self.thetaT.dtype == self.dtype
if self.XT is None:
self.XT = rs.rand(m, self.f).astype(self.dtype)
else:
assert self.XT.dtype == self.dtype
csrRowIndexDevicePtr = None
csrColIndexDevicePtr = None
csrValDevicePtr = None
cscRowIndexDevicePtr = None
cscColIndexDevicePtr = None
cscValDevicePtr = None
cooRowIndexDevicePtr = None
cooColIndexDevicePtr = None
cooValDevicePtr = None
thetaTDevice = None
XTDevice = None
cooRowIndexTestDevicePtr = None
cooColIndexTestDevicePtr = None
cooValTestDevicePtr = None
status, csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, \
cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr, \
cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr, \
thetaTDevice, XTDevice, cooRowIndexTestDevicePtr, \
cooColIndexTestDevicePtr, cooValTestDevicePtr = make_data( # pylint: disable=W0212
m, n, self.f, nnz, nnz_test, csr_X.indptr, csr_X.indices, csr_X.data,
csc_X.indices, csc_X.indptr, csc_X.data,
coo_X.row, coo_X.col, coo_X.data,
self.thetaT, self.XT, coo_X_test.row if coo_X_test is not None else None,
coo_X_test.col if coo_X_test is not None else None, coo_X_test.data if coo_X_test is not None else None,
csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr,
cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr,
thetaTDevice, XTDevice, cooRowIndexTestDevicePtr,
cooColIndexTestDevicePtr, cooValTestDevicePtr)
assert status == 0, 'Failure uploading the data'
self.best_train_score = np.inf
self.best_cv_score = np.inf
self.best_iteration = -1
cv_score = train_score = np.inf
for i in range(self.max_iter):
status = run_step(m,
n,
self.f,
nnz,
self.lambda_,
csrRowIndexDevicePtr,
csrColIndexDevicePtr,
csrValDevicePtr,
cscRowIndexDevicePtr,
cscColIndexDevicePtr,
cscValDevicePtr,
thetaTDevice,
XTDevice,
X_BATCHES,
THETA_BATCHES)
if verbose or scores is not None:
result = factorization_score(m,
n,
self.f,
nnz,
self.lambda_,
thetaTDevice,
XTDevice,
cooRowIndexDevicePtr,
cooColIndexDevicePtr,
cooValDevicePtr)
train_score = result[0]
if X_test is not None and (verbose or early_stopping_rounds is not None or scores is not None):
result = factorization_score(m,
n,
self.f,
nnz_test,
self.lambda_,
thetaTDevice,
XTDevice,
cooRowIndexTestDevicePtr,
cooColIndexTestDevicePtr,
cooValTestDevicePtr)
cv_score = result[0]
if verbose:
print("iteration {0} train: {1} cv: {2}".format(
i, train_score, cv_score))
if scores is not None:
scores.append((train_score, cv_score))
if early_stopping_rounds is not None:
if self.best_cv_score > cv_score:
self.best_cv_score = cv_score
self.best_train_score = train_score
self.best_iteration = i
if (i - self.best_iteration) > early_stopping_rounds:
if verbose:
print('best iteration:{0} train: {1} cv: {2}'.format(
self.best_iteration, self.best_train_score, self.best_cv_score))
break
lib.free_data_int(csrRowIndexDevicePtr)
lib.free_data_int(csrColIndexDevicePtr)
free_data(csrValDevicePtr)
lib.free_data_int(cscRowIndexDevicePtr)
lib.free_data_int(cscColIndexDevicePtr)
free_data(cscValDevicePtr)
lib.free_data_int(cooRowIndexDevicePtr)
lib.free_data_int(cooColIndexDevicePtr)
free_data(cooValDevicePtr)
lib.free_data_int(cooRowIndexTestDevicePtr)
lib.free_data_int(cooColIndexTestDevicePtr)
free_data(cooValTestDevicePtr)
copy_fecatorization_result(self.XT, XTDevice, m * self.f)
copy_fecatorization_result(self.thetaT, thetaTDevice, n * self.f)
free_data(thetaTDevice)
free_data(XTDevice)
return self
def predict(self, X):
        '''Predict the non-zero elements of coo sparse matrix X according to the fitted model.
Parameters
----------
X {array-like, sparse coo matrix} shape (m, n)
Data matrix in coo format. Values are ignored.
Returns
-------
{array-like, sparse coo matrix} shape (m, n)
Predicted values.
'''
        assert self.XT is not None and self.thetaT is not None, 'predict is invoked on an unfitted model'
assert scipy.sparse.isspmatrix_coo(
X), 'convert X to coo sparse matrix'
assert X.dtype == self.dtype
a = np.take(self.XT, X.row, axis=0)
b = np.take(self.thetaT, X.col, axis=0)
val = np.sum(a * b, axis=1)
return scipy.sparse.coo_matrix((val, (X.row, X.col)), shape=X.shape)
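# Usage sketch (illustrative only): fitting needs the compiled h2o4gpu GPU
# library and a CUDA device, so the example is left as comments. Ratings must
# be float32 and f must be a multiple of 10.
#
#   import scipy.sparse as sp
#   X = sp.random(1000, 500, density=0.01, format='coo', dtype='float32')
#   model = FactorizationH2O(f=40, lambda_=0.01, max_iter=50)
#   model.fit(X)
#   preds = model.predict(X)   # coo matrix with predictions at X's non-zeros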
| 39.410095
| 137
| 0.574802
|
import numpy as np
import scipy
import scipy.sparse
def _get_sparse_matrixes(X):
X_coo = X_csc = X_csr = None
if scipy.sparse.isspmatrix_coo(X):
X_coo = X
X_csr = X_coo.tocsr(True)
X_csc = X_coo.tocsc(True)
elif scipy.sparse.isspmatrix_csr(X):
X_csr = X
        X_csc = X_csr.tocsc(True)
        X_coo = X_csr.tocoo(True)
elif scipy.sparse.isspmatrix_csc(X):
X_csc = X
X_csr = X_csc.tocsr(True)
X_coo = X_csc.tocoo(True)
else:
assert False, "only coo, csc and csr sparse matrixes are supported"
return X_csc, X_csr, X_coo
class FactorizationH2O(object):
def __init__(self, f, lambda_, max_iter=100, double_precision=False, thetaT=None, XT=None, random_state=1234):
assert not double_precision, 'double precision is not yet supported'
assert f % 10 == 0, 'f has to be a multiple of 10'
self.f = f
self.lambda_ = lambda_
self.double_precision = double_precision
self.dtype = np.float64 if self.double_precision else np.float32
self.thetaT = thetaT
self.XT = XT
self.max_iter = max_iter
self.random_state = random_state
def _load_lib(self):
from ..libs.lib_utils import GPUlib
gpu_lib = GPUlib().get(1)
return gpu_lib
def fit(self, X, y=None, X_test=None, X_BATCHES=1, THETA_BATCHES=1, early_stopping_rounds=None, verbose=False, scores=None):
csc_X, csr_X, coo_X = _get_sparse_matrixes(X)
if early_stopping_rounds is not None:
assert X_test is not None, 'X_test is mandatory with early stopping'
        coo_X_test = None
        if X_test is not None:
assert scipy.sparse.isspmatrix_coo(
X_test), 'X_test must be a coo sparse scipy matrix'
assert X.shape == X_test.shape
assert X_test.dtype == self.dtype
assert X.dtype == self.dtype
coo_X_test = X_test
lib = self._load_lib()
if self.double_precision:
make_data = lib.make_factorization_data_double
run_step = lib.run_factorization_step_double
factorization_score = lib.factorization_score_double
copy_fecatorization_result = lib.copy_fecatorization_result_double
free_data = lib.free_data_double
else:
make_data = lib.make_factorization_data_float
run_step = lib.run_factorization_step_float
factorization_score = lib.factorization_score_float
copy_fecatorization_result = lib.copy_fecatorization_result_float
free_data = lib.free_data_float
m = coo_X.shape[0]
n = coo_X.shape[1]
nnz = csc_X.nnz
if coo_X_test is None:
nnz_test = 0
else:
nnz_test = coo_X_test.nnz
rs = np.random.RandomState(self.random_state)
if self.thetaT is None:
self.thetaT = rs.rand(n, self.f).astype(self.dtype)
else:
assert self.thetaT.dtype == self.dtype
if self.XT is None:
self.XT = rs.rand(m, self.f).astype(self.dtype)
else:
assert self.XT.dtype == self.dtype
csrRowIndexDevicePtr = None
csrColIndexDevicePtr = None
csrValDevicePtr = None
cscRowIndexDevicePtr = None
cscColIndexDevicePtr = None
cscValDevicePtr = None
cooRowIndexDevicePtr = None
cooColIndexDevicePtr = None
cooValDevicePtr = None
thetaTDevice = None
XTDevice = None
cooRowIndexTestDevicePtr = None
cooColIndexTestDevicePtr = None
cooValTestDevicePtr = None
status, csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, \
cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr, \
cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr, \
thetaTDevice, XTDevice, cooRowIndexTestDevicePtr, \
cooColIndexTestDevicePtr, cooValTestDevicePtr = make_data(
m, n, self.f, nnz, nnz_test, csr_X.indptr, csr_X.indices, csr_X.data,
csc_X.indices, csc_X.indptr, csc_X.data,
coo_X.row, coo_X.col, coo_X.data,
self.thetaT, self.XT, coo_X_test.row if coo_X_test is not None else None,
coo_X_test.col if coo_X_test is not None else None, coo_X_test.data if coo_X_test is not None else None,
csrRowIndexDevicePtr, csrColIndexDevicePtr, csrValDevicePtr, cscRowIndexDevicePtr, cscColIndexDevicePtr, cscValDevicePtr,
cooRowIndexDevicePtr, cooColIndexDevicePtr, cooValDevicePtr,
thetaTDevice, XTDevice, cooRowIndexTestDevicePtr,
cooColIndexTestDevicePtr, cooValTestDevicePtr)
assert status == 0, 'Failure uploading the data'
self.best_train_score = np.inf
self.best_cv_score = np.inf
self.best_iteration = -1
cv_score = train_score = np.inf
for i in range(self.max_iter):
status = run_step(m,
n,
self.f,
nnz,
self.lambda_,
csrRowIndexDevicePtr,
csrColIndexDevicePtr,
csrValDevicePtr,
cscRowIndexDevicePtr,
cscColIndexDevicePtr,
cscValDevicePtr,
thetaTDevice,
XTDevice,
X_BATCHES,
THETA_BATCHES)
if verbose or scores is not None:
result = factorization_score(m,
n,
self.f,
nnz,
self.lambda_,
thetaTDevice,
XTDevice,
cooRowIndexDevicePtr,
cooColIndexDevicePtr,
cooValDevicePtr)
train_score = result[0]
if X_test is not None and (verbose or early_stopping_rounds is not None or scores is not None):
result = factorization_score(m,
n,
self.f,
nnz_test,
self.lambda_,
thetaTDevice,
XTDevice,
cooRowIndexTestDevicePtr,
cooColIndexTestDevicePtr,
cooValTestDevicePtr)
cv_score = result[0]
if verbose:
print("iteration {0} train: {1} cv: {2}".format(
i, train_score, cv_score))
if scores is not None:
scores.append((train_score, cv_score))
if early_stopping_rounds is not None:
if self.best_cv_score > cv_score:
self.best_cv_score = cv_score
self.best_train_score = train_score
self.best_iteration = i
if (i - self.best_iteration) > early_stopping_rounds:
if verbose:
print('best iteration:{0} train: {1} cv: {2}'.format(
self.best_iteration, self.best_train_score, self.best_cv_score))
break
lib.free_data_int(csrRowIndexDevicePtr)
lib.free_data_int(csrColIndexDevicePtr)
free_data(csrValDevicePtr)
lib.free_data_int(cscRowIndexDevicePtr)
lib.free_data_int(cscColIndexDevicePtr)
free_data(cscValDevicePtr)
lib.free_data_int(cooRowIndexDevicePtr)
lib.free_data_int(cooColIndexDevicePtr)
free_data(cooValDevicePtr)
lib.free_data_int(cooRowIndexTestDevicePtr)
lib.free_data_int(cooColIndexTestDevicePtr)
free_data(cooValTestDevicePtr)
copy_fecatorization_result(self.XT, XTDevice, m * self.f)
copy_fecatorization_result(self.thetaT, thetaTDevice, n * self.f)
free_data(thetaTDevice)
free_data(XTDevice)
return self
def predict(self, X):
        assert self.XT is not None and self.thetaT is not None, 'predict is invoked on an unfitted model'
assert scipy.sparse.isspmatrix_coo(
X), 'convert X to coo sparse matrix'
assert X.dtype == self.dtype
a = np.take(self.XT, X.row, axis=0)
b = np.take(self.thetaT, X.col, axis=0)
val = np.sum(a * b, axis=1)
return scipy.sparse.coo_matrix((val, (X.row, X.col)), shape=X.shape)
| true
| true
|
79081dd13c1cbd7c95a5b2b85e279978b4920270
| 961
|
py
|
Python
|
infoblox/komand_infoblox/actions/delete_host/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
infoblox/komand_infoblox/actions/delete_host/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
infoblox/komand_infoblox/actions/delete_host/schema.py
|
xhennessy-r7/insightconnect-plugins
|
59268051313d67735b5dd3a30222eccb92aca8e9
|
[
"MIT"
] | null | null | null |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
_REF = "_ref"
class Output:
_REF = "_ref"
class DeleteHostInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"_ref": {
"type": "string",
"title": "Ref",
"description": "Object Reference of the host to remove",
"order": 1
}
},
"required": [
"_ref"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class DeleteHostOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"_ref": {
"type": "string",
"title": "Ref",
"description": "Object Reference of the removed host",
"order": 1
}
},
"required": [
"_ref"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 16.568966
| 62
| 0.546306
|
import komand
import json
class Input:
_REF = "_ref"
class Output:
_REF = "_ref"
class DeleteHostInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"_ref": {
"type": "string",
"title": "Ref",
"description": "Object Reference of the host to remove",
"order": 1
}
},
"required": [
"_ref"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class DeleteHostOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"_ref": {
"type": "string",
"title": "Ref",
"description": "Object Reference of the removed host",
"order": 1
}
},
"required": [
"_ref"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| true
| true
|
79081efb431e116d29c7b9895c23c30c45fb6063
| 553
|
py
|
Python
|
pythons/pythons/pythons_auth/migrations/0002_auto_20210723_1847.py
|
BoyanPeychinov/python_web_framework
|
bb3a78c36790821d8b3a2b847494a1138d063193
|
[
"MIT"
] | null | null | null |
pythons/pythons/pythons_auth/migrations/0002_auto_20210723_1847.py
|
BoyanPeychinov/python_web_framework
|
bb3a78c36790821d8b3a2b847494a1138d063193
|
[
"MIT"
] | null | null | null |
pythons/pythons/pythons_auth/migrations/0002_auto_20210723_1847.py
|
BoyanPeychinov/python_web_framework
|
bb3a78c36790821d8b3a2b847494a1138d063193
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.4 on 2021-07-23 15:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pythons_auth', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='pythonsuser',
name='is_staff',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='pythonsuser',
name='is_superuser',
field=models.BooleanField(default=False),
),
]
| 23.041667
| 53
| 0.584087
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pythons_auth', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='pythonsuser',
name='is_staff',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='pythonsuser',
name='is_superuser',
field=models.BooleanField(default=False),
),
]
| true
| true
|
79081f127977d650f3c97bab5df425b5b4db4a4c
| 4,384
|
py
|
Python
|
data/prepare_data_2d_h36m_sh.py
|
fullmoonhalf/SemGCN
|
ce1dce98f8b7cc600ba7e733d17d71192c24b596
|
[
"Apache-2.0"
] | null | null | null |
data/prepare_data_2d_h36m_sh.py
|
fullmoonhalf/SemGCN
|
ce1dce98f8b7cc600ba7e733d17d71192c24b596
|
[
"Apache-2.0"
] | null | null | null |
data/prepare_data_2d_h36m_sh.py
|
fullmoonhalf/SemGCN
|
ce1dce98f8b7cc600ba7e733d17d71192c24b596
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function, absolute_import, division
import argparse
import os
import zipfile
import tarfile
import numpy as np
import h5py
from glob import glob
from shutil import rmtree
import sys
sys.path.append('../')
from common.h36m_dataset import H36M_NAMES
output_filename_pt = 'data_2d_h36m_sh_pt_mpii'
output_filename_ft = 'data_2d_h36m_sh_ft_h36m'
subjects = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11']
cam_map = {
'54138969': 0,
'55011271': 1,
'58860488': 2,
'60457274': 3,
}
metadata = {
'num_joints': 16,
'keypoints_symmetry': [
[3, 4, 5, 13, 14, 15],
[2, 1, 0, 12, 11, 10],
]
}
# Stacked Hourglass produces 16 joints. These are the names.
SH_NAMES = [''] * 16
SH_NAMES[0] = 'RFoot'
SH_NAMES[1] = 'RKnee'
SH_NAMES[2] = 'RHip'
SH_NAMES[3] = 'LHip'
SH_NAMES[4] = 'LKnee'
SH_NAMES[5] = 'LFoot'
SH_NAMES[6] = 'Hip'
SH_NAMES[7] = 'Spine'
SH_NAMES[8] = 'Thorax'
SH_NAMES[9] = 'Head'
SH_NAMES[10] = 'RWrist'
SH_NAMES[11] = 'RElbow'
SH_NAMES[12] = 'RShoulder'
SH_NAMES[13] = 'LShoulder'
SH_NAMES[14] = 'LElbow'
SH_NAMES[15] = 'LWrist'
# Permutation that goes from SH detections to H36M ordering.
SH_TO_GT_PERM = np.array([SH_NAMES.index(h) for h in H36M_NAMES if h != '' and h in SH_NAMES])
assert np.all(SH_TO_GT_PERM == np.array([6, 2, 1, 0, 3, 4, 5, 7, 8, 9, 13, 14, 15, 12, 11, 10]))
metadata['keypoints_symmetry'][0] = [SH_TO_GT_PERM.tolist().index(h) for h in metadata['keypoints_symmetry'][0]]
metadata['keypoints_symmetry'][1] = [SH_TO_GT_PERM.tolist().index(h) for h in metadata['keypoints_symmetry'][1]]
def process_subject(subject, file_list, output):
if subject == 'S11':
assert len(file_list) == 119, "Expected 119 files for subject " + subject + ", got " + str(len(file_list))
else:
assert len(file_list) == 120, "Expected 120 files for subject " + subject + ", got " + str(len(file_list))
for f in file_list:
action, cam = os.path.splitext(os.path.basename(f))[0].replace('_', ' ').split('.')
if subject == 'S11' and action == 'Directions':
continue # Discard corrupted video
if action not in output[subject]:
output[subject][action] = [None, None, None, None]
with h5py.File(f) as hf:
# positions = hf['poses'].value
positions = np.array(hf['poses'])
positions = positions[:, SH_TO_GT_PERM, :]
output[subject][action][cam_map[cam]] = positions.astype('float32')
if __name__ == '__main__':
if os.path.basename(os.getcwd()) != 'data':
print('This script must be launched from the "data" directory')
exit(0)
parser = argparse.ArgumentParser(description='Human3.6M dataset downloader/converter')
parser.add_argument('-pt', '--pretrained', default='', type=str, metavar='PATH', help='convert pretrained dataset')
parser.add_argument('-ft', '--fine-tuned', default='', type=str, metavar='PATH', help='convert fine-tuned dataset')
args = parser.parse_args()
if args.pretrained:
print('Converting pretrained dataset from', args.pretrained)
print('Extracting...')
with zipfile.ZipFile(args.pretrained, 'r') as archive:
archive.extractall('sh_pt')
print('Converting...')
output = {}
for subject in subjects:
output[subject] = {}
file_list = glob('sh_pt/h36m/' + subject + '/StackedHourglass/*.h5')
process_subject(subject, file_list, output)
print('Saving...')
np.savez_compressed(output_filename_pt, positions_2d=output, metadata=metadata)
print('Cleaning up...')
rmtree('sh_pt')
print('Done.')
if args.fine_tuned:
print('Converting fine-tuned dataset from', args.fine_tuned)
print('Extracting...')
with tarfile.open(args.fine_tuned, 'r:gz') as archive:
archive.extractall('sh_ft')
print('Converting...')
output = {}
for subject in subjects:
output[subject] = {}
file_list = glob('sh_ft/' + subject + '/StackedHourglassFineTuned240/*.h5')
process_subject(subject, file_list, output)
print('Saving...')
np.savez_compressed(output_filename_ft, positions_2d=output, metadata=metadata)
print('Cleaning up...')
rmtree('sh_ft')
print('Done.')
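# Example invocations (illustrative; run from the repository's "data"
# directory). The archive file names below are placeholders; only the
# -pt/--pretrained and -ft/--fine-tuned flags are defined above:
#
#   python prepare_data_2d_h36m_sh.py -pt stacked_hourglass_detections.zip
#   python prepare_data_2d_h36m_sh.py -ft stacked_hourglass_fine_tuned_240.tar.gz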
| 31.768116
| 119
| 0.62979
|
from __future__ import print_function, absolute_import, division
import argparse
import os
import zipfile
import tarfile
import numpy as np
import h5py
from glob import glob
from shutil import rmtree
import sys
sys.path.append('../')
from common.h36m_dataset import H36M_NAMES
output_filename_pt = 'data_2d_h36m_sh_pt_mpii'
output_filename_ft = 'data_2d_h36m_sh_ft_h36m'
subjects = ['S1', 'S5', 'S6', 'S7', 'S8', 'S9', 'S11']
cam_map = {
'54138969': 0,
'55011271': 1,
'58860488': 2,
'60457274': 3,
}
metadata = {
'num_joints': 16,
'keypoints_symmetry': [
[3, 4, 5, 13, 14, 15],
[2, 1, 0, 12, 11, 10],
]
}
SH_NAMES = [''] * 16
SH_NAMES[0] = 'RFoot'
SH_NAMES[1] = 'RKnee'
SH_NAMES[2] = 'RHip'
SH_NAMES[3] = 'LHip'
SH_NAMES[4] = 'LKnee'
SH_NAMES[5] = 'LFoot'
SH_NAMES[6] = 'Hip'
SH_NAMES[7] = 'Spine'
SH_NAMES[8] = 'Thorax'
SH_NAMES[9] = 'Head'
SH_NAMES[10] = 'RWrist'
SH_NAMES[11] = 'RElbow'
SH_NAMES[12] = 'RShoulder'
SH_NAMES[13] = 'LShoulder'
SH_NAMES[14] = 'LElbow'
SH_NAMES[15] = 'LWrist'
SH_TO_GT_PERM = np.array([SH_NAMES.index(h) for h in H36M_NAMES if h != '' and h in SH_NAMES])
assert np.all(SH_TO_GT_PERM == np.array([6, 2, 1, 0, 3, 4, 5, 7, 8, 9, 13, 14, 15, 12, 11, 10]))
metadata['keypoints_symmetry'][0] = [SH_TO_GT_PERM.tolist().index(h) for h in metadata['keypoints_symmetry'][0]]
metadata['keypoints_symmetry'][1] = [SH_TO_GT_PERM.tolist().index(h) for h in metadata['keypoints_symmetry'][1]]
def process_subject(subject, file_list, output):
if subject == 'S11':
assert len(file_list) == 119, "Expected 119 files for subject " + subject + ", got " + str(len(file_list))
else:
assert len(file_list) == 120, "Expected 120 files for subject " + subject + ", got " + str(len(file_list))
for f in file_list:
action, cam = os.path.splitext(os.path.basename(f))[0].replace('_', ' ').split('.')
if subject == 'S11' and action == 'Directions':
continue
if action not in output[subject]:
output[subject][action] = [None, None, None, None]
with h5py.File(f) as hf:
positions = np.array(hf['poses'])
positions = positions[:, SH_TO_GT_PERM, :]
output[subject][action][cam_map[cam]] = positions.astype('float32')
if __name__ == '__main__':
if os.path.basename(os.getcwd()) != 'data':
print('This script must be launched from the "data" directory')
exit(0)
parser = argparse.ArgumentParser(description='Human3.6M dataset downloader/converter')
parser.add_argument('-pt', '--pretrained', default='', type=str, metavar='PATH', help='convert pretrained dataset')
parser.add_argument('-ft', '--fine-tuned', default='', type=str, metavar='PATH', help='convert fine-tuned dataset')
args = parser.parse_args()
if args.pretrained:
print('Converting pretrained dataset from', args.pretrained)
print('Extracting...')
with zipfile.ZipFile(args.pretrained, 'r') as archive:
archive.extractall('sh_pt')
print('Converting...')
output = {}
for subject in subjects:
output[subject] = {}
file_list = glob('sh_pt/h36m/' + subject + '/StackedHourglass/*.h5')
process_subject(subject, file_list, output)
print('Saving...')
np.savez_compressed(output_filename_pt, positions_2d=output, metadata=metadata)
print('Cleaning up...')
rmtree('sh_pt')
print('Done.')
if args.fine_tuned:
print('Converting fine-tuned dataset from', args.fine_tuned)
print('Extracting...')
with tarfile.open(args.fine_tuned, 'r:gz') as archive:
archive.extractall('sh_ft')
print('Converting...')
output = {}
for subject in subjects:
output[subject] = {}
file_list = glob('sh_ft/' + subject + '/StackedHourglassFineTuned240/*.h5')
process_subject(subject, file_list, output)
print('Saving...')
np.savez_compressed(output_filename_ft, positions_2d=output, metadata=metadata)
print('Cleaning up...')
rmtree('sh_ft')
print('Done.')
| true
| true
|
79081fd3189187f98d1e806a77b8e869d24c78a8
| 8,388
|
py
|
Python
|
lpp/newlpp/lppTransform.py
|
exoplanetvetting/DAVE
|
aea19a30d987b214fb4c0cf01aa733f127c411b9
|
[
"MIT"
] | 7
|
2019-05-07T02:01:51.000Z
|
2022-03-16T08:09:39.000Z
|
lpp/newlpp/lppTransform.py
|
barentsen/dave
|
45ba97b7b535ad26dd555c33c963c6224a9af23c
|
[
"MIT"
] | 18
|
2015-12-09T22:18:59.000Z
|
2017-04-26T13:11:44.000Z
|
lpp/newlpp/lppTransform.py
|
barentsen/dave
|
45ba97b7b535ad26dd555c33c963c6224a9af23c
|
[
"MIT"
] | 5
|
2017-03-08T11:42:53.000Z
|
2020-05-07T00:10:37.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 23 20:32:12 2018
Functions to correctly fold and bin a light curve.
Calculate the lpp metric: transform to lower dimensions, knn
Depends on a class that reads in a previously created LPP metric map.
Depends on reading the light curve into a data structure.
input is a class called data
data contains
data.time (days)
data.tzero (day)
data.dur (hours)
data.period (days)
data.flux (normalized to 0)
After foldBinLightCurve it contains
data.binned
After transform it contains
data.lpp_transform
@author: smullally
"""
from __future__ import division
import numpy as np
from sklearn.neighbors import NearestNeighbors
from lpproj import LocalityPreservingProjection
import copy
def computeLPPTransitMetric(data,mapInfo):
"""
This function takes a data class with light curve info
and the mapInfo with information about the mapping to use.
It then returns a lpp metric value.
"""
binFlux, binPhase=foldBinLightCurve(data,mapInfo.ntrfr,mapInfo.npts)
#plt.figure()
#plt.plot(binPhase,binFlux,'.--')
#Dimensionality Reduction and knn parts
rawTLpp,transformedTransit=computeRawLPPTransitMetric(binFlux,mapInfo)
#Normalize by Period Dependence
normTLpp=periodNormalLPPTransitMetric(rawTLpp,np.array([data.period,data.mes]), mapInfo)
return normTLpp,rawTLpp,transformedTransit
def runningMedian(t,y,dt,runt):
"""
Take a running median of size dt
Return values at times given in runt
"""
newy=np.zeros(len(y))
newt=np.zeros(len(y))
srt = np.argsort(t)
newt = t[srt]
newy = y[srt]
runy=[]
for i in range(len(runt)):
tmp=[]
for j in range(len(newt)):
if (newt[j] >= (runt[i]-dt)) and (newt[j] <= (runt[i]+dt)):
tmp.append(newy[j])
if np.isnan(np.nanmedian(np.array(tmp))) :
runy.append(0)
else:
runy.append(np.nanmedian(np.array(tmp)))
return(list(runt),runy)
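# Worked example (illustrative), with numpy array inputs:
#   runningMedian(np.array([0,1,2,3]), np.array([0.,1.,2.,9.]), 1, [1, 2])
# uses the window [0,2] around runt=1 (values {0,1,2} -> median 1.0) and the window
# [1,3] around runt=2 (values {1,2,9} -> median 2.0), returning ([1, 2], [1.0, 2.0]).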
def foldBinLightCurve (data, ntrfr, npts):
"""
Fold and bin light curve for input to LPP metric calculation
    data contains time, tzero, dur, period, mes and flux (centered around zero)
    ntrfr -- number of transit durations (as a fraction) to bin around the transit, ~1.5
npts -- number of points in the final binning.
"""
#Create phase light curve
phaselc =np.mod((data.time-(data.tzero-0.5*data.period))/data.period,1)
flux=data.flux
mes=data.mes
#Determine the fraction of the time the planet transits the star.
    #Cap things so that ntrfr * transit fraction never exceeds half the folded phase.
if ~np.isnan(data.dur) & (data.dur >0):
transit_dur = data.dur
else:
transit_dur = 0.2 * data.period/24.
transit_fr=transit_dur/24./data.period
if (transit_fr * ntrfr) > 0.5 :
transit_fr = 0.5/ntrfr
#Specify the out of transit (a) and the in transit regions
binover=1.3
if mes <= 20:
binover=-(1/8.0)*mes + 3.8
endfr = .03
midfr= .11
a = np.concatenate((np.arange(endfr,.5-midfr,1/npts) , \
np.arange((0.5+midfr),(1-endfr),1/npts)), axis=None)
ovsamp=4.0
#bstep=(ovsamp*ntrfr*transit_fr)/npts
b_num=41
b =np.linspace((0.5-ntrfr*transit_fr),(0.5+ntrfr*transit_fr),b_num)
#print "length a: %u " % len(a)
#print "length b: %u" % len(b)
[runta,runya] = runningMedian(phaselc,flux,binover/npts,a)
[runtb,runyb] = runningMedian(phaselc,flux,\
(binover*ovsamp*ntrfr*transit_fr)/npts,b)
#Combine the two sets of bins
runymess=np.array(runya + runyb)
runtmess = np.array(runta + runtb)
srt=np.argsort(runtmess)
runy=runymess[srt]
runt=runtmess[srt]
#Scale the flux by the depth so everything has the same depth.
    #If the depth is zero, skip the scaling to avoid dividing by zero.
scale = -1*np.min(runyb)
if scale != 0:
scaledFlux=runy/scale
else:
scaledFlux=runy
binnedFlux=scaledFlux
phasebins=runt
return binnedFlux,phasebins
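# Illustrative layout of the two phase grids above (using the defaults endfr=0.03,
# midfr=0.11): "a" samples the out-of-transit baseline over phases [0.03, 0.39) and
# (0.61, 0.97) in steps of 1/npts, while "b" places b_num=41 oversampled points in a
# band of half-width ntrfr*transit_fr centered on phase 0.5, where the transit sits.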
def computeRawLPPTransitMetric(binFlux,mapInfo):
"""
Perform the matrix transformation with LPP
Do the knn test to get a raw LPP transit metric number.
"""
Yorig=mapInfo.YmapMapped
lpp=LocalityPreservingProjection(n_components=mapInfo.n_dim)
lpp.projection_=mapInfo.YmapM
    #To match the Matlab LPP methods, we need to subtract the map mean before transforming.
normBinFlux=binFlux-mapInfo.YmapMean
inputY=lpp.transform(normBinFlux.reshape(1,-1))
knownTransitsY=Yorig[mapInfo.knnGood,:]
dist,ind = knnDistance_fromKnown(knownTransitsY,inputY,mapInfo.knn)
rawLppTrMetric=np.mean(dist)
return rawLppTrMetric,inputY
def knnDistance_fromKnown(knownTransits,new,knn):
"""
For a group of known transits and a new one.
Use knn to determine how close the new one is to the known transits
using knn minkowski p = 3 ()
Using scipy signal to do this.
"""
#p=3 sets a minkowski distance of 3. #Check that you really used 3 for matlab.
nbrs=NearestNeighbors(n_neighbors=int(knn), algorithm='kd_tree', p=2)
nbrs.fit(knownTransits)
distances,indices = nbrs.kneighbors(new)
return distances, indices
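# Example (illustrative): with knownTransits=np.array([[0.],[1.],[4.]]) and
# new=np.array([[0.9]]), knnDistance_fromKnown(knownTransits, new, 2) returns
# distances of about [[0.1, 0.9]] and indices [[1, 0]] -- the two nearest rows.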
def periodNormalLPPTransitMetric(rawTLpp,newPerMes, mapInfo):
"""
Normalize the rawTransitMetric value by those with the closest period.
This part removes the period dependence of the metric at short periods.
Plus it makes a value near one be the threshold between good and bad.
newPerMes is the np.array([period, mes]) of the new sample
"""
knownTrPeriods=mapInfo.mappedPeriods[mapInfo.knnGood]
knownTrMes=mapInfo.mappedMes[mapInfo.knnGood]
knownTrrawLpp=mapInfo.dymeans[mapInfo.knnGood]
nPercentil=mapInfo.nPercentil
nPsample=mapInfo.nPsample
    #Find the nPsample nearest neighbors in (log period, log MES) space.
logPeriods=np.log10(knownTrPeriods)
logMes=np.log10(knownTrMes)
knownPerMes=np.stack((logPeriods, logMes), axis=-1)
np.shape(knownPerMes)
logNew=np.log10(newPerMes).reshape(1,-1)
#logNew=np.array([np.log10(newPeriod)]).reshape(1,1)
dist,ind = knnDistance_fromKnown(knownPerMes,logNew,nPsample)
    #Find the nPercentil-th percentile of the rawLpp of these indices
nearPeriodLpp=knownTrrawLpp[ind]
LppNPercentile = np.percentile(nearPeriodLpp,nPercentil)
NormLppTransitMetric=rawTLpp/LppNPercentile
return NormLppTransitMetric
def lpp_onetransit(tcedata,mapInfo,ntransit):
"""
Chop down the full time series to one orbital period.
Then gather the lpp value for that one transit.
"""
startTime=tcedata.time[0]+ntransit*tcedata.period
endTime=tcedata.time[0]+(ntransit+1)*tcedata.period + 3/24.0 #A few cadences of overlap
want=(tcedata.time>=startTime) & (tcedata.time<=endTime)
newtime=tcedata.time[want]
newflux=tcedata.flux[want]
nExpCad=(tcedata.time[-1]-tcedata.time[0])/tcedata.period
    if len(newtime) > nExpCad*0.75:
onetransit=copy.deepcopy(tcedata)
onetransit.time=newtime
onetransit.flux=newflux
normTLpp, rawTLpp, transformedTr=computeLPPTransitMetric(onetransit,mapInfo)
else:
normTLpp=np.nan
rawTLpp=np.nan
return normTLpp,rawTLpp
def lpp_averageIndivTransit(tcedata,mapInfo):
"""
    Loop over the individual transits and return an array of
    normalized lpp values along with their median and std.
    Input TCE object and mapInfo object.
    It is unclear whether this individual-transit approach
    separates out new false positives; it would probably
    require retuning for low-SNR signals.
"""
length=tcedata.time[-1]-tcedata.time[0]
ntransits=int(np.floor(length/tcedata.period))
lppNorms=np.ones(ntransits)
lppRaws=np.ones(ntransits)
nExpCad=(tcedata.time[-1]-tcedata.time[0])/tcedata.period
for i in range(ntransits):
lppNorms[i],lppRaws[i] = lpp_onetransit(tcedata,mapInfo,i)
lppMed=np.nanmedian(lppNorms)
lppStd=np.nanstd(lppNorms)
return lppNorms,lppMed, lppStd, ntransits
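# End-to-end usage sketch (mapInfo is assumed to be loaded elsewhere from a
# previously built LPP map; variable names here are illustrative):
#   normTLpp, rawTLpp, transformed = computeLPPTransitMetric(data, mapInfo)
#   norms, med, std, n = lpp_averageIndivTransit(data, mapInfo)
# Per the normalization above, a normTLpp near one marks the rough boundary
# between transit-like and non-transit-like folded shapes.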
| 28.627986
| 92
| 0.66762
|
from __future__ import division
import numpy as np
from sklearn.neighbors import NearestNeighbors
from lpproj import LocalityPreservingProjection
import copy
def computeLPPTransitMetric(data,mapInfo):
binFlux, binPhase=foldBinLightCurve(data,mapInfo.ntrfr,mapInfo.npts)
rawTLpp,transformedTransit=computeRawLPPTransitMetric(binFlux,mapInfo)
normTLpp=periodNormalLPPTransitMetric(rawTLpp,np.array([data.period,data.mes]), mapInfo)
return normTLpp,rawTLpp,transformedTransit
def runningMedian(t,y,dt,runt):
newy=np.zeros(len(y))
newt=np.zeros(len(y))
srt = np.argsort(t)
newt = t[srt]
newy = y[srt]
runy=[]
for i in range(len(runt)):
tmp=[]
for j in range(len(newt)):
if (newt[j] >= (runt[i]-dt)) and (newt[j] <= (runt[i]+dt)):
tmp.append(newy[j])
if np.isnan(np.nanmedian(np.array(tmp))) :
runy.append(0)
else:
runy.append(np.nanmedian(np.array(tmp)))
return(list(runt),runy)
def foldBinLightCurve (data, ntrfr, npts):
phaselc =np.mod((data.time-(data.tzero-0.5*data.period))/data.period,1)
flux=data.flux
mes=data.mes
if ~np.isnan(data.dur) & (data.dur >0):
transit_dur = data.dur
else:
transit_dur = 0.2 * data.period/24.
transit_fr=transit_dur/24./data.period
if (transit_fr * ntrfr) > 0.5 :
transit_fr = 0.5/ntrfr
binover=1.3
if mes <= 20:
binover=-(1/8.0)*mes + 3.8
endfr = .03
midfr= .11
a = np.concatenate((np.arange(endfr,.5-midfr,1/npts) , \
np.arange((0.5+midfr),(1-endfr),1/npts)), axis=None)
ovsamp=4.0
b_num=41
b =np.linspace((0.5-ntrfr*transit_fr),(0.5+ntrfr*transit_fr),b_num)
[runta,runya] = runningMedian(phaselc,flux,binover/npts,a)
[runtb,runyb] = runningMedian(phaselc,flux,\
(binover*ovsamp*ntrfr*transit_fr)/npts,b)
runymess=np.array(runya + runyb)
runtmess = np.array(runta + runtb)
srt=np.argsort(runtmess)
runy=runymess[srt]
runt=runtmess[srt]
scale = -1*np.min(runyb)
if scale != 0:
scaledFlux=runy/scale
else:
scaledFlux=runy
binnedFlux=scaledFlux
phasebins=runt
return binnedFlux,phasebins
def computeRawLPPTransitMetric(binFlux,mapInfo):
Yorig=mapInfo.YmapMapped
lpp=LocalityPreservingProjection(n_components=mapInfo.n_dim)
lpp.projection_=mapInfo.YmapM
normBinFlux=binFlux-mapInfo.YmapMean
inputY=lpp.transform(normBinFlux.reshape(1,-1))
knownTransitsY=Yorig[mapInfo.knnGood,:]
dist,ind = knnDistance_fromKnown(knownTransitsY,inputY,mapInfo.knn)
rawLppTrMetric=np.mean(dist)
return rawLppTrMetric,inputY
def knnDistance_fromKnown(knownTransits,new,knn):
    nbrs=NearestNeighbors(n_neighbors=int(knn), algorithm='kd_tree', p=2)
nbrs.fit(knownTransits)
distances,indices = nbrs.kneighbors(new)
return distances, indices
def periodNormalLPPTransitMetric(rawTLpp,newPerMes, mapInfo):
knownTrPeriods=mapInfo.mappedPeriods[mapInfo.knnGood]
knownTrMes=mapInfo.mappedMes[mapInfo.knnGood]
knownTrrawLpp=mapInfo.dymeans[mapInfo.knnGood]
nPercentil=mapInfo.nPercentil
nPsample=mapInfo.nPsample
logPeriods=np.log10(knownTrPeriods)
logMes=np.log10(knownTrMes)
knownPerMes=np.stack((logPeriods, logMes), axis=-1)
np.shape(knownPerMes)
logNew=np.log10(newPerMes).reshape(1,-1)
dist,ind = knnDistance_fromKnown(knownPerMes,logNew,nPsample)
nearPeriodLpp=knownTrrawLpp[ind]
LppNPercentile = np.percentile(nearPeriodLpp,nPercentil)
NormLppTransitMetric=rawTLpp/LppNPercentile
return NormLppTransitMetric
def lpp_onetransit(tcedata,mapInfo,ntransit):
startTime=tcedata.time[0]+ntransit*tcedata.period
endTime=tcedata.time[0]+(ntransit+1)*tcedata.period + 3/24.0
want=(tcedata.time>=startTime) & (tcedata.time<=endTime)
newtime=tcedata.time[want]
newflux=tcedata.flux[want]
nExpCad=(tcedata.time[-1]-tcedata.time[0])/tcedata.period
    if len(newtime) > nExpCad*0.75:
onetransit=copy.deepcopy(tcedata)
onetransit.time=newtime
onetransit.flux=newflux
normTLpp, rawTLpp, transformedTr=computeLPPTransitMetric(onetransit,mapInfo)
else:
normTLpp=np.nan
rawTLpp=np.nan
return normTLpp,rawTLpp
def lpp_averageIndivTransit(tcedata,mapInfo):
length=tcedata.time[-1]-tcedata.time[0]
ntransits=int(np.floor(length/tcedata.period))
lppNorms=np.ones(ntransits)
lppRaws=np.ones(ntransits)
nExpCad=(tcedata.time[-1]-tcedata.time[0])/tcedata.period
for i in range(ntransits):
lppNorms[i],lppRaws[i] = lpp_onetransit(tcedata,mapInfo,i)
lppMed=np.nanmedian(lppNorms)
lppStd=np.nanstd(lppNorms)
return lppNorms,lppMed, lppStd, ntransits
| true
| true
|
790820f81db747211f4813bf9cfafdfa5ae0200d
| 33,180
|
py
|
Python
|
tests/cases/item_test.py
|
RemiCecchinato/girder
|
455d5c60d59112b65b45daf51c2d2ccda2e84a9a
|
[
"Apache-2.0"
] | null | null | null |
tests/cases/item_test.py
|
RemiCecchinato/girder
|
455d5c60d59112b65b45daf51c2d2ccda2e84a9a
|
[
"Apache-2.0"
] | null | null | null |
tests/cases/item_test.py
|
RemiCecchinato/girder
|
455d5c60d59112b65b45daf51c2d2ccda2e84a9a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import io
import json
import shutil
import six
import zipfile
from .. import base
from girder.constants import AccessType
from girder.models.assetstore import Assetstore
from girder.models.folder import Folder
from girder.models.item import Item
from girder.models.token import Token
from girder.models.user import User
def setUpModule():
base.startServer()
def tearDownModule():
base.stopServer()
class ItemTestCase(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
# Create a set of users so we can have some folders.
self.users = [User().createUser(
'usr%s' % num, 'passwd', 'tst', 'usr', 'u%s@u.com' % num)
for num in [0, 1]]
folders = Folder().childFolders(self.users[0], 'user', user=self.users[0])
for folder in folders:
if folder['name'] == 'Public':
self.publicFolder = folder
else:
self.privateFolder = folder
self.assetstore = Assetstore().getCurrent()
root = self.assetstore['root']
# Clean out the test assetstore on disk
shutil.rmtree(root)
# First clean out the temp directory
tmpdir = os.path.join(root, 'temp')
if os.path.isdir(tmpdir):
for tempname in os.listdir(tmpdir):
os.remove(os.path.join(tmpdir, tempname))
def _createItem(self, parentId, name, description, user):
params = {
'name': name,
'description': description,
'folderId': parentId
}
resp = self.request(path='/item', method='POST', params=params,
user=user)
self.assertStatusOk(resp)
assert 'meta' in resp.json
return resp.json
def _testUploadFileToItem(self, item, name, user, contents):
"""
Uploads a non-empty file to the server.
"""
# Initialize the upload
resp = self.request(
path='/file', method='POST', user=user, params={
'parentType': 'item',
'parentId': item['_id'],
'name': name,
'size': len(contents)
})
self.assertStatusOk(resp)
uploadId = resp.json['_id']
# Send the first chunk
resp = self.request(
path='/file/chunk', method='POST', body=contents, user=user, params={
'uploadId': uploadId
}, type='application/octet-stream')
self.assertStatusOk(resp)
def _testDownloadSingleFileItem(self, item, user, contents):
"""
Downloads a single-file item from the server
:param item: The item to download.
:type item: dict
:param contents: The expected contents.
:type contents: str
"""
resp = self.request(path='/item/%s/download' % item['_id'],
method='GET', user=user, isJson=False)
self.assertStatusOk(resp)
self.assertEqual(contents, self.getBody(resp))
self.assertEqual(resp.headers['Content-Disposition'],
'attachment; filename="file_1"')
# Test downloading the item with contentDisposition=inline.
params = {'contentDisposition': 'inline'}
resp = self.request(path='/item/%s/download' % item['_id'],
method='GET', user=user, isJson=False,
params=params)
self.assertStatusOk(resp)
self.assertEqual(contents, self.getBody(resp))
self.assertEqual(resp.headers['Content-Disposition'],
'inline; filename="file_1"')
# Test downloading with an offset
resp = self.request(path='/item/%s/download' % item['_id'],
method='GET', user=user, isJson=False,
params={'offset': 1})
self.assertStatus(resp, 206)
self.assertEqual(contents[1:], self.getBody(resp))
def _testDownloadMultiFileItem(self, item, user, contents, format=None):
params = None
if format:
params = {'format': format}
resp = self.request(path='/item/%s/download' % item['_id'],
method='GET', user=user, isJson=False,
params=params)
self.assertStatusOk(resp)
zipFile = zipfile.ZipFile(io.BytesIO(self.getBody(resp, text=False)),
'r')
prefix = os.path.split(zipFile.namelist()[0])[0]
expectedZip = {}
for name in contents:
expectedZip[os.path.join(prefix, name)] = contents[name]
self.assertHasKeys(expectedZip, zipFile.namelist())
self.assertHasKeys(zipFile.namelist(), expectedZip)
for name in zipFile.namelist():
expected = expectedZip[name]
if not isinstance(expected, six.binary_type):
expected = expected.encode('utf8')
self.assertEqual(expected, zipFile.read(name))
def testLegacyItems(self):
folder = Folder().createFolder(
parent=self.users[0], parentType='user', creator=self.users[0],
name='New Folder')
item = Item().createItem(
name='LegacyItem',
creator=self.users[0],
folder=folder)
del item['meta']
item = Item().save(item)
assert 'meta' not in item
item = Item().load(item['_id'], user=self.users[0])
assert 'meta' in item
def testItemDownloadAndChildren(self):
curItem = self._createItem(self.publicFolder['_id'],
'test_for_download', 'fake description',
self.users[0])
self._testUploadFileToItem(curItem, 'file_1', self.users[0], 'foobar')
self._testDownloadSingleFileItem(curItem, self.users[0], 'foobar')
self._testDownloadMultiFileItem(curItem, self.users[0],
{'file_1': 'foobar'}, format='zip')
self._testUploadFileToItem(curItem, 'file_2', self.users[0], 'foobz')
resp = self.request(path='/item/%s/files' % curItem['_id'],
method='GET', user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['name'], 'file_1')
self.assertEqual(resp.json[1]['name'], 'file_2')
self.assertEqual(resp.json[0]['size'], 6)
self.assertEqual(resp.json[1]['size'], 5)
self._testDownloadMultiFileItem(curItem, self.users[0],
{'file_1': 'foobar', 'file_2': 'foobz'})
def testItemCrud(self):
"""
Test Create, Read, Update, and Delete of items.
"""
self.ensureRequiredParams(
path='/item', method='POST', required=('folderId',),
user=self.users[1])
# Attempt to create an item without write permission, should fail
params = {
'name': ' ',
'description': ' a description ',
'folderId': self.publicFolder['_id']
}
resp = self.request(path='/item', method='POST', params=params,
user=self.users[1])
self.assertStatus(resp, 403)
# Shouldn't be allowed to have an empty name
resp = self.request(path='/item', method='POST', params=params,
user=self.users[0])
self.assertValidationError(resp, 'name')
# Actually create the item in user 0's private folder
params['name'] = ' my item name'
params['folderId'] = self.privateFolder['_id']
resp = self.request(path='/item', method='POST', params=params,
user=self.users[0])
self.assertStatusOk(resp)
item = resp.json
self.assertEqual(item['name'], params['name'].strip())
self.assertEqual(item['description'], params['description'].strip())
# User 1 should not be able to see the item via find by folderId
params = {
'folderId': self.privateFolder['_id']
}
resp = self.request(path='/item', method='GET', user=self.users[1],
params=params)
self.assertStatus(resp, 403)
# Or by just requesting the item itself by ID
resp = self.request(path='/item/%s' % str(item['_id']), method='GET',
user=self.users[1])
self.assertStatus(resp, 403)
# User 0 should be able to see the item
resp = self.request(path='/item/%s' % str(item['_id']), method='GET',
user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json['_id'], item['_id'])
self.assertEqual(resp.json['_modelType'], 'item')
# Also from the children call
resp = self.request(path='/item', method='GET', user=self.users[0],
params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
# Test finding the item using a text string with and without a folderId
params['text'] = 'my item name'
resp = self.request(path='/item', method='GET', user=self.users[0],
params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
del params['folderId']
resp = self.request(path='/item', method='GET', user=self.users[0],
params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
# A limit should work
params['limit'] = 1
resp = self.request(path='/item', method='GET', user=self.users[0],
params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
# An offset should give us nothing
params['offset'] = 1
resp = self.request(path='/item', method='GET', user=self.users[0],
params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 0)
# Finding should fail with no parameters
resp = self.request(path='/item', method='GET', user=self.users[0],
params={})
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Invalid search mode.')
# Test update of the item
params = {
'name': 'changed name',
'description': 'new description'
}
resp = self.request(path='/item/%s' % item['_id'], method='PUT',
params=params, user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json['name'], params['name'])
self.assertEqual(resp.json['description'], params['description'])
# Test moving an item to the public folder
item = Item().load(item['_id'], force=True)
self.assertFalse(Item().hasAccess(item))
resp = self.request(path='/item/%s' % item['_id'], method='PUT',
user=self.users[0], params={
'folderId': self.publicFolder['_id']})
self.assertStatusOk(resp)
item = Item().load(resp.json['_id'], force=True)
self.assertTrue(Item().hasAccess(item))
# Move should fail if we don't have write permission on the
# destination folder
self.publicFolder = Folder().setUserAccess(
self.publicFolder, self.users[1], AccessType.WRITE, save=True)
resp = self.request(path='/item/%s' % item['_id'], method='PUT',
user=self.users[1], params={
'folderId': self.privateFolder['_id']})
self.assertStatus(resp, 403)
self.assertTrue(resp.json['message'].startswith(
'Write access denied for folder'))
# Try to update/PUT without an id
resp = self.request(path='/item/', method='PUT',
params=params, user=self.users[0])
self.assertStatus(resp, 400)
# Try a bad endpoint (should 400)
resp = self.request(path='/item/%s/blurgh' % item['_id'],
method='GET',
user=self.users[1])
self.assertStatus(resp, 400)
# Try delete with no ID (should 400)
resp = self.request(path='/item/', method='DELETE', user=self.users[1])
self.assertStatus(resp, 400)
# User 1 should not be able to delete the item with read access
self.publicFolder = Folder().setUserAccess(
self.publicFolder, self.users[1], AccessType.READ, save=True)
resp = self.request(path='/item/%s' % str(item['_id']), method='DELETE',
user=self.users[1])
self.assertStatus(resp, 403)
# User 1 should be able to delete the item with write access
self.publicFolder = Folder().setUserAccess(
self.publicFolder, self.users[1], AccessType.WRITE, save=True)
resp = self.request(path='/item/%s' % str(item['_id']), method='DELETE',
user=self.users[1])
self.assertStatusOk(resp)
# Verify that the item is deleted
item = Item().load(item['_id'])
self.assertEqual(item, None)
def testItemMetadataDirect(self):
params = {
'name': 'item with metadata via POST',
'description': ' a description ',
'folderId': self.privateFolder['_id'],
'metadata': 'not JSON'
}
resp = self.request(
path='/item', method='POST', params=params, user=self.users[0])
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Parameter metadata must be valid JSON.')
# Add some metadata
metadata = {
'foo': 'bar',
'test': 2
}
params['metadata'] = json.dumps(metadata)
resp = self.request(
path='/item', method='POST', params=params, user=self.users[0])
self.assertStatusOk(resp)
item = resp.json
self.assertEqual(item['meta']['foo'], metadata['foo'])
self.assertEqual(item['meta']['test'], metadata['test'])
metadata = {
'foo': None,
'test': 3,
'bar': 'baz'
}
resp = self.request(
path='/item/{_id}'.format(**item), method='PUT',
user=self.users[0], params={'metadata': json.dumps(metadata)}
)
self.assertStatusOk(resp)
item = resp.json
self.assertNotHasKeys(item['meta'], ['foo'])
self.assertEqual(item['meta']['test'], metadata['test'])
self.assertEqual(item['meta']['bar'], metadata['bar'])
def testItemMetadataCrud(self):
"""
Test CRUD of metadata.
"""
# Create an item
params = {
'name': 'item with metadata',
'description': ' a description ',
'folderId': self.privateFolder['_id']
}
resp = self.request(path='/item', method='POST', params=params,
user=self.users[0])
self.assertStatusOk(resp)
item = resp.json
# Try to delete metadata from an item that doesn't have any set on it
# yet.
resp = self.request(path='/item/%s/metadata' % (item['_id']),
method='DELETE', user=self.users[0],
body=json.dumps(['foobar']), type='application/json')
item = resp.json
self.assertStatusOk(resp)
self.assertEqual(item['meta'], {})
# Add some metadata
metadata = {
'foo': 'bar',
'test': 2
}
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
item = resp.json
self.assertEqual(item['meta']['foo'], metadata['foo'])
self.assertEqual(item['meta']['test'], metadata['test'])
# Test invalid JSON constants
body = '{"key": {"foo": Infinity}}'
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=body, type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Error: "Infinity" is not valid JSON.')
# Edit and remove metadata
metadata['test'] = None
metadata['foo'] = 'baz'
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
item = resp.json
self.assertEqual(item['meta']['foo'], metadata['foo'])
self.assertNotHasKeys(item['meta'], ['test'])
# Test insertion of null values
metadata['nullVal'] = None
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), params={'allowNull': True},
type='application/json')
item = resp.json
self.assertEqual(item['meta']['nullVal'], None)
# Adding an unrelated key should not affect existing keys
del metadata['nullVal']
metadata['other'] = 'macguffin'
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
item = resp.json
self.assertEqual(item['meta']['other'], metadata['other'])
self.assertEqual(item['meta']['nullVal'], None)
# Test metadata deletion
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='DELETE', user=self.users[0],
body=json.dumps(['other']), type='application/json')
item = resp.json
self.assertNotHasKeys(item['meta'], ['other'])
# Error when deletion field names contain a period.
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='DELETE', user=self.users[0],
body=json.dumps(['foo', 'foo.bar']), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Invalid key foo.bar: keys must not contain the "." character.')
# Error when deletion field names begin with a dollar-sign.
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='DELETE', user=self.users[0],
body=json.dumps(['foo', '$bar']), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Invalid key $bar: keys must not start with the "$" character.')
# Make sure metadata cannot be added with invalid JSON
metadata = {
'test': 'allowed'
}
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata).replace('"', "'"),
type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'],
'Invalid JSON passed in request body.')
# Make sure metadata cannot be added if there is a period in the key
# name
metadata = {
'foo.bar': 'notallowed'
}
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Invalid key foo.bar: keys must not contain the "." character.')
# Make sure metadata cannot be added if the key begins with a
# dollar sign
metadata = {
'$foobar': 'alsonotallowed'
}
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'],
'Invalid key $foobar: keys must not start with the "$" character.')
# Make sure metadata cannot be added with a blank key
metadata = {
'': 'stillnotallowed'
}
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Key names must not be empty.')
def testItemFiltering(self):
"""
Test filtering private metadata from items.
"""
# Create an item
params = {
'name': 'item with metadata',
'description': ' a description ',
'folderId': self.privateFolder['_id']
}
resp = self.request(path='/item', method='POST', params=params,
user=self.users[0])
self.assertStatusOk(resp)
# get the item object from the database
item = Item().load(resp.json['_id'], force=True)
# set a private property
item['private'] = 'very secret metadata'
item = Item().save(item)
# get the item from the rest api
resp = self.request(path='/item/%s' % str(item['_id']), method='GET',
user=self.users[0])
self.assertStatusOk(resp)
# assert that the private data is not included
self.assertNotHasKeys(resp.json, ['private'])
def testPathToRoot(self):
firstChildName = 'firstChild'
firstChildDesc = 'firstDesc'
secondChildName = 'secondChild'
secondChildDesc = 'secondDesc'
firstChild = Folder().createFolder(
self.publicFolder, firstChildName, firstChildDesc, creator=self.users[0])
secondChild = Folder().createFolder(
firstChild, secondChildName, secondChildDesc, creator=self.users[0])
baseItem = Item().createItem('blah', self.users[0], secondChild, 'foo')
resp = self.request(path='/item/%s/rootpath' % baseItem['_id'], method='GET')
self.assertStatusOk(resp)
pathToRoot = resp.json
self.assertEqual(pathToRoot[0]['type'], 'user')
self.assertEqual(pathToRoot[0]['object']['login'],
self.users[0]['login'])
self.assertEqual(pathToRoot[1]['type'], 'folder')
self.assertEqual(pathToRoot[1]['object']['name'],
self.publicFolder['name'])
self.assertEqual(pathToRoot[2]['type'], 'folder')
self.assertEqual(pathToRoot[2]['object']['name'], firstChild['name'])
self.assertEqual(pathToRoot[3]['type'], 'folder')
self.assertEqual(pathToRoot[3]['object']['name'], secondChild['name'])
def testLazyFieldComputation(self):
"""
Demonstrate that an item that is saved in the database without
derived fields (like lowerName or baseParentId) get those values
computed at load() time.
"""
item = Item().createItem('My Item Name', creator=self.users[0], folder=self.publicFolder)
self.assertEqual(item['lowerName'], 'my item name')
self.assertEqual(item['baseParentId'], self.users[0]['_id'])
# Force the item to be saved without lowerName and baseParentType fields
del item['lowerName']
del item['baseParentType']
item = Item().save(item, validate=False)
item = Item().find({'_id': item['_id']})[0]
self.assertNotHasKeys(item, ('lowerName', 'baseParentType'))
# Now ensure that calling load() actually populates those fields and
# saves the results persistently
Item().load(item['_id'], force=True)
item = Item().find({'_id': item['_id']})[0]
self.assertHasKeys(item, ('lowerName', 'baseParentType'))
self.assertEqual(item['lowerName'], 'my item name')
self.assertEqual(item['baseParentType'], 'user')
self.assertEqual(item['baseParentId'], self.users[0]['_id'])
# Also test that this works for a duplicate item, such that the
# automatically renamed item still has the correct lowerName, and a
# None description is changed to an empty string.
item = Item().createItem(
'My Item Name', creator=self.users[0], folder=self.publicFolder, description=None)
# test if non-strings are coerced
self.assertEqual(item['description'], '')
item['description'] = 1
item = Item().save(item)
item = Item().findOne({'_id': item['_id']})
self.assertEqual(item['description'], '1')
# test if just missing lowerName is corrected.
self.assertEqual(item['lowerName'], 'my item name (1)')
del item['lowerName']
item = Item().save(item, validate=False)
item = Item().findOne({'_id': item['_id']})
self.assertNotHasKeys(item, ('lowerName', ))
Item().load(item['_id'], force=True)
item = Item().findOne({'_id': item['_id']})
self.assertHasKeys(item, ('lowerName', ))
self.assertEqual(item['lowerName'], 'my item name (1)')
def testParentsToRoot(self):
"""
Demonstrate that forcing parentsToRoot will cause it to skip the
filtering process.
"""
item = Item().createItem('My Item Name', creator=self.users[0], folder=self.publicFolder)
parents = Item().parentsToRoot(item, force=True)
for parent in parents:
self.assertNotIn('_accessLevel', parent['object'])
parents = Item().parentsToRoot(item)
for parent in parents:
self.assertIn('_accessLevel', parent['object'])
def testItemCopy(self):
origItem = self._createItem(self.publicFolder['_id'],
'test_for_copy', 'fake description',
self.users[0])
# Add metadata and files, since we want to make sure those get copied
metadata = {
'foo': 'value1',
'test': 2
}
resp = self.request(
path='/item/%s/metadata' % origItem['_id'], method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
self.assertStatusOk(resp)
self._testUploadFileToItem(origItem, 'file_1', self.users[0], 'foobar')
self._testUploadFileToItem(origItem, 'file_2', self.users[0], 'foobz')
# Also upload a link
params = {
'parentType': 'item',
'parentId': origItem['_id'],
'name': 'link_file',
'linkUrl': 'http://www.google.com'
}
resp = self.request(path='/file', method='POST', user=self.users[0],
params=params)
self.assertStatusOk(resp)
# Copy to a new item. It will be in the same folder, but we want a
# different name.
params = {
'name': 'copied_item'
}
resp = self.request(path='/item/%s/copy' % origItem['_id'],
method='POST', user=self.users[0], params=params)
self.assertStatusOk(resp)
# Make sure size was returned correctly
self.assertEqual(resp.json['size'], 11)
# Now ask for the new item explicitly and check its metadata
        resp = self.request(path='/item/%s' % resp.json['_id'],
                            user=self.users[0], type='application/json')
self.assertStatusOk(resp)
newItem = resp.json
self.assertEqual(newItem['name'], 'copied_item')
self.assertEqual(newItem['meta']['foo'], metadata['foo'])
self.assertEqual(newItem['meta']['test'], metadata['test'])
# Check if we can download the files from the new item
resp = self.request(path='/item/%s/files' % newItem['_id'],
method='GET', user=self.users[0])
self.assertStatusOk(resp)
newFiles = resp.json
self.assertEqual(newFiles[0]['name'], 'file_1')
self.assertEqual(newFiles[1]['name'], 'file_2')
self.assertEqual(newFiles[2]['name'], 'link_file')
self.assertEqual(newFiles[0]['size'], 6)
self.assertEqual(newFiles[1]['size'], 5)
self._testDownloadMultiFileItem(newItem, self.users[0],
{'file_1': 'foobar', 'file_2': 'foobz',
'link_file': 'http://www.google.com'})
# Check to make sure the original item is still present
resp = self.request(path='/item', method='GET', user=self.users[0],
params={'folderId': self.publicFolder['_id'],
'text': 'test_for_copy'})
self.assertStatusOk(resp)
self.assertEqual(origItem['_id'], resp.json[0]['_id'])
# Check to make sure the new item is still present
resp = self.request(path='/item', method='GET', user=self.users[0],
params={'folderId': self.publicFolder['_id'],
'text': 'copied_item'})
self.assertStatusOk(resp)
self.assertEqual(newItem['_id'], resp.json[0]['_id'])
# Check that the provenance tag correctly points back
# to the original item
self.assertEqual(newItem['copyOfItem'], origItem['_id'])
        # Check that we can download the files from the original item and that
        # they are distinct from the files in the copied item
resp = self.request(path='/item/%s/files' % origItem['_id'],
method='GET', user=self.users[0])
self.assertStatusOk(resp)
origFiles = resp.json
self._testDownloadMultiFileItem(origItem, self.users[0],
{'file_1': 'foobar', 'file_2': 'foobz',
'link_file': 'http://www.google.com'})
for index, file in enumerate(origFiles):
self.assertNotEqual(origFiles[index]['_id'],
newFiles[index]['_id'])
def testCookieAuth(self):
"""
We make sure a cookie is sufficient for authentication for the item
download endpoint. Also, while we're at it, we make sure it's not
sufficient for other endpoints.
"""
item = self._createItem(self.privateFolder['_id'],
'cookie_auth_download', '', self.users[0])
self._testUploadFileToItem(item, 'file', self.users[0], 'foo')
token = Token().createToken(self.users[0])
cookie = 'girderToken=%s' % token['_id']
# We should be able to download a private item using a cookie token
resp = self.request(path='/item/%s/download' % item['_id'],
isJson=False, cookie=cookie)
self.assertStatusOk(resp)
self.assertEqual(self.getBody(resp), 'foo')
# We should not be able to call GET /item/:id with a cookie token
resp = self.request(path='/item/%s' % item['_id'], cookie=cookie)
self.assertStatus(resp, 401)
# Make sure the cookie has to be a valid token
resp = self.request(path='/item/%s/download' % item['_id'],
cookie='girderToken=invalid_token')
self.assertStatus(resp, 401)
def testReuseExisting(self):
item1 = Item().createItem('to be reused', creator=self.users[0], folder=self.publicFolder)
item2 = Item().createItem('to be reused', creator=self.users[0], folder=self.publicFolder)
item3 = Item().createItem(
'to be reused', creator=self.users[0], folder=self.publicFolder, reuseExisting=True)
self.assertNotEqual(item1['_id'], item2['_id'])
self.assertEqual(item1['_id'], item3['_id'])
self.assertEqual(item2['name'], 'to be reused (1)')
self.assertEqual(item3['name'], 'to be reused')
def testUpdateDuplicatedName(self):
item1 = Item().createItem('foo', creator=self.users[0], folder=self.publicFolder)
item2 = Item().createItem('bar', creator=self.users[0], folder=self.publicFolder)
item2['name'] = 'foo'
Item().save(item2, validate=False)
self.assertEqual(item2['name'], 'foo')
item1['size'] = 3
Item().save(item1)
self.assertEqual(item1['name'], 'foo')
| 42.053232
| 98
| 0.555787
|
import os
import io
import json
import shutil
import six
import zipfile
from .. import base
from girder.constants import AccessType
from girder.models.assetstore import Assetstore
from girder.models.folder import Folder
from girder.models.item import Item
from girder.models.token import Token
from girder.models.user import User
def setUpModule():
base.startServer()
def tearDownModule():
base.stopServer()
class ItemTestCase(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
self.users = [User().createUser(
'usr%s' % num, 'passwd', 'tst', 'usr', 'u%s@u.com' % num)
for num in [0, 1]]
folders = Folder().childFolders(self.users[0], 'user', user=self.users[0])
for folder in folders:
if folder['name'] == 'Public':
self.publicFolder = folder
else:
self.privateFolder = folder
self.assetstore = Assetstore().getCurrent()
root = self.assetstore['root']
shutil.rmtree(root)
tmpdir = os.path.join(root, 'temp')
if os.path.isdir(tmpdir):
for tempname in os.listdir(tmpdir):
os.remove(os.path.join(tmpdir, tempname))
def _createItem(self, parentId, name, description, user):
params = {
'name': name,
'description': description,
'folderId': parentId
}
resp = self.request(path='/item', method='POST', params=params,
user=user)
self.assertStatusOk(resp)
assert 'meta' in resp.json
return resp.json
def _testUploadFileToItem(self, item, name, user, contents):
resp = self.request(
path='/file', method='POST', user=user, params={
'parentType': 'item',
'parentId': item['_id'],
'name': name,
'size': len(contents)
})
self.assertStatusOk(resp)
uploadId = resp.json['_id']
resp = self.request(
path='/file/chunk', method='POST', body=contents, user=user, params={
'uploadId': uploadId
}, type='application/octet-stream')
self.assertStatusOk(resp)
def _testDownloadSingleFileItem(self, item, user, contents):
resp = self.request(path='/item/%s/download' % item['_id'],
method='GET', user=user, isJson=False)
self.assertStatusOk(resp)
self.assertEqual(contents, self.getBody(resp))
self.assertEqual(resp.headers['Content-Disposition'],
'attachment; filename="file_1"')
params = {'contentDisposition': 'inline'}
resp = self.request(path='/item/%s/download' % item['_id'],
method='GET', user=user, isJson=False,
params=params)
self.assertStatusOk(resp)
self.assertEqual(contents, self.getBody(resp))
self.assertEqual(resp.headers['Content-Disposition'],
'inline; filename="file_1"')
resp = self.request(path='/item/%s/download' % item['_id'],
method='GET', user=user, isJson=False,
params={'offset': 1})
self.assertStatus(resp, 206)
self.assertEqual(contents[1:], self.getBody(resp))
def _testDownloadMultiFileItem(self, item, user, contents, format=None):
params = None
if format:
params = {'format': format}
resp = self.request(path='/item/%s/download' % item['_id'],
method='GET', user=user, isJson=False,
params=params)
self.assertStatusOk(resp)
zipFile = zipfile.ZipFile(io.BytesIO(self.getBody(resp, text=False)),
'r')
prefix = os.path.split(zipFile.namelist()[0])[0]
expectedZip = {}
for name in contents:
expectedZip[os.path.join(prefix, name)] = contents[name]
self.assertHasKeys(expectedZip, zipFile.namelist())
self.assertHasKeys(zipFile.namelist(), expectedZip)
for name in zipFile.namelist():
expected = expectedZip[name]
if not isinstance(expected, six.binary_type):
expected = expected.encode('utf8')
self.assertEqual(expected, zipFile.read(name))
def testLegacyItems(self):
folder = Folder().createFolder(
parent=self.users[0], parentType='user', creator=self.users[0],
name='New Folder')
item = Item().createItem(
name='LegacyItem',
creator=self.users[0],
folder=folder)
del item['meta']
item = Item().save(item)
assert 'meta' not in item
item = Item().load(item['_id'], user=self.users[0])
assert 'meta' in item
def testItemDownloadAndChildren(self):
curItem = self._createItem(self.publicFolder['_id'],
'test_for_download', 'fake description',
self.users[0])
self._testUploadFileToItem(curItem, 'file_1', self.users[0], 'foobar')
self._testDownloadSingleFileItem(curItem, self.users[0], 'foobar')
self._testDownloadMultiFileItem(curItem, self.users[0],
{'file_1': 'foobar'}, format='zip')
self._testUploadFileToItem(curItem, 'file_2', self.users[0], 'foobz')
resp = self.request(path='/item/%s/files' % curItem['_id'],
method='GET', user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['name'], 'file_1')
self.assertEqual(resp.json[1]['name'], 'file_2')
self.assertEqual(resp.json[0]['size'], 6)
self.assertEqual(resp.json[1]['size'], 5)
self._testDownloadMultiFileItem(curItem, self.users[0],
{'file_1': 'foobar', 'file_2': 'foobz'})
def testItemCrud(self):
self.ensureRequiredParams(
path='/item', method='POST', required=('folderId',),
user=self.users[1])
params = {
'name': ' ',
'description': ' a description ',
'folderId': self.publicFolder['_id']
}
resp = self.request(path='/item', method='POST', params=params,
user=self.users[1])
self.assertStatus(resp, 403)
resp = self.request(path='/item', method='POST', params=params,
user=self.users[0])
self.assertValidationError(resp, 'name')
# Actually create the item in user 0's private folder
params['name'] = ' my item name'
params['folderId'] = self.privateFolder['_id']
resp = self.request(path='/item', method='POST', params=params,
user=self.users[0])
self.assertStatusOk(resp)
item = resp.json
self.assertEqual(item['name'], params['name'].strip())
self.assertEqual(item['description'], params['description'].strip())
params = {
'folderId': self.privateFolder['_id']
}
resp = self.request(path='/item', method='GET', user=self.users[1],
params=params)
self.assertStatus(resp, 403)
resp = self.request(path='/item/%s' % str(item['_id']), method='GET',
user=self.users[1])
self.assertStatus(resp, 403)
resp = self.request(path='/item/%s' % str(item['_id']), method='GET',
user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json['_id'], item['_id'])
self.assertEqual(resp.json['_modelType'], 'item')
resp = self.request(path='/item', method='GET', user=self.users[0],
params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
params['text'] = 'my item name'
resp = self.request(path='/item', method='GET', user=self.users[0],
params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
del params['folderId']
resp = self.request(path='/item', method='GET', user=self.users[0],
params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
params['limit'] = 1
resp = self.request(path='/item', method='GET', user=self.users[0],
params=params)
self.assertStatusOk(resp)
self.assertEqual(resp.json[0]['_id'], item['_id'])
params['offset'] = 1
resp = self.request(path='/item', method='GET', user=self.users[0],
params=params)
self.assertStatusOk(resp)
self.assertEqual(len(resp.json), 0)
resp = self.request(path='/item', method='GET', user=self.users[0],
params={})
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'], 'Invalid search mode.')
params = {
'name': 'changed name',
'description': 'new description'
}
resp = self.request(path='/item/%s' % item['_id'], method='PUT',
params=params, user=self.users[0])
self.assertStatusOk(resp)
self.assertEqual(resp.json['name'], params['name'])
self.assertEqual(resp.json['description'], params['description'])
item = Item().load(item['_id'], force=True)
self.assertFalse(Item().hasAccess(item))
resp = self.request(path='/item/%s' % item['_id'], method='PUT',
user=self.users[0], params={
'folderId': self.publicFolder['_id']})
self.assertStatusOk(resp)
item = Item().load(resp.json['_id'], force=True)
self.assertTrue(Item().hasAccess(item))
        # Move should fail if we don't have write permission on the destination folder
self.publicFolder = Folder().setUserAccess(
self.publicFolder, self.users[1], AccessType.WRITE, save=True)
resp = self.request(path='/item/%s' % item['_id'], method='PUT',
user=self.users[1], params={
'folderId': self.privateFolder['_id']})
self.assertStatus(resp, 403)
self.assertTrue(resp.json['message'].startswith(
'Write access denied for folder'))
# Try to update/PUT without an id
resp = self.request(path='/item/', method='PUT',
params=params, user=self.users[0])
self.assertStatus(resp, 400)
# Try a bad endpoint (should 400)
resp = self.request(path='/item/%s/blurgh' % item['_id'],
method='GET',
user=self.users[1])
self.assertStatus(resp, 400)
# Try delete with no ID (should 400)
resp = self.request(path='/item/', method='DELETE', user=self.users[1])
self.assertStatus(resp, 400)
# User 1 should not be able to delete the item with read access
self.publicFolder = Folder().setUserAccess(
self.publicFolder, self.users[1], AccessType.READ, save=True)
resp = self.request(path='/item/%s' % str(item['_id']), method='DELETE',
user=self.users[1])
self.assertStatus(resp, 403)
# User 1 should be able to delete the item with write access
self.publicFolder = Folder().setUserAccess(
self.publicFolder, self.users[1], AccessType.WRITE, save=True)
resp = self.request(path='/item/%s' % str(item['_id']), method='DELETE',
user=self.users[1])
self.assertStatusOk(resp)
# Verify that the item is deleted
item = Item().load(item['_id'])
self.assertEqual(item, None)
def testItemMetadataDirect(self):
params = {
'name': 'item with metadata via POST',
'description': ' a description ',
'folderId': self.privateFolder['_id'],
'metadata': 'not JSON'
}
resp = self.request(
path='/item', method='POST', params=params, user=self.users[0])
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Parameter metadata must be valid JSON.')
# Add some metadata
metadata = {
'foo': 'bar',
'test': 2
}
params['metadata'] = json.dumps(metadata)
resp = self.request(
path='/item', method='POST', params=params, user=self.users[0])
self.assertStatusOk(resp)
item = resp.json
self.assertEqual(item['meta']['foo'], metadata['foo'])
self.assertEqual(item['meta']['test'], metadata['test'])
metadata = {
'foo': None,
'test': 3,
'bar': 'baz'
}
resp = self.request(
path='/item/{_id}'.format(**item), method='PUT',
user=self.users[0], params={'metadata': json.dumps(metadata)}
)
self.assertStatusOk(resp)
item = resp.json
self.assertNotHasKeys(item['meta'], ['foo'])
self.assertEqual(item['meta']['test'], metadata['test'])
self.assertEqual(item['meta']['bar'], metadata['bar'])
def testItemMetadataCrud(self):
# Create an item
params = {
'name': 'item with metadata',
'description': ' a description ',
'folderId': self.privateFolder['_id']
}
resp = self.request(path='/item', method='POST', params=params,
user=self.users[0])
self.assertStatusOk(resp)
item = resp.json
# Try to delete metadata from an item that doesn't have any set on it
resp = self.request(path='/item/%s/metadata' % (item['_id']),
method='DELETE', user=self.users[0],
body=json.dumps(['foobar']), type='application/json')
item = resp.json
self.assertStatusOk(resp)
self.assertEqual(item['meta'], {})
metadata = {
'foo': 'bar',
'test': 2
}
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
item = resp.json
self.assertEqual(item['meta']['foo'], metadata['foo'])
self.assertEqual(item['meta']['test'], metadata['test'])
body = '{"key": {"foo": Infinity}}'
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=body, type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Error: "Infinity" is not valid JSON.')
metadata['test'] = None
metadata['foo'] = 'baz'
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
item = resp.json
self.assertEqual(item['meta']['foo'], metadata['foo'])
self.assertNotHasKeys(item['meta'], ['test'])
metadata['nullVal'] = None
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), params={'allowNull': True},
type='application/json')
item = resp.json
self.assertEqual(item['meta']['nullVal'], None)
del metadata['nullVal']
metadata['other'] = 'macguffin'
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
item = resp.json
self.assertEqual(item['meta']['other'], metadata['other'])
self.assertEqual(item['meta']['nullVal'], None)
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='DELETE', user=self.users[0],
body=json.dumps(['other']), type='application/json')
item = resp.json
self.assertNotHasKeys(item['meta'], ['other'])
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='DELETE', user=self.users[0],
body=json.dumps(['foo', 'foo.bar']), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Invalid key foo.bar: keys must not contain the "." character.')
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='DELETE', user=self.users[0],
body=json.dumps(['foo', '$bar']), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Invalid key $bar: keys must not start with the "$" character.')
metadata = {
'test': 'allowed'
}
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata).replace('"', "'"),
type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(resp.json['message'],
'Invalid JSON passed in request body.')
# Make sure metadata cannot be added if there is a period in the key
# name
metadata = {
'foo.bar': 'notallowed'
}
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Invalid key foo.bar: keys must not contain the "." character.')
# Make sure metadata cannot be added if the key begins with a
# dollar sign
metadata = {
'$foobar': 'alsonotallowed'
}
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'],
'Invalid key $foobar: keys must not start with the "$" character.')
# Make sure metadata cannot be added with a blank key
metadata = {
'': 'stillnotallowed'
}
resp = self.request(path='/item/%s/metadata' % item['_id'],
method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
self.assertStatus(resp, 400)
self.assertEqual(
resp.json['message'], 'Key names must not be empty.')
def testItemFiltering(self):
# Create an item
params = {
'name': 'item with metadata',
'description': ' a description ',
'folderId': self.privateFolder['_id']
}
resp = self.request(path='/item', method='POST', params=params,
user=self.users[0])
self.assertStatusOk(resp)
# get the item object from the database
item = Item().load(resp.json['_id'], force=True)
# set a private property
item['private'] = 'very secret metadata'
item = Item().save(item)
# get the item from the rest api
resp = self.request(path='/item/%s' % str(item['_id']), method='GET',
user=self.users[0])
self.assertStatusOk(resp)
# assert that the private data is not included
self.assertNotHasKeys(resp.json, ['private'])
def testPathToRoot(self):
firstChildName = 'firstChild'
firstChildDesc = 'firstDesc'
secondChildName = 'secondChild'
secondChildDesc = 'secondDesc'
firstChild = Folder().createFolder(
self.publicFolder, firstChildName, firstChildDesc, creator=self.users[0])
secondChild = Folder().createFolder(
firstChild, secondChildName, secondChildDesc, creator=self.users[0])
baseItem = Item().createItem('blah', self.users[0], secondChild, 'foo')
resp = self.request(path='/item/%s/rootpath' % baseItem['_id'], method='GET')
self.assertStatusOk(resp)
pathToRoot = resp.json
self.assertEqual(pathToRoot[0]['type'], 'user')
self.assertEqual(pathToRoot[0]['object']['login'],
self.users[0]['login'])
self.assertEqual(pathToRoot[1]['type'], 'folder')
self.assertEqual(pathToRoot[1]['object']['name'],
self.publicFolder['name'])
self.assertEqual(pathToRoot[2]['type'], 'folder')
self.assertEqual(pathToRoot[2]['object']['name'], firstChild['name'])
self.assertEqual(pathToRoot[3]['type'], 'folder')
self.assertEqual(pathToRoot[3]['object']['name'], secondChild['name'])
def testLazyFieldComputation(self):
item = Item().createItem('My Item Name', creator=self.users[0], folder=self.publicFolder)
self.assertEqual(item['lowerName'], 'my item name')
self.assertEqual(item['baseParentId'], self.users[0]['_id'])
# Force the item to be saved without lowerName and baseParentType fields
del item['lowerName']
del item['baseParentType']
item = Item().save(item, validate=False)
item = Item().find({'_id': item['_id']})[0]
self.assertNotHasKeys(item, ('lowerName', 'baseParentType'))
# Now ensure that calling load() actually populates those fields and
# saves the results persistently
Item().load(item['_id'], force=True)
item = Item().find({'_id': item['_id']})[0]
self.assertHasKeys(item, ('lowerName', 'baseParentType'))
self.assertEqual(item['lowerName'], 'my item name')
self.assertEqual(item['baseParentType'], 'user')
self.assertEqual(item['baseParentId'], self.users[0]['_id'])
# Also test that this works for a duplicate item, such that the
# automatically renamed item still has the correct lowerName, and a
# None description is changed to an empty string.
item = Item().createItem(
'My Item Name', creator=self.users[0], folder=self.publicFolder, description=None)
# test if non-strings are coerced
self.assertEqual(item['description'], '')
item['description'] = 1
item = Item().save(item)
item = Item().findOne({'_id': item['_id']})
self.assertEqual(item['description'], '1')
# test if just missing lowerName is corrected.
self.assertEqual(item['lowerName'], 'my item name (1)')
del item['lowerName']
item = Item().save(item, validate=False)
item = Item().findOne({'_id': item['_id']})
self.assertNotHasKeys(item, ('lowerName', ))
Item().load(item['_id'], force=True)
item = Item().findOne({'_id': item['_id']})
self.assertHasKeys(item, ('lowerName', ))
self.assertEqual(item['lowerName'], 'my item name (1)')
def testParentsToRoot(self):
item = Item().createItem('My Item Name', creator=self.users[0], folder=self.publicFolder)
parents = Item().parentsToRoot(item, force=True)
for parent in parents:
self.assertNotIn('_accessLevel', parent['object'])
parents = Item().parentsToRoot(item)
for parent in parents:
self.assertIn('_accessLevel', parent['object'])
def testItemCopy(self):
origItem = self._createItem(self.publicFolder['_id'],
'test_for_copy', 'fake description',
self.users[0])
# Add metadata and files, since we want to make sure those get copied
metadata = {
'foo': 'value1',
'test': 2
}
resp = self.request(
path='/item/%s/metadata' % origItem['_id'], method='PUT', user=self.users[0],
body=json.dumps(metadata), type='application/json')
self.assertStatusOk(resp)
self._testUploadFileToItem(origItem, 'file_1', self.users[0], 'foobar')
self._testUploadFileToItem(origItem, 'file_2', self.users[0], 'foobz')
# Also upload a link
params = {
'parentType': 'item',
'parentId': origItem['_id'],
'name': 'link_file',
'linkUrl': 'http://www.google.com'
}
resp = self.request(path='/file', method='POST', user=self.users[0],
params=params)
self.assertStatusOk(resp)
# Copy to a new item. It will be in the same folder, but we want a
# different name.
params = {
'name': 'copied_item'
}
resp = self.request(path='/item/%s/copy' % origItem['_id'],
method='POST', user=self.users[0], params=params)
self.assertStatusOk(resp)
# Make sure size was returned correctly
self.assertEqual(resp.json['size'], 11)
# Now ask for the new item explicitly and check its metadata
        resp = self.request(path='/item/%s' % resp.json['_id'],
                            user=self.users[0], type='application/json')
self.assertStatusOk(resp)
newItem = resp.json
self.assertEqual(newItem['name'], 'copied_item')
self.assertEqual(newItem['meta']['foo'], metadata['foo'])
self.assertEqual(newItem['meta']['test'], metadata['test'])
# Check if we can download the files from the new item
resp = self.request(path='/item/%s/files' % newItem['_id'],
method='GET', user=self.users[0])
self.assertStatusOk(resp)
newFiles = resp.json
self.assertEqual(newFiles[0]['name'], 'file_1')
self.assertEqual(newFiles[1]['name'], 'file_2')
self.assertEqual(newFiles[2]['name'], 'link_file')
self.assertEqual(newFiles[0]['size'], 6)
self.assertEqual(newFiles[1]['size'], 5)
self._testDownloadMultiFileItem(newItem, self.users[0],
{'file_1': 'foobar', 'file_2': 'foobz',
'link_file': 'http://www.google.com'})
# Check to make sure the original item is still present
resp = self.request(path='/item', method='GET', user=self.users[0],
params={'folderId': self.publicFolder['_id'],
'text': 'test_for_copy'})
self.assertStatusOk(resp)
self.assertEqual(origItem['_id'], resp.json[0]['_id'])
# Check to make sure the new item is still present
resp = self.request(path='/item', method='GET', user=self.users[0],
params={'folderId': self.publicFolder['_id'],
'text': 'copied_item'})
self.assertStatusOk(resp)
self.assertEqual(newItem['_id'], resp.json[0]['_id'])
# Check that the provenance tag correctly points back
# to the original item
self.assertEqual(newItem['copyOfItem'], origItem['_id'])
        # Check that we can still download the files from the original item
        # and that they are distinct from the files in the new item
resp = self.request(path='/item/%s/files' % origItem['_id'],
method='GET', user=self.users[0])
self.assertStatusOk(resp)
origFiles = resp.json
self._testDownloadMultiFileItem(origItem, self.users[0],
{'file_1': 'foobar', 'file_2': 'foobz',
'link_file': 'http://www.google.com'})
        for origFile, newFile in zip(origFiles, newFiles):
            self.assertNotEqual(origFile['_id'], newFile['_id'])
def testCookieAuth(self):
item = self._createItem(self.privateFolder['_id'],
'cookie_auth_download', '', self.users[0])
self._testUploadFileToItem(item, 'file', self.users[0], 'foo')
token = Token().createToken(self.users[0])
cookie = 'girderToken=%s' % token['_id']
# We should be able to download a private item using a cookie token
resp = self.request(path='/item/%s/download' % item['_id'],
isJson=False, cookie=cookie)
self.assertStatusOk(resp)
self.assertEqual(self.getBody(resp), 'foo')
# We should not be able to call GET /item/:id with a cookie token
resp = self.request(path='/item/%s' % item['_id'], cookie=cookie)
self.assertStatus(resp, 401)
# Make sure the cookie has to be a valid token
resp = self.request(path='/item/%s/download' % item['_id'],
cookie='girderToken=invalid_token')
self.assertStatus(resp, 401)
def testReuseExisting(self):
item1 = Item().createItem('to be reused', creator=self.users[0], folder=self.publicFolder)
item2 = Item().createItem('to be reused', creator=self.users[0], folder=self.publicFolder)
item3 = Item().createItem(
'to be reused', creator=self.users[0], folder=self.publicFolder, reuseExisting=True)
self.assertNotEqual(item1['_id'], item2['_id'])
self.assertEqual(item1['_id'], item3['_id'])
self.assertEqual(item2['name'], 'to be reused (1)')
self.assertEqual(item3['name'], 'to be reused')
def testUpdateDuplicatedName(self):
item1 = Item().createItem('foo', creator=self.users[0], folder=self.publicFolder)
item2 = Item().createItem('bar', creator=self.users[0], folder=self.publicFolder)
item2['name'] = 'foo'
Item().save(item2, validate=False)
self.assertEqual(item2['name'], 'foo')
item1['size'] = 3
Item().save(item1)
self.assertEqual(item1['name'], 'foo')
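The duplicate-name tests above (testReuseExisting, testUpdateDuplicatedName) depend on the server suffixing " (1)", " (2)", and so on when a sibling already owns the requested name. A minimal standalone sketch of that policy, for illustration only and not Girder's actual implementation:

def dedupe_name(name, existing_names):
    # Return `name` unchanged, or `name (n)` for the smallest free n.
    if name not in existing_names:
        return name
    n = 1
    while '%s (%d)' % (name, n) in existing_names:
        n += 1
    return '%s (%d)' % (name, n)

assert dedupe_name('to be reused', {'to be reused'}) == 'to be reused (1)'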
| true
| true
|
7908217b6d7c51a3f3fffc389aafce8ac0d0ade8
| 6,934
|
py
|
Python
|
armi/materials/tests/test_water.py
|
youngmit/armi
|
67688e4e67d2a217dfc7b1ccfa64028c20b57a5b
|
[
"Apache-2.0"
] | null | null | null |
armi/materials/tests/test_water.py
|
youngmit/armi
|
67688e4e67d2a217dfc7b1ccfa64028c20b57a5b
|
[
"Apache-2.0"
] | null | null | null |
armi/materials/tests/test_water.py
|
youngmit/armi
|
67688e4e67d2a217dfc7b1ccfa64028c20b57a5b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 TerraPower, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
unit tests for water materials
"""
import unittest

from armi.materials.water import SaturatedWater, SaturatedSteam
class Test_Water(unittest.TestCase):
"""
unit tests for water materials
"""
def test_water_at_freezing(self):
"""
Reproduce verification results from IAPWS-IF97 for water at 0C
http://www.iapws.org/relguide/supsat.pdf
"""
water = SaturatedWater()
steam = SaturatedSteam()
Tk = 273.16
ref_vapor_pressure = 611.657
ref_dp_dT = 44.436693
ref_saturated_water_rho = 999.789
ref_saturated_steam_rho = 0.00485426
ref_alpha = -11.529101
ref_saturated_water_enthalpy = 0.611786
ref_saturated_steam_enthalpy = 2500.5e3
ref_phi = -0.04
ref_saturated_water_entropy = 0
ref_saturated_steam_entropy = 9.154e3
self.assertAlmostEqual(ref_vapor_pressure, water.vaporPressure(Tk=Tk), 3)
self.assertAlmostEqual(ref_vapor_pressure, steam.vaporPressure(Tk=Tk), 3)
self.assertAlmostEqual(ref_dp_dT, water.vaporPressurePrime(Tk=Tk), 3)
self.assertAlmostEqual(ref_dp_dT, steam.vaporPressurePrime(Tk=Tk), 3)
self.assertAlmostEqual(ref_saturated_water_rho, water.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(ref_saturated_steam_rho, steam.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(
ref_alpha, water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 3
)
self.assertAlmostEqual(
ref_alpha, steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 3
)
self.assertAlmostEqual(ref_saturated_water_enthalpy, water.enthalpy(Tk=Tk), 2)
self.assertAlmostEqual(
ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2
)
self.assertAlmostEqual(
ref_phi, water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 2
)
self.assertAlmostEqual(
ref_phi, steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 2
)
self.assertAlmostEqual(ref_saturated_water_entropy, water.entropy(Tk=Tk), 3)
self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3)
def test_water_at_boiling(self):
"""
Reproduce verification results from IAPWS-IF97 for water at 100C
http://www.iapws.org/relguide/supsat.pdf
"""
water = SaturatedWater()
steam = SaturatedSteam()
Tk = 373.1243
ref_vapor_pressure = 0.101325e6
ref_dp_dT = 3.616e3
ref_saturated_water_rho = 958.365
ref_saturated_steam_rho = 0.597586
ref_alpha = 417.65e3
ref_saturated_water_enthalpy = 417.05e3
ref_saturated_steam_enthalpy = 2675.7e3
ref_phi = 1.303e3
ref_saturated_water_entropy = 1.307e3
ref_saturated_steam_entropy = 7.355e3
self.assertAlmostEqual(ref_vapor_pressure / water.vaporPressure(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_vapor_pressure / steam.vaporPressure(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_dp_dT / water.vaporPressurePrime(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_dp_dT / steam.vaporPressurePrime(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_saturated_water_rho, water.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(ref_saturated_steam_rho, steam.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(
ref_alpha / water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(
ref_alpha / steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(
ref_saturated_water_enthalpy / water.enthalpy(Tk=Tk), 1, 2
)
self.assertAlmostEqual(
ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2
)
self.assertAlmostEqual(
ref_phi / water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(
ref_phi / steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(ref_saturated_water_entropy / water.entropy(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3)
    def test_water_at_criticalPoint(self):
"""
Reproduce verification results from IAPWS-IF97 for water at 647.096K
http://www.iapws.org/relguide/supsat.pdf
"""
water = SaturatedWater()
steam = SaturatedSteam()
Tk = 647.096
ref_vapor_pressure = 22.064e6
ref_dp_dT = 268e3
ref_saturated_water_rho = 322
ref_saturated_steam_rho = 322
ref_alpha = 1548e3
ref_saturated_water_enthalpy = 2086.6e3
ref_saturated_steam_enthalpy = 2086.6e3
ref_phi = 3.578e3
ref_saturated_water_entropy = 4.410e3
ref_saturated_steam_entropy = 4.410e3
self.assertAlmostEqual(ref_vapor_pressure / water.vaporPressure(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_vapor_pressure / steam.vaporPressure(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_dp_dT / water.vaporPressurePrime(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_dp_dT / steam.vaporPressurePrime(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_saturated_water_rho, water.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(ref_saturated_steam_rho, steam.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(
ref_alpha / water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(
ref_alpha / steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(
ref_saturated_water_enthalpy / water.enthalpy(Tk=Tk), 1, 2
)
self.assertAlmostEqual(
ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2
)
self.assertAlmostEqual(
ref_phi / water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(
ref_phi / steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(ref_saturated_water_entropy / water.entropy(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3)
if __name__ == "__main__":
unittest.main()
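Note how the boiling- and critical-point tests compare reference and computed values through their ratio (ref / computed against 1) rather than directly: assertAlmostEqual uses an absolute decimal-place tolerance, and dividing first makes that tolerance relative, which quantities spanning Pa to MPa need. A small sketch of the pattern as a reusable mixin:

class RatioAssertMixin:
    # Mix into a unittest.TestCase to get relative comparisons.
    def assertRatioAlmostOne(self, expected, actual, places=3):
        # expected/actual should equal 1 to `places` decimals; the tolerance
        # now scales with the magnitude of the compared quantities.
        self.assertAlmostEqual(expected / actual, 1, places)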
| 36.114583
| 88
| 0.675224
|
import unittest
from armi.materials.water import SaturatedWater, SaturatedSteam
class Test_Water(unittest.TestCase):
def test_water_at_freezing(self):
water = SaturatedWater()
steam = SaturatedSteam()
Tk = 273.16
ref_vapor_pressure = 611.657
ref_dp_dT = 44.436693
ref_saturated_water_rho = 999.789
ref_saturated_steam_rho = 0.00485426
ref_alpha = -11.529101
ref_saturated_water_enthalpy = 0.611786
ref_saturated_steam_enthalpy = 2500.5e3
ref_phi = -0.04
ref_saturated_water_entropy = 0
ref_saturated_steam_entropy = 9.154e3
self.assertAlmostEqual(ref_vapor_pressure, water.vaporPressure(Tk=Tk), 3)
self.assertAlmostEqual(ref_vapor_pressure, steam.vaporPressure(Tk=Tk), 3)
self.assertAlmostEqual(ref_dp_dT, water.vaporPressurePrime(Tk=Tk), 3)
self.assertAlmostEqual(ref_dp_dT, steam.vaporPressurePrime(Tk=Tk), 3)
self.assertAlmostEqual(ref_saturated_water_rho, water.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(ref_saturated_steam_rho, steam.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(
ref_alpha, water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 3
)
self.assertAlmostEqual(
ref_alpha, steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 3
)
self.assertAlmostEqual(ref_saturated_water_enthalpy, water.enthalpy(Tk=Tk), 2)
self.assertAlmostEqual(
ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2
)
self.assertAlmostEqual(
ref_phi, water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 2
)
self.assertAlmostEqual(
ref_phi, steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 2
)
self.assertAlmostEqual(ref_saturated_water_entropy, water.entropy(Tk=Tk), 3)
self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3)
def test_water_at_boiling(self):
water = SaturatedWater()
steam = SaturatedSteam()
Tk = 373.1243
ref_vapor_pressure = 0.101325e6
ref_dp_dT = 3.616e3
ref_saturated_water_rho = 958.365
ref_saturated_steam_rho = 0.597586
ref_alpha = 417.65e3
ref_saturated_water_enthalpy = 417.05e3
ref_saturated_steam_enthalpy = 2675.7e3
ref_phi = 1.303e3
ref_saturated_water_entropy = 1.307e3
ref_saturated_steam_entropy = 7.355e3
self.assertAlmostEqual(ref_vapor_pressure / water.vaporPressure(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_vapor_pressure / steam.vaporPressure(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_dp_dT / water.vaporPressurePrime(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_dp_dT / steam.vaporPressurePrime(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_saturated_water_rho, water.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(ref_saturated_steam_rho, steam.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(
ref_alpha / water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(
ref_alpha / steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(
ref_saturated_water_enthalpy / water.enthalpy(Tk=Tk), 1, 2
)
self.assertAlmostEqual(
ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2
)
self.assertAlmostEqual(
ref_phi / water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(
ref_phi / steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(ref_saturated_water_entropy / water.entropy(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3)
    def test_water_at_criticalPoint(self):
water = SaturatedWater()
steam = SaturatedSteam()
Tk = 647.096
ref_vapor_pressure = 22.064e6
ref_dp_dT = 268e3
ref_saturated_water_rho = 322
ref_saturated_steam_rho = 322
ref_alpha = 1548e3
ref_saturated_water_enthalpy = 2086.6e3
ref_saturated_steam_enthalpy = 2086.6e3
ref_phi = 3.578e3
ref_saturated_water_entropy = 4.410e3
ref_saturated_steam_entropy = 4.410e3
self.assertAlmostEqual(ref_vapor_pressure / water.vaporPressure(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_vapor_pressure / steam.vaporPressure(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_dp_dT / water.vaporPressurePrime(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_dp_dT / steam.vaporPressurePrime(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_saturated_water_rho, water.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(ref_saturated_steam_rho, steam.densityKgM3(Tk=Tk), 0)
self.assertAlmostEqual(
ref_alpha / water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(
ref_alpha / steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(
ref_saturated_water_enthalpy / water.enthalpy(Tk=Tk), 1, 2
)
self.assertAlmostEqual(
ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2
)
self.assertAlmostEqual(
ref_phi / water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(
ref_phi / steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3
)
self.assertAlmostEqual(ref_saturated_water_entropy / water.entropy(Tk=Tk), 1, 3)
self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3)
if __name__ == "__main__":
unittest.main()
| true
| true
|
790822218d5379c4992da6a0efed925359e649a9
| 125
|
py
|
Python
|
assets/student-resources/blank_template.py
|
chaoryan5/website
|
931ff3ace728cfb54089665a5d2cfbb48c488530
|
[
"Apache-2.0"
] | 18
|
2016-09-22T03:24:43.000Z
|
2019-11-21T02:30:41.000Z
|
assets/student-resources/blank_template.py
|
chaoryan5/website
|
931ff3ace728cfb54089665a5d2cfbb48c488530
|
[
"Apache-2.0"
] | 395
|
2016-08-28T01:26:06.000Z
|
2020-06-17T19:33:59.000Z
|
assets/student-resources/blank_template.py
|
chaoryan5/website
|
931ff3ace728cfb54089665a5d2cfbb48c488530
|
[
"Apache-2.0"
] | 60
|
2015-10-09T00:58:06.000Z
|
2021-07-31T21:16:29.000Z
|
def autonomous_setup():
pass
def autonomous_main():
pass
def teleop_setup():
pass
def teleop_main():
pass
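A filled-in version of the template might look like the sketch below; it uses only plain Python, since any real robot APIs from the course kit are outside this file and would be assumptions here:

elapsed = 0  # module-level state shared between setup and main

def autonomous_setup():
    global elapsed
    elapsed = 0  # runs once; e.g. reset counters or record a start time

def autonomous_main():
    global elapsed
    elapsed += 1  # runs repeatedly; read sensors, command motors

def teleop_setup():
    print('teleop started')  # runs once when teleop begins

def teleop_main():
    pass  # runs repeatedly; map joystick input to motor output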
| 10.416667
| 23
| 0.656
|
def autonomous_setup():
pass
def autonomous_main():
pass
def teleop_setup():
pass
def teleop_main():
pass
| true
| true
|
7908224bb67a898e3c14c3d3fcd61729cf70005f
| 87
|
py
|
Python
|
test_app/apps.py
|
eamigo86/django3_asgi
|
deb6d2d7ff8faee24a78af1b570900b7e7062263
|
[
"MIT"
] | 1
|
2020-08-25T15:51:14.000Z
|
2020-08-25T15:51:14.000Z
|
test_app/apps.py
|
eamigo86/django3_asgi
|
deb6d2d7ff8faee24a78af1b570900b7e7062263
|
[
"MIT"
] | 3
|
2021-03-30T12:40:19.000Z
|
2021-09-22T18:33:48.000Z
|
test_app/apps.py
|
eamigo86/django3_asgi
|
deb6d2d7ff8faee24a78af1b570900b7e7062263
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
class TestConfig(AppConfig):
name = "test_app"
| 14.5
| 33
| 0.747126
|
from django.apps import AppConfig
class TestConfig(AppConfig):
name = "test_app"
| true
| true
|
790822df11cab707b156199dcde4cff72d1aa112
| 1,320
|
py
|
Python
|
qa327_test/frontend/geek_base.py
|
nicoleooi/cmpe327
|
73f6bdcbd2f382a54dfec3e0e79120bd60c9513f
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
qa327_test/frontend/geek_base.py
|
nicoleooi/cmpe327
|
73f6bdcbd2f382a54dfec3e0e79120bd60c9513f
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
qa327_test/frontend/geek_base.py
|
nicoleooi/cmpe327
|
73f6bdcbd2f382a54dfec3e0e79120bd60c9513f
|
[
"Apache-2.0",
"MIT"
] | 2
|
2021-01-14T02:58:39.000Z
|
2021-02-04T19:18:47.000Z
|
from seleniumbase import BaseCase
from werkzeug.security import generate_password_hash
from qa327_test.conftest import base_url
from qa327.models import User, Ticket
# Mock a sample user
TEST_USER = User(
email='test_frontend@test.com',
name='test_frontend',
password=generate_password_hash('test_frontend'),
balance=500
)
TEST_USER_SELLER = User(
email='test_seller@test.com',
name='test_seller',
password=generate_password_hash('Password99!'),
balance=500
)
# Mock a sample ticket
TEST_TICKET = Ticket(
name='helloworld',
seller=TEST_USER_SELLER,
price=20,
quantity=20,
expires="20220101"
)
class GeekBaseCase(BaseCase):
'''
Selenium base case with some
GeekSeek utilities
'''
def assert_flash(self, text):
'''asserts that message exists in flashes'''
for flash_dom in self.find_elements('.flash'):
if flash_dom.text == text:
return
print(flash_dom.text)
raise AssertionError(f'Flash not found for text "{text}"')
def login_test_user(self, email=TEST_USER.email, password='test_frontend'):
'''login our test user'''
self.open(base_url+'/login')
self.input('#email', email)
self.input('#password', password)
self.click('#btn-submit')
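A test module built on this base class might look like the following sketch; the route and flash text are illustrative, and in the real suite the backend handlers would be patched to accept TEST_USER:

from qa327_test.frontend.geek_base import GeekBaseCase
from qa327_test.conftest import base_url

class SampleLoginCase(GeekBaseCase):
    def test_login_flow(self):
        self.login_test_user()         # submits the login form as TEST_USER
        self.open(base_url + '/')
        self.assert_flash('Welcome!')  # flash text assumed for illustration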
| 26.938776
| 79
| 0.669697
|
from seleniumbase import BaseCase
from werkzeug.security import generate_password_hash
from qa327_test.conftest import base_url
from qa327.models import User, Ticket
TEST_USER = User(
email='test_frontend@test.com',
name='test_frontend',
password=generate_password_hash('test_frontend'),
balance=500
)
TEST_USER_SELLER = User(
email='test_seller@test.com',
name='test_seller',
password=generate_password_hash('Password99!'),
balance=500
)
TEST_TICKET = Ticket(
name='helloworld',
seller=TEST_USER_SELLER,
price=20,
quantity=20,
expires="20220101"
)
class GeekBaseCase(BaseCase):
def assert_flash(self, text):
for flash_dom in self.find_elements('.flash'):
if flash_dom.text == text:
return
print(flash_dom.text)
raise AssertionError(f'Flash not found for text "{text}"')
def login_test_user(self, email=TEST_USER.email, password='test_frontend'):
self.open(base_url+'/login')
self.input('#email', email)
self.input('#password', password)
self.click('#btn-submit')
| true
| true
|
7908247a89be8fd5b33d6849b6918ba0cbfb6699
| 685
|
py
|
Python
|
telemetryPlugin/forms.py
|
YangKaiting/kiwitcms-telemetry-failed-test-cases
|
10ccd6db1ed0d3a08af87da8411baed0b822ef4d
|
[
"MIT"
] | 1
|
2019-05-28T09:21:42.000Z
|
2019-05-28T09:21:42.000Z
|
telemetryPlugin/forms.py
|
YangKaiting/kiwitcms-telemetry-failed-test-cases
|
10ccd6db1ed0d3a08af87da8411baed0b822ef4d
|
[
"MIT"
] | null | null | null |
telemetryPlugin/forms.py
|
YangKaiting/kiwitcms-telemetry-failed-test-cases
|
10ccd6db1ed0d3a08af87da8411baed0b822ef4d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django import forms
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from tcms.core.utils import string_to_list
from tcms.core.forms.fields import UserField
from tcms.management.models import Product, Version, Build
from tcms.testplans.models import TestPlan
from tcms.testcases.models import TestCase
# =========== Forms for search/filter ==============
class SearchProductForm(forms.Form):
"""
    Includes *only* the fields used in search.html, because
    the actual search is now done via JSON RPC.
"""
name_product = forms.CharField(label='Product', max_length=100, required=False)
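A view might hand this form to the search template, with the result list itself fetched client-side over JSON RPC as the docstring notes; a sketch (view and template path assumed):

from django.shortcuts import render
from telemetryPlugin.forms import SearchProductForm

def search(request):
    form = SearchProductForm(request.GET or None)
    return render(request, 'telemetryPlugin/search.html', {'form': form})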
| 34.25
| 83
| 0.734307
|
from django import forms
from django.contrib.auth import get_user_model
from django.utils.translation import ugettext_lazy as _
from tcms.core.utils import string_to_list
from tcms.core.forms.fields import UserField
from tcms.management.models import Product, Version, Build
from tcms.testplans.models import TestPlan
from tcms.testcases.models import TestCase
class SearchProductForm(forms.Form):
name_product = forms.CharField(label='Product', max_length=100, required=False)
| true
| true
|
7908247cbd31c734c39dfaae689b0ec2312ffaa5
| 1,576
|
py
|
Python
|
Parsers/vcru.py
|
OverFitted/hacksai2021spb
|
552cfe3f5d1d0f89770bdf8e99414ec01e1f4145
|
[
"MIT"
] | null | null | null |
Parsers/vcru.py
|
OverFitted/hacksai2021spb
|
552cfe3f5d1d0f89770bdf8e99414ec01e1f4145
|
[
"MIT"
] | null | null | null |
Parsers/vcru.py
|
OverFitted/hacksai2021spb
|
552cfe3f5d1d0f89770bdf8e99414ec01e1f4145
|
[
"MIT"
] | null | null | null |
import aiohttp, asyncio
from bs4 import BeautifulSoup
import json
VC_SEARCH = "https://vc.ru/search/v2/content/new"
async def parse_urls(key_word):
async with aiohttp.ClientSession() as session:
async with session.get(VC_SEARCH, params={
"query": key_word,
"target_type": 'posts',
}) as r:
soup = BeautifulSoup(await r.text(), 'html.parser')
urls = [x["href"] for x in soup.find_all("a", {"class": "content-feed__link"})]
return urls
async def get_text(url):
async with aiohttp.ClientSession() as session:
async with session.get(url) as r:
soup = BeautifulSoup(await r.text(), 'html.parser')
text = " ".join(map(lambda x: x.text, soup.find("div", {"class": "l-entry__content"}).find_all("p")))
return text
async def get_all_texts(keyword):
urls = await parse_urls(keyword)
all_texts = []
for u in urls[:25]:
text = await get_text(u)
all_texts.append(text)
return all_texts
async def vc_get_data(keyword, result_file_path='result-vc.json'):
    texts = await get_all_texts(keyword)
    result_dict = {"company": keyword,
                   "texts": texts}
    # round-trip through json to guarantee the payload is JSON-serializable
    result_json = json.loads(json.dumps(result_dict))
    # optional persistence, currently disabled (result_file_path is otherwise unused)
    #with open(result_file_path, 'w', encoding='utf-8') as f:
    #    json.dump(result_json, f, ensure_ascii=False, indent=4)
    return result_json
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(vc_get_data("сбер", "other/sber-vc.json"))
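On Python 3.7+ the explicit event-loop handling in the __main__ block can be replaced by asyncio.run; a sketch (the file-path argument is dropped because vc_get_data never writes it in its current form):

import asyncio

result = asyncio.run(vc_get_data("сбер"))
print(result["company"], len(result["texts"]))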
| 31.52
| 113
| 0.636421
|
import aiohttp, asyncio
from bs4 import BeautifulSoup
import json
VC_SEARCH = "https://vc.ru/search/v2/content/new"
async def parse_urls(key_word):
async with aiohttp.ClientSession() as session:
async with session.get(VC_SEARCH, params={
"query": key_word,
"target_type": 'posts',
}) as r:
soup = BeautifulSoup(await r.text(), 'html.parser')
urls = [x["href"] for x in soup.find_all("a", {"class": "content-feed__link"})]
return urls
async def get_text(url):
async with aiohttp.ClientSession() as session:
async with session.get(url) as r:
soup = BeautifulSoup(await r.text(), 'html.parser')
text = " ".join(map(lambda x: x.text, soup.find("div", {"class": "l-entry__content"}).find_all("p")))
return text
async def get_all_texts(keyword):
urls = await parse_urls(keyword)
all_texts = []
for u in urls[:25]:
text = await get_text(u)
all_texts.append(text)
return all_texts
async def vc_get_data(keyword, result_file_path='result-vc.json'):
texts = await get_all_texts(keyword)
result_dict = {"company": keyword,
"texts": texts}
result_json = json.loads(json.dumps(result_dict))
return result_json
if __name__ == "__main__":
loop = asyncio.get_event_loop()
loop.run_until_complete(vc_get_data("сбер", "other/sber-vc.json"))
| true
| true
|
790825a7f680576fc366b31d156e124340ea5bf2
| 745
|
py
|
Python
|
cool.py
|
anay97/python-scraper
|
a09cb319448eae271833eaf59cd3372e8ff71a08
|
[
"MIT"
] | null | null | null |
cool.py
|
anay97/python-scraper
|
a09cb319448eae271833eaf59cd3372e8ff71a08
|
[
"MIT"
] | null | null | null |
cool.py
|
anay97/python-scraper
|
a09cb319448eae271833eaf59cd3372e8ff71a08
|
[
"MIT"
] | null | null | null |
# Scrapes the "cool" WhatsApp statuses from the URL given below
import requests
from bs4 import BeautifulSoup
url_to_scrape = 'https://www.appstatustxt.com/cool-whatsapp-status/'
r = requests.get(url_to_scrape)
soup = BeautifulSoup(r.text,"html5lib")
status_object=[]
statuses=[]
title=soup.title.string
print(title)
status_object=soup.find_all('span',style="color: #008000;")
fo = open("cool.txt", "a")
#Adding basic stuff for json syntax
#fo.write("{\n")
i=1;
for status in status_object:
if len(status.string)<=135:
statuses.append(status.string+'\n')
print(status.string)
# actual_status=status.string.encode('utf-8')
fo.write(status.string.encode('utf-8')+'\n')
# fo.write('"'+str(i)+'":"'+actual_status+'",\n')
i=i+1
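The same scrape reads a little safer with a context manager, so the file is closed even if a request fails; a sketch reusing the selector from above:

import requests
from bs4 import BeautifulSoup

def scrape_statuses(url, max_len=135):
    soup = BeautifulSoup(requests.get(url).text, 'html5lib')
    return [s.string for s in soup.find_all('span', style='color: #008000;')
            if s.string and len(s.string) <= max_len]

with open('cool.txt', 'a', encoding='utf-8') as out:
    for s in scrape_statuses('https://www.appstatustxt.com/cool-whatsapp-status/'):
        out.write(s + '\n')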
| 29.8
| 69
| 0.695302
|
import requests
from bs4 import BeautifulSoup
url_to_scrape = 'https://www.appstatustxt.com/cool-whatsapp-status/'
r = requests.get(url_to_scrape)
soup = BeautifulSoup(r.text,"html5lib")
status_object=[]
statuses=[]
title=soup.title.string
print(title)
status_object=soup.find_all('span',style="color: #008000;")
fo = open("cool.txt", "a")
i=1;
for status in status_object:
if len(status.string)<=135:
statuses.append(status.string+'\n')
print(status.string)
fo.write(status.string.encode('utf-8')+'\n')
i=i+1
| true
| true
|
7908272c12fc9d8c42fdfc8035c17f9a72cf1243
| 7,515
|
py
|
Python
|
rajk_appman/invoke_rajk.py
|
rajk-apps/rajk-appman
|
2053aa15b6dc17747022f15840cfaead06e6e8c6
|
[
"MIT"
] | null | null | null |
rajk_appman/invoke_rajk.py
|
rajk-apps/rajk-appman
|
2053aa15b6dc17747022f15840cfaead06e6e8c6
|
[
"MIT"
] | null | null | null |
rajk_appman/invoke_rajk.py
|
rajk-apps/rajk-appman
|
2053aa15b6dc17747022f15840cfaead06e6e8c6
|
[
"MIT"
] | null | null | null |
import requests
import json
import datetime
import os
import io
from invoke import task
from .invoke_utils import ServerConnection, use_dump_modifier_function
RAJK_PASSWORD = os.environ.get("RAJK_PASSWORD")
RAJK_RSA = os.environ.get("RAJK_RSA")
TEST_DEPLOY_DIRECTORY = os.getcwd() + "/build"
rajk_server_connection = ServerConnection(
"rajk", "146.110.60.20", 2222, "/var/www/rajkdjango2/bin/python"
)
def redo_rsa_from_text(c, rsa_text):
os.makedirs("{}/.ssh".format(os.path.expanduser("~")), exist_ok=True)
rsa_path = "{}/.ssh/id_rsa".format(os.path.expanduser("~"))
with open(rsa_path, "w") as fp:
fp.write(rsa_text)
c.run("chmod 600 {}".format(rsa_path))
@task
def backup_django(c):
os.makedirs("backups", exist_ok=True)
bup_dir = os.path.join("backups", datetime.date.today().isoformat())
c.run("mkdir {}".format(bup_dir))
scp_command = rajk_server_connection.copy_from_server_command(
bup_dir, "/var/www/rajkdjango2"
)
c.run(scp_command)
@task
def restart_server(c):
command = rajk_server_connection.run_sudo_command(
"service django2 restart", RAJK_PASSWORD
)
c.run(command)
@task
def stop_server(c):
command = rajk_server_connection.run_sudo_command(
"service django2 stop", RAJK_PASSWORD
)
c.run(command)
@task
def start_server(c):
command = rajk_server_connection.run_sudo_command(
"service django2 start", RAJK_PASSWORD
)
c.run(command)
@task
def dump(c, fname="dump.json", no_contenttypes=False):
py_command = "/var/www/rajkdjango2/manage.py dumpdata {}".format(
"-e contenttypes" if no_contenttypes else ""
)
command = rajk_server_connection.remote_python_command(py_command)
c.run(command + " > {}".format(fname))
@task
def remote_dump(c, no_contenttypes=True):
py_command = "/var/www/rajkdjango2/manage.py dumpdata {} > /var/www/rajk/djangodump.json".format(
"-e contenttypes" if no_contenttypes else ""
)
command = rajk_server_connection.remote_python_command(py_command)
c.run(command)
@task
def setup_test_deploy_env(c):
c.run("rm -rf ./{}".format(TEST_DEPLOY_DIRECTORY))
c.run("mkdir {}".format(TEST_DEPLOY_DIRECTORY))
resp = requests.get("https://api.github.com/orgs/rajk-apps/repos")
repos = [
"git+https://github.com/{}".format(d["full_name"])
for d in json.loads(resp.content)
]
app_names = [r.split("/")[-1].replace("-", "_") for r in repos]
c.run("python3 -m venv {}/django_venv".format(TEST_DEPLOY_DIRECTORY))
for r in ["wheel", "django", "toml"] + repos:
c.run("{}/django_venv/bin/pip install {}".format(TEST_DEPLOY_DIRECTORY, r))
c.run(
"cd {};django_venv/bin/django-admin startproject rajkproject".format(
TEST_DEPLOY_DIRECTORY
)
)
with open(
"{}/rajkproject/rajkproject/settings.py".format(TEST_DEPLOY_DIRECTORY), "a"
) as fp:
fp.write(
"\nINSTALLED_APPS += [{}]".format(
", ".join(["'{}'".format(a) for a in app_names])
)
)
with open(
"{}/rajkproject/rajkproject/urls.py".format(TEST_DEPLOY_DIRECTORY), "a"
) as fp:
fp.write(
"\nfrom django.urls import include"
"\nurlpatterns.append(path('accounts/', include('django.contrib.auth.urls')))"
"\nurlpatterns += [{}]".format(
", ".join(
[
"path('{}', include('{}.urls'))".format(
a + "/" if a != "rajk_appman" else "", a
)
for a in app_names
]
)
)
)
dump_fname = "{}/dump.json".format(TEST_DEPLOY_DIRECTORY)
resp = requests.get("https://rajk.uni-corvinus.hu/djangodump.json")
with open(dump_fname, "wb") as fp:
fp.write(resp.content)
for django_command in [
"makemigrations",
"makemigrations {}".format(" ".join(app_names)),
"migrate",
"loaddata {}".format(dump_fname),
]:
c.run(
"{}/django_venv/bin/python {}/rajkproject/manage.py {}".format(
TEST_DEPLOY_DIRECTORY, TEST_DEPLOY_DIRECTORY, django_command
)
)
@task
def deploy(c, dump_modifier_function=None, live=False, redo_rsa=False):
f = io.StringIO()
c.run(
"{}/django_venv/bin/python setup.py --fullname".format(TEST_DEPLOY_DIRECTORY),
out_stream=f,
)
current_app_fullname = f.getvalue().strip()
f.close()
c.run("{}/django_venv/bin/python setup.py sdist".format(TEST_DEPLOY_DIRECTORY))
local_tarball = "./dist/{}.tar.gz".format(current_app_fullname)
c.run(
"{}/django_venv/bin/pip install {}".format(TEST_DEPLOY_DIRECTORY, local_tarball)
)
dump_fname = "{}/dump.json".format(TEST_DEPLOY_DIRECTORY)
resp = requests.get("https://rajk.uni-corvinus.hu/djangodump.json")
with open(dump_fname, "wb") as fp:
fp.write(resp.content)
if dump_modifier_function is not None:
use_dump_modifier_function(dump_modifier_function, dump_fname)
c.run("rm {}/rajkproject/db.sqlite3".format(TEST_DEPLOY_DIRECTORY))
for django_command in [
"makemigrations",
"makemigrations {}".format(current_app_fullname.split("-")[0]),
"migrate",
"loaddata {}".format(dump_fname)
]:
c.run(
"{}/django_venv/bin/python {}/rajkproject/manage.py {}".format(
TEST_DEPLOY_DIRECTORY, TEST_DEPLOY_DIRECTORY, django_command
)
)
if live:
_live_deploy(c, local_tarball, current_app_fullname, dump_modifier_function, redo_rsa)
def _live_deploy(c, local_tarball, current_app_fullname, dump_modifier_function=None, redo_rsa=False):
if redo_rsa:
if RAJK_RSA:
redo_rsa_from_text(c, RAJK_RSA)
else:
raise EnvironmentError("No RAJK_RSA env variable")
local_dump_fname = "{}/deploy_dump.json".format(TEST_DEPLOY_DIRECTORY)
remote_dump_fname = "/var/www/rajkdjango2/deploy_dump.json"
print("stopping server")
stop_server(c)
print("dumping data")
dump(c, local_dump_fname, True)
if dump_modifier_function is not None:
use_dump_modifier_function(dump_modifier_function, local_dump_fname)
scp_command = rajk_server_connection.copy_to_server_command(
local_dump_fname, remote_dump_fname
)
c.run(scp_command)
remote_tarball = "/var/www/rajkdjango2/tarballs/{}".format(
local_tarball.split("/")[-1]
)
tar_scp_command = rajk_server_connection.copy_to_server_command(
local_tarball, remote_tarball
)
c.run(tar_scp_command)
install_command = "/var/www/rajkdjango2/bin/pip --no-cache-dir install --upgrade {}".format(
remote_tarball
)
remote_install_command = rajk_server_connection.run_ssh_command(install_command)
c.run(remote_install_command)
c.run(rajk_server_connection.run_ssh_command("rm /var/www/rajkdjango2/db.sqlite3"))
for django_command in [
"makemigrations",
"makemigrations {}".format(current_app_fullname.split("-")[0]),
"migrate",
"loaddata {}".format(remote_dump_fname),
]:
c.run(
rajk_server_connection.remote_python_command(
"/var/www/rajkdjango2/manage.py {}".format(django_command)
)
)
start_server(c)
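These tasks are normally driven through the invoke CLI (for example `invoke deploy --live` with RAJK_PASSWORD and RAJK_RSA exported); since @task objects are also plain callables, they can be exercised programmatically, as this sketch assumes:

from invoke import Context
from rajk_appman.invoke_rajk import backup_django

backup_django(Context())  # writes backups/<today's date>/ via scp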
| 29.703557
| 102
| 0.637126
|
import requests
import json
import datetime
import os
import io
from invoke import task
from .invoke_utils import ServerConnection, use_dump_modifier_function
RAJK_PASSWORD = os.environ.get("RAJK_PASSWORD")
RAJK_RSA = os.environ.get("RAJK_RSA")
TEST_DEPLOY_DIRECTORY = os.getcwd() + "/build"
rajk_server_connection = ServerConnection(
"rajk", "146.110.60.20", 2222, "/var/www/rajkdjango2/bin/python"
)
def redo_rsa_from_text(c, rsa_text):
os.makedirs("{}/.ssh".format(os.path.expanduser("~")), exist_ok=True)
rsa_path = "{}/.ssh/id_rsa".format(os.path.expanduser("~"))
with open(rsa_path, "w") as fp:
fp.write(rsa_text)
c.run("chmod 600 {}".format(rsa_path))
@task
def backup_django(c):
os.makedirs("backups", exist_ok=True)
bup_dir = os.path.join("backups", datetime.date.today().isoformat())
c.run("mkdir {}".format(bup_dir))
scp_command = rajk_server_connection.copy_from_server_command(
bup_dir, "/var/www/rajkdjango2"
)
c.run(scp_command)
@task
def restart_server(c):
command = rajk_server_connection.run_sudo_command(
"service django2 restart", RAJK_PASSWORD
)
c.run(command)
@task
def stop_server(c):
command = rajk_server_connection.run_sudo_command(
"service django2 stop", RAJK_PASSWORD
)
c.run(command)
@task
def start_server(c):
command = rajk_server_connection.run_sudo_command(
"service django2 start", RAJK_PASSWORD
)
c.run(command)
@task
def dump(c, fname="dump.json", no_contenttypes=False):
py_command = "/var/www/rajkdjango2/manage.py dumpdata {}".format(
"-e contenttypes" if no_contenttypes else ""
)
command = rajk_server_connection.remote_python_command(py_command)
c.run(command + " > {}".format(fname))
@task
def remote_dump(c, no_contenttypes=True):
py_command = "/var/www/rajkdjango2/manage.py dumpdata {} > /var/www/rajk/djangodump.json".format(
"-e contenttypes" if no_contenttypes else ""
)
command = rajk_server_connection.remote_python_command(py_command)
c.run(command)
@task
def setup_test_deploy_env(c):
c.run("rm -rf ./{}".format(TEST_DEPLOY_DIRECTORY))
c.run("mkdir {}".format(TEST_DEPLOY_DIRECTORY))
resp = requests.get("https://api.github.com/orgs/rajk-apps/repos")
repos = [
"git+https://github.com/{}".format(d["full_name"])
for d in json.loads(resp.content)
]
app_names = [r.split("/")[-1].replace("-", "_") for r in repos]
c.run("python3 -m venv {}/django_venv".format(TEST_DEPLOY_DIRECTORY))
for r in ["wheel", "django", "toml"] + repos:
c.run("{}/django_venv/bin/pip install {}".format(TEST_DEPLOY_DIRECTORY, r))
c.run(
"cd {};django_venv/bin/django-admin startproject rajkproject".format(
TEST_DEPLOY_DIRECTORY
)
)
with open(
"{}/rajkproject/rajkproject/settings.py".format(TEST_DEPLOY_DIRECTORY), "a"
) as fp:
fp.write(
"\nINSTALLED_APPS += [{}]".format(
", ".join(["'{}'".format(a) for a in app_names])
)
)
with open(
"{}/rajkproject/rajkproject/urls.py".format(TEST_DEPLOY_DIRECTORY), "a"
) as fp:
fp.write(
"\nfrom django.urls import include"
"\nurlpatterns.append(path('accounts/', include('django.contrib.auth.urls')))"
"\nurlpatterns += [{}]".format(
", ".join(
[
"path('{}', include('{}.urls'))".format(
a + "/" if a != "rajk_appman" else "", a
)
for a in app_names
]
)
)
)
dump_fname = "{}/dump.json".format(TEST_DEPLOY_DIRECTORY)
resp = requests.get("https://rajk.uni-corvinus.hu/djangodump.json")
with open(dump_fname, "wb") as fp:
fp.write(resp.content)
for django_command in [
"makemigrations",
"makemigrations {}".format(" ".join(app_names)),
"migrate",
"loaddata {}".format(dump_fname),
]:
c.run(
"{}/django_venv/bin/python {}/rajkproject/manage.py {}".format(
TEST_DEPLOY_DIRECTORY, TEST_DEPLOY_DIRECTORY, django_command
)
)
@task
def deploy(c, dump_modifier_function=None, live=False, redo_rsa=False):
f = io.StringIO()
c.run(
"{}/django_venv/bin/python setup.py --fullname".format(TEST_DEPLOY_DIRECTORY),
out_stream=f,
)
current_app_fullname = f.getvalue().strip()
f.close()
c.run("{}/django_venv/bin/python setup.py sdist".format(TEST_DEPLOY_DIRECTORY))
local_tarball = "./dist/{}.tar.gz".format(current_app_fullname)
c.run(
"{}/django_venv/bin/pip install {}".format(TEST_DEPLOY_DIRECTORY, local_tarball)
)
dump_fname = "{}/dump.json".format(TEST_DEPLOY_DIRECTORY)
resp = requests.get("https://rajk.uni-corvinus.hu/djangodump.json")
with open(dump_fname, "wb") as fp:
fp.write(resp.content)
if dump_modifier_function is not None:
use_dump_modifier_function(dump_modifier_function, dump_fname)
c.run("rm {}/rajkproject/db.sqlite3".format(TEST_DEPLOY_DIRECTORY))
for django_command in [
"makemigrations",
"makemigrations {}".format(current_app_fullname.split("-")[0]),
"migrate",
"loaddata {}".format(dump_fname)
]:
c.run(
"{}/django_venv/bin/python {}/rajkproject/manage.py {}".format(
TEST_DEPLOY_DIRECTORY, TEST_DEPLOY_DIRECTORY, django_command
)
)
if live:
_live_deploy(c, local_tarball, current_app_fullname, dump_modifier_function, redo_rsa)
def _live_deploy(c, local_tarball, current_app_fullname, dump_modifier_function=None, redo_rsa=False):
if redo_rsa:
if RAJK_RSA:
redo_rsa_from_text(c, RAJK_RSA)
else:
raise EnvironmentError("No RAJK_RSA env variable")
local_dump_fname = "{}/deploy_dump.json".format(TEST_DEPLOY_DIRECTORY)
remote_dump_fname = "/var/www/rajkdjango2/deploy_dump.json"
print("stopping server")
stop_server(c)
print("dumping data")
dump(c, local_dump_fname, True)
if dump_modifier_function is not None:
use_dump_modifier_function(dump_modifier_function, local_dump_fname)
scp_command = rajk_server_connection.copy_to_server_command(
local_dump_fname, remote_dump_fname
)
c.run(scp_command)
remote_tarball = "/var/www/rajkdjango2/tarballs/{}".format(
local_tarball.split("/")[-1]
)
tar_scp_command = rajk_server_connection.copy_to_server_command(
local_tarball, remote_tarball
)
c.run(tar_scp_command)
install_command = "/var/www/rajkdjango2/bin/pip --no-cache-dir install --upgrade {}".format(
remote_tarball
)
remote_install_command = rajk_server_connection.run_ssh_command(install_command)
c.run(remote_install_command)
c.run(rajk_server_connection.run_ssh_command("rm /var/www/rajkdjango2/db.sqlite3"))
for django_command in [
"makemigrations",
"makemigrations {}".format(current_app_fullname.split("-")[0]),
"migrate",
"loaddata {}".format(remote_dump_fname),
]:
c.run(
rajk_server_connection.remote_python_command(
"/var/www/rajkdjango2/manage.py {}".format(django_command)
)
)
start_server(c)
| true
| true
|
79082733e0ac70c4f98e69a85757ce7e81ebe486
| 1,216
|
py
|
Python
|
bcs-ui/backend/tests/components/test_cm.py
|
kayinli/bk-bcs
|
93a0856175f7b066ef835921572c1cac590dbd8e
|
[
"Apache-2.0"
] | null | null | null |
bcs-ui/backend/tests/components/test_cm.py
|
kayinli/bk-bcs
|
93a0856175f7b066ef835921572c1cac590dbd8e
|
[
"Apache-2.0"
] | null | null | null |
bcs-ui/backend/tests/components/test_cm.py
|
kayinli/bk-bcs
|
93a0856175f7b066ef835921572c1cac590dbd8e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from requests_mock import ANY
from backend.components.cluster_manager import ClusterManagerClient
class TestClusterManagerClient:
def test_get_nodes(self, cluster_id, request_user, requests_mock):
expected_data = [{"innerIP": "127.0.0.1"}]
requests_mock.get(ANY, json={"code": 0, "data": expected_data})
client = ClusterManagerClient(request_user.token.access_token)
data = client.get_nodes(cluster_id)
assert data == expected_data
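The test leans on three fixtures: requests_mock from the requests-mock pytest plugin, plus project-defined cluster_id and request_user. A sketch of what the project fixtures might provide (values illustrative; the real conftest may differ):

import pytest
from types import SimpleNamespace

@pytest.fixture
def cluster_id():
    return 'BCS-K8S-00000'  # illustrative cluster identifier

@pytest.fixture
def request_user():
    # only the attribute the client reads: token.access_token
    return SimpleNamespace(token=SimpleNamespace(access_token='fake-token'))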
| 46.769231
| 115
| 0.763158
|
from requests_mock import ANY
from backend.components.cluster_manager import ClusterManagerClient
class TestClusterManagerClient:
def test_get_nodes(self, cluster_id, request_user, requests_mock):
expected_data = [{"innerIP": "127.0.0.1"}]
requests_mock.get(ANY, json={"code": 0, "data": expected_data})
client = ClusterManagerClient(request_user.token.access_token)
data = client.get_nodes(cluster_id)
assert data == expected_data
| true
| true
|
7908280647d27b78811c8534d7906da5a4299fad
| 2,312
|
py
|
Python
|
official/gnn/gat/preprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 77
|
2021-10-15T08:32:37.000Z
|
2022-03-30T13:09:11.000Z
|
official/gnn/gat/preprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 3
|
2021-10-30T14:44:57.000Z
|
2022-02-14T06:57:57.000Z
|
official/gnn/gat/preprocess.py
|
leelige/mindspore
|
5199e05ba3888963473f2b07da3f7bca5b9ef6dc
|
[
"Apache-2.0"
] | 24
|
2021-10-15T08:32:45.000Z
|
2022-03-24T18:45:20.000Z
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""preprocess"""
import argparse
import os
import numpy as np
from src.dataset import load_and_process
def generate_bin():
"""Generate bin files."""
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='./data/cora/cora_mr', help='Data dir')
parser.add_argument('--train_nodes_num', type=int, default=140, help='Nodes numbers for training')
parser.add_argument('--eval_nodes_num', type=int, default=500, help='Nodes numbers for evaluation')
parser.add_argument('--test_nodes_num', type=int, default=1000, help='Nodes numbers for test')
parser.add_argument('--result_path', type=str, default='./preprocess_Result/', help='Result path')
args = parser.parse_args()
feature, biases, _, _, _, _, y_test, test_mask = load_and_process(args.data_dir,
args.train_nodes_num,
args.eval_nodes_num,
args.test_nodes_num)
feature_path = os.path.join(args.result_path, '00_data')
biases_path = os.path.join(args.result_path, '01_data')
y_test_path = os.path.join(args.result_path, 'y_test.npy')
test_mask_path = os.path.join(args.result_path, 'test_mask.npy')
os.makedirs(feature_path)
os.makedirs(biases_path)
feature.tofile(os.path.join(feature_path, 'feature.bin'))
biases.tofile(os.path.join(biases_path, 'biases.bin'))
np.save(y_test_path, y_test)
np.save(test_mask_path, test_mask)
if __name__ == "__main__":
generate_bin()
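Because ndarray.tofile drops both dtype and shape, whatever consumes feature.bin and biases.bin must re-impose them; a read-back sketch (float32 is an assumption and must match what load_and_process produced):

import numpy as np

feature = np.fromfile('./preprocess_Result/00_data/feature.bin', dtype=np.float32)
biases = np.fromfile('./preprocess_Result/01_data/biases.bin', dtype=np.float32)
y_test = np.load('./preprocess_Result/y_test.npy')
test_mask = np.load('./preprocess_Result/test_mask.npy')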
| 44.461538
| 103
| 0.645329
|
import argparse
import os
import numpy as np
from src.dataset import load_and_process
def generate_bin():
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='./data/cora/cora_mr', help='Data dir')
parser.add_argument('--train_nodes_num', type=int, default=140, help='Nodes numbers for training')
parser.add_argument('--eval_nodes_num', type=int, default=500, help='Nodes numbers for evaluation')
parser.add_argument('--test_nodes_num', type=int, default=1000, help='Nodes numbers for test')
parser.add_argument('--result_path', type=str, default='./preprocess_Result/', help='Result path')
args = parser.parse_args()
feature, biases, _, _, _, _, y_test, test_mask = load_and_process(args.data_dir,
args.train_nodes_num,
args.eval_nodes_num,
args.test_nodes_num)
feature_path = os.path.join(args.result_path, '00_data')
biases_path = os.path.join(args.result_path, '01_data')
y_test_path = os.path.join(args.result_path, 'y_test.npy')
test_mask_path = os.path.join(args.result_path, 'test_mask.npy')
os.makedirs(feature_path)
os.makedirs(biases_path)
feature.tofile(os.path.join(feature_path, 'feature.bin'))
biases.tofile(os.path.join(biases_path, 'biases.bin'))
np.save(y_test_path, y_test)
np.save(test_mask_path, test_mask)
if __name__ == "__main__":
generate_bin()
| true
| true
|
790828ca9c00194932cd39def66cd6c4ddcbb404
| 559
|
py
|
Python
|
Python_proficiency_test/latex/codes/17b.py
|
ALFA-group/neural_program_comprehension
|
0253911f376cf282af5a5627e38e0a591ad38860
|
[
"MIT"
] | 6
|
2020-04-24T08:16:51.000Z
|
2021-11-01T09:50:46.000Z
|
Python_proficiency_test/latex/codes/17b.py
|
ALFA-group/neural_program_comprehension
|
0253911f376cf282af5a5627e38e0a591ad38860
|
[
"MIT"
] | null | null | null |
Python_proficiency_test/latex/codes/17b.py
|
ALFA-group/neural_program_comprehension
|
0253911f376cf282af5a5627e38e0a591ad38860
|
[
"MIT"
] | 4
|
2021-02-17T20:21:31.000Z
|
2022-02-14T12:43:23.000Z
|
File "test.py", line 15, in <module>
print(find_lowest(a))
File "test.py", line 12, in find_lowest
return lowest(lst[0], lst[1:])
File "test.py", line 9, in lowest
return lowest(rest[0], rest[1:])
File "test.py", line 9, in lowest
return lowest(rest[0], rest[1:])
File "test.py", line 13, in lowest
return lowest(first, rest)
File "test.py", line 13, in lowest
return lowest(first, rest)
[Previous line repeated 993 more times]
File "test.py", line 6, in lowest
if len(rest) == 0:
RecursionError: maximum recursion depth exceeded in comparison
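Code consistent with this traceback would look roughly like the sketch below (a reconstruction, not the original test.py). The telling frame is line 13, return lowest(first, rest): rest is never shrunk on that branch, so once first stays the smaller value the recursion repeats with identical arguments until the interpreter's roughly 1000-frame limit is hit:

def lowest(first, rest):
    if len(rest) == 0:                     # traceback's line 6
        return first
    if rest[0] < first:
        return lowest(rest[0], rest[1:])   # line 9: consumes one element
    else:
        return lowest(first, rest)         # line 13: same arguments again

def find_lowest(lst):
    return lowest(lst[0], lst[1:])         # line 12

a = [5, 3, 4] * 700
print(find_lowest(a))                      # line 15: raises RecursionError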
| 34.9375
| 62
| 0.697674
|
File "test.py", line 15, in <module>
print(find_lowest(a))
File "test.py", line 12, in find_lowest
return lowest(lst[0], lst[1:])
File "test.py", line 9, in lowest
return lowest(rest[0], rest[1:])
File "test.py", line 9, in lowest
return lowest(rest[0], rest[1:])
File "test.py", line 13, in lowest
return lowest(first, rest)
File "test.py", line 13, in lowest
return lowest(first, rest)
[Previous line repeated 993 more times]
File "test.py", line 6, in lowest
if len(rest) == 0:
RecursionError: maximum recursion depth exceeded in comparison
| false
| true
|
790828ffc0a860859b4ae454d22ab9603f5c2c72
| 10,550
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 31
|
2020-09-21T09:41:01.000Z
|
2021-02-26T13:21:59.000Z
|
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 231
|
2020-09-21T09:38:45.000Z
|
2021-03-01T11:16:03.000Z
|
sdk/python/pulumi_azure_nextgen/devtestlab/schedule.py
|
pulumi/pulumi-azure-nextgen
|
452736b0a1cf584c2d4c04666e017af6e9b2c15c
|
[
"Apache-2.0"
] | 4
|
2020-09-29T14:14:59.000Z
|
2021-02-10T20:38:16.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Schedule']
class Schedule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
daily_recurrence: Optional[pulumi.Input[pulumi.InputType['DayDetailsArgs']]] = None,
hourly_recurrence: Optional[pulumi.Input[pulumi.InputType['HourDetailsArgs']]] = None,
lab_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
notification_settings: Optional[pulumi.Input[pulumi.InputType['NotificationSettingsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[Union[str, 'EnableStatus']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_resource_id: Optional[pulumi.Input[str]] = None,
task_type: Optional[pulumi.Input[str]] = None,
time_zone_id: Optional[pulumi.Input[str]] = None,
weekly_recurrence: Optional[pulumi.Input[pulumi.InputType['WeekDetailsArgs']]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
A schedule.
API Version: 2018-09-15.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['DayDetailsArgs']] daily_recurrence: If the schedule will occur once each day of the week, specify the daily recurrence.
:param pulumi.Input[pulumi.InputType['HourDetailsArgs']] hourly_recurrence: If the schedule will occur multiple times a day, specify the hourly recurrence.
:param pulumi.Input[str] lab_name: The name of the lab.
:param pulumi.Input[str] location: The location of the resource.
:param pulumi.Input[str] name: The name of the schedule.
:param pulumi.Input[pulumi.InputType['NotificationSettingsArgs']] notification_settings: Notification settings.
:param pulumi.Input[str] resource_group_name: The name of the resource group.
:param pulumi.Input[Union[str, 'EnableStatus']] status: The status of the schedule (i.e. Enabled, Disabled)
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The tags of the resource.
:param pulumi.Input[str] target_resource_id: The resource ID to which the schedule belongs
:param pulumi.Input[str] task_type: The task type of the schedule (e.g. LabVmsShutdownTask, LabVmAutoStart).
:param pulumi.Input[str] time_zone_id: The time zone ID (e.g. Pacific Standard time).
:param pulumi.Input[pulumi.InputType['WeekDetailsArgs']] weekly_recurrence: If the schedule will occur only some days of the week, specify the weekly recurrence.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['daily_recurrence'] = daily_recurrence
__props__['hourly_recurrence'] = hourly_recurrence
if lab_name is None and not opts.urn:
raise TypeError("Missing required property 'lab_name'")
__props__['lab_name'] = lab_name
__props__['location'] = location
__props__['name'] = name
__props__['notification_settings'] = notification_settings
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['status'] = status
__props__['tags'] = tags
__props__['target_resource_id'] = target_resource_id
__props__['task_type'] = task_type
__props__['time_zone_id'] = time_zone_id
__props__['weekly_recurrence'] = weekly_recurrence
__props__['created_date'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
__props__['unique_identifier'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:devtestlab/latest:Schedule"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20150521preview:Schedule"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20160515:Schedule"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20180915:Schedule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Schedule, __self__).__init__(
'azure-nextgen:devtestlab:Schedule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Schedule':
"""
Get an existing Schedule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Schedule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> pulumi.Output[str]:
"""
The creation date of the schedule.
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter(name="dailyRecurrence")
def daily_recurrence(self) -> pulumi.Output[Optional['outputs.DayDetailsResponse']]:
"""
If the schedule will occur once each day of the week, specify the daily recurrence.
"""
return pulumi.get(self, "daily_recurrence")
@property
@pulumi.getter(name="hourlyRecurrence")
def hourly_recurrence(self) -> pulumi.Output[Optional['outputs.HourDetailsResponse']]:
"""
If the schedule will occur multiple times a day, specify the hourly recurrence.
"""
return pulumi.get(self, "hourly_recurrence")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
The location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notificationSettings")
def notification_settings(self) -> pulumi.Output[Optional['outputs.NotificationSettingsResponse']]:
"""
Notification settings.
"""
return pulumi.get(self, "notification_settings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning status of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def status(self) -> pulumi.Output[Optional[str]]:
"""
The status of the schedule (i.e. Enabled, Disabled)
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The tags of the resource.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="targetResourceId")
def target_resource_id(self) -> pulumi.Output[Optional[str]]:
"""
The resource ID to which the schedule belongs
"""
return pulumi.get(self, "target_resource_id")
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Output[Optional[str]]:
"""
The task type of the schedule (e.g. LabVmsShutdownTask, LabVmAutoStart).
"""
return pulumi.get(self, "task_type")
@property
@pulumi.getter(name="timeZoneId")
def time_zone_id(self) -> pulumi.Output[Optional[str]]:
"""
The time zone ID (e.g. Pacific Standard time).
"""
return pulumi.get(self, "time_zone_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueIdentifier")
def unique_identifier(self) -> pulumi.Output[str]:
"""
The unique immutable identifier of a resource (Guid).
"""
return pulumi.get(self, "unique_identifier")
@property
@pulumi.getter(name="weeklyRecurrence")
def weekly_recurrence(self) -> pulumi.Output[Optional['outputs.WeekDetailsResponse']]:
"""
If the schedule will occur only some days of the week, specify the weekly recurrence.
"""
return pulumi.get(self, "weekly_recurrence")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
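Instantiating the resource could look like the sketch below; the values are illustrative, and DayDetailsArgs with a time field is an assumption based on the DevTest Labs API rather than something this file defines:

import pulumi_azure_nextgen.devtestlab as devtestlab

shutdown = devtestlab.Schedule(
    'autoShutdown',
    lab_name='my-lab',                 # required (see the __init__ checks)
    resource_group_name='my-rg',       # required
    name='LabVmsShutdown',
    task_type='LabVmsShutdownTask',
    time_zone_id='Pacific Standard Time',
    status='Enabled',
    daily_recurrence=devtestlab.DayDetailsArgs(time='1900'),  # field assumed
)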
| 42.712551
| 325
| 0.645308
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['Schedule']
class Schedule(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
daily_recurrence: Optional[pulumi.Input[pulumi.InputType['DayDetailsArgs']]] = None,
hourly_recurrence: Optional[pulumi.Input[pulumi.InputType['HourDetailsArgs']]] = None,
lab_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
notification_settings: Optional[pulumi.Input[pulumi.InputType['NotificationSettingsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
status: Optional[pulumi.Input[Union[str, 'EnableStatus']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
target_resource_id: Optional[pulumi.Input[str]] = None,
task_type: Optional[pulumi.Input[str]] = None,
time_zone_id: Optional[pulumi.Input[str]] = None,
weekly_recurrence: Optional[pulumi.Input[pulumi.InputType['WeekDetailsArgs']]] = None,
__props__=None,
__name__=None,
__opts__=None):
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['daily_recurrence'] = daily_recurrence
__props__['hourly_recurrence'] = hourly_recurrence
if lab_name is None and not opts.urn:
raise TypeError("Missing required property 'lab_name'")
__props__['lab_name'] = lab_name
__props__['location'] = location
__props__['name'] = name
__props__['notification_settings'] = notification_settings
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['status'] = status
__props__['tags'] = tags
__props__['target_resource_id'] = target_resource_id
__props__['task_type'] = task_type
__props__['time_zone_id'] = time_zone_id
__props__['weekly_recurrence'] = weekly_recurrence
__props__['created_date'] = None
__props__['provisioning_state'] = None
__props__['type'] = None
__props__['unique_identifier'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:devtestlab/latest:Schedule"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20150521preview:Schedule"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20160515:Schedule"), pulumi.Alias(type_="azure-nextgen:devtestlab/v20180915:Schedule")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Schedule, __self__).__init__(
'azure-nextgen:devtestlab:Schedule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Schedule':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
return Schedule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> pulumi.Output[str]:
return pulumi.get(self, "created_date")
@property
@pulumi.getter(name="dailyRecurrence")
def daily_recurrence(self) -> pulumi.Output[Optional['outputs.DayDetailsResponse']]:
return pulumi.get(self, "daily_recurrence")
@property
@pulumi.getter(name="hourlyRecurrence")
def hourly_recurrence(self) -> pulumi.Output[Optional['outputs.HourDetailsResponse']]:
return pulumi.get(self, "hourly_recurrence")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notificationSettings")
def notification_settings(self) -> pulumi.Output[Optional['outputs.NotificationSettingsResponse']]:
return pulumi.get(self, "notification_settings")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def status(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "status")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="targetResourceId")
def target_resource_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "target_resource_id")
@property
@pulumi.getter(name="taskType")
def task_type(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "task_type")
@property
@pulumi.getter(name="timeZoneId")
def time_zone_id(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "time_zone_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
@property
@pulumi.getter(name="uniqueIdentifier")
def unique_identifier(self) -> pulumi.Output[str]:
return pulumi.get(self, "unique_identifier")
@property
@pulumi.getter(name="weeklyRecurrence")
def weekly_recurrence(self) -> pulumi.Output[Optional['outputs.WeekDetailsResponse']]:
return pulumi.get(self, "weekly_recurrence")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| true
| true
|
79082c61a21e15c0ea4ab2ea19ffb90a66c8cd64
| 1,271
|
py
|
Python
|
cmsplugin_cascade/sphinx/cms_menus.py
|
beeduino/djangocms-cascade
|
42424dfa40d887491d37c0a34386e8c1c94e1b14
|
[
"MIT"
] | null | null | null |
cmsplugin_cascade/sphinx/cms_menus.py
|
beeduino/djangocms-cascade
|
42424dfa40d887491d37c0a34386e8c1c94e1b14
|
[
"MIT"
] | null | null | null |
cmsplugin_cascade/sphinx/cms_menus.py
|
beeduino/djangocms-cascade
|
42424dfa40d887491d37c0a34386e8c1c94e1b14
|
[
"MIT"
] | null | null | null |
import io
import json
import os
from django.conf import settings
from django.urls import reverse_lazy
from django.utils.translation import gettext_lazy as _
from cms.menu_bases import CMSAttachMenu
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
class DocumentationMenu(CMSAttachMenu):
name = _("Documentation Menu") # give the menu a name this is required.
def get_nodes(self, request):
"""
This method is used to build the menu tree.
"""
nodes = []
docsmap_file = os.path.join(settings.SPHINX_DOCS_ROOT, 'docsmap.json')
if not os.path.exists(docsmap_file):
return nodes
        with io.open(docsmap_file, encoding='utf-8') as fh:
            docs_map = json.load(fh)
for counter, items in enumerate(docs_map.items(), 1):
bits = items[0].split('/')
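            # Keep only top-level chapter pages: skip the root 'index' document and any
            # second-level page that is not itself a chapter 'index'.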
if len(bits) == 1 and bits[0] == 'index' or len(bits) == 2 and bits[1] != 'index':
continue
node = NavigationNode(
title=items[1],
url=reverse_lazy('sphinx-documentation', args=(bits[0],)),
id=counter,
)
nodes.append(node)
return nodes
menu_pool.register_menu(DocumentationMenu)
| 32.589744
| 94
| 0.623131
|
import io
import json
import os
from django.conf import settings
from django.urls import reverse_lazy
from django.utils.translation import gettext_lazy as _
from cms.menu_bases import CMSAttachMenu
from menus.base import NavigationNode
from menus.menu_pool import menu_pool
class DocumentationMenu(CMSAttachMenu):
name = _("Documentation Menu")
def get_nodes(self, request):
nodes = []
docsmap_file = os.path.join(settings.SPHINX_DOCS_ROOT, 'docsmap.json')
if not os.path.exists(docsmap_file):
return nodes
        with io.open(docsmap_file, encoding='utf-8') as fh:
            docs_map = json.load(fh)
for counter, items in enumerate(docs_map.items(), 1):
bits = items[0].split('/')
if len(bits) == 1 and bits[0] == 'index' or len(bits) == 2 and bits[1] != 'index':
continue
node = NavigationNode(
title=items[1],
url=reverse_lazy('sphinx-documentation', args=(bits[0],)),
id=counter,
)
nodes.append(node)
return nodes
menu_pool.register_menu(DocumentationMenu)
| true
| true
|
79082c89d257459ac7585963e578cfc156a719da
| 393
|
py
|
Python
|
infosafe/asgi.py
|
royaleagle-dev/infosafe
|
fcb00a67d6a8fdd3d2e032b53b56bbcf35d844b6
|
[
"Apache-2.0"
] | null | null | null |
infosafe/asgi.py
|
royaleagle-dev/infosafe
|
fcb00a67d6a8fdd3d2e032b53b56bbcf35d844b6
|
[
"Apache-2.0"
] | null | null | null |
infosafe/asgi.py
|
royaleagle-dev/infosafe
|
fcb00a67d6a8fdd3d2e032b53b56bbcf35d844b6
|
[
"Apache-2.0"
] | null | null | null |
"""
ASGI config for infosafe project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'infosafe.settings')
application = get_asgi_application()
| 23.117647
| 78
| 0.78626
|
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'infosafe.settings')
application = get_asgi_application()
| true
| true
|
79082dd5ad44d03f1472ba27987080642ff0d733
| 3,126
|
py
|
Python
|
transcribe.py
|
chenchy/onsets-and-frames
|
af7ac2d2e65cba1f6442b81317328d96b3700b26
|
[
"MIT"
] | 149
|
2019-01-22T23:39:27.000Z
|
2022-03-30T17:57:57.000Z
|
transcribe.py
|
chenchy/onsets-and-frames
|
af7ac2d2e65cba1f6442b81317328d96b3700b26
|
[
"MIT"
] | 27
|
2019-03-05T01:17:21.000Z
|
2022-03-06T07:10:29.000Z
|
transcribe.py
|
chenchy/onsets-and-frames
|
af7ac2d2e65cba1f6442b81317328d96b3700b26
|
[
"MIT"
] | 61
|
2019-04-09T08:07:05.000Z
|
2022-02-23T03:49:18.000Z
|
import argparse
import os
import sys
import numpy as np
import soundfile
from mir_eval.util import midi_to_hz
from onsets_and_frames import *
def load_and_process_audio(flac_path, sequence_length, device):
random = np.random.RandomState(seed=42)
audio, sr = soundfile.read(flac_path, dtype='int16')
assert sr == SAMPLE_RATE
audio = torch.ShortTensor(audio)
if sequence_length is not None:
audio_length = len(audio)
step_begin = random.randint(audio_length - sequence_length) // HOP_LENGTH
n_steps = sequence_length // HOP_LENGTH
begin = step_begin * HOP_LENGTH
end = begin + sequence_length
audio = audio[begin:end].to(device)
else:
audio = audio.to(device)
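    # scale 16-bit integer samples to floats in [-1, 1)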
audio = audio.float().div_(32768.0)
return audio
def transcribe(model, audio):
mel = melspectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1]).transpose(-1, -2)
onset_pred, offset_pred, _, frame_pred, velocity_pred = model(mel)
predictions = {
'onset': onset_pred.reshape((onset_pred.shape[1], onset_pred.shape[2])),
'offset': offset_pred.reshape((offset_pred.shape[1], offset_pred.shape[2])),
'frame': frame_pred.reshape((frame_pred.shape[1], frame_pred.shape[2])),
'velocity': velocity_pred.reshape((velocity_pred.shape[1], velocity_pred.shape[2]))
}
return predictions
def transcribe_file(model_file, flac_paths, save_path, sequence_length,
onset_threshold, frame_threshold, device):
model = torch.load(model_file, map_location=device).eval()
summary(model)
for flac_path in flac_paths:
print(f'Processing {flac_path}...', file=sys.stderr)
audio = load_and_process_audio(flac_path, sequence_length, device)
predictions = transcribe(model, audio)
p_est, i_est, v_est = extract_notes(predictions['onset'], predictions['frame'], predictions['velocity'], onset_threshold, frame_threshold)
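        # convert frame indices to seconds: each frame spans HOP_LENGTH samples at SAMPLE_RATE Hz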
scaling = HOP_LENGTH / SAMPLE_RATE
i_est = (i_est * scaling).reshape(-1, 2)
p_est = np.array([midi_to_hz(MIN_MIDI + midi) for midi in p_est])
os.makedirs(save_path, exist_ok=True)
pred_path = os.path.join(save_path, os.path.basename(flac_path) + '.pred.png')
save_pianoroll(pred_path, predictions['onset'], predictions['frame'])
midi_path = os.path.join(save_path, os.path.basename(flac_path) + '.pred.mid')
save_midi(midi_path, p_est, i_est, v_est)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('model_file', type=str)
parser.add_argument('flac_paths', type=str, nargs='+')
parser.add_argument('--save-path', type=str, default='.')
parser.add_argument('--sequence-length', default=None, type=int)
parser.add_argument('--onset-threshold', default=0.5, type=float)
parser.add_argument('--frame-threshold', default=0.5, type=float)
parser.add_argument('--device', default='cuda' if torch.cuda.is_available() else 'cpu')
with torch.no_grad():
transcribe_file(**vars(parser.parse_args()))
| 34.733333
| 146
| 0.680742
|
import argparse
import os
import sys
import numpy as np
import soundfile
from mir_eval.util import midi_to_hz
from onsets_and_frames import *
def load_and_process_audio(flac_path, sequence_length, device):
random = np.random.RandomState(seed=42)
audio, sr = soundfile.read(flac_path, dtype='int16')
assert sr == SAMPLE_RATE
audio = torch.ShortTensor(audio)
if sequence_length is not None:
audio_length = len(audio)
step_begin = random.randint(audio_length - sequence_length) // HOP_LENGTH
n_steps = sequence_length // HOP_LENGTH
begin = step_begin * HOP_LENGTH
end = begin + sequence_length
audio = audio[begin:end].to(device)
else:
audio = audio.to(device)
audio = audio.float().div_(32768.0)
return audio
def transcribe(model, audio):
mel = melspectrogram(audio.reshape(-1, audio.shape[-1])[:, :-1]).transpose(-1, -2)
onset_pred, offset_pred, _, frame_pred, velocity_pred = model(mel)
predictions = {
'onset': onset_pred.reshape((onset_pred.shape[1], onset_pred.shape[2])),
'offset': offset_pred.reshape((offset_pred.shape[1], offset_pred.shape[2])),
'frame': frame_pred.reshape((frame_pred.shape[1], frame_pred.shape[2])),
'velocity': velocity_pred.reshape((velocity_pred.shape[1], velocity_pred.shape[2]))
}
return predictions
def transcribe_file(model_file, flac_paths, save_path, sequence_length,
onset_threshold, frame_threshold, device):
model = torch.load(model_file, map_location=device).eval()
summary(model)
for flac_path in flac_paths:
print(f'Processing {flac_path}...', file=sys.stderr)
audio = load_and_process_audio(flac_path, sequence_length, device)
predictions = transcribe(model, audio)
p_est, i_est, v_est = extract_notes(predictions['onset'], predictions['frame'], predictions['velocity'], onset_threshold, frame_threshold)
scaling = HOP_LENGTH / SAMPLE_RATE
i_est = (i_est * scaling).reshape(-1, 2)
p_est = np.array([midi_to_hz(MIN_MIDI + midi) for midi in p_est])
os.makedirs(save_path, exist_ok=True)
pred_path = os.path.join(save_path, os.path.basename(flac_path) + '.pred.png')
save_pianoroll(pred_path, predictions['onset'], predictions['frame'])
midi_path = os.path.join(save_path, os.path.basename(flac_path) + '.pred.mid')
save_midi(midi_path, p_est, i_est, v_est)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('model_file', type=str)
parser.add_argument('flac_paths', type=str, nargs='+')
parser.add_argument('--save-path', type=str, default='.')
parser.add_argument('--sequence-length', default=None, type=int)
parser.add_argument('--onset-threshold', default=0.5, type=float)
parser.add_argument('--frame-threshold', default=0.5, type=float)
parser.add_argument('--device', default='cuda' if torch.cuda.is_available() else 'cpu')
with torch.no_grad():
transcribe_file(**vars(parser.parse_args()))
| true
| true
|
79082e1284ddd2cfd157e322ff9b49d4d7a692b5
| 2,018
|
py
|
Python
|
app/blog/models.py
|
jayjodev/oncollegehub
|
5633df8beaef232d58025c4407bd9e25bd349e49
|
[
"MIT"
] | 2
|
2018-11-14T17:08:05.000Z
|
2018-11-14T17:08:38.000Z
|
app/blog/models.py
|
jayjodev/oncollegehub
|
5633df8beaef232d58025c4407bd9e25bd349e49
|
[
"MIT"
] | 16
|
2020-01-11T04:09:50.000Z
|
2022-03-12T00:11:19.000Z
|
app/blog/models.py
|
jayjodev/oncollegehub
|
5633df8beaef232d58025c4407bd9e25bd349e49
|
[
"MIT"
] | 2
|
2018-11-14T17:08:07.000Z
|
2018-11-28T21:38:16.000Z
|
from django.db import models
from django.utils import timezone
from django.core.exceptions import ValidationError
# from django.contrib.auth.models import User
from users.models import Student, College
from django.urls import reverse
from django.core import validators
class AbstractPostModel(models.Model):
title = models.CharField(validators=[validators.MinLengthValidator(10)],
null=False, max_length=500)
content = models.TextField(validators=[validators.MinLengthValidator(10)], null=False)
post_date = models.DateTimeField(default=timezone.now)
author = models.ForeignKey(Student, on_delete=models.CASCADE)
rating = models.IntegerField(default=0)
college = models.ForeignKey(College, on_delete=models.CASCADE)
class Meta:
abstract = True
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post-detail', kwargs={'pk': self.pk, 'title': self.title})
class Question(AbstractPostModel):
is_answered = models.BooleanField(default=False)
class Answer(AbstractPostModel):
is_approved = models.BooleanField(default=False)
question = models.ForeignKey(Question, on_delete=models.CASCADE, null=True)
class Voter(models.Model):
Question = models.ForeignKey(Question, on_delete=models.CASCADE)
Answer = models.ForeignKey(Answer, on_delete=models.CASCADE, null=True)
user = models.ForeignKey(Student, on_delete=models.CASCADE)
def __str__(self):
return self.user.username + ' vote on post: ' + self.Question.title
class Comment(AbstractPostModel):
Question = models.ForeignKey(Question, on_delete=models.CASCADE)
author = models.ForeignKey(Student, on_delete=models.CASCADE)
content = models.TextField(null=False)
def __str__(self):
return self.author.username + ' comment on post: ' + self.Question.title
def get_absolute_url(self):
return reverse('post-detail', kwargs={'pk': self.pk, 'title': self.Question.title})
| 35.403509
| 91
| 0.733399
|
from django.db import models
from django.utils import timezone
from django.core.exceptions import ValidationError
from users.models import Student, College
from django.urls import reverse
from django.core import validators
class AbstractPostModel(models.Model):
title = models.CharField(validators=[validators.MinLengthValidator(10)],
null=False, max_length=500)
content = models.TextField(validators=[validators.MinLengthValidator(10)], null=False)
post_date = models.DateTimeField(default=timezone.now)
author = models.ForeignKey(Student, on_delete=models.CASCADE)
rating = models.IntegerField(default=0)
college = models.ForeignKey(College, on_delete=models.CASCADE)
class Meta:
abstract = True
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('post-detail', kwargs={'pk': self.pk, 'title': self.title})
class Question(AbstractPostModel):
is_answered = models.BooleanField(default=False)
class Answer(AbstractPostModel):
is_approved = models.BooleanField(default=False)
question = models.ForeignKey(Question, on_delete=models.CASCADE, null=True)
class Voter(models.Model):
Question = models.ForeignKey(Question, on_delete=models.CASCADE)
Answer = models.ForeignKey(Answer, on_delete=models.CASCADE, null=True)
user = models.ForeignKey(Student, on_delete=models.CASCADE)
def __str__(self):
return self.user.username + ' vote on post: ' + self.Question.title
class Comment(AbstractPostModel):
Question = models.ForeignKey(Question, on_delete=models.CASCADE)
author = models.ForeignKey(Student, on_delete=models.CASCADE)
content = models.TextField(null=False)
def __str__(self):
return self.author.username + ' comment on post: ' + self.Question.title
def get_absolute_url(self):
return reverse('post-detail', kwargs={'pk': self.pk, 'title': self.Question.title})
| true
| true
|
79082ed18ca17e046b01b7f68ba9c15e03e31ff6
| 8,102
|
py
|
Python
|
connectomics/config/config.py
|
divyam-goel/pytorch_connectomics
|
a2c70a7cc60fd84d67be6f225c123ff11daadb83
|
[
"MIT"
] | null | null | null |
connectomics/config/config.py
|
divyam-goel/pytorch_connectomics
|
a2c70a7cc60fd84d67be6f225c123ff11daadb83
|
[
"MIT"
] | null | null | null |
connectomics/config/config.py
|
divyam-goel/pytorch_connectomics
|
a2c70a7cc60fd84d67be6f225c123ff11daadb83
|
[
"MIT"
] | null | null | null |
import os
from yacs.config import CfgNode as CN
# -----------------------------------------------------------------------------
# Config definition
# -----------------------------------------------------------------------------
_C = CN()
# -----------------------------------------------------------------------------
# System
# -----------------------------------------------------------------------------
_C.SYSTEM = CN()
_C.SYSTEM.NUM_GPUS = 4
_C.SYSTEM.NUM_CPUS = 4
# -----------------------------------------------------------------------------
# Model
# -----------------------------------------------------------------------------
_C.MODEL = CN()
# Model architectures defined in the package: unet_super, super, fpn, unet_residual_3d
_C.MODEL.ARCHITECTURE = 'unet_residual_3d'
# Number of filters per unet block
_C.MODEL.FILTERS = [28, 36, 48, 64, 80]
_C.MODEL.TARGET_OPT = ['0']
_C.MODEL.WEIGHT_OPT = [['1']]
# Choose the right loss function for each target:
# 'WeightedMSE', 'WeightedBCE', 'JaccardLoss', 'DiceLoss'
_C.MODEL.LOSS_OPTION = [['WeightedBCE']]
# Weight for each loss function
_C.MODEL.LOSS_WEIGHT = [[1.0]]
# Define the number of input channels. Usually EM images are
# single-channel gray-scale image.
_C.MODEL.IN_PLANES = 1
# Define the number of output channels.
_C.MODEL.OUT_PLANES = 1
# Padding mode, possible options: 'zeros','circular', 'rep'
_C.MODEL.PAD_MODE = 'rep'
# Normalization mode, possible options: 'bn', 'abn', 'in', 'bin'
_C.MODEL.NORM_MODE = 'bn'
# Activation mode, possible options: 'relu', 'elu', 'leaky'
_C.MODEL.ACT_MODE = 'elu'
# If MODEL.EMBEDDING = 1 will do embedding
_C.MODEL.EMBEDDING = 1
# Last decoder head depth
_C.MODEL.HEAD_DEPTH = 1
_C.MODEL.INPUT_SIZE = [8, 256, 256]
_C.MODEL.OUTPUT_SIZE = [8, 256, 256]
_C.MODEL.REGU_OPT = []
_C.MODEL.REGU_WEIGHT = []
# Fine-tune suffix for model saving
_C.MODEL.FINETUNE = ''
# Exact matching: the weights shape in pretrain model and current model are identical
_C.MODEL.EXACT = True
_C.MODEL.SIZE_MATCH = True
_C.MODEL.PRE_MODEL = ''
_C.MODEL.PRE_MODEL_LAYER = ['']
_C.MODEL.PRE_MODEL_ITER = 0
_C.MODEL.PRE_MODEL_LAYER_SELECT = [-1]
# -----------------------------------------------------------------------------
# Dataset
# -----------------------------------------------------------------------------
_C.DATASET = CN()
# Scale ratio of the input data for different resolutions.
# Using a DATA_SCALE of [1., 0.5, 0.5] will downsample the
# original image by two times (e.g., 4nm -> 8nm).
_C.DATASET.DATA_SCALE = [1., 1., 1.]
# Scaling factor for super resolution
_C.DATASET.SCALE_FACTOR = [2, 3, 3]
# Specify the data path in the *.yaml files for different experiments.
_C.DATASET.IMAGE_NAME = ''
_C.DATASET.LABEL_NAME = ''
_C.DATASET.INPUT_PATH = ''
_C.DATASET.OUTPUT_PATH = ''
# Padding size for the input volumes
_C.DATASET.PAD_SIZE = [2, 64, 64]
# Half Patch size for 2D label erosion
_C.DATASET.LABEL_EROSION = 0
# If it's a binary label
_C.DATASET.LABEL_BINARY = False
_C.DATASET.LABEL_MAG = 0
# Data in tile format or not.
_C.DATASET.DO_CHUNK_TITLE = 0
# Chunk parameters for tile format: chunk_num (z,y,x), chunk_stride
_C.DATASET.DATA_CHUNK_NUM = [1, 1, 1]
# Predefined data chunk to iterate through
_C.DATASET.DATA_CHUNK_NUM_IND = []
# Boolean variable, equal to 'int(args.data_chunk_num[-1:])==1'
_C.DATASET.DATA_CHUNK_STRIDE = True
# Chunk parameters for tile format: chunk_iter_num
_C.DATASET.DATA_CHUNK_ITER = 1000
# Number of voxels to exceed for a valid sample
_C.DATASET.DATA_INVALID_THRES = [0., 0.]
_C.DATASET.PRE_LOAD_DATA = [None,None,None]
# Reject sampling
_C.DATASET.REJECT_SIZE_THRES = 100
_C.DATASET.REJECT_P = 0.95
# -----------------------------------------------------------------------------
# Augmentor
# -----------------------------------------------------------------------------
_C.AUGMENTOR = CN()
_C.AUGMENTOR.ROTATE = True
# Probability of applying the rotation augmentation
_C.AUGMENTOR.ROTATE_P = 0.1
_C.AUGMENTOR.RESCALE = True
# Probability of applying the rescale augmentation
_C.AUGMENTOR.RESCALE_P = 0.5
_C.AUGMENTOR.FLIP = True
# Probability of applying the flip augmentation
_C.AUGMENTOR.FLIP_P = 1.0
# Conducting x-z and y-z flip only when the dataset is isotropic.
_C.AUGMENTOR.FLIP_DO_ZTRANS = 0
_C.AUGMENTOR.ELASTIC = True
# Maximum pixel-moving distance of elastic transformation
_C.AUGMENTOR.ELASTIC_ALPHA = 12.0
# Standard deviation of the Gaussian filter
_C.AUGMENTOR.ELASTIC_SIGMA = 4.0
# Probability of applying the elastic augmentation
_C.AUGMENTOR.ELASTIC_P = 0.75
_C.AUGMENTOR.GRAYSCALE = True
# Probability of applying the grayscale augmentation
_C.AUGMENTOR.GRAYSCALE_P = 0.75
_C.AUGMENTOR.MISSINGPARTS = True
# Probability of applying the missingparts augmentation
_C.AUGMENTOR.MISSINGPARTS_P = 0.9
_C.AUGMENTOR.MISSINGSECTION = True
# Probability of applying the missingsection augmentation
_C.AUGMENTOR.MISSINGSECTION_P = 0.5
_C.AUGMENTOR.MISALIGNMENT = True
# Probability of applying the misalignment augmentation
_C.AUGMENTOR.MISALIGNMENT_P = 1.0
# Maximum pixel displacement in each direction (x and y) (int)
_C.AUGMENTOR.MISALIGNMENT_DISPLACEMENT = 16
# -----------------------------------------------------------------------------
# Solver
# -----------------------------------------------------------------------------
_C.SOLVER = CN()
# Specify the learning rate scheduler.
_C.SOLVER.LR_SCHEDULER_NAME = "MultiStepLR"
_C.SOLVER.ITERATION_STEP = 1
_C.SOLVER.ITERATION_SAVE = 5000
_C.SOLVER.ITERATION_TOTAL = 40000
_C.SOLVER.BASE_LR = 0.001
_C.SOLVER.BIAS_LR_FACTOR = 1.0
_C.SOLVER.WEIGHT_DECAY_BIAS = 0.0
_C.SOLVER.MOMENTUM = 0.9
# The weight decay that's applied to parameters of normalization layers
# (typically the affine transformation)
_C.SOLVER.WEIGHT_DECAY = 0.0001
_C.SOLVER.WEIGHT_DECAY_NORM = 0.0
# The iteration number to decrease learning rate by GAMMA
_C.SOLVER.GAMMA = 0.1
# should be a tuple like (30000,)
_C.SOLVER.STEPS = (30000, 35000)
_C.SOLVER.WARMUP_FACTOR = 1.0 / 1000
_C.SOLVER.WARMUP_ITERS = 1000
_C.SOLVER.WARMUP_METHOD = "linear"
# Save a checkpoint after every this number of iterations
_C.SOLVER.CHECKPOINT_PERIOD = 5000
# Number of samples per batch across all machines.
# If we have 16 GPUs and IMS_PER_BATCH = 32,
# each GPU will see 2 images per batch.
_C.SOLVER.SAMPLES_PER_BATCH = 16
# -----------------------------------------------------------------------------
# Monitor
# -----------------------------------------------------------------------------
_C.MONITOR = CN()
_C.MONITOR.LOG_OPT = [1, 1, 0]
_C.MONITOR.VIS_OPT = [0, 8]
_C.MONITOR.ITERATION_NUM = [10, 50]
# # -----------------------------------------------------------------------------
# # Inference
# # -----------------------------------------------------------------------------
_C.INFERENCE = CN()
_C.INFERENCE.INPUT_SIZE = [8, 256, 256]
_C.INFERENCE.OUTPUT_SIZE = [8, 256, 256]
_C.INFERENCE.IMAGE_NAME = ''
_C.INFERENCE.OUTPUT_PATH = ''
_C.INFERENCE.OUTPUT_NAME = 'result.h5'
_C.INFERENCE.PAD_SIZE = [8, 64, 64]
_C.INFERENCE.STRIDE = [1, 192, 192]
_C.INFERENCE.AUG_MODE = 'mean'
_C.INFERENCE.AUG_NUM = 4
_C.INFERENCE.DO_EVAL = True
_C.INFERENCE.DO_3D = True
# If not None then select channel of output
_C.INFERENCE.MODEL_OUTPUT_ID = [None]
# Number of test workers
_C.INFERENCE.TEST_NUM = 1
# Test worker id
_C.INFERENCE.TEST_ID = 0
# Batchsize for inference
_C.INFERENCE.SAMPLES_PER_BATCH = 32
def get_cfg_defaults():
"""Get a yacs CfgNode object with default values for my_project."""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern
return _C.clone()
def save_all_cfg(cfg, output_dir):
"""Save configs in the output directory."""
# Save config.yaml in the experiment directory after combine all
# non-default configurations from yaml file and command line.
path = os.path.join(output_dir, "config.yaml")
with open(path, "w") as f:
f.write(cfg.dump())
print("Full config saved to {}".format(path))
| 25.639241
| 86
| 0.619477
|
import os
from yacs.config import CfgNode as CN
_C = CN()
_C.SYSTEM = CN()
_C.SYSTEM.NUM_GPUS = 4
_C.SYSTEM.NUM_CPUS = 4
_C.MODEL = CN()
_C.MODEL.ARCHITECTURE = 'unet_residual_3d'
_C.MODEL.FILTERS = [28, 36, 48, 64, 80]
_C.MODEL.TARGET_OPT = ['0']
_C.MODEL.WEIGHT_OPT = [['1']]
_C.MODEL.LOSS_OPTION = [['WeightedBCE']]
_C.MODEL.LOSS_WEIGHT = [[1.0]]
_C.MODEL.IN_PLANES = 1
_C.MODEL.OUT_PLANES = 1
_C.MODEL.PAD_MODE = 'rep'
_C.MODEL.NORM_MODE = 'bn'
_C.MODEL.ACT_MODE = 'elu'
_C.MODEL.EMBEDDING = 1
_C.MODEL.HEAD_DEPTH = 1
_C.MODEL.INPUT_SIZE = [8, 256, 256]
_C.MODEL.OUTPUT_SIZE = [8, 256, 256]
_C.MODEL.REGU_OPT = []
_C.MODEL.REGU_WEIGHT = []
_C.MODEL.FINETUNE = ''
_C.MODEL.EXACT = True
_C.MODEL.SIZE_MATCH = True
_C.MODEL.PRE_MODEL = ''
_C.MODEL.PRE_MODEL_LAYER = ['']
_C.MODEL.PRE_MODEL_ITER = 0
_C.MODEL.PRE_MODEL_LAYER_SELECT = [-1]
_C.DATASET = CN()
_C.DATASET.DATA_SCALE = [1., 1., 1.]
_C.DATASET.SCALE_FACTOR = [2, 3, 3]
_C.DATASET.IMAGE_NAME = ''
_C.DATASET.LABEL_NAME = ''
_C.DATASET.INPUT_PATH = ''
_C.DATASET.OUTPUT_PATH = ''
_C.DATASET.PAD_SIZE = [2, 64, 64]
_C.DATASET.LABEL_EROSION = 0
_C.DATASET.LABEL_BINARY = False
_C.DATASET.LABEL_MAG = 0
_C.DATASET.DO_CHUNK_TITLE = 0
_C.DATASET.DATA_CHUNK_NUM = [1, 1, 1]
_C.DATASET.DATA_CHUNK_NUM_IND = []
_C.DATASET.DATA_CHUNK_STRIDE = True
_C.DATASET.DATA_CHUNK_ITER = 1000
_C.DATASET.DATA_INVALID_THRES = [0., 0.]
_C.DATASET.PRE_LOAD_DATA = [None,None,None]
_C.DATASET.REJECT_SIZE_THRES = 100
_C.DATASET.REJECT_P = 0.95
_C.AUGMENTOR = CN()
_C.AUGMENTOR.ROTATE = True
_C.AUGMENTOR.ROTATE_P = 0.1
_C.AUGMENTOR.RESCALE = True
_C.AUGMENTOR.RESCALE_P = 0.5
_C.AUGMENTOR.FLIP = True
_C.AUGMENTOR.FLIP_P = 1.0
_C.AUGMENTOR.FLIP_DO_ZTRANS = 0
_C.AUGMENTOR.ELASTIC = True
_C.AUGMENTOR.ELASTIC_ALPHA = 12.0
_C.AUGMENTOR.ELASTIC_SIGMA = 4.0
_C.AUGMENTOR.ELASTIC_P = 0.75
_C.AUGMENTOR.GRAYSCALE = True
_C.AUGMENTOR.GRAYSCALE_P = 0.75
_C.AUGMENTOR.MISSINGPARTS = True
_C.AUGMENTOR.MISSINGPARTS_P = 0.9
_C.AUGMENTOR.MISSINGSECTION = True
_C.AUGMENTOR.MISSINGSECTION_P = 0.5
_C.AUGMENTOR.MISALIGNMENT = True
_C.AUGMENTOR.MISALIGNMENT_P = 1.0
_C.AUGMENTOR.MISALIGNMENT_DISPLACEMENT = 16
_C.SOLVER = CN()
_C.SOLVER.LR_SCHEDULER_NAME = "MultiStepLR"
_C.SOLVER.ITERATION_STEP = 1
_C.SOLVER.ITERATION_SAVE = 5000
_C.SOLVER.ITERATION_TOTAL = 40000
_C.SOLVER.BASE_LR = 0.001
_C.SOLVER.BIAS_LR_FACTOR = 1.0
_C.SOLVER.WEIGHT_DECAY_BIAS = 0.0
_C.SOLVER.MOMENTUM = 0.9
_C.SOLVER.WEIGHT_DECAY = 0.0001
_C.SOLVER.WEIGHT_DECAY_NORM = 0.0
_C.SOLVER.GAMMA = 0.1
_C.SOLVER.STEPS = (30000, 35000)
_C.SOLVER.WARMUP_FACTOR = 1.0 / 1000
_C.SOLVER.WARMUP_ITERS = 1000
_C.SOLVER.WARMUP_METHOD = "linear"
_C.SOLVER.CHECKPOINT_PERIOD = 5000
_C.SOLVER.SAMPLES_PER_BATCH = 16
_C.MONITOR = CN()
_C.MONITOR.LOG_OPT = [1, 1, 0]
_C.MONITOR.VIS_OPT = [0, 8]
_C.MONITOR.ITERATION_NUM = [10, 50]
_C.INFERENCE = CN()
_C.INFERENCE.INPUT_SIZE = [8, 256, 256]
_C.INFERENCE.OUTPUT_SIZE = [8, 256, 256]
_C.INFERENCE.IMAGE_NAME = ''
_C.INFERENCE.OUTPUT_PATH = ''
_C.INFERENCE.OUTPUT_NAME = 'result.h5'
_C.INFERENCE.PAD_SIZE = [8, 64, 64]
_C.INFERENCE.STRIDE = [1, 192, 192]
_C.INFERENCE.AUG_MODE = 'mean'
_C.INFERENCE.AUG_NUM = 4
_C.INFERENCE.DO_EVAL = True
_C.INFERENCE.DO_3D = True
_C.INFERENCE.MODEL_OUTPUT_ID = [None]
_C.INFERENCE.TEST_NUM = 1
_C.INFERENCE.TEST_ID = 0
_C.INFERENCE.SAMPLES_PER_BATCH = 32
def get_cfg_defaults():
return _C.clone()
def save_all_cfg(cfg, output_dir):
path = os.path.join(output_dir, "config.yaml")
with open(path, "w") as f:
f.write(cfg.dump())
print("Full config saved to {}".format(path))
| true
| true
|
79083036c4c19017232d49b3487ba0475de179c0
| 625
|
py
|
Python
|
indra/tests/test_tas.py
|
djinnome/indra
|
382b7f236e0b1422c96a268ef873530b5e92d48f
|
[
"BSD-2-Clause"
] | null | null | null |
indra/tests/test_tas.py
|
djinnome/indra
|
382b7f236e0b1422c96a268ef873530b5e92d48f
|
[
"BSD-2-Clause"
] | null | null | null |
indra/tests/test_tas.py
|
djinnome/indra
|
382b7f236e0b1422c96a268ef873530b5e92d48f
|
[
"BSD-2-Clause"
] | null | null | null |
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.sources.tas.api import _load_data, process_csv
def test_load_data():
data = _load_data()
assert len(data) > 100, len(data)
def test_processor():
tp = process_csv(affinity_class_limit=10)
assert tp
assert tp.statements
num_stmts = len(tp.statements)
# This is the total number of statements about human genes
assert num_stmts == 51722, num_stmts
assert all(len(s.evidence) == 1 for s in tp.statements), \
"Some statements lack evidence, or have extra evidence."
| 29.761905
| 72
| 0.7296
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
from indra.sources.tas.api import _load_data, process_csv
def test_load_data():
data = _load_data()
assert len(data) > 100, len(data)
def test_processor():
tp = process_csv(affinity_class_limit=10)
assert tp
assert tp.statements
num_stmts = len(tp.statements)
assert num_stmts == 51722, num_stmts
assert all(len(s.evidence) == 1 for s in tp.statements), \
"Some statements lack evidence, or have extra evidence."
| true
| true
|
790830a9b7852a95bdc8b5052dbca110443dd94f
| 808
|
py
|
Python
|
simulator/Planners/Planner.py
|
ciarakamahele/sasy
|
fd0d50785561f188c5e9b6fa5e928673457be772
|
[
"Apache-2.0"
] | null | null | null |
simulator/Planners/Planner.py
|
ciarakamahele/sasy
|
fd0d50785561f188c5e9b6fa5e928673457be772
|
[
"Apache-2.0"
] | null | null | null |
simulator/Planners/Planner.py
|
ciarakamahele/sasy
|
fd0d50785561f188c5e9b6fa5e928673457be772
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Ciara Kamahele-Sanfratello
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Planner is a generic interface used by Simulators to choose the next action to take
class Planner:
def __init__(self):
pass
def next_action(self, initial_state, goal_state, prev_obs):
pass
| 36.727273
| 85
| 0.75
|
class Planner:
def __init__(self):
pass
def next_action(self, initial_state, goal_state, prev_obs):
pass
| true
| true
|
790830d45db35356a0999260c2f11654a788d8bd
| 5,102
|
py
|
Python
|
scripts/cartesian_experiments.py
|
mikelibg/yap
|
eb46baf91f0e52918e77f1693a280e0796cdfb8e
|
[
"Apache-2.0"
] | 68
|
2019-02-27T18:03:57.000Z
|
2022-03-23T14:42:47.000Z
|
scripts/cartesian_experiments.py
|
mikelibg/yap
|
eb46baf91f0e52918e77f1693a280e0796cdfb8e
|
[
"Apache-2.0"
] | 9
|
2017-05-30T11:41:53.000Z
|
2021-10-13T11:45:43.000Z
|
scripts/cartesian_experiments.py
|
mikelibg/yap
|
eb46baf91f0e52918e77f1693a280e0796cdfb8e
|
[
"Apache-2.0"
] | 20
|
2017-06-10T09:23:58.000Z
|
2021-09-06T23:06:38.000Z
|
#!/usr/bin/python
"""Cartesian execution of options for experiments"""
import itertools
from pprint import pprint
import os
# GROUPS = [
# ('train', {'type': 'option',
# 'order': 0,
# 'values': ['train5k']}),
# ('lang', {'type': 'option',
# 'order': 1,
# 'values': 'hungarian,basque,french,korean,polish,swedish'.split(',')}),
# ('infuse', {'type': 'option',
# 'order': 2,
# 'values': ['true', 'false']}),
# ('maxmsr', {'type': 'option',
# 'order': 3,
# 'values': '1'.split(',')})
# ]
#
GROUPS = [
('train', {'type': 'option',
'order': 0,
'values': ['train', 'train5k']}),
('lang', {'type': 'option',
'order': 1,
'values': 'hungarian,basque,french,korean,polish,swedish'.split(',')}),
('infuse', {'type': 'option',
'order': 2,
'values': ['true', 'false']}),
('maxmsr', {'type': 'option',
'order': 3,
'values': '1,2,4,8'.split(',')})
]
# GROUPS = [
# ('gram', {'type': 'file',
# 'use': 'agg',
# 'order': 0,
# 'values': ['unigram', 'bigram', 'trigram', 'nextunigram', 'nextbigram', 'nexttrigram']}),
# # ('prev', {'type': 'file',
# # 'use': 'optional',
# # 'value': 'prev'}),
# ('pop', {'type': 'option',
# 'use': 'optional',
# 'value': '-pop'})
# ]
# BASE = """nohup ./chukuparser md -f $conf -td corpus/train4k.hebtb.gold.lattices -tl corpus/train4k.hebtb.pred.lattices -in corpus/dev.hebtb.gold.conll.pred.lattices -ing corpus/dev.hebtb.gold.conll.gold.lattices -om devo.$exp.b32.hebtb.mapping -it 1 -b 32 -p Funcs_Main_POS_Both_Prop -wb -bconc $flags > runstatus.$exp.b32"""
MALEARN = """nohup ./yap malearn -lattice spmrl/train.$lang.gold.conll.tobeparsed.tagged.lattices -raw spmrl/train.$lang.gold.conll.tobeparsed.raw -out $lang.json > malearn.$exp.out"""
MATRAIN = """nohup ./yap ma -dict $lang.json -raw spmrl/$train.$lang.gold.conll.tobeparsed.raw -out $train.$lang.$maxmsr.analyzed.lattices -maxmsrperpos $maxmsr > matrain.$exp.out"""
MADEV = """nohup ./yap ma -dict $lang.json -raw spmrl/dev.$lang.gold.conll.tobeparsed.raw -out dev.$lang.$maxmsr.analyzed.lattices -maxmsrperpos $maxmsr > madev.$exp.out"""
MD = """nohup ./yap md -f conf/standalone.md.yaml -td spmrl/$train.$lang.gold.conll.tobeparsed.tagged.lattices -tl $train.$lang.$maxmsr.analyzed.lattices -in dev.$lang.$maxmsr.analyzed.lattices -ing spmrl/dev.$lang.gold.conll.tobeparsed.tagged.lattices -om devo.$train_$lang_$maxmsr_$infuse.mapping -infusedev=$infuse -it 1 -b 32 -p Funcs_Main_POS_Both_Prop -bconc -pop > runstatus.$exp.out"""
cmds = [MALEARN, MATRAIN, MADEV, MD]
REPLACE_STR = '$exp'
CONF_FILE = 'standalone.md.%s.yaml'
BASE_FILE = 'standalone.base.md.yaml'
# first transform optional to empty, existing
for (name, conf) in GROUPS:
if conf.get('use', None) == 'optional':
conf['values'] = [None, conf['value']]
conf_values = map(lambda (name, conf): conf['values'], GROUPS)
executions = list(itertools.product(*conf_values))
def gen_agg_file(values, out_name):
with open(out_name, 'w') as outf:
for value in values:
with open(value) as inf:
outf.write(inf.read())
for execution in executions:
print 'At execution %s' % str(execution)
files = [BASE_FILE]
exp_strings = []
command_line_options = []
options = {}
# for i, param in enumerate(execution):
# conf_name, conf = GROUPS[i]
# # print "\tAt conf %s" % conf_name
# # pprint(conf)
# # print "\tparam is %s" % str(param)
# if conf['type'] == 'option' and param:
# print "\t\tadd %s=%s to command line" % (conf_name, str(param))
# options[conf_name] = param
# # print "\t\tadd %s to command line" % str(conf['value'])
# # command_line_options.append(conf['value'])
# if conf.get('use', None) == 'optional':
# exp_strings.append(conf_name if param else 'no%s' % conf_name)
# else:
# exp_strings.append(param)
# if conf['type'] == 'file':
# if conf['use'] == 'agg':
# files += conf['values'][:conf['values'].index(param)+1]
# if conf['use'] == 'optional' and param:
# files.append(param)
for cmd in cmds:
execcmd = cmd[:]
for name, value in zip(map(lambda (k,v):k, GROUPS), execution):
execcmd = execcmd.replace('$'+name, value)
execcmd = execcmd.replace('$exp', '_'.join(execution))
print execcmd
os.system(execcmd)
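    # For example, the execution ('train5k', 'basque', 'true', '2') expands MADEV to:
    #   nohup ./yap ma -dict basque.json -raw spmrl/dev.basque.gold.conll.tobeparsed.raw -out dev.basque.2.analyzed.lattices -maxmsrperpos 2 > madev.train5k_basque_true_2.out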
# exp_string = '_'.join(exp_strings)
# outname = CONF_FILE % exp_string
# print command_line_options
# gen_agg_file(files, outname)
# new_command = BASE.replace('$conf', outname).replace('$exp', exp_string, 2).replace('$flags', ' '.join(command_line_options))
# print 'Executing %s' % new_command
# os.system(new_command)
| 43.237288
| 393
| 0.571541
|
"""Cartesian execution of options for experiments"""
import itertools
from pprint import pprint
import os
GROUPS = [
('train', {'type': 'option',
'order': 0,
'values': ['train', 'train5k']}),
('lang', {'type': 'option',
'order': 1,
'values': 'hungarian,basque,french,korean,polish,swedish'.split(',')}),
('infuse', {'type': 'option',
'order': 2,
'values': ['true', 'false']}),
('maxmsr', {'type': 'option',
'order': 3,
'values': '1,2,4,8'.split(',')})
]
MALEARN = """nohup ./yap malearn -lattice spmrl/train.$lang.gold.conll.tobeparsed.tagged.lattices -raw spmrl/train.$lang.gold.conll.tobeparsed.raw -out $lang.json > malearn.$exp.out"""
MATRAIN = """nohup ./yap ma -dict $lang.json -raw spmrl/$train.$lang.gold.conll.tobeparsed.raw -out $train.$lang.$maxmsr.analyzed.lattices -maxmsrperpos $maxmsr > matrain.$exp.out"""
MADEV = """nohup ./yap ma -dict $lang.json -raw spmrl/dev.$lang.gold.conll.tobeparsed.raw -out dev.$lang.$maxmsr.analyzed.lattices -maxmsrperpos $maxmsr > madev.$exp.out"""
MD = """nohup ./yap md -f conf/standalone.md.yaml -td spmrl/$train.$lang.gold.conll.tobeparsed.tagged.lattices -tl $train.$lang.$maxmsr.analyzed.lattices -in dev.$lang.$maxmsr.analyzed.lattices -ing spmrl/dev.$lang.gold.conll.tobeparsed.tagged.lattices -om devo.$train_$lang_$maxmsr_$infuse.mapping -infusedev=$infuse -it 1 -b 32 -p Funcs_Main_POS_Both_Prop -bconc -pop > runstatus.$exp.out"""
cmds = [MALEARN, MATRAIN, MADEV, MD]
REPLACE_STR = '$exp'
CONF_FILE = 'standalone.md.%s.yaml'
BASE_FILE = 'standalone.base.md.yaml'
for (name, conf) in GROUPS:
if conf.get('use', None) == 'optional':
conf['values'] = [None, conf['value']]
conf_values = map(lambda (name, conf): conf['values'], GROUPS)
executions = list(itertools.product(*conf_values))
def gen_agg_file(values, out_name):
with open(out_name, 'w') as outf:
for value in values:
with open(value) as inf:
outf.write(inf.read())
for execution in executions:
print 'At execution %s' % str(execution)
files = [BASE_FILE]
exp_strings = []
command_line_options = []
options = {}
    for cmd in cmds:
        execcmd = cmd[:]
        for name, value in zip(map(lambda (k,v):k, GROUPS), execution):
execcmd = execcmd.replace('$'+name, value)
execcmd = execcmd.replace('$exp', '_'.join(execution))
print execcmd
os.system(execcmd)
| false
| true
|
790832d52d892c0e0e2e96d4e52396dcf213b110
| 21,384
|
py
|
Python
|
ppoPolicyTraining.py
|
britig/S2RL-Policies
|
b9c74b7f5efec225920c09f7e8e82d8555d61bd9
|
[
"MIT"
] | 1
|
2022-03-24T07:26:37.000Z
|
2022-03-24T07:26:37.000Z
|
ppoPolicyTraining.py
|
britig/S2RL-Policies
|
b9c74b7f5efec225920c09f7e8e82d8555d61bd9
|
[
"MIT"
] | null | null | null |
ppoPolicyTraining.py
|
britig/S2RL-Policies
|
b9c74b7f5efec225920c09f7e8e82d8555d61bd9
|
[
"MIT"
] | null | null | null |
"""
The file contains the PPO class to train with.
NOTE: All "ALG STEP"s are following the numbers from the original PPO pseudocode.
It can be found here: https://spinningup.openai.com/en/latest/_images/math/e62a8971472597f4b014c2da064f636ffe365ba3.svg
"""
import gym
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
#For continuous actions
from torch.distributions import MultivariateNormal
#For discrete action_space
from torch.distributions import Categorical
from network import FeedForwardActorNN, FeedForwardCriticNN
import sys
from cbf_clf_helper import clf_control, cbf_control
# eval_policy is assumed to be a local helper module (eval_policy.py); test() below depends on it
from eval_policy import eval_policy
#Integrating tensorboard
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
class PPO:
"""
This is the PPO class we will use as our model in main.py
"""
def __init__(self, env, **hyperparameters):
"""
Initializes the PPO model, including hyperparameters.
Parameters:
env - the environment to train on.
hyperparameters - all extra arguments passed into PPO that should be hyperparameters.
Returns:
None
"""
# Make sure the environment is compatible with our code
assert(type(env.observation_space) == gym.spaces.Box)
		# The action space may be Box (continuous) or Discrete, so it is not asserted here:
		# assert(type(env.action_space) == gym.spaces.Box)
# Initialize hyperparameters for training with PPO
self._init_hyperparameters(hyperparameters)
# Extract environment information
self.env = env
self.obs_dim = env.observation_space.shape[0]
if self.discrete:
self.act_dim = env.action_space.n
else:
self.act_dim = env.action_space.shape[0] #env.action_space.n #env.action_space.shape[0]
# Initialize actor and critic networks
self.actor = FeedForwardActorNN(self.obs_dim, self.act_dim,self.discrete)
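		# Warm-start weights from a previously trained lane-keeping actor. Note that as
		# written the loaded policy stays in a local variable; self.actor is not replaced.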
actor_model = 'ppo_actorKinematicBicycleGymLane.pth'
policy = FeedForwardActorNN(5, 2,False)
policy.load_state_dict(torch.load(actor_model))
actor_model = policy
#print(f'model =========== {self.actor}') # ALG STEP 1
self.critic = FeedForwardCriticNN(self.obs_dim, 1)
#print(f'critic =========== {self.critic}')
# Initialize optimizers for actor and critic
self.actor_optim = Adam(self.actor.parameters(), lr=self.lr)
self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)
# Initialize the covariance matrix used to query the actor for actions
self.cov_var = torch.full(size=(self.act_dim,), fill_value=0.05)
self.cov_mat = torch.diag(self.cov_var)
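		# Each action dimension gets independent Gaussian exploration noise with variance 0.05 (std ~ 0.22).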
self.obs_count = 0
self.index_count = 0
# This logger will help us with printing out summaries of each iteration
self.logger = {
't_so_far': 0, # timesteps so far
'i_so_far': 0, # iterations so far
'batch_lens': [], # episodic lengths in batch
'batch_rews': [], # episodic returns in batch
			'batch_infractions': [], # episodic safety-infraction counts in batch
'actor_losses': [], # losses of actor network in current iteration
'actor_network' : 0, # Actor network
}
def learn(self, env_name,failure_observations,subpolicy):
"""
Train the actor and critic networks. Here is where the main PPO algorithm resides.
			Parameters:
				env_name - suffix used when naming the saved actor/critic checkpoint files
				failure_observations - observations from failure cases (forwarded to rollout)
				subpolicy - if True, checkpoints are saved under the subpolicy file names
			Return:
				None
"""
print(f"Learning... Running {self.max_timesteps_per_episode} timesteps per episode, ", end='')
print(f"{self.timesteps_per_batch} timesteps per batch for a total of {self.training_step} iterations")
t_so_far = 0 # Timesteps simulated so far
i_so_far = 0 # Iterations ran so far
while i_so_far < self.training_step: # ALG STEP 2
# Autobots, roll out (just kidding, we're collecting our batch simulations here)
batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens = self.rollout(subpolicy,failure_observations) # ALG STEP 3
# Calculate how many timesteps we collected this batch
t_so_far += np.sum(batch_lens)
# Increment the number of iterations
i_so_far += 1
# Logging timesteps so far and iterations so far
self.logger['t_so_far'] = t_so_far
self.logger['i_so_far'] = i_so_far
# Calculate advantage at k-th iteration
V, _ = self.evaluate(batch_obs, batch_acts)
A_k = batch_rtgs - V.detach() # ALG STEP 5
# One of the only tricks I use that isn't in the pseudocode. Normalizing advantages
# isn't theoretically necessary, but in practice it decreases the variance of
# our advantages and makes convergence much more stable and faster. I added this because
# solving some environments was too unstable without it.
A_k = (A_k - A_k.mean()) / (A_k.std() + 1e-10)
# This is the loop where we update our network for some n epochs
for _ in range(self.n_updates_per_iteration): # ALG STEP 6 & 7
# Calculate V_phi and pi_theta(a_t | s_t)
V, curr_log_probs = self.evaluate(batch_obs, batch_acts)
# Calculate the ratio pi_theta(a_t | s_t) / pi_theta_k(a_t | s_t)
# NOTE: we just subtract the logs, which is the same as
# dividing the values and then canceling the log with e^log.
# For why we use log probabilities instead of actual probabilities,
# here's a great explanation:
# https://cs.stackexchange.com/questions/70518/why-do-we-use-the-log-in-gradient-based-reinforcement-algorithms
# TL;DR makes gradient ascent easier behind the scenes.
ratios = torch.exp(curr_log_probs - batch_log_probs)
# Calculate surrogate losses.
#print(f'A_k======================={A_k}')
surr1 = ratios * A_k
#print(f'surr1======================={surr1}')
surr2 = torch.clamp(ratios, 1 - self.clip, 1 + self.clip) * A_k
#print(f'surr2======================={surr2}')
# Calculate actor and critic losses.
# NOTE: we take the negative min of the surrogate losses because we're trying to maximize
# the performance function, but Adam minimizes the loss. So minimizing the negative
# performance function maximizes it.
actor_loss = (-torch.min(surr1, surr2)).mean()
#print(f'actor_loss======================={actor_loss}')
critic_loss = nn.MSELoss()(V, batch_rtgs)
# Calculate gradients and perform backward propagation for actor network
self.actor_optim.zero_grad()
actor_loss.backward(retain_graph=True)
self.actor_optim.step()
# Calculate gradients and perform backward propagation for critic network
self.critic_optim.zero_grad()
critic_loss.backward()
self.critic_optim.step()
# Log actor loss
self.logger['actor_losses'].append(actor_loss.detach())
self.logger['actor_network'] = self.actor
# Print a summary of our training so far
self._log_summary()
# Save our model if it's time
if i_so_far % self.save_freq == 0:
if subpolicy:
torch.save(self.actor.state_dict(), './ppo_actor_subpolicy'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic_subpolicy'+env_name+'.pth')
else:
torch.save(self.actor.state_dict(), './ppo_actor'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic'+env_name+'.pth')
def rollout(self,subpolicy,failure_observations):
"""
This is where we collect the batch of data
from simulation. Since this is an on-policy algorithm, we'll need to collect a fresh batch
of data each time we iterate the actor/critic networks.
		Parameters:
			subpolicy - subpolicy flag passed through from learn (not used inside rollout)
			failure_observations - failure-case observations passed through from learn (not used inside rollout)
Return:
batch_obs - the observations collected this batch. Shape: (number of timesteps, dimension of observation)
batch_acts - the actions collected this batch. Shape: (number of timesteps, dimension of action)
batch_log_probs - the log probabilities of each action taken this batch. Shape: (number of timesteps)
batch_rtgs - the Rewards-To-Go of each timestep in this batch. Shape: (number of timesteps)
batch_lens - the lengths of each episode this batch. Shape: (number of episodes)
"""
# Batch data. For more details, check function header.
batch_obs = []
batch_acts = []
batch_log_probs = []
batch_rews = []
batch_rtgs = []
batch_lens = []
batch_infractions = []
# Episodic data. Keeps track of rewards per episode, will get cleared
# upon each new episode
ep_rews = []
t = 0 # Keeps track of how many timesteps we've run so far this batch
# Keep simulating until we've run more than or equal to specified timesteps per batch
while t < self.timesteps_per_batch:
act_list = []
ep_rews = [] # rewards collected per episode
			# Reset the environment. Note that obs is short for observation.
obs = self.env.reset()
#print(f'obs reset ============= {obs}')
done = False
count_infractions = 0
count_infractions_acc = 0
count_infractions_steer = 0
# Run an episode for a maximum of max_timesteps_per_episode timesteps
for ep_t in range(self.max_timesteps_per_episode):
a_predicted_clf = clf_control(self.env.v_ego)
delta, target_id, crosstrack_error = self.env.car.tracker.stanley_control(self.env.x_ego, self.env.y_ego, self.env.yaw_ego, self.env.v_ego, self.env.delta_ego)
# If render is specified, render the environment
if self.render:
self.env.render()
t += 1 # Increment timesteps ran this batch so far
# Track observations in this batch
batch_obs.append(obs)
# Calculate action and make a step in the env.
# Note that rew is short for reward.
if self.discrete:
action, log_prob = self.get_action_discrete(obs)
else:
action, log_prob = self.get_action(obs) #self.get_action_discrete(obs)
#print(f'action chosen =============== {action}')
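				# Infraction bookkeeping: compare the learned action against the CLF acceleration
				# bound and the Stanley steering command (with a 0.2 tolerance on steering).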
if(abs(round(float(action[0]),1))<abs(round(float(a_predicted_clf),1))):
count_infractions_acc = count_infractions_acc+1
if(abs(round(float(action[1]),1)) < abs(round(float(delta),1))-0.2):
#print(f'After rounding =============== {round(float(action_net[1]),1)} ====== {round(float(action[1]),1)}')
count_infractions_steer = count_infractions_steer+1
obs, rew, done, info = self.env.step(action)
count_infractions = count_infractions_acc+count_infractions_steer
# Track recent reward, action, and action log probability
ep_rews.append(rew)
batch_acts.append(action)
batch_log_probs.append(log_prob)
act_list.append(info)
# If the environment tells us the episode is terminated, break
if done:
break
# Track episodic lengths and rewards
#self.env.render(act_list)
batch_lens.append(ep_t + 1)
batch_rews.append(ep_rews)
batch_infractions.append(count_infractions)
# Reshape data as tensors in the shape specified in function description, before returning
batch_obs = torch.tensor(batch_obs, dtype=torch.float)
#print(f'batch_acts =============== {batch_acts}')
#For discrete state space
if self.discrete:
batch_acts = torch.tensor(batch_acts, dtype=torch.long).view(-1,)
else:
batch_acts = torch.tensor(batch_acts, dtype=torch.float) #torch.tensor(batch_acts, dtype=torch.long).view(-1,)
#print(f'batch_acts =============== {batch_acts}')
batch_log_probs = torch.tensor(batch_log_probs, dtype=torch.float)
batch_rtgs = self.compute_rtgs(batch_rews) # ALG STEP 4
# Log the episodic returns and episodic lengths in this batch.
self.logger['batch_rews'] = batch_rews
self.logger['batch_lens'] = batch_lens
self.logger['batch_infractions'] = batch_infractions
return batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens
def compute_rtgs(self, batch_rews):
"""
Compute the Reward-To-Go of each timestep in a batch given the rewards.
Parameters:
batch_rews - the rewards in a batch, Shape: (number of episodes, number of timesteps per episode)
Return:
batch_rtgs - the rewards to go, Shape: (number of timesteps in batch)
"""
# The rewards-to-go (rtg) per episode per batch to return.
# The shape will be (num timesteps per episode)
batch_rtgs = []
# Iterate through each episode
for ep_rews in reversed(batch_rews):
discounted_reward = 0 # The discounted reward so far
# Iterate through all rewards in the episode. We go backwards for smoother calculation of each
# discounted return (think about why it would be harder starting from the beginning)
for rew in reversed(ep_rews):
discounted_reward = rew + discounted_reward * self.gamma
batch_rtgs.insert(0, discounted_reward)
# Convert the rewards-to-go into a tensor
batch_rtgs = torch.tensor(batch_rtgs, dtype=torch.float)
return batch_rtgs
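	# Worked example (illustrative): for ep_rews = [1.0, 2.0, 3.0] and gamma = 0.95, the
	# backward pass above yields rtgs [1.0 + 0.95*(2.0 + 0.95*3.0), 2.0 + 0.95*3.0, 3.0]
	# = [5.6075, 4.85, 3.0].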
# Probability sampling for discrete actions
def get_action_discrete(self, obs):
#print(f'obs ================== {obs}')
mean = self.actor(obs)
#print(f'mean ================== {mean}')
dist = Categorical(mean)
#print(f'dist ================== {dist}')
action = dist.sample()
log_prob = dist.log_prob(action)
#print(f'action ====== {action} ========= {log_prob}')
return action.detach().numpy().item(), log_prob.detach().item()
def get_action(self, obs):
"""
Queries an action from the actor network, should be called from rollout.
Parameters:
obs - the observation at the current timestep
Return:
action - the action to take, as a numpy array
log_prob - the log probability of the selected action in the distribution
"""
# Query the actor network for a mean action
mean = self.actor(obs)
# Create a distribution with the mean action and std from the covariance matrix above.
# For more information on how this distribution works, check out Andrew Ng's lecture on it:
# https://www.youtube.com/watch?v=JjB58InuTqM
dist = MultivariateNormal(mean, self.cov_mat)
# Sample an action from the distribution
action = dist.sample()
# Calculate the log probability for that action
log_prob = dist.log_prob(action)
# Return the sampled action and the log probability of that action in our distribution
return action.detach().numpy(), log_prob.detach()
def evaluate(self, batch_obs, batch_acts):
"""
Estimate the values of each observation, and the log probs of
each action in the most recent batch with the most recent
iteration of the actor network. Should be called from learn.
Parameters:
batch_obs - the observations from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of observation)
batch_acts - the actions from the most recently collected batch as a tensor.
Shape: (number of timesteps in batch, dimension of action)
Return:
V - the predicted values of batch_obs
log_probs - the log probabilities of the actions taken in batch_acts given batch_obs
"""
# Query critic network for a value V for each batch_obs. Shape of V should be same as batch_rtgs
V = self.critic(batch_obs).squeeze()
# Calculate the log probabilities of batch actions using most recent actor network.
# This segment of code is similar to that in get_action()
mean = self.actor(batch_obs)
if self.discrete:
dist = Categorical(mean)
else:
dist = MultivariateNormal(mean, self.cov_mat)
#For discrete actions
#dist = Categorical(mean)
log_probs = dist.log_prob(batch_acts)
# Return the value vector V of each observation in the batch
# and log probabilities log_probs of each action in the batch
return V, log_probs
def _init_hyperparameters(self, hyperparameters):
"""
Initialize default and custom values for hyperparameters
Parameters:
hyperparameters - the extra arguments included when creating the PPO model, should only include
hyperparameters defined below with custom values.
Return:
None
"""
# Initialize default values for hyperparameters
# Algorithm hyperparameters
self.timesteps_per_batch = 4800 # Number of timesteps to run per batch
self.max_timesteps_per_episode = 1600 # Max number of timesteps per episode
self.n_updates_per_iteration = 5 # Number of times to update actor/critic per iteration
self.lr = 0.005 # Learning rate of actor optimizer
self.gamma = 0.95 # Discount factor to be applied when calculating Rewards-To-Go
self.clip = 0.2 # Recommended 0.2, helps define the threshold to clip the ratio during SGA
# Miscellaneous parameters
self.render = False # If we should render during rollout
self.save_freq = 10 # How often we save in number of iterations
self.seed = None # Sets the seed of our program, used for reproducibility of results
self.discrete = False # Sets the type of environment to discrete or continuous
		self.training_step = 200 # Sets the number of training iterations
# Change any default values to custom values for specified hyperparameters
for param, val in hyperparameters.items():
exec('self.' + param + ' = ' + str(val))
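			# e.g. PPO(env, lr=0.0003, seed=1) executes "self.lr = 0.0003" and "self.seed = 1";
			# string-valued hyperparameters must therefore be passed already quoted.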
# Sets the seed if specified
if self.seed is not None:
# Check if our seed is valid first
assert(type(self.seed) == int)
# Set the seed
torch.manual_seed(self.seed)
print(f"Successfully set seed to {self.seed}")
def _log_summary(self):
"""
Print to stdout what we've logged so far in the most recent batch.
Parameters:
None
Return:
None
"""
# Calculate logging values. I use a few python shortcuts to calculate each value
# without explaining since it's not too important to PPO; feel free to look it over,
# and if you have any questions you can email me (look at bottom of README)
t_so_far = self.logger['t_so_far']
i_so_far = self.logger['i_so_far']
avg_ep_lens = np.mean(self.logger['batch_lens'])
avg_ep_rews = np.mean([np.sum(ep_rews) for ep_rews in self.logger['batch_rews']])
avg_actor_loss = np.mean([losses.float().mean() for losses in self.logger['actor_losses']])
avg_ep_infractions = np.mean([np.sum(ep_inf) for ep_inf in self.logger['batch_infractions']])
actor_model = self.logger['actor_network']
# Round decimal places for more aesthetic logging messages
avg_ep_lens = str(round(avg_ep_lens, 2))
avg_ep_rews = str(round(avg_ep_rews, 2))
avg_ep_infractions = str(round(avg_ep_infractions, 2))
avg_actor_loss = str(round(avg_actor_loss, 5))
writer.add_scalar("Average Episodic Return", int(float(avg_ep_rews)), t_so_far)
writer.add_scalar("Average actor Loss", int(float(avg_actor_loss)), t_so_far)
writer.add_scalar("Average Infractions", int(float(avg_ep_infractions)), t_so_far)
# Tracking the weight of the network
for name, param in actor_model.named_parameters():
if 'weight' in name:
writer.add_histogram(name, param.detach().numpy(), t_so_far)
# Print logging statements
print(flush=True)
print(f"-------------------- Iteration #{i_so_far} --------------------", flush=True)
print(f"Average Episodic Length: {avg_ep_lens}", flush=True)
print(f"Average Episodic Return: {avg_ep_rews}", flush=True)
print(f"Average Episodic Infractions : {avg_ep_infractions}", flush=True)
print(f"Average Loss: {avg_actor_loss}", flush=True)
print(f"Timesteps So Far: {t_so_far}", flush=True)
print(f"------------------------------------------------------", flush=True)
print(flush=True)
# Reset batch-specific logging data
self.logger['batch_lens'] = []
self.logger['batch_rews'] = []
self.logger['actor_losses'] = []
def test(env, actor_model, is_discrete):
"""
Tests the model.
Parameters:
env - the environment to test the policy on
actor_model - the actor model to load in
Return:
None
"""
print(f"Testing {actor_model}", flush=True)
# If the actor model is not specified, then exit
if actor_model == '':
print(f"Didn't specify model file. Exiting.", flush=True)
sys.exit(0)
# Extract out dimensions of observation and action spaces
obs_dim = env.observation_space.shape[0]
if is_discrete:
act_dim = env.action_space.n
else:
act_dim = env.action_space.shape[0]
# Build our policy the same way we build our actor model in PPO
policy = FeedForwardActorNN(obs_dim, act_dim,is_discrete)
# Load in the actor model saved by the PPO algorithm
policy.load_state_dict(torch.load(actor_model))
# Evaluate our policy with a separate module, eval_policy, to demonstrate
# that once we are done training the model/policy with ppo.py, we no longer need
# ppo.py since it only contains the training algorithm. The model/policy itself exists
# independently as a binary file that can be loaded in with torch.
eval_policy(policy=policy, env=env, render=True, is_discrete=is_discrete)
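# --- Illustrative usage sketch, not part of the original file ---
# How test() above might be driven for a continuous-control gym environment.
# The environment id and checkpoint filename are placeholders, not values
# taken from this repository.
def _sketch_run_test():
import gym
env = gym.make('Pendulum-v0')                   # hypothetical env id
test(env, actor_model='ppo_actor_example.pth',  # hypothetical checkpoint
     is_discrete=False)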
| 40.42344
| 164
| 0.681444
|
import gym
import numpy as np
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.distributions import MultivariateNormal
from torch.distributions import Categorical
from network import FeedForwardActorNN, FeedForwardCriticNN
import sys
from cbf_clf_helper import clf_control, cbf_control
from torch.utils.tensorboard import SummaryWriter
writer = SummaryWriter()
class PPO:
def __init__(self, env, **hyperparameters):
assert(type(env.observation_space) == gym.spaces.Box)
self._init_hyperparameters(hyperparameters)
self.env = env
self.obs_dim = env.observation_space.shape[0]
if self.discrete:
self.act_dim = env.action_space.n
else:
self.act_dim = env.action_space.shape[0]
self.actor = FeedForwardActorNN(self.obs_dim, self.act_dim, self.discrete)
actor_model = 'ppo_actorKinematicBicycleGymLane.pth'
policy = FeedForwardActorNN(5, 2,False)
policy.load_state_dict(torch.load(actor_model))
actor_model = policy
self.critic = FeedForwardCriticNN(self.obs_dim, 1)
self.actor_optim = Adam(self.actor.parameters(), lr=self.lr)
self.critic_optim = Adam(self.critic.parameters(), lr=self.lr)
self.cov_var = torch.full(size=(self.act_dim,), fill_value=0.05)
self.cov_mat = torch.diag(self.cov_var)
self.obs_count = 0
self.index_count = 0
self.logger = {
't_so_far': 0,
'i_so_far': 0,
'batch_lens': [],
'batch_rews': [],
'batch_infractions': [],
'actor_losses': [],
'actor_network' : 0,
}
def learn(self, env_name,failure_observations,subpolicy):
print(f"Learning... Running {self.max_timesteps_per_episode} timesteps per episode, ", end='')
print(f"{self.timesteps_per_batch} timesteps per batch for a total of {self.training_step} iterations")
t_so_far = 0
i_so_far = 0
while i_so_far < self.training_step:
batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens = self.rollout(subpolicy,failure_observations) # ALG STEP 3
# Calculate how many timesteps we collected this batch
t_so_far += np.sum(batch_lens)
# Increment the number of iterations
i_so_far += 1
# Logging timesteps so far and iterations so far
self.logger['t_so_far'] = t_so_far
self.logger['i_so_far'] = i_so_far
# Calculate advantage at k-th iteration
V, _ = self.evaluate(batch_obs, batch_acts)
A_k = batch_rtgs - V.detach() # ALG STEP 5
# One of the only tricks used that isn't in the pseudocode: normalizing
# advantages makes convergence much more stable and faster. It was added
# because solving some environments was too unstable without it.
A_k = (A_k - A_k.mean()) / (A_k.std() + 1e-10)
# This is the loop where we update our network for some n epochs
for _ in range(self.n_updates_per_iteration): # ALG STEP 6 & 7
# Calculate V_phi and pi_theta(a_t | s_t)
V, curr_log_probs = self.evaluate(batch_obs, batch_acts)
# Calculate the ratio pi_theta(a_t | s_t) / pi_theta_k(a_t | s_t)
# NOTE: we just subtract the logs, which is the same as
# dividing the values and then canceling the log with e^log.
# Working in log space also keeps the computation numerically stable.
ratios = torch.exp(curr_log_probs - batch_log_probs)
surr1 = ratios * A_k
surr2 = torch.clamp(ratios, 1 - self.clip, 1 + self.clip) * A_k
# NOTE: the surrogate objective is negated because we want to maximize
# the performance function, but Adam minimizes the loss. So minimizing the negative
# performance function maximizes it.
actor_loss = (-torch.min(surr1, surr2)).mean()
#print(f'actor_loss======================={actor_loss}')
critic_loss = nn.MSELoss()(V, batch_rtgs)
# Calculate gradients and perform backward propagation for actor network
self.actor_optim.zero_grad()
actor_loss.backward(retain_graph=True)
self.actor_optim.step()
# Calculate gradients and perform backward propagation for critic network
self.critic_optim.zero_grad()
critic_loss.backward()
self.critic_optim.step()
# Log actor loss
self.logger['actor_losses'].append(actor_loss.detach())
self.logger['actor_network'] = self.actor
# Print a summary of our training so far
self._log_summary()
# Save our model if it's time
if i_so_far % self.save_freq == 0:
if subpolicy:
torch.save(self.actor.state_dict(), './ppo_actor_subpolicy'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic_subpolicy'+env_name+'.pth')
else:
torch.save(self.actor.state_dict(), './ppo_actor'+env_name+'.pth')
torch.save(self.critic.state_dict(), './ppo_critic'+env_name+'.pth')
def rollout(self,subpolicy,failure_observations):
batch_obs = []
batch_acts = []
batch_log_probs = []
batch_rews = []
batch_rtgs = []
batch_lens = []
batch_infractions = []
ep_rews = []
t = 0
# Keep simulating until we've run more than or equal to specified timesteps per batch
while t < self.timesteps_per_batch:
act_list = []
ep_rews = []
obs = self.env.reset()
done = False
count_infractions = 0
count_infractions_acc = 0
count_infractions_steer = 0
for ep_t in range(self.max_timesteps_per_episode):
a_predicted_clf = clf_control(self.env.v_ego)
delta, target_id, crosstrack_error = self.env.car.tracker.stanley_control(self.env.x_ego, self.env.y_ego, self.env.yaw_ego, self.env.v_ego, self.env.delta_ego)
if self.render:
self.env.render()
t += 1
batch_obs.append(obs)
if self.discrete:
action, log_prob = self.get_action_discrete(obs)
else:
action, log_prob = self.get_action(obs)
if(abs(round(float(action[0]),1))<abs(round(float(a_predicted_clf),1))):
count_infractions_acc = count_infractions_acc+1
if(abs(round(float(action[1]),1)) < abs(round(float(delta),1))-0.2):
count_infractions_steer = count_infractions_steer+1
obs, rew, done, info = self.env.step(action)
count_infractions = count_infractions_acc+count_infractions_steer
ep_rews.append(rew)
batch_acts.append(action)
batch_log_probs.append(log_prob)
act_list.append(info)
if done:
break
batch_lens.append(ep_t + 1)
batch_rews.append(ep_rews)
batch_infractions.append(count_infractions)
batch_obs = torch.tensor(batch_obs, dtype=torch.float)
if self.discrete:
batch_acts = torch.tensor(batch_acts, dtype=torch.long).view(-1,)
else:
batch_acts = torch.tensor(batch_acts, dtype=torch.float)
batch_log_probs = torch.tensor(batch_log_probs, dtype=torch.float)
batch_rtgs = self.compute_rtgs(batch_rews)
self.logger['batch_rews'] = batch_rews
self.logger['batch_lens'] = batch_lens
self.logger['batch_infractions'] = batch_infractions
return batch_obs, batch_acts, batch_log_probs, batch_rtgs, batch_lens
def compute_rtgs(self, batch_rews):
batch_rtgs = []
for ep_rews in reversed(batch_rews):
discounted_reward = 0
for rew in reversed(ep_rews):
discounted_reward = rew + discounted_reward * self.gamma
batch_rtgs.insert(0, discounted_reward)
batch_rtgs = torch.tensor(batch_rtgs, dtype=torch.float)
return batch_rtgs
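# --- Illustrative sketch, not part of the original file ---
# Worked example of the rewards-to-go recursion above with gamma = 0.95:
# rewards [1, 1, 1] give rtgs [1 + 0.95*(1 + 0.95*1), 1 + 0.95*1, 1]
#                           = [2.8525, 1.95, 1.0]
def _sketch_rtgs_example():
gamma = 0.95
ep_rews = [1.0, 1.0, 1.0]
rtgs = []
discounted = 0.0
for rew in reversed(ep_rews):
    discounted = rew + discounted * gamma
    rtgs.insert(0, discounted)
return rtgs  # [2.8525, 1.95, 1.0]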
def get_action_discrete(self, obs):
mean = self.actor(obs)
dist = Categorical(mean)
action = dist.sample()
log_prob = dist.log_prob(action)
return action.detach().numpy().item(), log_prob.detach().item()
def get_action(self, obs):
mean = self.actor(obs)
# https://www.youtube.com/watch?v=JjB58InuTqM
dist = MultivariateNormal(mean, self.cov_mat)
# Sample an action from the distribution
action = dist.sample()
# Calculate the log probability for that action
log_prob = dist.log_prob(action)
# Return the sampled action and the log probability of that action in our distribution
return action.detach().numpy(), log_prob.detach()
def evaluate(self, batch_obs, batch_acts):
# Query critic network for a value V for each batch_obs. Shape of V should be same as batch_rtgs
V = self.critic(batch_obs).squeeze()
# Calculate the log probabilities of batch actions using most recent actor network.
# This segment of code is similar to that in get_action()
mean = self.actor(batch_obs)
if self.discrete:
dist = Categorical(mean)
else:
dist = MultivariateNormal(mean, self.cov_mat)
#For discrete actions
#dist = Categorical(mean)
log_probs = dist.log_prob(batch_acts)
# Return the value vector V of each observation in the batch
# and log probabilities log_probs of each action in the batch
return V, log_probs
def _init_hyperparameters(self, hyperparameters):
# Initialize default values for hyperparameters
# Algorithm hyperparameters
self.timesteps_per_batch = 4800 # Number of timesteps to run per batch
self.max_timesteps_per_episode = 1600 # Max number of timesteps per episode
self.n_updates_per_iteration = 5 # Number of times to update actor/critic per iteration
self.lr = 0.005 # Learning rate of actor optimizer
self.gamma = 0.95 # Discount factor to be applied when calculating Rewards-To-Go
self.clip = 0.2 # Recommended 0.2, helps define the threshold to clip the ratio during SGA
# Miscellaneous parameters
self.render = False # If we should render during rollout
self.save_freq = 10 # How often we save in number of iterations
self.seed = None # Sets the seed of our program, used for reproducibility of results
self.discrete = False # Sets the type of environment to discrete or continuous
self.training_step = 200 # Sets the number of training steps
# Change any default values to custom values for specified hyperparameters
for param, val in hyperparameters.items():
exec('self.' + param + ' = ' + str(val))
# Sets the seed if specified
if self.seed is not None:
# Check if our seed is valid first
assert(type(self.seed) == int)
# Set the seed
torch.manual_seed(self.seed)
print(f"Successfully set seed to {self.seed}")
def _log_summary(self):
# Calculate logging values. I use a few python shortcuts to calculate each value
# without explaining since it's not too important to PPO; feel free to look it over.
t_so_far = self.logger['t_so_far']
i_so_far = self.logger['i_so_far']
avg_ep_lens = np.mean(self.logger['batch_lens'])
avg_ep_rews = np.mean([np.sum(ep_rews) for ep_rews in self.logger['batch_rews']])
avg_actor_loss = np.mean([losses.float().mean() for losses in self.logger['actor_losses']])
avg_ep_infractions = np.mean([np.sum(ep_inf) for ep_inf in self.logger['batch_infractions']])
actor_model = self.logger['actor_network']
avg_ep_lens = str(round(avg_ep_lens, 2))
avg_ep_rews = str(round(avg_ep_rews, 2))
avg_ep_infractions = str(round(avg_ep_infractions, 2))
avg_actor_loss = str(round(avg_actor_loss, 5))
writer.add_scalar("Average Episodic Return", int(float(avg_ep_rews)), t_so_far)
writer.add_scalar("Average actor Loss", int(float(avg_actor_loss)), t_so_far)
writer.add_scalar("Average Infractions", int(float(avg_ep_infractions)), t_so_far)
for name, param in actor_model.named_parameters():
if 'weight' in name:
writer.add_histogram(name, param.detach().numpy(), t_so_far)
print(flush=True)
print(f"-------------------- Iteration #{i_so_far} --------------------", flush=True)
print(f"Average Episodic Length: {avg_ep_lens}", flush=True)
print(f"Average Episodic Return: {avg_ep_rews}", flush=True)
print(f"Average Episodic Infractions : {avg_ep_infractions}", flush=True)
print(f"Average Loss: {avg_actor_loss}", flush=True)
print(f"Timesteps So Far: {t_so_far}", flush=True)
print(f"------------------------------------------------------", flush=True)
print(flush=True)
self.logger['batch_lens'] = []
self.logger['batch_rews'] = []
self.logger['actor_losses'] = []
def test(env, actor_model, is_discrete):
print(f"Testing {actor_model}", flush=True)
if actor_model == '':
print(f"Didn't specify model file. Exiting.", flush=True)
sys.exit(0)
# Extract out dimensions of observation and action spaces
obs_dim = env.observation_space.shape[0]
if is_discrete:
act_dim = env.action_space.n
else:
act_dim = env.action_space.shape[0]
# Build our policy the same way we build our actor model in PPO
policy = FeedForwardActorNN(obs_dim, act_dim,is_discrete)
# Load in the actor model saved by the PPO algorithm
policy.load_state_dict(torch.load(actor_model))
# Evaluate our policy with a separate module, eval_policy, to demonstrate
# that once we are done training the model/policy with ppo.py, we no longer need
# ppo.py since it only contains the training algorithm. The model/policy itself exists
# independently as a binary file that can be loaded in with torch.
eval_policy(policy=policy, env=env, render=True, is_discrete=is_discrete)
| true
| true
|
7908335aa2af1e5d85aef8db310ddcc5fdffd88a
| 1,670
|
py
|
Python
|
tests/models_tests/test_log.py
|
chainer/chainerui
|
91c5c26d9154a008079dbb0bcbf69b5590d105f7
|
[
"MIT"
] | 185
|
2017-12-15T09:24:07.000Z
|
2022-01-20T11:20:13.000Z
|
tests/models_tests/test_log.py
|
chainer/chainerui
|
91c5c26d9154a008079dbb0bcbf69b5590d105f7
|
[
"MIT"
] | 191
|
2017-12-15T09:14:52.000Z
|
2022-02-17T14:09:19.000Z
|
tests/models_tests/test_log.py
|
chainer/chainerui
|
91c5c26d9154a008079dbb0bcbf69b5590d105f7
|
[
"MIT"
] | 29
|
2017-12-15T09:40:45.000Z
|
2022-03-13T11:21:11.000Z
|
from chainerui.models.log import Log
def get_test_json():
return [
{
"loss": 100,
"epoch": 1,
},
{
"loss": 90,
"epoch": 2,
}
]
def test_log_serialize_numbers():
json_data = get_test_json()
logs = [Log(data) for data in json_data]
serialized_data = [log.serialize for log in logs]
assert serialized_data[0]['logDict']['epoch'] == 1
assert serialized_data[1]['logDict']['epoch'] == 2
def test_log_serialize_arbitrary_data():
json_data = get_test_json()
json_data.insert(
0,
{
"loss": 110,
"epoch": 0,
"model_files": ["Model", "model.py"]
}
)
logs = [Log(data) for data in json_data]
serialized_data = [log.serialize for log in logs]
assert serialized_data[0]['logDict']['epoch'] == 0
assert serialized_data[0]['logDict']['model_files'] is None
assert serialized_data[1]['logDict']['epoch'] == 1
assert serialized_data[2]['logDict']['epoch'] == 2
def test_log_serialize_nan_and_inf():
json_data = get_test_json()
json_data.insert(
0,
{
"loss": float('nan'),
"epoch": float('inf'),
"iteration": 0,
}
)
logs = [Log(data) for data in json_data]
serialized_data = [log.serialize for log in logs]
assert serialized_data[0]['logDict']['iteration'] == 0
assert serialized_data[0]['logDict']['epoch'] is None
assert serialized_data[0]['logDict']['loss'] is None
assert serialized_data[1]['logDict']['epoch'] == 1
assert serialized_data[2]['logDict']['epoch'] == 2
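# --- Illustrative sketch, not part of the original tests ---
# A minimal serializer consistent with the behavior these tests assert:
# non-numeric values and NaN/Inf become None in the serialized dict. This is
# an assumption for illustration, not chainerui's actual implementation.
import math

def _sketch_sanitize_log_dict(data):
clean = {}
for key, value in data.items():
    if isinstance(value, (int, float)) and not isinstance(value, bool) \
            and not (isinstance(value, float) and (math.isnan(value) or math.isinf(value))):
        clean[key] = value  # plain finite numbers pass through
    else:
        clean[key] = None   # lists, NaN, Inf, etc. are dropped to None
return clean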
| 25.692308
| 63
| 0.573653
|
from chainerui.models.log import Log
def get_test_json():
return [
{
"loss": 100,
"epoch": 1,
},
{
"loss": 90,
"epoch": 2,
}
]
def test_log_serialize_numbers():
json_data = get_test_json()
logs = [Log(data) for data in json_data]
serialized_data = [log.serialize for log in logs]
assert serialized_data[0]['logDict']['epoch'] == 1
assert serialized_data[1]['logDict']['epoch'] == 2
def test_log_serialize_arbitrary_data():
json_data = get_test_json()
json_data.insert(
0,
{
"loss": 110,
"epoch": 0,
"model_files": ["Model", "model.py"]
}
)
logs = [Log(data) for data in json_data]
serialized_data = [log.serialize for log in logs]
assert serialized_data[0]['logDict']['epoch'] == 0
assert serialized_data[0]['logDict']['model_files'] is None
assert serialized_data[1]['logDict']['epoch'] == 1
assert serialized_data[2]['logDict']['epoch'] == 2
def test_log_serialize_nan_and_inf():
json_data = get_test_json()
json_data.insert(
0,
{
"loss": float('nan'),
"epoch": float('inf'),
"iteration": 0,
}
)
logs = [Log(data) for data in json_data]
serialized_data = [log.serialize for log in logs]
assert serialized_data[0]['logDict']['iteration'] == 0
assert serialized_data[0]['logDict']['epoch'] is None
assert serialized_data[0]['logDict']['loss'] is None
assert serialized_data[1]['logDict']['epoch'] == 1
assert serialized_data[2]['logDict']['epoch'] == 2
| true
| true
|
79083384cf791c8f6babf1e15ace4d5a35dd72b3
| 14,114
|
py
|
Python
|
detection.py
|
kaylajanos1/TeamSpark-L3Detection
|
ecc2b4ca3588f989add309439feac33014447a32
|
[
"BSD-3-Clause"
] | null | null | null |
detection.py
|
kaylajanos1/TeamSpark-L3Detection
|
ecc2b4ca3588f989add309439feac33014447a32
|
[
"BSD-3-Clause"
] | 1
|
2021-04-28T03:14:17.000Z
|
2021-04-28T03:14:17.000Z
|
detection.py
|
kaylajanos1/TeamSpark-L3Detection
|
ecc2b4ca3588f989add309439feac33014447a32
|
[
"BSD-3-Clause"
] | null | null | null |
#Importing Libraries
import os
import csv
import sys, getopt
import uuid
import SimpleITK as sitk
import cv2
import numpy as np
import tensorflow as tf
from flask import Flask, flash, request, redirect, render_template
from flask import jsonify
from flask import send_from_directory
from flask_materialize import Material
from tensorflow.python.keras.backend import set_session
from werkzeug.utils import secure_filename
import shutil
import nibabel as nib
import pandas as pd
import numpy
from sarcopenia_ai.apps.segmentation.segloader import preprocess_test_image
from sarcopenia_ai.apps.server import settings
from sarcopenia_ai.apps.slice_detection.predict import parse_inputs, to256
from sarcopenia_ai.apps.slice_detection.utils import decode_slice_detection_prediction, \
preprocess_sitk_image_for_slice_detection, adjust_detected_position_spacing, place_line_on_img
from sarcopenia_ai.core.model_wrapper import BaseModelWrapper
from sarcopenia_ai.io import load_image
from sarcopenia_ai.preprocessing.preprocessing import blend2d
from sarcopenia_ai.utils import compute_muscle_area, compute_muscle_attenuation
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
graph = tf.get_default_graph()
import cv2
import numpy as np
def normalise_zero_one(image, eps=1e-8):
print("Here 1")
image = image.astype(np.float32)
ret = (image - np.min(image))
ret /= (np.max(image) - np.min(image) + eps)
return ret
def normalise_one_one(image):
print("Here 2")
ret = normalise_zero_one(image)
ret *= 2.
ret -= 1.
return ret
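#Worked example (illustrative, not part of the original file): an input with
#min 0 and max 200 maps 0 -> -1, 100 -> 0, and 200 -> 1 (up to the eps term).
def _sketch_normalise_example():
img = np.array([0.0, 100.0, 200.0])
return normalise_one_one(img)  # approximately [-1., 0., 1.]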
def preprocess_test_image(image):
print("Here")
#image = normalise_one_one(image, -250, 250)
image = normalise_one_one(image)
return image
##################
def find_max(img):
return np.unravel_index(np.argmax(img, axis=None), img.shape)[0]
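#Illustrative example (not part of the original file): np.argmax flattens the
#array, unravel_index maps the flat index back to coordinates, and taking [0]
#keeps only the first axis, i.e. the row (slice) containing the maximum.
def _sketch_find_max_example():
img = np.array([[0, 1], [5, 2]])
return find_max(img)  # 1, because the maximum (5) sits in row 1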
#Read arguments
#############################
import argparse
msg = "Adding description"
# Initialize parser
parser = argparse.ArgumentParser(description = msg)
# Reading the input arguments
parser.add_argument("-i", "--Input", help = "Input file or folder")
parser.add_argument('-test_name', type=str, default='Test')
# Read arguments from command line
args = parser.parse_args()
path = args.Input
test_name = args.test_name
#Creating the result structure variables
main = os.getcwd()
directory = os.path.join(main+'/NII_Data/'+path)
if not os.path.exists(main+'/Results/'+path+"/"):
os.mkdir(main+'/Results/'+path+'/')
out = os.path.join(main+'/Results/'+path+"/"+test_name+'/')
if os.path.exists(out):
shutil.rmtree(out)
os.mkdir(out)
if not os.path.exists(out):
os.mkdir(out)
out_yes = os.path.join(out+'/Yes')
if not os.path.exists(out_yes):
os.mkdir(out_yes)
out_no = os.path.join(out+'/No')
if not os.path.exists(out_no):
os.mkdir(out_no)
out_rev = os.path.join(out+'/Review/')
if not os.path.exists(out_rev):
os.mkdir(out_rev)
out_csv = os.path.join(out+'/Pred CSVs/')
if not os.path.exists(out_csv):
os.mkdir(out_csv)
#Load the sarcopenia-ai models
#set_session(sess)
model_wrapper = BaseModelWrapper(settings.SLICE_DETECTION_MODEL_PATH)
model_wrapper.setup_model()
global slice_detection_model
slice_detection_model= model_wrapper.model
slice_detection_model._make_predict_function()
global segmentation_model
model_wrapper = BaseModelWrapper(settings.SEGMENTATION_MODEL_PATH)
model_wrapper.setup_model()
segmentation_model = model_wrapper.model
segmentation_model._make_predict_function()
####Updated functions to replace older versions listed in the sarcopenia-ai environment
#Previous research indicates adjusting the HU range can make bone appear more clearly
def reduce_hu_intensity_range(img, minv=100, maxv=1500):
img = np.clip(img, minv, maxv)
img = 255 * normalise_zero_one(img)
return img
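#Illustrative example (not part of the original file): with the default window,
#HU values are clipped to [100, 1500] and rescaled to [0, 255], so soft tissue
#near 0 HU maps to 0 and dense bone near 1500 HU maps to 255.
def _sketch_hu_window_example():
img = np.array([-1000.0, 0.0, 100.0, 800.0, 2000.0])
return reduce_hu_intensity_range(img)  # approximately [0., 0., 0., 127.5, 255.]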
#Setting up the output file name & Prediction counter
pred_id = 0
cols = ['Folder_Path','Patient_Folder','Study_Folder','Serie_Folder','L3_detection','L3_position','Total_slices','Confidence','Slice_Thickness', 'Orientation']
lst = []
#Looping through the input folder and analyzing the images
for folder in os.listdir(directory):
#Patient Folder
if(folder=='.DS_Store'):
continue
#Study Folder
for sub_folder in os.listdir(directory+"/"+folder):
if(sub_folder=='.DS_Store'):
continue
#Series Folder
for sub_sub_folder in os.listdir(directory+"/"+folder+"/"+sub_folder):
#Image Level
for file in os.listdir(directory+"/"+folder+"/"+sub_folder+"/"+sub_sub_folder):
print("IN SUB-SUB-FOLDER: "+sub_sub_folder)
#print(file)
if(file.endswith(".nii.gz") or file.endswith(".nii")):
print("Processing file: "+file)
try:
if(sub_sub_folder=='.DS_Store'):
continue
print("IN SUB-SUB-FOLDER: "+sub_sub_folder)
image_path = directory+"/"+folder+"/"+sub_folder+"/"+sub_sub_folder+"/"+file
prob_threshold_U=settings.THRESHOLD_U
prob_threshold_L=settings.THRESHOLD_L
#Gathering image name
import ntpath
head, tail = ntpath.split(image_path)
image_name = tail or ntpath.basename(head)
pred_id = pred_id +1
print("ID --> "+str(pred_id))
results = {"success": False, "prediction": {'id': pred_id}}
sitk_image, _ = load_image(image_path)
print("-----------------------------image path: "+image_path )
#The code is not set up to analyze 4 dimensional data.
if len(sitk_image.GetSize()) == 4:
print("-------- 4D Image: Grabbing only first volume")
sitk_image = sitk_image[:, :, :, 0]
#Getting image orientation information for output file.
print('-------------- NIB')
nib_image = nib.load(image_path)
orient_nib=nib.orientations.aff2axcodes(nib_image.affine)
print('-------------- Preprocess')
#Preprocessing the image
image2d, image2d_preview= preprocess_sitk_image_for_slice_detection(sitk_image)
image3d = sitk.GetArrayFromImage(sitk_image)
#print(image3d.shape)
#print(image2d.shape)
#print(image2d_preview.shape)
spacing = sitk_image.GetSpacing()
size = list(sitk_image.GetSize())
slice_thickness = spacing[2]
#Utilizing the sarcopenia-ai model to predict the L3 vertebra
with graph.as_default():
set_session(sess)
preds = slice_detection_model.predict(image2d)
print('-------------- Predict')
#Processing the model output
pred_z, prob = decode_slice_detection_prediction(preds)
slice_z = adjust_detected_position_spacing(pred_z, spacing)
print('Prob: '+ str(prob))
print('Slice Z: ' +str(slice_z) )
print('Pred Z: '+str(pred_z))
#Constraining the predicted slice position to lie within 27%-48% of the scan height
new_z_calculate = 0
new_pred_z = pred_z
new_slice_z = slice_z
new_prob = prob
print('-------------- Normalize')
if(slice_z < .27*size[2] or slice_z > .48*size[2]):
print("---------------------debug")
print(preds.shape)
print(preds.shape[1])
new_pred_z = find_max(preds[0, int(.27*preds.shape[1]):int(.48*preds.shape[1])])
new_pred_z = new_pred_z + int(.27*preds.shape[1])
new_slice_z = adjust_detected_position_spacing(new_pred_z, spacing)
print("old position")
print(pred_z)
print(slice_z)
print("new position")
print(new_pred_z)
print(new_slice_z)
new_z_calculate = 1
new_prob = float(preds[0,new_pred_z])
## Outputting prediction data
print('-------------- Predict CSV')
preds_reshaped = preds.reshape(preds.shape[0], -1)
numpy.savetxt(out_csv+"PRED_"+str(pred_id)+".csv", preds_reshaped, delimiter=",")
#If the prediction for L3 is above the predefined threshold for acceptance
if (new_prob > prob_threshold_U):
print('-------------- Above')
image = image3d
slice_image = image[new_slice_z,:, :]
image2dA = place_line_on_img(image2d[0], pred_z, pred_z, r=1)
image2dB = place_line_on_img(image2d[0], -new_pred_z, new_pred_z, r=1)
cv2.imwrite(out_yes+"/"+str(pred_id)+'_YES_'+image_name+'_SL.jpg', to256(slice_image))
cv2.imwrite(out_yes+"/"+str(pred_id)+'_YES_'+image_name+'_FR.jpg', to256(image2dA))
cv2.imwrite(out_yes+"/"+str(pred_id)+'_YES_'+image_name+'_FR2.jpg', to256(image2dB))
output = [image_path,folder,sub_folder,sub_sub_folder,'YES',new_slice_z,size[2],new_prob,slice_thickness, orient_nib]
lst.append(output)
#Images where the L3 vertebra was not identified
elif (new_prob <= prob_threshold_L ):
print('-------------- No')
image = image3d
slice_image = image[new_slice_z,:, :]
image2dA = place_line_on_img(image2d[0], -pred_z, -pred_z, r=1)
image2dB = place_line_on_img(image2d[0], -new_pred_z, -new_pred_z, r=1)
cv2.imwrite(out_no+str(pred_id)+'_NO_'+image_name+'_SL.jpg', to256(slice_image))
cv2.imwrite(out_no+str(pred_id)+'_NO_'+image_name+'_FR.jpg', to256(image2dA))
cv2.imwrite(out_no+str(pred_id)+'_NO_'+image_name+'_FR2.jpg', to256(image2dB))
output = [image_path,folder,sub_folder,sub_sub_folder,'NO',new_slice_z,size[2],new_prob,slice_thickness, orient_nib]
lst.append(output)
#Images where the L3 vertebra was identified but confidence requirements were not met.
else:
print('-------------- Review')
image = image3d
slice_image = image[new_slice_z,:, :]
image2dA = place_line_on_img(image2d[0], pred_z, pred_z, r=1)
image2dB = place_line_on_img(image2d[0], new_pred_z, new_pred_z, r=1)
cv2.imwrite(out_rev+str(pred_id)+'_REVIEW_'+image_name+'_SL_'+str(new_slice_z)+'_PROB_'+str(new_prob)+'.jpg', to256(slice_image))
cv2.imwrite(out_rev+str(pred_id)+'_REVIEW_'+image_name+'_FR_'+str(slice_z)+'_PROB_'+str(prob)+'.jpg', to256(image2dA))
cv2.imwrite(out_rev+str(pred_id)+'_REVIEW_'+image_name+'_FR2_'+str(new_slice_z)+'_PROB_'+str(new_prob)+'.jpg', to256(image2dB))
output = [image_path,folder,sub_folder,sub_sub_folder,'REVIEW',new_slice_z,size[2],new_prob,slice_thickness, orient_nib]
lst.append(output)
#Images that error out (e.g. image orientation is incorrect)
except Exception:
print('-------------- Wrong')
print('-------------- ')
print('-------------- ')
print("Something went wrong - File: "+image_path)
print("Unexpected error"+str(sys.exc_info()[0]))
output = [image_path,folder,sub_folder,sub_sub_folder,'Error','','','Something went wrong:'+str(sys.exc_info()[1]),'','']
lst.append(output)
#Outputting the results dataset
df = pd.DataFrame(lst, columns=cols)
if not os.path.exists('/content/gdrive/MyDrive/L3-Clean/Results/Summaries/'):
os.mkdir('/content/gdrive/MyDrive/L3-Clean/Results/Summaries/')
df.to_csv('/content/gdrive/MyDrive/L3-Clean/Results/Summaries/'+path+'_'+test_name+".csv")
print(' ')
print(' ')
print(' ')
print(' -------------- PROCESSING COMPLETE ------------------- ')
| 39.205556
| 159
| 0.540315
|
import os
import csv
import sys, getopt
import uuid
import SimpleITK as sitk
import cv2
import numpy as np
import tensorflow as tf
from flask import Flask, flash, request, redirect, render_template
from flask import jsonify
from flask import send_from_directory
from flask_materialize import Material
from tensorflow.python.keras.backend import set_session
from werkzeug.utils import secure_filename
import shutil
import nibabel as nib
import pandas as pd
import numpy
from sarcopenia_ai.apps.segmentation.segloader import preprocess_test_image
from sarcopenia_ai.apps.server import settings
from sarcopenia_ai.apps.slice_detection.predict import parse_inputs, to256
from sarcopenia_ai.apps.slice_detection.utils import decode_slice_detection_prediction, \
preprocess_sitk_image_for_slice_detection, adjust_detected_position_spacing, place_line_on_img
from sarcopenia_ai.core.model_wrapper import BaseModelWrapper
from sarcopenia_ai.io import load_image
from sarcopenia_ai.preprocessing.preprocessing import blend2d
from sarcopenia_ai.utils import compute_muscle_area, compute_muscle_attenuation
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
graph = tf.get_default_graph()
import cv2
import numpy as np
def normalise_zero_one(image, eps=1e-8):
print("Here 1")
image = image.astype(np.float32)
ret = (image - np.min(image))
ret /= (np.max(image) - np.min(image) + eps)
return ret
def normalise_one_one(image):
print("Here 2")
ret = normalise_zero_one(image)
ret *= 2.
ret -= 1.
return ret
def preprocess_test_image(image):
print("Here")
image = normalise_one_one(image)
return image
def find_max(img):
return np.unravel_index(np.argmax(img, axis=None), img.shape)[0]
import argparse
msg = "Adding description"
parser = argparse.ArgumentParser(description = msg)
parser.add_argument("-i", "--Input", help = "Input file or folder")
parser.add_argument('-test_name', type=str, default='Test')
args = parser.parse_args()
path = args.Input
test_name = args.test_name
main = os.getcwd()
directory = os.path.join(main+'/NII_Data/'+path)
if not os.path.exists(main+'/Results/'+path+"/"):
os.mkdir(main+'/Results/'+path+'/')
out = os.path.join(main+'/Results/'+path+"/"+test_name+'/')
if os.path.exists(out):
shutil.rmtree(out)
os.mkdir(out)
if not os.path.exists(out):
os.mkdir(out)
out_yes = os.path.join(out+'/Yes')
if not os.path.exists(out_yes):
os.mkdir(out_yes)
out_no = os.path.join(out+'/No')
if not os.path.exists(out_no):
os.mkdir(out_no)
out_rev = os.path.join(out+'/Review/')
if not os.path.exists(out_rev):
os.mkdir(out_rev)
out_csv = os.path.join(out+'/Pred CSVs/')
if not os.path.exists(out_csv):
os.mkdir(out_csv)
model_wrapper = BaseModelWrapper(settings.SLICE_DETECTION_MODEL_PATH)
model_wrapper.setup_model()
global slice_detection_model
slice_detection_model= model_wrapper.model
slice_detection_model._make_predict_function()
global segmentation_model
model_wrapper = BaseModelWrapper(settings.SEGMENTATION_MODEL_PATH)
model_wrapper.setup_model()
segmentation_model = model_wrapper.model
segmentation_model._make_predict_function()
def reduce_hu_intensity_range(img, minv=100, maxv=1500):
img = np.clip(img, minv, maxv)
img = 255 * normalise_zero_one(img)
return img
pred_id = 0
cols = ['Folder_Path','Patient_Folder','Study_Folder','Serie_Folder','L3_detection','L3_position','Total_slices','Confidence','Slice_Thickness', 'Orientation']
lst = []
for folder in os.listdir(directory):
if(folder=='.DS_Store'):
continue
for sub_folder in os.listdir(directory+"/"+folder):
if(sub_folder=='.DS_Store'):
continue
for sub_sub_folder in os.listdir(directory+"/"+folder+"/"+sub_folder):
for file in os.listdir(directory+"/"+folder+"/"+sub_folder+"/"+sub_sub_folder):
print("IN SUB-SUB-FOLDER: "+sub_sub_folder)
if(file.endswith(".nii.gz") or file.endswith(".nii")):
print("Processing file: "+file)
try:
if(sub_sub_folder=='.DS_Store'):
continue
print("IN SUB-SUB-FOLDER: "+sub_sub_folder)
image_path = directory+"/"+folder+"/"+sub_folder+"/"+sub_sub_folder+"/"+file
prob_threshold_U=settings.THRESHOLD_U
prob_threshold_L=settings.THRESHOLD_L
import ntpath
head, tail = ntpath.split(image_path)
image_name = tail or ntpath.basename(head)
pred_id = pred_id +1
print("ID --> "+str(pred_id))
results = {"success": False, "prediction": {'id': pred_id}}
sitk_image, _ = load_image(image_path)
print("-----------------------------image path: "+image_path )
if len(sitk_image.GetSize()) == 4:
print("-------- 4D Image: Grabbing only first volume")
sitk_image = sitk_image[:, :, :, 0]
print('-------------- NIB')
nib_image = nib.load(image_path)
orient_nib=nib.orientations.aff2axcodes(nib_image.affine)
print('-------------- Preprocess')
image2d, image2d_preview= preprocess_sitk_image_for_slice_detection(sitk_image)
image3d = sitk.GetArrayFromImage(sitk_image)
spacing = sitk_image.GetSpacing()
size = list(sitk_image.GetSize())
slice_thickness = spacing[2]
with graph.as_default():
set_session(sess)
preds = slice_detection_model.predict(image2d)
print('-------------- Predict')
pred_z, prob = decode_slice_detection_prediction(preds)
slice_z = adjust_detected_position_spacing(pred_z, spacing)
print('Prob: '+ str(prob))
print('Slice Z: ' +str(slice_z) )
print('Pred Z: '+str(pred_z))
new_z_calculate = 0
new_pred_z = pred_z
new_slice_z = slice_z
new_prob = prob
print('-------------- Normalize')
if(slice_z < .27*size[2] or slice_z > .48*size[2]):
print("---------------------debug")
print(preds.shape)
print(preds.shape[1])
new_pred_z = find_max(preds[0, int(.27*preds.shape[1]):int(.48*preds.shape[1])])
new_pred_z = new_pred_z + int(.27*preds.shape[1])
new_slice_z = adjust_detected_position_spacing(new_pred_z, spacing)
print("old position")
print(pred_z)
print(slice_z)
print("new position")
print(new_pred_z)
print(new_slice_z)
new_z_calculate = 1
new_prob = float(preds[0,new_pred_z])
print('-------------- Predict CSV')
preds_reshaped = preds.reshape(preds.shape[0], -1)
numpy.savetxt(out_csv+"PRED_"+str(pred_id)+".csv", preds_reshaped, delimiter=",")
if (new_prob > prob_threshold_U):
print('-------------- Above')
image = image3d
slice_image = image[new_slice_z,:, :]
image2dA = place_line_on_img(image2d[0], pred_z, pred_z, r=1)
image2dB = place_line_on_img(image2d[0], -new_pred_z, new_pred_z, r=1)
cv2.imwrite(out_yes+"/"+str(pred_id)+'_YES_'+image_name+'_SL.jpg', to256(slice_image))
cv2.imwrite(out_yes+"/"+str(pred_id)+'_YES_'+image_name+'_FR.jpg', to256(image2dA))
cv2.imwrite(out_yes+"/"+str(pred_id)+'_YES_'+image_name+'_FR2.jpg', to256(image2dB))
output = [image_path,folder,sub_folder,sub_sub_folder,'YES',new_slice_z,size[2],new_prob,slice_thickness, orient_nib]
lst.append(output)
elif (new_prob <= prob_threshold_L ):
print('-------------- No')
image = image3d
slice_image = image[new_slice_z,:, :]
image2dA = place_line_on_img(image2d[0], -pred_z, -pred_z, r=1)
image2dB = place_line_on_img(image2d[0], -new_pred_z, -new_pred_z, r=1)
cv2.imwrite(out_no+str(pred_id)+'_NO_'+image_name+'_SL.jpg', to256(slice_image))
cv2.imwrite(out_no+str(pred_id)+'_NO_'+image_name+'_FR.jpg', to256(image2dA))
cv2.imwrite(out_no+str(pred_id)+'_NO_'+image_name+'_FR2.jpg', to256(image2dB))
output = [image_path,folder,sub_folder,sub_sub_folder,'NO',new_slice_z,size[2],new_prob,slice_thickness, orient_nib]
lst.append(output)
else:
print('-------------- Review')
image = image3d
slice_image = image[new_slice_z,:, :]
image2dA = place_line_on_img(image2d[0], pred_z, pred_z, r=1)
image2dB = place_line_on_img(image2d[0], new_pred_z, new_pred_z, r=1)
cv2.imwrite(out_rev+str(pred_id)+'_REVIEW_'+image_name+'_SL_'+str(new_slice_z)+'_PROB_'+str(new_prob)+'.jpg', to256(slice_image))
cv2.imwrite(out_rev+str(pred_id)+'_REVIEW_'+image_name+'_FR_'+str(slice_z)+'_PROB_'+str(prob)+'.jpg', to256(image2dA))
cv2.imwrite(out_rev+str(pred_id)+'_REVIEW_'+image_name+'_FR2_'+str(new_slice_z)+'_PROB_'+str(new_prob)+'.jpg', to256(image2dB))
output = [image_path,folder,sub_folder,sub_sub_folder,'REVIEW',new_slice_z,size[2],new_prob,slice_thickness, orient_nib]
lst.append(output)
except Exception:
print('-------------- Wrong')
print('-------------- ')
print('-------------- ')
print("Something went wrong - File: "+image_path)
print("Unexpected error"+str(sys.exc_info()[0]))
output = [image_path,folder,sub_folder,sub_sub_folder,'Error','','','Something went wrong:'+str(sys.exc_info()[1]),'','']
lst.append(output)
df = pd.DataFrame(lst, columns=cols)
if not os.path.exists('/content/gdrive/MyDrive/L3-Clean/Results/Summaries/'):
os.mkdir('/content/gdrive/MyDrive/L3-Clean/Results/Summaries/')
df.to_csv('/content/gdrive/MyDrive/L3-Clean/Results/Summaries/'+path+'_'+test_name+".csv")
print(' ')
print(' ')
print(' ')
print(' -------------- PROCESSING COMPLETE ------------------- ')
| true
| true
|
790834027a8e823b24e0363e5e21439e4d9c23cf
| 6,867
|
py
|
Python
|
tests/bitmovin/services/filters/text_filter_tests.py
|
camberbridge/bitmovin-python
|
3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95
|
[
"Unlicense"
] | 44
|
2016-12-12T17:37:23.000Z
|
2021-03-03T09:48:48.000Z
|
tests/bitmovin/services/filters/text_filter_tests.py
|
camberbridge/bitmovin-python
|
3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95
|
[
"Unlicense"
] | 38
|
2017-01-09T14:45:45.000Z
|
2022-02-27T18:04:33.000Z
|
tests/bitmovin/services/filters/text_filter_tests.py
|
camberbridge/bitmovin-python
|
3af4c6e79b0291fda05fd1ceeb5bed1bba9f3c95
|
[
"Unlicense"
] | 27
|
2017-02-02T22:49:31.000Z
|
2019-11-21T07:04:57.000Z
|
import json
import unittest
from bitmovin import Bitmovin, Response, TextFilter, Font
from bitmovin.errors import BitmovinApiError
from tests.bitmovin import BitmovinTestCase
class TextFilterTests(BitmovinTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
def setUp(self):
super().setUp()
self.bitmovin = Bitmovin(self.api_key)
self.assertIsNotNone(self.bitmovin)
self.assertTrue(isinstance(self.bitmovin, Bitmovin))
def tearDown(self):
super().tearDown()
def test_create_text_filter(self):
sample_filter = self._get_sample_text_filter()
filter_resource_response = self.bitmovin.filters.Text.create(sample_filter)
self.assertIsNotNone(filter_resource_response)
self.assertIsNotNone(filter_resource_response.resource)
self.assertIsNotNone(filter_resource_response.resource.id)
self._compare_text_filters(sample_filter, filter_resource_response.resource)
def test_create_text_filter_without_name(self):
sample_filter = self._get_sample_text_filter()
sample_filter.name = None
filter_resource_response = self.bitmovin.filters.Text.create(sample_filter)
self.assertIsNotNone(filter_resource_response)
self.assertIsNotNone(filter_resource_response.resource)
self.assertIsNotNone(filter_resource_response.resource.id)
self._compare_text_filters(sample_filter, filter_resource_response.resource)
def test_retrieve_text_filter(self):
sample_filter = self._get_sample_text_filter()
created_filter_response = self.bitmovin.filters.Text.create(sample_filter)
self.assertIsNotNone(created_filter_response)
self.assertIsNotNone(created_filter_response.resource)
self.assertIsNotNone(created_filter_response.resource.id)
self._compare_text_filters(sample_filter, created_filter_response.resource)
retrieved_filter_response = self.bitmovin.filters.Text.retrieve(created_filter_response.resource.id)
self.assertIsNotNone(retrieved_filter_response)
self.assertIsNotNone(retrieved_filter_response.resource)
self._compare_text_filters(created_filter_response.resource, retrieved_filter_response.resource)
def test_delete_text_filter(self):
sample_filter = self._get_sample_text_filter()
created_filter_response = self.bitmovin.filters.Text.create(sample_filter)
self.assertIsNotNone(created_filter_response)
self.assertIsNotNone(created_filter_response.resource)
self.assertIsNotNone(created_filter_response.resource.id)
self._compare_text_filters(sample_filter, created_filter_response.resource)
deleted_minimal_resource = self.bitmovin.filters.Text.delete(created_filter_response.resource.id)
self.assertIsNotNone(deleted_minimal_resource)
self.assertIsNotNone(deleted_minimal_resource.resource)
self.assertIsNotNone(deleted_minimal_resource.resource.id)
try:
self.bitmovin.filters.Text.retrieve(created_filter_response.resource.id)
self.fail(
'Previous statement should have thrown an exception. ' +
'Retrieving filter after deleting it shouldn\'t be possible.'
)
except BitmovinApiError:
pass
def test_list_text_filters(self):
sample_filter = self._get_sample_text_filter()
created_filter_response = self.bitmovin.filters.Text.create(sample_filter)
self.assertIsNotNone(created_filter_response)
self.assertIsNotNone(created_filter_response.resource)
self.assertIsNotNone(created_filter_response.resource.id)
self._compare_text_filters(sample_filter, created_filter_response.resource)
filters = self.bitmovin.filters.Text.list()
self.assertIsNotNone(filters)
self.assertIsNotNone(filters.resource)
self.assertIsNotNone(filters.response)
self.assertIsInstance(filters.resource, list)
self.assertIsInstance(filters.response, Response)
self.assertGreater(len(filters.resource), 0)
def test_retrieve_text_filter_custom_data(self):
sample_filter = self._get_sample_text_filter()
sample_filter.customData = '<pre>my custom data</pre>'
created_filter_response = self.bitmovin.filters.Text.create(sample_filter)
self.assertIsNotNone(created_filter_response)
self.assertIsNotNone(created_filter_response.resource)
self.assertIsNotNone(created_filter_response.resource.id)
self._compare_text_filters(sample_filter, created_filter_response.resource)
custom_data_response = self.bitmovin.filters.Text.retrieve_custom_data(
created_filter_response.resource.id)
custom_data = custom_data_response.resource
self.assertEqual(sample_filter.customData, json.loads(custom_data.customData))
def _compare_text_filters(self, first: TextFilter, second: TextFilter):
"""
:param first: TextFilter
:param second: TextFilter
:return: bool
"""
self.assertEqual(str(first.x), str(second.x))
self.assertEqual(str(first.y), str(second.y))
self.assertEqual(first.text, second.text)
self.assertEqual(first.timecode, second.timecode)
self.assertEqual(first.shadowY, second.shadowY)
self.assertEqual(first.shadowX, second.shadowX)
self.assertEqual(first.shadowColor, second.shadowColor)
self.assertEqual(first.alpha, second.alpha)
self.assertEqual(first.fontSize, second.fontSize)
self.assertEqual(first.font, second.font)
self.assertEqual(first.fontColor, second.fontColor)
self.assertEqual(first.fixBounds, second.fixBounds)
self.assertEqual(first.borderWidth, second.borderWidth)
self.assertEqual(first.lineSpacing, second.lineSpacing)
self.assertEqual(first.boxColor, second.boxColor)
self.assertEqual(first.boxBorderWidth, second.boxBorderWidth)
self.assertEqual(first.box, second.box)
self.assertEqual(first.description, second.description)
self.assertEqual(first.name, second.name)
return True
def _get_sample_text_filter(self):
text_filter = TextFilter(name='Sample Text Filter',
x='10',
y='10',
text='ThisIsATest',
font=Font.DEJAVUSANS)
self.assertIsNotNone(text_filter.x)
self.assertIsNotNone(text_filter.y)
self.assertIsNotNone(text_filter.name)
self.assertIsNotNone(text_filter.font)
return text_filter
if __name__ == '__main__':
unittest.main()
| 44.590909
| 108
| 0.722732
|
import json
import unittest
from bitmovin import Bitmovin, Response, TextFilter, Font
from bitmovin.errors import BitmovinApiError
from tests.bitmovin import BitmovinTestCase
class TextFilterTests(BitmovinTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
def setUp(self):
super().setUp()
self.bitmovin = Bitmovin(self.api_key)
self.assertIsNotNone(self.bitmovin)
self.assertTrue(isinstance(self.bitmovin, Bitmovin))
def tearDown(self):
super().tearDown()
def test_create_text_filter(self):
sample_filter = self._get_sample_text_filter()
filter_resource_response = self.bitmovin.filters.Text.create(sample_filter)
self.assertIsNotNone(filter_resource_response)
self.assertIsNotNone(filter_resource_response.resource)
self.assertIsNotNone(filter_resource_response.resource.id)
self._compare_text_filters(sample_filter, filter_resource_response.resource)
def test_create_text_filter_without_name(self):
sample_filter = self._get_sample_text_filter()
sample_filter.name = None
filter_resource_response = self.bitmovin.filters.Text.create(sample_filter)
self.assertIsNotNone(filter_resource_response)
self.assertIsNotNone(filter_resource_response.resource)
self.assertIsNotNone(filter_resource_response.resource.id)
self._compare_text_filters(sample_filter, filter_resource_response.resource)
def test_retrieve_text_filter(self):
sample_filter = self._get_sample_text_filter()
created_filter_response = self.bitmovin.filters.Text.create(sample_filter)
self.assertIsNotNone(created_filter_response)
self.assertIsNotNone(created_filter_response.resource)
self.assertIsNotNone(created_filter_response.resource.id)
self._compare_text_filters(sample_filter, created_filter_response.resource)
retrieved_filter_response = self.bitmovin.filters.Text.retrieve(created_filter_response.resource.id)
self.assertIsNotNone(retrieved_filter_response)
self.assertIsNotNone(retrieved_filter_response.resource)
self._compare_text_filters(created_filter_response.resource, retrieved_filter_response.resource)
def test_delete_text_filter(self):
sample_filter = self._get_sample_text_filter()
created_filter_response = self.bitmovin.filters.Text.create(sample_filter)
self.assertIsNotNone(created_filter_response)
self.assertIsNotNone(created_filter_response.resource)
self.assertIsNotNone(created_filter_response.resource.id)
self._compare_text_filters(sample_filter, created_filter_response.resource)
deleted_minimal_resource = self.bitmovin.filters.Text.delete(created_filter_response.resource.id)
self.assertIsNotNone(deleted_minimal_resource)
self.assertIsNotNone(deleted_minimal_resource.resource)
self.assertIsNotNone(deleted_minimal_resource.resource.id)
try:
self.bitmovin.filters.Text.retrieve(created_filter_response.resource.id)
self.fail(
'Previous statement should have thrown an exception. ' +
'Retrieving filter after deleting it shouldn\'t be possible.'
)
except BitmovinApiError:
pass
def test_list_text_filters(self):
sample_filter = self._get_sample_text_filter()
created_filter_response = self.bitmovin.filters.Text.create(sample_filter)
self.assertIsNotNone(created_filter_response)
self.assertIsNotNone(created_filter_response.resource)
self.assertIsNotNone(created_filter_response.resource.id)
self._compare_text_filters(sample_filter, created_filter_response.resource)
filters = self.bitmovin.filters.Text.list()
self.assertIsNotNone(filters)
self.assertIsNotNone(filters.resource)
self.assertIsNotNone(filters.response)
self.assertIsInstance(filters.resource, list)
self.assertIsInstance(filters.response, Response)
self.assertGreater(len(filters.resource), 0)
def test_retrieve_text_filter_custom_data(self):
sample_filter = self._get_sample_text_filter()
sample_filter.customData = '<pre>my custom data</pre>'
created_filter_response = self.bitmovin.filters.Text.create(sample_filter)
self.assertIsNotNone(created_filter_response)
self.assertIsNotNone(created_filter_response.resource)
self.assertIsNotNone(created_filter_response.resource.id)
self._compare_text_filters(sample_filter, created_filter_response.resource)
custom_data_response = self.bitmovin.filters.Text.retrieve_custom_data(
created_filter_response.resource.id)
custom_data = custom_data_response.resource
self.assertEqual(sample_filter.customData, json.loads(custom_data.customData))
def _compare_text_filters(self, first: TextFilter, second: TextFilter):
self.assertEqual(str(first.x), str(second.x))
self.assertEqual(str(first.y), str(second.y))
self.assertEqual(first.text, second.text)
self.assertEqual(first.timecode, second.timecode)
self.assertEqual(first.shadowY, second.shadowY)
self.assertEqual(first.shadowX, second.shadowX)
self.assertEqual(first.shadowColor, second.shadowColor)
self.assertEqual(first.alpha, second.alpha)
self.assertEqual(first.fontSize, second.fontSize)
self.assertEqual(first.font, second.font)
self.assertEqual(first.fontColor, second.fontColor)
self.assertEqual(first.fixBounds, second.fixBounds)
self.assertEqual(first.borderWidth, second.borderWidth)
self.assertEqual(first.lineSpacing, second.lineSpacing)
self.assertEqual(first.boxColor, second.boxColor)
self.assertEqual(first.boxBorderWidth, second.boxBorderWidth)
self.assertEqual(first.box, second.box)
self.assertEqual(first.description, second.description)
self.assertEqual(first.name, second.name)
return True
def _get_sample_text_filter(self):
text_filter = TextFilter(name='Sample Text Filter',
x='10',
y='10',
text='ThisIsATest',
font=Font.DEJAVUSANS)
self.assertIsNotNone(text_filter.x)
self.assertIsNotNone(text_filter.y)
self.assertIsNotNone(text_filter.name)
self.assertIsNotNone(text_filter.font)
return text_filter
if __name__ == '__main__':
unittest.main()
| true
| true
|
790834cc3bfa3eafdda0ce74e90d581d6ba9a7c3
| 667
|
py
|
Python
|
tools/perf/contrib/media_router_benchmarks/media_router_measurements.py
|
sarang-apps/darshan_browser
|
173649bb8a7c656dc60784d19e7bb73e07c20daa
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 575
|
2015-06-18T23:58:20.000Z
|
2022-03-23T09:32:39.000Z
|
tools/perf/contrib/media_router_benchmarks/media_router_measurements.py
|
sarang-apps/darshan_browser
|
173649bb8a7c656dc60784d19e7bb73e07c20daa
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 113
|
2015-05-04T09:58:14.000Z
|
2022-01-31T19:35:03.000Z
|
tools/perf/contrib/media_router_benchmarks/media_router_measurements.py
|
sarang-apps/darshan_browser
|
173649bb8a7c656dc60784d19e7bb73e07c20daa
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 52
|
2015-07-14T10:40:50.000Z
|
2022-03-15T01:11:49.000Z
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from contrib.media_router_benchmarks import media_router_cpu_memory_metric
from telemetry.page import legacy_page_test
class MediaRouterCPUMemoryTest(legacy_page_test.LegacyPageTest):
"""Performs a measurement of Media Route CPU/memory usage."""
def __init__(self):
super(MediaRouterCPUMemoryTest, self).__init__()
self._metric = media_router_cpu_memory_metric.MediaRouterCPUMemoryMetric()
def ValidateAndMeasurePage(self, page, tab, results):
self._metric.AddResults(tab, results)
| 37.055556
| 78
| 0.803598
|
from contrib.media_router_benchmarks import media_router_cpu_memory_metric
from telemetry.page import legacy_page_test
class MediaRouterCPUMemoryTest(legacy_page_test.LegacyPageTest):
def __init__(self):
super(MediaRouterCPUMemoryTest, self).__init__()
self._metric = media_router_cpu_memory_metric.MediaRouterCPUMemoryMetric()
def ValidateAndMeasurePage(self, page, tab, results):
self._metric.AddResults(tab, results)
| true
| true
|
790835f22ff39732dc36c4e09154deb638a51290
| 2,839
|
py
|
Python
|
speech/samples/v1/speech_transcribe_multichannel.py
|
hugovk/google-cloud-python
|
b387134827dbc3be0e1b431201e0875798002fda
|
[
"Apache-2.0"
] | 1
|
2019-03-26T21:44:51.000Z
|
2019-03-26T21:44:51.000Z
|
speech/samples/v1/speech_transcribe_multichannel.py
|
hugovk/google-cloud-python
|
b387134827dbc3be0e1b431201e0875798002fda
|
[
"Apache-2.0"
] | 6
|
2019-05-27T22:05:58.000Z
|
2019-08-05T16:46:16.000Z
|
speech/samples/v1/speech_transcribe_multichannel.py
|
hugovk/google-cloud-python
|
b387134827dbc3be0e1b431201e0875798002fda
|
[
"Apache-2.0"
] | 1
|
2019-03-29T18:26:16.000Z
|
2019-03-29T18:26:16.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DO NOT EDIT! This is a generated sample ("Request", "speech_transcribe_multichannel")
# To install the latest published package dependency, execute the following:
# pip install google-cloud-speech
# sample-metadata
# title: Multi-Channel Audio Transcription (Local File)
# description: Transcribe a short audio file with multiple channels
# usage: python3 samples/v1/speech_transcribe_multichannel.py [--local_file_path "resources/multi.wav"]
# [START speech_transcribe_multichannel]
from google.cloud import speech_v1
import io
def sample_recognize(local_file_path):
"""
Transcribe a short audio file with multiple channels
Args:
local_file_path Path to local audio file, e.g. /path/audio.wav
"""
client = speech_v1.SpeechClient()
# local_file_path = 'resources/multi.wav'
# The number of channels in the input audio file (optional)
audio_channel_count = 2
# When set to true, each audio channel will be recognized separately.
# The recognition result will contain a channel_tag field to state which
# channel that result belongs to
enable_separate_recognition_per_channel = True
# The language of the supplied audio
language_code = "en-US"
config = {
"audio_channel_count": audio_channel_count,
"enable_separate_recognition_per_channel": enable_separate_recognition_per_channel,
"language_code": language_code,
}
with io.open(local_file_path, "rb") as f:
content = f.read()
audio = {"content": content}
response = client.recognize(config, audio)
for result in response.results:
# channel_tag to recognize which audio channel this result is for
print(u"Channel tag: {}".format(result.channel_tag))
# First alternative is the most probable result
alternative = result.alternatives[0]
print(u"Transcript: {}".format(alternative.transcript))
# [END speech_transcribe_multichannel]
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--local_file_path", type=str, default="resources/multi.wav")
args = parser.parse_args()
sample_recognize(args.local_file_path)
if __name__ == "__main__":
main()
| 32.632184
| 105
| 0.726664
|
from google.cloud import speech_v1
import io
def sample_recognize(local_file_path):
client = speech_v1.SpeechClient()
audio_channel_count = 2
enable_separate_recognition_per_channel = True
language_code = "en-US"
config = {
"audio_channel_count": audio_channel_count,
"enable_separate_recognition_per_channel": enable_separate_recognition_per_channel,
"language_code": language_code,
}
with io.open(local_file_path, "rb") as f:
content = f.read()
audio = {"content": content}
response = client.recognize(config, audio)
for result in response.results:
print(u"Channel tag: {}".format(result.channel_tag))
alternative = result.alternatives[0]
print(u"Transcript: {}".format(alternative.transcript))
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--local_file_path", type=str, default="resources/multi.wav")
args = parser.parse_args()
sample_recognize(args.local_file_path)
if __name__ == "__main__":
main()
| true
| true
|
790837f0eda32c46f86435beb0818e3c2221053d
| 27,255
|
py
|
Python
|
pipenv/cli.py
|
mlhamel/pipenv
|
22445858766a1f92c5ad87c90662ba260c8b750b
|
[
"MIT"
] | null | null | null |
pipenv/cli.py
|
mlhamel/pipenv
|
22445858766a1f92c5ad87c90662ba260c8b750b
|
[
"MIT"
] | null | null | null |
pipenv/cli.py
|
mlhamel/pipenv
|
22445858766a1f92c5ad87c90662ba260c8b750b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
from click import (
argument,
command,
echo,
edit,
group,
Group,
option,
pass_context,
Option,
version_option,
BadParameter,
)
from click_completion import init as init_completion
from click_completion import get_code
from click_didyoumean import DYMCommandCollection
import crayons
import delegator
from .__version__ import __version__
from . import environments
from .environments import *
from .utils import is_valid_url
# Enable shell completion.
init_completion()
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
class PipenvGroup(Group):
"""Custom Group class provides formatted main help"""
def get_help_option(self, ctx):
from .core import format_help
"""Override for showing formatted main help via --help and -h options"""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
if value and not ctx.resilient_parsing:
if not ctx.invoked_subcommand:
# legit main help
echo(format_help(ctx.get_help()))
else:
# legit sub-command help
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help='Show this message and exit.',
)
def setup_verbose(ctx, param, value):
if value:
import logging
logging.getLogger('pip').setLevel(logging.INFO)
return value
def validate_python_path(ctx, param, value):
# Validating the Python path is complicated by accepting a number of
# friendly options: the default will be boolean False to enable
# autodetection but it may also be a value which will be searched in
# the path or an absolute path. To report errors as early as possible
# we'll report absolute paths which do not exist:
if isinstance(value, (str, bytes)):
if os.path.isabs(value) and not os.path.isfile(value):
raise BadParameter('Expected Python at path %s does not exist' % value)
return value
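# Illustrative behavior of the validator above (annotation, not original code):
#   validate_python_path(None, None, 'python3.7')       -> 'python3.7' (looked up on PATH later)
#   validate_python_path(None, None, '/missing/python') -> raises BadParameter
#   validate_python_path(None, None, False)             -> False (autodetection)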
def validate_pypi_mirror(ctx, param, value):
if value and not is_valid_url(value):
raise BadParameter('Invalid PyPI mirror URL: %s' % value)
return value
@group(
cls=PipenvGroup,
invoke_without_command=True,
context_settings=CONTEXT_SETTINGS,
)
@option(
'--where',
is_flag=True,
default=False,
help="Output project home information.",
)
@option(
'--venv',
is_flag=True,
default=False,
help="Output virtualenv information.",
)
@option(
'--py',
is_flag=True,
default=False,
help="Output Python interpreter information.",
)
@option(
'--envs',
is_flag=True,
default=False,
help="Output Environment Variable options.",
)
@option(
'--rm', is_flag=True, default=False, help="Remove the virtualenv."
)
@option('--bare', is_flag=True, default=False, help="Minimal output.")
@option(
'--completion',
is_flag=True,
default=False,
help="Output completion (to be eval'd).",
)
@option('--man', is_flag=True, default=False, help="Display manpage.")
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--site-packages',
is_flag=True,
default=False,
help="Enable site-packages for the virtualenv.",
)
@version_option(
prog_name=crayons.normal('pipenv', bold=True), version=__version__
)
@pass_context
def cli(
ctx,
where=False,
venv=False,
rm=False,
bare=False,
three=False,
python=False,
help=False,
py=False,
site_packages=False,
envs=False,
man=False,
completion=False,
):
if completion: # Handle this ASAP to make shell startup fast.
if PIPENV_SHELL:
echo(
get_code(
shell=PIPENV_SHELL.split(os.sep)[-1], prog_name='pipenv'
)
)
else:
echo(
'Please ensure that the {0} environment variable '
'is set.'.format(crayons.normal('SHELL', bold=True)),
err=True,
)
sys.exit(1)
sys.exit(0)
from .core import (
system_which,
do_py,
warn_in_virtualenv,
do_where,
project,
spinner,
cleanup_virtualenv,
ensure_project,
format_help
)
if man:
if system_which('man'):
path = os.sep.join([os.path.dirname(__file__), 'pipenv.1'])
os.execle(system_which('man'), 'man', path, os.environ)
else:
echo(
'man does not appear to be available on your system.', err=True
)
if envs:
echo(
'The following environment variables can be set, to do various things:\n'
)
for key in environments.__dict__:
if key.startswith('PIPENV'):
echo(' - {0}'.format(crayons.normal(key, bold=True)))
echo(
'\nYou can learn more at:\n {0}'.format(
crayons.green(
'http://docs.pipenv.org/advanced/#configuration-with-environment-variables'
)
)
)
sys.exit(0)
warn_in_virtualenv()
if ctx.invoked_subcommand is None:
# --where was passed...
if where:
do_where(bare=True)
sys.exit(0)
elif py:
do_py()
sys.exit()
# --venv was passed...
elif venv:
# There is no virtualenv yet.
if not project.virtualenv_exists:
echo(
crayons.red(
'No virtualenv has been created for this project yet!'
),
err=True,
)
sys.exit(1)
else:
echo(project.virtualenv_location)
sys.exit(0)
# --rm was passed...
elif rm:
# Abort if --system (or running in a virtualenv).
if PIPENV_USE_SYSTEM:
echo(
crayons.red(
'You are attempting to remove a virtualenv that '
'Pipenv did not create. Aborting.'
)
)
sys.exit(1)
if project.virtualenv_exists:
loc = project.virtualenv_location
echo(
crayons.normal(
u'{0} ({1})…'.format(
crayons.normal('Removing virtualenv', bold=True),
crayons.green(loc),
)
)
)
with spinner():
# Remove the virtualenv.
cleanup_virtualenv(bare=True)
sys.exit(0)
else:
echo(
crayons.red(
'No virtualenv has been created for this project yet!',
bold=True,
),
err=True,
)
sys.exit(1)
# --two / --three was passed...
if (python or three is not None) or site_packages:
ensure_project(
three=three, python=python, warn=True, site_packages=site_packages
)
# Check this again before exiting for empty ``pipenv`` command.
elif ctx.invoked_subcommand is None:
# Display help to user, if no commands were passed.
echo(format_help(ctx.get_help()))
@command(
short_help="Installs provided packages and adds them to Pipfile, or (if none is given), installs all packages.",
context_settings=dict(ignore_unknown_options=True, allow_extra_args=True),
)
@argument('package_name', default=False)
@argument('more_packages', nargs=-1)
@option(
'--dev',
'-d',
is_flag=True,
default=False,
help="Install package(s) in [dev-packages].",
)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--pypi-mirror',
default=PIPENV_PYPI_MIRROR,
nargs=1,
callback=validate_pypi_mirror,
help="Specify a PyPI mirror.",
)
@option(
'--system', is_flag=True, default=False, help="System pip management."
)
@option(
'--requirements',
'-r',
nargs=1,
default=False,
help="Import a requirements.txt file.",
)
@option(
'--code', '-c', nargs=1, default=False, help="Import from codebase."
)
@option(
'--verbose',
'-v',
is_flag=True,
default=False,
help="Verbose mode.",
callback=setup_verbose,
)
@option(
'--ignore-pipfile',
is_flag=True,
default=False,
help="Ignore Pipfile when installing, using the Pipfile.lock.",
)
@option(
'--sequential',
is_flag=True,
default=False,
help="Install dependencies one-at-a-time, instead of concurrently.",
)
@option(
'--skip-lock',
is_flag=True,
default=False,
help=u"Ignore locking mechanisms when installing—use the Pipfile, instead.",
)
@option(
'--deploy',
is_flag=True,
default=False,
help=u"Abort if the Pipfile.lock is out–of–date, or Python version is wrong.",
)
@option(
'--pre', is_flag=True, default=False, help=u"Allow pre–releases."
)
@option(
'--keep-outdated',
is_flag=True,
default=False,
help=u"Keep out–dated dependencies from being updated in Pipfile.lock.",
)
@option(
'--selective-upgrade',
is_flag=True,
default=False,
help="Update specified packages.",
)
def install(
package_name=False,
more_packages=False,
dev=False,
three=False,
python=False,
pypi_mirror=None,
system=False,
lock=True,
ignore_pipfile=False,
skip_lock=False,
verbose=False,
requirements=False,
sequential=False,
pre=False,
code=False,
deploy=False,
keep_outdated=False,
selective_upgrade=False,
):
from .core import do_install
do_install(
package_name=package_name,
more_packages=more_packages,
dev=dev,
three=three,
python=python,
pypi_mirror=pypi_mirror,
system=system,
lock=lock,
ignore_pipfile=ignore_pipfile,
skip_lock=skip_lock,
verbose=verbose,
requirements=requirements,
sequential=sequential,
pre=pre,
code=code,
deploy=deploy,
keep_outdated=keep_outdated,
selective_upgrade=selective_upgrade,
)
@command(
short_help="Un-installs a provided package and removes it from Pipfile."
)
@argument('package_name', default=False)
@argument('more_packages', nargs=-1)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--system', is_flag=True, default=False, help="System pip management."
)
@option(
'--verbose',
'-v',
is_flag=True,
default=False,
help="Verbose mode.",
callback=setup_verbose,
)
@option('--lock', is_flag=True, default=True, help="Lock afterwards.")
@option(
'--all-dev',
is_flag=True,
default=False,
help="Un-install all package from [dev-packages].",
)
@option(
'--all',
is_flag=True,
default=False,
help="Purge all package(s) from virtualenv. Does not edit Pipfile.",
)
@option(
'--keep-outdated',
is_flag=True,
default=False,
help=u"Keep out–dated dependencies from being updated in Pipfile.lock.",
)
@option(
'--pypi-mirror',
default=PIPENV_PYPI_MIRROR,
nargs=1,
callback=validate_pypi_mirror,
help="Specify a PyPI mirror.",
)
def uninstall(
package_name=False,
more_packages=False,
three=None,
python=False,
system=False,
lock=False,
all_dev=False,
all=False,
verbose=False,
keep_outdated=False,
pypi_mirror=None,
):
from .core import do_uninstall
do_uninstall(
package_name=package_name,
more_packages=more_packages,
three=three,
python=python,
system=system,
lock=lock,
all_dev=all_dev,
all=all,
verbose=verbose,
keep_outdated=keep_outdated,
pypi_mirror=pypi_mirror,
)
@command(short_help="Generates Pipfile.lock.")
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--pypi-mirror',
default=PIPENV_PYPI_MIRROR,
nargs=1,
callback=validate_pypi_mirror,
help="Specify a PyPI mirror.",
)
@option(
'--verbose',
'-v',
is_flag=True,
default=False,
help="Verbose mode.",
callback=setup_verbose,
)
@option(
'--requirements',
'-r',
is_flag=True,
default=False,
help="Generate output compatible with requirements.txt.",
)
@option(
'--dev',
'-d',
is_flag=True,
default=False,
help="Generate output compatible with requirements.txt for the development dependencies.",
)
@option(
'--clear', is_flag=True, default=False, help="Clear the dependency cache."
)
@option(
'--pre', is_flag=True, default=False, help=u"Allow pre–releases."
)
@option(
'--keep-outdated',
is_flag=True,
default=False,
help=u"Keep out–dated dependencies from being updated in Pipfile.lock.",
)
def lock(
three=None,
python=False,
pypi_mirror=None,
verbose=False,
requirements=False,
dev=False,
clear=False,
pre=False,
keep_outdated=False,
):
from .core import ensure_project, do_init, do_lock
# Ensure that virtualenv is available.
ensure_project(three=three, python=python)
if requirements:
do_init(dev=dev, requirements=requirements, pypi_mirror=pypi_mirror)
do_lock(
verbose=verbose, clear=clear, pre=pre, keep_outdated=keep_outdated, pypi_mirror=pypi_mirror
)
@command(
short_help="Spawns a shell within the virtualenv.",
context_settings=dict(ignore_unknown_options=True, allow_extra_args=True),
)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--fancy',
is_flag=True,
default=False,
help="Run in shell in fancy mode (for elegantly configured shells).",
)
@option(
'--anyway',
is_flag=True,
default=False,
help="Always spawn a subshell, even if one is already spawned.",
)
@argument('shell_args', nargs=-1)
def shell(
three=None, python=False, fancy=False, shell_args=None, anyway=False
):
from .core import load_dot_env, do_shell
# Prevent user from activating nested environments.
if 'PIPENV_ACTIVE' in os.environ:
# If PIPENV_ACTIVE is set, VIRTUAL_ENV should always be set too.
venv_name = os.environ.get(
'VIRTUAL_ENV', 'UNKNOWN_VIRTUAL_ENVIRONMENT'
)
if not anyway:
echo(
'{0} {1} {2}\nNo action taken to avoid nested environments.'.format(
crayons.normal('Shell for'),
crayons.green(venv_name, bold=True),
crayons.normal('already activated.', bold=True),
),
err=True,
)
sys.exit(1)
# Load .env file.
load_dot_env()
# Use fancy mode for Windows.
if os.name == 'nt':
fancy = True
do_shell(
three=three, python=python, fancy=fancy, shell_args=shell_args
)
@command(
add_help_option=False,
short_help="Spawns a command installed into the virtualenv.",
context_settings=dict(
ignore_unknown_options=True,
allow_interspersed_args=False,
allow_extra_args=True,
),
)
@argument('command')
@argument('args', nargs=-1)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
def run(command, args, three=None, python=False):
from .core import do_run
do_run(command=command, args=args, three=three, python=python)
@command(
short_help="Checks for security vulnerabilities and against PEP 508 markers provided in Pipfile.",
context_settings=dict(ignore_unknown_options=True, allow_extra_args=True),
)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--system', is_flag=True, default=False, help="Use system Python."
)
@option(
'--unused',
nargs=1,
default=False,
help="Given a code path, show potentially unused dependencies.",
)
@argument('args', nargs=-1)
def check(
three=None,
python=False,
system=False,
unused=False,
style=False,
args=None,
):
from .core import do_check
do_check(
three=three, python=python, system=system, unused=unused, args=args
)
@command(short_help="Runs lock, then sync.")
@argument('more_packages', nargs=-1)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--pypi-mirror',
default=PIPENV_PYPI_MIRROR,
nargs=1,
callback=validate_pypi_mirror,
help="Specify a PyPI mirror.",
)
@option(
'--verbose',
'-v',
is_flag=True,
default=False,
help="Verbose mode.",
callback=setup_verbose,
)
@option(
'--dev',
'-d',
is_flag=True,
default=False,
help="Install package(s) in [dev-packages].",
)
@option(
'--clear', is_flag=True, default=False, help="Clear the dependency cache."
)
@option('--bare', is_flag=True, default=False, help="Minimal output.")
@option(
'--pre', is_flag=True, default=False, help=u"Allow pre–releases."
)
@option(
'--keep-outdated',
is_flag=True,
default=False,
help=u"Keep out–dated dependencies from being updated in Pipfile.lock.",
)
@option(
'--sequential',
is_flag=True,
default=False,
help="Install dependencies one-at-a-time, instead of concurrently.",
)
@option(
'--outdated',
is_flag=True,
default=False,
help=u"List out–of–date dependencies.",
)
@option(
'--dry-run',
is_flag=True,
default=None,
help=u"List out–of–date dependencies.",
)
@argument('package', default=False)
@pass_context
def update(
ctx,
three=None,
python=False,
pypi_mirror=None,
system=False,
verbose=False,
clear=False,
keep_outdated=False,
pre=False,
dev=False,
bare=False,
sequential=False,
package=None,
dry_run=None,
outdated=False,
more_packages=None,
):
from .core import (
ensure_project,
do_outdated,
do_lock,
do_sync,
ensure_lockfile,
do_install,
project,
)
ensure_project(three=three, python=python, warn=True)
if not outdated:
outdated = bool(dry_run)
if outdated:
do_outdated(pypi_mirror=pypi_mirror)
if not package:
echo(
'{0} {1} {2} {3}{4}'.format(
crayons.white('Running', bold=True),
crayons.red('$ pipenv lock', bold=True),
crayons.white('then', bold=True),
crayons.red('$ pipenv sync', bold=True),
crayons.white('.', bold=True),
)
)
do_lock(
verbose=verbose, clear=clear, pre=pre, keep_outdated=keep_outdated, pypi_mirror=pypi_mirror
)
do_sync(
ctx=ctx,
install=install,
dev=dev,
three=three,
python=python,
bare=bare,
dont_upgrade=False,
user=False,
verbose=verbose,
clear=clear,
unused=False,
sequential=sequential,
pypi_mirror=pypi_mirror,
)
else:
for package in ([package] + list(more_packages) or []):
if package not in project.all_packages:
echo(
'{0}: {1} was not found in your Pipfile! Aborting.'
''.format(
crayons.red('Warning', bold=True),
crayons.green(package, bold=True),
),
err=True,
)
sys.exit(1)
ensure_lockfile(keep_outdated=project.lockfile_exists, pypi_mirror=pypi_mirror)
# Install the dependencies.
do_install(
package_name=package,
more_packages=more_packages,
dev=dev,
three=three,
python=python,
pypi_mirror=pypi_mirror,
system=system,
lock=True,
ignore_pipfile=False,
skip_lock=False,
verbose=verbose,
requirements=False,
sequential=sequential,
pre=pre,
code=False,
deploy=False,
keep_outdated=True,
selective_upgrade=True,
)
@command(
short_help=u"Displays currently–installed dependency graph information."
)
@option('--bare', is_flag=True, default=False, help="Minimal output.")
@option('--json', is_flag=True, default=False, help="Output JSON.")
@option('--json-tree', is_flag=True, default=False, help="Output JSON in nested tree.")
@option(
'--reverse', is_flag=True, default=False, help="Reversed dependency graph."
)
def graph(bare=False, json=False, json_tree=False, reverse=False):
from .core import do_graph
do_graph(bare=bare, json=json, json_tree=json_tree, reverse=reverse)
@command(short_help="View a given module in your editor.", name="open")
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@argument('module', nargs=1)
def run_open(module, three=None, python=None):
from .core import which, ensure_project
# Ensure that virtualenv is available.
ensure_project(three=three, python=python, validate=False)
c = delegator.run(
'{0} -c "import {1}; print({1}.__file__);"'.format(
which('python'), module
)
)
try:
assert c.return_code == 0
except AssertionError:
echo(crayons.red('Module not found!'))
sys.exit(1)
if '__init__.py' in c.out:
p = os.path.dirname(c.out.strip().rstrip('cdo'))
else:
p = c.out.strip().rstrip('cdo')
echo(
crayons.normal('Opening {0!r} in your EDITOR.'.format(p), bold=True)
)
edit(filename=p)
sys.exit(0)
@command(short_help="Installs all packages specified in Pipfile.lock.")
@option(
'--verbose',
'-v',
is_flag=True,
default=False,
help="Verbose mode.",
callback=setup_verbose,
)
@option(
'--dev',
'-d',
is_flag=True,
default=False,
help="Additionally install package(s) in [dev-packages].",
)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--pypi-mirror',
default=PIPENV_PYPI_MIRROR,
nargs=1,
callback=validate_pypi_mirror,
help="Specify a PyPI mirror.",
)
@option('--bare', is_flag=True, default=False, help="Minimal output.")
@option(
'--clear', is_flag=True, default=False, help="Clear the dependency cache."
)
@option(
'--sequential',
is_flag=True,
default=False,
help="Install dependencies one-at-a-time, instead of concurrently.",
)
@pass_context
def sync(
ctx,
dev=False,
three=None,
python=None,
bare=False,
dont_upgrade=False,
user=False,
verbose=False,
clear=False,
unused=False,
package_name=None,
sequential=False,
pypi_mirror=None,
):
from .core import do_sync
do_sync(
ctx=ctx,
install=install,
dev=dev,
three=three,
python=python,
bare=bare,
dont_upgrade=dont_upgrade,
user=user,
verbose=verbose,
clear=clear,
unused=unused,
sequential=sequential,
pypi_mirror=pypi_mirror,
)
@command(
short_help="Uninstalls all packages not specified in Pipfile.lock."
)
@option(
'--verbose',
'-v',
is_flag=True,
default=False,
help="Verbose mode.",
callback=setup_verbose,
)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--dry-run',
is_flag=True,
default=False,
help="Just output unneeded packages.",
)
@pass_context
def clean(
ctx,
three=None,
python=None,
dry_run=False,
bare=False,
user=False,
verbose=False,
):
from .core import do_clean
do_clean(
ctx=ctx, three=three, python=python, dry_run=dry_run, verbose=verbose
)
# Install click commands.
cli.add_command(graph)
cli.add_command(install)
cli.add_command(uninstall)
cli.add_command(sync)
cli.add_command(lock)
cli.add_command(check)
cli.add_command(clean)
cli.add_command(shell)
cli.add_command(run)
cli.add_command(update)
cli.add_command(run_open)
# Only invoke the "did you mean" when an argument wasn't passed (it breaks those).
if '-' not in ''.join(sys.argv) and len(sys.argv) > 1:
cli = DYMCommandCollection(sources=[cli])
if __name__ == '__main__':
cli()
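A minimal sketch of driving this CLI programmatically with click's test runner; the --venv flag is one of the options defined above, and the output depends on the project state:

from click.testing import CliRunner
from pipenv.cli import cli

runner = CliRunner()
result = runner.invoke(cli, ['--venv'])  # prints the virtualenv location, or an error
print(result.exit_code, result.output)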
| 25.212766
| 116
| 0.60444
|
import os
import sys
from click import (
argument,
command,
echo,
edit,
group,
Group,
option,
pass_context,
Option,
version_option,
BadParameter,
)
from click_completion import init as init_completion
from click_completion import get_code
from click_didyoumean import DYMCommandCollection
import crayons
import delegator
from .__version__ import __version__
from . import environments
from .environments import *
from .utils import is_valid_url
init_completion()
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
class PipenvGroup(Group):
def get_help_option(self, ctx):
from .core import format_help
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
if value and not ctx.resilient_parsing:
if not ctx.invoked_subcommand:
echo(format_help(ctx.get_help()))
else:
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help='Show this message and exit.',
)
def setup_verbose(ctx, param, value):
if value:
import logging
logging.getLogger('pip').setLevel(logging.INFO)
return value
def validate_python_path(ctx, param, value):
if isinstance(value, (str, bytes)):
if os.path.isabs(value) and not os.path.isfile(value):
raise BadParameter('Expected Python at path %s does not exist' % value)
return value
def validate_pypi_mirror(ctx, param, value):
if value and not is_valid_url(value):
raise BadParameter('Invalid PyPI mirror URL: %s' % value)
return value
@group(
cls=PipenvGroup,
invoke_without_command=True,
context_settings=CONTEXT_SETTINGS,
)
@option(
'--where',
is_flag=True,
default=False,
help="Output project home information.",
)
@option(
'--venv',
is_flag=True,
default=False,
help="Output virtualenv information.",
)
@option(
'--py',
is_flag=True,
default=False,
help="Output Python interpreter information.",
)
@option(
'--envs',
is_flag=True,
default=False,
help="Output Environment Variable options.",
)
@option(
'--rm', is_flag=True, default=False, help="Remove the virtualenv."
)
@option('--bare', is_flag=True, default=False, help="Minimal output.")
@option(
'--completion',
is_flag=True,
default=False,
help="Output completion (to be eval'd).",
)
@option('--man', is_flag=True, default=False, help="Display manpage.")
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--site-packages',
is_flag=True,
default=False,
help="Enable site-packages for the virtualenv.",
)
@version_option(
prog_name=crayons.normal('pipenv', bold=True), version=__version__
)
@pass_context
def cli(
ctx,
where=False,
venv=False,
rm=False,
bare=False,
three=False,
python=False,
help=False,
py=False,
site_packages=False,
envs=False,
man=False,
completion=False,
):
if completion:
if PIPENV_SHELL:
echo(
get_code(
shell=PIPENV_SHELL.split(os.sep)[-1], prog_name='pipenv'
)
)
else:
echo(
'Please ensure that the {0} environment variable '
'is set.'.format(crayons.normal('SHELL', bold=True)),
err=True,
)
sys.exit(1)
sys.exit(0)
from .core import (
system_which,
do_py,
warn_in_virtualenv,
do_where,
project,
spinner,
cleanup_virtualenv,
ensure_project,
format_help
)
if man:
if system_which('man'):
path = os.sep.join([os.path.dirname(__file__), 'pipenv.1'])
os.execle(system_which('man'), 'man', path, os.environ)
else:
echo(
'man does not appear to be available on your system.', err=True
)
if envs:
echo(
'The following environment variables can be set, to do various things:\n'
)
for key in environments.__dict__:
if key.startswith('PIPENV'):
echo(' - {0}'.format(crayons.normal(key, bold=True)))
echo(
'\nYou can learn more at:\n {0}'.format(
crayons.green(
'http://docs.pipenv.org/advanced/#configuration-with-environment-variables'
)
)
)
sys.exit(0)
warn_in_virtualenv()
if ctx.invoked_subcommand is None:
if where:
do_where(bare=True)
sys.exit(0)
elif py:
do_py()
sys.exit()
elif venv:
if not project.virtualenv_exists:
echo(
crayons.red(
'No virtualenv has been created for this project yet!'
),
err=True,
)
sys.exit(1)
else:
echo(project.virtualenv_location)
sys.exit(0)
elif rm:
if PIPENV_USE_SYSTEM:
echo(
crayons.red(
'You are attempting to remove a virtualenv that '
'Pipenv did not create. Aborting.'
)
)
sys.exit(1)
if project.virtualenv_exists:
loc = project.virtualenv_location
echo(
crayons.normal(
u'{0} ({1})…'.format(
crayons.normal('Removing virtualenv', bold=True),
crayons.green(loc),
)
)
)
with spinner():
cleanup_virtualenv(bare=True)
sys.exit(0)
else:
echo(
crayons.red(
'No virtualenv has been created for this project yet!',
bold=True,
),
err=True,
)
sys.exit(1)
if (python or three is not None) or site_packages:
ensure_project(
three=three, python=python, warn=True, site_packages=site_packages
)
elif ctx.invoked_subcommand is None:
echo(format_help(ctx.get_help()))
@command(
short_help="Installs provided packages and adds them to Pipfile, or (if none is given), installs all packages.",
context_settings=dict(ignore_unknown_options=True, allow_extra_args=True),
)
@argument('package_name', default=False)
@argument('more_packages', nargs=-1)
@option(
'--dev',
'-d',
is_flag=True,
default=False,
help="Install package(s) in [dev-packages].",
)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--pypi-mirror',
default=PIPENV_PYPI_MIRROR,
nargs=1,
callback=validate_pypi_mirror,
help="Specify a PyPI mirror.",
)
@option(
'--system', is_flag=True, default=False, help="System pip management."
)
@option(
'--requirements',
'-r',
nargs=1,
default=False,
help="Import a requirements.txt file.",
)
@option(
'--code', '-c', nargs=1, default=False, help="Import from codebase."
)
@option(
'--verbose',
'-v',
is_flag=True,
default=False,
help="Verbose mode.",
callback=setup_verbose,
)
@option(
'--ignore-pipfile',
is_flag=True,
default=False,
help="Ignore Pipfile when installing, using the Pipfile.lock.",
)
@option(
'--sequential',
is_flag=True,
default=False,
help="Install dependencies one-at-a-time, instead of concurrently.",
)
@option(
'--skip-lock',
is_flag=True,
default=False,
help=u"Ignore locking mechanisms when installing—use the Pipfile, instead.",
)
@option(
'--deploy',
is_flag=True,
default=False,
help=u"Abort if the Pipfile.lock is out–of–date, or Python version is wrong.",
)
@option(
'--pre', is_flag=True, default=False, help=u"Allow pre–releases."
)
@option(
'--keep-outdated',
is_flag=True,
default=False,
help=u"Keep out–dated dependencies from being updated in Pipfile.lock.",
)
@option(
'--selective-upgrade',
is_flag=True,
default=False,
help="Update specified packages.",
)
def install(
package_name=False,
more_packages=False,
dev=False,
three=False,
python=False,
pypi_mirror=None,
system=False,
lock=True,
ignore_pipfile=False,
skip_lock=False,
verbose=False,
requirements=False,
sequential=False,
pre=False,
code=False,
deploy=False,
keep_outdated=False,
selective_upgrade=False,
):
from .core import do_install
do_install(
package_name=package_name,
more_packages=more_packages,
dev=dev,
three=three,
python=python,
pypi_mirror=pypi_mirror,
system=system,
lock=lock,
ignore_pipfile=ignore_pipfile,
skip_lock=skip_lock,
verbose=verbose,
requirements=requirements,
sequential=sequential,
pre=pre,
code=code,
deploy=deploy,
keep_outdated=keep_outdated,
selective_upgrade=selective_upgrade,
)
@command(
short_help="Un-installs a provided package and removes it from Pipfile."
)
@argument('package_name', default=False)
@argument('more_packages', nargs=-1)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--system', is_flag=True, default=False, help="System pip management."
)
@option(
'--verbose',
'-v',
is_flag=True,
default=False,
help="Verbose mode.",
callback=setup_verbose,
)
@option('--lock', is_flag=True, default=True, help="Lock afterwards.")
@option(
'--all-dev',
is_flag=True,
default=False,
help="Un-install all package from [dev-packages].",
)
@option(
'--all',
is_flag=True,
default=False,
help="Purge all package(s) from virtualenv. Does not edit Pipfile.",
)
@option(
'--keep-outdated',
is_flag=True,
default=False,
help=u"Keep out–dated dependencies from being updated in Pipfile.lock.",
)
@option(
'--pypi-mirror',
default=PIPENV_PYPI_MIRROR,
nargs=1,
callback=validate_pypi_mirror,
help="Specify a PyPI mirror.",
)
def uninstall(
package_name=False,
more_packages=False,
three=None,
python=False,
system=False,
lock=False,
all_dev=False,
all=False,
verbose=False,
keep_outdated=False,
pypi_mirror=None,
):
from .core import do_uninstall
do_uninstall(
package_name=package_name,
more_packages=more_packages,
three=three,
python=python,
system=system,
lock=lock,
all_dev=all_dev,
all=all,
verbose=verbose,
keep_outdated=keep_outdated,
pypi_mirror=pypi_mirror,
)
@command(short_help="Generates Pipfile.lock.")
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--pypi-mirror',
default=PIPENV_PYPI_MIRROR,
nargs=1,
callback=validate_pypi_mirror,
help="Specify a PyPI mirror.",
)
@option(
'--verbose',
'-v',
is_flag=True,
default=False,
help="Verbose mode.",
callback=setup_verbose,
)
@option(
'--requirements',
'-r',
is_flag=True,
default=False,
help="Generate output compatible with requirements.txt.",
)
@option(
'--dev',
'-d',
is_flag=True,
default=False,
help="Generate output compatible with requirements.txt for the development dependencies.",
)
@option(
'--clear', is_flag=True, default=False, help="Clear the dependency cache."
)
@option(
'--pre', is_flag=True, default=False, help=u"Allow pre–releases."
)
@option(
'--keep-outdated',
is_flag=True,
default=False,
help=u"Keep out–dated dependencies from being updated in Pipfile.lock.",
)
def lock(
three=None,
python=False,
pypi_mirror=None,
verbose=False,
requirements=False,
dev=False,
clear=False,
pre=False,
keep_outdated=False,
):
from .core import ensure_project, do_init, do_lock
ensure_project(three=three, python=python)
if requirements:
do_init(dev=dev, requirements=requirements, pypi_mirror=pypi_mirror)
do_lock(
verbose=verbose, clear=clear, pre=pre, keep_outdated=keep_outdated, pypi_mirror=pypi_mirror
)
@command(
short_help="Spawns a shell within the virtualenv.",
context_settings=dict(ignore_unknown_options=True, allow_extra_args=True),
)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--fancy',
is_flag=True,
default=False,
help="Run in shell in fancy mode (for elegantly configured shells).",
)
@option(
'--anyway',
is_flag=True,
default=False,
help="Always spawn a subshell, even if one is already spawned.",
)
@argument('shell_args', nargs=-1)
def shell(
three=None, python=False, fancy=False, shell_args=None, anyway=False
):
from .core import load_dot_env, do_shell
if 'PIPENV_ACTIVE' in os.environ:
venv_name = os.environ.get(
'VIRTUAL_ENV', 'UNKNOWN_VIRTUAL_ENVIRONMENT'
)
if not anyway:
echo(
'{0} {1} {2}\nNo action taken to avoid nested environments.'.format(
crayons.normal('Shell for'),
crayons.green(venv_name, bold=True),
crayons.normal('already activated.', bold=True),
),
err=True,
)
sys.exit(1)
load_dot_env()
if os.name == 'nt':
fancy = True
do_shell(
three=three, python=python, fancy=fancy, shell_args=shell_args
)
@command(
add_help_option=False,
short_help="Spawns a command installed into the virtualenv.",
context_settings=dict(
ignore_unknown_options=True,
allow_interspersed_args=False,
allow_extra_args=True,
),
)
@argument('command')
@argument('args', nargs=-1)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
def run(command, args, three=None, python=False):
from .core import do_run
do_run(command=command, args=args, three=three, python=python)
@command(
short_help="Checks for security vulnerabilities and against PEP 508 markers provided in Pipfile.",
context_settings=dict(ignore_unknown_options=True, allow_extra_args=True),
)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--system', is_flag=True, default=False, help="Use system Python."
)
@option(
'--unused',
nargs=1,
default=False,
help="Given a code path, show potentially unused dependencies.",
)
@argument('args', nargs=-1)
def check(
three=None,
python=False,
system=False,
unused=False,
style=False,
args=None,
):
from .core import do_check
do_check(
three=three, python=python, system=system, unused=unused, args=args
)
@command(short_help="Runs lock, then sync.")
@argument('more_packages', nargs=-1)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--pypi-mirror',
default=PIPENV_PYPI_MIRROR,
nargs=1,
callback=validate_pypi_mirror,
help="Specify a PyPI mirror.",
)
@option(
'--verbose',
'-v',
is_flag=True,
default=False,
help="Verbose mode.",
callback=setup_verbose,
)
@option(
'--dev',
'-d',
is_flag=True,
default=False,
help="Install package(s) in [dev-packages].",
)
@option(
'--clear', is_flag=True, default=False, help="Clear the dependency cache."
)
@option('--bare', is_flag=True, default=False, help="Minimal output.")
@option(
'--pre', is_flag=True, default=False, help=u"Allow pre–releases."
)
@option(
'--keep-outdated',
is_flag=True,
default=False,
help=u"Keep out–dated dependencies from being updated in Pipfile.lock.",
)
@option(
'--sequential',
is_flag=True,
default=False,
help="Install dependencies one-at-a-time, instead of concurrently.",
)
@option(
'--outdated',
is_flag=True,
default=False,
help=u"List out–of–date dependencies.",
)
@option(
'--dry-run',
is_flag=True,
default=None,
help=u"List out–of–date dependencies.",
)
@argument('package', default=False)
@pass_context
def update(
ctx,
three=None,
python=False,
pypi_mirror=None,
system=False,
verbose=False,
clear=False,
keep_outdated=False,
pre=False,
dev=False,
bare=False,
sequential=False,
package=None,
dry_run=None,
outdated=False,
more_packages=None,
):
from .core import (
ensure_project,
do_outdated,
do_lock,
do_sync,
ensure_lockfile,
do_install,
project,
)
ensure_project(three=three, python=python, warn=True)
if not outdated:
outdated = bool(dry_run)
if outdated:
do_outdated(pypi_mirror=pypi_mirror)
if not package:
echo(
'{0} {1} {2} {3}{4}'.format(
crayons.white('Running', bold=True),
crayons.red('$ pipenv lock', bold=True),
crayons.white('then', bold=True),
crayons.red('$ pipenv sync', bold=True),
crayons.white('.', bold=True),
)
)
do_lock(
verbose=verbose, clear=clear, pre=pre, keep_outdated=keep_outdated, pypi_mirror=pypi_mirror
)
do_sync(
ctx=ctx,
install=install,
dev=dev,
three=three,
python=python,
bare=bare,
dont_upgrade=False,
user=False,
verbose=verbose,
clear=clear,
unused=False,
sequential=sequential,
pypi_mirror=pypi_mirror,
)
else:
for package in ([package] + list(more_packages) or []):
if package not in project.all_packages:
echo(
'{0}: {1} was not found in your Pipfile! Aborting.'
''.format(
crayons.red('Warning', bold=True),
crayons.green(package, bold=True),
),
err=True,
)
sys.exit(1)
ensure_lockfile(keep_outdated=project.lockfile_exists, pypi_mirror=pypi_mirror)
do_install(
package_name=package,
more_packages=more_packages,
dev=dev,
three=three,
python=python,
pypi_mirror=pypi_mirror,
system=system,
lock=True,
ignore_pipfile=False,
skip_lock=False,
verbose=verbose,
requirements=False,
sequential=sequential,
pre=pre,
code=False,
deploy=False,
keep_outdated=True,
selective_upgrade=True,
)
@command(
short_help=u"Displays currently–installed dependency graph information."
)
@option('--bare', is_flag=True, default=False, help="Minimal output.")
@option('--json', is_flag=True, default=False, help="Output JSON.")
@option('--json-tree', is_flag=True, default=False, help="Output JSON in nested tree.")
@option(
'--reverse', is_flag=True, default=False, help="Reversed dependency graph."
)
def graph(bare=False, json=False, json_tree=False, reverse=False):
from .core import do_graph
do_graph(bare=bare, json=json, json_tree=json_tree, reverse=reverse)
@command(short_help="View a given module in your editor.", name="open")
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@argument('module', nargs=1)
def run_open(module, three=None, python=None):
from .core import which, ensure_project
ensure_project(three=three, python=python, validate=False)
c = delegator.run(
'{0} -c "import {1}; print({1}.__file__);"'.format(
which('python'), module
)
)
try:
assert c.return_code == 0
except AssertionError:
echo(crayons.red('Module not found!'))
sys.exit(1)
if '__init__.py' in c.out:
p = os.path.dirname(c.out.strip().rstrip('cdo'))
else:
p = c.out.strip().rstrip('cdo')
echo(
crayons.normal('Opening {0!r} in your EDITOR.'.format(p), bold=True)
)
edit(filename=p)
sys.exit(0)
@command(short_help="Installs all packages specified in Pipfile.lock.")
@option(
'--verbose',
'-v',
is_flag=True,
default=False,
help="Verbose mode.",
callback=setup_verbose,
)
@option(
'--dev',
'-d',
is_flag=True,
default=False,
help="Additionally install package(s) in [dev-packages].",
)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--pypi-mirror',
default=PIPENV_PYPI_MIRROR,
nargs=1,
callback=validate_pypi_mirror,
help="Specify a PyPI mirror.",
)
@option('--bare', is_flag=True, default=False, help="Minimal output.")
@option(
'--clear', is_flag=True, default=False, help="Clear the dependency cache."
)
@option(
'--sequential',
is_flag=True,
default=False,
help="Install dependencies one-at-a-time, instead of concurrently.",
)
@pass_context
def sync(
ctx,
dev=False,
three=None,
python=None,
bare=False,
dont_upgrade=False,
user=False,
verbose=False,
clear=False,
unused=False,
package_name=None,
sequential=False,
pypi_mirror=None,
):
from .core import do_sync
do_sync(
ctx=ctx,
install=install,
dev=dev,
three=three,
python=python,
bare=bare,
dont_upgrade=dont_upgrade,
user=user,
verbose=verbose,
clear=clear,
unused=unused,
sequential=sequential,
pypi_mirror=pypi_mirror,
)
@command(
short_help="Uninstalls all packages not specified in Pipfile.lock."
)
@option(
'--verbose',
'-v',
is_flag=True,
default=False,
help="Verbose mode.",
callback=setup_verbose,
)
@option(
'--three/--two',
is_flag=True,
default=None,
help="Use Python 3/2 when creating virtualenv.",
)
@option(
'--python',
default=False,
nargs=1,
callback=validate_python_path,
help="Specify which version of Python virtualenv should use.",
)
@option(
'--dry-run',
is_flag=True,
default=False,
help="Just output unneeded packages.",
)
@pass_context
def clean(
ctx,
three=None,
python=None,
dry_run=False,
bare=False,
user=False,
verbose=False,
):
from .core import do_clean
do_clean(
ctx=ctx, three=three, python=python, dry_run=dry_run, verbose=verbose
)
cli.add_command(graph)
cli.add_command(install)
cli.add_command(uninstall)
cli.add_command(sync)
cli.add_command(lock)
cli.add_command(check)
cli.add_command(clean)
cli.add_command(shell)
cli.add_command(run)
cli.add_command(update)
cli.add_command(run_open)
if '-' not in ''.join(sys.argv) and len(sys.argv) > 1:
cli = DYMCommandCollection(sources=[cli])
if __name__ == '__main__':
cli()
| true
| true
|
7908385035473f24ddc095eb1f402ff08512b2ba
| 1,007
|
py
|
Python
|
pyvisdk/do/license_server_source.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
pyvisdk/do/license_server_source.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
pyvisdk/do/license_server_source.py
|
Infinidat/pyvisdk
|
f2f4e5f50da16f659ccc1d84b6a00f397fa997f8
|
[
"MIT"
] | null | null | null |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def LicenseServerSource(vim, *args, **kwargs):
'''Specify a license server reachable via IPv4 network.'''
obj = vim.client.factory.create('{urn:vim25}LicenseServerSource')
# do some validation checking...
if (len(args) + len(kwargs)) < 1:
        raise IndexError('Expected at least 1 argument, got: %d' % (len(args) + len(kwargs)))
required = [ 'licenseServer' ]
optional = [ 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
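A hypothetical usage sketch; `vim` stands in for a connected pyvisdk service instance, and the license server address is illustrative:

source = LicenseServerSource(vim, licenseServer='27000@license-host.example.com')
print(source.licenseServer)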
| 30.515152
| 124
| 0.60576
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
| true
| true
|
790838bbfa2f5448a91ac1a3564d8b9310969a11
| 929
|
py
|
Python
|
2015/11/solve.py
|
lamperi/aoc
|
1781dcbac0be18a086c10a9b76fb6a2d3595523c
|
[
"MIT"
] | null | null | null |
2015/11/solve.py
|
lamperi/aoc
|
1781dcbac0be18a086c10a9b76fb6a2d3595523c
|
[
"MIT"
] | null | null | null |
2015/11/solve.py
|
lamperi/aoc
|
1781dcbac0be18a086c10a9b76fb6a2d3595523c
|
[
"MIT"
] | null | null | null |
data = "cqjxjnds"
import string
import re
lc = string.ascii_lowercase
next = dict(zip(lc[:-1], lc[1:]))
three_seq = ["".join(z) for z in zip(lc[:-2], lc[1:-1], lc[2:])]
def check(pw):
if "i" in pw or "o" in pw or "l" in pw:
return False
three_match = False
for seq in three_seq:
if seq in pw:
three_match = True
if not three_match:
return False
doubles = set(re.findall(r'(.)\1', pw))
if len(doubles) < 2:
return False
return True
def inc(pw):
pw = list(pw)
i = -1
while pw[i] == 'z':
pw[i] = 'a'
i -= 1
pw[i] = next[pw[i]]
return "".join(pw)
# TEST
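# Expected output for the five checks below: False, False, False, True, True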
print(check("hijklmmn"))
print(check("abbceffg"))
print(check("abbcegjk"))
print(check("abcdffaa"))
print(check("ghjaabcc"))
# PART 1
pw = data
while not check(pw):
pw = inc(pw)
print(pw)
# PART 2
pw = inc(pw)
while not check(pw):
pw = inc(pw)
print(pw)
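# A couple of worked cases for inc(), following the carry rule above:
assert inc("xx") == "xy"   # plain increment of the last letter
assert inc("xz") == "ya"   # trailing 'z' wraps to 'a' and carries left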
| 17.865385
| 64
| 0.557589
|
data = "cqjxjnds"
import string
import re
lc = string.ascii_lowercase
next = dict(zip(lc[:-1], lc[1:]))
three_seq = ["".join(z) for z in zip(lc[:-2], lc[1:-1], lc[2:])]
def check(pw):
if "i" in pw or "o" in pw or "l" in pw:
return False
three_match = False
for seq in three_seq:
if seq in pw:
three_match = True
if not three_match:
return False
doubles = set(re.findall(r'(.)\1', pw))
if len(doubles) < 2:
return False
return True
def inc(pw):
pw = list(pw)
i = -1
while pw[i] == 'z':
pw[i] = 'a'
i -= 1
pw[i] = next[pw[i]]
return "".join(pw)
print(check("hijklmmn"))
print(check("abbceffg"))
print(check("abbcegjk"))
print(check("abcdffaa"))
print(check("ghjaabcc"))
pw = data
while not check(pw):
pw = inc(pw)
print(pw)
pw = inc(pw)
while not check(pw):
pw = inc(pw)
print(pw)
| true
| true
|
7908390b1e8b8ba76d42ea51f64cbfcd2c7e8784
| 818
|
py
|
Python
|
DSA 450 GFG/next_permutation.py
|
siddhi-244/CompetitiveProgrammingQuestionBank
|
4c265d41b82a7d4370c14d367f78effa9ed95d3c
|
[
"MIT"
] | 931
|
2020-04-18T11:57:30.000Z
|
2022-03-31T15:15:39.000Z
|
DSA 450 GFG/next_permutation.py
|
vanishasamriddhi/CompetitiveProgrammingQuestionBank
|
b5160a66013bda17c98070d24d3a932b833692f8
|
[
"MIT"
] | 661
|
2020-12-13T04:31:48.000Z
|
2022-03-15T19:11:54.000Z
|
DSA 450 GFG/next_permutation.py
|
Mayuri-cell/CompetitiveProgrammingQuestionBank
|
eca2257d7da5346f45bdd7a351cc95bde6ed5c7d
|
[
"MIT"
] | 351
|
2020-08-10T06:49:21.000Z
|
2022-03-25T04:02:12.000Z
|
# Link for the problem: https://leetcode.com/problems/next-permutation/
class Solution(object):
def nextPermutation(self, nums):
found = False
i = len(nums)-2
while i >=0:
if nums[i] < nums[i+1]:
found =True
break
i-=1
if not found:
nums.sort()
else:
m = self.findMaxIndex(i+1,nums,nums[i])
nums[i],nums[m] = nums[m],nums[i]
nums[i+1:] = nums[i+1:][::-1]
return nums
    def findMaxIndex(self,index,a,curr):
        # Return the index of the smallest element in a[index:] that is
        # greater than curr. The scanned suffix is non-increasing here, so
        # the last element greater than curr is also the smallest such one.
        ans = -1
        for i in range(index,len(a)):
            if a[i]>curr:
                ans = i
        return ans
ob1 = Solution()
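# Illustrative calls on the instance created above:
print(ob1.nextPermutation([1, 2, 3]))  # -> [1, 3, 2]
print(ob1.nextPermutation([3, 2, 1]))  # -> [1, 2, 3] (no larger permutation, wraps to sorted order)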
| 23.371429
| 72
| 0.462103
|
class Solution(object):
def nextPermutation(self, nums):
found = False
i = len(nums)-2
while i >=0:
if nums[i] < nums[i+1]:
found =True
break
i-=1
if not found:
nums.sort()
else:
m = self.findMaxIndex(i+1,nums,nums[i])
nums[i],nums[m] = nums[m],nums[i]
nums[i+1:] = nums[i+1:][::-1]
return nums
    def findMaxIndex(self,index,a,curr):
        ans = -1
        for i in range(index,len(a)):
            if a[i]>curr:
                ans = i
        return ans
ob1 = Solution()
| true
| true
|
79083937510863dae48a6665d4b274273be38709
| 8,908
|
py
|
Python
|
secedgar/core/company.py
|
Ahrvo-Trading-Systems/sec-edgar
|
b22f9aa2de0cafd98ecab884ece1e7d0f2be3381
|
[
"Apache-2.0"
] | null | null | null |
secedgar/core/company.py
|
Ahrvo-Trading-Systems/sec-edgar
|
b22f9aa2de0cafd98ecab884ece1e7d0f2be3381
|
[
"Apache-2.0"
] | null | null | null |
secedgar/core/company.py
|
Ahrvo-Trading-Systems/sec-edgar
|
b22f9aa2de0cafd98ecab884ece1e7d0f2be3381
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import os
import warnings
from datetime import date
from secedgar.cik_lookup import CIKLookup
from secedgar.client import NetworkClient
from secedgar.core._base import AbstractFiling
from secedgar.core.filing_types import FilingType
from secedgar.exceptions import FilingTypeError
from secedgar.utils import sanitize_date
class CompanyFilings(AbstractFiling):
"""Base class for receiving EDGAR filings.
Args:
cik_lookup (str): Central Index Key (CIK) for company of interest.
filing_type (Union[secedgar.core.filing_types.FilingType, None]): Valid filing type
enum. Defaults to None. If None, then all filing types for CIKs will be returned.
start_date (Union[str, datetime.datetime, datetime.date], optional): Date before
which not to fetch reports. Stands for "date after."
Defaults to None (will fetch all filings before ``end_date``).
end_date (Union[str, datetime.datetime, datetime.date], optional):
Date after which not to fetch reports.
Stands for "date before." Defaults to today.
count (int): Number of filings to fetch. Will fetch up to `count` if that many filings
are available. Defaults to all filings available.
ownership (str): Must be in {"include", "exclude"}. Whether or not to include ownership
filings.
match_format (str): Must be in {"EXACT", "AMEND", "ALL"}.
kwargs: See kwargs accepted for :class:`secedgar.client.network_client.NetworkClient`.
.. versionadded:: 0.1.5
"""
def __init__(self,
cik_lookup,
filing_type=None,
start_date=None,
end_date=date.today(),
client=None,
count=None,
ownership="include",
match_format="ALL",
**kwargs):
# Leave params before other setters
self._params = {
"action": "getcompany",
"output": "xml",
"owner": ownership,
"start": 0,
}
self.start_date = start_date
self.end_date = end_date
self.filing_type = filing_type
self.count = count
self.match_format = match_format
# Make default client NetworkClient and pass in kwargs
self._client = client if client is not None else NetworkClient(**kwargs)
# make CIKLookup object for users if not given
self.cik_lookup = cik_lookup
@property
def path(self):
"""str: Path added to client base."""
return "cgi-bin/browse-edgar"
@property
def params(self):
""":obj:`dict`: Parameters to include in requests."""
return self._params
@property
def client(self):
"""``secedgar.client._base``: Client to use to make requests."""
return self._client
@property
def start_date(self):
"""Union([datetime.date, datetime.datetime, str]): Date before which no filings fetched."""
return self._start_date
@property
def match_format(self):
"""The match format to use when searching for filings."""
return self._match_format
@match_format.setter
def match_format(self, val):
if val in ["EXACT", "AMEND", "ALL"]:
self._match_format = val
else:
raise ValueError("Format must be one of EXACT,AMEND,ALL")
@start_date.setter
def start_date(self, val):
if val is not None:
self._params["datea"] = sanitize_date(val)
self._start_date = val
else:
self._start_date = None
@property
def end_date(self):
"""Union([datetime.date, datetime.datetime, str]): Date after which no filings fetched."""
return self._end_date
@end_date.setter
def end_date(self, val):
self._params["dateb"] = sanitize_date(val)
self._end_date = val
@property
def filing_type(self):
"""``secedgar.core.FilingType``: FilingType enum of filing."""
return self._filing_type
@filing_type.setter
def filing_type(self, filing_type):
if isinstance(filing_type, FilingType):
self._params["type"] = filing_type.value
elif filing_type is not None:
raise FilingTypeError
self._filing_type = filing_type
@property
def count(self):
"""Number of filings to fetch."""
return self._count
@count.setter
def count(self, val):
if val is None:
self._count = None
elif not isinstance(val, int):
raise TypeError("Count must be positive integer or None.")
elif val < 1:
raise ValueError("Count must be positive integer or None.")
else:
self._count = val
self._params["count"] = val
@property
def cik_lookup(self):
"""``secedgar.cik_lookup.CIKLookup``: CIKLookup object."""
return self._cik_lookup
@cik_lookup.setter
def cik_lookup(self, val):
if not isinstance(val, CIKLookup):
val = CIKLookup(val, client=self.client)
self._cik_lookup = val
def get_urls(self, **kwargs):
"""Get urls for all CIKs given to Filing object.
Args:
**kwargs: Anything to be passed to requests when making get request.
See keyword arguments accepted for
``secedgar.client._base.AbstractClient.get_soup``.
Returns:
urls (list): List of urls for txt files to download.
"""
return {
key: self._get_urls_for_cik(cik, **kwargs)
for key, cik in self.cik_lookup.lookup_dict.items()
}
# TODO: Change this to return accession numbers that are turned into URLs later
def _get_urls_for_cik(self, cik, **kwargs):
"""Get all urls for specific company according to CIK.
Must match start date, end date, filing_type, and count parameters.
Args:
cik (str): CIK for company.
**kwargs: Anything to be passed to requests when making get request.
See keyword arguments accepted for
``secedgar.client._base.AbstractClient.get_soup``.
Returns:
txt_urls (list of str): Up to the desired number of URLs for that specific company
if available.
"""
self.params["CIK"] = cik
links = []
self.params["start"] = 0 # set start back to 0 before paginating
while self.count is None or len(links) < self.count:
data = self.client.get_soup(self.path, self.params, **kwargs)
links.extend([link.string for link in data.find_all("filinghref")])
self.params["start"] += self.client.batch_size
if len(data.find_all("filinghref")) == 0: # no more filings
break
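        # Convert each filing index URL into the matching full-submission .txt URL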
txt_urls = [link[:link.rfind("-")].strip() + ".txt" for link in links]
if isinstance(self.count, int) and len(txt_urls) < self.count:
warnings.warn(
"Only {num} of {count} filings were found for {cik}.".format(
num=len(txt_urls), count=self.count, cik=cik))
# Takes `count` filings at most
return txt_urls[:self.count]
def save(self, directory, dir_pattern=None, file_pattern=None):
"""Save files in specified directory.
Each txt url looks something like:
https://www.sec.gov/Archives/edgar/data/1018724/000101872419000043/0001018724-19-000043.txt
Args:
directory (str): Path to directory where files should be saved.
dir_pattern (str): Format string for subdirectories. Default is "{cik}/{type}".
Valid options are {cik} and/or {type}.
file_pattern (str): Format string for files. Default is "{accession_number}".
Valid options are {accession_number}.
Returns:
None
Raises:
ValueError: If no text urls are available for given filing object.
"""
urls = self.get_urls_safely()
if dir_pattern is None:
dir_pattern = os.path.join("{cik}", "{type}")
if file_pattern is None:
file_pattern = "{accession_number}"
inputs = []
for cik, links in urls.items():
formatted_dir = dir_pattern.format(cik=cik,
type=self.filing_type.value)
for link in links:
formatted_file = file_pattern.format(
accession_number=self.get_accession_number(link))
path = os.path.join(directory, formatted_dir, formatted_file)
inputs.append((link, path))
loop = asyncio.get_event_loop()
loop.run_until_complete(self.client.wait_for_download_async(inputs))
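A minimal usage sketch, assuming network access; FilingType.FILING_10K and the user_agent keyword (forwarded to NetworkClient via **kwargs) are assumptions about the surrounding library, not shown in this file:

from secedgar.core.company import CompanyFilings
from secedgar.core.filing_types import FilingType

filings = CompanyFilings(cik_lookup="aapl",
                         filing_type=FilingType.FILING_10K,
                         count=5,
                         user_agent="Your Name (your.email@example.com)")
filings.save("./filings")  # downloads up to 5 10-K submissions for the resolved CIK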
| 36.359184
| 99
| 0.60586
|
import asyncio
import os
import warnings
from datetime import date
from secedgar.cik_lookup import CIKLookup
from secedgar.client import NetworkClient
from secedgar.core._base import AbstractFiling
from secedgar.core.filing_types import FilingType
from secedgar.exceptions import FilingTypeError
from secedgar.utils import sanitize_date
class CompanyFilings(AbstractFiling):
def __init__(self,
cik_lookup,
filing_type=None,
start_date=None,
end_date=date.today(),
client=None,
count=None,
ownership="include",
match_format="ALL",
**kwargs):
self._params = {
"action": "getcompany",
"output": "xml",
"owner": ownership,
"start": 0,
}
self.start_date = start_date
self.end_date = end_date
self.filing_type = filing_type
self.count = count
self.match_format = match_format
self._client = client if client is not None else NetworkClient(**kwargs)
self.cik_lookup = cik_lookup
@property
def path(self):
return "cgi-bin/browse-edgar"
@property
def params(self):
return self._params
@property
def client(self):
return self._client
@property
def start_date(self):
return self._start_date
@property
def match_format(self):
return self._match_format
@match_format.setter
def match_format(self, val):
if val in ["EXACT", "AMEND", "ALL"]:
self._match_format = val
else:
raise ValueError("Format must be one of EXACT,AMEND,ALL")
@start_date.setter
def start_date(self, val):
if val is not None:
self._params["datea"] = sanitize_date(val)
self._start_date = val
else:
self._start_date = None
@property
def end_date(self):
return self._end_date
@end_date.setter
def end_date(self, val):
self._params["dateb"] = sanitize_date(val)
self._end_date = val
@property
def filing_type(self):
return self._filing_type
@filing_type.setter
def filing_type(self, filing_type):
if isinstance(filing_type, FilingType):
self._params["type"] = filing_type.value
elif filing_type is not None:
raise FilingTypeError
self._filing_type = filing_type
@property
def count(self):
return self._count
@count.setter
def count(self, val):
if val is None:
self._count = None
elif not isinstance(val, int):
raise TypeError("Count must be positive integer or None.")
elif val < 1:
raise ValueError("Count must be positive integer or None.")
else:
self._count = val
self._params["count"] = val
@property
def cik_lookup(self):
return self._cik_lookup
@cik_lookup.setter
def cik_lookup(self, val):
if not isinstance(val, CIKLookup):
val = CIKLookup(val, client=self.client)
self._cik_lookup = val
def get_urls(self, **kwargs):
return {
key: self._get_urls_for_cik(cik, **kwargs)
for key, cik in self.cik_lookup.lookup_dict.items()
}
def _get_urls_for_cik(self, cik, **kwargs):
self.params["CIK"] = cik
links = []
self.params["start"] = 0
while self.count is None or len(links) < self.count:
data = self.client.get_soup(self.path, self.params, **kwargs)
links.extend([link.string for link in data.find_all("filinghref")])
self.params["start"] += self.client.batch_size
if len(data.find_all("filinghref")) == 0:
break
txt_urls = [link[:link.rfind("-")].strip() + ".txt" for link in links]
if isinstance(self.count, int) and len(txt_urls) < self.count:
warnings.warn(
"Only {num} of {count} filings were found for {cik}.".format(
num=len(txt_urls), count=self.count, cik=cik))
return txt_urls[:self.count]
def save(self, directory, dir_pattern=None, file_pattern=None):
urls = self.get_urls_safely()
if dir_pattern is None:
dir_pattern = os.path.join("{cik}", "{type}")
if file_pattern is None:
file_pattern = "{accession_number}"
inputs = []
for cik, links in urls.items():
formatted_dir = dir_pattern.format(cik=cik,
type=self.filing_type.value)
for link in links:
formatted_file = file_pattern.format(
accession_number=self.get_accession_number(link))
path = os.path.join(directory, formatted_dir, formatted_file)
inputs.append((link, path))
loop = asyncio.get_event_loop()
loop.run_until_complete(self.client.wait_for_download_async(inputs))
| true
| true
|
790839ac243e96c58a803971d7e398d0e116d55a
| 2,109
|
py
|
Python
|
var/spack/repos/builtin/packages/meme/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/meme/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/meme/package.py
|
MiddelkoopT/spack
|
4d94c4c4600f42a7a3bb3d06ec879140bc259304
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1
|
2022-01-18T23:39:24.000Z
|
2022-01-18T23:39:24.000Z
|
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
from spack.version import Version
class Meme(AutotoolsPackage):
"""The MEME Suite allows the biologist to discover novel motifs in
collections of unaligned nucleotide or protein sequences, and to perform a
wide variety of other motif-based analyses."""
homepage = "http://meme-suite.org"
url = "http://meme-suite.org/meme-software/5.1.1/meme-5.1.1.tar.gz"
version('5.3.0', sha256='b2ddec9db972fcf77b29c7deb62df8b1dd8a6638c13c1aa06a5d563c4a7ff756')
version('5.2.0', sha256='0cbf8c2172e9b6c07855b8aeec457f4825f0b132f8cbb11192880e2f6033f54f')
version('5.1.1', sha256='38d73d256d431ad4eb7da2c817ce56ff2b4e26c39387ff0d6ada088938b38eb5')
version('4.12.0', sha256='49ff80f842b59d328588acfcd1d15bf94c55fed661d22b0f95f37430cc363a06')
version('4.11.4', sha256='3e869ff57e327a9c8615dbef784e3f1095f7f7a0120cecd55efe10c3f2ee8eb3')
variant('mpi', default=True, description='Enable MPI support')
variant('image-magick', default=False, description='Enable image-magick for png output')
depends_on('zlib', type=('link'))
depends_on('libgcrypt', type=('link'))
depends_on('perl', type=('build', 'run'))
depends_on('python@2.7:', type=('build', 'run'))
depends_on('mpi', when='+mpi')
depends_on('imagemagick', type=('build', 'run'), when='+image-magick')
depends_on('perl-xml-parser', type=('build', 'run'))
def url_for_version(self, version):
url = 'http://meme-suite.org/meme-software/{0}/meme{1}{2}.tar.gz'
sep = '-' if version >= Version('5.0.2') else '_'
return url.format(version.up_to(3), sep, version)
def configure_args(self):
spec = self.spec
# have meme build its own versions of libxml2/libxslt, see #6736
args = ['--enable-build-libxml2', '--enable-build-libxslt']
if '~mpi' in spec:
args += ['--enable-serial']
return args
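# Editorial note (not in the original recipe): the variants above are selected
# on the Spack command line, e.g.
#   spack install meme~mpi           # serial build (adds --enable-serial)
#   spack install meme+image-magick  # pull in imagemagick for png output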
| 44.87234
| 96
| 0.697013
|
from spack import *
from spack.version import Version
class Meme(AutotoolsPackage):
homepage = "http://meme-suite.org"
url = "http://meme-suite.org/meme-software/5.1.1/meme-5.1.1.tar.gz"
version('5.3.0', sha256='b2ddec9db972fcf77b29c7deb62df8b1dd8a6638c13c1aa06a5d563c4a7ff756')
version('5.2.0', sha256='0cbf8c2172e9b6c07855b8aeec457f4825f0b132f8cbb11192880e2f6033f54f')
version('5.1.1', sha256='38d73d256d431ad4eb7da2c817ce56ff2b4e26c39387ff0d6ada088938b38eb5')
version('4.12.0', sha256='49ff80f842b59d328588acfcd1d15bf94c55fed661d22b0f95f37430cc363a06')
version('4.11.4', sha256='3e869ff57e327a9c8615dbef784e3f1095f7f7a0120cecd55efe10c3f2ee8eb3')
variant('mpi', default=True, description='Enable MPI support')
variant('image-magick', default=False, description='Enable image-magick for png output')
depends_on('zlib', type=('link'))
depends_on('libgcrypt', type=('link'))
depends_on('perl', type=('build', 'run'))
depends_on('python@2.7:', type=('build', 'run'))
depends_on('mpi', when='+mpi')
depends_on('imagemagick', type=('build', 'run'), when='+image-magick')
depends_on('perl-xml-parser', type=('build', 'run'))
def url_for_version(self, version):
url = 'http://meme-suite.org/meme-software/{0}/meme{1}{2}.tar.gz'
sep = '-' if version >= Version('5.0.2') else '_'
return url.format(version.up_to(3), sep, version)
def configure_args(self):
spec = self.spec
args = ['--enable-build-libxml2', '--enable-build-libxslt']
if '~mpi' in spec:
args += ['--enable-serial']
return args
| true
| true
|
79083aa16d5d92fd67caba5eb952e4b0176daeb1
| 6,233
|
py
|
Python
|
agent.py
|
cisc474projectgroup/cartpole-q-learning
|
d7215990c8bdf8c1ff20cdfa3a7530e1a2c641b5
|
[
"MIT"
] | null | null | null |
agent.py
|
cisc474projectgroup/cartpole-q-learning
|
d7215990c8bdf8c1ff20cdfa3a7530e1a2c641b5
|
[
"MIT"
] | null | null | null |
agent.py
|
cisc474projectgroup/cartpole-q-learning
|
d7215990c8bdf8c1ff20cdfa3a7530e1a2c641b5
|
[
"MIT"
] | null | null | null |
import random
import copy
from collections import defaultdict
from collections import deque
from collections import namedtuple
from matplotlib import pyplot as plt
import numpy as np
class Q():
def __init__(self, n_actions, observation_space, bin_size, low_bound=None, high_bound=None, initial_mean=0.0, initial_std=0.0):
self.n_actions = n_actions
self._observation_dimension = 1
for d in observation_space.shape:
self._observation_dimension *= d
self._bin_sizes = bin_size if isinstance(bin_size, list) else [bin_size] * self._observation_dimension
self._dimension_bins = []
for i, low, high in self._low_high_iter(observation_space, low_bound, high_bound):
b_size = self._bin_sizes[i]
bins = self._make_bins(low, high, b_size)
print(bins)
self._dimension_bins.append(bins)
# if we encounter the new observation, we initialize action evaluations
self.table = defaultdict(lambda: initial_std * np.random.randn(self.n_actions) + initial_mean)
@classmethod
def _make_bins(cls, low, high, bin_size):
bins = np.arange(low, high, (float(high) - float(low)) / (bin_size - 2)) # exclude both ends
if min(bins) < 0 and 0 not in bins:
bins = np.sort(np.append(bins, [0])) # 0 centric bins
return bins
@classmethod
def _low_high_iter(cls, observation_space, low_bound, high_bound):
lows = observation_space.low
highs = observation_space.high
for i in range(len(lows)):
low = lows[i]
if low_bound is not None:
_low_bound = low_bound if not isinstance(low_bound, list) else low_bound[i]
low = low if _low_bound is None else max(low, _low_bound)
high = highs[i]
if high_bound is not None:
_high_bound = high_bound if not isinstance(high_bound, list) else high_bound[i]
high = high if _high_bound is None else min(high, _high_bound)
yield i, low, high
def observation_to_state(self, observation):
state = 0
# caution: bin_size over 10 will not work accurately
unit = max(self._bin_sizes)
for d, o in enumerate(observation.flatten()):
state = state + np.digitize(o, self._dimension_bins[d]) * pow(unit, d) # bin_size numeral system
return state
def values(self, observation):
state = self.observation_to_state(observation)
return self.table[state]
class Agent():
def __init__(self, q, epsilon=0.05):
self.q = q
self.epsilon = epsilon
def act(self, observation):
action = -1
if np.random.random() < self.epsilon:
action = np.random.choice(self.q.n_actions)
else:
action = np.argmax(self.q.values(observation))
return action
class Trainer():
def __init__(self, agent, gamma=0.95, learning_rate=0.1, learning_rate_decay=None, epsilon=0.05, epsilon_decay=None, max_step=-1,target=500):
self.agent = agent
self.gamma = gamma
self.learning_rate = learning_rate
self.learning_rate_decay = learning_rate_decay
self.epsilon = epsilon
self.epsilon_decay = epsilon_decay
self.max_step = max_step
def train(self, env, episode_count, render=False):
mean_step_all =[]
mean_q_all=[]
goal_time_all=[]
reward_all=[]
self.agent.epsilon = self.epsilon
values = []
steps = deque(maxlen=100)
lr = self.learning_rate
for i in range(episode_count):
reward_total = 0
goal_time =0
obs = env.reset()
step = 0
done = False
while not done:
if render:
env.render()
action = self.agent.act(obs)
                # assumes env.step() returns the per-step goal time in the fourth slot
                next_obs, reward, done, step_goal_time = env.step(action)
                reward_total += reward
                goal_time += step_goal_time  # the original `goal_time += goal_time` doubled itself each step
state = self.agent.q.observation_to_state(obs)
future = 0 if done else np.max(self.agent.q.values(next_obs))
value = self.agent.q.table[state][action]
self.agent.q.table[state][action] += lr * (reward + self.gamma * future - value)
obs = next_obs
values.append(value)
step += 1
if self.max_step > 0 and step > self.max_step:
done = True
else:
mean = np.mean(values)
steps.append(step)
mean_step = np.mean(steps)
print("Episode {}: {}steps(avg{}). epsilon={:.3f}, lr={:.3f}, mean q value={:.2f}".format(
i, step, mean_step, self.agent.epsilon, lr, mean)
)
mean_step_all.append(mean_step)
mean_q_all.append(mean)
                reward_all.append(reward_total)
                goal_time_all.append(goal_time)  # was never populated, so the plots below drew empty series
if mean_step>1000:
render=True
if self.epsilon_decay is not None:
self.agent.epsilon = self.epsilon_decay(self.agent.epsilon, i)
if self.learning_rate_decay is not None:
lr = self.learning_rate_decay(lr, i)
        # plot in comparison
plt.xlabel('Episodes')
plt.ylabel('reward')
# plt.plot(mean_step_all, label='Q-learning', color='blue')
plt.plot(reward_all, label='Q-learning', color='yellow')
plt.plot(goal_time_all, label='Q-learning', color='green')
# plt.legend(['reward', 'Q-learning'], loc='upper right')
plt.title('reward/Episode')
plt.show()
        # plot in comparison
plt.xlabel('Episodes')
plt.ylabel('goal_time')
# plt.plot(mean_step_all, label='Q-learning', color='blue')
plt.plot(goal_time_all, label='Q-learning', color='green')
# plt.legend(['reward', 'Q-learning'], loc='upper right')
plt.title('goal/Episode')
plt.show()
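# Editorial usage sketch (assumes a CartPole-style env whose step() returns a
# numeric per-step goal time in the fourth slot, as unpacked in train() above):
#   import gym
#   env = gym.make("CartPole-v0")
#   q = Q(env.action_space.n, env.observation_space, bin_size=7,
#         low_bound=[-2.4, -2.0, -0.5, -2.0], high_bound=[2.4, 2.0, 0.5, 2.0])
#   agent = Agent(q, epsilon=0.05)
#   Trainer(agent, gamma=0.95, learning_rate=0.1).train(env, episode_count=500)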
| 36.664706
| 145
| 0.574683
|
import random
import copy
from collections import defaultdict
from collections import deque
from collections import namedtuple
from matplotlib import pyplot as plt
import numpy as np
class Q():
def __init__(self, n_actions, observation_space, bin_size, low_bound=None, high_bound=None, initial_mean=0.0, initial_std=0.0):
self.n_actions = n_actions
self._observation_dimension = 1
for d in observation_space.shape:
self._observation_dimension *= d
self._bin_sizes = bin_size if isinstance(bin_size, list) else [bin_size] * self._observation_dimension
self._dimension_bins = []
for i, low, high in self._low_high_iter(observation_space, low_bound, high_bound):
b_size = self._bin_sizes[i]
bins = self._make_bins(low, high, b_size)
print(bins)
self._dimension_bins.append(bins)
self.table = defaultdict(lambda: initial_std * np.random.randn(self.n_actions) + initial_mean)
@classmethod
def _make_bins(cls, low, high, bin_size):
bins = np.arange(low, high, (float(high) - float(low)) / (bin_size - 2))
if min(bins) < 0 and 0 not in bins:
bins = np.sort(np.append(bins, [0]))
return bins
@classmethod
def _low_high_iter(cls, observation_space, low_bound, high_bound):
lows = observation_space.low
highs = observation_space.high
for i in range(len(lows)):
low = lows[i]
if low_bound is not None:
_low_bound = low_bound if not isinstance(low_bound, list) else low_bound[i]
low = low if _low_bound is None else max(low, _low_bound)
high = highs[i]
if high_bound is not None:
_high_bound = high_bound if not isinstance(high_bound, list) else high_bound[i]
high = high if _high_bound is None else min(high, _high_bound)
yield i, low, high
def observation_to_state(self, observation):
state = 0
unit = max(self._bin_sizes)
for d, o in enumerate(observation.flatten()):
state = state + np.digitize(o, self._dimension_bins[d]) * pow(unit, d)
return state
def values(self, observation):
state = self.observation_to_state(observation)
return self.table[state]
class Agent():
def __init__(self, q, epsilon=0.05):
self.q = q
self.epsilon = epsilon
def act(self, observation):
action = -1
if np.random.random() < self.epsilon:
action = np.random.choice(self.q.n_actions)
else:
action = np.argmax(self.q.values(observation))
return action
class Trainer():
def __init__(self, agent, gamma=0.95, learning_rate=0.1, learning_rate_decay=None, epsilon=0.05, epsilon_decay=None, max_step=-1,target=500):
self.agent = agent
self.gamma = gamma
self.learning_rate = learning_rate
self.learning_rate_decay = learning_rate_decay
self.epsilon = epsilon
self.epsilon_decay = epsilon_decay
self.max_step = max_step
def train(self, env, episode_count, render=False):
mean_step_all =[]
mean_q_all=[]
goal_time_all=[]
reward_all=[]
self.agent.epsilon = self.epsilon
values = []
steps = deque(maxlen=100)
lr = self.learning_rate
for i in range(episode_count):
reward_total = 0
goal_time =0
obs = env.reset()
step = 0
done = False
while not done:
if render:
env.render()
action = self.agent.act(obs)
                next_obs, reward, done, step_goal_time = env.step(action)
                reward_total += reward
                goal_time += step_goal_time
state = self.agent.q.observation_to_state(obs)
future = 0 if done else np.max(self.agent.q.values(next_obs))
value = self.agent.q.table[state][action]
self.agent.q.table[state][action] += lr * (reward + self.gamma * future - value)
obs = next_obs
values.append(value)
step += 1
if self.max_step > 0 and step > self.max_step:
done = True
else:
mean = np.mean(values)
steps.append(step)
mean_step = np.mean(steps)
print("Episode {}: {}steps(avg{}). epsilon={:.3f}, lr={:.3f}, mean q value={:.2f}".format(
i, step, mean_step, self.agent.epsilon, lr, mean)
)
mean_step_all.append(mean_step)
mean_q_all.append(mean)
                reward_all.append(reward_total)
                goal_time_all.append(goal_time)
if mean_step>1000:
render=True
if self.epsilon_decay is not None:
self.agent.epsilon = self.epsilon_decay(self.agent.epsilon, i)
if self.learning_rate_decay is not None:
lr = self.learning_rate_decay(lr, i)
plt.xlabel('Episodes')
plt.ylabel('reward')
plt.plot(reward_all, label='Q-learning', color='yellow')
plt.plot(goal_time_all, label='Q-learning', color='green')
plt.title('reward/Episode')
plt.show()
plt.xlabel('Episodes')
plt.ylabel('goal_time')
plt.plot(goal_time_all, label='Q-learning', color='green')
plt.title('goal/Episode')
plt.show()
| true
| true
|
79083dc89001dad5e8c587aa8fb669efa87334d8
| 274
|
py
|
Python
|
resources/cipher_suite_grabber.py
|
berney/TLS-Attacker
|
32c5bcb87a57f9a3b1ff3f126e6432010421875b
|
[
"ECL-2.0",
"Apache-2.0"
] | 593
|
2016-04-20T16:19:52.000Z
|
2020-11-05T01:22:01.000Z
|
resources/cipher_suite_grabber.py
|
berney/TLS-Attacker
|
32c5bcb87a57f9a3b1ff3f126e6432010421875b
|
[
"ECL-2.0",
"Apache-2.0"
] | 75
|
2016-05-02T22:34:02.000Z
|
2020-11-06T11:02:36.000Z
|
resources/cipher_suite_grabber.py
|
berney/TLS-Attacker
|
32c5bcb87a57f9a3b1ff3f126e6432010421875b
|
[
"ECL-2.0",
"Apache-2.0"
] | 130
|
2016-04-21T05:16:09.000Z
|
2020-10-26T01:09:52.000Z
|
#!/usr/bin/env python2
import sys
import re
import datetime
import hashlib
import optparse
import urllib2
# cheers Dirk :)
url = 'https://testssl.sh/mapping-rfc.txt'
for line in urllib2.urlopen(url):
cipher = line.split()
print cipher[1]+'(0'+cipher[0]+'),'
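# Editorial note: a rough Python 3 equivalent of the loop above (assumption;
# the original targets Python 2's urllib2 and print statement):
#   import urllib.request
#   with urllib.request.urlopen(url) as resp:
#       for line in resp.read().decode().splitlines():
#           cipher = line.split()
#           print(cipher[1] + '(0' + cipher[0] + '),')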
| 16.117647
| 42
| 0.686131
|
import sys
import re
import datetime
import hashlib
import optparse
import urllib2
url = 'https://testssl.sh/mapping-rfc.txt'
for line in urllib2.urlopen(url):
cipher = line.split()
print cipher[1]+'(0'+cipher[0]+'),'
| false
| true
|
79083f031ae30d50fc4d6738156e85935cf331f0
| 9,102
|
py
|
Python
|
homeassistant/components/homematicip_cloud/light.py
|
itewk/home-assistant
|
769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4
|
[
"Apache-2.0"
] | 23
|
2017-11-15T21:03:53.000Z
|
2021-03-29T21:33:48.000Z
|
homeassistant/components/homematicip_cloud/light.py
|
itewk/home-assistant
|
769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4
|
[
"Apache-2.0"
] | 6
|
2021-02-08T20:59:36.000Z
|
2022-03-12T00:52:11.000Z
|
homeassistant/components/homematicip_cloud/light.py
|
itewk/home-assistant
|
769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4
|
[
"Apache-2.0"
] | 10
|
2018-01-01T00:12:51.000Z
|
2021-12-21T23:08:05.000Z
|
"""Support for HomematicIP Cloud lights."""
import logging
from typing import Any, Dict
from homematicip.aio.device import (
AsyncBrandDimmer,
AsyncBrandSwitchMeasuring,
AsyncBrandSwitchNotificationLight,
AsyncDimmer,
AsyncFullFlushDimmer,
AsyncPluggableDimmer,
)
from homematicip.base.enums import RGBColorState
from homematicip.base.functionalChannels import NotificationLightChannel
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_NAME,
ATTR_HS_COLOR,
ATTR_TRANSITION,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
Light,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN as HMIPC_DOMAIN, HMIPC_HAPID, HomematicipGenericDevice
from .hap import HomematicipHAP
_LOGGER = logging.getLogger(__name__)
ATTR_TODAY_ENERGY_KWH = "today_energy_kwh"
ATTR_CURRENT_POWER_W = "current_power_w"
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None
) -> None:
"""Old way of setting up HomematicIP Cloud lights."""
pass
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the HomematicIP Cloud lights from a config entry."""
hap = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]]
entities = []
for device in hap.home.devices:
if isinstance(device, AsyncBrandSwitchMeasuring):
entities.append(HomematicipLightMeasuring(hap, device))
elif isinstance(device, AsyncBrandSwitchNotificationLight):
entities.append(HomematicipLight(hap, device))
entities.append(
HomematicipNotificationLight(hap, device, device.topLightChannelIndex)
)
entities.append(
HomematicipNotificationLight(
hap, device, device.bottomLightChannelIndex
)
)
elif isinstance(
device,
(AsyncDimmer, AsyncPluggableDimmer, AsyncBrandDimmer, AsyncFullFlushDimmer),
):
entities.append(HomematicipDimmer(hap, device))
if entities:
async_add_entities(entities)
class HomematicipLight(HomematicipGenericDevice, Light):
"""Representation of a HomematicIP Cloud light device."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize the light device."""
super().__init__(hap, device)
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self._device.on
async def async_turn_on(self, **kwargs) -> None:
"""Turn the device on."""
await self._device.turn_on()
async def async_turn_off(self, **kwargs) -> None:
"""Turn the device off."""
await self._device.turn_off()
class HomematicipLightMeasuring(HomematicipLight):
"""Representation of a HomematicIP Cloud measuring light device."""
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the generic device."""
state_attr = super().device_state_attributes
current_power_w = self._device.currentPowerConsumption
if current_power_w > 0.05:
state_attr[ATTR_CURRENT_POWER_W] = round(current_power_w, 2)
state_attr[ATTR_TODAY_ENERGY_KWH] = round(self._device.energyCounter, 2)
return state_attr
class HomematicipDimmer(HomematicipGenericDevice, Light):
"""Representation of HomematicIP Cloud dimmer light device."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize the dimmer light device."""
super().__init__(hap, device)
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self._device.dimLevel is not None and self._device.dimLevel > 0.0
@property
def brightness(self) -> int:
"""Return the brightness of this light between 0..255."""
return int((self._device.dimLevel or 0.0) * 255)
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
async def async_turn_on(self, **kwargs) -> None:
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs:
await self._device.set_dim_level(kwargs[ATTR_BRIGHTNESS] / 255.0)
else:
await self._device.set_dim_level(1)
async def async_turn_off(self, **kwargs) -> None:
"""Turn the light off."""
await self._device.set_dim_level(0)
class HomematicipNotificationLight(HomematicipGenericDevice, Light):
"""Representation of HomematicIP Cloud dimmer light device."""
def __init__(self, hap: HomematicipHAP, device, channel: int) -> None:
"""Initialize the dimmer light device."""
self.channel = channel
if self.channel == 2:
super().__init__(hap, device, "Top")
else:
super().__init__(hap, device, "Bottom")
self._color_switcher = {
RGBColorState.WHITE: [0.0, 0.0],
RGBColorState.RED: [0.0, 100.0],
RGBColorState.YELLOW: [60.0, 100.0],
RGBColorState.GREEN: [120.0, 100.0],
RGBColorState.TURQUOISE: [180.0, 100.0],
RGBColorState.BLUE: [240.0, 100.0],
RGBColorState.PURPLE: [300.0, 100.0],
}
@property
def _func_channel(self) -> NotificationLightChannel:
return self._device.functionalChannels[self.channel]
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return (
self._func_channel.dimLevel is not None
and self._func_channel.dimLevel > 0.0
)
@property
def brightness(self) -> int:
"""Return the brightness of this light between 0..255."""
return int((self._func_channel.dimLevel or 0.0) * 255)
@property
def hs_color(self) -> tuple:
"""Return the hue and saturation color value [float, float]."""
simple_rgb_color = self._func_channel.simpleRGBColorState
return self._color_switcher.get(simple_rgb_color, [0.0, 0.0])
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the generic device."""
state_attr = super().device_state_attributes
if self.is_on:
state_attr[ATTR_COLOR_NAME] = self._func_channel.simpleRGBColorState
return state_attr
@property
def name(self) -> str:
"""Return the name of the generic device."""
return f"{super().name} Notification"
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self.__class__.__name__}_{self.post}_{self._device.id}"
async def async_turn_on(self, **kwargs) -> None:
"""Turn the light on."""
# Use hs_color from kwargs,
# if not applicable use current hs_color.
hs_color = kwargs.get(ATTR_HS_COLOR, self.hs_color)
simple_rgb_color = _convert_color(hs_color)
# Use brightness from kwargs,
# if not applicable use current brightness.
brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness)
# If no kwargs, use default value.
if not kwargs:
brightness = 255
# Minimum brightness is 10, otherwise the led is disabled
brightness = max(10, brightness)
dim_level = brightness / 255.0
transition = kwargs.get(ATTR_TRANSITION, 0.5)
await self._device.set_rgb_dim_level_with_time(
channelIndex=self.channel,
rgb=simple_rgb_color,
dimLevel=dim_level,
onTime=0,
rampTime=transition,
)
async def async_turn_off(self, **kwargs) -> None:
"""Turn the light off."""
simple_rgb_color = self._func_channel.simpleRGBColorState
transition = kwargs.get(ATTR_TRANSITION, 0.5)
await self._device.set_rgb_dim_level_with_time(
channelIndex=self.channel,
rgb=simple_rgb_color,
dimLevel=0.0,
onTime=0,
rampTime=transition,
)
def _convert_color(color: tuple) -> RGBColorState:
"""
Convert the given color to the reduced RGBColorState color.
    RGBColorState contains only 8 colors including white and black,
so a conversion is required.
"""
if color is None:
return RGBColorState.WHITE
hue = int(color[0])
saturation = int(color[1])
if saturation < 5:
return RGBColorState.WHITE
if 30 < hue <= 90:
return RGBColorState.YELLOW
if 90 < hue <= 160:
return RGBColorState.GREEN
if 150 < hue <= 210:
return RGBColorState.TURQUOISE
if 210 < hue <= 270:
return RGBColorState.BLUE
if 270 < hue <= 330:
return RGBColorState.PURPLE
return RGBColorState.RED
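# Editorial sanity examples for _convert_color above (not in the original):
#   _convert_color((120.0, 100.0)) -> RGBColorState.GREEN
#   _convert_color((0.0, 3.0))     -> RGBColorState.WHITE  (saturation < 5)
#   _convert_color((350.0, 80.0))  -> RGBColorState.RED    (no range matched)
# The 90-160 and 150-210 hue ranges overlap; hues in 150-160 resolve to GREEN
# because the checks return in order.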
| 32.391459
| 88
| 0.652054
|
import logging
from typing import Any, Dict
from homematicip.aio.device import (
AsyncBrandDimmer,
AsyncBrandSwitchMeasuring,
AsyncBrandSwitchNotificationLight,
AsyncDimmer,
AsyncFullFlushDimmer,
AsyncPluggableDimmer,
)
from homematicip.base.enums import RGBColorState
from homematicip.base.functionalChannels import NotificationLightChannel
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_NAME,
ATTR_HS_COLOR,
ATTR_TRANSITION,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
Light,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN as HMIPC_DOMAIN, HMIPC_HAPID, HomematicipGenericDevice
from .hap import HomematicipHAP
_LOGGER = logging.getLogger(__name__)
ATTR_TODAY_ENERGY_KWH = "today_energy_kwh"
ATTR_CURRENT_POWER_W = "current_power_w"
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None
) -> None:
pass
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
hap = hass.data[HMIPC_DOMAIN][config_entry.data[HMIPC_HAPID]]
entities = []
for device in hap.home.devices:
if isinstance(device, AsyncBrandSwitchMeasuring):
entities.append(HomematicipLightMeasuring(hap, device))
elif isinstance(device, AsyncBrandSwitchNotificationLight):
entities.append(HomematicipLight(hap, device))
entities.append(
HomematicipNotificationLight(hap, device, device.topLightChannelIndex)
)
entities.append(
HomematicipNotificationLight(
hap, device, device.bottomLightChannelIndex
)
)
elif isinstance(
device,
(AsyncDimmer, AsyncPluggableDimmer, AsyncBrandDimmer, AsyncFullFlushDimmer),
):
entities.append(HomematicipDimmer(hap, device))
if entities:
async_add_entities(entities)
class HomematicipLight(HomematicipGenericDevice, Light):
def __init__(self, hap: HomematicipHAP, device) -> None:
super().__init__(hap, device)
@property
def is_on(self) -> bool:
return self._device.on
async def async_turn_on(self, **kwargs) -> None:
await self._device.turn_on()
async def async_turn_off(self, **kwargs) -> None:
await self._device.turn_off()
class HomematicipLightMeasuring(HomematicipLight):
@property
def device_state_attributes(self) -> Dict[str, Any]:
state_attr = super().device_state_attributes
current_power_w = self._device.currentPowerConsumption
if current_power_w > 0.05:
state_attr[ATTR_CURRENT_POWER_W] = round(current_power_w, 2)
state_attr[ATTR_TODAY_ENERGY_KWH] = round(self._device.energyCounter, 2)
return state_attr
class HomematicipDimmer(HomematicipGenericDevice, Light):
def __init__(self, hap: HomematicipHAP, device) -> None:
super().__init__(hap, device)
@property
def is_on(self) -> bool:
return self._device.dimLevel is not None and self._device.dimLevel > 0.0
@property
def brightness(self) -> int:
return int((self._device.dimLevel or 0.0) * 255)
@property
def supported_features(self) -> int:
return SUPPORT_BRIGHTNESS
async def async_turn_on(self, **kwargs) -> None:
if ATTR_BRIGHTNESS in kwargs:
await self._device.set_dim_level(kwargs[ATTR_BRIGHTNESS] / 255.0)
else:
await self._device.set_dim_level(1)
async def async_turn_off(self, **kwargs) -> None:
await self._device.set_dim_level(0)
class HomematicipNotificationLight(HomematicipGenericDevice, Light):
def __init__(self, hap: HomematicipHAP, device, channel: int) -> None:
self.channel = channel
if self.channel == 2:
super().__init__(hap, device, "Top")
else:
super().__init__(hap, device, "Bottom")
self._color_switcher = {
RGBColorState.WHITE: [0.0, 0.0],
RGBColorState.RED: [0.0, 100.0],
RGBColorState.YELLOW: [60.0, 100.0],
RGBColorState.GREEN: [120.0, 100.0],
RGBColorState.TURQUOISE: [180.0, 100.0],
RGBColorState.BLUE: [240.0, 100.0],
RGBColorState.PURPLE: [300.0, 100.0],
}
@property
def _func_channel(self) -> NotificationLightChannel:
return self._device.functionalChannels[self.channel]
@property
def is_on(self) -> bool:
return (
self._func_channel.dimLevel is not None
and self._func_channel.dimLevel > 0.0
)
@property
def brightness(self) -> int:
return int((self._func_channel.dimLevel or 0.0) * 255)
@property
def hs_color(self) -> tuple:
simple_rgb_color = self._func_channel.simpleRGBColorState
return self._color_switcher.get(simple_rgb_color, [0.0, 0.0])
@property
def device_state_attributes(self) -> Dict[str, Any]:
state_attr = super().device_state_attributes
if self.is_on:
state_attr[ATTR_COLOR_NAME] = self._func_channel.simpleRGBColorState
return state_attr
@property
def name(self) -> str:
return f"{super().name} Notification"
@property
def supported_features(self) -> int:
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR
@property
def unique_id(self) -> str:
return f"{self.__class__.__name__}_{self.post}_{self._device.id}"
async def async_turn_on(self, **kwargs) -> None:
hs_color = kwargs.get(ATTR_HS_COLOR, self.hs_color)
simple_rgb_color = _convert_color(hs_color)
brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness)
if not kwargs:
brightness = 255
brightness = max(10, brightness)
dim_level = brightness / 255.0
transition = kwargs.get(ATTR_TRANSITION, 0.5)
await self._device.set_rgb_dim_level_with_time(
channelIndex=self.channel,
rgb=simple_rgb_color,
dimLevel=dim_level,
onTime=0,
rampTime=transition,
)
async def async_turn_off(self, **kwargs) -> None:
simple_rgb_color = self._func_channel.simpleRGBColorState
transition = kwargs.get(ATTR_TRANSITION, 0.5)
await self._device.set_rgb_dim_level_with_time(
channelIndex=self.channel,
rgb=simple_rgb_color,
dimLevel=0.0,
onTime=0,
rampTime=transition,
)
def _convert_color(color: tuple) -> RGBColorState:
if color is None:
return RGBColorState.WHITE
hue = int(color[0])
saturation = int(color[1])
if saturation < 5:
return RGBColorState.WHITE
if 30 < hue <= 90:
return RGBColorState.YELLOW
if 90 < hue <= 160:
return RGBColorState.GREEN
if 150 < hue <= 210:
return RGBColorState.TURQUOISE
if 210 < hue <= 270:
return RGBColorState.BLUE
if 270 < hue <= 330:
return RGBColorState.PURPLE
return RGBColorState.RED
| true
| true
|
79083f742da3f3eee14c296fe653dcc7712b69ac
| 2,167
|
py
|
Python
|
data/check_apogee_spectra.py
|
andycasey/stellar-twins
|
9b3cfbf608e3e15a2358bbd33aa5ae21cfc1d0dd
|
[
"MIT"
] | null | null | null |
data/check_apogee_spectra.py
|
andycasey/stellar-twins
|
9b3cfbf608e3e15a2358bbd33aa5ae21cfc1d0dd
|
[
"MIT"
] | null | null | null |
data/check_apogee_spectra.py
|
andycasey/stellar-twins
|
9b3cfbf608e3e15a2358bbd33aa5ae21cfc1d0dd
|
[
"MIT"
] | 1
|
2016-09-28T20:47:21.000Z
|
2016-09-28T20:47:21.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Test the quoted APOGEE uncertainties from individual (rebinned) spectra. """
__author__ = "Andy Casey <arc@ast.cam.ac.uk>"
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from glob import glob
from itertools import combinations
def get_differences(apStar_filename):
image = fits.open(apStar_filename)
N_visits = image[0].header["NVISITS"]
data_index = 1
error_index = 2
mask_index = 3
# Generate all permutations.
differences = []
for i, j in combinations(range(N_visits), 2):
di = image[data_index].data[i + 2, :]
dj = image[data_index].data[j + 2, :]
sigma = np.sqrt(image[error_index].data[i + 2, :]**2 \
+ image[error_index].data[j + 2, :]**2)
ok = (di > 0) * (dj > 0) * np.isfinite(di * dj * sigma) \
* (image[mask_index].data[i + 2, :] == 0) \
* (image[mask_index].data[j + 2, :] == 0)
differences.extend(((di - dj)/sigma)[ok])
differences = np.array(differences).flatten()
return differences
def plot_differences(differences):
fig, ax = plt.subplots(1)
y_bin, x_bin, _ = ax.hist(differences, bins=100, facecolor="#666666")
x = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 1000)
y = np.exp(-0.5*x**2)/np.sqrt(2*np.pi)
ax.plot(x, y*np.trapz(y_bin, x=x_bin[1:])/np.sqrt(2*np.pi), lw=2, c="r")
ax.set_title("mu = {0:.1f}, sigma(|d|) = {1:.1f}".format(
np.median(differences), np.std(np.abs(differences))))
ax.set_xlabel("(F1 - F2)/sqrt(sigma_1^2 + sigma_2^2)")
return fig
if __name__ == "__main__":
filenames = glob("APOGEE/*.fits")
all_differences = []
for filename in filenames:
differences = get_differences(filename)
if len(differences) > 0:
fig = plot_differences(differences)
fig.savefig("APOGEE/{0}.png".format(filename.split("/")[-1].split(".")[0]))
plt.close("all")
print(filename)
all_differences.extend(differences)
fig = plot_differences(np.array(all_differences))
fig.savefig("APOGEE/all.png")
| 28.142857
| 87
| 0.606368
|
__author__ = "Andy Casey <arc@ast.cam.ac.uk>"
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from glob import glob
from itertools import combinations
def get_differences(apStar_filename):
image = fits.open(apStar_filename)
N_visits = image[0].header["NVISITS"]
data_index = 1
error_index = 2
mask_index = 3
differences = []
for i, j in combinations(range(N_visits), 2):
di = image[data_index].data[i + 2, :]
dj = image[data_index].data[j + 2, :]
sigma = np.sqrt(image[error_index].data[i + 2, :]**2 \
+ image[error_index].data[j + 2, :]**2)
ok = (di > 0) * (dj > 0) * np.isfinite(di * dj * sigma) \
* (image[mask_index].data[i + 2, :] == 0) \
* (image[mask_index].data[j + 2, :] == 0)
differences.extend(((di - dj)/sigma)[ok])
differences = np.array(differences).flatten()
return differences
def plot_differences(differences):
fig, ax = plt.subplots(1)
y_bin, x_bin, _ = ax.hist(differences, bins=100, facecolor="#666666")
x = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 1000)
y = np.exp(-0.5*x**2)/np.sqrt(2*np.pi)
ax.plot(x, y*np.trapz(y_bin, x=x_bin[1:])/np.sqrt(2*np.pi), lw=2, c="r")
ax.set_title("mu = {0:.1f}, sigma(|d|) = {1:.1f}".format(
np.median(differences), np.std(np.abs(differences))))
ax.set_xlabel("(F1 - F2)/sqrt(sigma_1^2 + sigma_2^2)")
return fig
if __name__ == "__main__":
filenames = glob("APOGEE/*.fits")
all_differences = []
for filename in filenames:
differences = get_differences(filename)
if len(differences) > 0:
fig = plot_differences(differences)
fig.savefig("APOGEE/{0}.png".format(filename.split("/")[-1].split(".")[0]))
plt.close("all")
print(filename)
all_differences.extend(differences)
fig = plot_differences(np.array(all_differences))
fig.savefig("APOGEE/all.png")
| true
| true
|
790840947ae7244ad5ef3eab016a7359ff9c3924
| 4,032
|
py
|
Python
|
ezgal/scripts/convert_basti.py
|
dpgettings/ezgal
|
de4a58879eaee0bddbcdb42dddfa2b398dbc3ea9
|
[
"MIT"
] | 5
|
2015-07-15T21:05:22.000Z
|
2017-10-05T19:16:09.000Z
|
ezgal/scripts/convert_basti.py
|
dpgettings/ezgal
|
de4a58879eaee0bddbcdb42dddfa2b398dbc3ea9
|
[
"MIT"
] | 1
|
2015-09-12T12:37:29.000Z
|
2015-09-12T12:37:29.000Z
|
ezgal/scripts/convert_basti.py
|
dpgettings/ezgal
|
de4a58879eaee0bddbcdb42dddfa2b398dbc3ea9
|
[
"MIT"
] | 2
|
2015-05-14T14:34:19.000Z
|
2019-03-22T02:17:22.000Z
|
#!/usr/bin/python
import glob,re,sys,math,pyfits
import numpy as np
import utils
if len(sys.argv) < 2:
print '\nconvert basti SSP models to ez_gal fits format'
print 'Run in directory with SED models for one metallicity'
print 'Usage: convert_basti.py ez_gal.ascii\n'
sys.exit(2)
fileout = sys.argv[1]
# try to extract meta data out of fileout
sfh = ''; tau = ''; met = ''; imf = ''
# split on _ but get rid of the extension
parts = '.'.join(fileout.split('.')[:-1]).split('_')
# look for sfh
for (check,val) in zip(['ssp','exp'], ['SSP','Exponential']):
if parts.count(check):
sfh = val
sfh_index = parts.index(check)
break
# tau?
if sfh:
tau = parts[sfh_index+1] if sfh == 'exp' else ''
# metallicity
if parts.count('z'):
met = parts[parts.index('z') + 1]
# imf
for (check,val) in zip(['krou','salp','chab'], ['Kroupa', 'Salpeter', 'Chabrier']):
if parts.count(check):
imf = val
break
if parts.count('n'):
n = parts[parts.index('n') + 1]
ae = False
if parts.count('ae'): ae = True
# does the file with masses exist?
has_masses = False
mass_file = glob.glob('MLR*.txt')
if len(mass_file):
# read it in!
print 'Loading masses from %s' % mass_file[0]
data = utils.rascii(mass_file[0], silent=True)
masses = data[:,10:14].sum(axis=1)
has_masses = True
files = glob.glob('SPEC*agb*')
nages = len(files)
ages = []
for (i,file) in enumerate(files):
ls = []
this = []
# extract the age from the filename and convert to years
m = re.search('t60*(\d+)$', file)
ages.append(int(m.group(1))*1e6)
# read in this file
fp = open(file, 'r')
for line in fp:
parts = line.strip().split()
ls.append(float(parts[0].strip()))
this.append(float(parts[1].strip()))
if i == 0:
# if this is the first file, generate the data table
nls = len(ls)
seds = np.empty((nls,nages))
# convert to ergs/s/angstrom
seds[:,i] = np.array(this)/4.3607e-33/1e10
# convert to numpy
ages = np.array(ages)
ls = np.array(ls)*10.0
# make sure we are sorted in age
sinds = ages.argsort()
ages = ages[sinds]
seds = seds[:,sinds]
# speed of light
c = utils.convert_length(utils.c, incoming='m', outgoing='a')
# convert from angstroms to hertz
vs = c/ls
# convert from ergs/s/A to ergs/s/Hz
seds *= ls.reshape((ls.size,1))**2.0/c
# and now from ergs/s/Hz to ergs/s/Hz/cm^2.0
seds /= (4.0*math.pi*utils.convert_length(10, incoming='pc', outgoing='cm')**2.0)
# sort in frequency space
sinds = vs.argsort()
# generate fits frame with sed in it
primary_hdu = pyfits.PrimaryHDU(seds[sinds,:])
primary_hdu.header.update('units', 'ergs/s/cm^2/Hz')
primary_hdu.header.update('has_seds', True)
primary_hdu.header.update('nfilters', 0)
primary_hdu.header.update('nzfs', 0)
# store meta data
if sfh and met and imf:
primary_hdu.header.update('has_meta', True)
primary_hdu.header.update('model', 'BaSTI', comment='meta data')
primary_hdu.header.update('met', met, comment='meta data')
primary_hdu.header.update('imf', imf, comment='meta data')
primary_hdu.header.update('sfh', sfh, comment='meta data')
if sfh == 'Exponential': primary_hdu.header.update('tau', tau, comment='meta data')
primary_hdu.header.update('n', n, comment='meta data')
primary_hdu.header.update('ae', ae, comment='meta data')
# store the list of frequencies in a table
vs_hdu = pyfits.new_table(pyfits.ColDefs([pyfits.Column(name='vs', array=vs[sinds], format='D', unit='hertz')]))
vs_hdu.header.update('units', 'hertz')
# and the list of ages
cols = [pyfits.Column(name='ages', array=ages, format='D', unit='years')]
# and masses
if has_masses: cols.append(pyfits.Column(name='masses', array=masses, format='D', unit='m_sun'))
ages_hdu = pyfits.new_table(pyfits.ColDefs(cols))
if has_masses: ages_hdu.header.update('has_mass', True)
# make the fits file in memory
hdulist = pyfits.HDUList([primary_hdu,vs_hdu,ages_hdu])
# and write it out
hdulist.writeto(fileout, clobber=True)
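# Editorial unit check: the conversions above implement F_nu = F_lambda *
# lambda^2 / c (ergs/s/A -> ergs/s/Hz), then divide by 4*pi*d^2 with d = 10 pc,
# matching the 'ergs/s/cm^2/Hz' units written to the primary HDU header.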
| 30.545455
| 112
| 0.657738
|
import glob,re,sys,math,pyfits
import numpy as np
import utils
if len(sys.argv) < 2:
print '\nconvert basti SSP models to ez_gal fits format'
print 'Run in directory with SED models for one metallicity'
print 'Usage: convert_basti.py ez_gal.ascii\n'
sys.exit(2)
fileout = sys.argv[1]
sfh = ''; tau = ''; met = ''; imf = ''
parts = '.'.join(fileout.split('.')[:-1]).split('_')
for (check,val) in zip(['ssp','exp'], ['SSP','Exponential']):
if parts.count(check):
sfh = val
sfh_index = parts.index(check)
break
if sfh:
tau = parts[sfh_index+1] if sfh == 'exp' else ''
if parts.count('z'):
met = parts[parts.index('z') + 1]
for (check,val) in zip(['krou','salp','chab'], ['Kroupa', 'Salpeter', 'Chabrier']):
if parts.count(check):
imf = val
break
if parts.count('n'):
n = parts[parts.index('n') + 1]
ae = False
if parts.count('ae'): ae = True
has_masses = False
mass_file = glob.glob('MLR*.txt')
if len(mass_file):
print 'Loading masses from %s' % mass_file[0]
data = utils.rascii(mass_file[0], silent=True)
masses = data[:,10:14].sum(axis=1)
has_masses = True
files = glob.glob('SPEC*agb*')
nages = len(files)
ages = []
for (i,file) in enumerate(files):
ls = []
this = []
m = re.search('t60*(\d+)$', file)
ages.append(int(m.group(1))*1e6)
fp = open(file, 'r')
for line in fp:
parts = line.strip().split()
ls.append(float(parts[0].strip()))
this.append(float(parts[1].strip()))
if i == 0:
nls = len(ls)
seds = np.empty((nls,nages))
seds[:,i] = np.array(this)/4.3607e-33/1e10
ages = np.array(ages)
ls = np.array(ls)*10.0
sinds = ages.argsort()
ages = ages[sinds]
seds = seds[:,sinds]
c = utils.convert_length(utils.c, incoming='m', outgoing='a')
vs = c/ls
seds *= ls.reshape((ls.size,1))**2.0/c
seds /= (4.0*math.pi*utils.convert_length(10, incoming='pc', outgoing='cm')**2.0)
sinds = vs.argsort()
primary_hdu = pyfits.PrimaryHDU(seds[sinds,:])
primary_hdu.header.update('units', 'ergs/s/cm^2/Hz')
primary_hdu.header.update('has_seds', True)
primary_hdu.header.update('nfilters', 0)
primary_hdu.header.update('nzfs', 0)
if sfh and met and imf:
primary_hdu.header.update('has_meta', True)
primary_hdu.header.update('model', 'BaSTI', comment='meta data')
primary_hdu.header.update('met', met, comment='meta data')
primary_hdu.header.update('imf', imf, comment='meta data')
primary_hdu.header.update('sfh', sfh, comment='meta data')
if sfh == 'Exponential': primary_hdu.header.update('tau', tau, comment='meta data')
primary_hdu.header.update('n', n, comment='meta data')
primary_hdu.header.update('ae', ae, comment='meta data')
vs_hdu = pyfits.new_table(pyfits.ColDefs([pyfits.Column(name='vs', array=vs[sinds], format='D', unit='hertz')]))
vs_hdu.header.update('units', 'hertz')
cols = [pyfits.Column(name='ages', array=ages, format='D', unit='years')]
if has_masses: cols.append(pyfits.Column(name='masses', array=masses, format='D', unit='m_sun'))
ages_hdu = pyfits.new_table(pyfits.ColDefs(cols))
if has_masses: ages_hdu.header.update('has_mass', True)
hdulist = pyfits.HDUList([primary_hdu,vs_hdu,ages_hdu])
hdulist.writeto(fileout, clobber=True)
| false
| true
|
7908410bd7bc8c885dca9b63f878d01401772eb0
| 2,963
|
py
|
Python
|
python_vuejs/vuejs.py
|
Timtech4u/python-vuejs
|
7634726ad7fc5ab02a6159e7f150360ededca250
|
[
"MIT"
] | null | null | null |
python_vuejs/vuejs.py
|
Timtech4u/python-vuejs
|
7634726ad7fc5ab02a6159e7f150360ededca250
|
[
"MIT"
] | null | null | null |
python_vuejs/vuejs.py
|
Timtech4u/python-vuejs
|
7634726ad7fc5ab02a6159e7f150360ededca250
|
[
"MIT"
] | 1
|
2018-11-24T02:05:28.000Z
|
2018-11-24T02:05:28.000Z
|
# -*- coding: utf-8 -*-
from collections import namedtuple
from subprocess import check_output
import click
from .utils import cd
try:
from subprocess import call as run
except ImportError:
from subprocess import run
class VueJs(object):
"""
Provide subprocess call to `npm` and `vue-cli`
"""
@staticmethod
def node_check():
"""
Node and npm version checker
"""
        # compare major versions numerically; plain string comparison breaks
        # for double-digit majors (e.g. 'v10' < 'v5' lexicographically)
        node_ver = int(check_output('node -v'.split()).decode('utf-8').lstrip('v').split('.')[0])
        npm_ver = int(check_output('npm -v'.split()).decode('utf-8').split('.')[0])
        return all([node_ver > 5, npm_ver >= 4])
@staticmethod
def vue_cli_check():
"""
vue-cli version checker
"""
try:
return check_output('vue -V'.split()).decode('utf-8').rsplit('.')[0]
except OSError:
return False
@staticmethod
def install_cli():
run('npm install -g vue-cli'.split())
@staticmethod
def project_setup(project):
run('vue init webpack {project}'.format(project=project).split())
@staticmethod
def install_dependencies(project):
with cd(project):
run('npm install'.split())
@staticmethod
def dev():
run('npm run dev'.split())
@staticmethod
def build():
run('npm run build'.split())
class VueJsBuilder(object):
@staticmethod
def startproject(project):
nt = namedtuple('Result', ['status', 'message', 'color'])
if VueJs.vue_cli_check():
VueJs.project_setup(project)
VueJs.install_dependencies(project)
return nt(True, 'Application and dependencies installed\n', 'green')
else:
return nt(False, 'Please install vue-cli via `vuecli` command', 'red')
@click.group()
def cli():
"""
Click entry point: vue-cli commands group
By convention all new cli has a cli function with a pass statement
"""
pass
@cli.command()
def vuecheck():
"""
Check if node > 5 and npm > 3 are installed
"""
if VueJs.node_check():
click.echo(click.style('Found node and npm', fg='green'))
else:
click.echo(click.style('Missing node and npm installation', fg='red'))
@cli.command()
def installvuecli():
"""
Install vue-cli
"""
if VueJs.vue_cli_check():
click.echo(click.style('Found valid vue-cli', fg='green'))
else:
VueJs.install_cli()
click.echo(click.style('Installed vue-cli globally', fg='green'))
@cli.command()
@click.argument('project')
def startvueapp(project):
"""
Init vue project via vue-cli
"""
result = VueJsBuilder.startproject(project)
click.echo(click.style(result.message, fg=result.color))
@cli.command()
def vuedev():
"""
Run frontend dev server via npm
"""
VueJs.dev()
@cli.command()
def vuebuild():
"""
Build Vue.js project via npm
"""
VueJs.build()
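# Editorial usage sketch (assumes the `cli` group above is exposed as a console
# entry point; the `pyvue` command name is an assumption):
#   $ pyvue vuecheck            # verify node/npm versions
#   $ pyvue installvuecli       # install vue-cli globally if missing
#   $ pyvue startvueapp myapp   # scaffold a project and install dependencies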
| 22.792308
| 82
| 0.600405
|
from collections import namedtuple
from subprocess import check_output
import click
from .utils import cd
try:
from subprocess import call as run
except ImportError:
from subprocess import run
class VueJs(object):
@staticmethod
def node_check():
        node_ver = int(check_output('node -v'.split()).decode('utf-8').lstrip('v').split('.')[0])
        npm_ver = int(check_output('npm -v'.split()).decode('utf-8').split('.')[0])
        return all([node_ver > 5, npm_ver >= 4])
@staticmethod
def vue_cli_check():
try:
return check_output('vue -V'.split()).decode('utf-8').rsplit('.')[0]
except OSError:
return False
@staticmethod
def install_cli():
run('npm install -g vue-cli'.split())
@staticmethod
def project_setup(project):
run('vue init webpack {project}'.format(project=project).split())
@staticmethod
def install_dependencies(project):
with cd(project):
run('npm install'.split())
@staticmethod
def dev():
run('npm run dev'.split())
@staticmethod
def build():
run('npm run build'.split())
class VueJsBuilder(object):
@staticmethod
def startproject(project):
nt = namedtuple('Result', ['status', 'message', 'color'])
if VueJs.vue_cli_check():
VueJs.project_setup(project)
VueJs.install_dependencies(project)
return nt(True, 'Application and dependencies installed\n', 'green')
else:
return nt(False, 'Please install vue-cli via `vuecli` command', 'red')
@click.group()
def cli():
pass
@cli.command()
def vuecheck():
if VueJs.node_check():
click.echo(click.style('Found node and npm', fg='green'))
else:
click.echo(click.style('Missing node and npm installation', fg='red'))
@cli.command()
def installvuecli():
if VueJs.vue_cli_check():
click.echo(click.style('Found valid vue-cli', fg='green'))
else:
VueJs.install_cli()
click.echo(click.style('Installed vue-cli globally', fg='green'))
@cli.command()
@click.argument('project')
def startvueapp(project):
result = VueJsBuilder.startproject(project)
click.echo(click.style(result.message, fg=result.color))
@cli.command()
def vuedev():
VueJs.dev()
@cli.command()
def vuebuild():
VueJs.build()
| true
| true
|
7908411e8ba2d90f953b5dd55f4a6f78d9db403d
| 259
|
py
|
Python
|
Practice1.py
|
anayakoti/FirstSample
|
8ef05772991644e63a4fd6759458f449cd2b00c0
|
[
"bzip2-1.0.6"
] | null | null | null |
Practice1.py
|
anayakoti/FirstSample
|
8ef05772991644e63a4fd6759458f449cd2b00c0
|
[
"bzip2-1.0.6"
] | null | null | null |
Practice1.py
|
anayakoti/FirstSample
|
8ef05772991644e63a4fd6759458f449cd2b00c0
|
[
"bzip2-1.0.6"
] | null | null | null |
createNewFile = open('dummy.txt', 'w')
print("Enter a rollnumber")
grades = input()
while grades != '*':
    createNewFile.write(grades + "\n")
    print("Enter a rollnumber")
    grades = input()
createNewFile.close()
| 19.923077
| 38
| 0.583012
|
createNewFile = open('dummy.txt', 'w')
print("Enter a rollnumber")
grades = input()
while grades != '*':
    createNewFile.write(grades + "\n")
    print("Enter a rollnumber")
    grades = input()
createNewFile.close()
| true
| true
|
790841b06ba83dce9101ed02863786dd32d19090
| 147
|
py
|
Python
|
treasury_yield_analysis/task/__init__.py
|
samsea18/Treasury-Yield-Analysis
|
8746adee93a995089d3c6dc1eb371ecef9cd942c
|
[
"MIT"
] | null | null | null |
treasury_yield_analysis/task/__init__.py
|
samsea18/Treasury-Yield-Analysis
|
8746adee93a995089d3c6dc1eb371ecef9cd942c
|
[
"MIT"
] | 1
|
2021-06-29T16:34:26.000Z
|
2021-06-29T16:34:26.000Z
|
treasury_yield_analysis/task/__init__.py
|
samsea18/Treasury-Yield-Analysis
|
8746adee93a995089d3c6dc1eb371ecef9cd942c
|
[
"MIT"
] | null | null | null |
from .treasury_yields import Treasury_Yield_Task
from .mariadb import Mariadb_Task
from .bea import BEA_Task
from .yfinance import Yfinance_Task
| 36.75
| 49
| 0.85034
|
from .treasury_yields import Treasury_Yield_Task
from .mariadb import Mariadb_Task
from .bea import BEA_Task
from .yfinance import Yfinance_Task
| true
| true
|
790841b8809171e06dadf551f38dccc196d0b33e
| 7,094
|
py
|
Python
|
.install/.backup/lib/apitools/base/py/extra_types.py
|
bopopescu/google-cloud-sdk
|
b34e6a18f1e89673508166acce816111c3421e4b
|
[
"Apache-2.0"
] | null | null | null |
.install/.backup/lib/apitools/base/py/extra_types.py
|
bopopescu/google-cloud-sdk
|
b34e6a18f1e89673508166acce816111c3421e4b
|
[
"Apache-2.0"
] | null | null | null |
.install/.backup/lib/apitools/base/py/extra_types.py
|
bopopescu/google-cloud-sdk
|
b34e6a18f1e89673508166acce816111c3421e4b
|
[
"Apache-2.0"
] | 1
|
2020-07-24T20:04:47.000Z
|
2020-07-24T20:04:47.000Z
|
"""Extra types understood by apitools.
This file will be replaced by a .proto file when we switch to proto2
from protorpc.
"""
import collections
import json
import numbers
from protorpc import message_types
from protorpc import messages
from protorpc import protojson
from apitools.base.py import encoding
from apitools.base.py import exceptions
from apitools.base.py import util
__all__ = [
'DateTimeMessage',
'JsonArray',
'JsonObject',
'JsonValue',
'JsonProtoEncoder',
'JsonProtoDecoder',
]
# We import from protorpc.
# pylint:disable=invalid-name
DateTimeMessage = message_types.DateTimeMessage
# pylint:enable=invalid-name
def _ValidateJsonValue(json_value):
entries = [(f, json_value.get_assigned_value(f.name))
for f in json_value.all_fields()]
assigned_entries = [(f, value) for f, value in entries if value is not None]
if len(assigned_entries) != 1:
raise exceptions.InvalidDataError('Malformed JsonValue: %s' % json_value)
def _JsonValueToPythonValue(json_value):
"""Convert the given JsonValue to a json string."""
util.Typecheck(json_value, JsonValue)
_ValidateJsonValue(json_value)
if json_value.is_null:
return None
entries = [(f, json_value.get_assigned_value(f.name))
for f in json_value.all_fields()]
assigned_entries = [(f, value) for f, value in entries if value is not None]
field, value = assigned_entries[0]
if not isinstance(field, messages.MessageField):
return value
elif field.message_type is JsonObject:
return _JsonObjectToPythonValue(value)
elif field.message_type is JsonArray:
return _JsonArrayToPythonValue(value)
def _JsonObjectToPythonValue(json_value):
util.Typecheck(json_value, JsonObject)
return dict([(prop.key, _JsonValueToPythonValue(prop.value)) for prop
in json_value.properties])
def _JsonArrayToPythonValue(json_value):
util.Typecheck(json_value, JsonArray)
return [_JsonValueToPythonValue(e) for e in json_value.entries]
_MAXINT64 = 2 << 63 - 1
_MININT64 = -(2 << 63)
def _PythonValueToJsonValue(py_value):
"""Convert the given python value to a JsonValue."""
if py_value is None:
return JsonValue(is_null=True)
if isinstance(py_value, bool):
return JsonValue(boolean_value=py_value)
if isinstance(py_value, basestring):
return JsonValue(string_value=py_value)
if isinstance(py_value, numbers.Number):
if isinstance(py_value, (int, long)):
if _MININT64 < py_value < _MAXINT64:
return JsonValue(integer_value=py_value)
return JsonValue(double_value=float(py_value))
if isinstance(py_value, dict):
return JsonValue(object_value=_PythonValueToJsonObject(py_value))
if isinstance(py_value, collections.Iterable):
return JsonValue(array_value=_PythonValueToJsonArray(py_value))
raise exceptions.InvalidDataError(
'Cannot convert "%s" to JsonValue' % py_value)
def _PythonValueToJsonObject(py_value):
util.Typecheck(py_value, dict)
return JsonObject(
properties=[
JsonObject.Property(key=key, value=_PythonValueToJsonValue(value))
for key, value in py_value.iteritems()])
def _PythonValueToJsonArray(py_value):
return JsonArray(entries=map(_PythonValueToJsonValue, py_value))
class JsonValue(messages.Message):
"""Any valid JSON value."""
# Is this JSON object `null`?
is_null = messages.BooleanField(1, default=False)
# Exactly one of the following is provided if is_null is False; none
# should be provided if is_null is True.
boolean_value = messages.BooleanField(2)
string_value = messages.StringField(3)
# We keep two numeric fields to keep int64 round-trips exact.
double_value = messages.FloatField(4, variant=messages.Variant.DOUBLE)
integer_value = messages.IntegerField(5, variant=messages.Variant.INT64)
# Compound types
object_value = messages.MessageField('JsonObject', 6)
array_value = messages.MessageField('JsonArray', 7)
class JsonObject(messages.Message):
"""A JSON object value.
Messages:
Property: A property of a JsonObject.
Fields:
properties: A list of properties of a JsonObject.
"""
class Property(messages.Message):
"""A property of a JSON object.
Fields:
key: Name of the property.
value: A JsonValue attribute.
"""
key = messages.StringField(1)
value = messages.MessageField(JsonValue, 2)
properties = messages.MessageField(Property, 1, repeated=True)
class JsonArray(messages.Message):
"""A JSON array value."""
entries = messages.MessageField(JsonValue, 1, repeated=True)
_JSON_PROTO_TO_PYTHON_MAP = {
JsonArray: _JsonArrayToPythonValue,
JsonObject: _JsonObjectToPythonValue,
JsonValue: _JsonValueToPythonValue,
}
_JSON_PROTO_TYPES = tuple(_JSON_PROTO_TO_PYTHON_MAP.keys())
def _JsonProtoToPythonValue(json_proto):
util.Typecheck(json_proto, _JSON_PROTO_TYPES)
return _JSON_PROTO_TO_PYTHON_MAP[type(json_proto)](json_proto)
def _PythonValueToJsonProto(py_value):
if isinstance(py_value, dict):
return _PythonValueToJsonObject(py_value)
if (isinstance(py_value, collections.Iterable) and
not isinstance(py_value, basestring)):
return _PythonValueToJsonArray(py_value)
return _PythonValueToJsonValue(py_value)
def _JsonProtoToJson(json_proto, unused_encoder=None):
return json.dumps(_JsonProtoToPythonValue(json_proto))
def _JsonToJsonProto(json_data, unused_decoder=None):
return _PythonValueToJsonProto(json.loads(json_data))
# pylint:disable=invalid-name
JsonProtoEncoder = _JsonProtoToJson
JsonProtoDecoder = _JsonToJsonProto
# pylint:enable=invalid-name
encoding.RegisterCustomMessageCodec(
encoder=JsonProtoEncoder, decoder=JsonProtoDecoder)(JsonValue)
encoding.RegisterCustomMessageCodec(
encoder=JsonProtoEncoder, decoder=JsonProtoDecoder)(JsonObject)
encoding.RegisterCustomMessageCodec(
encoder=JsonProtoEncoder, decoder=JsonProtoDecoder)(JsonArray)
def _EncodeDateTimeField(field, value):
result = protojson.ProtoJson().encode_field(field, value)
return encoding.CodecResult(value=result, complete=True)
def _DecodeDateTimeField(unused_field, value):
result = protojson.ProtoJson().decode_field(
message_types.DateTimeField(1), value)
return encoding.CodecResult(value=result, complete=True)
encoding.RegisterFieldTypeCodec(_EncodeDateTimeField, _DecodeDateTimeField)(
message_types.DateTimeField)
def _EncodeInt64Field(field, value):
"""Handle the special case of int64 as a string."""
capabilities = [
messages.Variant.INT64,
messages.Variant.UINT64,
]
if field.variant not in capabilities:
return encoding.CodecResult(value=value, complete=False)
if field.repeated:
result = [str(x) for x in value]
else:
result = str(value)
return encoding.CodecResult(value=result, complete=True)
def _DecodeInt64Field(unused_field, value):
# Don't need to do anything special, they're decoded just fine
return encoding.CodecResult(value=value, complete=False)
encoding.RegisterFieldTypeCodec(_EncodeInt64Field, _DecodeInt64Field)(
messages.IntegerField)
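# Editorial round-trip sketch using the codecs registered above (these helpers
# are module-private; this only illustrates the intended behavior):
#   proto = _PythonValueToJsonProto({'a': [1, True, None]})
#   assert json.loads(JsonProtoEncoder(proto)) == {'a': [1, True, None]}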
| 30.577586
| 78
| 0.762193
hexsha: 7908421b2547ace9920792b02f8c54541379d1a7
size: 4664
ext: py
lang: Python
max_stars_repo_path: server.py
max_stars_repo_name: catarinaacsilva/pacman
max_stars_repo_head_hexsha: 940823f4654dfc01e63361aa2ca17a275aa7b1fa
max_stars_repo_licenses: ["MIT"]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: server.py
max_issues_repo_name: catarinaacsilva/pacman
max_issues_repo_head_hexsha: 940823f4654dfc01e63361aa2ca17a275aa7b1fa
max_issues_repo_licenses: ["MIT"]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: server.py
max_forks_repo_name: catarinaacsilva/pacman
max_forks_repo_head_hexsha: 940823f4654dfc01e63361aa2ca17a275aa7b1fa
max_forks_repo_licenses: ["MIT"]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
import requests
import argparse
import asyncio
import json
import logging
import websockets
from collections import namedtuple
from game import Game
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
wslogger = logging.getLogger('websockets')
wslogger.setLevel(logging.WARN)
logger = logging.getLogger('Server')
logger.setLevel(logging.INFO)
Player = namedtuple('Player', ['name', 'ws'])
class Game_server:
    """Matchmaking server: queues players, runs one game at a time, relays state to viewers."""

    def __init__(self, mapfile, ghosts, level_ghosts, lives, timeout, grading=None):
self.game = Game(mapfile, ghosts, level_ghosts, lives, timeout)
self.game_properties = {'map': mapfile,
'n_ghosts': ghosts,
'l_ghosts': level_ghosts}
self.players = asyncio.Queue()
self.viewers = set()
self.current_player = None
self.grading = grading
    async def incoming_handler(self, websocket, path):
        """Dispatch websocket messages from players (path "/player") and viewers (path "/viewer")."""
try:
async for message in websocket:
data = json.loads(message)
if data["cmd"] == "join":
map_info = self.game.info()
await websocket.send(map_info)
if path == "/player":
logger.info("<%s> has joined", data["name"])
await self.players.put(Player(data["name"], websocket))
if path == "/viewer":
self.viewers.add(websocket)
                # Only the active player's connection may steer the game.
                if data["cmd"] == "key" and self.current_player and self.current_player.ws == websocket:
logger.debug((self.current_player.name, data))
self.game.keypress(data["key"][0])
        except websockets.exceptions.ConnectionClosed:
logger.info("Client disconnected")
if websocket in self.viewers:
self.viewers.remove(websocket)
    async def mainloop(self):
        """Serve queued players one at a time, broadcasting each frame to viewers."""
while True:
logger.info("Waiting for players")
self.current_player = await self.players.get()
if self.current_player.ws.closed:
                logger.error("<{}> disconnected while waiting".format(self.current_player.name))
continue
            game_rec = None  # grading record; only populated when a grading server is set
            try:
logger.info("Starting game for <{}>".format(self.current_player.name))
self.game.start(self.current_player.name)
if self.grading:
game_rec = dict(self.game_properties)
game_rec['player'] = self.current_player.name
while self.game.running:
await self.game.next_frame()
await self.current_player.ws.send(self.game.state)
if self.viewers:
await asyncio.wait([client.send(self.game.state) for client in self.viewers])
await self.current_player.ws.send(json.dumps({"score": self.game.score}))
logger.info("Disconnecting <{}>".format(self.current_player.name))
            except websockets.exceptions.ConnectionClosed:
self.current_player = None
finally:
                if self.grading and game_rec is not None:
game_rec['score'] = self.game.score
r = requests.post(self.grading, json=game_rec)
if self.current_player:
await self.current_player.ws.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--bind", help="IP address to bind to", default="")
parser.add_argument("--port", help="TCP port", type=int, default=8000)
parser.add_argument("--ghosts", help="Number of ghosts", type=int, default=1)
    parser.add_argument("--level", help="Difficulty level of the ghosts", choices=['0', '1', '2', '3'], default='1')
parser.add_argument("--lives", help="Number of lives", type=int, default=3)
parser.add_argument("--timeout", help="Timeout after this amount of steps", type=int, default=3000)
    parser.add_argument("--map", help="Path to the map bitmap", default="data/map1.bmp")
    parser.add_argument("--grading-server", help="URL of the grading server", default=None)
args = parser.parse_args()
g = Game_server(args.map, args.ghosts, int(args.level), args.lives, args.timeout, args.grading_server)
game_loop_task = asyncio.ensure_future(g.mainloop())
    websocket_server = websockets.serve(g.incoming_handler, args.bind, args.port)
loop = asyncio.get_event_loop()
loop.run_until_complete(asyncio.gather(websocket_server, game_loop_task))
loop.close()
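# --- Illustrative companion sketch (an assumption, not part of the repo) ---
# A minimal player client for the server above. The message shapes are
# inferred from incoming_handler; the URL, player name, and key value are
# assumptions, and Game.info() is assumed to return JSON text.
#
# import asyncio
# import json
# import websockets
#
# async def play(uri="ws://localhost:8000/player"):
#     async with websockets.connect(uri) as ws:
#         # Join as a player; the server replies with the map description.
#         await ws.send(json.dumps({"cmd": "join", "name": "demo"}))
#         map_info = json.loads(await ws.recv())
#         # Send one keypress; the server reads data["key"][0].
#         await ws.send(json.dumps({"cmd": "key", "key": "w"}))
#         state = json.loads(await ws.recv())  # one frame of game state
#
# asyncio.get_event_loop().run_until_complete(play())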
avg_line_length: 42.018018
max_line_length: 109
alphanum_fraction: 0.595197