index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
11,148
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/intelligence/reports/test_crud.py
|
# -*- coding: utf-8 -*-
import pytest
import yaml
from cfme.fixtures import pytest_selenium as sel
from cfme.intelligence.reports.dashboards import Dashboard
from cfme.intelligence.reports.reports import CustomReport
from cfme.intelligence.reports.schedules import Schedule
from cfme.intelligence.reports.widgets import ChartWidget, MenuWidget, ReportWidget, RSSFeedWidget
from utils.path import data_path
from utils.randomness import generate_random_string
from utils.update import update
# Directories holding the YAML definitions that drive the CRUD tests below.
report_crud_dir = data_path.join("reports_crud")
schedules_crud_dir = data_path.join("schedules_crud")
def crud_files_reports():
    """Return basenames of every ``*.yaml`` file in the reports CRUD data dir.

    The directory is created first if it does not exist yet, so a fresh
    checkout simply yields an empty parameter list.
    """
    if not report_crud_dir.exists():
        report_crud_dir.mkdir()
    return [
        entry.basename
        for entry in report_crud_dir.listdir()
        if entry.isfile() and entry.basename.endswith(".yaml")
    ]
def crud_files_schedules():
    """Return basenames of every ``*.yaml`` file in the schedules CRUD data dir.

    The directory is created first if it does not exist yet, so a fresh
    checkout simply yields an empty parameter list.
    """
    if not schedules_crud_dir.exists():
        schedules_crud_dir.mkdir()
    return [
        entry.basename
        for entry in schedules_crud_dir.listdir()
        if entry.isfile() and entry.basename.endswith(".yaml")
    ]
@pytest.fixture(params=crud_files_reports())
def custom_report(request):
    """Parametrized fixture: one CustomReport per YAML file in reports_crud.

    Uses ``yaml.safe_load`` instead of ``yaml.load`` — the CRUD files are
    plain data, and ``yaml.load`` without an explicit Loader can construct
    arbitrary Python objects from tagged input.
    """
    with report_crud_dir.join(request.param).open(mode="r") as rep_yaml:
        return CustomReport(**yaml.safe_load(rep_yaml))
@pytest.fixture(params=crud_files_schedules())
def schedule(request):
    """Parametrized fixture: one Schedule per YAML file in schedules_crud.

    Uses ``yaml.safe_load`` instead of ``yaml.load`` (same rationale as the
    custom_report fixture: never deserialize arbitrary Python from YAML).
    """
    with schedules_crud_dir.join(request.param).open(mode="r") as rep_yaml:
        data = yaml.safe_load(rep_yaml)
    name = data.pop("name")
    description = data.pop("description")
    # Local renamed with a trailing underscore to avoid shadowing the
    # ``filter`` builtin; it is still passed positionally to Schedule.
    filter_ = data.pop("filter")
    return Schedule(name, description, filter_, **data)
def test_custom_report_crud(custom_report):
    """CRUD a custom report: create, retitle, queue a run, read results, delete."""
    custom_report.create()
    with update(custom_report):
        # Append a random suffix so the title actually changes on update.
        custom_report.title += generate_random_string()
    custom_report.queue(wait_for_finish=True)
    for report in custom_report.get_saved_reports():
        report.data  # touch the results
    custom_report.delete()
@pytest.mark.meta(blockers=[1202412])
def test_schedule_crud(schedule):
    """CRUD a report schedule: create, update the description, queue, delete."""
    schedule.create()
    with update(schedule):
        schedule.description = "badger badger badger"
    # Queue the schedule and wait for the queued run to finish.
    schedule.queue(wait_for_finish=True)
    schedule.delete()
@pytest.mark.meta(blockers=[1209945])
def test_menuwidget_crud():
    """CRUD lifecycle of a menu widget: create, deactivate via update, delete."""
    settings = dict(
        description=generate_random_string(),
        active=True,
        shortcuts={
            "Services / Catalogs": generate_random_string(),
            "Clouds / Providers": generate_random_string(),
        },
        visibility=["<By Role>", sel.ByText("EvmRole-administrator")],
    )
    widget = MenuWidget(generate_random_string(), **settings)
    widget.create()
    with update(widget):
        widget.active = False
    widget.delete()
@pytest.mark.meta(blockers=[1209945])
def test_reportwidget_crud():
    """CRUD lifecycle of a report widget: create, deactivate via update, delete."""
    settings = dict(
        description=generate_random_string(),
        active=True,
        filter=["Events", "Operations", "Operations VMs Powered On/Off for Last Week"],
        columns=["VM Name", "Message"],
        rows="10",
        timer={"run": "Hourly", "hours": "Hour"},
        visibility=["<By Role>", sel.ByText("EvmRole-administrator")],
    )
    widget = ReportWidget(generate_random_string(), **settings)
    widget.create()
    with update(widget):
        widget.active = False
    widget.delete()
@pytest.mark.meta(blockers=[1209945])
def test_chartwidget_crud():
    """CRUD lifecycle of a chart widget: create, deactivate via update, delete."""
    settings = dict(
        description=generate_random_string(),
        active=True,
        filter="Configuration Management/Virtual Machines/Vendor and Guest OS",
        timer={"run": "Hourly", "hours": "Hour"},
        visibility=["<By Role>", sel.ByText("EvmRole-administrator")],
    )
    widget = ChartWidget(generate_random_string(), **settings)
    widget.create()
    with update(widget):
        widget.active = False
    widget.delete()
@pytest.mark.meta(blockers=[1209945])
def test_rssfeedwidget_crud():
    """CRUD lifecycle of an RSS feed widget, including feed-type switches."""
    widget = RSSFeedWidget(
        generate_random_string(),
        description=generate_random_string(),
        active=True,
        type="Internal",
        feed="Administrative Events",
        rows="8",
        visibility=["<By Role>", sel.ByText("EvmRole-administrator")],
    )
    widget.create()
    # Plain field update.
    with update(widget):
        widget.active = False
    # Switch to an external, predefined feed.
    with update(widget):
        widget.type = "External"
        widget.external = "SlashDot"
    # Switch to an external feed given by a custom URL.
    with update(widget):
        widget.type = "External"
        widget.external = "http://rss.example.com/"
    widget.delete()
def test_dashboard_crud():
    """CRUD a dashboard: create, lock/unlock, swap the widget set, delete."""
    board = Dashboard(
        generate_random_string(),
        "EvmGroup-administrator",
        generate_random_string(),
        locked=False,
        widgets=["Top CPU Consumers (weekly)", "Vendor and Guest OS Chart"],
    )
    board.create()
    # Lock, then unlock again.
    with update(board):
        board.locked = True
    with update(board):
        board.locked = False
    # Replace the widget selection.
    with update(board):
        board.widgets = "Top Storage Consumers"
    board.delete()
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,149
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/configure/test_about_links.py
|
# -*- coding: utf-8 -*-
from cfme.fixtures import pytest_selenium as sel
from cfme.configure import about
import pytest
import requests
def test_about_links():
    """Verify every link on the About page is reachable.

    Navigates to the About page, sends a HEAD request to each product
    assistance link, and asserts a non-error HTTP status.
    """
    sel.force_navigate('about')
    for link_key, link_loc in about.product_assistance.locators.items():
        href = sel.get_attribute(link_loc, 'href')
        try:
            # verify=False: linked pages may sit behind self-signed TLS.
            resp = requests.head(href, verify=False, timeout=20)
        except (requests.Timeout, requests.ConnectionError) as ex:
            # str(ex) instead of ex.message: ``.message`` is deprecated and not
            # guaranteed to exist on these exception types.
            pytest.fail(str(ex))
        assert 200 <= resp.status_code < 400, "Unable to access '{}' ({})".format(link_key, href)
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,150
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/automate/test_provisioning_dialogs.py
|
# -*- coding: utf-8 -*-
import pytest
from cfme.automate import provisioning_dialogs
from utils.randomness import generate_random_string
from utils.update import update
@pytest.yield_fixture(scope="function")
def dialog():
    """Yield a fresh VM-provision dialog; remove it afterwards if it survived."""
    prov_dialog = provisioning_dialogs.ProvisioningDialog(
        provisioning_dialogs.ProvisioningDialog.VM_PROVISION,
        name=generate_random_string(),
        description=generate_random_string(),
    )
    yield prov_dialog
    # Teardown: the test may already have deleted the dialog itself.
    if prov_dialog.exists:
        prov_dialog.delete()
def test_provisioning_dialog_crud(dialog):
    """Full CRUD pass over a provisioning dialog: create, rename, retype, delete."""
    dialog.create()
    assert dialog.exists
    with update(dialog):
        # Rename and re-describe with fresh random values.
        dialog.name = generate_random_string()
        dialog.description = generate_random_string()
    assert dialog.exists
    # Changing the dialog type must not lose the dialog.
    dialog.change_type(provisioning_dialogs.ProvisioningDialog.HOST_PROVISION)
    assert dialog.exists
    dialog.delete()
    assert not dialog.exists
# Every allowed dialog type gets sorted by both columns in both directions.
sort_by_params = [
    (nav_loc, column, direction)
    for nav_loc, _ in provisioning_dialogs.ProvisioningDialog.ALLOWED_TYPES
    for column in ("Name", "Description")
    for direction in ("ascending", "descending")
]
@pytest.mark.meta(blockers=[1096388])
@pytest.mark.parametrize(("nav_loc", "by", "order"), sort_by_params)
def test_provisioning_dialogs_sorting(nav_loc, by, order):
    """Sort each dialog table by the given column and direction.

    Only exercises the sorting action itself; the resulting row order is
    not verified (see the trailing note).
    """
    pytest.sel.force_navigate("{}_dialogs".format(nav_loc))
    provisioning_dialogs.dialog_table.sort_by(by, order)
    # When we can get the same comparing function as the PGSQL DB has, we can check
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,151
|
petrblaho/cfme_tests
|
refs/heads/master
|
/utils/randomness.py
|
# -*- coding: utf-8 -*-
import fauxfactory
import random
import sys
def generate_random_int(max=sys.maxint):
    """Return a random integer between 0 and ``max`` inclusive.

    ``max`` may be anything ``int()`` accepts (e.g. a numeric string);
    default is ``sys.maxint`` (this module targets Python 2).
    """
    return fauxfactory.gen_integer(0, int(max))
def generate_random_local_ip():
    """Return a random IPv4 address inside the private 10.0.0.0/8 range."""
    octets = tuple(generate_random_int(255) for _ in range(3))
    return "10.{}.{}.{}".format(*octets)
def generate_random_string(size=8):
    """Return a random alphanumeric string of ``size`` characters (default 8).

    ``size`` may be anything ``int()`` accepts, e.g. a numeric string.
    """
    return fauxfactory.gen_string("alphanumeric", int(size))
def generate_lowercase_random_string(size=8):
    # Same as generate_random_string, just lowercased.
    return generate_random_string(size).lower()
def generate_random_uuid_as_str():
    # Thin wrapper kept for API stability; delegates entirely to fauxfactory.
    return fauxfactory.gen_uuid()
def pick(from_where, n, quiet=True):
    """Pick `n` elements randomly from a source iterable.

    The source is copied before sampling, so it is never mutated.

    Args:
        from_where: Source iterable (must support ``len`` after ``list()``).
        n: How many elements to pick.
        quiet: Whether to suppress the error when `from_where` holds fewer
            than `n` elements (then all of them are returned). Default True.

    Returns: n-length list with randomly picked elements from `from_where`.

    Raises:
        ValueError: If ``quiet`` is False and fewer than `n` items exist.
    """
    pool = list(from_where)  # copy once: prevents side effects on the source
    if len(pool) < n:
        # We want more than there is
        if not quiet:
            raise ValueError("Less elements in from_where than you want!")
        return pool
    if len(pool) == n:
        # We want all of them
        return pool
    # random.sample picks n distinct elements in random order — equivalent to
    # the old pop-by-random-index loop, without the O(n^2) list shifting.
    return random.sample(pool, n)
class RandomizeValues(object):
    """Recursively replace ``{random_*}`` placeholder strings inside basic
    data structures (typically loaded from YAML) with randomized values."""

    # Maps a placeholder key to the generator function invoked for it.
    _randomizers = {
        'random_int': generate_random_int,
        'random_str': generate_random_string,
        'random_uuid': generate_random_uuid_as_str,
    }

    @classmethod
    def from_dict(cls, d):
        """Load a dictionary with randomizable values and randomize them

        Targeted at dicts produced from loading YAML, so it doesn't try to
        handle more than basic types (str, tuple, list, set, dict)

        Allowable dict values to randomize (remember to quote these in YAML):

        - {random_int}: Becomes an int between 0 and maxint, inclusive
        - {random_int:max}: Becomes an int between 0 and "max", inclusive
        - {random_str}: Becomes a string of numbers and letters, length 8
        - {random_str:length}: Becomes a string of numbers and letters,
          length "length"
        - {random_uuid}: Becomes a completely random uuid

        Returns a modified dict with randomize values
        """
        return {k: cls._randomize_item(v) for k, v in d.items()}

    @classmethod
    def _randomize_item(cls, item):
        # Go through the most common types deserialized from yaml,
        # passing containers back through recursively until there are
        # concrete (string) things to randomize.
        if isinstance(item, dict):
            return cls.from_dict(item)
        elif isinstance(item, tuple):
            return tuple(cls._randomize_item(x) for x in item)
        elif isinstance(item, list):
            return [cls._randomize_item(x) for x in item]
        elif isinstance(item, set):
            return set([cls._randomize_item(x) for x in item])
        elif isinstance(item, basestring) and item.startswith('{random_'):
            # NOTE: ``basestring`` makes this module Python 2 only.
            # Try to parse out the randomness case and possible argument:
            # '{key:arg}' should become 'key' and 'arg'; if no arg, arg is None
            try:
                key, arg = item.strip('{}').split(':', 1)
            except ValueError:
                key, arg = item.strip('{}'), None
        else:
            # No idea what this is, return it untouched
            return item
        if key in cls._randomizers:
            # If the case actually exists, call its randomizer
            randomizer = cls._randomizers[key]
            if arg:
                random_value = randomizer(arg)
            else:
                random_value = randomizer()
            # Randomized results are always returned as strings, like the
            # placeholder they replace.
            return str(random_value)
        else:
            # randomizer was tripped, but no matching randomizers found
            # in _randomizers, just return what was there
            return item
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,152
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/cloud/test_provisioning.py
|
# These tests don't work at the moment, due to the security_groups multi select not working
# in selenium (the group is selected then immediately reset)
import pytest
from textwrap import dedent
from cfme.automate import explorer as automate
from cfme.cloud.instance import instance_factory
from cfme.cloud.provider import OpenStackProvider
from cfme.fixtures import pytest_selenium as sel
from utils import testgen
from utils.randomness import generate_random_string
from utils.update import update
from utils.wait import wait_for
pytestmark = [pytest.mark.meta(server_roles="+automate")]
def pytest_generate_tests(metafunc):
    """Parametrize cloud-provider tests, filtering out unusable providers.

    Keeps only providers whose provisioning data includes an image; the
    volume-related tests are additionally restricted to OpenStack.
    """
    # Filter out providers without templates defined
    argnames, argvalues, idlist = testgen.cloud_providers(metafunc, 'provisioning')
    new_argvalues = []
    new_idlist = []
    for i, argvalue_tuple in enumerate(argvalues):
        args = dict(zip(argnames, argvalue_tuple))
        if not args['provisioning']:
            # Don't know what type of instance to provision, move on
            continue
        # required keys should be a subset of the dict keys set
        # NOTE: ``viewkeys`` means this module is Python 2 only.
        if not {'image'}.issubset(args['provisioning'].viewkeys()):
            # Need image for image -> instance provisioning
            continue
        # The volume-attaching tests only make sense on OpenStack providers.
        if metafunc.function in {
                test_provision_from_template_with_attached_disks, test_provision_with_boot_volume,
                test_provision_with_additional_volume} \
                and args['provider_type'] != 'openstack':
            continue
        new_idlist.append(idlist[i])
        new_argvalues.append([args[argname] for argname in argnames])
    testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
@pytest.fixture(scope="function")
def vm_name(request, provider_mgmt):
    """Fresh, random instance name for each test invocation."""
    return 'test_image_prov_%s' % generate_random_string()
def test_provision_from_template(request, setup_provider, provider_crud, provisioning, vm_name):
    """ Tests instance provision from template

    Metadata:
        test_flag: provision
    """
    image = provisioning['image']['name']
    note = ('Testing provisioning from image %s to vm %s on provider %s' %
            (image, vm_name, provider_crud.key))
    instance = instance_factory(vm_name, provider_crud, image)
    # Clean the instance up on the provider even if provisioning fails midway.
    request.addfinalizer(instance.delete_from_provider)
    inst_args = {
        'email': 'image_provisioner@example.com',
        'first_name': 'Image',
        'last_name': 'Provisioner',
        'notes': note,
        'instance_type': provisioning['instance_type'],
        'availability_zone': provisioning['availability_zone'],
        'security_groups': [provisioning['security_group']],
        'guest_keypair': provisioning['guest_keypair']
    }
    # Only OpenStack needs an explicit network selection here.
    if isinstance(provider_crud, OpenStackProvider):
        inst_args['cloud_network'] = provisioning['cloud_network']
    sel.force_navigate("clouds_instances_by_provider")
    instance.create(**inst_args)
# Ruby snippet for automate methods: installs a :block_device_mapping list
# into the provision request's :clone_options. The ``%s`` is filled with a
# comma-joined sequence of ONE_FIELD entries.
VOLUME_METHOD = ("""
prov = $evm.root["miq_provision"]
prov.set_option(
:clone_options,
{:block_device_mapping => [%s]})
""")

# One mapping entry for VOLUME_METHOD: a volume id plus its device name.
ONE_FIELD = """{:volume_id => "%s", :device_name => "%s"}"""
@pytest.fixture(scope="module")
def default_domain_enabled():
    """Ensure the Default automate domain is enabled for this test module."""
    dom = automate.Domain.default
    # Nothing to do if there is no default domain or it is already enabled.
    if dom is None:
        return
    if dom.is_enabled:
        return
    with update(dom):
        dom.enabled = True
# Not collected for EC2 in generate_tests above
@pytest.mark.meta(blockers=[1152737])
@pytest.mark.parametrize("disks", [1, 2])
def test_provision_from_template_with_attached_disks(
        request, setup_provider, provider_crud, provisioning, vm_name, provider_mgmt, disks,
        soft_assert, provider_type, default_domain_enabled):
    """ Tests provisioning from a template and attaching disks

    Metadata:
        test_flag: provision
    """
    image = provisioning['image']['name']
    note = ('Testing provisioning from image %s to vm %s on provider %s' %
            (image, vm_name, provider_crud.key))
    DEVICE_NAME = "/dev/sd{}"
    device_mapping = []
    # Create ``disks`` volumes on the provider; map them to /dev/sdb, /dev/sdc, ...
    with provider_mgmt.with_volumes(1, n=disks) as volumes:
        for i, volume in enumerate(volumes):
            device_mapping.append((volume, DEVICE_NAME.format(chr(ord("b") + i))))
        # Set up automate: inject the block device mapping into the
        # openstack_PreProvision state machine method.
        cls = automate.Class(
            name="Methods",
            namespace=automate.Namespace.make_path("Cloud", "VM", "Provisioning", "StateMachines"))
        method = automate.Method(
            name="openstack_PreProvision",
            cls=cls)
        with update(method):
            disk_mapping = []
            for mapping in device_mapping:
                disk_mapping.append(ONE_FIELD % mapping)
            method.data = VOLUME_METHOD % ", ".join(disk_mapping)

        def _finish_method():
            # Restore the automate method to a harmless no-op afterwards.
            with update(method):
                method.data = """prov = $evm.root["miq_provision"]"""
        request.addfinalizer(_finish_method)
        instance = instance_factory(vm_name, provider_crud, image)
        request.addfinalizer(instance.delete_from_provider)
        inst_args = {
            'email': 'image_provisioner@example.com',
            'first_name': 'Image',
            'last_name': 'Provisioner',
            'notes': note,
            'instance_type': provisioning['instance_type'],
            'availability_zone': provisioning['availability_zone'],
            'security_groups': [provisioning['security_group']],
            'guest_keypair': provisioning['guest_keypair']
        }
        if isinstance(provider_crud, OpenStackProvider):
            inst_args['cloud_network'] = provisioning['cloud_network']
        sel.force_navigate("clouds_instances_by_provider")
        instance.create(**inst_args)
        # Every created volume must end up attached to the new VM, on the
        # exact device it was mapped to.
        for volume_id in volumes:
            soft_assert(vm_name in provider_mgmt.volume_attachments(volume_id))
        for volume, device in device_mapping:
            soft_assert(provider_mgmt.volume_attachments(volume)[vm_name] == device)
        instance.delete_from_provider()  # To make it possible to delete the volume
        wait_for(lambda: not instance.does_vm_exist_on_provider(), num_sec=180, delay=5)
# Not collected for EC2 in generate_tests above
@pytest.mark.meta(blockers=[1160342])
def test_provision_with_boot_volume(
        request, setup_provider, provider_crud, provisioning, vm_name, provider_mgmt, soft_assert,
        provider_type, default_domain_enabled):
    """ Tests provisioning from a template and attaching one booting volume.

    Metadata:
        test_flag: provision, volumes
    """
    image = provisioning['image']['name']
    note = ('Testing provisioning from image %s to vm %s on provider %s' %
            (image, vm_name, provider_crud.key))
    with provider_mgmt.with_volume(1) as volume:
        # Set up automate: boot the instance from the created volume instead
        # of the image by overriding openstack_CustomizeRequest.
        cls = automate.Class(
            name="Methods",
            namespace=automate.Namespace.make_path("Cloud", "VM", "Provisioning", "StateMachines"))
        method = automate.Method(
            name="openstack_CustomizeRequest",
            cls=cls)
        with update(method):
            method.data = dedent('''\
                $evm.root["miq_provision"].set_option(
                    :clone_options, {
                        :image_ref => nil,
                        :block_device_mapping_v2 => [{
                            :boot_index => 0,
                            :uuid => "%s",
                            :device_name => "vda",
                            :source_type => "volume",
                            :destination_type => "volume",
                            :delete_on_termination => false
                        }]
                    }
                )
            ''' % (volume, ))

        def _finish_method():
            # Restore the automate method to a harmless no-op afterwards.
            with update(method):
                method.data = """prov = $evm.root["miq_provision"]"""
        request.addfinalizer(_finish_method)
        instance = instance_factory(vm_name, provider_crud, image)
        request.addfinalizer(instance.delete_from_provider)
        inst_args = {
            'email': 'image_provisioner@example.com',
            'first_name': 'Image',
            'last_name': 'Provisioner',
            'notes': note,
            'instance_type': provisioning['instance_type'],
            'availability_zone': provisioning['availability_zone'],
            'security_groups': [provisioning['security_group']],
            'guest_keypair': provisioning['guest_keypair']
        }
        if isinstance(provider_crud, OpenStackProvider):
            inst_args['cloud_network'] = provisioning['cloud_network']
        sel.force_navigate("clouds_instances_by_provider")
        instance.create(**inst_args)
        # The VM must boot from our volume, attached as "vda".
        soft_assert(vm_name in provider_mgmt.volume_attachments(volume))
        soft_assert(provider_mgmt.volume_attachments(volume)[vm_name] == "vda")
        instance.delete_from_provider()  # To make it possible to delete the volume
        wait_for(lambda: not instance.does_vm_exist_on_provider(), num_sec=180, delay=5)
# Not collected for EC2 in generate_tests above
@pytest.mark.meta(blockers=[1186413])
def test_provision_with_additional_volume(
        request, setup_provider, provider_crud, provisioning, vm_name, provider_mgmt, soft_assert,
        provider_type, default_domain_enabled, provider_data):
    """ Tests provisioning with setting specific image from AE and then also making it create and
    attach an additional 3G volume.

    Metadata:
        test_flag: provision, volumes
    """
    image = provisioning['image']['name']
    note = ('Testing provisioning from image %s to vm %s on provider %s' %
            (image, vm_name, provider_crud.key))
    # Set up automate: override openstack_CustomizeRequest so the provision
    # boots from a fresh 3G volume built from the small template image.
    cls = automate.Class(
        name="Methods",
        namespace=automate.Namespace.make_path("Cloud", "VM", "Provisioning", "StateMachines"))
    method = automate.Method(
        name="openstack_CustomizeRequest",
        cls=cls)
    try:
        image_id = provider_mgmt.get_template_id(provider_data["small_template"])
    except KeyError:
        # Fixed typo in the skip message ("adta" -> "data").
        pytest.skip("No small_template in provider data!")
    with update(method):
        method.data = dedent('''\
            $evm.root["miq_provision"].set_option(
                :clone_options, {
                    :image_ref => nil,
                    :block_device_mapping_v2 => [{
                        :boot_index => 0,
                        :uuid => "%s",
                        :device_name => "vda",
                        :source_type => "image",
                        :destination_type => "volume",
                        :volume_size => 3,
                        :delete_on_termination => false
                    }]
                }
            )
        ''' % (image_id, ))

    def _finish_method():
        # Restore the automate method to a harmless no-op afterwards.
        with update(method):
            method.data = """prov = $evm.root["miq_provision"]"""
    request.addfinalizer(_finish_method)
    instance = instance_factory(vm_name, provider_crud, image)
    request.addfinalizer(instance.delete_from_provider)
    inst_args = {
        'email': 'image_provisioner@example.com',
        'first_name': 'Image',
        'last_name': 'Provisioner',
        'notes': note,
        'instance_type': provisioning['instance_type'],
        'availability_zone': provisioning['availability_zone'],
        'security_groups': [provisioning['security_group']],
        'guest_keypair': provisioning['guest_keypair']
    }
    if isinstance(provider_crud, OpenStackProvider):
        inst_args['cloud_network'] = provisioning['cloud_network']
    sel.force_navigate("clouds_instances_by_provider")
    instance.create(**inst_args)
    prov_instance = provider_mgmt._find_instance_by_name(vm_name)
    # Pre-initialize so the cleanup below can test it safely. The original
    # wrote ``if volume_id in locals():``, which checks whether the VALUE is
    # among the names in locals() — always False — so leftover volumes were
    # never cleaned up.
    volume_id = None
    try:
        assert hasattr(prov_instance, 'os-extended-volumes:volumes_attached')
        volumes_attached = getattr(prov_instance, 'os-extended-volumes:volumes_attached')
        assert len(volumes_attached) == 1
        volume_id = volumes_attached[0]["id"]
        assert provider_mgmt.volume_exists(volume_id)
        volume = provider_mgmt.get_volume(volume_id)
        assert volume.size == 3
    finally:
        instance.delete_from_provider()
        wait_for(lambda: not instance.does_vm_exist_on_provider(), num_sec=180, delay=5)
        if volume_id is not None and provider_mgmt.volume_exists(volume_id):
            provider_mgmt.delete_volume(volume_id)
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,153
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/infrastructure/test_vm_discovery.py
|
import pytest
import time
from cfme.exceptions import CFMEException
from cfme.infrastructure import virtual_machines
from cfme.infrastructure.provider import SCVMMProvider
from utils import testgen
from utils.randomness import generate_random_string
from utils.wait import TimedOutError
def pytest_generate_tests(metafunc):
    """Parametrize with every infrastructure provider; no extra filtering."""
    # Filter out providers without provisioning data or hosts defined
    argnames, argvalues, idlist = testgen.infra_providers(metafunc)
    testgen.parametrize(metafunc, argnames, argvalues, ids=idlist, scope="module")
@pytest.fixture(scope="class")
def vm_name():
    """Random per-class VM name carrying the discovery-test prefix."""
    prefix = "test_dscvry_"
    return prefix + generate_random_string()
def if_scvmm_refresh_provider(provider):
    """Force a relationship refresh for SCVMM providers (no eventing there)."""
    if not isinstance(provider, SCVMMProvider):
        return
    provider.refresh_provider_relationships()
def wait_for_vm_state_changes(vm, timeout=600):
    """Poll the VM's quadicon until it reports the ``Archived`` state.

    Args:
        vm: VM object exposing ``find_quadicon(refresh=True)``.
        timeout: Maximum number of seconds to keep polling (default 600).

    Returns:
        True once the quadicon state becomes ``Archived``.

    Raises:
        CFMEException: If the VM turns up ``Orphaned`` instead, or if the
            timeout elapses without the VM ever becoming ``Archived``.
    """
    waited = 0
    while waited < timeout:
        try:
            quadicon = vm.find_quadicon(refresh=True)
        except Exception:
            # The quadicon may be briefly missing while the UI refreshes;
            # keep polling until the timeout. (The original bare ``except:``
            # also swallowed the Orphaned CFMEException raised below, so the
            # failure check never fired -- state checks now live outside.)
            quadicon = None
        if quadicon is not None:
            # BUG FIX: compare with ``==``, not ``is`` -- identity against a
            # string literal only worked by accident of CPython interning.
            if quadicon.state == 'Archived':
                return True
            elif quadicon.state == 'Orphaned':
                raise CFMEException("VM should be Archived but it is Orphaned now.")
        time.sleep(15)
        waited += 15
    # BUG FIX: the original only raised when ``count > timeout``, which never
    # held after stepping in increments of 15 up to exactly ``timeout``; the
    # function silently returned None on timeout.
    raise CFMEException(
        "VM did not become Archived within {} seconds.".format(timeout))
def test_vm_discovery(request, setup_provider, provider_crud, provider_mgmt, vm_name):
    """
    Tests whether cfme will discover a vm change
    (add/delete) without being manually refreshed.
    Metadata:
        test_flag: discovery
    """
    vm = virtual_machines.Vm(vm_name, provider_crud)
    # Ensure the backing VM is removed even if the test fails midway.
    def _cleanup():
        vm.delete_from_provider()
        if_scvmm_refresh_provider(provider_crud)
    request.addfinalizer(_cleanup)
    vm.create_on_provider()
    if_scvmm_refresh_provider(provider_crud)
    # CFME should discover the new VM on its own (no manual provider refresh).
    try:
        vm.wait_to_appear(timeout=600, load_details=False)
    except TimedOutError:
        pytest.fail("VM was not found in CFME")
    # Delete on the provider side and expect CFME to archive the VM.
    vm.delete_from_provider()
    if_scvmm_refresh_provider(provider_crud)
    wait_for_vm_state_changes(vm)
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,154
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/services/test_dynamicdd_dialogelement.py
|
import pytest
from cfme.services.catalogs.catalog_item import CatalogItem
from cfme.automate.service_dialogs import ServiceDialog
from cfme.services.catalogs.catalog import Catalog
from cfme.services.catalogs.service_catalogs import ServiceCatalogs
from cfme.automate.explorer import Domain, Namespace, Class, Method, Instance
from cfme.web_ui import flash
from utils.randomness import generate_random_string
import utils.randomness as rand
pytestmark = [
pytest.mark.usefixtures("logged_in"),
pytest.mark.long_running,
pytest.mark.ignore_stream("5.2"),
pytest.mark.ignore_stream("upstream"),
pytest.mark.meta(server_roles="+automate")
]
item_name = generate_random_string()
METHOD_TORSO = """
# Method for logging
def log(level, message)
@method = 'Service Dialog Provider Select'
$evm.log(level, "#{@method} - #{message}")
end
# Start Here
log(:info, " - Listing Root Object Attributes:") if @debug
$evm.root.attributes.sort.each { |k, v| $evm.log('info', "#{@method} - \t#{k}: #{v}") if @debug }
log(:info, "===========================================") if @debug
dialog_field = $evm.object
dialog_field['data_type'] = 'string'
dialog_field['required'] = 'true'
dialog_field['sort_by'] = 'value'
list = []
list << ['item_value', 'item_description']
dialog_field['values'] = list
"""
@pytest.yield_fixture(scope="function")
def dialog(copy_instance, create_method):
    """Create a service dialog with a dynamic drop-down element; yield its label."""
    label = "dialog_" + generate_random_string()
    element_data = {
        "ele_label": "ele_" + rand.generate_random_string(),
        "ele_name": rand.generate_random_string(),
        "ele_desc": "my ele desc",
        "choose_type": "Drop Down Dynamic List",
        "field_entry_point": "InspectMe",
        "field_show_refresh_button": True,
    }
    service_dialog = ServiceDialog(
        label=label,
        description="my dialog",
        submit=True,
        cancel=True,
        tab_label="tab_" + rand.generate_random_string(),
        tab_desc="my tab desc",
        box_label="box_" + rand.generate_random_string(),
        box_desc="my box desc",
    )
    service_dialog.create(element_data)
    flash.assert_success_message('Dialog "%s" was added' % label)
    yield label
@pytest.yield_fixture(scope="function")
def catalog():
    """Create a catalog with a random name and yield the Catalog object."""
    catalog = Catalog(name="cat_" + generate_random_string(),
                      description="my catalog")
    catalog.create()
    yield catalog
@pytest.fixture(scope="function")
def copy_domain(request):
    """Create an enabled automate domain; delete it at teardown if still present."""
    domain = Domain(name="new_domain", enabled=True)
    domain.create()
    def _teardown():
        if domain.exists():
            domain.delete()
    request.addfinalizer(_teardown)
    return domain
@pytest.fixture(scope="function")
def create_method(request, copy_domain):
    """Create the System/Request/InspectMe automate method in the copied domain."""
    system_ns = Namespace(name="System", parent=copy_domain)
    request_cls = Class(name="Request", namespace=system_ns)
    method = Method(name="InspectMe", data=METHOD_TORSO, cls=request_cls)
    method.create()
    return method
@pytest.fixture(scope="function")
def copy_instance(request, copy_domain):
    """Copy ManageIQ's System/Request/InspectMe instance into the new domain."""
    locked_domain = Domain(name="ManageIQ (Locked)", enabled=True)
    request_cls = Class(
        name="Request",
        namespace=Namespace(name="System", parent=locked_domain),
    )
    Instance(name="InspectMe", cls=request_cls).copy_to(copy_domain)
def test_dynamicdropdown_dialog(dialog, catalog):
    """Order a generic catalog item whose dialog contains a dynamic drop-down."""
    name = generate_random_string()
    catalog_item = CatalogItem(
        item_type="Generic",
        name=name,
        description="my catalog",
        display_in=True,
        catalog=catalog.name,
        dialog=dialog,
    )
    catalog_item.create()
    ServiceCatalogs("service_name").order(catalog_item.catalog, catalog_item)
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,155
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/web_ui/menu.py
|
import ui_navigate as nav
from cfme.fixtures import pytest_selenium as sel
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from utils import version, classproperty
from utils.wait import wait_for
class Loc(object):
    """Version-dependent XPath locators for the top-level navigation menu."""
    @classproperty
    def toplevel_tabs_loc(cls):
        # Container <ul> holding the top-level tabs; markup changed in 5.4.
        return version.pick({version.LOWEST: '//div[@class="navbar"]/ul',
            '5.4': '//nav[contains(@class, "navbar")]/div/ul[@id="maintab"]'})
    @classproperty
    def toplevel_loc(cls):
        # Anchor of one top-level tab; format() with the tab title.
        return version.pick({version.LOWEST: ('//div[@class="navbar"]/ul/li'
            '/a[normalize-space(.)="{}"]'),
            '5.4': cls.toplevel_tabs_loc + ('/li/a[normalize-space(.)="{}"'
            'and (contains(@class, "visible-lg"))]')})
    @classproperty
    def secondlevel_first_item_loc(cls):
        # First entry of a tab's drop-down; format() with the tab title.
        return version.pick({version.LOWEST: ('//div[@class="navbar"]/ul/li'
            '/a[normalize-space(.)="{}"]/../ul/li[1]/a'),
            '5.4': cls.toplevel_tabs_loc + '/li/a[normalize-space(.)="{}"]/../ul/li[1]/a'})
    @classproperty
    def inactive_box_loc(cls):
        # Drop-down boxes of tabs that are not currently selected.
        # NOTE(review): both version branches are identical strings here.
        return version.pick({version.LOWEST: ("//ul[@id='maintab']//"
            "ul[contains(@class, 'inactive')]"),
            '5.4': "//ul[@id='maintab']//ul[contains(@class, 'inactive')]"})
    @classproperty
    def a(cls):
        # Relative locator for a tab's anchor element.
        return version.pick({version.LOWEST: "./a",
            '5.4': "./a[contains(@class, 'visible-lg')]"})
def any_box_displayed():
    """Checks whether any of the not-currently-selected toplevel items is hovered (active).
    First part of the condition is for the 5.3+ pop-up, second is for 5.2.
    """
    # version.pick returns a zero-arg callable; it is invoked immediately.
    return version.pick({
        version.LOWEST:
            lambda: sel.is_displayed("//a[contains(@class, 'maintab_active')]", _no_deeper=True),
        "5.3":
            lambda: any(map(
                lambda e: sel.is_displayed(e, _no_deeper=True),
                sel.elements(Loc.inactive_box_loc))),
        "5.4":
            # 5.4 uses bootstrap dropdowns; an open one carries the 'open' class.
            lambda: sel.is_displayed(
                "//li[contains(@class, 'dropdown') and contains(@class, 'open')]", _no_deeper=True)
    })()
def get_top_level_element(title):
    """Return the ``li`` element wrapping the named top-level menu entry."""
    locator = (Loc.toplevel_loc + "/..").format(title)
    return sel.element(locator)
def open_top_level(title):
    """Click the given top-level menu entry to open its section."""
    top_li = get_top_level_element(title)
    sel.raw_click(sel.element(Loc.a, root=top_li))
def get_second_level_element(top_level_el, title):
    """Return the ``li`` element for a second-level entry under *top_level_el*."""
    locator = "./ul/li/a[normalize-space(.)='{}']/..".format(title)
    return sel.element(locator, root=top_level_el)
def open_second_level(top_level_element, title):
    """Click the named entry in the second-level (drop-down) menu."""
    entry = get_second_level_element(top_level_element, title)
    sel.raw_click(sel.element("./a", root=entry))
def get_current_toplevel_name():
    """Returns text of the currently selected top level menu item."""
    # Close any hover pop-up first so the selected tab can be read reliably.
    get_rid_of_the_menu_box()
    return sel.text(
        version.pick({
            "5.4": "//ul[@id='maintab']/li[not(contains(@class, 'drop'))]/a[2]",
            "5.3": "//ul[@id='maintab']/li[not(contains(@class, 'in'))]/a",
            version.LOWEST: "//ul[@id='maintab']/li/ul[not(contains(@style, 'none'))]/../a"
        })).encode("utf-8").strip()
def get_rid_of_the_menu_box():
    """Moves the mouse pointer away from the menu location and waits for the popups to hide."""
    # '#tP' is presumably a neutral element outside the menu area to park the
    # pointer on -- TODO confirm against the page template.
    ActionChains(sel.browser()).move_to_element(sel.element("#tP")).perform()
    wait_for(lambda: not any_box_displayed(), num_sec=10, delay=0.1, message="menu box")
# Dictionary of (nav destination name, section title) section tuples
# Keys are toplevel sections (the main tabs), values are a supertuple of secondlevel sections
# The destination names are the ui_navigate tree node names used across the suite.
sections = {
    ('cloud_intelligence', 'Cloud Intelligence'): (
        ('dashboard', 'Dashboard'),
        ('reports', 'Reports'),
        ('chargeback', 'Chargeback'),
        ('timelines', 'Timelines'),
        ('rss', 'RSS')
    ),
    ('services', 'Services'): (
        ('my_services', 'My Services'),
        ('services_catalogs', 'Catalogs'),
        ('services_workloads', 'Workloads'),
        ('services_requests', 'Requests')
    ),
    ('clouds', 'Clouds'): (
        ('clouds_providers', 'Providers'),
        ('clouds_availability_zones', 'Availability Zones'),
        ('clouds_tenants', 'Tenants'),
        ('clouds_flavors', 'Flavors'),
        ('clouds_security_groups', 'Security Groups'),
        ('clouds_instances', 'Instances'),
        ('clouds_stacks', 'Stacks')
    ),
    ('infrastructure', 'Infrastructure'): (
        ('infrastructure_providers', 'Providers'),
        ('infrastructure_clusters', 'Clusters'),
        ('infrastructure_hosts', 'Hosts'),
        ('infrastructure_virtual_machines', 'Virtual Machines'),
        ('infrastructure_resource_pools', 'Resource Pools'),
        ('infrastructure_datastores', 'Datastores'),
        ('infrastructure_repositories', 'Repositories'),
        ('infrastructure_pxe', 'PXE'),
        ('infrastructure_requests', 'Requests'),
        ('infrastructure_config_management', 'Configuration Management')
    ),
    ('storage', 'Storage'): (
        ('filers', 'Filers'),
        ('volumes', 'Volumes'),
        ('luns', 'LUNs'),
        ('file_shares', 'File Shares'),
        ('storage_managers', 'Storage Managers')
    ),
    ('control', 'Control'): (
        ('control_explorer', 'Explorer'),
        ('control_simulation', 'Simulation'),
        ('control_import_export', 'Import / Export'),
        ('control_log', 'Log')
    ),
    ('automate', 'Automate'): (
        ('automate_explorer', 'Explorer'),
        ('automate_simulation', 'Simulation'),
        ('automate_customization', 'Customization'),
        ('automate_import_export', 'Import / Export'),
        ('automate_log', 'Log'),
        ('automate_requests', 'Requests')
    ),
    ('optimize', 'Optimize'): (
        ('utilization', 'Utilization'),
        ('planning', 'Planning'),
        ('bottlenecks', 'Bottlenecks')
    ),
    ('configure', 'Configure'): (
        ('my_settings', 'My Settings'),
        ('tasks', 'Tasks'),
        ('configuration', 'Configuration'),
        ('smartproxies', 'SmartProxies'),
        ('about', 'About')
    )
}
def is_page_active(toplevel, secondlevel=None):
    """Return True when the UI currently shows the given top-level section and,
    if provided, the given second-level page."""
    try:
        current = get_current_toplevel_name()
    except NoSuchElementException:
        return False
    if current != toplevel:
        return False
    if not secondlevel:
        return True
    try:
        sel.element(version.pick({
            "5.4": ("//nav[contains(@class, 'navbar')]//ul/li[@class='active']"
                    "/a[normalize-space(.)='{}']/..".format(secondlevel)),
            version.LOWEST: ("//div[@class='navbar']//ul/li[@class='active']"
                             "/a[normalize-space(.)='{}']/..".format(secondlevel))
        }))
    except NoSuchElementException:
        return False
    return True
def nav_to_fn(toplevel, secondlevel=None):
    """Return a navigation callback opening the given top-level section and,
    optionally, the named second-level page. Used to build the nav tree below."""
    def f(_):
        if not is_page_active(toplevel):
            try:
                # Try to circumvent the issue on fir
                get_rid_of_the_menu_box()
                open_top_level(toplevel)
                get_rid_of_the_menu_box()
                if get_current_toplevel_name() != toplevel:
                    # Infrastructure / Requests workaround
                    sel.move_to_element(get_top_level_element(toplevel))
                    # Using pure move_to_element to not move the mouse anywhere else
                    # So in this case, we move the mouse to the first item of the second level
                    ActionChains(sel.browser())\
                        .move_to_element(sel.element(Loc.secondlevel_first_item_loc.format(
                            toplevel)))\
                        .click()\
                        .perform()
                    get_rid_of_the_menu_box()
                    # Now when we went directly to the first item, everything should just work
                    tl = get_current_toplevel_name()
                    if tl != toplevel:
                        raise Exception("Navigation screwed! (wanted {}, got {}".format(toplevel,
                                                                                        tl))
            except NoSuchElementException:
                if visible_toplevel_tabs():  # Target menu is missing
                    raise
                else:
                    return  # no menu at all, assume single permission
        # Can't do this currently because silly menu traps us
        # if is_page_active(toplevel, secondlevel):
        #     return
        if secondlevel is not None:
            get_rid_of_the_menu_box()
            open_second_level(get_top_level_element(toplevel), secondlevel)
            get_rid_of_the_menu_box()
    return f
def reverse_lookup(toplevel_path, secondlevel_path=None):
    """Reverse lookup for navigation destinations defined in this module, based on menu text

    Usage:
        # Returns 'clouds'
        reverse_lookup('Clouds')
        # Returns 'clouds_providers'
        reverse_lookup('Clouds', 'Providers')
        # Returns 'automate_import_export'
        reverse_lookup('Automate', 'Import / Export')

    Note:
        It may be tempting to use this when you don't know the name of a page, e.g.:
            go_to(reverse_lookup('Infrastructure', 'Providers'))
        Don't do that; use the nav tree name.
    """
    target = ('%s/%s' % (toplevel_path, secondlevel_path)
              if secondlevel_path else toplevel_path)
    for (toplevel_dest, toplevel), secondlevels in sections.items():
        if target == toplevel:
            return toplevel_dest
        for secondlevel_dest, secondlevel in secondlevels:
            if target == '%s/%s' % (toplevel, secondlevel):
                return secondlevel_dest
    # Implicitly returns None when nothing matches (same as the original).
def visible_toplevel_tabs():
    """Return the text of every visible top-level menu tab."""
    link_loc = version.pick({
        "5.4": 'li/a[2]',
        version.LOWEST: 'li/a'})
    return [sel.text(elem)
            for elem in sel.elements(link_loc, root=Loc.toplevel_tabs_loc)]
def visible_pages():
    """Return a list of all the menu pages currently visible top- and second-level pages

    Mainly useful for RBAC testing
    """
    displayed_menus = []
    # Hover each visible tab and collect the second-level link texts beneath it.
    for menu_name in visible_toplevel_tabs():
        menu_elem = sel.element(Loc.toplevel_loc.format(menu_name))
        sel.move_to_element(menu_elem)
        displayed_menus.extend(
            (menu_name, sel.text(submenu_elem))
            for submenu_elem in sel.elements('../ul/li/a', root=menu_elem))
    # Reverse-lookup into nav destination names so callers can compare with RBAC data.
    return sorted(reverse_lookup(*pair) for pair in displayed_menus)
# Construct the nav tree based on sections
_branches = dict()
# The main tab destination is usually the first secondlevel page in that tab
# Since this is redundant, it's arguable that the toplevel tabs should be
# nav destination at all; they're included here "just in case". The toplevel
# and secondlevel destinations exist at the same level of nav_tree because the
# secondlevel destinations don't depend on the toplevel nav taking place to reach
# their destination.
for (toplevel_dest, toplevel), secondlevels in sections.items():
    for secondlevel_dest, secondlevel in secondlevels:
        _branches[secondlevel_dest] = nav_to_fn(toplevel, secondlevel)
    # The tab itself becomes a branch with an (empty) subtree.
    _branches[toplevel_dest] = [nav_to_fn(toplevel, None), {}]
nav.add_branch('toplevel', _branches)
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,156
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/services/test_catalog_item.py
|
import pytest
from cfme.web_ui import flash
from cfme.services.catalogs.catalog_item import CatalogItem
from cfme.automate.service_dialogs import ServiceDialog
from cfme.services.catalogs.catalog import Catalog
from utils.randomness import generate_random_string
from utils import error
from utils.update import update
import cfme.tests.configure.test_access_control as tac
import utils.randomness as rand
# Every test needs a logged-in session; the catalog item UI differs in 5.2.
pytestmark = [pytest.mark.usefixtures("logged_in"),
              pytest.mark.ignore_stream("5.2")]
@pytest.yield_fixture(scope="function")
def dialog():
    """Create a service dialog containing a text box and yield its label."""
    label = "dialog_" + generate_random_string()
    element_data = {
        "ele_label": "ele_" + rand.generate_random_string(),
        "ele_name": rand.generate_random_string(),
        "ele_desc": "my ele desc",
        "choose_type": "Text Box",
        "default_text_box": "default value",
    }
    service_dialog = ServiceDialog(
        label=label,
        description="my dialog",
        submit=True,
        cancel=True,
        tab_label="tab_" + rand.generate_random_string(),
        tab_desc="my tab desc",
        box_label="box_" + rand.generate_random_string(),
        box_desc="my box desc",
    )
    service_dialog.create(element_data)
    flash.assert_success_message('Dialog "%s" was added' % label)
    yield label
@pytest.yield_fixture(scope="function")
def catalog():
    """Create a catalog and yield its name (a string, not the object)."""
    name = "cat_" + generate_random_string()
    Catalog(name=name, description="my catalog").create()
    yield name
@pytest.yield_fixture(scope="function")
def catalog_item(dialog, catalog):
    """Yield an un-created generic CatalogItem wired to the dialog/catalog fixtures."""
    item = CatalogItem(
        item_type="Generic",
        name=generate_random_string(),
        description="my catalog",
        display_in=True,
        catalog=catalog,
        dialog=dialog,
    )
    yield item
def test_create_catalog_item(catalog_item):
    # Basic create + success flash message check.
    catalog_item.create()
    flash.assert_success_message('Service Catalog Item "%s" was added' %
                                 catalog_item.name)
def test_update_catalog_item(catalog_item):
    # The update context manager pushes the changed fields through the UI.
    catalog_item.create()
    with update(catalog_item):
        catalog_item.description = "my edited description"
def test_delete_catalog_item(catalog_item):
    catalog_item.create()
    catalog_item.delete()
def test_add_button_group(catalog_item):
    catalog_item.create()
    catalog_item.add_button_group()
def test_add_button(catalog_item):
    catalog_item.create()
    catalog_item.add_button()
def test_edit_tags(catalog_item):
    catalog_item.create()
    catalog_item.edit_tags("Cost Center 001")
@pytest.mark.xfail(message='downstream - https://bugzilla.redhat.com/show_bug.cgi?id=996789 ;'
                           'upstream - https://bugzilla.redhat.com/show_bug.cgi?id=1092651')
def test_catalog_item_duplicate_name(catalog_item):
    # Creating the same item twice must be rejected by name uniqueness.
    catalog_item.create()
    with error.expected("Name has already been taken"):
        catalog_item.create()
@pytest.mark.meta(blockers=[1130301])
def test_permissions_catalog_item_add(setup_cloud_providers, catalog_item):
    """ Tests that a catalog can be added only with the right permissions"""
    tac.single_task_permission_test([['Services', 'Catalogs Explorer', 'Catalog Items']],
                                    {'Add Catalog Item': catalog_item.create})
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,157
|
petrblaho/cfme_tests
|
refs/heads/master
|
/utils/tests/test_randomness.py
|
# -*-coding: utf-8
import pytest
from utils import randomness
# Pure unit tests: no appliance state is touched and no browser is needed.
pytestmark = [
    pytest.mark.nondestructive,
    pytest.mark.skip_selenium,
]
def test_generate_random_string_noargs():
    random_string = randomness.generate_random_string()
    # 8 is the default length
    assert len(random_string) == 8
def test_generate_random_string_args():
    # Explicit length must be honoured.
    length = 16
    random_string = randomness.generate_random_string(length)
    assert len(random_string) == length
def test_generate_random_int_noargs():
    # maxint is the default max, so no need to check against it
    random_int = randomness.generate_random_int()
    assert random_int > 0
def test_generate_random_int_args():
    # With max of 1 the result must be 0 or 1.
    maxvalue = 1
    random_int = randomness.generate_random_int(maxvalue)
    assert 0 <= random_int <= maxvalue
def test_generate_random_uuid():
    """Not sure if there's a better test than a string of normal uuid length (36)"""
    uuid = randomness.generate_random_uuid_as_str()
    assert len(uuid) == 36
    assert isinstance(uuid, basestring)
def test_randomness_fixtures(random_uuid_as_string, random_string):
    """Make sure the fixtures work as intended"""
    assert len(random_uuid_as_string) == 36
    assert isinstance(random_uuid_as_string, basestring)
    assert isinstance(random_string, basestring)
@pytest.fixture(scope="class")
def random_stash(request):
    """Stash a randomizer template plus two independently randomized copies on the class."""
    template = {
        'str': '{random_str}',
        'tuple': ('{random_str}',),
        'list': ['{random_str}'],
        'set': set(['{random_str}']),
        'notrandom': '{random_thisisabogusrandomizer}',
    }
    request.cls.before = template
    request.cls.after = randomness.RandomizeValues.from_dict(template)
    request.cls.again = randomness.RandomizeValues.from_dict(template)
@pytest.mark.usefixtures("random_stash")
class TestRandomizeValues(object):
    """Checks RandomizeValues.from_dict against the stashed template (see random_stash)."""
    def test_randomizevalues(self):
        # These should be different in the two dicts
        assert self.after['str'] != self.before['str']
        assert self.after['tuple'] != self.before['tuple']
        assert self.after['list'] != self.before['list']
        assert self.after['set'] != self.before['set']
    def test_randomizevalues_type(self):
        """Object type should still be dict"""
        assert isinstance(self.after, type(self.before))
    def test_randomizevalues_bogus_randomizer(self):
        """Unmatched randomizer shouldn't change"""
        assert self.after['notrandom'] == self.before['notrandom']
    def test_randomizevalues_again(self):
        """If we generate the dict again, it should be newly randomized"""
        assert self.after != self.again
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,158
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/integration/test_ipa_external.py
|
# -*- coding: utf-8 -*-
import pytest
from cfme.configure.access_control import Group, User
from cfme import login
from utils.conf import cfme_data
from utils.providers import setup_a_provider
@pytest.fixture(scope="module")
def setup_first_provider():
    """Ensure at least one validated provider exists before the module's tests run."""
    setup_a_provider(validate=True, check_existing=True)
@pytest.mark.ignore_stream("5.2")  # Old version can't do IPA
def test_external_auth_ipa(request, setup_first_provider, configure_external_auth_ipa_module):
    """Log in through external IPA auth and verify the resolved full name.

    Creates a 'cfme' group (EvmRole-user), logs in with the IPA credentials
    from the ``ipa_test`` yaml section, then asserts the displayed full name.
    Group/user cleanup and re-login as admin are registered as finalizers.
    """
    # BUG FIX: dict.get() never raises KeyError, so the original
    # try/except-KeyError skip was dead code and a missing section crashed
    # later at data["fullname"]. Check for a missing/empty section explicitly.
    data = cfme_data.get("ipa_test", {})
    if not data:
        pytest.skip("No ipa_test section in yaml")
    group = Group(description='cfme', role="EvmRole-user")
    request.addfinalizer(group.delete)
    group.create()
    user = User(name=data["fullname"])
    request.addfinalizer(user.delete)
    # Make sure the suite is logged back in as admin regardless of outcome.
    request.addfinalizer(login.login_admin)
    login.login(data["username"], data["password"])
    assert login.current_full_name() == data["fullname"]
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,159
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/configure/test_register_appliance.py
|
# -*- coding: utf-8 -*-
import pytest
import re
from cfme.configure import red_hat_updates
from cfme.web_ui import InfoBlock, flash
from utils import conf, error, version
from utils.blockers import BZ
from utils.ssh import SSHClient
from utils.testgen import parametrize
from utils.wait import wait_for
REG_METHODS = ('rhsm', 'sat5', 'sat6')
"""
Tests RHSM, Sat5 and Sat6 registration and checks result over ssh
(update is not performed - it is non-destructive).
For setup, see test_update_appliances.py (red_hat_updates section in cfme_data yaml).
These tests do not check registration results in the web UI, only through SSH.
"""
def pytest_generate_tests(metafunc):
    """Parametrize registration tests over the configured registration methods.

    For each method in REG_METHODS whose cfme_data entry (for the current
    stream) has ``test_registration`` enabled, a 'proxy_off' parameter set is
    always generated, plus a 'proxy_on' set when an HTTP proxy is configured
    and the method opts into it via ``use_http_proxy``.
    """
    argnames = ['reg_method', 'reg_data', 'proxy_url', 'proxy_creds']
    argvalues = []
    idlist = []
    stream = version.current_stream()
    try:
        all_reg_data = conf.cfme_data.get('redhat_updates', {})['streams'][stream]
    except KeyError:
        # No registration data for this stream -> do not collect these tests.
        pytest.mark.uncollect(metafunc.function)
        return
    if 'reg_method' in metafunc.fixturenames:
        for reg_method in REG_METHODS:
            # We cannot validate against Satellite 5
            if metafunc.function.__name__ == 'test_rh_creds_validation' and reg_method == 'sat5':
                continue
            reg_data = all_reg_data.get(reg_method, None)
            if not reg_data or not reg_data.get('test_registration', False):
                continue
            proxy_data = conf.cfme_data['redhat_updates'].get('http_proxy', False)
            if proxy_data and reg_data.get('use_http_proxy', False):
                # Extra parameter set that routes registration through the proxy.
                proxy_url = proxy_data['url']
                proxy_creds_key = proxy_data['credentials']
                proxy_creds = conf.credentials[proxy_creds_key]
                argval = [reg_method, reg_data, proxy_url, proxy_creds]
                argid = '{}-{}'.format(reg_method, 'proxy_on')
                idlist.append(argid)
                argvalues.append(argval)
            # Proxy-less parameter set is always generated.
            argval = [reg_method, reg_data, None, None]
            argid = '{}-{}'.format(reg_method, 'proxy_off')
            idlist.append(argid)
            argvalues.append(argval)
        parametrize(metafunc, argnames, argvalues, ids=idlist, scope="module")
# We must make sure that org ID is unset (because of BZ#1048997)
@pytest.fixture
def unset_org_id():
    """Make sure the organization ID is unset before registering.

    Workaround for BZ#1048997: saves a dummy Sat5 registration with an empty
    ``organization`` field.  When there is nothing to change the save button
    is dimmed and the resulting exception is deliberately swallowed.
    """
    try:
        red_hat_updates.update_registration(
            service='sat5',
            url="http://not.used.for.reg/XMLRPC",
            username='not_used_for_reg',
            password='not_used_for_reg',
            organization=''
        )
    except Exception as ex:
        # Did this happen because the save button was dimmed?
        try:
            # If so, its fine - just return
            if red_hat_updates.form_buttons.save.is_dimmed:
                return
        except:
            # And if we cant access the save button
            pass
        # Something else happened so return the original exception
        raise ex
def rhsm_unregister():
    """Fully unregister the appliance from RHSM over SSH."""
    commands = (
        'subscription-manager remove --all',
        'subscription-manager unregister',
        'subscription-manager clean',
    )
    with SSHClient() as ssh_client:
        for command in commands:
            ssh_client.run_command(command)
def sat5_unregister():
    """Unregister from Satellite 5 by deleting the systemid file over SSH."""
    with SSHClient() as ssh_client:
        ssh_client.run_command('rm -f /etc/sysconfig/rhn/systemid')
def sat6_unregister():
    """Undo a Satellite 6 registration over SSH.

    Unsubscribes, restores the backed-up rhsm.conf and removes the
    katello-ca-consumer package.
    """
    commands = (
        'subscription-manager remove --all',
        'subscription-manager unregister',
        'subscription-manager clean',
        'mv -f /etc/rhsm/rhsm.conf.kat-backup /etc/rhsm/rhsm.conf',
        'rpm -qa | grep katello-ca-consumer | xargs rpm -e',
    )
    with SSHClient() as ssh_client:
        for command in commands:
            ssh_client.run_command(command)
def is_registration_complete(used_repo_or_channel):
    """Check over SSH that registration finished.

    True when every repo/channel named in *used_repo_or_channel*
    (space-separated) appears in ``yum repolist enabled`` output and the
    repolist reports a non-zero package count.
    """
    with SSHClient() as ssh_client:
        _, output = ssh_client.run_command('yum repolist enabled')
        # Package count check is the same for every repo; compute it once.
        has_packages = re.search(r'repolist: [^0]', output) is not None
        return all(
            name in output and has_packages
            for name in used_repo_or_channel.split(' ')
        )
@pytest.mark.ignore_stream("upstream")
@pytest.mark.meta(
    blockers=[
        BZ(1198111, unblock=lambda reg_method: reg_method not in {'rhsm', 'sat6'})
    ]
)
def test_rh_creds_validation(request, unset_org_id,
                             reg_method, reg_data, proxy_url, proxy_creds):
    """Fill the registration form and validate the credentials, then cancel.

    Exercises only the validation path (``cancel=True``), so nothing is
    saved and no actual registration takes place.
    """
    if reg_method in ('rhsm', 'sat6'):
        repo_or_channel = reg_data.get('enable_repo', None)
    else:
        repo_or_channel = reg_data.get('add_channel', None)
    # With no explicit repo/channel configured, fall back to the default.
    if not repo_or_channel:
        set_default_repo = True
    else:
        set_default_repo = False
    if proxy_url:
        use_proxy = True
        proxy_username = proxy_creds['username']
        proxy_password = proxy_creds['password']
    else:
        use_proxy = False
        proxy_url = None
        proxy_username = None
        proxy_password = None
    red_hat_updates.update_registration(
        service=reg_method,
        url=reg_data['url'],
        username=conf.credentials[reg_method]['username'],
        password=conf.credentials[reg_method]['password'],
        repo_name=repo_or_channel,
        organization=reg_data.get('organization', None),
        use_proxy=use_proxy,
        proxy_url=proxy_url,
        proxy_username=proxy_username,
        proxy_password=proxy_password,
        set_default_repository=set_default_repo,
        cancel=True
    )
@pytest.mark.ignore_stream("upstream")
@pytest.mark.meta(
    blockers=[
        BZ(1102724, unblock=lambda proxy_url: proxy_url is None),
        # Sat6 requires validation to register
        BZ(1198111, unblock=lambda reg_method: reg_method != 'sat6')
    ]
)
def test_rh_registration(request, unset_org_id,
                         reg_method, reg_data, proxy_url, proxy_creds):
    """Register the appliance and wait until the repo is usable over SSH.

    Saves the registration form, triggers registration of all appliances,
    installs a finalizer that unregisters via the matching method, then
    polls ``yum repolist`` over SSH until the expected repo/channel shows up.
    """
    if reg_method in ('rhsm', 'sat6'):
        repo_or_channel = reg_data.get('enable_repo', None)
    else:
        repo_or_channel = reg_data.get('add_channel', None)
    # With no explicit repo/channel configured, fall back to the default.
    if not repo_or_channel:
        set_default_repo = True
    else:
        set_default_repo = False
    if proxy_url:
        use_proxy = True
        proxy_username = proxy_creds['username']
        proxy_password = proxy_creds['password']
    else:
        use_proxy = False
        proxy_url = None
        proxy_username = None
        proxy_password = None
    red_hat_updates.update_registration(
        service=reg_method,
        url=reg_data['url'],
        username=conf.credentials[reg_method]['username'],
        password=conf.credentials[reg_method]['password'],
        repo_name=repo_or_channel,
        organization=reg_data.get('organization', None),
        use_proxy=use_proxy,
        proxy_url=proxy_url,
        proxy_username=proxy_username,
        proxy_password=proxy_password,
        set_default_repository=set_default_repo,
        # Satellite 6 registration requires validation to be able to choose organization
        validate=False if reg_method != 'sat6' else True
    )
    # Read back which repo/channel the appliance will actually use.
    used_repo_or_channel = InfoBlock('Red Hat Software Updates', 'Update Repository').text
    red_hat_updates.register_appliances()  # Register all
    # Ensure the appliance is unregistered again after the test.
    if reg_method == 'rhsm':
        request.addfinalizer(rhsm_unregister)
    elif reg_method == 'sat5':
        request.addfinalizer(sat5_unregister)
    else:
        request.addfinalizer(sat6_unregister)
    wait_for(
        func=is_registration_complete,
        func_args=[used_repo_or_channel],
        delay=40,
        num_sec=400,
        fail_func=red_hat_updates.register_appliances
    )
@pytest.mark.ignore_stream("upstream")
def test_sat5_incorrect_url_format_check(request, unset_org_id):
    """A Sat5 URL that does not match the required XMLRPC format is rejected."""
    # Check that we weren't allowed to save the data
    with error.expected('No matching flash message'):
        red_hat_updates.update_registration(
            service="sat5",
            url="url.not.matching.format.example.com",
            username="not_used",
            password="not_used"
        )
    # Confirm that it was the Sat5 url check that blocked it
    flash.assert_message_contain("https://server.example.com/XMLRPC")
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,160
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/intelligence/reports/test_report_chargeback.py
|
# -*- coding: utf-8 -*-
import pytest
import cfme.web_ui.flash as flash
from cfme.intelligence.reports.reports import CustomReport
from utils.providers import setup_a_provider
from utils.randomness import generate_random_string
from utils.log import logger
@pytest.fixture(scope="module")
def setup_first_provider():
    """Make sure a validated provider exists before any test in this module."""
    setup_a_provider(check_existing=True, validate=True)
def _cleanup_report(report):
    """Best-effort deletion of *report*; failures are logged, never raised.

    Used as a test finalizer, so it must not mask the test outcome by
    raising from teardown.

    Args:
        report: The :py:class:`CustomReport` instance to delete.
    """
    try:
        logger.info('Cleaning up report {}'.format(report.menu_name))
        report.delete()
    except Exception:
        # A bare ``except:`` would also swallow KeyboardInterrupt/SystemExit;
        # catching Exception lets interpreter-level signals propagate.
        logger.warning('Failed to clean up report {}'.
                       format(report.menu_name))
# These tests are meant to catch issues such as BZ 1203022
def test_charge_report_filter_owner(setup_first_provider, request):
    """Tests creation of chargeback report that is filtered by owner
    """
    report = CustomReport(
        menu_name=generate_random_string(),
        title=generate_random_string(),
        base_report_on="Chargebacks",
        report_fields=[
            "Network I/O Used",
            "Network I/O Used Cost",
            "Storage Used",
            "Storage Used Cost",
            "Disk I/O Used",
            "Disk I/O Used Cost",
            "Owner",
            "Total Cost",
        ],
        filter_show_costs="Owner",
        filter_owner="Administrator",
    )
    report.create()
    # Bind the report into the finalizer's closure directly.
    request.addfinalizer(lambda: _cleanup_report(report))
    flash.assert_message_match('Report "{}" was added'.format(report.menu_name))
    report.queue(wait_for_finish=True)
def test_charge_report_filter_tag(setup_first_provider, request):
    """Tests creation of chargeback report that is filtered by tag
    """
    report = CustomReport(
        menu_name=generate_random_string(),
        title=generate_random_string(),
        base_report_on="Chargebacks",
        report_fields=[
            "CPU Used",
            "CPU Used Cost",
            "Memory Used",
            "Memory Used Cost",
            "Owner",
            "vCPUs Allocated Cost",
            "Total Cost",
        ],
        filter_show_costs="My Company Tag",
        filter_tag_cat="Location",
        filter_tag_value="Chicago",
    )
    report.create()
    # Bind the report into the finalizer's closure directly.
    request.addfinalizer(lambda: _cleanup_report(report))
    flash.assert_message_match('Report "{}" was added'.format(report.menu_name))
    report.queue(wait_for_finish=True)
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,161
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/intelligence/reports/menus.py
|
# -*- coding: utf-8 -*-
"""Module handling report menus contents"""
from contextlib import contextmanager
from cfme.fixtures import pytest_selenium as sel
from cfme.intelligence.reports.ui_elements import FolderManager
from cfme.web_ui import Region, Tree, accordion, form_buttons, menu
from cfme.web_ui.multibox import MultiBoxSelect
from utils import version
from utils.log import logger
# Navigation: jump to the "Edit Report Menus" accordion entry for a group.
menu.nav.add_branch(
    "reports",
    {
        "report_menus_group":
        lambda ctx: accordion.tree("Edit Report Menus", "All EVM Groups", ctx["group"])
    }
)
# Tree of report menu folders; the locator differs between appliance versions.
reports_tree = Tree({
    "5.3": "//div[@id='menu_roles_treebox']/ul",
    version.LOWEST: "//div[@id='treebox']//table[not(tbody/tr/td[@class='hiddenRow'])]"
})
# Folder-content editor shown when a tree node is selected.
manager = FolderManager("//div[@id='folder_lists']/table")
# Two-pane selector used to move reports in and out of a menu subfolder.
report_select = MultiBoxSelect(
    "//select[@id='available_reports']",
    "//select[@id='selected_reports']",
    "//a[@title='Move selected reports right']/img",
    "//a[@title='Move selected reports left']/img",
)
# Commit/discard controls for pending menu-management changes.
buttons = Region(locators=dict(
    commit="//a[@title='Commit report management changes']/img",
    discard="//a[@title='Discard report management changes']/img",
))
default_button = form_buttons.FormButton("Reset All menus to CFME defaults")
def get_folders(group):
    """List the top-level report menu folders of a user group.

    Args:
        group: User group whose menu is inspected.
    """
    context = {"group": group}
    sel.force_navigate("report_menus_group", context=context)
    reports_tree.click_path("Top Level")
    return manager.fields
def get_subfolders(group, folder):
    """List the sub-folders under *folder* for a user group.

    Args:
        group: User group whose menu is inspected.
        folder: Folder whose children are read.
    """
    context = {"group": group}
    sel.force_navigate("report_menus_group", context=context)
    reports_tree.click_path("Top Level", folder)
    return manager.fields
def add_folder(group, folder):
    """Adds a folder under top-level.

    Args:
        group: User group.
        folder: Name of the new folder.
    """
    # Bug fix: manage_folder() requires the group to navigate to the right
    # menu; it was previously called with no arguments (TypeError at runtime).
    with manage_folder(group) as top_level:
        top_level.add(folder)
def add_subfolder(group, folder, subfolder):
    """Adds a subfolder under specified folder.

    Args:
        group: User group.
        folder: Name of the folder.
        subfolder: Name of the new subfolder.
    """
    # Bug fix: manage_folder(group, folder) -- the group was previously
    # omitted, so ``folder`` was silently passed as the group argument and
    # no folder was ever selected.
    with manage_folder(group, folder) as fldr:
        fldr.add(subfolder)
def reset_to_default(group):
    """Clicks the `Default` button and saves, restoring CFME default menus.

    Args:
        group: Group to set to Default
    """
    sel.force_navigate("report_menus_group", context={"group": group})
    sel.click(default_button)
    sel.click(form_buttons.save)
@contextmanager
def manage_folder(group, folder=None):
    """Context manager to use when modifying the folder contents.

    On a clean exit the pending changes are committed and saved; on
    :py:class:`FolderManager._BailOut` or any other exception they are
    discarded (other exceptions are re-raised afterwards).

    Args:
        group: User group.
        folder: Which folder to manage. If None, top-level will be managed.
    Returns: Context-managed :py:class:`cfme.intelligence.reports.ui_elements.FolderManager` inst.
    """
    sel.force_navigate("report_menus_group", context={"group": group})
    if folder is None:
        reports_tree.click_path("Top Level")
    else:
        reports_tree.click_path("Top Level", folder)
    try:
        yield manager
    except FolderManager._BailOut:
        # Deliberate bail-out requested by the caller: drop changes quietly.
        logger.info("Discarding editation modifications on {}".format(str(repr(manager))))
        manager.discard()
    except:
        # In case of any exception, nothing will be saved
        manager.discard()
        raise  # And reraise the exception
    else:
        # If no exception happens, save!
        manager.commit()
        form_buttons.save()
@contextmanager
def manage_subfolder(group, folder, subfolder):
    """Context manager to use when modifying the subfolder contents.

    Args:
        group: User group.
        folder: Parent folder name.
        subfolder: Subfolder name to manage.
    Returns: Context-managed :py:class:`cfme.intelligence.reports.ui_elements.FolderManager` inst.
    """
    sel.force_navigate("report_menus_group", context={"group": group})
    reports_tree.click_path("Top Level", folder, subfolder)
    try:
        # NOTE(review): this yields ``report_select`` (the report multibox),
        # yet the error/success paths below operate on ``manager`` exactly
        # like manage_folder() does - confirm that is intentional.
        yield report_select
    except FolderManager._BailOut:
        # Deliberate bail-out requested by the caller: drop changes quietly.
        logger.info("Discarding editation modifications on {}".format(str(repr(manager))))
        manager.discard()
    except:
        # In case of any exception, nothing will be saved
        manager.discard()
        raise  # And reraise the exception
    else:
        # If no exception happens, save!
        manager.commit()
        form_buttons.save()
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,162
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/infrastructure/test_tag_infrastructure.py
|
import pytest
from cfme.web_ui import Quadicon, mixins
from cfme.configure.configuration import Category, Tag
from utils import providers
from utils.randomness import generate_lowercase_random_string, generate_random_string
@pytest.fixture(scope="module")
def setup_first_provider():
    """Set up (or reuse) one validated infra provider for the whole module."""
    providers.setup_a_provider(
        prov_class="infra", check_existing=True, validate=True)
# Run every test in this module once per UI collection page, with the
# first infra provider already set up.
pytestmark = [
    pytest.mark.parametrize("location", [
        "infrastructure_providers",
        "infrastructure_clusters",
        "infrastructure_hosts",
        "infrastructure_datastores",
        "infra_vms",
        "infra_templates",
    ]),
    pytest.mark.usefixtures("setup_first_provider")
]
@pytest.yield_fixture(scope="module")
def category():
    """Create a throwaway tag category; delete it on module teardown."""
    new_category = Category(
        name=generate_lowercase_random_string(size=8),
        description=generate_random_string(size=32),
        display_name=generate_random_string(size=32),
    )
    new_category.create()
    yield new_category
    new_category.delete()
@pytest.yield_fixture(scope="module")
def tag(category):
    """Create a throwaway tag inside *category*; delete it on teardown."""
    new_tag = Tag(
        name=generate_lowercase_random_string(size=8),
        display_name=generate_random_string(size=32),
        category=category,
    )
    new_tag.create()
    yield new_tag
    new_tag.delete()
def test_tag_infra_item_through_selecting(location, tag):
    """Add a tag to an infra item via the list-view checkbox, then remove it.
    """
    pytest.sel.force_navigate(location)
    Quadicon.select_first_quad()
    mixins.add_tag(tag)
    Quadicon.select_first_quad()  # It goes back to the list view.
    mixins.remove_tag(tag)
def test_tag_infra_item_through_details(location, tag):
    """Add a tag to an infra item from its details page, then remove it.
    """
    pytest.sel.force_navigate(location)
    # Open the first item's details page before tagging.
    pytest.sel.click(Quadicon.first())
    mixins.add_tag(tag)
    mixins.remove_tag(tag)
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,163
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/infrastructure/test_system_image_type.py
|
import pytest
from cfme.infrastructure import pxe
import utils.error as error
from utils.randomness import generate_random_string
from utils.update import update
pytestmark = [pytest.mark.usefixtures("logged_in")]
def test_system_image_type_crud():
    """
    Tests a System Image Type using CRUD operations.
    """
    image_type = pxe.SystemImageType(
        name=generate_random_string(size=8),
        provision_type='Vm')
    image_type.create()
    with update(image_type):
        image_type.name += "_update"
    image_type.delete(cancel=False)
def test_duplicate_name_error_validation():
    """
    Tests a System Image for duplicate name.
    """
    image_type = pxe.SystemImageType(
        name=generate_random_string(size=8),
        provision_type='Vm')
    image_type.create()
    # A second create with the identical name must be rejected by the UI.
    with error.expected('Name has already been taken'):
        image_type.create()
    image_type.delete(cancel=False)
def test_name_required_error_validation():
    """
    Tests a System Image with no name.
    """
    nameless_type = pxe.SystemImageType(
        name=None,
        provision_type='Vm')
    # Creation without a name must be rejected by the UI.
    with error.expected('Name is required'):
        nameless_type.create()
# Commenting the maximum charater validation due to
# http://cfme-tests.readthedocs.org/guides/gotchas.html#
# selenium-is-not-clicking-on-the-element-it-says-it-is
# def test_name_max_character_validation():
# """
# Tests a System Image name with max characters.
# """
# sys_image_type = pxe.SystemImageType(
# name=generate_random_string(size=256),
# provision_type='Vm')
# sys_image_type.create()
# sys_image_type.delete(cancel=False)
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,164
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/cloud/stack.py
|
import ui_navigate as nav
import cfme.fixtures.pytest_selenium as sel
from cfme.web_ui import Quadicon, Region, SplitTable
from utils.pretty import Pretty
# Info blocks on the stack details page.
details_page = Region(infoblock_type='detail')
# Split table (header tbody / object tbody) listing a stack's outputs.
output_table = SplitTable(
    ('//*[@id="list_grid"]//table[contains(@class, "hdr")]/tbody', 1),
    ('//*[@id="list_grid"]//table[contains(@class, "obj")]/tbody', 1)
)
# Navigation: open a stack's details by clicking its quadicon in the list.
nav.add_branch(
    'clouds_stacks', {
        'clouds_stack':
        lambda ctx: sel.click(Quadicon(ctx['stack'].name, 'stack'))
    }
)
class Stack(Pretty):
    """Represents an orchestration stack in the Clouds UI.

    Args:
        name: Name of the stack as shown in the UI.
    """
    pretty_attrs = ['name']

    def __init__(self, name=None):
        self.name = name

    def _open_relationship(self, field):
        # Shared navigation step for all nav_to_* methods: open the stack's
        # details page and click the given Relationships info-block link.
        sel.force_navigate('clouds_stack', context={'stack': self})
        sel.click(details_page.infoblock.element("Relationships", field))

    def nav_to_security_group_link(self):
        """Open the stack's Security Groups relationship page."""
        self._open_relationship("Security Groups")

    def nav_to_parameters_link(self):
        """Open the stack's Parameters relationship page."""
        self._open_relationship("Parameters")

    def nav_to_output_link(self):
        """Open the stack's Outputs page and drill into the WebsiteURL row."""
        self._open_relationship("Outputs")
        cells = {'Key': "WebsiteURL"}
        output_table.click_rows_by_cells(cells, "Key", True)

    def nav_to_resources_link(self):
        """Open the stack's Resources relationship page."""
        self._open_relationship("Resources")
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,165
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/intelligence/reports/test_report_corresponds.py
|
# -*- coding: utf-8 -*-
import pytest
import utils
from cfme.intelligence.reports.reports import CustomReport
from utils import version
from utils.providers import provider_factory_by_name, setup_a_provider
from utils.randomness import generate_random_string, pick
@pytest.fixture(scope="module")
def setup_first_provider():
    """Make sure a validated provider exists before the report fixtures run."""
    setup_a_provider(check_existing=True, validate=True)
@pytest.yield_fixture(scope="function")
def report_vms(setup_first_provider):
    """Queue a custom VM report and yield up to 2 rows that have a provider.

    The report is created, queued to completion, and deleted on teardown.
    """
    report = CustomReport(
        menu_name=generate_random_string(),
        title=generate_random_string(),
        base_report_on="Virtual Machines",
        report_fields=[
            # Provider column name differs between appliance versions.
            version.pick({
                version.LOWEST: "Provider : Name",
                "5.3": "Cloud/Infrastructure Provider : Name",
            }),
            "Cluster : Name",
            "Datastore : Name",
            "Hardware : Number of CPUs",
            "Hardware : RAM",
            "Host : Name",
            "Name",
        ]
    )
    report.create()
    report.queue(wait_for_finish=True)
    # Keep only rows whose provider column is non-empty, then pick 2.
    # NOTE(review): the 5.3 key below ("Cloud/Infrastructure Provider Name")
    # differs from the field used at creation time ("... Provider : Name");
    # presumably the saved report's column header drops the " : " - verify.
    yield pick(
        filter(
            lambda i: len(i[
                version.pick({
                    version.LOWEST: "Provider : Name",
                    "5.3": "Cloud/Infrastructure Provider Name",
                })
            ].strip()) > 0,
            list(report.get_saved_reports()[0].data.rows)), 2)
    report.delete()
def test_custom_vm_report(soft_assert, report_vms):
    """Cross-check sampled report rows against the live provider.

    For each row: the VM must exist on the provider, and the cluster,
    datastore and host columns must match real provider inventory.
    """
    for row in report_vms:
        if row["Name"].startswith("test_"):
            continue  # Might disappear meanwhile
        provider_name = row[version.pick({
            version.LOWEST: "Provider : Name",
            "5.3": "Cloud/Infrastructure Provider Name",
        })]
        provider = provider_factory_by_name(provider_name)
        # NOTE(review): relies on `utils.net` being reachable through the
        # bare `import utils` above -- confirm utils/__init__ (or another
        # import) pulls the submodule in.
        provider_hosts_and_ips = utils.net.resolve_ips(provider.list_host())
        provider_datastores = provider.list_datastore()
        provider_clusters = provider.list_cluster()
        soft_assert(provider.does_vm_exist(row["Name"]), "VM {} does not exist in {}!".format(
            row["Name"], provider_name
        ))
        if row["Cluster Name"]:
            soft_assert(
                row["Cluster Name"] in provider_clusters,
                "Cluster {} not found in {}!".format(row["Cluster Name"], str(provider_clusters))
            )
        if row["Datastore Name"]:
            soft_assert(
                row["Datastore Name"] in provider_datastores,
                "Datastore {} not found in {}!".format(
                    row["Datastore Name"], str(provider_datastores))
            )
        # Because of mixing long and short host names, we have to use both-directional `in` op.
        if row["Host Name"]:
            found = False
            possible_ips_or_hosts = utils.net.resolve_ips((row["Host Name"], ))
            for possible_ip_or_host in possible_ips_or_hosts:
                for host_ip in provider_hosts_and_ips:
                    if possible_ip_or_host in host_ip or host_ip in possible_ip_or_host:
                        found = True
            soft_assert(
                found,
                "Host {} not found in {}!".format(possible_ips_or_hosts, provider_hosts_and_ips)
            )
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,166
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/web_ui/expression_editor.py
|
# -*- coding: utf-8 -*-
""" The expression editor present in some locations of CFME.
"""
from functools import partial
from selenium.common.exceptions import NoSuchElementException
from multimethods import singledispatch
from utils.wait import wait_for, TimedOutError
import cfme.fixtures.pytest_selenium as sel
from cfme.web_ui import Anything, Calendar, Form, Input, Region, Select, fill
import re
import sys
import types
from utils.pretty import Pretty
def _make_button(title):
return "//span[not(contains(@style,'none'))]//img[@alt='%s']"\
% title
def _root():
    # Root element of the whole expression editor widget.
    return sel.element("//div[@id='exp_editor_div']")
def _atom_root():
    # Sub-element holding the currently edited atomic expression form.
    return sel.element("./div[@id='exp_atom_editor_div']", root=_root())
def _expressions_root():
    # Sub-element listing the already committed expression parts.
    return sel.element("./fieldset/div", root=_root())
###
# Buttons container
#
# Locators for every toolbar button of the editor.  The
# "not(contains(@style, 'none'))" guards pick the *visible* copy of a
# button when the page contains several (hidden) instances.
buttons = Region(
    locators=dict(
        commit="//img[@alt='Commit expression element changes']",
        discard="//img[@alt='Discard expression element changes']",
        remove="//span[not(contains(@style, 'none'))]//img[@alt='Remove this expression element']",
        NOT="//span[not(contains(@style, 'none'))]" +
        "//img[@alt='Wrap this expression element with a NOT']",
        OR="//span[not(contains(@style, 'none'))]//img[@alt='OR with a new expression element']",
        AND="//span[not(contains(@style, 'none'))]//img[@alt='AND with a new expression element']",
        redo="//img[@alt='Redo']",
        undo="//img[@alt='Undo']",
        select_specific="//img[@alt='Click to change to a specific Date/Time format']",
        select_relative="//img[@alt='Click to change to a relative Date/Time format']",
    )
)
###
# Buttons for operating the expression concatenation
#
# Thin one-click wrappers over the toolbar locators above.
def click_undo():
    sel.click(buttons.undo)
def click_redo():
    sel.click(buttons.redo)
def click_and():
    sel.click(buttons.AND)
def click_or():
    sel.click(buttons.OR)
def click_not():
    sel.click(buttons.NOT)
def click_remove():
    sel.click(buttons.remove)
###
# Buttons for operating the atomic expressions
#
def click_commit():
    # Confirm the currently edited atomic expression.
    sel.click(buttons.commit)
def click_discard():
    # Throw away the currently edited atomic expression.
    sel.click(buttons.discard)
###
# Functions for operating the selection of the expressions
#
def select_first_expression():
    """ There is always at least one (???), so no checking of bounds.
    """
    sel.click(sel.elements("//a[contains(@id,'exp_')]", root=_expressions_root())[0])
def select_expression_by_text(text):
    """Click the committed expression element whose text contains *text*."""
    locator = (
        "//a[contains(@id,'exp_')][contains(normalize-space(text()),'%s')]"
        % text
    )
    target = sel.element(locator, root=_expressions_root())
    sel.click(target)
def no_expression_present():
    # True when the editor shows only the "???" placeholder, i.e. nothing
    # has been committed yet.
    els = sel.elements("//a[contains(@id,'exp_')]", root=_expressions_root())
    if len(els) > 1:
        return False
    # NOTE(review): assumes the editor always renders at least the
    # placeholder anchor; an empty list would raise IndexError here --
    # confirm that cannot happen.
    return els[0].text.strip() == "???"
def any_expression_present():
    """Return True when at least one real expression element is committed."""
    placeholder_only = no_expression_present()
    return not placeholder_only
def is_editing():
    # True while an expression element is open for editing (the editor
    # shows the "???" placeholder for the element being edited).
    try:
        sel.element(
            "//a[contains(@id,'exp_')][contains(normalize-space(text()),'???')]",
            root=_expressions_root()
        )
        return True
    except NoSuchElementException:
        # No placeholder anchor -> nothing is currently being edited.
        return False
def delete_whole_expression():
    # Remove committed expression elements one by one until only the
    # "???" placeholder remains.
    while any_expression_present():
        select_first_expression()
        click_remove()
def get_expression_as_text():
    """ Returns whole expression as represented visually.
    """
    # Encoded to a UTF-8 byte string (Python 2) before stripping whitespace.
    return sel.text("//div[@id='exp_editor_div']/fieldset/div").encode("utf-8").strip()
###
# Form handling
#
# One Form per atomic-expression "type" selectable in the editor, plus the
# date-related sub-forms.  The "type" select is shared by all of them.
field_form = Form(
    fields=[
        ("type", Select("select#chosen_typ")),
        ("field", Select("select#chosen_field")),
        ("key", Select("select#chosen_key")),
        ("value", Input("chosen_value")),
        ("user_input", Input("user_input")),
    ]
)
# Date/time widgets shown for "Field" expressions on date-like fields.
field_date_form = Form(
    fields=[
        ("dropdown_select", Select("select#chosen_from_1")),
        ("input_select_date", Calendar("miq_date_1_0")),
        ("input_select_time", Select("select#miq_time_1_0"))
    ]
)
count_form = Form(
    fields=[
        ("type", Select("select#chosen_typ")),
        ("count", Select("select#chosen_count")),
        ("key", Select("select#chosen_key")),
        ("value", Input("chosen_value")),
        ("user_input", Input("user_input")),
    ]
)
tag_form = Form(
    fields=[
        ("type", Select("select#chosen_typ")),
        ("tag", Select("select#chosen_tag")),
        ("value", Select("#chosen_value")),
        ("user_input", Input("user_input")),
    ]
)
find_form = Form(
    fields=[
        ("type", Select("select#chosen_typ")),
        ("field", Select("select#chosen_field")),
        ("skey", Select("select#chosen_skey")),
        ("value", "#chosen_value"),
        ("check", Select("select#chosen_check")),
        ("cfield", Select("select#chosen_cfield")),
        ("ckey", Select("select#chosen_ckey")),
        ("cvalue", Input("chosen_cvalue")),
    ]
)
registry_form = Form(
    fields=[
        ("type", Select("select#chosen_typ")),
        ("key", Input("chosen_regkey")),
        ("value", Input("chosen_regval")),
        ("operation", Select("#chosen_key")),
        ("contents", Input("chosen_value")),
    ]
)
# Buttons flipping between relative and specific date/time entry modes.
date_switch_buttons = Region(
    locators=dict(
        to_relative="//img[@alt='Click to change to a relative Date/Time format']",
        to_specific="//img[@alt='Click to change to a specific Date/Time format']"
    )
)
date_specific_form = Form(
    fields=[
        ("date", Input("miq_date_1_0")),
        ("time", Input("miq_time_1_0")),
    ]
)
date_relative_form = Form(
    fields=[
        ("from", Select("select#chosen_from_1")),
        ("through", Select("select#chosen_through_1")),
    ]
)
###
# Fill commands
#
def fill_count(count=None, key=None, value=None):
    """ Fills the 'Count of' type of form.
    If the value is unspecified and we are in the advanced search form (user input), the user_input
    checkbox will be checked if the value is None.
    Args:
        count: Name of the field to compare (Host.VMs, ...).
        key: Operation to do (=, <, >=, ...).
        value: Value to check against.
    Returns: See :py:func:`cfme.web_ui.fill`.
    """
    fill(
        count_form,
        dict(
            type="Count of",
            count=count,
            key=key,
            value=value,
        ),
    )
    # In case of advanced search box
    if sel.is_displayed(field_form.user_input):
        # Tick "user will input" exactly when no value was supplied.
        user_input = value is None
    else:
        # Passing None makes `fill` a no-op for this field.
        user_input = None
    fill(field_form.user_input, user_input)
    sel.click(buttons.commit)
def fill_tag(tag=None, value=None):
    """ Fills the 'Tag' type of form.
    Args:
        tag: Name of the field to compare.
        value: Value to check against.
    Returns: See :py:func:`cfme.web_ui.fill`.
    """
    fill(
        tag_form,
        dict(
            type="Tag",
            tag=tag,
            value=value,
        ),
    )
    # In case of advanced search box
    if sel.is_displayed(field_form.user_input):
        user_input = value is None
    else:
        user_input = None
    # NOTE(review): uses field_form.user_input rather than
    # tag_form.user_input; both point at Input("user_input") so the
    # behavior is the same -- confirm before "fixing".
    fill(field_form.user_input, user_input)
    sel.click(buttons.commit)
def fill_registry(key=None, value=None, operation=None, contents=None):
    """ Fills the 'Registry' type of form.

    Commits the expression via the ``action`` hook of ``fill``.
    Returns: See :py:func:`cfme.web_ui.fill`.
    """
    return fill(
        registry_form,
        dict(
            type="Registry",
            key=key,
            value=value,
            operation=operation,
            contents=contents,
        ),
        action=buttons.commit
    )
def fill_find(field=None, skey=None, value=None, check=None, cfield=None, ckey=None, cvalue=None):
    """ Fills the 'Find' type of form and commits it.

    Args:
        field, skey, value: The search part of the FIND expression.
        check, cfield, ckey, cvalue: The check part of the FIND expression.
    """
    fill(
        find_form,
        dict(
            type="Find",
            field=field,
            skey=skey,
            value=value,
            check=check,
            cfield=cfield,
            ckey=ckey,
            cvalue=cvalue,))
    sel.click(buttons.commit)
def fill_field(field=None, key=None, value=None):
    """ Fills the 'Field' type of form.
    Args:
        field: Name of the field to compare (Host.VMs, ...).
        key: Operation to do (=, <, >=, IS NULL, ...).
        value: Value to check against.
    Returns: See :py:func:`cfme.web_ui.fill`.
    """
    # Date-like fields get a special date/time widget instead of the plain
    # value input, so detect them by name first.
    field_norm = field.strip().lower()
    if "date updated" in field_norm or "date created" in field_norm or "boot time" in field_norm:
        no_date = False
    else:
        no_date = True
    fill(
        field_form,
        dict(
            type="Field",
            field=field,
            key=key,
            # For date fields the value goes through the date widgets below.
            value=value if no_date else None,
        ),
    )
    # In case of advanced search box
    if sel.is_displayed(field_form.user_input):
        user_input = value is None
    else:
        user_input = None
    fill(field_form.user_input, user_input)
    if not no_date:
        # Flip the right part of form
        # A value that is not "mm/dd/yyyy" is treated as a *relative* date
        # expression (e.g. "2 Days Ago") from the dropdown.
        if isinstance(value, basestring) and not re.match(r"^[0-9]{2}/[0-9]{2}/[0-9]{4}$", value):
            if not sel.is_displayed(field_date_form.dropdown_select):
                sel.click(date_switch_buttons.to_relative)
            fill(field_date_form, {"dropdown_select": value})
            sel.click(buttons.commit)
        else:
            # Specific selection
            if not sel.is_displayed(field_date_form.input_select_date):
                sel.click(date_switch_buttons.to_specific)
            if (isinstance(value, tuple) or isinstance(value, list)) and len(value) == 2:
                date, time = value
            elif isinstance(value, basestring):  # is in correct format mm/dd/yyyy
                # Date only (for now)
                date = value[:]
                time = None
            else:
                raise TypeError("fill_field expects a 2-tuple (date, time) or string with date")
            # TODO datetime.datetime support
            fill(field_date_form.input_select_date, date)
            # Try waiting a little bit for time field
            # If we don't wait, committing the expression will glitch
            try:
                wait_for(lambda: sel.is_displayed(field_date_form.input_select_time), num_sec=6)
                # It appeared, so if the time is to be set, we will set it (passing None glitches)
                if time:
                    fill(field_date_form.input_select_time, time)
            except TimedOutError:
                # Did not appear, ignore that
                pass
            finally:
                # And finally, commit the expression :)
                sel.click(buttons.commit)
    else:
        sel.click(buttons.commit)
###
# Processor for YAML commands
#
# Names that programs are never allowed to look up through get_func.
_banned_commands = {"get_func", "run_commands", "dsl_parse", "create_program_from_dsl"}
def get_func(name):
    """ Return callable from this module by its name.

    Args:
        name: Name of the variable containing the callable.
    Returns: Callable from this module
    Raises:
        AssertionError: For banned or private (underscore-prefixed) names.
        NameError: When the name is unknown or not callable.
    """
    assert name not in _banned_commands, "Command '%s' is not permitted!" % name
    assert not name.startswith("_"), "Command '%s' is private!" % name
    module = sys.modules[__name__]
    if not hasattr(module, name):
        raise NameError("Could not find function %s to operate the editor!" % name)
    func = getattr(module, name)
    if not callable(func):
        raise NameError("%s is not callable!" % name)
    return func
def run_commands(command_list, clear_expression=True):
    """ Run commands from the command list.
    Command list syntax:
    .. code-block:: python
        [
            "function1",        # no args
            "function2",        # dtto
            {"fill_fields": {"field1": "value", "field2": "value"}},  # Passes kwargs
            {"do_other_things": [1,2,3]}  # Passes args
        ]
    In YAML:
    .. code-block:: yaml
        - function1
        - function2
        -
            fill_fields:
                field1: value
                field2: value
        -
            do_other_things:
                - 1
                - 2
                - 3
    Args:
        command_list: :py:class:`list` object of the commands
        clear_expression: Whether to clear the expression before entering new one (default `True`)
    """
    assert isinstance(command_list, list) or isinstance(command_list, tuple)
    # First resolve everything into callables; only execute afterwards so a
    # bad command name fails before any UI state is touched.
    step_list = []
    for command in command_list:
        if isinstance(command, basestring):
            # Single command, no params
            step_list.append(get_func(command))
        elif isinstance(command, dict):
            for key, value in command.iteritems():
                func = get_func(key)
                args = []
                kwargs = {}
                if isinstance(value, list) or isinstance(value, tuple):
                    args.extend(value)
                elif isinstance(value, dict):
                    kwargs.update(value)
                else:
                    raise Exception("I use '%s' type here!" % type(value).__name__)
                step_list.append(partial(func, *args, **kwargs))
        else:
            raise Exception("I cannot process '%s' type here!" % type(command).__name__)
    if clear_expression:
        delete_whole_expression()
    for step in step_list:
        step()
@singledispatch
def create_program(source):
    """ Wrong call

    Fallback for unsupported source types; the real implementations are the
    basestring / list / tuple / NoneType methods registered below.
    """
    raise TypeError("Program code wrong! You must specify string (DSL), command list or None!")
@create_program.method(basestring)
def _create_program_from_dsl(dsl_program):
    """ Simple DSL to fill the expression editor.
    Syntax:
        DSL consists from statements. Statements are separated with newline or ;.
        Each statement is a single function call. Functions are called in this module.
        Function without parameters can be called like this:
            function
        or
            function()
        If the function has some parameters, you have to choose whether they are kwargs or args.
        DSL has no string literals, so if you want to call a function with classic parameters:
            function(parameter one, parameter two, you cannot use comma)
        And with kwargs:
            function(username=John Doe, password=top secret)
        You cannot split the statement to multiple lines as the DSL is regexp-based.
    Args:
        dsl_program: Source string with the program.
    Returns: Callable, which fills the expression.
    """
    SIMPLE_CALL = r"^[a-z_A-Z][a-z_A-Z0-9]*$"
    ARGS_CALL = r"^(?P<name>[a-z_A-Z][a-z_A-Z0-9]*)\((?P<args>.*)\)$"
    KWARG = r"^[^=]+=.*$"
    command_list = []
    # NOTE(review): `i` is the 0-based index over newline/;-separated
    # segments, so the "line %d" in the error below is not a real 1-based
    # source line number.
    for i, line in enumerate([x.strip() for x in re.split(r"\n|;", dsl_program)]):
        if len(line) == 0:
            continue
        elif re.match(SIMPLE_CALL, line):
            # Bare name -> call with no arguments.
            command_list.append(line)
            continue
        args_match = re.match(ARGS_CALL, line)
        if not args_match:
            raise SyntaxError("Could not resolve statement `%s' on line %d" % (line, i))
        fname = args_match.groupdict()["name"]
        args = [x.strip() for x in args_match.groupdict()["args"].split(",")]
        if len(args) > 0 and len(args[0]) > 0:
            if re.match(KWARG, args[0]):
                # kwargs
                kwargs = dict([map(lambda x: x.strip(), x.split("=", 1)) for x in args])
                command_list.append({fname: kwargs})
            else:
                # Args
                # "/None/" is the DSL spelling for a literal None argument.
                command_list.append({fname: [None if arg == "/None/" else arg for arg in args]})
        else:
            command_list.append(fname)
    return create_program(command_list)
@create_program.method(list)
@create_program.method(tuple)
def _create_program_from_list(command_list):
    """ Create function which fills the expression from the command list.
    Args:
        command_list: Command list for :py:func:`run_commands`
    Returns: Callable, which fills the expression.
    """
    return partial(run_commands, command_list)
@create_program.method(types.NoneType)
def _create_program_from_none(none):
    # None source -> a no-op program (returns None when run).
    return lambda: none
class Expression(Pretty):
    """This class enables to embed the expression in a Form.
    Args:
        show_func: Function to call to show the expression if there are more of them.
    """
    pretty_attrs = ['show_func']
    def __init__(self, show_func=lambda: None):
        # Called before filling so the right editor is visible on screen.
        self.show_func = show_func
@fill.method((Expression, Anything))
def _fill_expression(e, p):
    # Fill hook: show the editor, then build and run the program created
    # from `p` (DSL string, command list, or None).
    e.show_func()
    prog = create_program(p)
    prog()
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,167
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/storage/test_storage_crud.py
|
# -*- coding: utf-8 -*-
import pytest
from cfme.storage.managers import StorageManager
from utils.randomness import generate_random_string
from utils.update import update
from utils.version import current_version
# Every test in this module needs the storage-manager UI enabled.
pytestmark = [pytest.mark.usefixtures("use_storage")]
@pytest.mark.uncollectif(lambda: not current_version().is_in_series("5.2"))
def test_storage_manager_crud(request):
    """Create, update, and delete a NetApp storage manager (5.2-only UI)."""
    sm = StorageManager(
        name=generate_random_string(),
        type=StorageManager.NETAPP_RS,
        hostname=generate_random_string(),
        ip="127.0.0.250",
        port="12345",
        credentials=StorageManager.Credential(
            username="test",
            password="pw"
        )
    )
    # Clean up even if an assertion below fails mid-test.
    request.addfinalizer(lambda: sm.delete() if sm.exists else None)
    assert not sm.exists
    # validate=False: the fake credentials/host cannot pass validation.
    sm.create(validate=False)
    assert sm.exists
    with update(sm, validate=False):
        sm.hostname = generate_random_string()
    assert sm.exists
    sm.delete()
    assert not sm.exists
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,168
|
petrblaho/cfme_tests
|
refs/heads/master
|
/fixtures/widgets.py
|
# -*- coding: utf-8 -*-
import pytest
from cfme.dashboard import Widget
from cfme.intelligence.reports import widgets
@pytest.fixture(scope="session")
def widgets_generated(any_provider_session):
    """Navigate to the dashboard and (re)generate every widget found there."""
    pytest.sel.force_navigate("dashboard")
    # Snapshot (name, type) pairs first; generating a widget navigates away
    # from the dashboard, so live Widget objects cannot be reused.
    widget_list = [(w.name, w.content_type) for w in Widget.all()]
    for w_name, w_type in widget_list:
        widgets.Widget.detect(w_type, w_name).generate()
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,169
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/automate/test_class.py
|
import pytest
from cfme.automate.explorer import Class
from utils.randomness import generate_random_string
from utils.update import update
import utils.error as error
import cfme.tests.automate as ta
# All tests in this module require an authenticated UI session.
pytestmark = [pytest.mark.usefixtures("logged_in")]
@pytest.fixture(scope="module")
def make_namespace(request):
    # Module-scoped automate namespace shared by the tests below; cleanup is
    # handled through `request` inside the helper.
    return ta.make_namespace(request=request)
@pytest.fixture(scope="function")
def a_class(make_namespace):
    # Fresh (not yet created) Class object inside the shared namespace.
    return ta.a_class(make_namespace)
def test_class_crud(a_class):
    """Create an automate class, edit its description twice, delete it."""
    a_class.create()
    orig = a_class.description
    with update(a_class):
        a_class.description = 'edited'
    with update(a_class):
        a_class.description = orig
    a_class.delete()
    assert not a_class.exists()
def test_schema_crud(a_class):
    """Add schema fields to a class, then remove one while adding another."""
    a_class.create()
    f1 = Class.SchemaField(name='foo')
    f2 = Class.SchemaField(name='bar')
    f3 = Class.SchemaField(name='baz')
    a_class.edit_schema(add_fields=(f1, f2))
    a_class.edit_schema(remove_fields=(f1,), add_fields=(f3,))
# The inheritance box has been removed from the UI until it is implemented properly,
# see 1138859
#
# def test_add_class_inherited(a_class):
# subclass = Class(name=generate_random_string(8),
# namespace=a_class.namespace,
# description="subclass",
# inherits_from=a_class)
# a_class.create()
# subclass.create()
def test_duplicate_class_disallowed(a_class):
    """Creating the same class twice must raise the UI name-taken error."""
    a_class.create()
    with error.expected("Name has already been taken"):
        a_class.create()
def test_same_class_name_different_namespace(make_namespace):
    """Two classes with identical names may coexist in distinct namespaces."""
    # NOTE(review): this extra namespace is created without `request`, so
    # unlike the fixture it gets no finalizer -- confirm whether it (and
    # cls2) are cleaned up elsewhere.
    other_namespace = ta.make_namespace()
    name = generate_random_string(8)
    cls1 = Class(name=name, namespace=make_namespace)
    cls2 = Class(name=name, namespace=other_namespace)
    cls1.create()
    cls2.create()
    # delete one and check the other still exists
    cls1.delete()
    assert cls2.exists()
@pytest.mark.meta(blockers=[1148541])
def test_display_name_unset_from_ui(request, a_class):
    """Setting and then clearing the display name must keep the class alive."""
    a_class.create()
    request.addfinalizer(a_class.delete)
    with update(a_class):
        a_class.display_name = generate_random_string()
    # NOTE(review): other tests in this module call `a_class.exists()`;
    # here the bare attribute is asserted, which is always truthy if
    # `exists` is a method -- confirm whether it is a property or a bug.
    assert a_class.exists
    with update(a_class):
        a_class.display_name = ""
    assert a_class.exists
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,170
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/configure/test_docs.py
|
# -*- coding: utf-8 -*-
import pytest
import re
import requests
try:
# Faster, C-ext
from cStringIO import StringIO
except ImportError:
# Slower, pure python
from StringIO import StringIO
from PyPDF2 import PdfFileReader
from cfme.configure.about import product_assistance as about
from utils import version
@pytest.fixture(scope="module")
def guides():
    # All "About" page locator names that refer to PDF guides.
    return [loc for loc in about.locators.iterkeys() if loc.endswith("_guide")]
@pytest.fixture(scope="session")
def docs_info():
    # Document titles (substrings) that must all appear among the guides.
    return [
        'Control',
        'Lifecycle and Automation',
        'Quick Start',
        'Settings And Operations',
        'Insight',
        'Integration Services'
    ]
def test_links(guides, soft_assert):
    """Test whether the PDF documents are present."""
    pytest.sel.force_navigate("about")
    for link in guides:
        locator = getattr(about, link)
        url = pytest.sel.get_attribute(locator, "href")
        # HEAD is enough -- we only need the 200, not the PDF body.
        soft_assert(
            requests.head(url, verify=False).status_code == 200,
            "'{}' is not accessible".format(pytest.sel.text(locator).encode("utf-8").strip())
        )
@pytest.mark.meta(blockers=[1145326, "GH#ManageIQ/manageiq:2246"])
def test_contents(guides, soft_assert):
    """Test contents of each document.

    Downloads every guide and checks its PDF title metadata contains the
    link text, the product name, and the current x.y version.
    """
    pytest.sel.force_navigate("about")
    precomp_noguide = re.compile("(.*) Guide")
    for link in guides:
        locator = getattr(about, link)
        url = pytest.sel.get_attribute(locator, "href")
        data = requests.get(url, verify=False)
        pdf = PdfFileReader(StringIO(data.content))
        pdf_info = pdf.getDocumentInfo()
        pdf_title_low = pdf_info["/Title"].lower()
        # don't include the word 'guide'
        title_text_low = precomp_noguide.search(pytest.sel.text(locator)).group(1).lower()
        cur_ver = version.current_version()
        expected = [title_text_low]
        if cur_ver == version.LATEST:
            expected.append('manageiq')
        else:
            expected.append('cloudforms')
            # e.g. "5.3" from the full appliance version.
            expected.append('{}.{}'.format(cur_ver.version[0], cur_ver.version[1]))
        for exp_str in expected:
            soft_assert(exp_str in pdf_title_low, "{} not in {}".format(exp_str, pdf_title_low))
@pytest.mark.meta(blockers=[1026939])
def test_info(guides, soft_assert):
    """Check each guide's icon link agrees with its text link (alt/title/href)."""
    pytest.sel.force_navigate("about")
    for link in guides:
        l_a = getattr(about, link)
        # l_icon also implicitly checks for the icon url == text url
        # NOTE(review): these lambdas are passed *uncalled* to sel.element /
        # get_attribute -- presumably the framework accepts callables as
        # locators; confirm, otherwise this never resolves the elements.
        l_icon = lambda: pytest.sel.element(
            "../a[contains(@href, '{}')]/img".format(
                pytest.sel.get_attribute(l_a, "href").rsplit("/", 1)[-1]
            ),
            root=l_a
        )
        l_icon_a = lambda: pytest.sel.element("..", root=l_icon)
        soft_assert(
            pytest.sel.get_attribute(l_icon, "alt") == pytest.sel.get_attribute(l_icon_a, "title"),
            "Icon alt attr should match icon title attr ({})".format(pytest.sel.text(l_a))
        )
        soft_assert(
            pytest.sel.get_attribute(l_icon_a, "href") == pytest.sel.get_attribute(l_a, "href"),
            "Icon url should match text url ({})".format(pytest.sel.text(l_a))
        )
@pytest.mark.meta(blockers=[1026946])
def test_all_docs_present(guides, docs_info):
    """Check that every expected document title appears among the guide links.

    Each expected doc name is removed from ``docs_list`` the first time some
    link's text contains it; anything left over at the end is missing.
    """
    pytest.sel.force_navigate("about")
    docs_list = list(docs_info)
    for link in guides:
        for doc in docs_list:
            if doc.lower() in pytest.sel.text(getattr(about, link)).lower():
                break
        else:
            # for/else: no expected doc matched this link -- next link.
            continue
        # Reached only via the inner ``break``; ``doc`` is the matched title.
        docs_list.remove(doc)
    assert len(docs_list) == 0, "All documents should be available ({} are missing)".format(
        ", ".join(docs_list)
    )
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,171
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/web_ui/mixins.py
|
from cfme.fixtures import pytest_selenium as sel
from cfme.web_ui import fill, Form, Select, Table, toolbar, form_buttons, flash
# Form on the "Edit Tags" screen: category dropdown plus tag dropdown.
tag_form = Form(
    fields=[
        ('category', Select('//select[@id="tag_cat"]')),
        ('tag', Select('//select[@id="tag_add"]'))
    ])
# Table listing the tags currently assigned to the object.
tag_table = Table("//div[@id='assignments_div']//table")
def add_tag(tag, single_value=False):
    """Assign a tag to the currently displayed object via the Edit Tags page.

    ``tag`` is either a ``(category, tag_name)`` pair or a Tag object.  A
    trailing " *" on the category name selects single-value categories.
    """
    toolbar.select('Policy', 'Edit Tags')
    if isinstance(tag, (list, tuple)):
        category_name = tag[0]
        if single_value:
            category_name = "{} *".format(category_name)
        values = {"category": category_name, "tag": tag[1]}
    else:
        category_name = tag.category.display_name
        if tag.category.single_value:
            category_name = "{} *".format(category_name)
        values = {"tag": tag.display_name, "category": category_name}
    fill(tag_form, values)
    form_buttons.save()
    flash.assert_success_message('Tag edits were successfully saved')
def remove_tag(tag):
    """Unassign a tag from the currently displayed object via the Edit Tags page.

    ``tag`` is either a ``(category, tag_name)`` pair or a Tag object.
    """
    toolbar.select('Policy', 'Edit Tags')
    if isinstance(tag, (tuple, list)):
        category, tag_name = tag
    else:
        category = tag.category.display_name
        tag_name = tag.display_name
    # Locate the assignment row and click its first cell to drop the tag.
    matching_row = tag_table.find_row_by_cells(
        {'category': category, 'assigned_value': tag_name}, partial_check=True)
    sel.click(matching_row[0])
    form_buttons.save()
    flash.assert_success_message('Tag edits were successfully saved')
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,172
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/configure/about.py
|
# -*- coding: utf-8 -*-
from cfme.web_ui import Region
from utils import version
# "Product Assistance" section of the About page -- one locator per guide link.
product_assistance = Region(
    locators={
        'quick_start_guide': "//a[normalize-space(.)='Quick Start Guide']",
        'insight_guide': "//a[normalize-space(.)='Insight Guide']",
        'control_guide': "//a[normalize-space(.)='Control Guide']",
        'lifecycle_and_automation_guide':
        "//a[normalize-space(.)='Lifecycle and Automation Guide']",
        # The guide was renamed in 5.3, so the locator is picked by version.
        'integrate_guide': {
            version.LOWEST: "//a[normalize-space(.)='Integrate Guide']",
            '5.3': "//a[normalize-space(.)='Integration Services Guide']"
        },
        'settings_and_operations_guide': "//a[normalize-space(.)='Settings and Operations Guide']",
        'red_hat_customer_portal': "//a[normalize-space(.)='Red Hat Customer Portal']"
    },
    title='About',
    identifying_loc='quick_start_guide',
    infoblock_type="form"
)
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,173
|
petrblaho/cfme_tests
|
refs/heads/master
|
/fixtures/parallelizer/parallelizer_tester.py
|
"""parallelizer tester
Useful to make sure tests are being parallelized properly, and then reported correctly.
This file is named specially to prevent being picked up by py.test's default collector, and should
not be run during a normal test run.
"""
import random
from time import sleep
import pytest
# Every test in this module gets a shuffled ``param`` and a random ``wait``.
pytestmark = pytest.mark.usefixtures('param', 'wait')
def pytest_generate_tests(metafunc):
    """Parametrize each test with 20 shuffled ids so parallel runs interleave."""
    # Ids start at 10 for a vain reason: the Artifactor report does a naive
    # string sort, so "10" would sort before "1" if ids started at 1.
    ids = [i + 10 for i in xrange(20)]
    random.shuffle(ids)
    argvalues = [[v] for v in ids]
    metafunc.parametrize(['param'], argvalues, ids=ids, scope='module')
@pytest.fixture
def wait():
    """Sleep 0-5 seconds before each test."""
    # Add some randomness to make sure reports are getting mixed up like they would in a "real" run
    sleep(random.random() * 5)
@pytest.fixture
def setup_fail():
    """Fixture whose setup always raises, to exercise error reporting."""
    raise Exception('I failed to setup!')
@pytest.yield_fixture
def teardown_fail():
    """Fixture whose teardown always raises, to exercise error reporting."""
    yield
    raise Exception('I failed to teardown!')
def test_passes():
    # Baseline: a test that simply passes.
    pass
def test_fails():
    # Fails during the call phase.
    raise Exception('I failed!')
@pytest.mark.xfail
def test_xfails():
    # Expected failure that does fail -> reported as xfail.
    raise Exception('I failed!')
@pytest.mark.xfail
def test_xpasses():
    # Expected failure that passes -> reported as xpass.
    pass
def test_fails_setup(setup_fail):
    # Fails in fixture setup, not in the test body.
    pass
def test_fails_teardown(teardown_fail):
    # Passes, then fails in fixture teardown.
    pass
@pytest.mark.skipif('True')
def test_skipped():
    # Always skipped.
    pass
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,174
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/services/test_operations.py
|
# -*- coding: utf-8 -*-
"""Tests checking for link access from outside."""
import pytest
import cfme.provisioning
from cfme.fixtures import pytest_selenium as sel
from cfme.login import login_admin
from cfme.provisioning import provisioning_form
from cfme.services import requests
from cfme.web_ui import flash
from utils.browser import browser
from utils.providers import setup_a_provider
from utils.randomness import generate_random_string
from utils.wait import wait_for
from fixtures.pytest_store import store
pytestmark = [
    pytest.mark.meta(server_roles="-automate"),  # To prevent the provisioning itself.
]
@pytest.fixture(scope="module")
def provider():
    """Set up (or reuse) one infrastructure provider for the whole module."""
    return setup_a_provider("infra")
@pytest.fixture(scope="module")
def provider_data(provider):
    """Raw YAML data of the provider under test."""
    return provider.get_yaml_data()
@pytest.fixture(scope="module")
def provisioning(provider_data):
    """The provider's ``provisioning`` section (empty dict when absent)."""
    return provider_data.get("provisioning", {})
@pytest.fixture(scope="module")
def template_name(provisioning):
    """Name of the template to provision from; may be None."""
    return provisioning.get("template")
@pytest.fixture(scope="module")
def vm_name():
    """Random VM name shared by all tests in the module."""
    return generate_random_string(size=16)
@pytest.yield_fixture(scope="module")
def generated_request(provider, provider_data, provisioning, template_name, vm_name):
    """Creates a provision request, that is not automatically approved, and returns the search data.
    After finishing the test, request should be automatically deleted.
    Slightly modified code from :py:module:`cfme.tests.infrastructure.test_provisioning`
    """
    first_name = generate_random_string()
    last_name = generate_random_string()
    notes = generate_random_string()
    e_mail = "{}@{}.test".format(first_name, last_name)
    host, datastore = map(provisioning.get, ('host', 'datastore'))
    pytest.sel.force_navigate('infrastructure_provision_vms', context={
        'provider': provider,
        'template_name': template_name,
    })
    provisioning_data = {
        'email': e_mail,
        'first_name': first_name,
        'last_name': last_name,
        'notes': notes,
        'vm_name': vm_name,
        'host_name': {'name': [host]},
        'datastore_name': {'name': [datastore]},
        'num_vms': "10",  # so it won't get auto-approved
    }
    # Same thing, different names. :\
    if provider_data["type"] == 'rhevm':
        provisioning_data['provision_type'] = 'Native Clone'
    elif provider_data["type"] == 'virtualcenter':
        provisioning_data['provision_type'] = 'VMware'
    try:
        provisioning_data['vlan'] = provisioning['vlan']
    except KeyError:
        # provisioning['vlan'] is required for rhevm provisioning
        if provider_data["type"] == 'rhevm':
            raise pytest.fail('rhevm requires a vlan value in provisioning info')
    provisioning_form.fill(provisioning_data)
    pytest.sel.click(provisioning_form.submit_button)
    flash.assert_no_errors()
    # Cells used by the tests to locate the request row in the requests table.
    request_cells = {
        "Description": "Provision from [{}] to [{}###]".format(template_name, vm_name),
    }
    yield request_cells
    # Teardown: reopen a fresh session (the test may have broken the browser
    # state) and delete the request.
    browser().get(store.base_url)
    login_admin()
    requests.delete_request(request_cells)
    flash.assert_no_errors()
def test_services_request_direct_url(generated_request):
    """Go to the request page, save the url and try to access it directly."""
    assert requests.go_to_request(generated_request), "could not find the request!"
    saved_url = sel.current_url()
    # Navigate away first so loading the saved URL is a genuine direct access.
    sel.get(sel.base_url())
    sel.get(saved_url)
    wait_for(
        lambda: sel.is_displayed(".brand"),
        num_sec=20,
        delay=0.5,
        message="wait for a CFME page appear")
def test_copy_request(request, generated_request, vm_name, template_name):
    """Check if request gets properly copied."""
    copied_vm_name = generate_random_string(size=16)
    cfme.provisioning.copy_request_by_vm_and_template_name(
        vm_name, template_name, {"vm_name": copied_vm_name}, multi=True)
    # Clean the copy up whatever the assertion below does.
    request.addfinalizer(lambda: requests.delete_request({
        "Description": "Provision from [{}] to [{}###]".format(template_name, copied_vm_name),
    }))
    assert cfme.provisioning.go_to_request_by_vm_and_template_name(
        copied_vm_name, template_name, multi=True)
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,175
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/services/catalogs/service_catalogs.py
|
# -*- coding: utf-8 -*-
from functools import partial
import cfme.fixtures.pytest_selenium as sel
from cfme.web_ui import accordion, flash, menu, form_buttons
from utils.update import Updateable
from utils.pretty import Pretty
from utils import version
# The "Order this Service" control changed from an image to a button in 5.4,
# so the locator is picked by appliance version.
order_button = {
    version.LOWEST: "//img[@title='Order this Service']",
    '5.4': "//button[@title='Order this Service']"
}
accordion_tree = partial(accordion.tree, "Service Catalogs")
# Navigation: services_catalogs -> accordion -> concrete catalog item -> order page.
menu.nav.add_branch(
    'services_catalogs',
    {
        'service_catalogs':
        [
            lambda _: accordion.click('Service Catalogs'),
            {
                'service_catalog':
                [
                    lambda ctx: accordion_tree(
                        'All Services', ctx['catalog'], ctx['catalog_item'].name),
                    {
                        'order_service_catalog': lambda _: sel.click(order_button)
                    }
                ]
            }
        ]
    }
)
class ServiceCatalogs(Updateable, Pretty):
    """Represents an orderable entry in the Service Catalogs accordion."""
    pretty_attrs = ['service_name']

    def __init__(self, service_name=None):
        self.service_name = service_name

    def order(self, catalog, catalog_item):
        """Navigate to the catalog item's order page and submit the order."""
        context = {'catalog': catalog, 'catalog_item': catalog_item}
        sel.force_navigate('order_service_catalog', context=context)
        sel.click(form_buttons.submit)
        flash.assert_success_message("Order Request was Submitted")
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,176
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/configure/test_timeprofile.py
|
# -*- coding: utf-8 -*-
import cfme.configure.settings as st
import utils.error as error
import utils.randomness as random
from utils.update import update
def new_timeprofile():
    """Return an unsaved Timeprofile with a unique description."""
    description = 'time_profile' + random.generate_random_string()
    return st.Timeprofile(
        description=description,
        scope='Current User',
        days=True,
        hours=True,
        timezone="(GMT-10:00) Hawaii")
def test_timeprofile_crud():
    """Create, update, copy and delete a time profile."""
    profile = new_timeprofile()
    profile.create()
    with update(profile):
        profile.scope = 'All Users'
    clone = profile.copy()
    clone.delete()
    profile.delete()
def test_timeprofile_duplicate_name():
    """Creating a second time profile with the same description must fail."""
    profile = new_timeprofile()
    profile.create()
    duplicate_msg = "Error during 'add': Validation failed: Description has already been taken"
    with error.expected(duplicate_msg):
        profile.create()
    profile.delete()
def test_timeprofile_name_max_character_validation():
    """A 50-character description is accepted."""
    profile = st.Timeprofile(
        description=random.generate_random_string(size=50),
        scope='Current User',
        timezone="(GMT-10:00) Hawaii")
    profile.create()
    profile.delete()
def test_days_required_error_validation():
    """Leaving all days unchecked must be rejected."""
    tp = st.Timeprofile(
        description='time_profile' + random.generate_random_string(),
        scope='Current User',
        timezone="(GMT-10:00) Hawaii",
        days=False,
        hours=True)
    with error.expected("At least one Day must be selected"):
        tp.create()
def test_hours_required_error_validation():
    """Leaving all hours unchecked must be rejected."""
    tp = st.Timeprofile(
        description='time_profile' + random.generate_random_string(),
        scope='Current User',
        timezone="(GMT-10:00) Hawaii",
        days=True,
        hours=False)
    with error.expected("At least one Hour must be selected"):
        tp.create()
def test_description_required_error_validation():
    """An empty description must be rejected."""
    tp = st.Timeprofile(
        description=None,
        scope='Current User',
        timezone="(GMT-10:00) Hawaii")
    with error.expected("Description is required"):
        tp.create()
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,177
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/configure/test_session_timeout.py
|
# -*- coding: utf-8 -*-
import pytest
import time
from cfme.configure.configuration import AuthSetting
from utils.browser import ensure_browser_open, quit
from utils.wait import wait_for
def test_session_timeout(request):
    """Sets the timeout to shortest possible time and waits if it really times out."""
    @request.addfinalizer  # Wow, why we did not figure this out before?!
    def _finalize():
        # Restore a sane 24h timeout in a fresh browser session.
        quit()
        ensure_browser_open()
        AuthSetting.set_session_timeout(hours="24", minutes="0")
    AuthSetting.set_session_timeout(hours="0", minutes="5")
    # Wait 10 minutes
    time.sleep(10 * 60)
    # Try getting timeout
    # I had to use wait_for because on 5.4 and upstream builds it made weird errors
    wait_for(
        lambda: pytest.sel.elements(
            "//div[(@id='flash_div' or @id='login_div') and contains(normalize-space(.), "
            "'Session was timed out due to inactivity')]"),
        num_sec=60,
        delay=5,
        fail_func=lambda: pytest.sel.click("//a[normalize-space(text())='Cloud Intelligence']")
    )
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,178
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/test_utilization.py
|
import pytest
from utils import db
from utils import providers
from utils import testgen
from utils import conf
import time
from cfme.configure.configuration import candu
pytestmark = [
pytest.mark.meta(
server_roles="+ems_metrics_coordinator +ems_metrics_collector +ems_metrics_processor")
]
pytest_generate_tests = testgen.generate(testgen.provider_by_type, None)
@pytest.yield_fixture(scope="module")
def enable_candu():
    """Turn on C&U metrics collection for the module, turn it off afterwards."""
    try:
        candu.enable_all()
        yield
    finally:
        # Always disable, even if the tests blew up.
        candu.disable_all()
# blow away all providers when done - collecting metrics for all of them is
# too much
@pytest.fixture
def handle_provider(provider_key):
    """Remove every provider, then set up only the one under test."""
    providers.clear_providers()
    providers.setup_provider(provider_key)
def test_metrics_collection(handle_provider, provider_key, provider_crud, enable_candu):
    """check the db is gathering collection data for the given provider
    Metadata:
        test_flag: metrics_collection
    """
    metrics_tbl = db.cfmedb()['metrics']
    mgmt_systems_tbl = db.cfmedb()['ext_management_systems']
    # the id for the provider we're testing
    mgmt_system_id = db.cfmedb().session.query(mgmt_systems_tbl).filter(
        mgmt_systems_tbl.name == conf.cfme_data.get('management_systems', {})[provider_key]['name']
    ).first().id
    start_time = time.time()
    metric_count = 0
    timeout = 900.0  # 15 min
    # Poll the metrics table every 15 s until the row count grows.
    while time.time() < start_time + timeout:
        last_metric_count = metric_count
        print "name: %s, id: %s, metrics: %s" % (provider_key,
            mgmt_system_id, metric_count)
        # count all the metrics for the provider we're testing
        metric_count = db.cfmedb().session.query(metrics_tbl).filter(
            metrics_tbl.parent_ems_id == mgmt_system_id
        ).count()
        # collection is working if increasing
        # (requires growth since the previous nonzero reading -- a single
        # nonzero count alone is not proof of ongoing collection)
        if metric_count > last_metric_count and last_metric_count > 0:
            return
        else:
            time.sleep(15)
    if time.time() > start_time + timeout:
        raise Exception("Timed out waiting for metrics to be collected")
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,179
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/configure/test_tag.py
|
import pytest
from cfme.configure.configuration import Category, Tag
from utils.update import update
from utils.randomness import generate_lowercase_random_string, generate_random_string
@pytest.yield_fixture
def category():
cg = Category(name=generate_lowercase_random_string(size=8),
description=generate_random_string(size=32),
display_name=generate_random_string(size=32))
cg.create()
yield cg
cg.delete()
def test_tag_crud(category):
tag = Tag(name=generate_lowercase_random_string(size=8),
display_name=generate_random_string(size=32),
category=category)
tag.create()
with update(tag):
tag.display_name = generate_random_string(size=32)
tag.delete(cancel=False)
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,180
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/automate/__init__.py
|
from utils.randomness import generate_random_string
from cfme.automate.explorer import Namespace, Class, Instance, Domain
from utils import version
@version.dependent
def a_domain():
return None
@a_domain.method('5.3')
def a_domain_53():
return Domain(name=generate_random_string(8),
description=generate_random_string(32),
enabled=True)
def make_domain(request=None):
d = a_domain()
if d:
d.create()
if request is not None:
request.addfinalizer(d.delete)
return d
def a_namespace(domain=None, request=None):
if not domain:
domain = make_domain(request=request)
return Namespace(name=generate_random_string(8),
description=generate_random_string(32),
parent=domain)
def a_namespace_with_path(domain=None, request=None):
name = generate_random_string(8)
if not domain:
domain = make_domain(request=request)
n = Namespace.make_path('Factory', 'StateMachines', name, domain=domain)
n.description = generate_random_string(32)
return n
def make_namespace(request=None):
ns = a_namespace(request=request)
ns.create()
if request is not None:
request.addfinalizer(ns.delete)
return ns
def a_class(ns=None, request=None):
if not ns:
ns = make_namespace(request=request)
return Class(name=generate_random_string(8),
description=generate_random_string(32),
namespace=ns)
def make_class(ns=None, request=None):
cls = a_class(ns, request=request)
cls.create()
if request is not None:
request.addfinalizer(cls.delete)
return cls
def an_instance(cls=None, request=None):
if not cls:
cls = make_class(request=request)
return Instance(name=generate_random_string(8),
description=generate_random_string(32),
cls=cls)
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,181
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/intelligence/reports/test_widgets.py
|
# -*- coding: utf-8 -*-
""""""
import pytest
from cfme.fixtures import pytest_selenium as sel
from cfme.dashboard import Widget
from cfme.intelligence.reports.widgets import MenuWidget, ReportWidget, RSSFeedWidget, ChartWidget
from cfme.intelligence.reports.dashboards import DefaultDashboard
from cfme.web_ui import toolbar
from utils.randomness import generate_random_string
from utils.update import update
@pytest.fixture(scope="module")
def default_widgets():
sel.force_navigate("reports_default_dashboard_edit")
return DefaultDashboard.form.widgets.selected_items
@pytest.fixture(scope="module")
def dashboard(default_widgets):
return DefaultDashboard(widgets=default_widgets)
@pytest.fixture(scope="function")
def custom_widgets(request):
ws = [
MenuWidget(
generate_random_string(),
description=generate_random_string(),
active=True,
shortcuts={
"Services / Catalogs": generate_random_string(),
"Clouds / Providers": generate_random_string(),
},
visibility="<To All Users>"),
ReportWidget(
generate_random_string(),
description=generate_random_string(),
active=True,
filter=["Events", "Operations", "Operations VMs Powered On/Off for Last Week"],
columns=["VM Name", "Message"],
rows="10",
timer={"run": "Hourly", "hours": "Hour"},
visibility="<To All Users>"),
ChartWidget(
generate_random_string(),
description=generate_random_string(),
active=True,
filter="Configuration Management/Virtual Machines/Vendor and Guest OS",
timer={"run": "Hourly", "hours": "Hour"},
visibility="<To All Users>"),
RSSFeedWidget(
generate_random_string(),
description=generate_random_string(),
active=True,
type="Internal",
feed="Administrative Events",
rows="8",
visibility="<To All Users>"),
]
map(lambda w: w.create(), ws) # create all widgets
request.addfinalizer(lambda: map(lambda w: w.delete(), ws)) # Delete them after test
return ws
def test_widgets_on_dashboard(request, dashboard, default_widgets, custom_widgets, soft_assert):
with update(dashboard):
dashboard.widgets = map(lambda w: w.title, custom_widgets)
def _finalize():
with update(dashboard):
dashboard.widgets = default_widgets
request.addfinalizer(_finalize)
sel.force_navigate("dashboard")
toolbar.select("Reset Dashboard Widgets to the defaults", invokes_alert=True)
sel.handle_alert(False)
soft_assert(len(Widget.all()) == len(custom_widgets), "Count of the widgets differ")
for custom_w in custom_widgets:
try:
Widget.by_name(custom_w.title)
except NameError:
soft_assert(False, "Widget {} not found on dashboard".format(custom_w.title))
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,182
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/infrastructure/repositories.py
|
"""Infrastructure / Repositories"""
from functools import partial
import cfme.fixtures.pytest_selenium as sel
import cfme.web_ui.flash as flash
from cfme.web_ui import menu
import cfme.web_ui.toolbar as tb
from cfme.web_ui import Region, Form, Input, SplitCheckboxTable, fill, form_buttons
from cfme.web_ui.form_buttons import FormButton
from cfme.web_ui.paginator import pages
from utils.update import Updateable
from utils.pretty import Pretty
from utils.version import LOWEST
repo_list = SplitCheckboxTable(
("//div[@id='list_grid']/div[1]//tbody", 1),
("//div[@id='list_grid']/div[2]//tbody", 1),
header_checkbox_locator="#masterToggle"
)
details_page = Region(infoblock_type='detail')
form = Form(
fields=[
('name', Input('repo_name')),
('path', Input('repo_path')),
]
)
add_btn = {
LOWEST: FormButton('Add this Repository'),
# wonky upstream locator
'5.4': '//button[.="Add"]'
}
save_btn = {
LOWEST: form_buttons.save,
'5.4': '//button[.="Save"]'
}
cfg_btn = partial(tb.select, 'Configuration')
pol_btn = partial(tb.select, 'Policy')
def _repo_row(name):
for page in pages():
row = repo_list.find_row('Name', name)
if row:
return row
else:
raise Exception('row not found for repo {}'.format(name))
def _repo_nav_fn(context):
repo = context['repository']
sel.click(_repo_row(repo.name)[1])
sel.wait_for_element(repo._detail_page_identifying_loc)
def _check_repo(name, callback=None):
sel.check(sel.element('.//img', root=_repo_row(name)[0]))
if callback:
return callback()
menu.nav.add_branch(
'infrastructure_repositories', {
'infrastructure_repository_new': lambda _: cfg_btn('Add a new Repository'),
'infrastructure_repository_edit': lambda ctx: _check_repo(ctx['repository'].name,
lambda: cfg_btn('Edit the Selected Repository')),
'infrastructure_repository': [
_repo_nav_fn, {
'infrastructure_repository_policy_assignment': lambda _: pol_btn('Manage Policies'),
'infrastructure_repository_policy_tags': lambda _: pol_btn('Edit Tags'),
}
]
}
)
class Repository(Updateable, Pretty):
"""
Model of an infrastructure repository in cfme.
Args:
name: Name of the repository host
path: UNC path to the repository share
Usage:
myrepo = Repository(name='vmware', path='//hostname/path/to/share')
myrepo.create()
"""
pretty_attrs = ['name', 'path']
def __init__(self, name=None, path=None):
self.name = name
self.path = path
self._detail_page_identifying_loc = "//h1[contains(., '{}')]".format(self.name)
def _submit(self, cancel, submit_button):
if cancel:
sel.click(form_buttons.cancel)
# sel.wait_for_element(page.configuration_btn)
else:
sel.click(submit_button)
flash.assert_no_errors()
def create(self, cancel=False, validate_credentials=False):
"""
Creates a repository in the UI
Args:
cancel (boolean): Whether to cancel out of the creation. The cancel is done
after all the information present in the Host has been filled in the UI.
validate_credentials (boolean): Whether to validate credentials - if True and the
credentials are invalid, an error will be raised.
"""
sel.force_navigate('infrastructure_repository_new')
fill(form, vars(self))
self._submit(cancel, add_btn)
def update(self, updates, cancel=False, validate_credentials=False):
"""
Updates a repository in the UI. Better to use utils.update.update context
manager than call this directly.
Args:
updates (dict): fields that are changing.
cancel (boolean): whether to cancel out of the update.
"""
sel.force_navigate('infrastructure_repository_edit', context={'repository': self})
fill(form, updates)
self._submit(cancel, save_btn)
def delete(self, cancel=False):
"""
Deletes a repository from CFME
Args:
cancel: Whether to cancel the deletion, defaults to False
"""
if self.exists:
sel.force_navigate('infrastructure_repository', context={'repository': self})
cfg_btn('Remove from the VMDB', invokes_alert=True)
sel.handle_alert(cancel=cancel)
def get_detail(self, *ident):
""" Gets details from the details infoblock
The function first ensures that we are on the detail page for the specific repository.
Args:
*ident: An InfoBlock title, followed by the Key name, e.g. "Relationships", "Images"
Returns: A string representing the contents of the InfoBlock's value.
"""
if not self._on_detail_page():
sel.force_navigate('infrastructure_repository', context={'repository': self})
return details_page.infoblock.text(*ident)
def _on_detail_page(self):
""" Returns ``True`` if on the repository detail page, ``False`` if not."""
return self.is_displayed(self._detail_page_identifying_loc)
@property
def exists(self):
sel.force_navigate('infrastructure_repositories')
try:
return bool(_repo_row(self.name))
except: # exception?
return False
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,183
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/configure/test_tag_category.py
|
from cfme.configure.configuration import Category
from utils.update import update
from utils.randomness import generate_lowercase_random_string, generate_random_string
def test_category_crud():
cg = Category(name=generate_lowercase_random_string(size=8),
description=generate_random_string(size=32),
display_name=generate_random_string(size=32))
cg.create()
with update(cg):
cg.description = generate_random_string(size=32)
cg.delete(cancel=False)
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,184
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/infrastructure/test_pxe.py
|
import pytest
from cfme.infrastructure import pxe
from utils.update import update
from utils.testgen import generate, pxe_servers
pytest_generate_tests = generate(pxe_servers)
@pytest.fixture(scope='function')
def has_no_pxe_servers():
pxe.remove_all_pxe_servers()
@pytest.mark.usefixtures('has_no_pxe_servers')
def test_pxe_server_crud(pxe_name, pxe_server_crud):
"""
Basic Add test for PXE server including refresh.
"""
pxe_server_crud.create()
with update(pxe_server_crud):
pxe_server_crud.name = pxe_server_crud.name + "_update"
pxe_server_crud.delete(cancel=False)
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,185
|
petrblaho/cfme_tests
|
refs/heads/master
|
/cfme/tests/cloud/test_instance_power_control.py
|
import cfme.web_ui.flash as flash
import pytest
from cfme.cloud.instance import instance_factory, get_all_instances, EC2Instance, OpenStackInstance
from cfme.fixtures import pytest_selenium as sel
from utils import error, testgen
from utils.randomness import generate_random_string
from utils.wait import wait_for, TimedOutError
pytestmark = [pytest.mark.usefixtures('test_power_control')]
def pytest_generate_tests(metafunc):
final_argv, final_argn, final_ids = [], [], []
if 'ec2_only' in metafunc.fixturenames:
prov_types = ['ec2']
elif 'openstack_only' in metafunc.fixturenames:
prov_types = ['openstack']
else:
prov_types = ['ec2', 'openstack']
# Get all providers and pick those, that have power control test enabled
argnames, argvalues, idlist = testgen.provider_by_type(
metafunc, prov_types, 'test_power_control')
if not idlist:
return
for argn, argv, single_id in zip(argnames, argvalues, idlist):
test_pwr_ctl_i = argnames.index('test_power_control')
provider_key_i = argnames.index('provider_key')
final_argn = argnames
if argv[test_pwr_ctl_i] is True:
final_argv.append(argv)
final_ids.append(argv[provider_key_i])
# Then append '{provider_type}_only' fixture, if necessary
if len(prov_types) == 1:
final_argn.append('{}_only'.format(prov_types[0]))
for argval in final_argv:
argval.append('')
testgen.parametrize(metafunc, final_argn, final_argv, ids=final_ids, scope="function")
# This fixture must be named 'vm_name' because its tied to fixtures/virtual_machine
@pytest.fixture(scope="module")
def vm_name():
return "test_instance_pwrctl_{}".format(generate_random_string())
@pytest.fixture(scope="function")
def test_instance(request, delete_instances_fin, setup_provider,
provider_crud, provider_mgmt, vm_name):
""" Fixture to provision instance on the provider
"""
instance = instance_factory(vm_name, provider_crud)
if not provider_mgmt.does_vm_exist(vm_name):
delete_instances_fin[provider_crud.key] = instance
instance.create_on_provider()
elif isinstance(instance, EC2Instance) and \
provider_mgmt.is_vm_state(vm_name, provider_mgmt.states['deleted']):
provider_mgmt.set_name(vm_name, 'test_terminated_{}'.format(generate_random_string()))
delete_instances_fin[provider_crud.key] = instance
instance.create_on_provider()
return instance
@pytest.fixture(scope="module")
def delete_instances_fin(request):
""" Fixture to add a finalizer to delete provisioned instances at the end of tests
This is a "trashbin" fixture - it returns a mutable that you put stuff into.
"""
provisioned_instances = {}
def delete_instances(instances_dict):
for instance in instances_dict.itervalues():
instance.delete_from_provider()
request.addfinalizer(lambda: delete_instances(provisioned_instances))
return provisioned_instances
def wait_for_state_change_time_refresh(instance, state_change_time, timeout=300):
""" Waits for 'State Changed On' refresh
"""
def _wait_for_state_refresh():
instance.load_details()
return state_change_time != instance.get_detail(
properties=("Power Management", "State Changed On"))
try:
wait_for(_wait_for_state_refresh, num_sec=timeout, delay=30)
except TimedOutError:
return False
def check_power_options(soft_assert, instance, power_state):
""" Checks if power options match given power state ('on', 'off')
"""
must_be_available = {
EC2Instance: {
'on': [EC2Instance.STOP, EC2Instance.SOFT_REBOOT, EC2Instance.TERMINATE],
'off': [EC2Instance.START, EC2Instance.TERMINATE]
},
OpenStackInstance: {
'on': [
OpenStackInstance.SUSPEND,
OpenStackInstance.SOFT_REBOOT,
OpenStackInstance.HARD_REBOOT,
OpenStackInstance.TERMINATE
],
'off': [OpenStackInstance.START, OpenStackInstance.TERMINATE]
}
}
mustnt_be_available = {
EC2Instance: {
'on': [EC2Instance.START],
'off': [EC2Instance.STOP, EC2Instance.SOFT_REBOOT]
},
OpenStackInstance: {
'on': [OpenStackInstance.START],
'off': [
OpenStackInstance.SUSPEND,
OpenStackInstance.SOFT_REBOOT,
OpenStackInstance.HARD_REBOOT
]
}
}
for pwr_option in must_be_available[instance.__class__][power_state]:
soft_assert(
instance.is_pwr_option_available_in_cfme(option=pwr_option, from_details=True),
"{} must be available in current power state".format(pwr_option))
for pwr_option in mustnt_be_available[instance.__class__][power_state]:
soft_assert(
not instance.is_pwr_option_available_in_cfme(option=pwr_option, from_details=True),
"{} must not be available in current power state".format(pwr_option))
@pytest.mark.long_running
def test_quadicon_terminate_cancel(setup_provider_funcscope, provider_type, provider_mgmt,
test_instance, verify_vm_running, soft_assert):
""" Tests terminate cancel
Metadata:
test_flag: power_control, provision
"""
test_instance.wait_for_vm_state_change(
desired_state=test_instance.STATE_ON, timeout=720)
test_instance.power_control_from_cfme(option=test_instance.TERMINATE, cancel=True)
with error.expected('instance still exists'):
# try to find VM, if found, try again - times out with expected message
wait_for(
lambda: provider_mgmt.does_vm_exist(test_instance.name),
fail_condition=True,
num_sec=60,
delay=15,
message="instance still exists")
soft_assert(test_instance.find_quadicon().state == 'currentstate-on')
@pytest.mark.long_running
def test_quadicon_terminate(setup_provider_funcscope, provider_type, provider_mgmt,
test_instance, verify_vm_running, soft_assert):
""" Tests terminate instance
Metadata:
test_flag: power_control, provision
"""
test_instance.wait_for_vm_state_change(
desired_state=test_instance.STATE_ON, timeout=720)
test_instance.power_control_from_cfme(option=test_instance.TERMINATE, cancel=False)
wait_for(test_instance.does_vm_exist_in_cfme, fail_condition=True, num_sec=300, delay=30,
fail_func=test_instance.provider_crud.refresh_provider_relationships,
message="instance still exists in cfme UI")
if provider_type == 'openstack':
soft_assert(not provider_mgmt.does_vm_exist(test_instance.name), "instance still exists")
else:
soft_assert(
provider_mgmt.is_vm_state(test_instance.name, provider_mgmt.states['deleted']),
"instance still exists")
sel.force_navigate("clouds_instances_archived_branch")
soft_assert(
test_instance.name in get_all_instances(do_not_navigate=True),
"instance is not among archived instances")
@pytest.mark.long_running
def test_stop(ec2_only, setup_provider_funcscope, provider_type, provider_mgmt,
test_instance, soft_assert, verify_vm_running):
""" Tests instance stop
Metadata:
test_flag: power_control, provision
"""
test_instance.wait_for_vm_state_change(
desired_state=test_instance.STATE_ON, timeout=720, from_details=True)
check_power_options(soft_assert, test_instance, 'on')
test_instance.power_control_from_cfme(
option=test_instance.STOP, cancel=False, from_details=True)
flash.assert_message_contain("Stop initiated")
test_instance.wait_for_vm_state_change(
desired_state=test_instance.STATE_OFF, timeout=720, from_details=True)
wait_for(
lambda: provider_mgmt.is_vm_stopped(test_instance.name),
num_sec=180,
delay=20,
message="mgmt system check - instance stopped")
@pytest.mark.long_running
def test_start(ec2_only, setup_provider_funcscope, provider_type, provider_mgmt,
test_instance, soft_assert, verify_vm_stopped):
""" Tests instance start
Metadata:
test_flag: power_control, provision
"""
test_instance.wait_for_vm_state_change(
desired_state=test_instance.STATE_OFF, timeout=720, from_details=True)
check_power_options(soft_assert, test_instance, 'off')
test_instance.power_control_from_cfme(
option=test_instance.START, cancel=False, from_details=True)
flash.assert_message_contain("Start initiated")
test_instance.wait_for_vm_state_change(
desired_state=test_instance.STATE_ON, timeout=720, from_details=True)
soft_assert(
provider_mgmt.is_vm_running(test_instance.name),
"instance is not running")
@pytest.mark.long_running
def test_soft_reboot(setup_provider_funcscope, provider_type, provider_mgmt,
        test_instance, soft_assert, verify_vm_running):
    """ Tests instance soft reboot

    Metadata:
        test_flag: power_control, provision
    """
    # Ensure CFME reports the instance ON before rebooting.
    test_instance.wait_for_vm_state_change(
        desired_state=test_instance.STATE_ON, timeout=720, from_details=True)
    # A soft reboot ends in the same ON state it started from, so the
    # only observable evidence is a refreshed 'State Changed On' value;
    # capture the pre-reboot timestamp to compare against.
    state_change_time = test_instance.get_detail(('Power Management', 'State Changed On'))
    test_instance.power_control_from_cfme(
        option=test_instance.SOFT_REBOOT, cancel=False, from_details=True)
    flash.assert_message_contain("Restart initiated")
    # Wait until the state-change timestamp moves past the captured value.
    wait_for_state_change_time_refresh(test_instance, state_change_time, timeout=720)
    test_instance.wait_for_vm_state_change(
        desired_state=test_instance.STATE_ON, from_details=True)
    # Cross-check directly against the provider's management API.
    soft_assert(
        provider_mgmt.is_vm_running(test_instance.name),
        "instance is not running")
@pytest.mark.long_running
def test_hard_reboot(openstack_only, setup_provider_funcscope, provider_type,
        provider_mgmt, test_instance, soft_assert, verify_vm_running):
    """ Tests instance hard reboot (OpenStack only)

    Metadata:
        test_flag: power_control, provision
    """
    # Ensure CFME reports the instance ON before resetting it.
    test_instance.wait_for_vm_state_change(
        desired_state=test_instance.STATE_ON, timeout=720, from_details=True)
    # Like the soft reboot, a reset returns to ON; detect that a
    # transition happened via the 'State Changed On' field.
    state_change_time = test_instance.get_detail(('Power Management', 'State Changed On'))
    test_instance.power_control_from_cfme(
        option=test_instance.HARD_REBOOT, cancel=False, from_details=True)
    # CFME labels a hard reboot as "Reset".
    flash.assert_message_contain("Reset initiated")
    wait_for_state_change_time_refresh(test_instance, state_change_time, timeout=720)
    test_instance.wait_for_vm_state_change(
        desired_state=test_instance.STATE_ON, from_details=True)
    # Cross-check directly against the provider's management API.
    soft_assert(
        provider_mgmt.is_vm_running(test_instance.name),
        "instance is not running")
@pytest.mark.long_running
def test_suspend(openstack_only, setup_provider_funcscope, provider_type, provider_mgmt,
        test_instance, soft_assert, verify_vm_running):
    """Suspend a running OpenStack instance from the CFME UI.

    Metadata:
        test_flag: power_control, provision
    """
    # Precondition: CFME shows the instance as powered on.
    test_instance.wait_for_vm_state_change(
        desired_state=test_instance.STATE_ON, timeout=720, from_details=True)
    check_power_options(soft_assert, test_instance, 'on')
    # Drive the suspend from the instance details page.
    test_instance.power_control_from_cfme(
        option=test_instance.SUSPEND, cancel=False, from_details=True)
    flash.assert_message_contain("Suspend initiated")
    # The test waits for STATE_OFF in CFME after the suspend ...
    test_instance.wait_for_vm_state_change(
        desired_state=test_instance.STATE_OFF, timeout=720, from_details=True)
    # ... and verifies the suspended state on the provider itself.
    soft_assert(
        provider_mgmt.is_vm_suspended(test_instance.name),
        "instance is still running")
@pytest.mark.long_running
@pytest.mark.meta(blockers=[1183757])
def test_resume(openstack_only, setup_provider_funcscope,
        provider_type, provider_mgmt, test_instance, soft_assert, verify_vm_suspended):
    """ Tests instance resume (OpenStack only)

    Metadata:
        test_flag: power_control, provision
    """
    # Precondition: a suspended instance (set up by verify_vm_suspended).
    test_instance.wait_for_vm_state_change(
        desired_state=test_instance.STATE_SUSPENDED, timeout=720, from_details=True)
    check_power_options(soft_assert, test_instance, 'off')
    # Resuming is driven through the START power option in CFME.
    test_instance.power_control_from_cfme(
        option=test_instance.START, cancel=False, from_details=True)
    flash.assert_message_contain("Start initiated")
    test_instance.wait_for_vm_state_change(
        desired_state=test_instance.STATE_ON, timeout=720, from_details=True)
    # Cross-check directly against the provider's management API.
    soft_assert(
        provider_mgmt.is_vm_running(test_instance.name),
        "instance is not running")
@pytest.mark.long_running
def test_terminate(setup_provider_funcscope,
        provider_type, provider_mgmt, test_instance, soft_assert, verify_vm_running):
    """ Tests instance terminate

    Metadata:
        test_flag: power_control, provision
    """
    # Instance must be ON before it can be terminated.
    test_instance.wait_for_vm_state_change(
        desired_state=test_instance.STATE_ON, timeout=720, from_details=True)
    test_instance.power_control_from_cfme(
        option=test_instance.TERMINATE, cancel=False, from_details=True)
    flash.assert_message_contain("Terminate initiated")
    # fail_condition=True: keep polling while the VM still shows up in
    # CFME, refreshing provider relationships between attempts --
    # NOTE(review): confirm against utils.wait.wait_for semantics.
    wait_for(test_instance.does_vm_exist_in_cfme, fail_condition=True, num_sec=300, delay=30,
             fail_func=test_instance.provider_crud.refresh_provider_relationships,
             message="VM no longer exists in cfme UI")
    # Provider-side verification differs: for OpenStack the VM is gone
    # entirely; other providers report a 'deleted' state instead.
    if provider_type == 'openstack':
        soft_assert(not provider_mgmt.does_vm_exist(test_instance.name), "instance still exists")
    else:
        soft_assert(
            provider_mgmt.is_vm_state(test_instance.name, provider_mgmt.states['deleted']),
            "instance still exists")
    # Terminated instances should appear in CFME's archived branch.
    sel.force_navigate("clouds_instances_archived_branch")
    soft_assert(
        test_instance.name in get_all_instances(do_not_navigate=True),
        "instance is not among archived instances")
|
{"/cfme/tests/cloud/test_tag_cloud.py": ["/utils/randomness.py"], "/cfme/tests/control/test_compliance.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_service_dialog.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_customization_template.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_namespace.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/automate/test_instance.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_zones.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_method.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_provisioning_dialogs.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_provisioning.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_vm_discovery.py": ["/utils/randomness.py"], "/cfme/tests/services/test_dynamicdd_dialogelement.py": ["/cfme/services/catalogs/service_catalogs.py", "/utils/randomness.py"], "/cfme/tests/services/test_catalog_item.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_chargeback.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_tag_infrastructure.py": ["/utils/randomness.py"], "/cfme/tests/infrastructure/test_system_image_type.py": ["/utils/randomness.py"], "/cfme/tests/intelligence/reports/test_report_corresponds.py": ["/utils/randomness.py"], "/cfme/tests/storage/test_storage_crud.py": ["/utils/randomness.py"], "/cfme/tests/automate/test_class.py": ["/utils/randomness.py", "/cfme/tests/automate/__init__.py"], "/cfme/tests/configure/test_docs.py": ["/cfme/configure/about.py"], "/cfme/tests/services/test_operations.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_timeprofile.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag.py": ["/utils/randomness.py"], "/cfme/tests/automate/__init__.py": ["/utils/randomness.py"], 
"/cfme/tests/intelligence/reports/test_widgets.py": ["/utils/randomness.py"], "/cfme/tests/configure/test_tag_category.py": ["/utils/randomness.py"], "/cfme/tests/cloud/test_instance_power_control.py": ["/utils/randomness.py"]}
|
11,205
|
michaeltriska/ctr_amaro
|
refs/heads/master
|
/service.py
|
from tornado.ioloop import IOLoop
import tornado.web
from api.api import BusinessAnalysisHandler
"""
Overview
The applications use Tornado as a HTTP server,
and Schematics for dealing with representations.
Project structure
The typical application is structured in submodules:
app
api - api handler
core - domain implementation, i.e. crud operations on representations
service.py - the service class
helper - the configuration
"""
class Application(tornado.web.Application):
    """Tornado application wiring the /crt/v2 endpoint to its handler."""

    def __init__(self):
        routes = [(r"/crt/v2", BusinessAnalysisHandler)]
        tornado.web.Application.__init__(self, routes)
def main(port=80):
    """Start the HTTP service and block on the IOLoop.

    Args:
        port: TCP port to listen on. Defaults to 80, the previously
            hard-coded value; note ports below 1024 typically require
            elevated privileges.
    """
    app = Application()
    app.listen(port)
    IOLoop.instance().start()
# Script entry point: start the service when run directly.
if __name__ == '__main__':
    main()
|
{"/service.py": ["/api/api.py"], "/api/api.py": ["/models/models.py", "/core/ctr.py"], "/connectors/s3.py": ["/helper/helper.py"], "/core/ctr.py": ["/connectors/db.py", "/connectors/s3.py", "/helper/helper.py"], "/test_db.py": ["/connectors/db.py"]}
|
11,206
|
michaeltriska/ctr_amaro
|
refs/heads/master
|
/models/models.py
|
from schematics.models import Model
from schematics.types import StringType
class AggregationRequest(Model):
    """Schematics model validating the POST /crt/v2 request payload."""
    # Mandatory window bounds, e.g. '2016-01-03 13:55:00' per the API
    # handler's docstring (format not enforced by this model).
    startTimestamp = StringType(required=True)
    endTimestamp = StringType(required=True)
    # Mandatory aggregation interval. Declared as a string here, though
    # downstream code uses it as minutes in timedelta arithmetic --
    # NOTE(review): confirm the expected type with callers.
    aggregation = StringType(required=True)
    # Optional filters.
    product = StringType()
    platform = StringType()
|
{"/service.py": ["/api/api.py"], "/api/api.py": ["/models/models.py", "/core/ctr.py"], "/connectors/s3.py": ["/helper/helper.py"], "/core/ctr.py": ["/connectors/db.py", "/connectors/s3.py", "/helper/helper.py"], "/test_db.py": ["/connectors/db.py"]}
|
11,207
|
michaeltriska/ctr_amaro
|
refs/heads/master
|
/helper/helper.py
|
from datetime import datetime
import pytz
import time
def datetime_range(start, end, delta):
    """Yield datetimes from ``start`` (inclusive) up to ``end``
    (exclusive), stepping by ``delta``."""
    cursor = start
    while cursor < end:
        yield cursor
        cursor = cursor + delta
def convert_tz(originalTimeStamp, originalTimeZone, targetTimeZone):
    """
    Function converts unix-timestamp in s from
    originalTimeZone to targetTimeZone in ms

    NOTE(review): datetime.fromtimestamp() interprets the epoch value
    in the *system local* timezone before it is localized to
    originalTimeZone, and time.mktime() converts back using the system
    timezone as well -- so the result depends on the host's TZ setting.
    Confirm the deployment always runs with the expected timezone.
    """
    # Interpret the epoch seconds, attach originalTimeZone, then shift
    # the wall-clock into targetTimeZone.
    newTimeStamp = pytz.timezone(
        originalTimeZone).localize(
        datetime.fromtimestamp(
            originalTimeStamp)).astimezone(
        pytz.timezone(targetTimeZone))
    # Back to an epoch value, scaled to milliseconds.
    return time.mktime(newTimeStamp.timetuple()) * 1000
|
{"/service.py": ["/api/api.py"], "/api/api.py": ["/models/models.py", "/core/ctr.py"], "/connectors/s3.py": ["/helper/helper.py"], "/core/ctr.py": ["/connectors/db.py", "/connectors/s3.py", "/helper/helper.py"], "/test_db.py": ["/connectors/db.py"]}
|
11,208
|
michaeltriska/ctr_amaro
|
refs/heads/master
|
/api/api.py
|
import tornado.web
import json
from models.models import AggregationRequest
from core.ctr import CTR_Calculater
class BusinessAnalysisHandler(tornado.web.RequestHandler):
    """POST handler for /crt/v2: validates the payload and returns CTR data."""

    def post(self, model=None):
        '''
        This function takes a request and calculates
        the click through rate base on that data, as well
        as validates the data structure of request and response.
        It returns a list of objects in following formats:

        Request parameters:
        startTimestamp : mandatory parameter (in the format of '2016-01-03 13:55:00'
        endTimestamp : mandatory parameter in the format of '2016-01-04 13:55:00'
        aggregation : mandatory parameter, the interval aggregation in minutes
        product : optional parameter as a string
        platform : optional parameter as a string

        Response parameters:
        timestamp : initial timestamp of each aggregation
        platform : platform as explained above
        product : product as explained above
        CTR : metric calculated as the #purchases / #productViews
        '''
        data = json.loads(self.request.body.decode('utf-8'))
        try:
            request_model = AggregationRequest(data)
            request_model.validate()
        except Exception:
            # Bug fix: the original `raise "400"` is itself a TypeError
            # in Python 3 (exceptions must derive from BaseException),
            # so the client never received a 400. HTTPError produces a
            # proper 400 response.
            raise tornado.web.HTTPError(400)
        try:
            ctr_calculator = CTR_Calculater(data)
            response = {"ctr_response": ctr_calculator.calculate_ctr()}
            self.write(json.dumps(response))
        except Exception:
            # Likewise `raise "500"` was invalid; map calculation
            # failures to a proper 500 response.
            raise tornado.web.HTTPError(500)
|
{"/service.py": ["/api/api.py"], "/api/api.py": ["/models/models.py", "/core/ctr.py"], "/connectors/s3.py": ["/helper/helper.py"], "/core/ctr.py": ["/connectors/db.py", "/connectors/s3.py", "/helper/helper.py"], "/test_db.py": ["/connectors/db.py"]}
|
11,209
|
michaeltriska/ctr_amaro
|
refs/heads/master
|
/connectors/s3.py
|
from data.productevent import productevents
from helper.helper import *
import pandas as pd
class S3Handler:
    """Loads product-view events and shapes them into a pandas DataFrame."""

    def __init__(self):
        pass

    def get_event_df(self, product, unix_start_timestamp, unix_end_timestamp):
        """Return a DataFrame of (timestamp, code_color) view events.

        Events come from the in-memory ``productevents`` fixture,
        restricted to the [unix_start_timestamp, unix_end_timestamp]
        window and, when ``product`` is given, to that codeColor only.
        Timestamps are converted from Europe/London to
        America/Sao_Paulo and parsed into pandas datetimes.
        """
        events = [e for e in productevents
                  if self.get_event_condition(
                      e, unix_start_timestamp, unix_end_timestamp)]
        product_list = []
        for e in events:
            for v in e['events']:
                code_color = v['data']['custom_attributes']['codeColor']
                # Bug fix: the original if/else appended the row in
                # *both* branches, so the product filter never filtered.
                # Skip rows that do not match a requested product.
                if product and code_color != product:
                    continue
                product_list.append((e["timestamp_unixtime_ms"], code_color))
        cols = ["timestamp_unixtime_ms", 'code_color']
        df_events = pd.DataFrame(product_list, columns=cols)
        # ms -> s for convert_tz, which returns milliseconds again.
        df_events['timestamp_unixtime_ms'] = df_events['timestamp_unixtime_ms'] / 1000
        df_events['timestamp_unixtime_ms'] = df_events['timestamp_unixtime_ms'].apply(
            (lambda x: convert_tz(
                x, "Europe/London", "America/Sao_Paulo")))
        df_events['timestamp_unixtime_ms'] = \
            pd.to_datetime(df_events['timestamp_unixtime_ms'], unit='ms')
        return df_events

    def get_event_condition(self, i, unix_start_timestamp, unix_end_timestamp):
        """True when event ``i`` falls inside the inclusive time window."""
        return (i['timestamp_unixtime_ms'] >= unix_start_timestamp and
                i['timestamp_unixtime_ms'] <= unix_end_timestamp)
|
{"/service.py": ["/api/api.py"], "/api/api.py": ["/models/models.py", "/core/ctr.py"], "/connectors/s3.py": ["/helper/helper.py"], "/core/ctr.py": ["/connectors/db.py", "/connectors/s3.py", "/helper/helper.py"], "/test_db.py": ["/connectors/db.py"]}
|
11,210
|
michaeltriska/ctr_amaro
|
refs/heads/master
|
/connectors/db_creator.py
|
import sqlite3 as sqlite
import pandas as pd
class DataBaseCreator:
    """One-shot helper that loads CSV fixtures into a sqlite database."""

    def __init__(self, database_name):
        # Single connection reused for every table load.
        self.connection = sqlite.connect(database_name)
        self.cur = self.connection.cursor()

    def create_database(self, table_names):
        """Create one table per name in ``table_names``, then close the
        connection (the instance is single-use)."""
        for table in table_names:
            self.create_table(table)
        print('database created')
        self.connection.close()

    def create_table(self, tablename):
        """Load data/<tablename>.csv into a sqlite table of the same name."""
        df_orders = self.read_csv('data/' + tablename + '.csv')
        if 'order_date' in df_orders.columns.values:
            # Normalize the fixture's day-first dates to datetimes.
            df_orders['order_date'] = pd.to_datetime(df_orders['order_date'],
                                                     format='%d/%m/%Y %H:%M')
        try:
            df_orders.to_sql(tablename, self.connection)
        except ValueError:
            # to_sql raises ValueError when the table already exists
            # (if_exists defaults to 'fail'); keep the existing table.
            # The original bare `except: pass` also hid unrelated errors.
            pass

    def read_csv(self, file_name):
        """Read a semicolon-separated CSV without an index column."""
        return pd.read_csv(file_name, sep=';',
                           index_col=None,
                           infer_datetime_format=True)
# Manual bootstrap: build connectors/orders.db from the CSV fixtures.
if __name__ == '__main__':
    db = DataBaseCreator('connectors/orders.db')
    db.create_database(['orders', 'order_items'])
|
{"/service.py": ["/api/api.py"], "/api/api.py": ["/models/models.py", "/core/ctr.py"], "/connectors/s3.py": ["/helper/helper.py"], "/core/ctr.py": ["/connectors/db.py", "/connectors/s3.py", "/helper/helper.py"], "/test_db.py": ["/connectors/db.py"]}
|
11,211
|
michaeltriska/ctr_amaro
|
refs/heads/master
|
/core/ctr.py
|
from datetime import datetime
from datetime import timedelta
import pandas as pd
from connectors.db import DataBaseHandler
from connectors.s3 import S3Handler
from helper.helper import *
class CTR_Calculater:
    """Computes the click-through rate (#purchases / #productViews) per
    aggregation bucket, product and platform for a /crt/v2 request.

    ``model`` is the already-validated request payload (see
    AggregationRequest): startTimestamp/endTimestamp as
    '%Y-%m-%d %H:%M:%S' strings, aggregation in minutes, plus optional
    product and platform filters.
    """

    def __init__(self, model):
        print("model init", model)
        # initialize and transform request parameters
        self.start_timestamp = model['startTimestamp']
        self.end_timestamp = model['endTimestamp']
        # Request times are America/Sao_Paulo wall-clock while event
        # data is keyed on Europe/London epoch values, so convert the
        # window bounds (convert_tz returns milliseconds).
        self.unix_start_timestamp = convert_tz(
            datetime.strptime(
                self.start_timestamp,
                '%Y-%m-%d %H:%M:%S').timestamp(),
            "America/Sao_Paulo", "Europe/London")
        self.unix_end_timestamp = convert_tz(
            datetime.strptime(
                self.end_timestamp,
                '%Y-%m-%d %H:%M:%S').timestamp(),
            "America/Sao_Paulo", "Europe/London")
        self.aggregation = model['aggregation']
        # Optional filters default to None (no filtering).
        self.product = None
        if 'product' in model:
            self.product = model['product']
        self.platform = None
        if 'platform' in model:
            self.platform = model['platform']
        # initialize aggregation dataframe to purchase and events:
        # one row per `aggregation`-minute bucket start in the window.
        self.df_aggregation = pd.DataFrame()
        self.df_aggregation['startTimestamp'] = \
            [dt for dt in datetime_range(
                datetime.strptime(
                    self.start_timestamp,
                    '%Y-%m-%d %H:%M:%S'),
                datetime.strptime(
                    self.end_timestamp,
                    '%Y-%m-%d %H:%M:%S'),
                timedelta(minutes=self.aggregation))]
        # merge_asof below needs a datetime 'on' column on both sides.
        self.df_aggregation['startTimestamp_asof'] = \
            pd.to_datetime(self.df_aggregation['startTimestamp'])
        self.df_aggregation.index = \
            self.df_aggregation['startTimestamp_asof']
        # initialize order dataframe
        db = DataBaseHandler('connectors/orders.db')
        self.df_oders = db.get_purchase_df(
            self.start_timestamp, self.end_timestamp,
            self.product, self.platform)
        # initialize event dataframe
        s3_con = S3Handler()
        self.df_events = s3_con.get_event_df(
            self.product,
            self.unix_start_timestamp,
            self.unix_end_timestamp)
        # merge dataframe with aggregation dataframe: tag every event
        # and order row with the bucket (startTimestamp) it belongs to.
        self.df_event_itervals = self.merge_by_time_interval(
            self.df_events, self.df_aggregation,
            'timestamp_unixtime_ms', self.aggregation)
        self.df_oders_itervals = self.merge_by_time_interval(
            self.df_oders, self.df_aggregation,
            'order_date', self.aggregation)

    def calculate_ctr(self):
        """Return a list of {startTimestamp, platform, product, ctr}
        dicts for every bucket that has both events and orders."""
        # Only buckets present on both sides can yield a CTR.
        a = self.df_event_itervals.startTimestamp.unique()
        b = self.df_oders_itervals.startTimestamp.unique()
        iterator = list(set(a) & set(b))
        result = []
        for agg_time in iterator:
            oder_filter = (self.df_oders_itervals.startTimestamp == agg_time)
            unique_products = self.df_oders_itervals[oder_filter].code_color.tolist()
            event_filter = (self.df_event_itervals.startTimestamp == agg_time)
            event_iterator = self.df_event_itervals[event_filter].groupby('code_color')
            for product_key, product_value in event_iterator:
                if product_key in unique_products:
                    # Orders for this product inside this bucket.
                    p = self.df_oders_itervals[(self.df_oders_itervals.code_color == product_key)
                                               & (self.df_oders_itervals.startTimestamp == agg_time)]
                    # Report one CTR row per device_type (platform).
                    for platform_key, platform_value in p.groupby('device_type'):
                        no_p = len(platform_value)  # purchases
                        no_v = len(product_value)   # product views
                        ctr = no_p / no_v
                        result.append({
                            "startTimestamp": str(agg_time)[:10],
                            "platform": platform_key,
                            "product": product_key,
                            "ctr": ctr})
        return result

    def merge_by_time_interval(self, df, df_aggregation, data_col, aggregation):
        """
        Asof-merge ``df`` onto the aggregation grid.

        With direction='backward', each row of ``df`` is matched to the
        last grid row whose 'startTimestamp_asof' is less than or equal
        to the row's key, within a tolerance of ``aggregation`` minutes
        (rows with no match inside the tolerance get NaT/NaN grid
        columns). If ``data_col`` is absent, ``df`` is returned
        unchanged.
        """
        if data_col in df.columns.values:
            df['startTimestamp_asof'] = pd.to_datetime(
                df[data_col])
        else:
            return df
        # merge_asof requires the 'on' key to be sorted.
        df = df.sort_values(by='startTimestamp_asof')
        df.index = df['startTimestamp_asof']
        tol = pd.Timedelta(aggregation, unit='m')
        return pd.merge_asof(left=df,
                             right=df_aggregation,
                             on='startTimestamp_asof',
                             direction='backward',
                             tolerance=tol)
|
{"/service.py": ["/api/api.py"], "/api/api.py": ["/models/models.py", "/core/ctr.py"], "/connectors/s3.py": ["/helper/helper.py"], "/core/ctr.py": ["/connectors/db.py", "/connectors/s3.py", "/helper/helper.py"], "/test_db.py": ["/connectors/db.py"]}
|
11,212
|
michaeltriska/ctr_amaro
|
refs/heads/master
|
/connectors/db.py
|
import sqlite3 as sqlite
from datetime import datetime
import pandas as pd
from datetime import timedelta
class DataBaseHandler:
    """Thin sqlite wrapper for reading purchase (order) data."""

    def __init__(self, database_name):
        self.database = database_name
        self.connection = self._get_connection()
        self.cur = self.connection.cursor()

    def _get_connection(self):
        return sqlite.connect(self.database)

    def close_connection(self):
        self.connection.close()

    def get_purchase_df(self, start, end, product, platform):
        """Return orders joined with their items as a DataFrame.

        Closes the connection after fetching, so a handler instance is
        single-use for queries.
        """
        sql_query = self.get_purchase_query(
            start, end, product, platform)
        self.cur.execute(sql_query)
        rows = self.cur.fetchall()
        cols = [description[0]
                for description
                in self.cur.description]
        self.connection.close()
        df_oders = pd.DataFrame(rows, columns=cols)
        # NOTE(review): the data is shifted forward by 700 days --
        # presumably to move historic sample data into the queried
        # range; confirm against the dataset.
        df_oders["order_date"] = [datetime.strptime(
            date, '%Y-%m-%d %H:%M:%S') +
            timedelta(days=700)
            for date in df_oders.order_date]
        return df_oders

    def get_purchase_query(self, start, end, product, platform):
        """Build the purchases SQL for the [start, end) window.

        Bug fix: the original template wrapped the %s placeholders in
        strftime(), producing `strftime('<date>')` -- a call with the
        date as the *format* string -- instead of comparing order_date
        against the date itself. Dates are stored as
        '%Y-%m-%d %H:%M:%S' text, so plain string comparison orders
        correctly.
        """
        sql_query = """
        SELECT
        orders.order_date AS "order_date",
        orders.device_type AS "device_type",
        order_items.code_color AS "code_color",
        orders.id AS "order_id"
        FROM
        orders
        INNER JOIN
        order_items
        ON
        orders.id = order_items.order_id
        WHERE
        order_date >= '%s'
        AND order_date < '%s'
        """ % (start, end)
        # product/platform originate from an HTTP request: escape the
        # quote character to avoid breaking out of the literal.
        # TODO: switch to parameterized queries (?) end-to-end.
        if product:
            sql_query += ' AND order_items.code_color == "' + \
                product.replace('"', '""') + '"'
        if platform:
            sql_query += ' AND orders.device_type == "' + \
                platform.replace('"', '""') + '"'
        return sql_query
|
{"/service.py": ["/api/api.py"], "/api/api.py": ["/models/models.py", "/core/ctr.py"], "/connectors/s3.py": ["/helper/helper.py"], "/core/ctr.py": ["/connectors/db.py", "/connectors/s3.py", "/helper/helper.py"], "/test_db.py": ["/connectors/db.py"]}
|
11,213
|
michaeltriska/ctr_amaro
|
refs/heads/master
|
/test_db.py
|
import unittest
from connectors.db import DataBaseHandler
class DataBaseTests(unittest.TestCase):
    """Smoke test for the bundled sqlite orders database."""

    def test_database(self):
        handler = DataBaseHandler('connectors/orders.db')
        expected = (0, 144296, 16423318,
                    '2016-02-01 00:11:00', 234.8, 0.0,
                    234.8, 'DELIVERED', 'CREDIT CARD',
                    15.39, 'Correios PAC', 'Brasília',
                    'DF', 'google / organic', 'iOS')
        cursor = handler.cur.execute("SELECT * FROM orders")
        # First fixture row must match exactly.
        self.assertEqual(cursor.fetchall()[0], expected)
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
|
{"/service.py": ["/api/api.py"], "/api/api.py": ["/models/models.py", "/core/ctr.py"], "/connectors/s3.py": ["/helper/helper.py"], "/core/ctr.py": ["/connectors/db.py", "/connectors/s3.py", "/helper/helper.py"], "/test_db.py": ["/connectors/db.py"]}
|
11,233
|
vkvam/webvtt-py
|
refs/heads/master
|
/webvtt/segmenter.py
|
from math import ceil, floor
from .generic import Caption
from webvtt.main import WebVTT
from concurrent import futures
MPEGTS = 0
SECONDS = 200 # default number of seconds per segment
class WebVTTSegmenter(object):
    """
    Provides segmentation of WebVTT captions for HTTP Live Streaming (HLS).
    """
    def __init__(self):
        self._total_segments = 0
        self._output_writer = ''
        self._seconds = 0
        self._mpegts = 0
        self._segments = []

    def _validate_webvtt(self, webvtt):
        # Validates that the captions is a list and all the captions are
        # instances of Caption.
        if not isinstance(webvtt, WebVTT):
            return False
        for c in webvtt.captions:
            if not isinstance(c, Caption):
                return False
        return True

    def _slice_segments(self, captions):
        """Bucket each caption into every segment its time span touches."""
        self._segments = [[] for _ in range(self.total_segments)]
        for c in captions:
            segment_index_start = int(floor(float(c.start_in_seconds) / float(self.seconds)))
            self.segments[segment_index_start].append(c)
            # Also include a caption in other segments based on the end time.
            segment_index_end = int(floor(float(c.end_in_seconds) / float(self.seconds)))
            if segment_index_end > segment_index_start:
                for i in range(segment_index_start + 1, segment_index_end + 1):
                    self.segments[i].append(c)

    @staticmethod
    def _write_segment(args):
        """Write one fileSequence<index>.webvtt segment (thread worker)."""
        writer, index, mpegts, captions = args
        with writer.open('fileSequence{}.webvtt'.format(index)) as f:
            f.write('WEBVTT\n')
            f.write('X-TIMESTAMP-MAP=MPEGTS:{},LOCAL:00:00.000\n'.format(mpegts))
            for caption in captions:
                f.write('\n{} --> {}\n'.format(caption.start, caption.end))
                # Bug fix: the newline-joining version had been left
                # commented out, so caption lines were concatenated into
                # one run of text. Each cue line must end with a newline.
                f.writelines(['{}\n'.format(line) for line in caption.lines])

    def _write_segments(self):
        """Write all segments concurrently (segment writing is I/O-bound)."""
        work_list = []
        for index in range(self.total_segments):
            work = (self._output_writer, index, self._mpegts, [])
            for caption in self.segments[index]:
                work[3].append(caption)
            work_list.append(work)
        # The with-block already shuts the executor down (wait=True);
        # the original's explicit shutdown() call was redundant.
        with futures.ThreadPoolExecutor(max_workers=100) as executor:
            executor.map(self._write_segment, work_list)

    def _write_manifest(self, captions, target_seconds=SECONDS):
        """Write the prog_index.m3u8 HLS playlist for the segments."""
        with self._output_writer.open('prog_index.m3u8') as f:
            f.write('#EXTM3U\n')
            f.write('#EXT-X-TARGETDURATION:{}\n'.format(self.seconds))
            f.write('#EXT-X-VERSION:5\n')
            f.write('#EXT-X-PLAYLIST-TYPE:VOD\n')
            # The last segment is usually shorter than target_seconds.
            remaining_seconds = captions[-1].end_in_seconds
            for i in range(self.total_segments):
                segment_length = "{0:.3f}".format(min(target_seconds, remaining_seconds))
                f.write('#EXTINF:{0}\n'.format(segment_length))
                f.write('fileSequence{}.webvtt\n'.format(i))
                remaining_seconds -= target_seconds
            f.write('#EXT-X-ENDLIST\n')

    def segment(self, webvtt, output='', seconds=SECONDS, mpegts=MPEGTS):
        """Segments the captions based on a number of seconds."""
        captions = WebVTT().read(webvtt).captions
        self._total_segments = 0 if not captions else int(ceil(float(captions[-1].end_in_seconds) / float(seconds)))
        self._output_writer = output
        self._seconds = seconds
        self._mpegts = mpegts
        self._slice_segments(captions)
        self._write_segments()
        self._write_manifest(captions, seconds)

    @property
    def seconds(self):
        """Returns the number of seconds used for segmenting captions."""
        return self._seconds

    @property
    def total_segments(self):
        """Returns the total of segments."""
        return self._total_segments

    @property
    def segments(self):
        """Return the list of segments."""
        return self._segments
|
{"/webvtt/segmenter.py": ["/webvtt/main.py"], "/tests/webvtt.py": ["/webvtt/__init__.py", "/webvtt/main.py"], "/webvtt/__init__.py": ["/webvtt/main.py", "/webvtt/segmenter.py"], "/webvtt/main.py": ["/webvtt/parsers.py"]}
|
11,234
|
vkvam/webvtt-py
|
refs/heads/master
|
/webvtt/sub_io.py
|
import boto3
from webvtt.generic import GenericReader, GenericWriter
import os
from io import StringIO
class FileReader(GenericReader):
    """Reads caption data from a file on disk."""

    def __init__(self, filename):
        self.filename = filename

    def readlines(self):
        """Return the file's lines with trailing whitespace stripped."""
        with open(self.filename) as source:
            return [raw.rstrip() for raw in source.readlines()]
class FileWriter(GenericWriter):
    """Writes output files into a folder below the current directory."""

    def __init__(self, folder):
        target = os.path.join(os.getcwd(), folder)
        # Create the output folder on first use.
        if not os.path.exists(target):
            os.makedirs(target)
        self.folder = target

    def open(self, name):
        """Open ``name`` inside the managed folder for text writing."""
        return open(os.path.join(self.folder, name), 'w')
class StringReader(GenericReader):
    """Reads caption data from an in-memory string.

    Note: the parameter is named ``filename`` for interface parity with
    FileReader, but it carries the caption *content* itself.
    """

    def __init__(self, filename):
        self.content = filename

    def readlines(self):
        """Split the content into trailing-whitespace-stripped lines."""
        buffer = StringIO(self.content)
        return [entry.rstrip() for entry in buffer.readlines()]
class S3FileLike(object):
    """File-like buffer that uploads its accumulated contents to S3
    when the context manager exits."""

    def __init__(self, bucket, key, client, headers, ACL='private'):
        self.bucket = bucket
        self.key = key
        self.client = client
        self.content = []
        self.headers = headers
        self.ACL = ACL

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Single PUT with everything written so far.
        body = ''.join(self.content)
        self.client.Object(self.bucket, self.key).put(
            Body=body,
            ACL=self.ACL,
            **self.headers
        )

    def write(self, content):
        self.content.append(content)

    def writelines(self, content):
        # Mirror file semantics used elsewhere: one newline per line.
        self.content.extend(line + '\n' for line in content)
class S3ObjectWriter(GenericWriter):
    """Creates S3-backed writable objects under a fixed key prefix."""

    def __init__(self, bucket, key_prefix, s3_resource=None, ACL='private'):
        super(S3ObjectWriter, self).__init__()
        self.bucket = bucket
        self.key_prefix = key_prefix
        # Lazily build a default resource only when none is injected.
        if s3_resource is None:
            s3_resource = boto3.resource('s3')
        self.s3_resource = s3_resource
        self.ACL = ACL

    def open(self, key, ACL=None):
        """Return an S3FileLike for ``key_prefix/key`` with headers
        chosen by the key's file extension."""
        extension = key.split(".")[-1]
        headers = self.type_map[extension] if extension in self.type_map else {}
        effective_acl = self.ACL if ACL is None else ACL
        return S3FileLike(self.bucket,
                          '{}/{}'.format(self.key_prefix, key),
                          self.s3_resource,
                          headers,
                          effective_acl)
|
{"/webvtt/segmenter.py": ["/webvtt/main.py"], "/tests/webvtt.py": ["/webvtt/__init__.py", "/webvtt/main.py"], "/webvtt/__init__.py": ["/webvtt/main.py", "/webvtt/segmenter.py"], "/webvtt/main.py": ["/webvtt/parsers.py"]}
|
11,235
|
vkvam/webvtt-py
|
refs/heads/master
|
/tests/webvtt.py
|
import os
import unittest
from shutil import rmtree, copy
from webvtt import WebVTT
from webvtt.exceptions import MissingFilenameError, MalformedCaptionError
from webvtt.main import SUPPORTED_FORMATS
from webvtt.generic import Caption
BASE_DIR = os.path.dirname(__file__)
SUBTITLES_DIR = os.path.join(BASE_DIR, 'subtitles')
OUTPUT_DIR = os.path.join(BASE_DIR, 'output')
class WebVTTTestCase(unittest.TestCase):
    """Exercises WebVTT parsing, format conversion, saving and the
    Caption API against the fixture files in tests/subtitles."""

    def setUp(self):
        # Fresh parser instance per test.
        self.webvtt = WebVTT()

    def _get_file(self, filename):
        # Resolve a fixture file under tests/subtitles.
        return os.path.join(SUBTITLES_DIR, filename)

    def tearDown(self):
        # Drop anything the save/conversion tests wrote.
        if os.path.exists(OUTPUT_DIR):
            rmtree(OUTPUT_DIR)

    def test_create_caption(self):
        caption = Caption('00:00:00.500', '00:00:07.000', ['Caption test line 1', 'Caption test line 2'])
        self.assertEqual(caption.start, '00:00:00.500')
        self.assertEqual(caption.start_in_seconds, 0.5)
        self.assertEqual(caption.end, '00:00:07.000')
        self.assertEqual(caption.end_in_seconds, 7)
        self.assertEqual(caption.lines, ['Caption test line 1', 'Caption test line 2'])

    def test_save_captions(self):
        # Work on a copy so save() rewrites the copy, not the fixture.
        os.makedirs(OUTPUT_DIR)
        copy(self._get_file('one_caption.vtt'), OUTPUT_DIR)
        self.webvtt.read(os.path.join(OUTPUT_DIR, 'one_caption.vtt'))
        new_caption = Caption('00:00:07.000', '00:00:11.890', ['New caption text line1', 'New caption text line2'])
        self.webvtt.captions.append(new_caption)
        self.webvtt.save()
        with open(os.path.join(OUTPUT_DIR, 'one_caption.vtt'), 'r', encoding='utf-8') as f:
            lines = [line.rstrip() for line in f.readlines()]
        expected_lines = [
            'WEBVTT',
            '',
            '00:00:00.500 --> 00:00:07.000',
            'Caption text #1',
            '',
            '00:00:07.000 --> 00:00:11.890',
            'New caption text line1',
            'New caption text line2'
        ]
        self.assertListEqual(lines, expected_lines)

    def test_srt_conversion(self):
        os.makedirs(OUTPUT_DIR)
        copy(self._get_file('one_caption.srt'), OUTPUT_DIR)
        self.webvtt.from_srt(os.path.join(OUTPUT_DIR, 'one_caption.srt'))
        self.webvtt.save()
        # Saving a converted file should produce the .vtt sibling.
        self.assertTrue(os.path.exists(os.path.join(OUTPUT_DIR, 'one_caption.vtt')))
        with open(os.path.join(OUTPUT_DIR, 'one_caption.vtt'), 'r', encoding='utf-8') as f:
            lines = [line.rstrip() for line in f.readlines()]
        expected_lines = [
            'WEBVTT',
            '',
            '00:00:00.500 --> 00:00:07.000',
            'Caption text #1',
        ]
        self.assertListEqual(lines, expected_lines)

    def test_sbv_conversion(self):
        os.makedirs(OUTPUT_DIR)
        copy(self._get_file('two_captions.sbv'), OUTPUT_DIR)
        self.webvtt.from_sbv(os.path.join(OUTPUT_DIR, 'two_captions.sbv'))
        self.webvtt.save()
        self.assertTrue(os.path.exists(os.path.join(OUTPUT_DIR, 'two_captions.vtt')))
        with open(os.path.join(OUTPUT_DIR, 'two_captions.vtt'), 'r', encoding='utf-8') as f:
            lines = [line.rstrip() for line in f.readlines()]
        expected_lines = [
            'WEBVTT',
            '',
            '00:00:00.378 --> 00:00:11.378',
            'Caption text #1',
            '',
            '00:00:11.378 --> 00:00:12.305',
            'Caption text #2 (line 1)',
            'Caption text #2 (line 2)',
        ]
        self.assertListEqual(lines, expected_lines)

    def test_save_to_other_location(self):
        target_path = os.path.join(OUTPUT_DIR, 'test_folder')
        os.makedirs(target_path)
        self.webvtt.read(self._get_file('one_caption.vtt')).save(target_path)
        self.assertTrue(os.path.exists(os.path.join(target_path, 'one_caption.vtt')))

    def test_save_specific_filename(self):
        target_path = os.path.join(OUTPUT_DIR, 'test_folder')
        os.makedirs(target_path)
        output_file = os.path.join(target_path, 'custom_name.vtt')
        self.webvtt.read(self._get_file('one_caption.vtt')).save(output_file)
        self.assertTrue(os.path.exists(output_file))

    def test_save_specific_filename_no_extension(self):
        # save() should append the .vtt extension when missing.
        target_path = os.path.join(OUTPUT_DIR, 'test_folder')
        os.makedirs(target_path)
        output_file = os.path.join(target_path, 'custom_name')
        self.webvtt.read(self._get_file('one_caption.vtt')).save(output_file)
        self.assertTrue(os.path.exists(os.path.join(target_path, 'custom_name.vtt')))

    def test_caption_timestamp_update(self):
        c = Caption('00:00:00.500', '00:00:07.000')
        c.start = '00:00:01.750'
        c.end = '00:00:08.250'
        self.assertEqual(c.start, '00:00:01.750')
        self.assertEqual(c.end, '00:00:08.250')

    def test_caption_text(self):
        c = Caption(text=['Caption line #1', 'Caption line #2'])
        self.assertEqual(
            c.text,
            'Caption line #1\nCaption line #2'
        )

    def test_caption_receive_text(self):
        # A multiline string should be split into separate lines.
        c = Caption(text='Caption line #1\nCaption line #2')
        self.assertEqual(
            len(c.lines),
            2
        )
        self.assertEqual(
            c.text,
            'Caption line #1\nCaption line #2'
        )

    def test_supported_formats(self):
        self.assertListEqual(
            WebVTT().supported_formats(),
            [sf[0] for sf in SUPPORTED_FORMATS]
        )

    def test_update_text(self):
        c = Caption(text='Caption line #1')
        c.text = 'Caption line #1 updated'
        self.assertEqual(
            c.text,
            'Caption line #1 updated'
        )

    def test_update_text_multiline(self):
        c = Caption(text='Caption line #1')
        c.text = 'Caption line #1\nCaption line #2'
        self.assertEqual(
            len(c.lines),
            2
        )
        self.assertEqual(
            c.text,
            'Caption line #1\nCaption line #2'
        )

    def test_update_text_wrong_type(self):
        # Assigning a non-string to text must raise.
        c = Caption(text='Caption line #1')
        self.assertRaises(
            AttributeError,
            setattr,
            c,
            'text',
            123
        )

    def test_manipulate_lines(self):
        c = Caption(text=['Caption line #1', 'Caption line #2'])
        c.lines[0] = 'Caption line #1 updated'
        self.assertEqual(
            c.lines[0],
            'Caption line #1 updated'
        )

    def test_captions(self):
        self.webvtt.read(self._get_file('sample.vtt'))
        self.assertIsInstance(self.webvtt.captions, list)

    def test_captions_prevent_write(self):
        # The captions list is exposed read-only as an attribute.
        self.webvtt.read(self._get_file('sample.vtt'))
        self.assertRaises(
            AttributeError,
            setattr,
            self.webvtt,
            'captions',
            []
        )

    def test_sequence_iteration(self):
        # WebVTT supports indexing and len() over its captions.
        self.webvtt.read(self._get_file('sample.vtt'))
        self.assertIsInstance(self.webvtt[0], Caption)
        self.assertEqual(len(self.webvtt), len(self.webvtt.captions))

    def test_save_no_filename(self):
        webvtt = WebVTT()
        self.assertRaises(
            MissingFilenameError,
            webvtt.save
        )

    def test_malformed_start_timestamp(self):
        self.assertRaises(
            MalformedCaptionError,
            Caption,
            '01:00'
        )
|
{"/webvtt/segmenter.py": ["/webvtt/main.py"], "/tests/webvtt.py": ["/webvtt/__init__.py", "/webvtt/main.py"], "/webvtt/__init__.py": ["/webvtt/main.py", "/webvtt/segmenter.py"], "/webvtt/main.py": ["/webvtt/parsers.py"]}
|
11,236
|
vkvam/webvtt-py
|
refs/heads/master
|
/webvtt/__init__.py
|
from .main import WebVTT
from .segmenter import WebVTTSegmenter
from .generic import Caption
__version__ = '0.4.0'
|
{"/webvtt/segmenter.py": ["/webvtt/main.py"], "/tests/webvtt.py": ["/webvtt/__init__.py", "/webvtt/main.py"], "/webvtt/__init__.py": ["/webvtt/main.py", "/webvtt/segmenter.py"], "/webvtt/main.py": ["/webvtt/parsers.py"]}
|
11,237
|
vkvam/webvtt-py
|
refs/heads/master
|
/webvtt/main.py
|
import os
import re
from .parsers import WebVTTParser, SRTParser, SBVParser
from webvtt.exceptions import MissingFilenameError
# Registry of readable caption formats: (display name with extension, parser).
# WebVTT.__init__ parses the "(.ext)" part of each display name to build the
# dynamic reader methods (read() / from_srt() / from_sbv()).
SUPPORTED_FORMATS = (
    ('WebVTT (.vtt)', WebVTTParser),  # default parser for WebVTT format
    ('SubRip (.srt)', SRTParser),  # parser for SRT format
    ('YouTube SBV (.sbv)', SBVParser),  # parser for YouTube SBV format
)
class WebVTT(object):
    """
    Parse captions in WebVTT format and also from other formats like SRT.

    To read WebVTT:

        WebVTT().read('captions.vtt')

    For other formats like SRT, use from_[format in lower case]:

        WebVTT().from_srt('captions.srt')

    A list of all supported formats is available calling supported_formats().
    """

    # Extracts the extension from a display name such as "WebVTT (.vtt)".
    # Fix: raw string — '\(' and '\.' are invalid escape sequences in a plain
    # literal and raise DeprecationWarning/SyntaxWarning on modern Python.
    FORMAT_EXTENSION_PATTERN = re.compile(r'.+\(\.(.+)\)')

    def __init__(self):
        self._captions = []
        self.file = ''
        # Create reader methods dynamically from the supported formats:
        # read() for WebVTT itself and from_<extension>() for the others.
        for name, parser_class in SUPPORTED_FORMATS:
            extension = re.match(self.FORMAT_EXTENSION_PATTERN, name).group(1)
            method_name = 'read' if parser_class is WebVTTParser else 'from_{}'.format(extension)
            setattr(self.__class__, method_name, self._set_reader(method_name, name, parser_class))

    def __len__(self):
        return len(self._captions)

    def __getitem__(self, index):
        return self._captions[index]

    def _set_reader(self, name, format_name, parser_class):
        """Build a reader function bound to the given parser class."""
        def f(self, file):
            self.file = file
            self._captions = parser_class().read(file).captions
            return self

        f.__name__ = name
        if parser_class is WebVTTParser:
            f.__doc__ = 'Reads a WebVTT captions file.'
        else:
            f.__doc__ = 'Reads captions from a file in {} format.'.format(format_name)
        return f

    def save(self, output=''):
        """Save the document.

        If no output is provided the file will be saved in the same location.
        Otherwise output can determine a target directory or file.

        Raises MissingFilenameError when no output is given and the document
        has no associated file.
        """
        if not output:
            if not self.file:
                raise MissingFilenameError
            # saving an original vtt file will overwrite the file
            # and for files read from other formats will save as vtt
            # with the same name and location
            self.file = os.path.splitext(self.file)[0] + '.vtt'
        else:
            target = os.path.join(os.getcwd(), output)
            if os.path.isdir(target):
                # if an output is provided and it is a directory
                # the file will be saved in that location with the same name
                filename = os.path.splitext(os.path.basename(self.file))[0]
                self.file = os.path.join(target, '{}.vtt'.format(filename))
            else:
                # NOTE(review): this also accepts names merely *ending* in
                # "vtt" (e.g. "myvtt") without appending the extension —
                # confirm whether endswith('.vtt') was intended.
                if target[-3:].lower() != 'vtt':
                    target += '.vtt'
                # otherwise the file will be written in the specified location
                self.file = target
        with open(self.file, 'w') as f:
            f.write('WEBVTT\n')
            for c in self._captions:
                f.write('\n{} --> {}\n'.format(c.start, c.end))
                f.writelines(['{}\n'.format(l) for l in c.lines])

    @staticmethod
    def supported_formats():
        """Provides a list of supported formats that this class can read from."""
        return [f[0] for f in SUPPORTED_FORMATS]

    @property
    def captions(self):
        """Returns the list of captions."""
        return self._captions

    @property
    def total_length(self):
        """Returns the total length of the captions in whole seconds."""
        if not self._captions:
            return 0
        return int(self._captions[-1].end_in_seconds) - int(self._captions[0].start_in_seconds)
|
{"/webvtt/segmenter.py": ["/webvtt/main.py"], "/tests/webvtt.py": ["/webvtt/__init__.py", "/webvtt/main.py"], "/webvtt/__init__.py": ["/webvtt/main.py", "/webvtt/segmenter.py"], "/webvtt/main.py": ["/webvtt/parsers.py"]}
|
11,238
|
vkvam/webvtt-py
|
refs/heads/master
|
/webvtt/parsers.py
|
import re
from webvtt.exceptions import MalformedFileError, MalformedCaptionError
from webvtt.generic import GenericParser, Caption
class TextBasedParser(GenericParser):
    """
    Parser for plain text caption files.
    This is a generic class, do not use directly.

    Subclasses provide TIMEFRAME_LINE_PATTERN (compiled regex with two groups:
    start and end timestamp) and override _is_timeframe_line /
    _should_skip_line / _validate.
    """

    # Placeholder; subclasses replace this with a compiled regex.
    TIMEFRAME_LINE_PATTERN = ''

    def _read_content(self, file):
        # Load all lines up front; an empty file is considered malformed.
        lines = file.readlines()
        if not lines:
            raise MalformedFileError('The file is empty.')
        return lines

    def _parse_timeframe_line(self, line):
        """Parse timeframe line and return start and end timestamps."""
        tf = self._validate_timeframe_line(line)
        if not tf:
            raise MalformedCaptionError('Invalid time format')
        return tf.group(1), tf.group(2)

    def _validate_timeframe_line(self, line):
        # Returns the match object (or None) for the subclass pattern.
        return re.match(self.TIMEFRAME_LINE_PATTERN, line)

    def _is_timeframe_line(self, line):
        """
        This method returns True if the line contains the timeframes.
        To be implemented by child classes.
        """
        return False

    def _should_skip_line(self, line, index, caption):
        """
        This method returns True for a line that should be skipped.
        To be implemented by child classes.
        """
        return False

    def _parse(self, lines):
        # Simple state machine: `c` holds the caption currently being
        # assembled, or None between caption blocks.
        c = None
        for index, line in enumerate(lines):
            if self._should_skip_line(line, index, c):  # allow child classes to skip lines based on the content
                continue
            if self._is_timeframe_line(line):
                try:
                    start, end = self._parse_timeframe_line(line)
                except MalformedCaptionError as e:
                    raise MalformedCaptionError('{} in line! {}'.format(e, index + 1))
                c = Caption(start, end)
            elif line:
                # Text lines must belong to a caption opened by a timeframe.
                if c is None:
                    raise MalformedCaptionError('Caption missing timeframe in line {}.'.format(index + 1))
                else:
                    c.add_line(line)
            else:
                # A blank line closes the current caption block.
                if c is None:
                    continue
                if not c.lines:
                    raise MalformedCaptionError('Caption missing text in line {}.'.format(index + 1))
                self.captions.append(c)
                c = None
        # Flush the trailing caption when the file lacks a final blank line.
        if c is not None and c.lines:
            self.captions.append(c)
class SRTParser(TextBasedParser):
    """
    SRT parser.
    """

    # Matches "MM:SS,mmm --> MM:SS,mmm" (SRT uses a comma before milliseconds).
    # Fix: raw string — '\s' and '\d' are invalid escape sequences in a plain
    # literal and raise DeprecationWarning/SyntaxWarning on modern Python.
    TIMEFRAME_LINE_PATTERN = re.compile(r'\s*(\d+:\d{2},\d{3})\s*-->\s*(\d+:\d{2},\d{3})')

    def _validate(self, lines):
        """Reject files that do not start with cue number 1 and a timeframe."""
        if len(lines) < 2 or lines[0] != '1' or not self._validate_timeframe_line(lines[1]):
            raise MalformedFileError('The file does not have a valid format.')

    def _is_timeframe_line(self, line):
        # The arrow only ever appears on timeframe lines.
        return '-->' in line

    def _should_skip_line(self, line, index, caption):
        # Skip the numeric cue counter preceding each caption block.
        return caption is None and line.isdigit()
class WebVTTParser(SRTParser):
    """
    WebVTT parser.
    """

    # Matches "[HH:]MM:SS.mmm --> [HH:]MM:SS.mmm".
    # Fix: raw string — '\d'/'\s' are invalid escapes in a plain literal.
    # NOTE(review): the '.' before the milliseconds matches any character,
    # not a literal dot — presumably deliberate leniency; confirm.
    TIMEFRAME_LINE_PATTERN = re.compile(r'\s*((?:\d+:){1,2}\d{2}.\d{3})\s*-->\s*((?:\d+:){1,2}\d{2}.\d{3})')

    def _validate(self, lines):
        """Reject files whose first line lacks the WEBVTT signature."""
        if 'WEBVTT' not in lines[0]:
            raise MalformedFileError('The file does not have a valid format')

    def _should_skip_line(self, line, index, caption):
        # Skip the bare WEBVTT header at the very top of the file.
        return index == 0 and line == 'WEBVTT'
class SBVParser(TextBasedParser):
    """
    YouTube SBV parser.
    """

    # Matches "M:SS.mmm,M:SS.mmm" (comma-separated start/end pair).
    # Fix: raw string — '\s'/'\d' are invalid escapes in a plain literal.
    TIMEFRAME_LINE_PATTERN = re.compile(r'\s*(\d+:\d{2}.\d{3}),(\d+:\d{2}.\d{3})')

    def _validate(self, lines):
        """Reject files that do not open with a timeframe line."""
        if not self._validate_timeframe_line(lines[0]):
            raise MalformedFileError('The file does not have a valid format')

    def _is_timeframe_line(self, line):
        # Returns the (truthy) match object when the line is a timeframe.
        return self._validate_timeframe_line(line)
|
{"/webvtt/segmenter.py": ["/webvtt/main.py"], "/tests/webvtt.py": ["/webvtt/__init__.py", "/webvtt/main.py"], "/webvtt/__init__.py": ["/webvtt/main.py", "/webvtt/segmenter.py"], "/webvtt/main.py": ["/webvtt/parsers.py"]}
|
11,240
|
dimitri98713/Mytest
|
refs/heads/master
|
/point.py
|
def point(x, y):
    """Build a 2-D point represented as the two-element list [x, y]."""
    coords = [x, y]
    return coords
def getx(p):
    """Return the x coordinate (first element) of point *p*."""
    return p[0]
def gety(p):
    """Return the y coordinate (second element) of point *p*."""
    return p[1]
|
{"/test_point_creation.py": ["/point.py"]}
|
11,241
|
dimitri98713/Mytest
|
refs/heads/master
|
/test_point_creation.py
|
from point import *
def test_point_creation():
    """A point is represented as the two-element list [x, y]."""
    # Fix: the original built `p` but asserted nothing, so the test could
    # never fail; pin the representation explicitly.
    p = point(22, 7)
    assert p == [22, 7]
def test_access_x_and_y():
    """getx/gety return the stored coordinates."""
    # Fix: `p` was referenced without being defined here (it was local to
    # test_point_creation), so this test always failed with NameError.
    p = point(22, 7)
    assert 22 == getx(p)
    assert 7 == gety(p)
|
{"/test_point_creation.py": ["/point.py"]}
|
11,288
|
AaronChiu2017/ohmydata_spider
|
refs/heads/master
|
/ohmydata_spider/commands/__init__.py
|
__author__ = 'mee'
|
{"/ohmydata_spider/spiders/TmallCommentspider.py": ["/ohmydata_spider/items.py", "/ohmydata_spider/util/select_result.py", "/ohmydata_spider/pipelines.py"], "/ohmydata_spider/contrib/downloadermiddleware/Cookie.py": ["/ohmydata_spider/util/sinaCookie.py"]}
|
11,289
|
AaronChiu2017/ohmydata_spider
|
refs/heads/master
|
/ohmydata_spider/pipelines.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/topics/item-pipeline.html
from scrapy.exceptions import DropItem
from scrapy.conf import settings
import logging
import pymongo
import functools
def check_spider_pipeline(process_item_method):
    """Decorator for pipeline process_item methods.

    Runs the wrapped step only when the spider has opted in by listing this
    pipeline class in its `pipeline` set; otherwise the item passes through
    untouched.
    """
    @functools.wraps(process_item_method)
    def wrapper(self, item, spider):
        # Spider did not opt in: hand the item straight to the next stage.
        if self.__class__ not in spider.pipeline:
            return item
        # message for debugging ('%%' survives the first substitution as '%s')
        debug_msg = '%%s %s pipeline step' % (self.__class__.__name__,)
        logging.getLogger(spider.name).info(debug_msg % 'executing')
        return process_item_method(self, item, spider)
    return wrapper
class MongoDBPipeline(object):
    """Validate scraped proxy items and insert them into MongoDB."""

    def __init__(self):
        # Connection parameters come from the Scrapy settings module.
        connection = pymongo.MongoClient(
            settings['SingleMONGODB_SERVER'],
            settings['SingleMONGODB_PORT']
        )
        db = connection[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_COLLECTION']]

    @check_spider_pipeline
    def process_item(self, item, spider):
        """Drop invalid items, store the rest, and pass the item on."""
        # NOTE(review): this iterates the item's *keys* and tests their
        # truthiness, so it can only drop items with a falsy field name —
        # the intent was probably `if not item[data]`; confirm before changing.
        valid = True
        for data in item:
            if not data:
                valid = False
                raise DropItem("Missing {0}!".format(data))
        if valid:
            self.collection.insert(dict(item))
            # Fix: the class never defines `logger`, so `self.logger.info`
            # raised AttributeError after every insert; use the logging
            # module (already imported at the top of this file) instead.
            logging.getLogger(spider.name).info(
                "proxy ip added to MonogoDB database")
        return item
class DataTreasurePipeline(object):
    """Debug pipeline: echoes the scraped book fields to stdout (Python 2
    print statements) and passes the item through unchanged."""
    @check_spider_pipeline
    def process_item(self, item, spider):
        print 'book name is :' + item['book_name']
        print 'book description is ' + item['book_description']
        return item
class JdBookPipeline(object):
    """Placeholder pipeline: passes JD book items through unchanged."""
    @check_spider_pipeline
    def process_item(self, item, spider):
        return item
class TmallCommentPipeline(object):
    """Placeholder pipeline: passes Tmall comment items through unchanged."""
    @check_spider_pipeline
    def process_item(self, item, spider):
        return item
class WeiboPipeline(object):
    """Placeholder pipeline: passes Weibo items through unchanged."""
    @check_spider_pipeline
    def process_item(self, item, spider):
        return item
|
{"/ohmydata_spider/spiders/TmallCommentspider.py": ["/ohmydata_spider/items.py", "/ohmydata_spider/util/select_result.py", "/ohmydata_spider/pipelines.py"], "/ohmydata_spider/contrib/downloadermiddleware/Cookie.py": ["/ohmydata_spider/util/sinaCookie.py"]}
|
11,290
|
AaronChiu2017/ohmydata_spider
|
refs/heads/master
|
/ohmydata_spider/commands/clear_redis_stats.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
__author__ = 'mee'
import redis
# Redis connection details and the keys cleared by clear_stats().
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
FILTER_KEY = 'myspider:dupefilter'  # scrapy-redis duplicate-filter fingerprints
REQUEST_KEY = 'myspider:requests'  # scrapy-redis pending request queue
STATS_KEY = 'scrapy:stats'  # stats collector key
def clear_stats():
    """Delete the scrapy-redis bookkeeping keys so the next crawl starts fresh."""
    server = redis.Redis(REDIS_HOST, REDIS_PORT)
    # One DELETE per key, in the same order as before.
    for redis_key in (FILTER_KEY, REQUEST_KEY, STATS_KEY):
        server.delete(redis_key)


if __name__ == '__main__':
    clear_stats()
|
{"/ohmydata_spider/spiders/TmallCommentspider.py": ["/ohmydata_spider/items.py", "/ohmydata_spider/util/select_result.py", "/ohmydata_spider/pipelines.py"], "/ohmydata_spider/contrib/downloadermiddleware/Cookie.py": ["/ohmydata_spider/util/sinaCookie.py"]}
|
11,291
|
AaronChiu2017/ohmydata_spider
|
refs/heads/master
|
/ohmydata_spider/spiders/myspider.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
from scrapy_redis.spiders import RedisSpider
from scrapy.selector import Selector
from ohmydata_spider.util.select_result import list_first_item,clean_url
from scrapy.http import Request
from ohmydata_spider.items import TutorialItem
import ohmydata_spider.pipelines
class MySpider(RedisSpider):
    """Scrapy-redis spider that harvests proxy servers from kjson.com.

    The listing page renders each proxy's port as an inline base64 PNG;
    known image payloads are mapped back to port numbers via proxy_porturl.
    (Python 2 code: uses the print statement.)
    """
    name = 'myspider'
    start_urls = ('http://www.kjson.com/proxy/index/1',)
    # Only the MongoDB pipeline handles items from this spider
    # (checked by the check_spider_pipeline decorator).
    pipeline = set([
        ohmydata_spider.pipelines.MongoDBPipeline,
    ])
    # Maps the port image (data URI) to the port number it depicts.
    proxy_porturl = {
        "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABQAAAAUCAIAAAAC64paAAAB1UlEQVQ4jdWTzcspYRjGH9OxGdLIRhTNQhYWSKmxGxtJxrBQNvwDSlnxH8hCNkgWsrSwEU1Z2dhbKJqGRvlI81FY6NH0dBbzHm9HZ+Ws3mv13Pdz/+q+rroNEELwqbCPyZ8L/3q9eJ5vtVqr1Qoh5Pf7y+Wyy+UCACCE2u32ZDJBCDEMUywWv2kIIYRQVVWaphuNhqqql8ulWq2mUin9q9frsSy73+83m000Gh0Oh/CPvtY+Ho+32y2Xy5lMJoIg4vH4+XxGCAEARqNRPp+32+0kSTIMw3Hcu2eSJB0OR7fb1TTter0OBoNYLIZhmCRJkiT5fD59zOv1CoLwDhuNxk6nI4piKpViWdbj8VQqFQCALMsAAKvVqo8RBPF4PDRNe097sVhst9t0Oh2JRDiOWy6XeloAAAz7GjMYDH/FrVuXZZmiqOl0qpf1ej2ZTEIIBUEIhUK73U7vz2YzmqbfAzscDs/nMxgM6iVFUafTCSHkdDpxHF+v13p/s9m8/H+v7Xa7zWZzv9+/3++KogyHw0AggGEYhmGJRGIwGCiKIorieDzOZrMv2PC6qtVq1Ww2eZ7HcTwcDpdKJZvNpvuq1Wrz+dxisRQKhUwm8w/4A/3Qq/ov+Dc2O/z/LmddcAAAAABJRU5ErkJggg==":'80',
    }
    def parse(self, response):
        """Follow pagination links and delegate row parsing to parse_detail."""
        response_sel = Selector(response)
        next_link = list_first_item(response_sel.xpath(u'//div[@class="page"]/a[text()="下一页"]/@href').extract())
        if next_link:
            next_link = clean_url(response.url, next_link, response.encoding)
            yield Request(url=next_link, callback=self.parse)
            print next_link
        # parse_detail must be driven by a for loop here, otherwise only the
        # first page would be parsed (translated from the original comment).
        for item in self.parse_detail(response):
            yield item
    def parse_detail(self, response):
        """Yield one TutorialItem per proxy row whose port image is known."""
        response_sel = Selector(response)
        table_bodys = response_sel.xpath('//*[@id="dataTables-example"]/tbody/tr')
        for table_body in table_bodys:
            proxy_item = TutorialItem()
            # The port cell is an <img>; its data URI identifies the port.
            port_url = str(list_first_item(table_body.xpath('./td[2]/img/@src').extract())).split('&')[0]
            if port_url in self.proxy_porturl:
                proxy_item['proxy_url'] = list_first_item(table_body.xpath('./td[1]/text()').extract()) + ':' + self.proxy_porturl[port_url]
                proxy_item['proxy_type'] = list_first_item(table_body.xpath('./td[3]/text()').extract())
                proxy_item['proxy_locate'] = list_first_item(table_body.xpath('./td[7]/text()').extract())
            else:
                # Unknown port image: skip the row entirely.
                continue
            yield proxy_item
|
{"/ohmydata_spider/spiders/TmallCommentspider.py": ["/ohmydata_spider/items.py", "/ohmydata_spider/util/select_result.py", "/ohmydata_spider/pipelines.py"], "/ohmydata_spider/contrib/downloadermiddleware/Cookie.py": ["/ohmydata_spider/util/sinaCookie.py"]}
|
11,292
|
AaronChiu2017/ohmydata_spider
|
refs/heads/master
|
/start_cluster.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import multiprocessing
import argparse
import pexpect
import sys
class sshProcess(multiprocessing.Process):
    """Worker process that starts a Scrapy crawler on a remote host via ssh.

    (Python 2 code: uses the `file` built-in.)
    """
    def __init__(self, workerid, hostname, password, crawlername):
        multiprocessing.Process.__init__(self)
        self.workerid = workerid  # names the per-worker log file
        self.hostname = hostname  # ssh target
        self.password = password  # answered to the ssh password prompt
        self.crawlername = crawlername  # crawler to run remotely
    def run(self):
        # Launch ssh, run the crawler in the remote checkout, and log the
        # whole session transcript to <workerid>.log.
        server = pexpect.spawn('ssh %s cd ~/cpython/ohmydata_spider;scrapy crawl %s'%(self.hostname, self.crawlername))
        fout = file(self.workerid+'.log', 'w')
        server.logfile = fout
        server.expect('.*ssword:')
        server.sendline(self.password)
        server.expect(pexpect.EOF)
def main():
    """Read cluster.config and spawn ssh worker processes to run a crawler.

    cluster.config format: one "workerid hostname password" triple per line,
    space separated.
    """
    parse = argparse.ArgumentParser()
    parse.add_argument('--worker', help='input the number of you want run in the worker', type=int,default=1)
    parse.add_argument('--crawlername', help='input the cralwer name that you want running', type=str,default="")
    args = parse.parse_args()
    worker = args.worker
    crawlername = args.crawlername
    config = open('cluster.config', 'r')
    for line in config:
        info = line.split(' ')
        if len(info) == 3:
            workerid = info[0]
            hostname = info[1]
            password = info[2]
    # NOTE(review): the spawn loop below is *outside* the for loop, so workers
    # are only started with the credentials of the last valid config line, and
    # a config with no valid line raises NameError. Confirm whether one worker
    # set per host was intended.
    i = 0
    while i < worker:
        p = sshProcess(workerid, hostname, password, crawlername)
        p.start()
        i = i + 1
if __name__ == '__main__':
    main()
|
{"/ohmydata_spider/spiders/TmallCommentspider.py": ["/ohmydata_spider/items.py", "/ohmydata_spider/util/select_result.py", "/ohmydata_spider/pipelines.py"], "/ohmydata_spider/contrib/downloadermiddleware/Cookie.py": ["/ohmydata_spider/util/sinaCookie.py"]}
|
11,293
|
AaronChiu2017/ohmydata_spider
|
refs/heads/master
|
/readunicode.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Python 2 script: rewrite a JSON file containing \uXXXX escape sequences
# as UTF-8 text (byte-string decode('unicode-escape') is Python-2-only).
import codecs
f = open('TmallComment.json', 'r')
f2 = open('allTmallComment.json', 'w')
for text in f:
    try:
        f2.write(text.decode('unicode-escape').encode('utf-8'))
        print text.decode('unicode-escape').encode('utf-8')
    except Exception, e:
        # NOTE(review): best-effort — the exception is ignored and neither
        # file handle is ever closed; preserved as-is.
        print 'error'
|
{"/ohmydata_spider/spiders/TmallCommentspider.py": ["/ohmydata_spider/items.py", "/ohmydata_spider/util/select_result.py", "/ohmydata_spider/pipelines.py"], "/ohmydata_spider/contrib/downloadermiddleware/Cookie.py": ["/ohmydata_spider/util/sinaCookie.py"]}
|
11,294
|
AaronChiu2017/ohmydata_spider
|
refs/heads/master
|
/ohmydata_spider/settings.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Scrapy settings for ohmydata_spider project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/topics/settings.html
#
SPIDER_MODULES = ['ohmydata_spider.spiders']
NEWSPIDER_MODULE = 'ohmydata_spider.spiders'
ITEM_PIPELINES = {
    'ohmydata_spider.pipelines.DataTreasurePipeline': 100,
    'ohmydata_spider.pipelines.MongoDBPipeline': 200,
    'ohmydata_spider.pipelines.JdBookPipeline': 300,
    'ohmydata_spider.pipelines.TmallCommentPipeline': 400,
}
# Download delay eases server load and helps avoid detection.
DOWNLOAD_DELAY = 2
RANDOMIZE_DOWNLOAD_DELAY = True
# Disable the default s3 download handler.
DOWNLOAD_HANDLERS = {'s3':None,}
# Maximum number of concurrent requests.
CONCURRENT_REQUESTS = 64
# Maximum concurrent requests per domain.
CONCURRENT_REQUESTS_PER_DOMAIN = 32
# Downloader middleware settings; these middlewares modify every outgoing
# scrapy request and incoming response.
DOWNLOADER_MIDDLEWARES = {
    'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware':110,
    'ohmydata_spider.contrib.downloadermiddleware.selector_proxy.SelectorProxyMiddlerware':100,
    'scrapy.extensions.downloadermiddlewares.useragent.UserAgentMiddleware':None,
    'ohmydata_spider.contrib.downloadermiddleware.rotate_useragent.RotateUserAgentMiddleware':400,# replace the stock user_agent middleware with our own implementation
    # 'ohmydata_spider.contrib.downloadermiddleware.Cookie.CookiesMiddleware':401,
}
USER_AGENT = ''
# Crawler stats collection.
STATS_CLASS = 'ohmydata_spider.scrapy_graphite.graphite.RedisGraphiteStatsCollector'
# graphite settings
GRAPHITE_HOST = 'localhost'
GRAPHITE_PORT = 2003
GRAPHITE_IGNOREKEYS = []
# Disable cookies — NOTE(review): the original comment says "disable", but
# COOKIES_ENABLED is True below; confirm which is intended.
COOKIES_ENABLED = True
# COOKIES_DEBUG=False
# scrapy-redis scheduler settings.
SCHEDULER = 'scrapy_redis.scheduler.Scheduler'
SCHEDULER_PERSIST = True
SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderPriorityQueue'
SCHEDULER_IDLE_BEFORE_CLOSE = 10
# Data storage settings.
SingleMONGODB_SERVER = "localhost"
SingleMONGODB_PORT = 27017
MONGODB_DB = "proxyip_data"
MONGODB_COLLECTION = "proxyip_collection"
ShardMONGODB_SERVER = "localhost"
ShardMONGODB_PORT = 27017
ShardMONGODB_DB = "proxyip_mongo"
GridFs_Collection = "proxyip_table"
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
# Feed export: serialize scraped items to a file.
FEED_URI = u'ProductInfo.csv'
FEED_FORMAT='CSV'
|
{"/ohmydata_spider/spiders/TmallCommentspider.py": ["/ohmydata_spider/items.py", "/ohmydata_spider/util/select_result.py", "/ohmydata_spider/pipelines.py"], "/ohmydata_spider/contrib/downloadermiddleware/Cookie.py": ["/ohmydata_spider/util/sinaCookie.py"]}
|
11,295
|
AaronChiu2017/ohmydata_spider
|
refs/heads/master
|
/ohmydata_spider/spiders/dataspider.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
__author__ = 'mee'
from scrapy_redis.spiders import RedisSpider
from scrapy.selector import Selector
from ohmydata_spider.util.select_result import list_first_item,clean_url
from scrapy.http import Request
from ohmydata_spider.items import DataTreasureItem
import ohmydata_spider.pipelines
class DataSpider(RedisSpider):
    """Scrapy-redis spider that scrapes book names/descriptions from woaidu.org.

    (Python 2 code: uses the print statement.)
    """
    name = "ohmygourd"
    start_urls = (
        'http://www.woaidu.org/sitemap_1.html',
    )
    # Only the debug DataTreasurePipeline handles items from this spider.
    pipeline = set([
        ohmydata_spider.pipelines.DataTreasurePipeline,
    ])
    def parse(self, response):
        """Follow pagination and per-book detail links."""
        response_sel = Selector(response)
        next_link = list_first_item(response_sel.xpath(u'//div[@class="k2"]/div/a[text()="下一页"]/@href').extract())
        if next_link:
            next_link = clean_url(response.url, next_link, response.encoding)
            yield Request(url=next_link, callback=self.parse)
        for detail_link in response_sel.xpath(u'//div[contains(@class,"sousuolist")]/a/@href').extract():
            if detail_link:
                detail_link = clean_url(response.url, detail_link, response.encoding)
                print detail_link
                yield Request(url=detail_link, callback=self.parse_detail)
    def parse_detail(self, response):
        """Extract the book name and description from a detail page."""
        data_item = DataTreasureItem()
        response_selector = Selector(response)
        print '********************book name is ' + list_first_item(response_selector.xpath('//div[@class="zizida"][1]/text()').extract())
        data_item['book_name'] = list_first_item(response_selector.xpath('//div[@class="zizida"][1]/text()').extract())
        data_item['book_description'] = list_first_item(response_selector.xpath('//div[@class="lili"][1]/text()').extract())
        yield data_item
|
{"/ohmydata_spider/spiders/TmallCommentspider.py": ["/ohmydata_spider/items.py", "/ohmydata_spider/util/select_result.py", "/ohmydata_spider/pipelines.py"], "/ohmydata_spider/contrib/downloadermiddleware/Cookie.py": ["/ohmydata_spider/util/sinaCookie.py"]}
|
11,296
|
AaronChiu2017/ohmydata_spider
|
refs/heads/master
|
/ohmydata_spider/contrib/downloadermiddleware/selector_proxy.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
__author__ = 'mee'
import base64
import random
from scrapy.downloadermiddlewares.httpproxy import HttpProxyMiddleware
import pymongo
import logging
class SelectorProxyMiddlerware(HttpProxyMiddleware):
    """Downloader middleware that attaches a randomly chosen proxy to requests.

    Original author's note (translated): a middleware subclass must define
    __init__, otherwise it will not be executed.
    """

    def __init__(self):
        # Connect to the proxy database when the middleware is instantiated
        # (translated from the original comment).
        SingleMONGODB_SERVER = "localhost"
        SingleMONGODB_PORT = 27017
        MONGODB_DB = "proxyip_data"
        MONGODB_COLLECTION = "proxyip_collection"
        try:
            connection = pymongo.MongoClient(
                SingleMONGODB_SERVER,
                SingleMONGODB_PORT
            )
            db = connection[MONGODB_DB]
            self.collection = db[MONGODB_COLLECTION]
        except Exception as e:
            logging.warning("connection mongodb error %s", e.message)

    def process_request(self, request, spider):
        """Pick a proxy matching spider.proxy and attach it to the request."""
        proxy = self.getproxy_ip(spider.proxy)
        if proxy is not None:
            logger = logging.getLogger(spider.name)
            logger.info("Select the proxy : %s" % (proxy['proxy_url']))
            if proxy['user_pass'] is not None:
                request.meta['proxy'] = proxy['proxy_url']
                encoded_user_pass = base64.encodestring(proxy['user_pass']).strip()
                # Fix: the Basic auth scheme must be separated from the
                # credentials by a space ("Basic <credentials>"); the original
                # concatenated them without one, yielding an invalid header.
                request.headers['Proxy-Authorization'] = 'Basic ' + encoded_user_pass
            else:
                request.meta['proxy'] = proxy['proxy_url']

    def getproxy_ip(self, proxy_type):
        """Randomly select one proxy of the requested type; None on failure."""
        try:
            if proxy_type == 'http':
                proj = self.collection.find({"proxy_type": "HTTP"}, {"proxy_url": 1})
                proj.skip(random.randint(0, proj.count()))
                proxy_info = proj.limit(-1).next()
                proxy_dict = {'proxy_url': "http://%s"%(proxy_info['proxy_url']), "user_pass": None}
            elif proxy_type == 'https':
                proj = self.collection.find({"proxy_type": "HTTPS"}, {"proxy_url": 1})
                proj.skip(random.randint(0, proj.count()))
                proxy_info = proj.limit(-1).next()
                proxy_dict = {'proxy_url': "https://%s"%(proxy_info['proxy_url']), "user_pass": None}
            elif proxy_type == 'GFW':
                # Route through the local forwarding proxy.
                proxy_dict = {'proxy_url': "http://127.0.0.1:8118", "user_pass": None}
            else:
                # Fix: an unknown type previously hit an unbound `proxy_dict`
                # (the NameError was swallowed below); return None explicitly.
                return None
            return proxy_dict
        except Exception as e:
            logging.warning("Get proxy Exception from mongodb warn info: %s", e.message)
            return None
if __name__ == '__main__':
test_proxy = SelectorProxyMiddlerware()
# test_proxy.getproxy_ip()
# test_proxy.process_request(request="",spider="")
|
{"/ohmydata_spider/spiders/TmallCommentspider.py": ["/ohmydata_spider/items.py", "/ohmydata_spider/util/select_result.py", "/ohmydata_spider/pipelines.py"], "/ohmydata_spider/contrib/downloadermiddleware/Cookie.py": ["/ohmydata_spider/util/sinaCookie.py"]}
|
11,297
|
AaronChiu2017/ohmydata_spider
|
refs/heads/master
|
/ohmydata_spider/util/select_result.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import urllib
import urlparse
from urlparse import urljoin
from w3lib.html import replace_entities
def clean_link(link_text):
return link_text.strip("\t\r\n '\"")
# 返回第一个url地址
list_first_item = lambda x:x[0] if x else None
# 将url地址组装返回,并移除空格标点 entites
clean_url = lambda base_url, u, response_encoding: urljoin(base_url, replace_entities(clean_link(u.decode(response_encoding))))
# 获取请求参数
def get_query(url, key):
bits = list(urlparse.urlparse(url))
query = urlparse.parse_qs(bits[4])
return query[key][0]
# 设置请求参数
def set_query(url, **args):
bits = list(urlparse.urlparse(url))
query = urlparse.parse_qs(bits[4])
for key in args:
query[key] = args[key]
bits[4] = urllib.urlencode(query, True)
return urlparse.urlunparse(bits)
|
{"/ohmydata_spider/spiders/TmallCommentspider.py": ["/ohmydata_spider/items.py", "/ohmydata_spider/util/select_result.py", "/ohmydata_spider/pipelines.py"], "/ohmydata_spider/contrib/downloadermiddleware/Cookie.py": ["/ohmydata_spider/util/sinaCookie.py"]}
|
11,298
|
AaronChiu2017/ohmydata_spider
|
refs/heads/master
|
/ohmydata_spider/spiders/TmallCommentspider.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
from scrapy_redis.spiders import RedisSpider
from ohmydata_spider.items import TmallCommentItem
from scrapy.selector import Selector
from scrapy.http import Request
from ohmydata_spider.util.select_result import get_query, set_query, clean_link
import ohmydata_spider.pipelines
import re, json
import logging
__author__ = 'mee'
class TmallCommentSpider(RedisSpider):
name = "TmallComment"
start_urls = (
"https://nike.world.tmall.com/",
"https://jackjones.world.tmall.com/",
"https://suning.world.tmall.com/",
"https://xiaomi.world.tmall.com/",
"https://only.world.tmall.com/",
"https://uniqlo.world.tmall.com/",
"https://apple.tmall.com/",
"https://adidas.tmall.com/",
"https://newbalance.tmall.com/",
"https://lee.tmall.com/",
)
categoryUrl = "https://suning.world.tmall.com/category-1115569769.htm?search=y&catId=1115569769&pageNo=1"
asyncUrl = "https://suning.world.tmall.com/i/asynSearch.htm?mid=null&wid=null&path=?&&search=y&catId=?&scid=?&pageNo=?"
rateUrl = "https://rate.tmall.com/list_detail_rate.htm?itemId=522155891308&sellerId=2616970884¤tPage=1"
pipeline = set([
ohmydata_spider.pipelines.TmallCommentPipeline,
])
proxy = 'GFW'
def parse(self, response):
response_sel = Selector(response)
category = response_sel.xpath(u'//a[contains(@href,"category")]/@href').extract()
sellerid = response_sel.xpath(u'//meta[contains(@content,"userId")]/@content').extract()
# get the sellerid and replace it
sellerId = re.findall(r'userId=(\d+)', sellerid[0])[0]
if sellerId:
self.rateUrl = set_query(self.rateUrl, sellerId=sellerId)
else:
self.logger.error("Get the sellerid error !")
domain = re.findall(r'https:(.*)', response.url)[0]
if domain:
# replace the request page domain
self.categoryUrl, result_count = re.subn(r'//(.*?)/', domain, self.categoryUrl)
self.asyncUrl, result_count = re.subn(r'//(.*?)/', domain, self.asyncUrl)
else:
self.logger.error("Get the request domain error!")
all_category = set()
for category_url in category:
category_id = re.findall(r'category-(\d+).htm', category_url)
if category_id:
all_category.add(category_id[0])
for category_id in all_category:
# set the category id
result_url, result_count = re.subn(r'(\d+\d+)', category_id, self.categoryUrl)
self.logger.info("category url : %s", result_url)
yield Request(url=result_url, callback=self.parse_category)
def parse_category(self, response):
response_sel = Selector(response)
data_widgetid = response_sel.xpath(u'//*[@class="J_TModule" and @data-title="搜索列表"]/@data-widgetid').extract()
wid = data_widgetid[0]
mid = 'w-' + wid + '-0'
catId = get_query(response.url, 'catId')
path = "/category"+catId + '.htm'
pageNo = get_query(response.url, 'pageNo')
page_url = set_query(self.asyncUrl, wid=wid, mid=mid, path=path, catId=catId, scid=catId,pageNo=pageNo)
yield Request(url=page_url, callback=self.parse_nextpage)
def parse_nextpage(self, response):
response_sel = Selector(response)
next_pageurl = response_sel.xpath(u'//a[contains(@class,"next")]/@href').extract()
if len(next_pageurl) > 0:
page_num = get_query(next_pageurl[0], 'pageNo')
next_url = set_query(self.categoryUrl, pageNo=page_num)
yield Request(url=next_url, callback=self.parse_category)
else:
self.logger.warning("Can not find the next page url ! ")
dl_bodys = response_sel.xpath(u'/html/body/div/div[3]')
for dl_body in dl_bodys:
item_lines = dl_body.xpath(u'./div/dl')
for item_line in item_lines:
comment_item = TmallCommentItem()
data_id = item_line.xpath(u'./@data-id').extract()
item_id = re.findall('(\d+)', data_id[0])
item_name = item_line.xpath(u'./dd[contains(@class,"detail")]/a/text()').extract()
item_type = item_line.xpath(u'./dd[contains(@class,"detail")]/a/span/text()').extract()
item_price = item_line.xpath(u'./dd[contains(@class,"detail")]/div/div[contains(@class,"cprice-area")]/span/text()').extract()
item_sales = item_line.xpath(u'./dd[contains(@class,"detail")]/div/div[contains(@class,"sale-area")]/span/text()').extract()
if len(item_name) > 1:
comment_item['ItemName'] = item_name[0].strip() + ' ' + item_name[1].strip()
else:
comment_item['ItemName'] = item_name[0].strip()
if len(item_type) > 0:
comment_item['ItemType'] = item_type[0].strip()
if len(item_price) > 1:
comment_item['ItemPrice'] = item_price[1].strip()
if len(item_sales) > 0:
comment_item['ItemSales'] = item_sales[0].strip()
yield comment_item
# if len(item_id) > 0:
# comment_url = set_query(self.rateUrl, itemId=item_id[0])
# yield Request(url=comment_url,
# meta={'item': comment_item},
# callback=self.parse_comment)
# else:
# self.logger.error('Get the item id error !')
# def parse_comment(self, response):
# response_sel = Selector(response)
# comment_item = response.meta['item']
#
# allPageCount = re.findall('"lastPage\":(.+?)\,', response_sel.extract())[0]
#
# # 对每一页的评论进行解析
# i = 1
# while i < int(allPageCount):
# next_link = set_query(response.url, currentPage=i)
#
# i = i + 1
# yield Request(url=next_link,
# meta={'item': comment_item},
# callback=self.parse_detail)
#
# def parse_detail(self, response):
#
# self.logger.info("parse url : %s", response.url)
# response_sel = Selector(response)
# commentJson = re.findall('\"rateList\":(\[.*?\])\,\"searchinfo\"', response_sel.extract())[0]
#
# for data in json.loads(commentJson):
# comment_item = response.meta['item']
#
# comment_item['itemId'] = get_query(response.url, 'itemId')
# comment_item['userNick'] = data['displayUserNick']
# comment_item['rateDate'] = data['rateDate']
# comment_item['rateContent'] = data['rateContent']
#
# yield comment_item
|
{"/ohmydata_spider/spiders/TmallCommentspider.py": ["/ohmydata_spider/items.py", "/ohmydata_spider/util/select_result.py", "/ohmydata_spider/pipelines.py"], "/ohmydata_spider/contrib/downloadermiddleware/Cookie.py": ["/ohmydata_spider/util/sinaCookie.py"]}
|
11,299
|
AaronChiu2017/ohmydata_spider
|
refs/heads/master
|
/ohmydata_spider/contrib/downloadermiddleware/Cookie.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import random
import logging
from ohmydata_spider.util.sinaCookie import cookies
__author__ = 'mee'
class CookiesMiddleware(object):
    """Attach a randomly selected Sina Weibo cookie to every outgoing request."""

    def process_request(self, request, spider):
        # One cookie dict is drawn from the pre-fetched pool per request.
        request.cookies = random.choice(cookies)
|
{"/ohmydata_spider/spiders/TmallCommentspider.py": ["/ohmydata_spider/items.py", "/ohmydata_spider/util/select_result.py", "/ohmydata_spider/pipelines.py"], "/ohmydata_spider/contrib/downloadermiddleware/Cookie.py": ["/ohmydata_spider/util/sinaCookie.py"]}
|
11,300
|
AaronChiu2017/ohmydata_spider
|
refs/heads/master
|
/ohmydata_spider/spiders/spidertest.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
__author__ = 'mee'
from scrapy_redis.spiders import RedisSpider
from scrapy.selector import Selector
from scrapy.http import Request
from ohmydata_spider.items import TmallCommentItem
import re
import ohmydata_spider.pipelines
class TestSpider(RedisSpider):
    """Throwaway spider: prints all absolute links on the Weibo public page.

    (Python 2 code: uses the print statement.)
    """
    name = "SpiderTest"
    start_urls = (
        "http://weibo.cn/pub/",
    )
    # Empty string: no proxy type requested from the proxy middleware.
    proxy = ''
    def parse(self, response):
        response_sel = Selector(response)
        hot_weibo = response_sel.xpath(u'//a[contains(@href, "http")]/@href')
        print hot_weibo.extract()
|
{"/ohmydata_spider/spiders/TmallCommentspider.py": ["/ohmydata_spider/items.py", "/ohmydata_spider/util/select_result.py", "/ohmydata_spider/pipelines.py"], "/ohmydata_spider/contrib/downloadermiddleware/Cookie.py": ["/ohmydata_spider/util/sinaCookie.py"]}
|
11,301
|
AaronChiu2017/ohmydata_spider
|
refs/heads/master
|
/ohmydata_spider/util/sinaCookie.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
import json
import base64
import requests
import logging
__author__ = 'mee'
'''
Crawl Weibo Account
'''
# Weibo login credentials used by getCookies() to harvest session cookies.
# NOTE(review): credentials committed in source are a security risk —
# consider loading them from the environment or a config file instead.
WeiBoAccount = [
    {'user': 'luzuidv13893@163.com', 'psw': 'a123456'},
]
def getCookies(weibo):
"""
function: get cookies
:param weibo: weibo Account Info
:return: cookies
"""
cookies = []
loginURL = r"https://login.sina.com.cn/sso/login.php?client=ssologin.js(v1.4.15)"
for elem in weibo:
account = elem['user']
password = elem['psw']
username = base64.b64encode(account.encode('utf-8')).decode('utf-8')
postData = {
"entry": "sso",
"gateway": "1",
"from": "null",
"savestate": "30",
"useticket": "0",
"pagerefer": "",
"vsnf": "1",
"su": username,
"service": "sso",
"sp": password,
"sr": "1440*900",
"encoding": "UTF-8",
"cdult": "3",
"domain": "sina.com.cn",
"prelt": "0",
"returntype": "TEXT",
}
session = requests.Session()
r = session.post(loginURL, data=postData)
jsonStr = r.content.decode('gbk')
info = json.loads(jsonStr)
if info["retcode"] == "0":
print "Cookie Account: %s"%(account)
logging.info("Cookie Account: %s"%(account))
cookie = session.cookies.get_dict()
cookies.append(cookie)
else:
logging.warn("Cookie get Failed : %s"%(info['reason']))
return cookies
cookies = getCookies(WeiBoAccount)
|
{"/ohmydata_spider/spiders/TmallCommentspider.py": ["/ohmydata_spider/items.py", "/ohmydata_spider/util/select_result.py", "/ohmydata_spider/pipelines.py"], "/ohmydata_spider/contrib/downloadermiddleware/Cookie.py": ["/ohmydata_spider/util/sinaCookie.py"]}
|
11,302
|
AaronChiu2017/ohmydata_spider
|
refs/heads/master
|
/ohmydata_spider/items.py
|
#!/usr/bin/python
# -*- coding:utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/topics/items.html
from scrapy.item import Item, Field
class TutorialItem(Item):
    """Proxy-server record scraped by the proxy spider."""
    # define the fields for your item here like:
    # name = Field()
    proxy_url = Field()
    proxy_type = Field()
    proxy_locate = Field()
class DataTreasureItem(Item):
    """Book name/description pair."""
    book_name = Field()
    book_description = Field()
class JdBookItem(Item):
    """Book listing scraped from JD."""
    number = Field()
    bookName = Field()
    author = Field()
    press = Field()
    bookId = Field()
    price = Field()
    preferentialPrice = Field()
class TmallCommentItem(Item):
    """Tmall product info plus one buyer review."""
    ItemName = Field()
    ItemType = Field()
    ItemSales = Field()
    ItemPrice = Field()
    itemId = Field()
    userNick = Field()
    rateDate = Field()
    rateContent = Field()
class WeiboInfoItem(Item):
    """
    Weibo account profile info.
    """
    id = Field()
    NickName = Field()
    Gender = Field()
    Province = Field()
    City = Field()
    Signature = Field()
    Birthday = Field()
    Num_Tweets = Field()
    Num_Follows = Field()
    Num_Fans = Field()
    Sex_Orientation = Field()
    Marriage = Field()
    URL = Field()
class WeiboContentItem(Item):
    """
    A single Weibo post and its engagement counts.
    """
    id = Field()
    ID = Field()
    Content = Field()
    PubTime = Field()
    Co_oridinates = Field() # location
    Tools = Field() # publish tools eg.computer phone
    Like = Field() # count of the like
    Comment = Field() # count of the comment
    Transfer = Field()
|
{"/ohmydata_spider/spiders/TmallCommentspider.py": ["/ohmydata_spider/items.py", "/ohmydata_spider/util/select_result.py", "/ohmydata_spider/pipelines.py"], "/ohmydata_spider/contrib/downloadermiddleware/Cookie.py": ["/ohmydata_spider/util/sinaCookie.py"]}
|
11,303
|
Stefany-Olivera/Python_Project
|
refs/heads/master
|
/apps/friend2_app/views.py
|
from __future__ import unicode_literals
from django.contrib import messages
from django.shortcuts import render, redirect, HttpResponse
from .models import User, Friend
def index(request):
    """Render the landing page with the login/registration forms."""
    # print(User.objects.all())
    return render(request, 'friend2_app/index.html')
def register(request):
    """Create a new account; on success log the user in and go to /friends."""
    # print request.POST
    response = User.objects.register(
        name = request.POST["name"],
        alias = request.POST["alias"],
        email = request.POST["email"],
        password = request.POST["password"],
        confirm_password = request.POST["confirm_password"],
        bday = request.POST["bday"],
    )
    if response['valid']:
        messages.add_message(request, messages.SUCCESS, 'Welcome to the site!')
        # Store the minimal session state used by the rest of the app.
        request.session["user_id"]=response["user"].id
        request.session["name"]=response["user"].name
        return redirect("/friends")
    else:
        # Flash every validation error and bounce back to the form.
        for error_message in response["errors"]:
            messages.add_message(request, messages.ERROR, error_message)
        return redirect("/")
def login(request):
    """Authenticate an existing user; on success go to /friends."""
    response = User.objects.login(
        email = request.POST["email"],
        password = request.POST["password"]
    )
    if response['valid']:
        messages.add_message(request, messages.SUCCESS, 'See you soon!')
        # Same session keys as register(): user_id and display name.
        request.session["user_id"]=response["user"].id
        request.session["name"]=response["user"].name
        return redirect("/friends")
    else:
        for error_message in response["errors"]:
            messages.add_message(request, messages.ERROR, error_message)
        return redirect("/")
def dashboard(request):
    """
    Render the dashboard for the logged-in user: their current friends
    plus every other user they could still add.
    """
    person = User.objects.get(id=request.session['user_id'])
    # Users the current user is already friends with (outgoing edges).
    friendship = [f.friend2 for f in Friend.objects.filter(friend1=person)]
    # Everyone else: not the current user and not already a friend.
    # Membership is tested against a set of primary keys, replacing the
    # original O(n*m) list scans with O(1) lookups.
    friend_ids = {friend.id for friend in friendship}
    others2 = [
        u for u in User.objects.all()
        if u.id != request.session['user_id'] and u.id not in friend_ids
    ]
    context = {
        'person' : person,
        'users' : others2,
        'friends' : friendship
    }
    return render(request, 'friend2_app/dashboard.html', context)
def profile(request, id):
    """Show the profile page for the user with primary key *id*."""
    show = User.objects.get(id=id)
    context = {
        'user' : show
    }
    return render(request, 'friend2_app/profile.html', context)
def addfriend(request, id):
    """Create the two-way friendship between the session user and *id*."""
    User.themanager.add(request.session['user_id'], id)
    return redirect('/friends')
def removefriend(request, id):
    """Delete both directions of the friendship with user *id*."""
    User.themanager.remove(request.session['user_id'], id)
    return redirect('/friends')
def logout(request):
    """Drop all session state and return to the landing page."""
    request.session.clear()
    return redirect('/')
|
{"/apps/friend2_app/views.py": ["/apps/friend2_app/models.py"]}
|
11,304
|
Stefany-Olivera/Python_Project
|
refs/heads/master
|
/apps/friend2_app/models.py
|
from __future__ import unicode_literals
from django.db import models
import re
import bcrypt
from datetime import datetime
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
class UserManager(models.Manager):
    """Validation + persistence helpers for User (login/register/friends)."""
    def login(self, email, password):
        """
        Validate credentials; return {'errors': [...], 'user': User|None,
        'valid': bool}. Passwords are checked against the stored bcrypt hash.
        """
        response={
            "errors":[],
            "user":None,
            "valid":True
        }
        if len(email) < 1:
            response["valid"] = False
            response["errors"].append("Email is required")
        elif not EMAIL_REGEX.match(email):
            response["valid"] = False
            response["errors"].append("Invalid Email")
        else:
            # Emails are stored lowercased (see register()), so match lowered.
            list_of_emails = User.objects.filter(email=email.lower())
            if len(list_of_emails) == 0:
                response["valid"] = False
                response["errors"].append("Email does not exist")
        if len(password) < 8:
            response["valid"] = False
            response["errors"].append("Password must be 8 characters or more")
        if response["valid"]:
            # Only reached when the email branch succeeded, so
            # list_of_emails is bound and non-empty here.
            if bcrypt.checkpw(password.encode(), list_of_emails[0].password.encode()):
                response["user"] = list_of_emails[0]
            else:
                response["valid"] = False
                response["errors"].append("Incorrect Password")
        return response
    def register(self, name, alias, email, password, confirm_password, bday):
        """
        Validate registration fields and create the User on success.
        Returns the same {'errors', 'user', 'valid'} shape as login().
        """
        now=datetime.now()
        response={
            "errors":[],
            "user": None,
            "valid": True
        }
        if len(name) < 1:
            response["valid"] = False
            response["errors"].append("Name is required")
        if len(alias) <1:
            response["valid"] = False
            response["errors"].append("Alias is required")
        if len(email) < 1:
            response["valid"] = False
            response["errors"].append("Email is required")
        elif not EMAIL_REGEX.match(email):
            response["valid"] = False
            response["errors"].append("Invalid Email")
        else:
            list_of_emails=User.objects.filter(email=email)
            if len(list_of_emails) > 0:
                response["valid"] = False
                response["errors"].append("Email already exists")
        if len(password) < 8:
            response["valid"] = False
            response["errors"].append("Password must be 8 characters or more")
        if confirm_password != password:
            response["valid"] = False
            response["errors"].append("Password must match Confirm Password")
        if len(bday) < 1:
            response["valid"]=False
            response["errors"].append('Bday is required!')
        # NOTE(review): strptime raises ValueError for a malformed date
        # string instead of adding a validation error -- confirm the form
        # guarantees the YYYY-MM-DD format.
        elif now < datetime.strptime(bday,'%Y-%m-%d'):
            response["valid"]=False
            response["errors"].append('Bday cant be in the future!')
        if response["valid"]:
            response["user"] = User.objects.create(
                name=name,
                alias=alias,
                email=email.lower(),
                password=bcrypt.hashpw(password.encode(), bcrypt.gensalt()),
                bday=bday,
            )
        return response
    def add(self, user_id, friend_id):
        """Create the friendship in both directions (two Friend rows)."""
        user = self.get(id=user_id)
        friend = self.get(id=friend_id)
        Friend.objects.create(friend1=user, friend2=friend)
        Friend.objects.create(friend1=friend, friend2=user)
    def remove(self, user_id, friend_id):
        """Delete both directions of the friendship."""
        user = self.get(id=user_id)
        friend = self.get(id=friend_id)
        relationship1 = Friend.objects.get(friend1=user, friend2=friend)
        relationship2 = Friend.objects.get(friend1=friend, friend2=user)
        relationship1.delete()
        relationship2.delete()
class User(models.Model):
    # Application user; passwords are stored as bcrypt hashes (see
    # UserManager.register).
    name = models.CharField(max_length=255)
    alias = models.CharField(max_length=255)
    email = models.CharField(max_length=255)
    password = models.CharField(max_length=255)
    bday = models.DateField()
    created_at = models.DateTimeField(auto_now_add = True)
    # NOTE(review): auto_now_add means updated_at is never refreshed on
    # save; Friend below uses auto_now=True -- confirm which is intended.
    updated_at = models.DateTimeField(auto_now_add = True)
    # Both managers are the same class; 'objects' is the Django default name.
    themanager = UserManager()
    objects = UserManager()
class Friend(models.Model):
    # One directed friendship edge; UserManager.add() always creates the
    # edge in both directions.
    friend1 = models.ForeignKey(User, related_name='asks')
    friend2 = models.ForeignKey(User, related_name='accepts')
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    objects = models.Manager()
|
{"/apps/friend2_app/views.py": ["/apps/friend2_app/models.py"]}
|
11,305
|
Stefany-Olivera/Python_Project
|
refs/heads/master
|
/apps/friend2_app/apps.py
|
from __future__ import unicode_literals
from django.apps import AppConfig
class Friend2AppConfig(AppConfig):
    # Django app configuration; 'name' must match the importable app label
    # -- presumably 'friend2_app' resolves under apps/; verify in settings.
    name = 'friend2_app'
|
{"/apps/friend2_app/views.py": ["/apps/friend2_app/models.py"]}
|
11,306
|
Stefany-Olivera/Python_Project
|
refs/heads/master
|
/apps/friend2_app/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-02-04 17:57
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django.db.models.manager
class Migration(migrations.Migration):
    # Auto-generated initial schema for friend2_app (Django 1.10):
    # creates User and Friend, then wires the two Friend FKs to User.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Friend',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('alias', models.CharField(max_length=255)),
                ('email', models.CharField(max_length=255)),
                ('password', models.CharField(max_length=255)),
                ('bday', models.DateField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now_add=True)),
            ],
            managers=[
                ('themanager', django.db.models.manager.Manager()),
            ],
        ),
        # FKs are added after both models exist.
        migrations.AddField(
            model_name='friend',
            name='friend1',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='asks', to='friend2_app.User'),
        ),
        migrations.AddField(
            model_name='friend',
            name='friend2',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='accepts', to='friend2_app.User'),
        ),
    ]
|
{"/apps/friend2_app/views.py": ["/apps/friend2_app/models.py"]}
|
11,344
|
facosta8/MNO-Black-Scholes
|
refs/heads/master
|
/app.py
|
# -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_daq as daq
from dash.dependencies import Input, Output, State
from plotly import tools
import plotly.graph_objs as go
import pandas as pd
import dask as dask
from dask.distributed import Client, progress
import dask.dataframe as dd
# client = Client()
# client.cluster
# Importo funciones de regresion.py
from regresion import download_info, analisis_p
app = dash.Dash(__name__)
# lista que especifica lo que se descarga
lista_desc = ["LBMA/GOLD", "CHRIS/CME_O1", "LBMA/SILVER", "CHRIS/CME_DA1",
"CHRIS/CME_LN1", "CHRIS/CME_C1", "CHRIS/CME_RR1",
"CHRIS/CME_LB1", "CHRIS/CME_RB1", "CHRIS/CME_NG1",
"CHRIS/CME_PL1", "CHRIS/CME_S1"]
download_info(lista_desc)
nombres_comunes = dict({'LBMA/GOLD': 'Gold',
'LBMA/SILVER': 'Silver',
'CHRIS/CME_PL1': 'Platinum',
'CHRIS/CME_O1': 'Oats',
'CHRIS/CME_DA1': 'Dairy',
'CHRIS/CME_C1': 'Corn',
'CHRIS/CME_RR1': 'Rice',
'CHRIS/CME_LB1': 'Lumber',
'CHRIS/CME_RB1': 'Gasoline',
'CHRIS/CME_NG1': 'Natural gas',
'CHRIS/CME_S1': 'Soybean'
})
colors = {
'background': '#3d3d3d',
'text': '#7FDBFF'
}
app.layout = html.Div(style={'backgroundColor': colors['background']},
children=[
html.Br(),
html.Br(),
html.H1("Commodities Finance Forecast"),
html.Br(),
html.Div(style={'backgroundColor': colors['background']},
children=[
daq.NumericInput(id='LBMA/GOLD',
className='numerico',
value=0,
size=100,
min=0,
max=10000,
label='Gold',
style={'color': '#EFEFEF'}),
html.Br(),
daq.NumericInput(id='LBMA/SILVER',
className='numerico',
value=0,
size=100,
min=0,
max=10000,
label='Silver'),
html.Br(),
daq.NumericInput(id='CHRIS/CME_O1',
className='numerico',
value=0,
size=100,
min=0,
max=10000,
label='Oats'),
html.Br(),
daq.NumericInput(id='CHRIS/CME_DA1',
className='numerico',
value=0,
size=100,
min=0,
max=10000,
label='Dairy'),
html.Br(),
daq.NumericInput(id='CHRIS/CME_C1',
className='numerico',
value=0,
size=100,
min=0,
max=10000,
label='Corn'),
html.Br(),
daq.NumericInput(id='CHRIS/CME_RR1',
className='numerico',
value=0,
size=100,
min=0,
max=10000,
label='Rice'),
html.Br(),
daq.NumericInput(id='CHRIS/CME_RB1',
className='numerico',
value=0,
size=100,
min=0,
max=10000,
label='Gasoline'),
html.Br(),
daq.NumericInput(id='CHRIS/CME_NG1',
className='numerico',
value=0,
size=100,
min=0,
max=10000,
label='Natural Gas'),
html.Br(),
daq.NumericInput(id='CHRIS/CME_S1',
className='numerico',
value=0,
size=100,
min=0,
max=10000,
label='Soybean'),
html.Br(),
], className='listafuturos'),
html.Div(style={'backgroundColor': colors['background']},
children=[
html.Label('Investment period',
className='commodity'),
daq.Slider(id='meses',
min=2,
max=24,
marks={'2': '2', '6': '6', '12': '12',
'18': '18', '24': '24'},
value=18,
size=300,
handleLabel='Months'
),
html.Br(),
html.Button(html.Span('Estimate returns'),
id='botonCalculo',
className='boton2'),
html.Br(),
html.Br(),
], className='areacalculo'),
html.Div(style={'backgroundColor': colors['background']},
children=[
html.P('''Choose the commodities you are interested in,
select the investment period and click on the button''',
id='textoresultado',
className='resultado')
], className='areacalculo'),
dcc.Graph(
id='grafica_valores',
figure={
'data': [
],
'layout': {
'plot_bgcolor': colors['background'],
'paper_bgcolor': colors['background'],
'font': {
'color': colors['text']
}
}
}
)
])
@app.callback([Output('grafica_valores', 'figure'),
               Output('textoresultado', 'children')
               ],
              [Input('botonCalculo', 'n_clicks')],
              state=[State('LBMA/GOLD', 'value'),
                     State('LBMA/SILVER', 'value'),
                     State('CHRIS/CME_O1', 'value'),
                     State('CHRIS/CME_DA1', 'value'),
                     State('CHRIS/CME_C1', 'value'),
                     State('CHRIS/CME_RR1', 'value'),
                     State('CHRIS/CME_RB1', 'value'),
                     State('CHRIS/CME_NG1', 'value'),
                     State('CHRIS/CME_S1', 'value'),
                     State('meses', 'value')
                     ]
              )
def update_graph(n_clicks, in1, in2, in3, in4, in5, in6,
                 in7, in8, in9, meses):
    """
    Dash callback: on button click, compute the portfolio return for the
    commodities with a non-zero quantity and build one subplot per
    selected commodity. Returns (figure, result_text).
    """
    df = pd.read_csv('datos.csv') # no longer reads from the modelo_simple/ folder
    df.Date = pd.to_datetime(df.Date)
    # Map every input widget to its entered quantity.
    todos = dict({'LBMA/GOLD': in1,
                  'LBMA/SILVER': in2,
                  'CHRIS/CME_O1': in3,
                  'CHRIS/CME_DA1': in4,
                  'CHRIS/CME_C1': in5,
                  'CHRIS/CME_RR1': in6,
                  'CHRIS/CME_RB1': in7,
                  'CHRIS/CME_NG1': in8,
                  'CHRIS/CME_S1': in9
                  })
    # Keep only commodities the user actually wants (quantity > 0).
    validos = dict((k, v) for k, v in todos.items() if v > 0)
    lista_validos = list(validos.keys())
    lista_run = [meses, '0.0001']
    # 0.0001 is the minimum r2 we settled on; kept low to reduce the
    # number of betas. Left here in case a slider is added later to
    # make it adjustable.
    lista_run = lista_run+lista_validos
    # Run the regression model to estimate returns.
    rendim = analisis_p(lista_run)
    cant = list(validos.values())
    rendim['cant'] = cant
    # te = invested value today, ga = predicted value at horizon.
    te = sum(rendim['last_price']*rendim['cant'])
    ga = sum(rendim['predicted_price']*rendim['cant'])
    re = str(round(100 * (ga - te) / te, 2)) + '%'
    # Keep only the selected columns plus Date (inherited logic, kept as-is).
    cols_seleccionar = lista_validos.copy()
    cols_seleccionar.append('Date')
    df = df.filter(items=cols_seleccionar)
    df = df.dropna()
    # texto is the string shown as the final output.
    texto = 'The percentual value increase over {} months is {}'.format(meses,
                                                                        re)
    # estatus = app.get_asset_url('work.gif')
    # Lay the subplots out two per row.
    lineas = (len(lista_validos) + 1) // 2
    linea = 1
    columna = 1
    fig = tools.make_subplots(rows=lineas, cols=2,
                              subplot_titles=[nombres_comunes[c]
                                              for c in lista_validos])
    for commodity in lista_validos:
        fig.append_trace(go.Scatter(y=df[commodity],
                                    x=df['Date'],
                                    ),
                         linea, columna
                         )
        if columna == 1:
            columna = 2
        elif columna == 2:
            columna = 1
            linea += 1
    fig['layout'].update(yaxis=dict(title='Opening value'),
                         plot_bgcolor=colors['background'],
                         paper_bgcolor=colors['background'],
                         font=dict(color=colors['text']),
                         showlegend=False)
    return fig, texto
if __name__ == '__main__':
    app.run_server()
|
{"/app.py": ["/regresion.py"]}
|
11,345
|
facosta8/MNO-Black-Scholes
|
refs/heads/master
|
/scripts/carga_datos.py
|
#%% carga
import numpy as np
import pandas as pd
from yahoofinancials import YahooFinancials as YF
from ctypes import cdll
from pathlib import Path
import os
def descargar_datos(stocks,
                    inicio = '2008-01-01',
                    fin = '2019-01-01',
                    intervalo = 'daily'):
    """
    Download historical price data for ``stocks`` from Yahoo Finance.

    :param stocks: list of ticker symbols
    :param inicio: start date, 'YYYY-MM-DD'
    :param fin: end date, 'YYYY-MM-DD'
    :param intervalo: sampling interval: 'daily', 'weekly' or 'monthly'
    :return: DataFrame transposed so each ticker is one row
    """
    # BUG FIX: the original re-assigned ``intervalo = 'daily'`` inside the
    # body, silently discarding the caller's choice of interval.
    acciones = YF(stocks)
    hr = (acciones.get_historical_price_data(inicio, fin, intervalo))
    h = pd.DataFrame(hr)
    h = h.T
    return h
def extraer_datos_cia(comp,pri):
    """
    Build a tidy per-company DataFrame from raw Yahoo 'prices' records.
    Internal helper used by extraer_datos().

    :param comp: ticker symbol of the company
    :param pri: list of price records (dicts with at least formatted_date,
                open, close, high, low)
    :return: DataFrame with those five columns plus a 'cia' ticker column
    """
    e = pd.DataFrame(pri)
    # Select the columns with a plain list: tuples in .loc are reserved for
    # MultiIndex keys in modern pandas and can raise KeyError there.
    e = e[['formatted_date', 'open', 'close', 'high', 'low']]
    e['cia'] = comp
    return e
def extraer_datos(df):
    """
    Flatten the per-ticker Yahoo download into one long DataFrame.

    :param df: DataFrame as returned by descargar_datos (tickers as the
               index, with a 'prices' column of raw record lists)
    :return: concatenation of extraer_datos_cia() for every ticker; an
             empty DataFrame when df has no rows (the original raised
             NameError on empty input)
    """
    # Collect all frames first and concatenate once: repeated pd.concat in
    # a loop is quadratic, and the original left ``datos`` unbound when
    # df was empty.
    frames = [extraer_datos_cia(df.index[i], df.prices[i])
              for i in range(df.shape[0])]
    return pd.concat(frames) if frames else pd.DataFrame()
#%% data loading
tech_stocks = ['AAPL', 'MSFT', 'INTC', 'GOOG', 'GOOGL', 'FB', 'INTC']
bank_stocks = ['WFC', 'BAC', 'C']
# yahoo_financials_tech = YF(tech_stocks)
# NOTE(review): 'INTC' appears twice in tech_stocks -- confirm intended.
historia = descargar_datos(tech_stocks)
datos = extraer_datos(historia)
#%% BS methods
# First five rows, numeric columns only (open/close/high/low).
matriz = np.array(datos.head().values[:,1:5])
#%% C++ test
# Classes to call the C++ function.
# The commands to (re)compile are:
# g++ -c -fPIC blackscholes.cpp -o bs.o
# g++ -shared -Wl,-soname,bs.so -o bs.so bs.o
path = Path(__file__).parent.absolute()
os.chdir(path)
lib = cdll.LoadLibrary('./bs.so')
class BlackScholes(object):
    # Thin ctypes wrapper around the bs.so shared library.
    def __init__(self):
        self.obj = lib.BS_new()
    def bar(self):
        lib.BS_bar(self.obj)
bs = BlackScholes()
bs.bar() #and you will see "Hello" on the screen
|
{"/app.py": ["/regresion.py"]}
|
11,346
|
facosta8/MNO-Black-Scholes
|
refs/heads/master
|
/regresion.py
|
import pandas as pd
import datetime
import numpy as np
from sklearn.metrics import r2_score
import sys
import os
# Paralelización
# DASK
import dask as dask
#from dask.distributed import Client, progress
import dask.dataframe as dd
#client = Client()
#client.cluster
from dask import delayed
#os.chdir('git/MNO-Black-Scholes/modelo_simple')
"""
----------------------- ENFOQUE QUANDL -----------------------
"""
import quandl
# Abro llave de quandl
quank = open('quandl.txt').readline().split(':')[1].strip()
quandl.ApiConfig.api_key = quank
# Lista de características del portafolio: [meses a predecir, r2_minima, commodities...]
lista_run = ['1','0.0001',"LBMA/GOLD","CHRIS/CME_O1", "CHRIS/CME_LB1"]
# Lista del total de commodities con que se trabaja
lista_desc = ["LBMA/GOLD","CHRIS/CME_O1","LBMA/SILVER","CHRIS/CME_DA1","CHRIS/CME_LN1",
"CHRIS/CME_C1", "CHRIS/CME_RR1", "CHRIS/CME_LB1","CHRIS/CME_RB1", "CHRIS/CME_NG1",
"CHRIS/CME_PL1","CHRIS/CME_S1"]
# Función de descarga de datos en paralelo
def download_info(lista_desc):
    """
    Download ~3 years of daily history for every Quandl code in
    ``lista_desc`` in parallel (dask.delayed), merge the first column of
    each series into one DataFrame and write it to datos.csv.

    :param lista_desc: list of Quandl dataset codes
    :return: None (side effect: writes datos.csv)
    """
    @delayed
    def desc_datos(years_back, future_code):
        # Build the [today - years_back, today] date window.
        now = datetime.datetime.now()
        a_t = str(now)[0:10]
        b_t = str(int(str(now)[0:4])-years_back)+str(now)[4:10]
        # Temporarily silence quandl's progress output.
        sys.stdout = open(os.devnull, "w")
        #yahoo = yf.download(future_code,b_t,a_t)
        yahoo = quandl.get(future_code, collapse="daily",start_date=b_t, end_date=a_t)
        sys.stdout = sys.__stdout__
        return yahoo.iloc[:,0]
    # Plain list of delayed downloads -- replaces the original
    # globals()['data%s' % i] name-injection hack, which needlessly
    # polluted the module namespace.
    to_merge = [desc_datos(years_back=3, future_code=code)  # 3 years of history
                for code in lista_desc]
    @delayed
    def create_variables(to_merge):
        variables = pd.concat(to_merge, axis=1)
        return variables
    intento = create_variables(to_merge)
    datos = intento.compute()
    datos.columns = lista_desc
    datos.to_csv("datos.csv")
    return None
def analisis_p(lista_run):
    """
    Portfolio analysis: for every commodity in lista_run[2:], fit a
    polynomial trend, hand the parameters to the external Black-Scholes
    binary (./programa.o) via data.txt, and collect the predicted price.

    :param lista_run: [months to predict, minimum r2, commodity codes...]
    :return: DataFrame with one row per commodity (last/predicted price,
             % difference, variance, betas, value range)
    """
    # Load the previously downloaded data from datos.csv.
    info = pd.read_csv("datos.csv")
    # Per-commodity analysis.
    def fin_poly_reg(lista_run,info,investment_length, future_code, r2_min):
        # Investment duration (months).
        il = investment_length
        X = info.loc[:,future_code]
        X = X.dropna()
        # Standard deviation of the series.
        sd =np.std(X)
        # Value at the last day of the stock.
        vu = X.tolist()[-1]
        # Polynomial regression: raise the degree until r2 >= r2_min.
        deg = [1,2,3,4,5,6,7,8,9] # possible number of betas
        for j in range(len(deg)):
            z = np.polyfit(np.arange(len(X)),X.values,deg[j])
            ypred = np.polyval(z,np.arange(len(X)))
            r2 = r2_score(X.values, ypred)
            if r2 >= r2_min:
                break
        """
        GUARDO RESULTADOS DE REGRESIÓN EN TXT
        El formato dentro del txt es:
        sd
        valor al último día del stock
        tiempo que dura la inversión
        betas
        """
        # data.txt layout (read by programa.o): sd, last value,
        # investment length, then one beta per line.
        nl='\n'
        # Betas
        vec=''
        for i in range(len(z)):
            vec = vec+str(z[i])+nl
        #
        with open('data.txt', 'w') as the_file:
            the_file.write(str(str(sd)+nl+str(vu)+nl+str(il)+nl+vec))
            the_file.close()
        """
        CALCULO RENDIMIENTO CON BLACK-SCHOLES Y LEO RESULTADOS
        """
        # Run the external Black-Scholes binary and read its prediction.
        os.system('./programa.o > out.txt')
        f = open('out.txt')
        tocayo = f.readlines()
        f.close()
        """
        Calculo varianza para ver si las predicciones del tocayo están muy lejos
        """
        # Variance of the series, to sanity-check the prediction spread.
        var = np.var(X)
        sal = [future_code,vu,float(tocayo[0])]
        rend = 100*(float(sal[2])-float(sal[1]))/float(sal[1])
        return pd.DataFrame({'commodity':[sal[0]],'last_price':[sal[1]],
            'predicted_price':[sal[2]],'difference(%)':rend,'ts_variance':var,
            'bethas':[z], 'rango':str(X.min())+'-'+str(X.max())})
    #uno = fin_poly_reg(info, investment_length=10, future_code='LBMA/GOLD', r2_min=0.5)
    # Merge the per-commodity results.
    comodities=[]
    for i in range(len(lista_run)-2):
        i=i+2
        globals()['com%s'%i] = fin_poly_reg(lista_run,info,investment_length = float(lista_run[0]),
                              future_code=lista_run[i], r2_min=float(lista_run[1]))
        comodities.append(globals()['com%s'%i])
    variables = pd.concat(comodities, axis=0)
    return variables
# Smoke-check: run once at import time.
prueba = analisis_p(lista_run)
|
{"/app.py": ["/regresion.py"]}
|
11,370
|
apatyk/Daily-Pattern-Classifier
|
refs/heads/main
|
/common/testing.py
|
import numpy as np
import pandas as pd
import tensorflow as tf
def get_raw_data(samples, labels, data):
    """
    Slice raw per-file data into windows.

    samples: sequence of (file_index, start, end) triples
    labels:  one label per sample
    data:    indexable collection of per-file raw arrays
    Returns (windows, labels) as numpy arrays.
    """
    windows = []
    kept_labels = []
    for idx, label in enumerate(labels):
        triple = samples[idx]
        windows.append(data[triple[0]][triple[1]:triple[2]])
        kept_labels.append(label)
    rawsamples = np.array(windows, copy=True)
    rawlabels = np.array(kept_labels, copy=True)
    return rawsamples, rawlabels
def chunks(lst, n):
    """Yield successive n-sized slices of *lst* (the last may be shorter)."""
    start = 0
    while start < len(lst):
        yield lst[start:start + n]
        start += n
def get_raw_samples(samples, data):
    """Slice raw data windows for (file_index, start, end) triples (no labels)."""
    windows = [data[s[0]][s[1]:s[2]] for s in samples]
    return np.array(windows, copy=True)
def weighted_accuracy(TP, FP, TN, FN):
    """
    Accuracy with the positive class re-weighted by W = N/P so that the
    positive and negative classes contribute equally.

    Returns 0.0 when there are no samples at all (the original raised
    ZeroDivisionError in that case).
    """
    P = TP + FN
    N = TN + FP
    if P + N == 0:
        # No samples -- avoid dividing by zero.
        return 0.0
    # W falls back to 1 when there are no positives.
    W = N / P if P > 0 else 1
    return (W * TP + TN) / (W * P + N)
def true_positive_rate(TP, FN):
    """Recall: TP / (TP + FN); 0 when there are no positives."""
    positives = TP + FN
    return TP / positives if positives else 0
def true_negative_rate(TN, FP):
    """Specificity: TN / (TN + FP); 0 when there are no negatives."""
    negatives = TN + FP
    return TN / negatives if negatives else 0
def f1_score(TP, FP, FN):
    """F1 = TP / (TP + (FP + FN)/2); defined as 0 when all counts are 0."""
    if not (TP or FP or FN):
        return 0
    return TP / (TP + 0.5 * (FP + FN))
def precision(TP, FP):
    """Precision: TP / (TP + FP); 0 when there are no predicted positives."""
    predicted = TP + FP
    return TP / predicted if predicted else 0
def consecutive_groups(data, stepsize=1):
    """
    Split a 1-D array wherever the difference between neighbours is not
    *stepsize*; return (first, last) bookends of each run.
    """
    breaks = np.where(np.diff(data) != stepsize)[0] + 1
    return [(seg[0], seg[-1]) for seg in np.split(data, breaks)]
def hysteresis_threshold(model, samples, labels, start_threshold, end_threshold, winmin, stepsec, episode_min=1.):
    """
    Segment eating episodes from model probabilities with double
    (hysteresis) thresholding.

    model: tensorflow model
    samples: samples of raw data
    labels: labels for raw data
    start_threshold: high threshold of the beginning of segmentation
    end_threshold: low threshold of the end of segmentation
    winmin: size of a window sample in unit of minute
    stepsec: stride to move the window in unit of second / the number of second between two adjacent window samples
    episode_min: the minimum length of eating episode in unit of minute. If end of segmentation -start of segmentation < episode_min,
    then the episode will not be counted

    Returns a one-row DataFrame with segment starts/ends, per-window
    probabilities, a 0/1 prediction array and the segment count.
    """
    import pandas as pd
    result_ls = []
    # A candidate end is only confirmed after half a window worth of
    # below-threshold probabilities (the "pause").
    t_pause = winmin / 2 * 60
    result = {'segment_start':[], 'segment_end':[], 'prob':[], 'predictions':np.zeros([len(labels)], dtype=int), 'segment_count':0}
    probs = tf.squeeze(model.predict(samples, batch_size=4096))
    samples = tf.squeeze(samples)
    # state: 0 = idle, 1 = inside a segment, 2 = candidate segment end.
    state, start, end = 0, 0, 0
    pause_counter = 0
    for i in range(len(labels)):
        prob = probs[i].numpy()
        result['prob'].append(prob)
        if state == 0 and prob > start_threshold:
            # Rising edge: probability crossed the high threshold.
            state = 1
            start = i
        elif state == 1 and prob < end_threshold:
            # Falling edge: candidate end; start counting the pause.
            state = 2
            end = i+1 # for Python list slicing
            pause_counter = 0
        elif state == 2:
            if prob > start_threshold:
                # Probability recovered -- cancel the candidate end.
                state = 1
            else:
                pause_counter += stepsec
                if pause_counter >= t_pause:
                    # convert time to second and check threshold
                    if (end-start)*stepsec >= episode_min*60:
                        # save data
                        result['segment_start'].append(start)
                        result['segment_end'].append(end)
                        result['segment_count'] += 1
                        result['predictions'][start:end] = 1
                        pass
                    end = 0
                    state = 0
    if state == 1: # catch meal if it ends at the end of probabilities
        end = i
        # NOTE(review): this tail path also requires pause_counter >=
        # t_pause, which a segment still in state 1 will rarely satisfy
        # -- confirm that is intended.
        if pause_counter >= t_pause and (end-start)*stepsec >= episode_min*60:
            # save data
            result['segment_start'].append(start)
            result['segment_end'].append(end)
            result['segment_count'] += 1
            result['predictions'][start:end] = 1
    result_ls.append(result)
    return pd.DataFrame(result_ls)
def hysteresis_threshold_probs(probs, labels, start_threshold, end_threshold, winmin, stepsec, episode_min=1.):
    """
    Same hysteresis segmentation as hysteresis_threshold(), but operating
    on precomputed probabilities instead of running a model.

    probs: model output probabilities from samples
    labels: labels for raw data
    start_threshold: high threshold of the beginning of segmentation
    end_threshold: low threshold of the end of segmentation
    winmin: size of a window sample in unit of minute
    stepsec: stride to move the window in unit of second / the number of second between two adjacent window samples
    episode_min: the minimum length of eating episode in unit of minute. If end of segmentation -start of segmentation < episode_min,
    then the episode will not be counted
    """
    import pandas as pd
    result_ls = []
    # A candidate end is only confirmed after half a window of pause.
    t_pause = winmin / 2 * 60
    result = {'segment_start':[], 'segment_end':[], 'prob':[], 'predictions':np.zeros([len(labels)], dtype=int), 'segment_count':0}
    # state: 0 = idle, 1 = inside a segment, 2 = candidate segment end.
    state, start, end = 0, 0, 0
    pause_counter = 0
    for i in range(len(labels)):
        prob = probs[i]
        result['prob'].append(prob)
        if state == 0 and prob > start_threshold:
            state = 1
            start = i
        elif state == 1 and prob < end_threshold:
            state = 2
            end = i+1 # for Python list slicing
            pause_counter = 0
        elif state == 2:
            if prob > start_threshold:
                # Probability recovered -- cancel the candidate end.
                state = 1
            else:
                pause_counter += stepsec
                if pause_counter >= t_pause:
                    # convert time to second and check threshold
                    if (end-start)*stepsec >= episode_min*60:
                        # save data
                        result['segment_start'].append(start)
                        result['segment_end'].append(end)
                        result['segment_count'] += 1
                        result['predictions'][start:end] = 1
                        pass
                    end = 0
                    state = 0
    if state == 1: # catch meal if it ends at the end of probabilities
        end = i
        # NOTE(review): see hysteresis_threshold -- the pause_counter
        # condition here is rarely satisfiable in state 1.
        if pause_counter >= t_pause and (end-start)*stepsec >= episode_min*60:
            # save data
            result['segment_start'].append(start)
            result['segment_end'].append(end)
            result['segment_count'] += 1
            result['predictions'][start:end] = 1
    result_ls.append(result)
    return pd.DataFrame(result_ls)
def single_threshold(probs, labels, winmin, stepsec, threshold=0.5, episode_min=1.):
    """
    Segmentation with a single threshold (start and end use the same
    value); otherwise identical state machine to
    hysteresis_threshold_probs().

    probs: model output probabilities from samples
    labels: labels for raw data
    """
    import pandas as pd
    result_ls = []
    t_pause = winmin / 2 * 60
    result = {'segment_start':[], 'segment_end':[], 'prob':[], 'predictions':np.zeros([len(labels)], dtype=int), 'segment_count':0}
    # state: 0 = idle, 1 = inside a segment, 2 = candidate segment end.
    state, start, end = 0, 0, 0
    pause_counter = 0
    for i in range(len(labels)):
        prob = probs[i]
        result['prob'].append(prob)
        if state == 0 and prob > threshold:
            state = 1
            start = i
        elif state == 1 and prob < threshold:
            state = 2
            end = i+1 # for Python list slicing
            pause_counter = 0
        elif state == 2:
            if prob > threshold:
                state = 1
            else:
                pause_counter += stepsec
                if pause_counter >= t_pause:
                    # convert time to second and check threshold
                    if (end-start)*stepsec >= episode_min*60:
                        # save data
                        result['segment_start'].append(start)
                        result['segment_end'].append(end)
                        result['segment_count'] += 1
                        result['predictions'][start:end] = 1
                        pass
                    end = 0
                    state = 0
    if state == 1: # catch meal if it ends at the end of probabilities
        end = i
        # NOTE(review): same rarely-satisfiable pause_counter condition
        # as the hysteresis variants -- confirm intended.
        if pause_counter >= t_pause and (end-start)*stepsec >= episode_min*60:
            # save data
            result['segment_start'].append(start)
            result['segment_end'].append(end)
            result['segment_count'] += 1
            result['predictions'][start:end] = 1
    result_ls.append(result)
    return pd.DataFrame(result_ls)
def calc_episode_metrics(results, labels):
    """
    Episode-level confusion counts: a ground-truth episode is a TP if any
    model segment overlaps it in any way.

    results: pandas dataframe output by hysteresis_threshold()
    labels: GT labels for raw data
    Returns (TP, FP, FN); TN is undefined at episode level.
    """
    TP, FP, FN = 0, 0, 0
    gt_indices = np.where(labels == 1)
    if np.size(gt_indices) != 0:
        gt_segments = consecutive_groups(gt_indices[0])
    else:
        gt_segments = []
    eating_segments = list(zip(results['segment_start'][0], [x-1 for x in results['segment_end'][0]])) # to account for Python list slicing
    # -1 marks "not yet matched".
    GTEval = [-1] * len(gt_segments)
    MDEval = [-1] * len(eating_segments)
    # TP - GT event, model event (any overlap) - 1
    # FN - GT event, missed by model - 2
    # FP - no event, model event - 3
    # look for matches with GT events
    for i, (gt_start, gt_end) in enumerate(gt_segments):
        for e, (md_start, md_end) in enumerate(eating_segments):
            # (1) MD within GT
            # (2) MD starts before GT and ends in GT
            # (3) MD starts in GT and ends after GT
            # (4) MD contains GT
            if (md_start >= gt_start and md_end <= gt_end) or \
               (md_start <= gt_start and md_end > gt_start and md_end <= gt_end) or \
               (md_start >= gt_start and md_start < gt_end and md_end >= gt_end) or \
               (md_start <= gt_start and md_end >= gt_end):
                GTEval[i] = e
                MDEval[e] = i
    # count up classifications
    for i in range(len(gt_segments)):
        if GTEval[i] == -1:
            FN += 1
        else:
            TP += 1
    for e in range(len(eating_segments)):
        if MDEval[e] == -1:
            FP += 1
    return TP, FP, FN
def calc_time_metrics(MD, GT):
    """
    Element-wise confusion counts between model detections and ground
    truth.

    MD: array of model detection 1s and 0s to signify eating and non-eating
    GT: array of GT 1s and 0s to signify eating and non-eating
    Returns (TP, FP, TN, FN).
    """
    TP = FP = TN = FN = 0
    for i in range(len(GT)):
        detected = MD[i] == 1
        actual = GT[i]
        if detected and actual == 1:
            TP += 1
        elif detected:
            FP += 1
        elif actual == 0:
            TN += 1
        else:
            FN += 1
    return TP, FP, TN, FN
|
{"/DailyPatternClassifier/TrainDailyPatternRNN.py": ["/common/testing.py"], "/DailyPatternClassifier/TestDailyPatternRNN.py": ["/common/testing.py"], "/GenerateSamples/GenerateSamples.py": ["/common/loadfile.py", "/common/training.py", "/common/testing.py"]}
|
11,371
|
apatyk/Daily-Pattern-Classifier
|
refs/heads/main
|
/GenerateSamples/SubmitGenSamplesJob.py
|
import os
#PBS -l select=1:ncpus=28:mem=120gb:ngpus=2:gpu_model=p100:interconnect=fdr,walltime=4:00:00
#PBS -l select=1:ncpus=24:mem=120gb:ngpus=2:gpu_model=k40:interconnect=fdr,walltime=4:00:00
#PBS -l select=1:ncpus=40:mem=370gb:ngpus=2:gpu_model=v100:interconnect=hdr,walltime=72:00:00
#PBS -l select=1:ncpus=56:mem=370gb:ngpus=2:gpu_model=v100s:interconnect=hdr,walltime=72:00:00
# Sweep of window lengths (minutes); one PBS job is submitted per length.
window_lengths = [2, 4, 6, 8, 10]
#window_lengths = [6]
for W in window_lengths:
    # Write a fresh job.pbs for this window length, then submit it.
    pbsfile = open("job.pbs", "w")
    pbsfile.write("""#PBS -u apatyk
#PBS -N GenSamples_{:d}Min
#PBS -l select=1:ncpus=40:mem=370gb:ngpus=2:gpu_model=v100:interconnect=hdr,walltime=72:00:00
#PBS -m ae
#PBS -o /home/apatyk/Research/GenerateSamples/results/gen-samples-{:d}min.txt
#PBS -j oe
""".format(W, W))
    pbsfile.write("""ulimit -c 0
source /software/spackages/linux-centos8-x86_64/gcc-8.3.1/anaconda3-2019.10-v5cuhr6keyz5ryxcwvv2jkzfj2gwrj4a/etc/profile.d/conda.sh
module load cuda/10.2.89-gcc/8.3.1 cudnn/8.0.0.180-10.2-linux-x64-gcc/8.3.1 anaconda3/2019.10-gcc/8.3.1
conda activate tf_env
cd ~/Research/GenerateSamples/
python GenerateSamples.py {:d}
""".format(W))
    pbsfile.write("""if [ $? -ne 0 ]; then
rm core.*
fi
exit""")
    pbsfile.close()
    os.system("qsub job.pbs")
|
{"/DailyPatternClassifier/TrainDailyPatternRNN.py": ["/common/testing.py"], "/DailyPatternClassifier/TestDailyPatternRNN.py": ["/common/testing.py"], "/GenerateSamples/GenerateSamples.py": ["/common/loadfile.py", "/common/training.py", "/common/testing.py"]}
|
11,372
|
apatyk/Daily-Pattern-Classifier
|
refs/heads/main
|
/common/loadfile.py
|
import numpy as np
import pandas as pd
import sys
from tqdm import tqdm
import math
ACC_THRESH = 0.008 # sum(acc) (G) max value for stddev rest
GYRO_THRESH = 0.04 * 180.0/ math.pi
def loadshmfile(File_Name):
    """Read a raw .shm recording: records of six float32 values per datum."""
    RawData = np.fromfile(File_Name, dtype=np.dtype("6f4"))
    # Reorder the gyroscope columns in place: (3, 4, 5) <- (5, 3, 4).
    # Fancy indexing on the right-hand side yields a copy, so the assignment
    # is safe even though source and destination columns overlap.
    RawData[:, [3, 4, 5]] = RawData[:, [5, 3, 4]]
    return RawData
def smooth(RawData):
    """Low-pass filter each sensor column with a one-sided Gaussian kernel.

    Returns a new array; the caller's RawData is left untouched.  The first
    WINDOW_SIZE rows keep their raw values because no full window exists yet.
    """
    SIG = 10.0
    WINDOW_SIZE = 15
    # Gaussian weights over offsets 14..0, normalized to sum to 1.
    offsets = np.linspace(14, 0, WINDOW_SIZE)
    weights = np.exp(-np.square(offsets) / (2.0 * SIG * SIG))
    weights = weights / sum(weights)
    Smoothed = np.copy(RawData)  # deep copy so RawData is not modified
    num_cols = RawData.shape[1]
    for col in range(num_cols):
        # 'valid' convolution drops the warm-up region; write from row 14 on.
        Smoothed[WINDOW_SIZE - 1:, col] = np.convolve(RawData[:, col], weights, mode='valid')
    # np.convolve never produced the first WINDOW_SIZE rows: keep them raw.
    Smoothed[:WINDOW_SIZE, :] = RawData[:WINDOW_SIZE, :]
    return Smoothed
def loadEvents(filename):
    """
    loads events data given the .shm filename

    Returns (TotalEvents, EventStart, EventEnd, EventNames).  EventStart and
    EventEnd are fixed-size (100) int arrays of datum indices (15 datums per
    second) relative to the recording's START time; EventNames lists the kept
    event labels.  Meals flagged 0 in ../common/meals-shimmer.txt are skipped.
    """
    # Load the meals file to get any triaged meals.
    SkippedMeals = []
    mealsfile = open("../common/meals-shimmer.txt", "r")
    for line in mealsfile:
        #print(line)
        data = line.split()
        #print(data[0], data[1], data[13])
        # Column 13 is the keep flag; 0 means the meal was triaged out.
        if(int(data[13]) == 0):
            # Store (participant id = last 9 chars of field 0, event name, keep flag).
            Mdata = [data[0][-9:], data[1], int(data[13])]
            SkippedMeals.append(Mdata)
    # Events file sits next to the .shm file: <name>-events.txt.
    EventsFileName = filename[:len(filename)-4]+"-events.txt"
    # Load the meals
    EventNames = []
    EventStart = (np.zeros((100))).astype(int)
    EventEnd = (np.zeros((100))).astype(int)
    TotalEvents = 0
    TimeOffset = 0
    file = open(EventsFileName, "r")
    #print(filename)
    for lines in file:
        #print(lines)
        words = lines.split()
        if(len(words) == 0): continue # Skip empty lines
        # Convert Start time to offset
        if(words[0] == "START"): # Get Start Time (TimeOffset) from file
            #print(words)
            hours = int(words[2].split(":")[0])
            minutes = int(words[2].split(":")[1])
            seconds = int(words[2].split(":")[2])
            #print("{}h:{}m:{}s".format(hours, minutes,seconds))
            TimeOffset = (hours * 60 * 60) + (minutes * 60) + seconds
            continue
        if(words[0] == "END"):
            #print(words)
            continue
        # Each remaining line is "<name> <start hh:mm:ss> <end hh:mm:ss>".
        for x in range(1,3): # Process Events Data
            hours = int(words[x].split(":")[0])
            minutes = int(words[x].split(":")[1])
            seconds = int(words[x].split(":")[2])
            EventTime = (hours * 60 * 60) + (minutes * 60) + seconds
            EventTime = EventTime - TimeOffset
            # Convert seconds to a datum index at 15 datums/second.
            if(x == 1): EventStart[TotalEvents] = EventTime * 15
            if(x == 2): EventEnd[TotalEvents] = EventTime * 15
            # An event earlier than its predecessor crossed midnight:
            # push it forward by one day's worth of datums.
            if(TotalEvents>0):
                if(EventStart[TotalEvents]<EventStart[TotalEvents-1]):
                    EventStart[TotalEvents] = EventStart[TotalEvents] + (24*60*60*15)
                if(EventEnd[TotalEvents]<EventEnd[TotalEvents-1]):
                    EventEnd[TotalEvents] = EventEnd[TotalEvents] + (24*60*60*15)
        #print(TotalEvents)
        # Check if meal was triaged out for too much walking or rest
        ename = words[0]
        fname = filename[-9:]
        skipmeal = 0
        #print(fname, ename)
        for skippedmeal in SkippedMeals:
            Pname, EventName, Keep = skippedmeal
            if(Pname == fname and ename == EventName):
                #print(Pname, EventName, Keep, ename, fname, Pname == fname, ename == EventName)
                skipmeal = 1
                break
        # Triaged meals overwrite their slot on the next kept event.
        if(skipmeal == 1): continue
        TotalEvents = TotalEvents + 1
        EventNames.append(ename)
    return TotalEvents, EventStart, EventEnd, EventNames
# Does not normalize. Normalization is done post-hoc.
def loadAllData3(winlength, step, removerest=1, removewalk=0, removebias=1, shx=1, gtperc = 0.5):
    """Load and window every recording listed in ../common/batch-unix.txt.

    The body of this function was a line-for-line duplicate of loadAllData4
    with the batch-file path hard-coded, so it now simply delegates.  See
    loadAllData4 for parameter semantics and the return contract:
    (num_files, AllSmoothed, samples_array, labels_array).
    """
    return loadAllData4('../common/batch-unix.txt', winlength, step,
                        removerest=removerest, removewalk=removewalk,
                        removebias=removebias, shx=shx, gtperc=gtperc)
# Reads from designated file. Useful for handedness and grouping. Does not normalize. Normalization is done post-hoc.
def loadAllData4(filename, winlength, step, removerest=1, removewalk=0, removebias=1, shx=1, gtperc = 0.5):
    """Load every .shm recording listed in `filename` and window it into samples.

    filename:   CSV of recording paths, one per line, relative to /home/apatyk/.
    winlength:  window length in datums (15 datums per second).
    step:       stride between consecutive window starts.
    removerest: for eating windows that are mostly rest: 1 = drop the window,
                2 = relabel as non-eating, 0 = keep as-is.
    removewalk: same scheme using gyroscope zero-crossing walk detection.
    removebias: subtract a 150-datum rolling mean from the accelerometer axes.
    shx:        NOTE(review): currently unused -- confirm before removing.
    gtperc:     fraction of a window that must be ground-truth eating for label 1.

    Returns (num_files, AllSmoothed, samples_array, labels_array); each sample
    is an index triple [file, start, end] into AllSmoothed.
    """
    ### Load data, make samples
    samples = []
    labels = []
    AllSmoothed = []
    AllIndices = []          # NOTE(review): never populated -- appears to be dead
    totaleatingrest = 0      # NOTE(review): counters retained but no longer updated
    totaleatingwalk = 0
    df = pd.read_csv(filename, names=["Filenames"])
    for x in tqdm(range(len(df["Filenames"]))):
    #for x in tqdm(range(10)):
        fileeatingrest = 0
        fileeatingwalk = 0
        filesamples = []
        filelabels = []
        File_Name = "/home/apatyk/" + df["Filenames"][x]
        RawData = loadshmfile(File_Name) #loadfile.loadshmfile(File_Name)
        Smoothed = smooth(RawData) #loadfile.smooth(RawData)
        Normalized = np.empty_like(Smoothed)
        if(removebias):
            # Remove acceleration bias
            TREND_WINDOW = 150
            mean = []
            for j in range(3):
                dat = pd.Series(Smoothed[:,j]).rolling(window=TREND_WINDOW).mean()
                dat[:TREND_WINDOW-1] = 0
                mean.append(dat)
            mean2 = np.roll(np.asarray(mean).transpose(), -((TREND_WINDOW//2)-1)*3) # Shift to the left to center the values
            # The last value in mean [-75] does not match that of phoneview, but an error in one datum is acceptable
            # The phone view code calculates mean from -Window/2 to <Window/2 instead of including it.
            Smoothed[:,0:3]-=mean2
            del mean2, mean, dat
        AllSmoothed.append(np.copy(Smoothed))
        # Rest mask: 1-second (15-datum) rolling stddev; "rest" where both the
        # summed acc and gyro stddevs are below threshold, dilated +/-7 datums.
        if(removerest != 0):
            std = []
            for j in range(6):
                dat = pd.Series(Smoothed[:,j]).rolling(window=15).std(ddof=0)
                dat[:14] = 0
                std.append(dat)
            # Above doesn't center window. Left Shift all values to the left by 7 datum (6 sensors)
            std2 = np.roll(np.asarray(std).transpose(), -7*6)
            accstd = np.sum(std2[:,:3], axis=1)
            gyrostd = np.sum(std2[:,-3:], axis=1)
            datrest = (accstd < ACC_THRESH) & (gyrostd < GYRO_THRESH)
            mrest = datrest.copy()
            for i in range(8,len(datrest)-7):
                if(datrest[i]==True):
                    mrest[i-7:i+8] = True
            del dat, datrest, gyrostd, accstd, std2, std
        # Walk mask: flag datums with large-amplitude gyroscope zero-crossings.
        if(removewalk!=0):
            minv = np.zeros((3,1))
            maxv = np.zeros((3,1))
            zerocross = np.zeros((len(Smoothed),1)).astype(int)
            for j in range(3):
                minv[j]=999.9
                maxv[j]=-999.9
            for t in range(len(Smoothed)-1):
                for j in range(3):
                    if (Smoothed[t][j+3] < minv[j]):
                        minv[j]=Smoothed[t][j+3]
                    if (Smoothed[t][j+3] > maxv[j]):
                        maxv[j]=Smoothed[t][j+3]
                    if ((Smoothed[t][j+3] < 0.0) and (Smoothed[t+1][j+3] > 0.0) and (minv[j] < -5.0)):
                        zerocross[t]+=(1<<j)
                        minv[j]=999.9
                        maxv[j]=-999.9
                    if ((Smoothed[t][j+3] > 0.0) and (Smoothed[t+1][j+3] < 0.0) and (maxv[j] > 5.0)):
                        zerocross[t]+=(1<<(j+3))
                        minv[j]=999.9
                        maxv[j]=-999.9
            zc = [0 if i==0 else 1 for i in zerocross]
            del minv, maxv, zerocross
        del RawData
        # Identify things as GT
        [TotalEvents, EventStart, EventEnd, EventNames] = loadEvents(File_Name) #loadfile.loadEvents(File_Name)
        GT = np.zeros((len(Smoothed))).astype(int)
        for i in range(TotalEvents):
            #print(EventStart[i], EventStart[i], type(EventStart[i]))
            GT[EventStart[i]: EventEnd[i]+1] = 1
        # Generate labels
        MaxData = len(Smoothed)
        for t in range(0, MaxData, step):
            sample = [x, t, t+winlength]
            label = int((np.sum(GT[t:t+winlength])/winlength)>=gtperc)
            #isrest = if(removerest) else 0
            #iswalk = if(removewalk) else 0
            #if(label and isrest):
            if(label and removerest!=0): # Only ignore if in eating
                isrest = int((np.sum(mrest[t:t+winlength])/winlength)>=0.65)
                if(isrest and removerest==1): continue; # Do not consider this sample at all. Comment this if you want to move the sample to non-eating.
                elif(isrest and removerest==2): label = 0;
                else: label = 1
            if(label and removewalk!=0): # Only ignore if in eating
                iswalk = int((np.sum(zc[t:t+winlength])/winlength)>=0.15)
                if(iswalk and removewalk==1): continue;
                elif(iswalk and removewalk==2): label=0;
                else: label = 1
                # fileeatingwalk+=1
                # continue # Do not append this sample to the dataset
            if(t+winlength < MaxData): # Ignore last small window. Not ignoring results in a list rather than a numpy array.
                filesamples.append(sample)
                filelabels.append(label)
        samples = samples + filesamples
        labels = labels + filelabels
        numsamples = (len(filesamples))
        totaleatingwalk+=fileeatingwalk
        #print("Loaded file {}, {} samples from {}".format(x, numsamples,File_Name), flush=True)
        #print("Loaded file {}, {} samples from {}, contains {} rest in eating".format(x,numsamples,File_Name,fileeatingrest),
        #      flush=True)
    samples_array = np.asarray(samples)
    labels_array = np.asarray(labels)
    #print("Total {:d} walking in eating\n".format(fileeatingwalk))
    return len(df["Filenames"]), AllSmoothed, samples_array, labels_array
def loadSingleData(filenum, winlength, step, removerest=1, removewalk=0, removebias=1, shx=1, gtperc = 0.5):
    """Window a single recording: row `filenum` of ../common/batch-unix.txt.

    Same pipeline as loadAllData4 but for one file only; see loadAllData4 for
    parameter semantics.  Returns
    (num_files_in_batch, AllSmoothed, samples_array, labels_array) where
    AllSmoothed holds just the one smoothed recording.
    """
    ### Load data, make samples
    samples = []
    labels = []
    AllSmoothed = []
    AllIndices = []          # NOTE(review): never populated -- appears to be dead
    totaleatingrest = 0      # NOTE(review): counters retained but no longer updated
    totaleatingwalk = 0
    df = pd.read_csv('../common/batch-unix.txt', names=["Filenames"])
    x = filenum
    fileeatingrest = 0
    fileeatingwalk = 0
    filesamples = []
    filelabels = []
    File_Name = "/home/apatyk/" + df["Filenames"][x]
    RawData = loadshmfile(File_Name)
    Smoothed = smooth(RawData)
    Normalized = np.empty_like(Smoothed)
    if(removebias):
        # Remove acceleration bias
        TREND_WINDOW = 150
        mean = []
        for j in range(3):
            dat = pd.Series(Smoothed[:,j]).rolling(window=TREND_WINDOW).mean()
            dat[:TREND_WINDOW-1] = 0
            mean.append(dat)
        mean2 = np.roll(np.asarray(mean).transpose(), -((TREND_WINDOW//2)-1)*3) # Shift to the left to center the values
        # The last value in mean [-75] does not match that of phoneview, but an error in one datum is acceptable
        # The phone view code calculates mean from -Window/2 to <Window/2 instead of including it.
        Smoothed[:,0:3]-=mean2
        del mean2, mean, dat
    AllSmoothed.append(np.copy(Smoothed))
    # Rest mask: 1-second rolling stddev, thresholded, dilated +/-7 datums.
    if(removerest != 0):
        std = []
        for j in range(6):
            dat = pd.Series(Smoothed[:,j]).rolling(window=15).std(ddof=0)
            dat[:14] = 0
            std.append(dat)
        # Above doesn't center window. Left Shift all values to the left by 7 datum (6 sensors)
        std2 = np.roll(np.asarray(std).transpose(), -7*6)
        accstd = np.sum(std2[:,:3], axis=1)
        gyrostd = np.sum(std2[:,-3:], axis=1)
        datrest = (accstd < ACC_THRESH) & (gyrostd < GYRO_THRESH)
        mrest = datrest.copy()
        for i in range(8,len(datrest)-7):
            if(datrest[i]==True):
                mrest[i-7:i+8] = True
        del dat, datrest, gyrostd, accstd, std2, std
    # Walk mask: flag datums with large-amplitude gyroscope zero-crossings.
    if(removewalk!=0):
        minv = np.zeros((3,1))
        maxv = np.zeros((3,1))
        zerocross = np.zeros((len(Smoothed),1)).astype(int)
        for j in range(3):
            minv[j]=999.9
            maxv[j]=-999.9
        for t in range(len(Smoothed)-1):
            for j in range(3):
                if (Smoothed[t][j+3] < minv[j]):
                    minv[j]=Smoothed[t][j+3]
                if (Smoothed[t][j+3] > maxv[j]):
                    maxv[j]=Smoothed[t][j+3]
                if ((Smoothed[t][j+3] < 0.0) and (Smoothed[t+1][j+3] > 0.0) and (minv[j] < -5.0)):
                    zerocross[t]+=(1<<j)
                    minv[j]=999.9
                    maxv[j]=-999.9
                if ((Smoothed[t][j+3] > 0.0) and (Smoothed[t+1][j+3] < 0.0) and (maxv[j] > 5.0)):
                    zerocross[t]+=(1<<(j+3))
                    minv[j]=999.9
                    maxv[j]=-999.9
        zc = [0 if i==0 else 1 for i in zerocross]
        del minv, maxv, zerocross
    del RawData
    # Identify things as GT
    [TotalEvents, EventStart, EventEnd, EventNames] = loadEvents(File_Name) #loadfile.loadEvents(File_Name)
    GT = np.zeros((len(Smoothed))).astype(int)
    for i in range(TotalEvents):
        #print(EventStart[i], EventStart[i], type(EventStart[i]))
        GT[EventStart[i]: EventEnd[i]+1] = 1
    # Generate labels
    MaxData = len(Smoothed)
    for t in range(0, MaxData, step):
        sample = [x, t, t+winlength]
        label = int((np.sum(GT[t:t+winlength])/winlength)>=gtperc)
        #isrest = if(removerest) else 0
        #iswalk = if(removewalk) else 0
        #if(label and isrest):
        if(label and removerest!=0): # Only ignore if in eating
            isrest = int((np.sum(mrest[t:t+winlength])/winlength)>=0.65)
            if(isrest and removerest==1): continue; # Do not consider this sample at all. Comment this if you want to move the sample to non-eating.
            elif(isrest and removerest==2): label = 0;
            else: label = 1
        if(label and removewalk!=0): # Only ignore if in eating
            iswalk = int((np.sum(zc[t:t+winlength])/winlength)>=0.15)
            if(iswalk and removewalk==1): continue;
            elif(iswalk and removewalk==2): label=0;
            else: label = 1
            # fileeatingwalk+=1
            # continue # Do not append this sample to the dataset
        if(t+winlength < MaxData): # Ignore last small window. Not ignoring results in a list rather than a numpy array.
            filesamples.append(sample)
            filelabels.append(label)
    samples = samples + filesamples
    labels = labels + filelabels
    numsamples = (len(filesamples))
    totaleatingwalk+=fileeatingwalk
    #print("Loaded file {}, {} samples from {}".format(x, numsamples,File_Name), flush=True)
    #print("Loaded file {}, {} samples from {}, contains {} rest in eating".format(x,numsamples,File_Name,fileeatingrest),
    #      flush=True)
    samples_array = np.asarray(samples)
    labels_array = np.asarray(labels)
    #print("Total {:d} walking in eating\n".format(fileeatingwalk))
    return len(df["Filenames"]), AllSmoothed, samples_array, labels_array
# Global Dataset Normalization
def globalZscoreNormalize(AllSmoothed, meanvals, stdvals):
    """Z-score normalize every recording with dataset-wide channel statistics.

    AllSmoothed: list of (N, 6) sensor arrays.
    meanvals, stdvals: per-channel mean and stddev, length 6.
    Returns a new list of normalized copies; inputs are not modified.
    """
    AllNormalized = []
    for Smoothed in AllSmoothed:
        Normalized = np.empty_like(Smoothed)
        # Standardize each of the six sensor channels independently.
        for ch in range(6):
            Normalized[:, ch] = (Smoothed[:, ch] - meanvals[ch]) / stdvals[ch]
        AllNormalized.append(np.copy(Normalized))
    return AllNormalized
# load from a specific file
def LoadNormalizedDataOnly2(filename, removebias=1):
    """Load, smooth, optionally de-bias, and z-score normalize each recording.

    filename:  CSV of .shm paths, one per line, relative to /home/apatyk/.
    removebias: subtract a 150-datum rolling mean from the accelerometer axes.
    Uses fixed dataset-wide channel means/stds (precomputed offline).
    Returns (df["Filenames"] series, list of normalized arrays).
    """
    ### Load data, make samples
    # Dataset-wide per-channel statistics: 3 accelerometer + 3 gyroscope.
    meanvals = [-0.012359981 , -0.0051663737, 0.011612018, 0.05796114, 0.1477952, -0.034395125]
    stdvals = [0.05756385, 0.040893298, 0.043825723, 17.199743, 15.311142, 21.229317]
    AllNormalized = []
    AllIndices = []  # NOTE(review): never populated -- appears to be dead
    df = pd.read_csv(filename, names=["Filenames"])
    for x in tqdm(range(len(df["Filenames"]))):
        File_Name = "/home/apatyk/" + df["Filenames"][x]
        RawData = loadshmfile(File_Name) #loadfile.loadshmfile(File_Name)
        Smoothed = smooth(RawData) #loadfile.smooth(RawData)
        Normalized = np.empty_like(Smoothed)
        if(removebias):
            # Remove acceleration bias
            TREND_WINDOW = 150
            mean = []
            for j in range(3):
                dat = pd.Series(Smoothed[:,j]).rolling(window=TREND_WINDOW).mean()
                dat[:TREND_WINDOW-1] = 0
                mean.append(dat)
            mean2 = np.roll(np.asarray(mean).transpose(), -((TREND_WINDOW//2)-1)*3) # Shift to the left to center the values
            # The last value in mean [-75] does not match that of phoneview, but an error in one datum is acceptable
            # The phone view code calculates mean from -Window/2 to <Window/2 instead of including it.
            Smoothed[:,0:3]-=mean2
            del mean2, mean, dat
        # Normalize now
        for i in range(6):
            Normalized[:,i] = (Smoothed[:,i] - meanvals[i]) / stdvals[i]
        # Stick this Normalized data to the Full Array
        AllNormalized.append(np.copy(Normalized))
    return df["Filenames"], AllNormalized
# Reads from designated file. Does not normalize. Normalization is done post-hoc. GT labels generated from center of window
def loadAllDataTesting(filename, winlength, step, removebias=1, gtperc = 0.5):
    """Window every recording listed in `filename` for testing.

    Unlike the training loaders, no rest/walk filtering is applied, and a
    window's label is the ground-truth value at its center datum (the
    majority-vote alternative is kept commented out, so gtperc is unused in
    the active path).  Returns (AllSmoothed, samples_array, labels_array).
    """
    ### Load data, make samples
    samples = []
    labels = []
    AllSmoothed = []
    AllIndices = []  # NOTE(review): never populated -- appears to be dead
    df = pd.read_csv(filename, names=["Filenames"])
    for x in tqdm(range(len(df["Filenames"]))):
        filesamples = []
        filelabels = []
        File_Name = "/home/apatyk/" + df["Filenames"][x]
        RawData = loadshmfile(File_Name)
        Smoothed = smooth(RawData)
        Normalized = np.empty_like(Smoothed)  # NOTE(review): unused here
        if(removebias):
            # Remove acceleration bias
            TREND_WINDOW = 150
            mean = []
            for j in range(3):
                dat = pd.Series(Smoothed[:,j]).rolling(window=TREND_WINDOW).mean()
                dat[:TREND_WINDOW-1] = 0
                mean.append(dat)
            mean2 = np.roll(np.asarray(mean).transpose(), -((TREND_WINDOW//2)-1)*3) # Shift to the left to center the values
            # The last value in mean [-75] does not match that of phoneview, but an error in one datum is acceptable
            # The phone view code calculates mean from -Window/2 to <Window/2 instead of including it.
            Smoothed[:,0:3]-=mean2
            del mean2, mean, dat
        AllSmoothed.append(np.copy(Smoothed))
        del RawData
        # Identify things as GT
        [TotalEvents, EventStart, EventEnd, EventNames] = loadEvents(File_Name)
        GT = np.zeros((len(Smoothed))).astype(int)
        for i in range(TotalEvents):
            GT[EventStart[i]: EventEnd[i]+1] = 1
        # Generate labels
        MaxData = len(Smoothed)
        for t in range(0, MaxData, step):
            if(t+winlength < MaxData): # Ignore last small window.
                sample = [x, t, t+winlength]
                #label = int((np.sum(GT[t:t+winlength])/winlength)>=gtperc) # majority vote of window (>50%)
                label = GT[t+(winlength // 2)] # middle point of window
                filesamples.append(sample)
                filelabels.append(label)
        samples = samples + filesamples
        labels = labels + filelabels
        numsamples = (len(filesamples))
    samples_array = np.asarray(samples)
    labels_array = np.asarray(labels)
    return AllSmoothed, samples_array, labels_array
|
{"/DailyPatternClassifier/TrainDailyPatternRNN.py": ["/common/testing.py"], "/DailyPatternClassifier/TestDailyPatternRNN.py": ["/common/testing.py"], "/GenerateSamples/GenerateSamples.py": ["/common/loadfile.py", "/common/training.py", "/common/testing.py"]}
|
11,373
|
apatyk/Daily-Pattern-Classifier
|
refs/heads/main
|
/DailyPatternClassifier/TrainDailyPatternRNN.py
|
# Adam Patyk
# Clemson University
# MS Thesis: Daily Pattern Classifier
# Summer 2021
# TrainDailyPatternRNN.py
# Purpose: Trains daily pattern classifiers for k-fold cross validation
# Used with TestDailyPatternRNN for evaluation
# Usage: python TrainDailyPatternRNN.py <batch_size> <num_recurrent_units> <num_training_epochs>
import sys
import os
import random
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import KFold
sys.path.append('../') # for .py files in ../common/
import common.testing as testing
if len(sys.argv) != 4:
    sys.exit("Usage: python TrainDailyPatternRNN.py <batch_size> <num_recurrent_units> <num_training_epochs>")
# prepare for GPU workflow
gpus = tf.config.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.list_logical_devices('GPU')
# ignore extraneous warnings
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
# seed every RNG source for reproducible folds and initial weights
seed = 42
random.seed(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
# hyperparameters / dataset constants
len_threshold = 850  # pad/truncate every daily sequence to this many steps
k = 5                # number of cross-validation folds
epochs = int(sys.argv[3]) #50
batch_size = int(sys.argv[1]) #64
num_units = int(sys.argv[2]) #16
num_subjects = 354
n_timesteps = len_threshold
# load numpy arrays from binary .npy files (created from .txt samples in LoadFiles script)
raw_samples = np.load('../GenerateSamples/compressed-samples/daily-samples.npy', allow_pickle=True)
# BUG FIX: raw_labels previously loaded daily-samples.npy -- the same file as
# the samples -- so the targets duplicated the inputs.  Load the label archive
# instead.  NOTE(review): filename inferred from the daily-samples /
# daily-filenames naming scheme; confirm against the GenerateSamples output.
raw_labels = np.load('../GenerateSamples/compressed-samples/daily-labels.npy', allow_pickle=True)
all_filenames = np.load('../GenerateSamples/compressed-samples/daily-filenames.npy').astype(int)
# per-day sequence lengths before padding (needed to trim predictions later)
original_sample_lengths = np.array([len(sample) for sample in raw_samples])
# pad or truncate data sequences accordingly; -1 marks padding for Masking
all_samples = pad_sequences(raw_samples, len_threshold, dtype='float64', padding='post', truncating='post', value=-1)
all_labels = pad_sequences(raw_labels, len_threshold, dtype='int32', padding='post', truncating='post', value=-1)
print('Data ready.')
# prepare k-fold cross validation
kfold = KFold(k, shuffle=True, random_state=seed)
# randomly shuffle array of indices
x = range(num_subjects)
subjects = np.array(random.sample(x, num_subjects), copy=False)
# aggregate metric accumulators (evaluation itself happens in TestDailyPatternRNN)
total_TPR, total_TNR, total_F1, total_Prec, total_WAcc = [], [], [], [], []
total_ep_TPR, total_ep_F1, total_ep_FP_TP = [], [], []
print(f'Training with batch_size = {batch_size}, units = {num_units}')
# NOTE(review): kfold.split yields positional indices into `subjects`, which
# are then used directly as subject IDs below; because `subjects` is a
# permutation of range(num_subjects) the folds are still random partitions of
# the ID space -- confirm this is the intended grouping.
for i, (training_subjects, testing_subjects) in enumerate(kfold.split(subjects)):
    ### TRAINING
    print(f'FOLD {i+1}')
    os.makedirs('models', exist_ok=True)
    model_path = f'models/daily-pattern-b{batch_size}-u{num_units}-e{epochs}-fold{i+1}'
    # retrieve only samples/labels corresponding to training fold
    print('Training...')
    training_bool = np.isin(all_filenames, training_subjects)
    training_samples = tf.convert_to_tensor(all_samples[training_bool], np.float32)
    training_labels = tf.convert_to_tensor(all_labels[training_bool], np.int8)
    # RNN expects (batch, timesteps, features=1)
    training_samples = tf.reshape(training_samples, (-1, n_timesteps, 1))
    training_labels = tf.reshape(training_labels, (-1, n_timesteps, 1))
    tf.keras.backend.clear_session()
    # checkpoint keeps the best-accuracy weights seen during training
    mcp_save = tf.keras.callbacks.ModelCheckpoint(model_path, save_best_only=True, monitor='accuracy')
    # define model: masked bidirectional GRU with per-timestep sigmoid output
    model = tf.keras.models.Sequential([
        tf.keras.layers.Masking(mask_value=-1,
                                input_shape=(n_timesteps, 1)),
        tf.keras.layers.Bidirectional(
            tf.keras.layers.GRU(units=num_units,
                                return_sequences=True,
                                kernel_initializer='glorot_normal', # Xavier normal initialization
                                bias_initializer='zeros'),
            merge_mode='sum'
        ),
        tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(1, activation='sigmoid'))
    ])
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    history = model.fit(x=training_samples, y=training_labels,
                        epochs=epochs, batch_size=batch_size, verbose=2,
                        callbacks=[mcp_save])
    ### TESTING
    print('Saving...')
    # retrieve only samples/labels corresponding to testing fold
    testing_bool = np.isin(all_filenames, testing_subjects)
    testing_samples = tf.convert_to_tensor(all_samples[testing_bool], np.float32)
    testing_labels = tf.convert_to_tensor(all_labels[testing_bool], np.int8)
    testing_sample_lengths = original_sample_lengths[testing_bool]
    testing_samples = tf.reshape(testing_samples, (-1, n_timesteps, 1))
    testing_labels = tf.reshape(testing_labels, (-1, n_timesteps, 1))
    # inference for all testing data using best model from training
    model = tf.keras.models.load_model(model_path)
    testing_probs = model.predict(testing_samples, batch_size=4096)
    # save data for post-hoc evaluation
    os.makedirs('testing', exist_ok=True)
    np.save(f'testing/testing_lengths_{epochs}epochs_fold{i+1}.npy', testing_sample_lengths)
    np.save(f'testing/testing_probs_{epochs}epochs_fold{i+1}.npy', testing_probs)
    np.save(f'testing/testing_samples_{epochs}epochs_fold{i+1}.npy', tf.squeeze(testing_samples).numpy())
    np.save(f'testing/testing_labels_{epochs}epochs_fold{i+1}.npy', tf.squeeze(testing_labels).numpy())
    del model
    print("*****************************************************************")
|
{"/DailyPatternClassifier/TrainDailyPatternRNN.py": ["/common/testing.py"], "/DailyPatternClassifier/TestDailyPatternRNN.py": ["/common/testing.py"], "/GenerateSamples/GenerateSamples.py": ["/common/loadfile.py", "/common/training.py", "/common/testing.py"]}
|
11,374
|
apatyk/Daily-Pattern-Classifier
|
refs/heads/main
|
/common/training.py
|
import random
import numpy as np
import tensorflow as tf
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Conv1D, GlobalAveragePooling1D
import tensorflow.keras.backend as kb
### undersamples and trains model from samples and labels provided
def trainModel(training_samples, training_labels, data, winlength, epochs, modelpath):
    """Balance classes by undersampling non-eating windows, then fit a 1-D CNN.

    training_samples: (N, 3) int array of (file index, start, end) triples
    training_labels:  length-N soft labels in [0, 1]
    data:             per-file smoothed sensor arrays indexed by file index
    Returns (history, model).
    """
    # Partition window indices by class using the 0.5 decision threshold.
    eating_idx = [i for i, lab in enumerate(training_labels) if lab >= 0.5]
    noneating_idx = [i for i, lab in enumerate(training_labels) if lab < 0.5]
    # Draw exactly as many non-eating windows as eating ones, then shuffle.
    sampled_noneating_idx = random.sample(noneating_idx, len(eating_idx))
    balanced_idx = eating_idx + sampled_noneating_idx
    shuffled_idx = balanced_idx.copy()
    random.shuffle(shuffled_idx)
    # Materialize the sensor windows referenced by the (file, start, end) triples.
    window_list = []
    label_list = []
    for i in shuffled_idx:
        f = training_samples[i,0]
        t1 = training_samples[i,1]
        t2 = training_samples[i,2]
        window_list.append(data[f][t1:t2])
        label_list.append(training_labels[i])
    balanced_data = np.array(window_list, copy=True)
    balanced_labels = np.array(label_list, copy=True)
    del window_list
    del label_list
    print("Training on {:d} samples of length {:d}".format(len(shuffled_idx), len(balanced_data[0])))
    tf.keras.backend.clear_session()
    # use multiple GPUs
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        # Checkpoint keeps the best-accuracy weights seen during training.
        mcp_save = keras.callbacks.ModelCheckpoint(modelpath, save_best_only=True, monitor='accuracy')
        model = Sequential()
        model.add(Conv1D(10, 44, strides=2, activation='relu', input_shape=(winlength, 6), name='input_layer'))
        model.add(Conv1D(10, 20, strides=2, activation='relu', kernel_regularizer=keras.regularizers.l1(0.01)))
        model.add(Conv1D(10, 4, strides=2, activation='relu', kernel_regularizer=keras.regularizers.l1(0.01)))
        model.add(GlobalAveragePooling1D())
        model.add(Dense(200, activation='relu'))
        model.add(Dense(1, activation='sigmoid', name='output_layer'))
        model.compile(loss='binary_crossentropy',
                      optimizer='adam', metrics=['accuracy'])
        H = model.fit(x=balanced_data, y=balanced_labels,
                      epochs=epochs, batch_size=256, verbose=0,
                      callbacks=[mcp_save])
    del balanced_data
    del balanced_labels
    return H, model
|
{"/DailyPatternClassifier/TrainDailyPatternRNN.py": ["/common/testing.py"], "/DailyPatternClassifier/TestDailyPatternRNN.py": ["/common/testing.py"], "/GenerateSamples/GenerateSamples.py": ["/common/loadfile.py", "/common/training.py", "/common/testing.py"]}
|
11,375
|
apatyk/Daily-Pattern-Classifier
|
refs/heads/main
|
/DailyPatternClassifier/TestDailyPatternRNN.py
|
# Adam Patyk
# Clemson University
# MS Thesis: Daily Pattern Classifier
# Summer 2021
# TestDailyPatternRNN.py
# Purpose: Evaluate time and episode metrics of daily pattern classifier for k-fold cross validation
# Usage: python TestDailyPatternRNN.py <threshold_val_start> <threshold_val_end> <threshold_val_step> <num_epochs>
import sys
import os
import numpy as np
import pandas as pd
import sklearn.metrics
from tqdm import tqdm
from datetime import datetime
sys.path.append('../') # for .py files in ../common/
import common.testing as testing
# --- CLI validation ---------------------------------------------------------
# Expects: threshold sweep start / end / step, then the epoch count used when
# the models were trained (needed to locate the saved .npy files).
if len(sys.argv) != 5:
    sys.exit("Usage: python TestDailyPatternRNN.py <threshold_start> <threshold_end> <threshold_step> <num_epochs>")
thresholds = np.arange(float(sys.argv[1]), float(sys.argv[2]),float(sys.argv[3]))
k = 5  # number of cross-validation folds
epochs = int(sys.argv[4])
results = []  # one dict of fold-averaged metrics per threshold value
start_time = datetime.now()
for T in thresholds:
    print(f'T = {T}')
    # per-fold accumulators for time-based and episode-based metrics
    total_TPR, total_TNR, total_F1, total_Prec, total_WAcc = [], [], [], [], []
    total_ep_TPR, total_ep_F1, total_ep_FP_TP = [], [], []
    for f in range(k):
        print(f'Fold {f+1}', flush=True)
        # read saved data from DailyPatternRNN scripts
        testing_sample_lengths = np.load(f'testing/testing_lengths_{epochs}epochs_fold{f+1}.npy')
        testing_probs = np.load(f'testing/testing_probs_{epochs}epochs_fold{f+1}.npy')
        testing_labels = np.load(f'testing/testing_labels_{epochs}epochs_fold{f+1}.npy')
        # confusion-matrix tallies summed over every sample in this fold
        total_TP, total_FP, total_TN, total_FN = 0, 0, 0, 0
        total_ep_TP, total_ep_FP, total_ep_FN = 0, 0, 0
        # get episode metrics on testing dataset
        for i in tqdm(range(len(testing_labels))):
            # trim padding: only the first testing_sample_lengths[i] entries are real
            probs = testing_probs[i,:testing_sample_lengths[i]]
            gt_labels = testing_labels[i,:testing_sample_lengths[i]]
            # thresholding segmentation
            h_results = testing.single_threshold(probs, gt_labels, winmin=6, stepsec=100, threshold=T)
            # time-based metrics
            TN, FP, FN, TP = sklearn.metrics.confusion_matrix(gt_labels, h_results['predictions'][0], labels=[0,1]).ravel()
            total_TP += TP
            total_FP += FP
            total_TN += TN
            total_FN += FN
            # episode-based metrics
            ep_TP, ep_FP, ep_FN = testing.calc_episode_metrics(h_results, gt_labels)
            total_ep_TP += ep_TP
            total_ep_FP += ep_FP
            total_ep_FN += ep_FN
        # calculate and report overall metrics
        TPR = testing.true_positive_rate(total_TP, total_FN)
        TNR = testing.true_negative_rate(total_TN, total_FP)
        F1 = testing.f1_score(total_TP, total_FP, total_FN)
        Prec = testing.precision(total_TP, total_FP)
        WAcc = testing.weighted_accuracy(total_TP, total_FP, total_TN, total_FN)
        ep_TPR = testing.true_positive_rate(total_ep_TP, total_ep_FN)
        ep_F1 = testing.f1_score(total_ep_TP, total_ep_FP, total_ep_FN)
        # sentinel -1 marks an undefined FP/TP ratio (no true positives)
        ep_FP_TP = -1 if total_ep_TP == 0 else total_ep_FP / total_ep_TP
        total_TPR.append(TPR)
        total_TNR.append(TNR)
        total_F1.append(F1)
        total_Prec.append(Prec)
        total_WAcc.append(WAcc)
        total_ep_TPR.append(ep_TPR)
        total_ep_F1.append(ep_F1)
        total_ep_FP_TP.append(ep_FP_TP)
    # average each metric across the k folds for this threshold
    T_results = {'WAcc': np.mean(total_WAcc), 'TPR': np.mean(total_TPR), 'TNR': np.mean(total_TNR), 'F1': np.mean(total_F1), 'Precision': np.mean(total_Prec),
                 'Episode TPR': np.mean(total_ep_TPR), 'Episode F1': np.mean(total_ep_F1), 'Episode FP/TP': np.mean(total_ep_FP_TP)}
    results.append(T_results)
    print('AVERAGE:')
    print('--- Time Metrics ---')
    print(f'WAcc: {np.mean(total_WAcc):.3f}\tTPR: {np.mean(total_TPR):.3f}\tTNR: {np.mean(total_TNR):.3f}\tF1: {np.mean(total_F1):.3f}\tPrecision: {np.mean(total_Prec):.3f}')
    print('--- Episode Metrics ---')
    print(f'TPR: {np.mean(total_ep_TPR):.3f}\tF1: {np.mean(total_ep_F1):.3f}\tFP/TP: {np.mean(total_ep_FP_TP):.3f}')
    print("*****************************************************************", flush=True)
# prepare .csv file for export (one row per threshold)
os.makedirs('results', exist_ok=True)
results_df = pd.DataFrame(results)
results_df.insert(0, 'Threshold', thresholds)
results_df.to_csv(f'results/testing-results-{epochs}epochs.csv', index=False, header=True)
print('Results saved.')
end_time = datetime.now()
print(f'Duration: {end_time - start_time}')
|
{"/DailyPatternClassifier/TrainDailyPatternRNN.py": ["/common/testing.py"], "/DailyPatternClassifier/TestDailyPatternRNN.py": ["/common/testing.py"], "/GenerateSamples/GenerateSamples.py": ["/common/loadfile.py", "/common/training.py", "/common/testing.py"]}
|
11,376
|
apatyk/Daily-Pattern-Classifier
|
refs/heads/main
|
/GenerateSamples/GenerateSamples.py
|
# Adam Patyk
# Clemson University
# MS Thesis: Daily Pattern Classifier
# Summer 2021
# GenerateSamples.py
# Purpose: Generates daily samples for data augmentation
# Usage: python GenerateSamples.py <window_length_minutes>
import sys
import os
import tensorflow as tf # updated for TensorFlow 2.2.0
import numpy as np
import math
from datetime import datetime
from tqdm import tqdm
sys.path.append('../') # for .py files in ../common/
import common.loadfile as loadfile
import common.training as training
import common.testing as testing
# --- CLI validation ---------------------------------------------------------
if len(sys.argv) != 2:
    sys.exit("Usage: python GenerateSamples.py <window_length_in_min>")
# prepare TensorFlow for GPU usage (grow memory on demand instead of
# reserving each GPU's full memory up front)
gpus = tf.config.experimental.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPU,", len(logical_gpus), "Logical GPUs")
# --- run parameters ---------------------------------------------------------
epochs = 30
win_min = int(sys.argv[1]) #6
train_stride_sec = 15
test_stride_sec = 100
# the *15 factors convert seconds to samples -- presumably a 15 Hz sensor
# rate; TODO confirm against common/loadfile.py
win_len = int(win_min * 60 * 15)
train_step = int(train_stride_sec * 15)
test_step = int(test_stride_sec * 15)
start_time = datetime.now()
save_dir = 'samples/'
os.makedirs(save_dir, exist_ok=True)
# echo PBS job info into the log (harmless no-ops outside a PBS job)
arr = ["echo -n 'PBS: node is '; cat $PBS_NODEFILE",\
       "echo PBS: job identifier is $PBS_JOBID",\
       "echo PBS: job name is $PBS_JOBNAME"]
# NOTE(review): list comprehension used purely for side effects; a plain
# for-loop would be the idiomatic form.
[os.system(cmd) for cmd in arr]
print("*****************************************************************", flush=True)
print("Execution Started at " + start_time.strftime("%m/%d/%Y, %H:%M:%S"), flush=True)
print("Window Length: {:.2f} min ({:d} data)\tTraining Slide: {:d} sec ({:d} data)\tTesting Slide: {:d} sec ({:d} data)\tEpochs: {:d}".format(win_min, win_len, train_stride_sec, train_step, test_stride_sec, test_step, epochs), flush=True)
# load the dataset for training with majority vote GT labeling for windows
num_files, all_training_data, training_samples_array, training_labels_array = loadfile.loadAllData3(win_len,
                                                                                                   train_step,
                                                                                                   removerest=0,
                                                                                                   removewalk=0,
                                                                                                   removebias=1)
# load the dataset for testing with a different stride and GT labeling (center point)
all_testing_data, testing_samples_array, testing_labels_array = loadfile.loadAllDataTesting('../common/batch-unix.txt',
                                                                                            win_len,
                                                                                            test_step,
                                                                                            removebias=1)
print("Data loaded.", flush=True)
# normalize the datasets -- several precomputed per-axis mean/stddev
# candidates are kept here; only mean_vals/std_vals below select which
# pair is actually applied
shimmer_global_mean = [-0.012359981,-0.0051663737,0.011612018,
                       0.05796114,0.1477952,-0.034395125 ]
shimmer_global_stddev = [0.05756385,0.040893298,0.043825723,
                         17.199743,15.311142,21.229317 ]
shimmer_trended_mean = [-0.000002,-0.000002,-0.000000,
                        0.058144,0.147621,-0.033260 ]
shimmer_trended_stddev = [0.037592,0.034135,0.032263,
                          17.209038,15.321441,21.242532 ]
all_zero_means = [0,0,0,0,0,0]
mean_vals = all_zero_means          # zero-mean centering
std_vals = shimmer_trended_stddev   # detrended global stddevs
all_training_normalized = loadfile.globalZscoreNormalize(all_training_data, mean_vals, std_vals)
all_testing_normalized = loadfile.globalZscoreNormalize(all_testing_data, mean_vals, std_vals)
print("Data normalized.")
# generate training samples from trained model
num_samples = 200000  # target number of generated sample files overall
subjects = [*range(num_files)]
num_subjects = len(subjects)
num_iterations = math.ceil(num_samples / num_subjects)
print(f'Generating training samples ({num_subjects} subjects)', flush=True)
for i in tqdm(range(num_iterations)):
    start_time = datetime.now()  # NOTE: shadows the script-level start time above
    # train model on all training data
    H, model = training.trainModel(training_samples_array, training_labels_array, all_training_normalized, win_len, epochs, save_dir + f'tmp_{win_min}min.h5')
    # output P(E) and GT to text file for each recording using the trained model
    for s in subjects:
        # column 0 of each sample row is the subject index
        subject_bool = np.isin(testing_samples_array[:,0], s)
        s_samples = testing_samples_array[subject_bool]
        s_labels = testing_labels_array[subject_bool]
        raw_samples, gt_labels = testing.get_raw_data(s_samples, s_labels, all_testing_normalized)
        if raw_samples.size != 0:
            probs = model.predict(raw_samples, batch_size=1024)
            # one row per window: [ground-truth label, P(E)]
            result = np.hstack((np.reshape(gt_labels,(1,-1)).T, probs))
            np.savetxt(save_dir + f'W{win_min}_P{s:03.0f}_I{i:03.0f}.txt', result)
    # release GPU memory before the next training iteration
    tf.keras.backend.clear_session()
    del model
    end_time = datetime.now()
    print(f'Iteration Duration: {end_time - start_time}', flush=True)
print(f'{num_iterations * num_subjects} testing samples saved.')
|
{"/DailyPatternClassifier/TrainDailyPatternRNN.py": ["/common/testing.py"], "/DailyPatternClassifier/TestDailyPatternRNN.py": ["/common/testing.py"], "/GenerateSamples/GenerateSamples.py": ["/common/loadfile.py", "/common/training.py", "/common/testing.py"]}
|
11,377
|
apatyk/Daily-Pattern-Classifier
|
refs/heads/main
|
/DailyPatternClassifier/SubmitTrainRNNJob.py
|
import os
import numpy as np
#PBS -l select=1:ncpus=28:mem=120gb:ngpus=2:gpu_model=p100:interconnect=fdr,walltime=4:00:00
#PBS -l select=1:ncpus=40:mem=370gb:ngpus=2:gpu_model=v100:interconnect=hdr,walltime=4:00:00
# Job configuration: substituted into the PBS script below.
batch_size = 64
num_units = 16
epochs = 50

# Assemble the full PBS job script in one pass. f-strings replace the three
# positional str.format calls (same output, no argument-order bookkeeping),
# and the context manager guarantees the file is flushed and closed before
# qsub reads it.
job_script = f"""#PBS -u apatyk
#PBS -N DP_E{epochs:d}
#PBS -l select=1:ncpus=40:mem=370gb:ngpus=2:gpu_model=v100:interconnect=hdr,walltime=16:00:00
#PBS -m ae
#PBS -o /home/apatyk/Research/NewModels/results-gru-b{batch_size:d}-u{num_units:d}-e{epochs:d}.txt
#PBS -j oe
ulimit -c 0
source /software/spackages/linux-centos8-x86_64/gcc-8.3.1/anaconda3-2019.10-v5cuhr6keyz5ryxcwvv2jkzfj2gwrj4a/etc/profile.d/conda.sh
module load cuda/10.2.89-gcc/8.3.1 cudnn/8.0.0.180-10.2-linux-x64-gcc/8.3.1 anaconda3/2019.10-gcc/8.3.1
conda activate tf_env
cd ~/Research/NewModels/
python TrainDailyPatternRNN.py {batch_size:d} {num_units:d} {epochs:d}
if [ $? -ne 0 ]; then
rm core.*
fi
exit"""

with open("job.pbs", "w") as pbsfile:
    pbsfile.write(job_script)

# Submit the generated job script to the PBS scheduler.
os.system("qsub job.pbs")
|
{"/DailyPatternClassifier/TrainDailyPatternRNN.py": ["/common/testing.py"], "/DailyPatternClassifier/TestDailyPatternRNN.py": ["/common/testing.py"], "/GenerateSamples/GenerateSamples.py": ["/common/loadfile.py", "/common/training.py", "/common/testing.py"]}
|
11,384
|
vitoo22/showmylikes
|
refs/heads/master
|
/app.py
|
from flask import Flask, render_template, request, redirect, url_for
from get_instagrams_token import *
import urllib.request
import json
app = Flask(__name__)
@app.route('/')
def hello_world():
    """Render the landing page (templates/home.html)."""
    return render_template('home.html')
@app.route('/authorization-completed/', defaults={'code': ''})
@app.route('/authorization-completed/<code>')
def authorization_completed(code):
    """Handle the Instagram OAuth redirect.

    Exchanges the authorization ``code`` for an access token, fetches the
    authenticated user's liked media, and renders it.

    Instagram normally delivers the code as a query parameter (``?code=...``);
    the path variant of the route is kept as a fallback.
    """
    # Prefer the query-string code but fall back to the path parameter.
    # (The original unconditionally overwrote `code`, which made the
    # /<code> route variant dead.)
    code = request.args.get('code') or code
    access_token = api.exchange_code_for_access_token(code)
    # NOTE(review): passing the access token in the URL query string leaks it
    # into server/proxy logs; prefer a client that sends it in a header.
    url = 'https://api.instagram.com/v1/users/self/media/liked?access_token=' + access_token[0]
    response = urllib.request.urlopen(url)
    string = response.read().decode('utf-8')
    json_data = json.loads(string)
    return render_template('authorization-completed.html', code=access_token, json_data=json_data)
if __name__ == '__main__':
app.run(debug=True)
|
{"/app.py": ["/get_instagrams_token.py"]}
|
11,385
|
vitoo22/showmylikes
|
refs/heads/master
|
/get_instagrams_token.py
|
from instagram.client import InstagramAPI
client_id = 'acb84376812747f0b6086b14e905332b'
client_secret = 'b638f8a09d28480d8e20541a2c51bafc'
redirect_uri = 'http://127.0.0.1:5000/authorization-completed/'
scope = 'public_content'
api = InstagramAPI(client_id=client_id, client_secret=client_secret, redirect_uri=redirect_uri)
|
{"/app.py": ["/get_instagrams_token.py"]}
|
11,386
|
springle/elevator
|
refs/heads/master
|
/elevator_control_system_tests.py
|
import unittest
from elevator_control_system import ElevatorControlSystem
class TestElevatorControlSystem(unittest.TestCase):
    """Unit tests for ElevatorControlSystem in a 16-elevator, 16-floor building."""

    @staticmethod
    def _expected_status(overrides=None):
        """Build the expected status list: 16 idle elevators on floor 0.

        ``overrides`` maps an elevator id to the full status tuple expected
        for that elevator; every other elevator is assumed idle.
        """
        status = [(i, 0, [], [], 0) for i in range(16)]
        for idx, entry in (overrides or {}).items():
            status[idx] = entry
        return status

    def test_init(self):
        """
        A fresh system reports all 16 elevators idle on the ground floor.
        """
        ecs = ElevatorControlSystem(16, 16)
        self.assertEqual(ecs.status(), self._expected_status())

    def test_update_order(self):
        """
        A single elevator orders several destination requests correctly.
        This simulation is impractical (the pickup algorithm would spread
        people across elevators) but verifies the per-elevator queue order.
        """
        ecs = ElevatorControlSystem(16, 16)
        for floor in (5, 4, 2, 8):
            ecs.update(0, floor)
        self.assertEqual(
            ecs.status(),
            self._expected_status({0: (0, 0, [2, 5, 4, 8], [], 1)}))

    def test_duplicates(self):
        """
        A repeated request must not be queued twice.
        """
        ecs = ElevatorControlSystem(16, 16)
        ecs.update(1, 5)
        ecs.update(1, 5)
        self.assertEqual(
            ecs.status(),
            self._expected_status({1: (1, 0, [5], [], 1)}))

    def test_pickup(self):
        """
        Pickup requests are distributed correctly across the bank.
        Verifies that (1) direction mismatches are penalized, (2) each
        elevator orders its requests, and (3) duplicates are consolidated.
        """
        ecs = ElevatorControlSystem(16, 16)
        for floor, direction in ((5, 1), (5, -1), (4, 1), (5, 1), (6, -1), (4, -1)):
            ecs.pickup(floor, direction)
        self.assertEqual(
            ecs.status(),
            self._expected_status({
                0: (0, 0, [4, 5], [], 1),
                1: (1, 0, [5], [], 1),
                2: (2, 0, [6], [], 1),
                3: (3, 0, [4], [], 1),
            }))

    def test_step(self):
        """
        The simulation actually runs and elevators drop people off.
        Every busy elevator (all heading up) moves 4 floors in 4 steps, all
        requests at or below floor 4 complete, and elevators whose queues
        empty return to direction 0.
        """
        ecs = ElevatorControlSystem(16, 16)
        for floor, direction in ((2, 1), (5, -1), (4, 1), (2, 1), (6, -1), (4, -1)):
            ecs.pickup(floor, direction)
        for _ in range(4):
            ecs.step()
        self.assertEqual(
            ecs.status(),
            self._expected_status({
                0: (0, 4, [], [], 0),
                1: (1, 4, [5], [], 1),
                2: (2, 4, [6], [], 1),
                3: (3, 4, [], [], 0),
            }))

    def test_turnaround(self):
        """
        The system prioritizes correctly between up_queue and down_queue.
        Elevator 0 should complete all requests within six steps (optimal
        for this request order/timing) and finish on floor 2, respecting
        the order of the requests where possible.
        """
        ecs = ElevatorControlSystem(16, 16)
        ecs.pickup(4, 1)
        for _ in range(3):
            ecs.step()
        ecs.update(0, 2)
        for _ in range(3):
            ecs.step()
        self.assertEqual(
            ecs.status(),
            self._expected_status({0: (0, 2, [], [], 0)}))
if __name__ == '__main__':
unittest.main()
|
{"/elevator_control_system_tests.py": ["/elevator_control_system.py"]}
|
11,387
|
springle/elevator
|
refs/heads/master
|
/elevator_control_system.py
|
import heapq, random, time
class ElevatorControlSystem():
    """Coordinates a bank of Elevator objects for a single building.

    Destination requests (`update`) go straight to a specific elevator;
    pickup requests (`pickup`) are assigned to the elevator with the lowest
    estimated cost (floor distance plus a detour penalty).
    """
    def __init__(self, number_of_floors, number_of_elevators):
        """Create `number_of_elevators` cars for a `number_of_floors` building.

        Raises AssertionError if either count is non-positive.
        """
        if number_of_elevators <= 0:
            raise AssertionError("Your building must have at least one elevator.")
        if number_of_floors <= 0:
            raise AssertionError("Your building must have at least one floor.")
        self.elevators = [Elevator(i) for i in range(number_of_elevators)]
        self.number_of_floors = number_of_floors
        self.pending_requests = []
    def status(self):
        # returns the status of every elevator as
        # (id, current floor, up queue, down queue, direction)
        return [(e.id, e.current_floor, e.up_queue, e.down_queue, e.direction) for e in self.elevators]
    def describe(self):
        # human-readable dump of every elevator's state
        for e in self.elevators:
            print(e)
    def update(self, elevator_id, floor_number):
        # updates the state of an elevator in the system, adding a floor to its queue
        e = self.elevators[elevator_id]
        e.add_to_queue(floor_number)
    def pickup(self, floor_number, direction):
        """Assign a pickup at `floor_number` heading `direction` (+1 up / -1 down).

        Each elevator is scored by its floor distance to the request plus a
        penalty of twice its farthest committed stop when it is moving away
        from the request or in the opposite direction; lowest score wins.
        """
        best_elevator = self.elevators[0]
        best_distance = self.number_of_floors * 2
        for e in self.elevators:
            distance = abs(e.current_floor - floor_number)
            # penalize elevators that must finish their current run (and turn
            # around) before they can serve this request; the queue guards
            # protect against an inconsistent direction/queue state
            if e.up_queue and e.direction > 0 and (floor_number < e.current_floor or direction < 0):
                distance += 2 * max(e.up_queue)
            elif e.down_queue and e.direction < 0 and (floor_number > e.current_floor or direction > 0):
                # down_queue stores negated floors; the heap minimum is the
                # highest committed downward stop. BUG FIX: the original added
                # the raw (negative) heap value, which *lowered* the score and
                # acted as a bonus instead of a penalty.
                distance += 2 * -e.down_queue[0]
            if distance < best_distance:
                best_elevator = e
                best_distance = distance
        best_elevator.add_to_queue(floor_number)
    def step(self):
        # moves every elevator through one interval of the simulation
        for e in self.elevators:
            e.step()
class Elevator():
    """A single elevator car: current floor, travel direction, and stop queues."""
    def __init__(self, elevator_id):
        self.id = elevator_id
        self.current_floor = 0
        self.direction = 0  # 1 for moving up, -1 for moving down, 0 for stationary
        self.up_queue = []   # min-heap of floors above the car
        self.down_queue = [] # min-heap of NEGATED floors, so the highest floor pops first
    def step(self):
        """Advance one interval: move one floor, service it, re-evaluate direction."""
        self.current_floor += self.direction
        self.drop_off()
        self.update_direction()
    def drop_off(self):
        """Pop the current floor from the active queue if it is a scheduled stop."""
        if self.up_queue and self.current_floor == self.up_queue[0]:
            heapq.heappop(self.up_queue)
            print("Elevator " + str(self.id) + " stopping on floor " + str(self.current_floor))
        elif self.down_queue and self.current_floor == abs(self.down_queue[0]):
            heapq.heappop(self.down_queue)
            print("Elevator " + str(self.id) + " stopping on floor " + str(self.current_floor))
    def update_direction(self):
        """Turn around or go idle when the queue for the current direction empties."""
        if self.direction > 0 and not self.up_queue:
            self.direction = -1 if self.down_queue else 0
        if self.direction < 0 and not self.down_queue:
            self.direction = 1 if self.up_queue else 0
    def add_to_queue(self, floor_number, direction=0):
        """Schedule a stop at `floor_number` (`direction` accepted but unused)."""
        if floor_number == self.current_floor:
            # already here: announce the stop without queueing anything
            print("Elevator " + str(self.id) + " stopping on floor " + str(floor_number))
        elif floor_number > self.current_floor:
            if floor_number not in self.up_queue:
                heapq.heappush(self.up_queue, floor_number)
            if not self.direction:
                self.direction = 1
        else:
            # BUG FIX: the down queue stores negated floors, so duplicates must
            # be checked against -floor_number; the original tested the
            # positive value and therefore queued down-stops twice.
            if -floor_number not in self.down_queue:
                heapq.heappush(self.down_queue, -floor_number)
            if not self.direction:
                self.direction = -1
    def __str__(self):
        return ("Elevator " + str(self.id)
                + " is on floor " + str(self.current_floor)
                + " going in direction " + str(self.direction)
                + " with up_queue " + str(self.up_queue)
                + " and down_queue " + str(self.down_queue) + ".")
if __name__ == '__main__':
    # Endless random simulation: every second, each of the 16 elevators
    # receives one random destination request and one random pickup request
    # is dispatched, then the whole system advances 16 steps.
    print("----------------------------------")
    print("---BEGINNING RANDOM SIMULATIONS---")
    print("-------PRESS CTRL+C TO STOP-------")
    print("----------------------------------")
    time.sleep(2)
    ecs = ElevatorControlSystem(16,16)
    while(True):
        for i in range(16):
            a = random.randint(0,15)  # random destination floor
            b = random.randint(0,15)  # random pickup floor
            ecs.update(i, a)
            print('Requesting elevator ' + str(i) + ' to stop on floor ' + str(a) + '.')
            direction = random.choice([-1,1])
            ecs.pickup(b, direction)
            print('Requesting pickup on floor ' + str(b) + ' in direction ' + str(direction) + '.')
        for i in range(16):
            ecs.step()
        print(ecs.status())
        time.sleep(1)
|
{"/elevator_control_system_tests.py": ["/elevator_control_system.py"]}
|
11,412
|
jjwindow/PyFields
|
refs/heads/main
|
/high_res_all_moons.py
|
from all_funcs import save_moon_trace
from multiprocessing import Pool
from timeit import default_timer as timer
# print("Start Unparallel")
# start = timer()
# all_moons = ['Miranda', 'Ariel', 'Umbriel', 'Titania', 'Oberon', 'Triton']
# num_fieldlines = 0
# num_orbit_points = 2
# for moon in all_moons:
# if moon == 'Triton':
# num_orbits = 2
# else:
# num_orbits = 1
# save_moon_trace(moon, num_orbit_points, num_orbits, num_fieldlines)
# print("End Unparallel")
# end = timer()
# print(end-start)
def run(moon):
    """Trace and save orbit/footpoint data for one moon (Triton gets two orbits)."""
    fieldline_count = 0
    orbit_points = 500
    orbit_count = 2 if moon == 'Triton' else 1
    save_moon_trace(moon, orbit_points, orbit_count, fieldline_count)
def main():
    """Trace every moon in parallel (one worker per moon) and report wall time."""
    moons = ['Miranda', 'Ariel', 'Umbriel', 'Titania', 'Oberon', 'Triton']
    print("Start Parallel")
    t0 = timer()
    with Pool() as worker_pool:
        worker_pool.map(run, moons)
    t1 = timer()
    print("End Parallel")
    print(t1 - t0)
# if __name__ == '__main__':
# main()
|
{"/high_res_all_moons.py": ["/all_funcs.py"], "/footpoint_analysis.py": ["/all_funcs.py"], "/planet_testing.py": ["/all_funcs.py"], "/unit_speed_tests.py": ["/all_funcs.py"], "/convergence_tests.py": ["/all_funcs.py"], "/dipole_testing.py": ["/all_funcs.py"], "/satellite_footpoints.py": ["/all_funcs.py"], "/quadrupole_testing.py": ["/all_funcs.py"]}
|
11,413
|
jjwindow/PyFields
|
refs/heads/main
|
/footpoint_analysis.py
|
from all_funcs import *
from matplotlib import animation
all_moons = ['Miranda', 'Ariel', 'Umbriel', 'Titania', 'Oberon', 'Triton']
def plot_ang_dev_timeseries(moon, trueFoot_f, trueFoot_b, footpoints_f, footpoints_b, time):
    """Plot forward vs. backward angular/lat/long deviation time series for one moon.

    ``time`` is rescaled into units of the relative period of the moon's
    orbit and the parent's rotation before plotting.
    """
    angs_array = angular_deviation(footpoints_f, trueFoot_f, footpoints_b, trueFoot_b)
    # NOTE(review): the last two backward labels are missing the moon name,
    # unlike their forward counterparts -- confirm whether intentional.
    fignames = [f"{moon} Forwards Angular Deviation", f"{moon} Forwards Latitude Deviation", f"{moon} Forwards Longitude Deviation", f"{moon} Backwards Angular Deviation", "Backwards Latitude Deviation", "Backwards Longitude Deviation"]
    T_m, T_p = moon_selector(moon, 'T', 'parent_day')
    # relative (beat) period between the moon's period and the parent's day
    T_rel = abs(T_m*T_p/(T_m-T_p))
    time = time/T_rel
    ax = plt.subplot(1,1,1)
    n = int(len(fignames)/2)  # first half of fignames are the forward series
    for i, (f_array, name) in enumerate(zip(angs_array, fignames)):
        ax.clear()
        # only iterate the forward half; each backward partner is drawn below
        if i >= n:
            break
        # entries are (position, angle) pairs; keep only the angle
        f_array = [ang for (pos, ang) in f_array]
        ax.plot(time, f_array, label = name)
        b_array = angs_array[i+3]  # matching backward series (offset by 3)
        b_array = [ang for (pos, ang) in b_array]
        ax.plot(time, b_array, label = fignames[i+3])
        ax.legend()
        plt.show()
def ang_devs_timeseries(moon_arr):
    """Load the saved coarse (40-point) footpoint runs for each moon and plot deviations."""
    for moon in moon_arr:
        # Miranda and Triton were traced with non-default orbit counts.
        if moon == 'Miranda':
            orb = 1.05
        elif moon == 'Triton':
            orb = 2
        else:
            orb = 1

        def _load(stem):
            # Every array of a run follows the <moon>/<stem>_40_<orb>_100.npy scheme.
            with open(f'{moon}/{stem}_40_{orb}_100.npy', 'rb') as file:
                return np.load(file, allow_pickle=True)

        trueFoot_f_arr = _load('trueFoot_f')
        trueFoot_b_arr = _load('trueFoot_b')
        footpoints_f_arr = _load('footpoints_f')
        footpoints_b_arr = _load('footpoints_b')
        time = _load('time')
        plot_ang_dev_timeseries(moon, trueFoot_f_arr, trueFoot_b_arr, footpoints_f_arr, footpoints_b_arr, time)
# ang_devs_timeseries(all_moons)
##################### DATA RETRIEVAL ##########################
def get_coarse_moon_footpoints(moon, n_o_p, n_o):
    """
    Load the saved forward/backward true footpoints and time axis for a run
    with n_o_p points per orbit and n_o orbits.
    """
    arrays = []
    for stem in ('trueFoot_f', 'trueFoot_b', 'time'):
        with open(f'{moon}/{stem}_{n_o_p}_{n_o}_100.npy', 'rb') as file:
            arrays.append(np.load(file, allow_pickle=True))
    return tuple(arrays)
def get_final_moon_footpoints(moon):
    """
    Load the forward, backward, and time arrays from the final (n=500) runs.
    Triton was traced over two orbits; every other moon over one.
    """
    num_orbits = 2 if moon == 'Triton' else 1
    arrays = []
    for stem in ('trueFoot_f', 'trueFoot_b', 'time'):
        with open(f'Finals/{moon}/{stem}_500_{num_orbits}.npy', 'rb') as file:
            arrays.append(np.load(file, allow_pickle=True))
    return tuple(arrays)
################# ANIMATIONS #######################
def animate_footpoints(moon, footpoints, delay):
    """
    Build a matplotlib animation of a moon's footpoint track on a lat-long map.

    footpoints = (trueFoot_f_arr, trueFoot_b_arr, time)
    delay (ms) - interval passed to animate
    Returns the FuncAnimation; the caller is responsible for saving/showing it.
    """
    (trueFoot_f_arr, trueFoot_b_arr, time) = footpoints
    P = max(time)  # normalization period for the title's time readout
    # Setup figure axes
    fig, ax = plt.subplots()
    ax.set_xlabel(r"Longitude ($^{\circ}$)")
    ax.set_ylabel(r"Latitude ($^{\circ}$)")
    ax.set_xlim(360, 0)  # longitude axis deliberately reversed
    ax.set_ylim(-90, 90)
    latf_arr, longf_arr = [], []
    latb_arr, longb_arr = [], []
    # Calc latitudes + longitudes
    # NOTE(review): `pos` is bound twice in the unpacking below; the position
    # is unused, so this is harmless, but renaming one to `_` would be clearer.
    for (pos, fp_f), (pos, fp_b) in zip(trueFoot_f_arr, trueFoot_b_arr):
        latf, longf = cartesian2latlong(*fp_f)
        latf_arr.append(latf)
        longf_arr.append(longf + 180)  # shift longitude into [0, 360)
        latb, longb = cartesian2latlong(*fp_b)
        latb_arr.append(latb)
        longb_arr.append(longb + 180)
    linef, = ax.plot(longf_arr, latf_arr, '-', label = 'Forward')
    lineb, = ax.plot(longb_arr, latb_arr, '-', label = 'Backward')
    legend = ax.legend()
    def init():
        # Flush figures to begin
        linef.set_ydata([np.nan] * len(longf_arr))
        lineb.set_ydata([np.nan] * len(longb_arr))
        return linef, lineb,
    def animate(i):
        # Reveal the first i points of both tracks.
        linef.set_xdata(longf_arr[:i])
        linef.set_ydata(latf_arr[:i])
        lineb.set_xdata(longb_arr[:i])
        lineb.set_ydata(latb_arr[:i])
        ax.legend()
        # NOTE(review): the title is not among the returned artists, so it
        # may not refresh under blit=True on interactive backends -- confirm.
        ax.set_title(f"{moon}, t = {round(time[i]/(P), 2)}" + r" x $T_{rel}$")
        return linef, lineb,
    ani = animation.FuncAnimation(fig, animate, frames = len(time), blit = True, init_func = init, interval=delay)
    return ani
def animate_all_moons():
    """Render and save an .mp4 footpoint animation for every moon."""
    print("Animating Moons...")
    for moon in all_moons:
        print(f"...{moon}")
        orbit_count = 2 if moon == 'Triton' else 1
        out_path = f'Finals/{moon}/Animation_{moon}_500_{orbit_count}.mp4'
        moon_footpoints = get_final_moon_footpoints(moon)
        movie = animate_footpoints(moon, moon_footpoints, 50)
        movie.save(filename = out_path, writer = 'ffmpeg')
    print("Done")
# animate_all_moons()
######## COORDINATE TIMESERIES ########
def lat_long_timeseries(moon):
    """Plot and save longitude/latitude footpoint time series for one moon.

    Loads the final (n=500) forward/backward footpoint runs, converts the
    Cartesian footpoints to latitude/longitude, and writes the figure to
    Finals/<moon>/timeseries_<moon>_lat_long.png.
    """
    trueFoot_f_arr, trueFoot_b_arr, time = get_final_moon_footpoints(moon)
    T_rel = max(abs(time))  # normalize time to the run's relative period
    time /= T_rel
    # entries are (position, footpoint) pairs; convert each footpoint
    latlongs_f = [cartesian2latlong(*foot) for (pos, foot) in trueFoot_f_arr]
    lat_f, long_f = map(np.asarray, zip(*latlongs_f))
    long_f += 180  # shift longitude into [0, 360)
    latlongs_b = [cartesian2latlong(*foot) for (pos, foot) in trueFoot_b_arr]
    lat_b, long_b = map(np.asarray, zip(*latlongs_b))
    long_b += 180
    # top panel: longitude, bottom panel: latitude
    fig, axs = plt.subplots(2, 1, sharex=True)
    axs[0].clear()
    axs[1].clear()
    axs[0].set_ylim(0, 360)
    axs[0].plot(time, long_f, label='Forwards')
    axs[0].plot(time, long_b, label = 'Backwards')
    axs[0].set_ylabel(r"Longitude ($^{\circ}$)")
    axs[1].set_ylim(-90, 90)
    axs[1].plot(time, lat_f, label='Forwards')
    axs[1].plot(time, lat_b, label = 'Backwards')
    axs[1].set_ylabel(r"Latitude ($^{\circ}$)")
    axs[1].set_xlabel(r"Time ($T_{rel}$)")
    axs[0].set_title(f"{moon}")
    axs[0].legend()
    axs[1].legend()
    plt.savefig(f"Finals/{moon}/timeseries_{moon}_lat_long.png")
    print(f"{moon} Figure saved.")
[lat_long_timeseries(moon) for moon in all_moons]
def footpoint_velocities(moon):
    """Placeholder for footpoint-velocity analysis.

    Currently only loads the final footpoint run and normalizes the time
    axis; the plotting body below is commented out, so calling this has no
    visible effect yet.
    """
    trueFoot_f_arr, trueFoot_b_arr, time = get_final_moon_footpoints(moon)
    T_rel = max(abs(time))
    time /= T_rel
    # fig, axs = plt.subplots(2, 1, sharex=True)
    # n = int(1.5*len(time)/5)
    # axs[0].clear()
    # axs[1].clear()
    # axs[0].plot(time[:n]/(0.2*T_m*n_o), latf_arr[:n], 'b-', label = 'Forwards')
    # axs[0].plot(time[:n]/(0.2*T_m*n_o), latb_arr[:n], 'r-', label = 'Backwards')
    # axs[0].set_ylabel(r"Latitude ($^{\circ}$)")
    # axs[1].plot(time[:n]/(0.2*T_m*n_o), longf_arr[:n], 'b-', label = 'Forwards')
    # axs[1].plot(time[:n]/(0.2*T_m*n_o), longb_arr[:n], 'r-', label = 'Backwards')
    # axs[1].set_ylabel(r"Longitude ($^{\circ}$)")
    # axs[1].set_xlabel(r"Time /$T_{rel}$")
    # axs[0].legend()
    # axs[1].legend()
    # # plt.show()
######## LOWES SPECTRA & FIELD RATIO DECAY ###########################
# r = np.linspace(1, 25, 1000)
# ratio = r**-1
# quad = r**-4
# dip = r**-3
# uranus_Lowes = np.array([(i+1)*sum(_g**2 for _g in g) for i, g in enumerate(g_U)]) + np.array([(i+1)*sum(_h**2 for _h in h) for i, h in enumerate(h_U)])
# uranus_Lowes = uranus_Lowes[1:]
# uranus_Lowes /= uranus_Lowes[0]
# neptune_Lowes = np.array([(i+1)*sum(_g**2 for _g in g) for i, g in enumerate(g_N)]) + np.array([(i+1)*sum(_h**2 for _h in h) for i, h in enumerate(h_N)])
# neptune_Lowes = neptune_Lowes[1:]
# neptune_Lowes /= neptune_Lowes[0]
# ratio_U = uranus_Lowes[1]/uranus_Lowes[0]
# ratio_N = neptune_Lowes[1]/neptune_Lowes[0]
# # plt.plot(range(len(uranus_Lowes)), uranus_Lowes)
# # plt.plot(range(len(neptune_Lowes)), neptune_Lowes)
# plt.plot(r, ratio_U*ratio, label = 'Uranus')
# plt.plot(r, ratio_N*ratio, label = 'Neptune')
# for moon in all_moons:
# a, = moon_selector(moon, 'a')
# plt.axvline(a, linestyle = '-.', color = 'k')
# plt.text(a+0.5, 1, f'{moon}', rotation=90)
# plt.legend()
# plt.show()
|
{"/high_res_all_moons.py": ["/all_funcs.py"], "/footpoint_analysis.py": ["/all_funcs.py"], "/planet_testing.py": ["/all_funcs.py"], "/unit_speed_tests.py": ["/all_funcs.py"], "/convergence_tests.py": ["/all_funcs.py"], "/dipole_testing.py": ["/all_funcs.py"], "/satellite_footpoints.py": ["/all_funcs.py"], "/quadrupole_testing.py": ["/all_funcs.py"]}
|
11,414
|
jjwindow/PyFields
|
refs/heads/main
|
/planet_testing.py
|
from all_funcs import *
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib as mat
import numpy as np
from numpy.linalg import norm
from tqdm import tqdm
import os.path
from palettable.wesanderson import Aquatic2_5, Cavalcanti_5
# --- Trace and plot the Uranus quadrupole field over a grid of azimuths -----
fig = plt.figure()
ax = plt.axes(projection = '3d')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
phi_array = np.linspace(0, 2*np.pi, 80)  # 80 azimuthal starting angles
full_field_lines = []
for phi in phi_array:
    # trace 12 field lines over theta in [0, 2*pi) at this azimuth
    field_lines = multilines(phi, 12, 0, 2*np.pi, coeffs=uranus, ds=0.01, maxits=100000, plot=False)
    full_field_lines.append(field_lines)
    for field_line in field_lines:
        (x, y, z) = field_line
        ax.plot3D(x, y, z, color=Aquatic2_5.mpl_colors[0])
# NOTE(review): a Uranus field is being written into the Neptune_Fields
# directory -- confirm the intended output location.
fpath = 'Neptune_Fields/Uranus_quadrupole_field_80phi_12theta.npy'
with open(fpath, 'wb') as file:
    np.save(file, full_field_lines)
# all_moons = ['Miranda', 'Ariel', 'Umbriel', 'Titania', 'Oberon']
# for i, moon in enumerate(all_moons):
# orbital_points, T_arr = orbit(moon, 200, 1, relative=False)
# x, y, z = spherical2cartesian(orbital_points)
# ax.plot3D(x, y, z, color=Cavalcanti_5.mpl_colors[i], label = moon)
# orbital_points, T_arr = orbit('Triton', 200, 1, relative=True)
# x, y, z = spherical2cartesian(orbital_points)
# ax.plot3D(x, y, z, color=Cavalcanti_5.mpl_colors[4], label = 'Triton')
# plt.legend()
# plt.show()
###### Plotting range of footpoints for a single position on lat-long plot ######
# phi = 0
# moon = 'Titania'
# footpoints, trueFoot = random_footpoints(100, moon, phi, True)
# fpath = 'Titania_phi-0_n-100.npy'
# fpathTrue = 'Titania_phi-0_true.npy'
# with open(fpath, 'wb') as file:
# np.save(file, footpoints)
# with open(fpathTrue, 'wb') as file:
# np.save(file, np.asarray(trueFoot))
# with open(fpath, 'rb') as file:
# footpoints = np.load(file, allow_pickle=True)
# with open(fpathTrue, 'rb') as file:
# trueFoot = np.load(file, allow_pickle=True)
# x, y, z = map(list, zip(*footpoints))
# lat, longt = cartesian2latlong(x, y, z)
# trueLat, trueLongt = cartesian2latlong(*trueFoot)
def makeThisAPlottingFunc():
    """
    Make this a general plotting func later.

    NOTE(review): every name used here (trueLongt, trueLat, longt, lat,
    moon, phi) is defined only in the commented-out footpoint section
    above, so calling this as-is raises NameError. Parameterize before use.
    """
    plt.plot(trueLongt, trueLat, 'ro', label = r"Accepted $g_n^m,~h_n^m$")
    plt.plot(longt, lat, 'x', label = r"Random $g_n^m,~h_n^m$")
    plt.annotate(f"{moon}, phi = {phi}", (0.7, 0.05), xycoords = 'axes fraction')
    plt.xlabel(r'Longitude ($^\circ$)')
    plt.ylabel(r'Latidude ($^\circ$)')
    plt.legend()
    plt.show()
###### Histograms ######
def histograms_dep():
    """
    Histograms of angular deviation due to uncertainty of harmonic coefficients.

    Builds a 3-panel figure: latitude deviations, longitude deviations, and
    absolute angular deviations of the random footpoints from the accepted
    footpoint, each with its mean marked by a dashed line.

    NOTE(review): reads the module-level names footpoints, trueLat, trueLongt,
    mean_lat_dev, mean_long_dev, ang_dev and mean_ang_dev — these must be
    computed (currently commented out above) before calling.
    """
    lat_devs = []
    longt_devs = []
    latitudes = []
    longitudes = []
    # Deviation of each random footpoint from the accepted one, in degrees.
    for fp in footpoints:
        x, y, z = fp
        latitude, longitude = cartesian2latlong(x, y, z)
        latitudes.append(latitude)
        longitudes.append(longitude)
        lat_devs.append(trueLat - latitude)
        longt_devs.append(trueLongt - longitude)
    # fig, ax1 = plt.subplots(3, 1, sharex = True)
    fig = plt.figure()
    # Invisible full-figure axes used only to carry shared axis labels.
    title_ax = fig.add_subplot(111, frameon=False)
    # hide tick and tick label of the big axis
    plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
    title_ax.set_ylabel("Frequency Density")
    title_ax.set_xlabel(r"Deviation from Accepted Footpoint ($^\circ$)")
    ax1 = fig.add_subplot(3,1,1)
    ax1.hist(lat_devs, bins='auto', color='b', edgecolor='k', label="Latitude") #latitude deviations histogram
    ax1.axvline(mean_lat_dev, color='k', linestyle='dashed', linewidth=1, label = f"Mean: {round(mean_lat_dev, 3)}")
    ax1.legend()
    ax2 = fig.add_subplot(3,1,2)
    ax2.hist(longt_devs, bins='auto', color='c', edgecolor='k', label = "Longitude") #longitude deviations histogram
    ax2.axvline(mean_long_dev, color='k', linestyle='dashed', linewidth=1, label = f"Mean: {round(mean_long_dev, 3)}")
    ax2.legend()
    ax3 = fig.add_subplot(313)
    # Absolute angular deviations converted from radians to degrees.
    ax3.hist([ang*180/np.pi for ang in ang_dev], bins='auto', edgecolor='k', label = 'Absolute Angle')
    ax3.axvline(mean_ang_dev*180/np.pi, color = 'k', linestyle='dashed', label=f"Mean: {round(mean_ang_dev*180/np.pi, 3)}")
    ax3.legend()
    plt.show()
# fig, ax2 = plt.subplots(2, 1)
# ax2[0].hist(latitudes, bins='auto') #latitudes histogram
# ax2[0].axvline(trueLat, color='k', linestyle='dashed', linewidth=1)
# ax2[1].hist(longitudes, bins='auto') #longitudes histogram
# ax2[1].axvline(trueLongt, color='k', linestyle='dashed', linewidth=1)
############# ORBIT TESTING #############
# set up 3d axes
# ax = plt.axes(projection = '3d')
# ax.set_xlabel('x')
# ax.set_ylabel('y')
# ax.set_zlabel('z')
# with tqdm(total=50, desc="FOOTPOINTS") as bar:
# footpoints = []
# for phi in np.linspace(0, 2*np.pi, 50):
# start_pos = [17.188, (np.pi/2 - 0.00593), phi]
# x, y, z = field_trace(start_pos, uranus, 0.005, 200000)
# # point = (x[-1], y[-1], z[-1])
# # footpoints.append(point)
# ax.plot3D(x, y, z, color=Cavalcanti_5.mpl_colors[3])
# bar.update()
# print(len(footpoints))
# x, y, z = map(list, zip(*footpoints))
def plot_orbits(moons_list, num, num_orbits, relative = False):
    """
    Plots all orbital paths for moons in a 'moons_list', in sidereal or planet
    rest frame, on 3d axes. Also plots planet for scale.
    PARAMS
    -----------------------------------------------------------------------------
    moons_list - array or list; contains only elements of type str, which must
                 be one of the Uranian or Neptunian moons.
    num        - int; number of time samples per orbit (passed to orbit()).
    num_orbits - float or int; number of orbits to trace.
    relative   - bool; if false, orbits plotted in sidereal rest frame. Otherwise,
                 plotted in planet rest frame.

    NOTE(review): draws on a module-level 3D axes `ax`, which must exist before
    this is called.
    """
    # initialise lists
    x_ptp_arr = []
    y_ptp_arr = []
    z_ptp_arr = []
    # plot each moon in list
    for i, moon in enumerate(moons_list):
        orbital_points, T_arr = orbit(moon, num, num_orbits, relative=relative) # retrieve orbital path
        x, y, z = spherical2cartesian(orbital_points) # convert to Cartesian
        ax.plot3D(x, y, z, color=Cavalcanti_5.mpl_colors[i], label = moon)
        # save peak-to-peak width of orbital path in each co-ord.
        x_ptp_arr.append(np.ptp(x))
        y_ptp_arr.append(np.ptp(y))
        z_ptp_arr.append(np.ptp(z))
    # plot planet as a unit-radius wireframe sphere (coords are scaled to planet radii).
    u, v = np.mgrid[0:2*np.pi:50j, 0:np.pi:25j]
    a = np.cos(u)*np.sin(v)
    b = np.sin(u)*np.sin(v)
    c = np.cos(v)
    # find maximum bound for each coordinate
    x_len = max(x_ptp_arr)
    y_len = max(y_ptp_arr)
    # maximum z-bound either set by orbit or by planet, must compare both
    z_len = max(max(z_ptp_arr), np.ptp(c))
    # set aspect ratio by largest path in each dimension -> no squished paths or planets
    ax.set_box_aspect((x_len, y_len, z_len))
    ax.plot_wireframe(a, b, c, color=Aquatic2_5.mpl_colors[0])
    plt.legend()
    plt.show()
# Plotting the different planetary systems
# uranus_moons = ['Miranda', 'Ariel', 'Umbriel', 'Titania', 'Oberon']
# plot_orbits(uranus_moons)
# plot_orbits(uranus_moons, 200, 1, True)
|
{"/high_res_all_moons.py": ["/all_funcs.py"], "/footpoint_analysis.py": ["/all_funcs.py"], "/planet_testing.py": ["/all_funcs.py"], "/unit_speed_tests.py": ["/all_funcs.py"], "/convergence_tests.py": ["/all_funcs.py"], "/dipole_testing.py": ["/all_funcs.py"], "/satellite_footpoints.py": ["/all_funcs.py"], "/quadrupole_testing.py": ["/all_funcs.py"]}
|
11,415
|
jjwindow/PyFields
|
refs/heads/main
|
/unit_speed_tests.py
|
# import field_calculator
# import field_calculator_numba
# import test_wrapper
# import stepper
from all_funcs import *
# from dipole_testing import multilines
import numpy as np
import time
# Pure-dipole harmonic coefficients: only g_1^0 = 1 is non-zero.
# Fix: h_D previously mirrored g_D with h[1][0] = 1., inconsistent with the
# dipole defined in all_funcs.py.  m = 0 h-terms never contribute to the field
# (they multiply sin(0) = 0), so behaviour is unchanged, but the definitions
# now agree.
g_D = np.array([[0., 0., 0., 0.], [1., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]])
h_D = np.array([[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]])
a_D = 1  # reference radius (planet radii)
dipole = (a_D, g_D, h_D)
def B_timer(funcname, numits):
    """
    Benchmark the field evaluator or the RK4 stepper and print the mean
    wall-clock time per call.

    PARAMS
    -----------------------------------------------------------------
    funcname - str; "B" to time the field evaluator, "RK4" to time the stepper.
    numits   - int; number of sample coordinates (and calls) to average over.

    Raises ValueError for any other funcname.
    """
    # Sample points sweeping radius and (co)latitude/longitude together.
    co_ords = [[r, th, ph] for r, (th, ph) in
               zip(np.linspace(0.1, 100, numits),
                   zip(np.linspace(0.1, np.pi-0.1, numits),
                       np.linspace(0.1, np.pi-0.1, numits)))]
    if funcname == "B":
        # Fix: original referenced `all_funcs.B`, a NameError — this file does
        # `from all_funcs import *`, so names are bound bare.
        func = B
        args = (dipole,)
    elif funcname == "RK4":
        func = RK4
        # Fix: original called `field_calculator.B`, but that import is
        # commented out at the top of the file.
        B_0 = B([1, 0.3, 0.], dipole)
        args = (B_0, 0.01, dipole)
    else:
        # Fix: previously fell through with `args` unbound -> UnboundLocalError.
        raise ValueError(f"Unknown funcname: {funcname!r}")
    t = 0
    for p in co_ords:
        t_0 = time.perf_counter()  # monotonic timer, preferred for intervals
        func(p, *args)
        t += (time.perf_counter() - t_0)
    print(f"{funcname} TIME: ", t/numits)
def wrapper_timer(numits):
    """
    Benchmark the full field-line tracer over `numits` traces from a fixed
    start point and print the mean wall-clock time per trace.
    """
    p_0 = [1, 0.1, 0]
    t = 0
    for _ in range(numits):
        t_0 = time.perf_counter()  # monotonic timer, preferred for intervals
        # Fix: original called `all_funcs.field_trace`, a NameError under
        # `from all_funcs import *` (the module name itself is never imported).
        field_trace(p_0, dipole, 0.01, 100000)
        t += time.perf_counter() - t_0
    print("Wrapper TIME: ", t/numits)
# functimer(multilines, (50,), 5)
# # B_timer("RK4", 100000)
# # B_timer("B", 100000)
# # B_timer("B_numba", 100000)
# wrapper_timer(10)
|
{"/high_res_all_moons.py": ["/all_funcs.py"], "/footpoint_analysis.py": ["/all_funcs.py"], "/planet_testing.py": ["/all_funcs.py"], "/unit_speed_tests.py": ["/all_funcs.py"], "/convergence_tests.py": ["/all_funcs.py"], "/dipole_testing.py": ["/all_funcs.py"], "/satellite_footpoints.py": ["/all_funcs.py"], "/quadrupole_testing.py": ["/all_funcs.py"]}
|
11,416
|
jjwindow/PyFields
|
refs/heads/main
|
/convergence_tests.py
|
from all_funcs import *
def convergence_test(moon, pos, n_min, n_max, step):
    """
    Sweep the number of random fieldlines from n_min to n_max (inclusive,
    in increments of `step`) at a fixed orbital position `pos` of `moon`,
    recording the mean angular, latitudinal and longitudinal footpoint
    deviations for both forward- and backward-traced lines.

    RETURNS
    -------
    n_array plus six parallel lists:
    (n_array, ang_f, ang_b, lat_f, lat_b, long_f, long_b)
    """
    n_array = np.arange(n_min, n_max+1, step)
    ang_f, lat_f, long_f = [], [], []
    ang_b, lat_b, long_b = [], [], []
    # Accumulators in the same order angular_deviation() returns its results.
    accumulators = (ang_f, lat_f, long_f, ang_b, lat_b, long_b)
    for n in n_array:
        fp_f, fp_b, true_f, true_b = random_footpoints(n, moon, pos, True)
        devs = angular_deviation([(pos, fp_f)], [(pos, true_f)],
                                 [(pos, fp_b)], [(pos, true_b)])
        # Each element of `devs` is a single-entry list of (pos, mean) tuples;
        # keep only the mean value.
        for acc, dev_list in zip(accumulators, devs):
            acc.append(dev_list[0][1])
    return n_array, ang_f, ang_b, lat_f, lat_b, long_f, long_b
# Sweep parameters for the convergence test (see convergence_test above).
n_min = 20
n_max = 250
step = 10
# arrs = convergence_test('titania', [17.07, np.pi/2, np.pi], n_min, n_max, step)
# File stems for each returned array, in the same order as the return tuple.
_fpaths = ['arange', 'angdevs_f', 'angdevs_b', 'latdev_f', 'latdev_b', 'longdev_f', 'longdev_b']
fpaths = ['Titania/' + path + f'_{n_min}_{n_max}_{step}_phi_pi.npy' for path in _fpaths]
# for arr, path in zip(list(arrs), fpaths):
#     with open(path, 'wb') as file:
#         np.save(file, arr)
# Reload previously-saved sweep results rather than recomputing them.
# NOTE(review): requires the Titania/ directory with matching .npy files.
with open(fpaths[0], 'rb') as file:
    n_array = np.load(file, allow_pickle=True)
with open(fpaths[1], 'rb') as file:
    angdevs_f = np.load(file, allow_pickle=True)
with open(fpaths[2], 'rb') as file:
    angdevs_b = np.load(file, allow_pickle=True)
with open(fpaths[3], 'rb') as file:
    latdev_f = np.load(file, allow_pickle=True)
with open(fpaths[4], 'rb') as file:
    latdev_b = np.load(file, allow_pickle=True)
with open(fpaths[5], 'rb') as file:
    longdev_f = np.load(file, allow_pickle=True)
with open(fpaths[6], 'rb') as file:
    longdev_b = np.load(file, allow_pickle=True)
# n_array = arrs[0]
# for i in range(1, len(arrs)):
#     plt.plot(n_array, arrs[i], label = _fpaths[i])
def forward_backward_plots():
    """
    Plot squared latitude/longitude footpoint deviations against the number
    of random fieldlines, one panel for field into the planet (forward) and
    one for field out of the planet (backward).

    Reads the module-level arrays loaded above (n_array, latdev_f/b,
    longdev_f/b).  Alternative groupings are kept commented out below.
    """
    # Make plot of forward and backward footpoint deviations
    # for ang. dev, lat, long. Comment/uncomment as necessary.
    fig, axs = plt.subplots(2, 1, sharex=True)
    # Invisible full-figure axes carrying the shared labels.
    fig.add_subplot(111, frameon=False)
    plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
    plt.xlabel("No. random fieldlines")
    plt.ylabel(r"Ang. deviation squared ($^{\circ}$)")
    # plt.ylabel("Angular deviation (rad)")
    # axs[0].plot(n_array, angdevs_f, label = 'Forward')
    # plt.plot(n_array, angdevs_b, label = 'Backward')
    # GROUP BY LAT-LONG
    # axs[0].set_title("Latitude")
    # axs[0].plot(n_array, [lat**2 for lat in latdev_f], label = "Forward")
    # axs[0].plot(n_array, [lat**2 for lat in latdev_b], label = "Backward")
    # axs[0].legend()
    # axs[1].set_title("Longitude")
    # axs[1].plot(n_array, [long**2 for long in longdev_f], label = "Forward")
    # axs[1].plot(n_array, [long**2 for long in longdev_b], label = "Backward")
    # axs[1].legend()
    # GROUP BY FORWARD-BACKWARD
    axs[0].set_title("Field Into Planet")
    axs[0].plot(n_array, [lat**2 for lat in latdev_f], label = "Latitude")
    axs[0].plot(n_array, [long**2 for long in longdev_f], label = "Longitude")
    axs[0].legend()
    axs[1].set_title("Field Out Of Planet")
    axs[1].plot(n_array, [lat**2 for lat in latdev_b], label = "Latitude")
    axs[1].plot(n_array, [long**2 for long in longdev_b], label = "Longitude")
    axs[1].legend()
    # plt.ylabel("Longitudinal deviation (rad)")
    # plt.plot(n_array, longdev_f, label = "Forward")
    # plt.plot(n_array, longdev_b, label = "Backward")
    plt.show()
def triple_angle_plots():
    """
    Plot mean angular, latitudinal and longitudinal footpoint deviations
    against the number of random fieldlines: one panel for fieldlines into
    the planet (forward), one for fieldlines out (backward).

    Reads the module-level arrays loaded above (n_array, angdevs_f/b,
    latdev_f/b, longdev_f/b).
    """
    # Make plot of ang, lat, long deviations for both forwards
    # and backwards separately.
    fig, axs = plt.subplots(2,1, sharex=True)
    # Invisible full-figure axes carrying the shared labels.
    fig.add_subplot(111, frameon=False)
    # hide tick and tick label of the big axis
    plt.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
    plt.xlabel("Num. Random Fieldlines")
    plt.ylabel("Mean deviation from non-random footpoint (rad)")
    axs[0].plot(n_array, angdevs_f, label="Angle")
    axs[0].plot(n_array, latdev_f, label = "Latitude")
    axs[0].plot(n_array, longdev_f, label = "Longitude")
    axs[0].set_title("Fieldlines into planet")
    axs[0].legend()
    axs[1].plot(n_array, angdevs_b, label="Angle")
    axs[1].plot(n_array, latdev_b, label = "Latitude")
    axs[1].plot(n_array, longdev_b, label = "Longitude")
    axs[1].set_title("Fieldlines out of planet")
    axs[1].legend()
    plt.show()
# triple_angle_plots()
# Script entry: render the forward/backward squared-deviation comparison.
forward_backward_plots()
|
{"/high_res_all_moons.py": ["/all_funcs.py"], "/footpoint_analysis.py": ["/all_funcs.py"], "/planet_testing.py": ["/all_funcs.py"], "/unit_speed_tests.py": ["/all_funcs.py"], "/convergence_tests.py": ["/all_funcs.py"], "/dipole_testing.py": ["/all_funcs.py"], "/satellite_footpoints.py": ["/all_funcs.py"], "/quadrupole_testing.py": ["/all_funcs.py"]}
|
11,417
|
jjwindow/PyFields
|
refs/heads/main
|
/all_funcs.py
|
"""
PyFields all_funcs.py
01.11.2020
All modules in PyFields combined into one file to improve execution time.
"""
import numpy as np
import numba
from tqdm import tqdm
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pandas as pd
import warnings
import os.path
from palettable.wesanderson import Aquatic2_5, Cavalcanti_5
from numpy.linalg import norm
import matplotlib as mat
import os
######################### GLOBAL DEFINITIONS #############################
# Spherical-harmonic expansion coefficients g[n][m], h[n][m] (indexed by
# degree n, order m) and reference radius a for each planet, plus published
# uncertainties used by random_footpoints().  Radii are scaled to 1 planet
# radius.
# Uranus Coefficients
g_U = np.array([[0., 0., 0.], [0.11893, 0.11579, 0.], [-0.06030, -0.12587, 0.00196]])
h_U = np.array([[0., 0., 0.], [0., -0.15648, 0.], [0., 0.06116, 0.04759]])
g_U_err = np.array([[0., 0., 0.], [0.001, 0.003, 0.], [0.00550, 0.00610, 0.005]])
h_U_err = np.array([[0., 0., 0.], [0., 0.0017, 0.], [0., 0.00360, 0.00810]])
a_U = 1
uranus = (a_U, g_U, h_U)
uranus_uncert = (a_U, g_U_err, h_U_err)
# Neptune Coefficients
g_N = np.array([[0., 0., 0.], [0.09732, 0.03220, 0.], [0.07448, 0.00664, 0.04499]])
h_N = np.array([[0., 0., 0.], [0., -0.09889, 0.], [0., 0.11230, -0.00070]])
g_N_err = np.array([[0., 0., 0.], [0.002, 0.0036, 0.], [0.0113, 0.0112, 0.0084]])
# NOTE(review): h_N_err[2][2] is negative (-0.0034); an uncertainty is
# normally positive.  Harmless where it is used (multiplied by a random
# factor in [-1, 1]) but worth confirming against the source data.
h_N_err = np.array([[0., 0., 0.], [0., 0.0011, 0.], [0., 0.003, -0.0034]])
a_N = 1
neptune = (a_N, g_N, h_N)
neptune_uncert = (a_N, g_N_err, h_N_err)
# Dipole coefficients: only g_1^0 = 1 non-zero.
g_D = np.array([[0., 0., 0., 0.], [1., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]])
h_D = np.array([[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]])
a_D = 1
dipole = (a_D, g_D, h_D)
# Quadrupole coefficients: only g_2^0 = 1 non-zero.
g_Q = np.array([[0., 0., 0., 0.], [0., 0., 0., 0.], [1., 0., 0., 0.], [0., 0., 0., 0.]])
h_Q = np.array([[0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.], [0., 0., 0., 0.]])
a_Q = 1
quadrupole = (a_Q, g_Q, h_Q)
########################## FIELD CALCULATOR ###############################
# Field component functions
@numba.njit
def _B_rad_(r, th, ph, a, g, h):
    """
    Radial magnetic field component. Formula from Connerney (1993).

    r, th, ph : spherical coordinates (radius in planet radii a).
    a, g, h   : reference radius and harmonic coefficient tables g[n][m], h[n][m].

    `lgd` tabulates the associated Legendre functions of cos(th) up to
    degree 3 (normalisation appears to be Schmidt semi-normalised — confirm
    against Connerney 1993).  Only degrees n = 0..2 are summed below, so the
    n = 3 row is unused here.
    """
    lgd = [[1.,0.,0.,0.],
           [np.cos(th),np.sin(th),0.,0.],
           [(3/2)*((np.cos(th))**2-(1/3)),(3**0.5)*(np.cos(th))*(np.sin(th)),((3**0.5)/2)*(np.sin(th))**2,0.],
           [(5/2)*(np.cos(th))*((np.cos(th))**2 - (9/15)),((5*(3**0.5))/(2**1.5))*(np.sin(th))*((np.cos(th))**2 - (3/15)),
            ((15**0.5)/2)*(np.cos(th))*((np.sin(th))**2),((5**0.5)/(2**1.5))*((np.sin(th))**3)]]
    B_rad_result= .0
    # Double sum over degree n and order m of the harmonic expansion.
    for n in range(0,3):
        for m in range(0, n+1):
            B_rad_result += (n+1)*((a/r)**(n+1))*(g[n][m]*np.cos(m*ph) + h[n][m]*np.sin(m*ph))*lgd[n][m]
    return B_rad_result
@numba.njit
def _B_theta_(r, th, ph, a, g, h):
    """
    Latitudinal magnetic field component. Formula from Connerney (1993).

    `lgd_prime` tabulates the theta-derivatives of the Legendre functions
    used in _B_rad_, here only up to degree 2 (matching the n = 0..2 sum).
    """
    lgd_prime = [[0., 0., 0.],
                 [-np.sin(th), np.cos(th), 0.],
                 [-(3/2)*np.sin(2*th),(3**0.5)*((np.cos(th))**2 - (np.sin(th))**2),((3**0.5)/2)*(np.sin(2*th))]]
    B_theta_result= .0
    # Double sum over degree n and order m; note the overall minus sign on
    # the derivative term.
    for n in range(0,3):
        for m in range(0, n+1):
            B_theta_result += -(a/r)**(n+2)*(g[n][m]*np.cos(m*ph) + h[n][m]*np.sin(m*ph))*lgd_prime[n][m]
    return B_theta_result
@numba.njit
def _B_phi_(r, th, ph, a, g, h):
    """
    Longitudinal magnetic field component. Formula from Connerney (1993).

    Divides by sin(th), so this component is singular at the poles
    (th = 0 or pi); callers avoid launching lines exactly at the poles.
    """
    lgd = [[1.,0.,0.,0.],
           [np.cos(th),np.sin(th),0.,0.],
           [(3/2)*((np.cos(th))**2-(1/3)),(3**0.5)*(np.cos(th))*(np.sin(th)),((3**0.5)/2)*(np.sin(th))**2,0.],
           [(5/2)*(np.cos(th))*((np.cos(th))**2 - (9/15)),((5*(3**0.5))/(2**1.5))*(np.sin(th))*((np.cos(th))**2 - (3/15)),
            ((15**0.5)/2)*(np.cos(th))*((np.sin(th))**2),((5**0.5)/(2**1.5))*((np.sin(th))**3)]]
    B_phi_result= .0
    # m = 0 terms vanish (factor m), so only tesseral terms contribute.
    for n in range(0,3):
        for m in range(0, n+1):
            B_phi_result += (1/(np.sin(th)))*m*(a/r)**(n+2)*(g[n][m]*np.sin(m*ph) - h[n][m]*np.cos(m*ph))*lgd[n][m]
    return B_phi_result
def B(p, field_coeffs):
    """
    Evaluate the magnetic field vector at the spherical point
    p = (r, theta, phi) for harmonic coefficients field_coeffs = (a, g, h).

    RETURNS
    -------
    np.array([B_r, B_theta, B_phi]) — spherical field components.
    """
    radius, colat, azimuth = p[0], p[1], p[2]
    a, g, h = field_coeffs
    components = (
        _B_rad_(radius, colat, azimuth, a, g, h),
        _B_theta_(radius, colat, azimuth, a, g, h),
        _B_phi_(radius, colat, azimuth, a, g, h),
    )
    return np.array(components)
############################## STEPPER #################################
@numba.njit
def B_mag(B, r, th):
    """
    Magnitude of a spherical-components field vector B = (B_r, B_th, B_ph)
    at radius r and colatitude th.  The angular components are scaled by the
    metric factors r and r*sin(th) before the Euclidean norm is taken.
    """
    b_r = B[0]
    b_th = r * B[1]
    b_ph = r * np.sin(th) * B[2]
    return np.sqrt(b_r**2 + b_th**2 + b_ph**2)
def RK4(p_0, B_0, ds, field_coeffs, back = False):
    """
    Advance one fourth-order Runge-Kutta step of arc length `ds` along the
    field line through p_0 (spherical coordinates), where B_0 is the field
    vector already evaluated at p_0.

    PARAMS
    ------
    p_0          : current position [r, th, ph].
    B_0          : field vector at p_0 (avoids re-evaluating it here).
    ds           : step size.
    field_coeffs : (a, g, h) harmonic coefficients passed through to B().
    back         : bool; step against the field direction when True.

    RETURNS
    -------
    (p_next, B_next) — the new position and the field vector there.
    """
    def _unit(vec, point):
        # Normalise a field vector to a unit tangent at `point`.
        return vec / B_mag(vec, point[0], point[1])

    k1 = _unit(B_0, p_0)
    # Half-step predictor and two mid-point corrections, then a full step.
    mid_a = p_0 + 0.5*ds*k1
    k2 = _unit(B(mid_a, field_coeffs), mid_a)
    mid_b = p_0 + 0.5*ds*k2
    k3 = _unit(B(mid_b, field_coeffs), mid_b)
    end = p_0 + ds*k3
    k4 = _unit(B(end, field_coeffs), end)
    increment = ds*(k1 + 2*k2 + 2*k3 + k4)/6
    p_next = p_0 - increment if back else p_0 + increment
    B_next = B(p_next, field_coeffs)
    return p_next, B_next
################################### WRAPPER ###################################
def field_trace(start_pos, field_coeffs, ds, max_iter, axes = "Cartesian", back = False):
    """
    Function to trace a field line given a starting positon.
    PARAMS
    ---------------------------------------------------------------------------------------------------
    start_pos       -   list or np.array; starting position in spherical coordinates from which to
                        trace the field.
    field_coeffs    -   tuple of lists or np.arrays (2D); 2D array of coefficients for the spherical
                        harmonic expansion. Must be of form (a, g, h).
    ds              -   float; stepsize to trace the field with, taken as a constant for all steps
                        currently with intention of adding variable step-size to the tracer.
    max_iter        -   int; maximum number of iterations.
    axes            -   str; "Cartesian" to return coordinate lists for plotting, anything else to
                        return the raw spherical point/field arrays.
    back            -   bool; trace against the field direction when True.
    RETURNS
    ----------------------------------------------------------------------------------------------------
    p_arr           -   np.array (2D); array of (r, th, ph) coordinates which fall on the traced field
                        line.
    B_arr           -   np.array (2D); array of field vectors for every point in p_arr.
    Returns None if the iteration cap is hit or fewer than 3 points were traced.
    """
    B_0 = B(start_pos, field_coeffs)
    p_0 = start_pos
    # Pre-allocate max_iter rows of zeros; unused rows are filtered out below.
    p_arr, B_arr = np.asarray([np.zeros(3) for _ in range(max_iter)]), np.asarray([np.zeros(3) for _ in range(max_iter)])
    p_arr[0] = p_0
    B_arr[0] = B_0
    it = 1
    # March along the line until it re-enters the planet (r < 1) or the cap hits.
    while (p_0[0] >= 1.) and (it < max_iter):
        p_next, B_next = RK4(p_0, B_0, ds, field_coeffs, back)
        p_arr[it] = p_next
        B_arr[it] = B_next
        p_0, B_0 = p_next, B_next
        it += 1
    iter_flag = (it == max_iter)
    if (iter_flag):
        # Line never closed within the iteration budget: report failure.
        return None
    # Drop the unused all-zero rows, and [:-1] discards the final point (the
    # first one below the surface).  NOTE(review): np.any() filtering would
    # also drop a genuine all-zero point, should one ever occur.
    p_arr = np.asarray([p for p in p_arr if np.any(p)])[:-1]
    B_arr = np.asarray([b for b in B_arr if np.any(b)])[:-1]
    # iter_flag is always False here (handled above); kept for clarity.
    if ((len(p_arr) < 3) or iter_flag):
        return None
    else:
        if axes == "Cartesian":
            # Deliberate swap: the returned "y" is the polar-axis (r*cos th)
            # coordinate, so 2-D plots of (x, y) show the meridional plane.
            x, z, y = spherical2cartesian(p_arr)
            return x, y, z
        else:
            return p_arr, B_arr
def spherical2cartesian(p_arr):
    """
    Converts (2d) list of spherical coordinates to 3 (1d) lists of cartesian
    coordinates for use in plotting functions.

    The z-axis is the polar axis: x = r sin(th) cos(ph),
    y = r sin(th) sin(ph), z = r cos(th).

    PARAMS
    ---------------------------------------------------------------------------------------
    p_arr   -   list or numpy array (2D); list of [r, theta, phi] coordinates.
                (Fix: the previous implementation indexed with p_arr[:, 0] and
                therefore failed on plain lists despite documenting them;
                iterating rows supports both.)
    RETURNS
    ---------------------------------------------------------------------------------------
    x, y, z -   lists (1D); list of x-, y-, and z-coordinates (respectively) in Cartesian
                axes corresponding to the same points in space defined by p_arr.
    """
    x_list, y_list, z_list = [], [], []
    for r, theta, phi in p_arr:
        sin_th = np.sin(theta)
        x_list.append(r*sin_th*np.cos(phi))
        y_list.append(r*sin_th*np.sin(phi))
        z_list.append(r*np.cos(theta))
    return x_list, y_list, z_list
def multilines(phi, num, th_min = 0, th_max = 2*np.pi, coeffs = dipole, ds = 0.01, maxits = 100000, plot = True):
    """
    Trace `num` field lines at fixed azimuth `phi`, launched from colatitudes
    equally spaced over [th_min, th_max) (endpoint excluded).  Lines are
    computed with harmonic coefficients `coeffs`, step size `ds` and an
    iteration cap of `maxits`.

    With plot=True, each line is drawn in the meridional plane (red when the
    line ends below its start, blue otherwise); call plt.show() afterwards.
    With plot=False, the traced (x, y, z) tuples are returned instead.
    Launch angles exactly at the poles (0, pi, 2*pi) are skipped.
    """
    launch_angles = np.linspace(th_min, th_max, num, endpoint=False)
    traced = []
    progress_desc = f"THETA {round(th_min/np.pi, 2)}*pi TO {round(th_max/np.pi, 2)}*pi"
    with tqdm(total = len(launch_angles), desc=progress_desc) as progress:
        for theta in launch_angles:
            # Skip polar launches, where the field expression is singular.
            if theta != 0 and theta != np.pi and theta != 2*np.pi:
                line = field_trace([1., theta, float(phi)], coeffs, ds, maxits)
                # field_trace returns None when the trace failed to close.
                if line is not None:
                    (x, y, z) = line
                    if plot:
                        colour = 'r' if y[0] > y[-1] else 'b'
                        plt.plot(x, y, color = colour)
                    else:
                        traced.append(line)
            progress.update()
    return traced
def multiline_3D(num_th, phi_array, th_min, th_max, coeffs = dipole, ds = 0.01, maxits = 100000):
    """
    Trace and draw field lines in 3D: for every azimuth in `phi_array`,
    launch `num_th` lines with colatitudes between th_min and th_max and
    plot them on a fresh 3D axes.  Call plt.show() afterwards.
    """
    fig = plt.figure()
    ax = plt.axes(projection = '3d')
    ax.set_xlabel('x')
    ax.set_ylabel('y')
    ax.set_zlabel('z')
    line_colour = Aquatic2_5.mpl_colors[0]
    for azimuth in phi_array:
        lines = multilines(azimuth, num_th, th_min=th_min, th_max=th_max,
                           coeffs=coeffs, ds=ds, maxits=maxits, plot=False)
        for x, y, z in lines:
            ax.plot3D(x, y, z, color=line_colour)
##################### ANALYTIC COMPARISONS #######################
def _analytic_field_point_(th_i, th, field = 'dipole'):
"""
Calculates (x,y) coordinate at th for a field line whith starting coordinate (t, th, ph) = (1, th, 0).
Also returns rflag, which is True if r <= 1 and false otherwise. This is to terminate calculation.
"""
if field == 'dipole':
def x(th_i, th):
return ((np.sin(th)**3)/np.sin(th_i)**2)
def y(th_i, th):
return ((np.sin(th)**2 * np.cos(th))/np.sin(th_i)**2)
else:
def x(th_i, th):
return (np.sin(th_i)**2 * np.cos(th_i))**(-0.5) * np.sqrt(np.sin(th)**2 * np.cos(th)) * np.sin(th)
def y(th_i, th):
return (np.sin(th_i)**2 * np.cos(th_i))**(-0.5) * np.sqrt(np.sin(th)**2 * np.cos(th)) * np.cos(th)
x, y = x(th_i, th), y(th_i, th)
rflag = (round((x**2 + y**2), 6) < 1) # Boolean flag - is radial coord < 1?
# print(rflag)
return x, y, rflag
def analytic_field_line(th_i, ds, field = 'dipole'):
    """
    Build the list of (x, y) points along the analytic field line launched
    at colatitude th_i, sampling colatitude in steps of ds until the line
    re-enters the planet (r < 1) or the sweep reaches 2*pi.  The first
    sub-surface point is included, matching the numerical tracer's output.
    """
    points = []
    for th in np.arange(th_i, 2*np.pi, step=ds):
        x, y, inside = _analytic_field_point_(th_i, th, field)
        points.append((x, y))
        if inside:
            break
    return points
def _analytic_field_plot(th_min, th_max, numlines, ds, field = 'dipole'):
    """
    Overlay `numlines` analytic field lines (dashed black) with launch
    colatitudes equally spaced over [th_min, th_max) (endpoint excluded).
    """
    for launch in np.linspace(th_min, th_max, numlines, endpoint=False):
        xs, ys = map(list, zip(*analytic_field_line(launch, ds, field)))
        plt.plot(xs, ys, '--', color = 'k')
################# COORDINATE TRANSFORM ########################
def cartesian2latlong(x, y, z):
    """
    Convert planet-centred Cartesian coordinates (rotation axis along z)
    to latitude and longitude for 2D projection plots.

    PARAMS
    -----------------------------------------------------------------------
    x, y, z - float, list or numpy array; coordinates in the planet-centred
              Cartesian system.  Lists are converted to arrays so the
              arithmetic vectorises; floats and arrays pass straight through.
    RETURNS
    -----------------------------------------------------------------------
    lat, longt - latitude = arcsin(z/r) and longitude = arctan2(y, x),
                 both in degrees.
    """
    coords = [np.asarray(c) if isinstance(c, list) else c for c in (x, y, z)]
    x, y, z = coords
    radius = np.sqrt(x**2 + y**2 + z**2)
    rad2deg = 180/(np.pi)
    lat = np.arcsin(z/radius)*rad2deg
    longt = np.arctan2(y, x)*rad2deg
    return lat, longt
##################### MOON SELECTOR ###############################
# Satellite property table, loaded once at import time and indexed by
# (lower-case) moon name.  NOTE(review): requires satellite_properties.csv
# in the current working directory — importing this module fails without it.
df = pd.read_csv('satellite_properties.csv')
df.set_index('Name', inplace=True)
def moon_selector(moon, *args):
    """
    Returns desired parameters for a given moon.
    PARAMS
    ------------------------------------------------------------------
    Possible values of 'moon':'
    'Miranda' - 'Ariel' - 'Umbriel' - 'Titania' - 'Oberon' - 'Triton'
    (not case sensitive.)
    *args:
    Passing no args returns full properties dictionary for that moon.
    Otherwise, *args are keys to return properties for the moon. Each
    argument should be a string.
    Valid args are:
    'Parent'    -   'inc'       -    'R'    -   'a'     -   'T'
    (parent    (inclination,      (Radius,   (scaled     (orbital
    planet, str)  radians)           km)      radius)   time period)
    'coeffs'    -   'uncert'
    Spherical harmonic coefficients for parent planet / associated
    uncertainties.
    (tuple of form (a, g, h).)
    'parent_day'
    length of a day on the parent planet in units of Earth days
    RETURNS
    -------------------------------------------------------------------
    out_dict    -   dict; of type {'arg' : arg_value, ...} for all 'arg'
                    passed as arguments.
    Note - Invalid arguments do not raise an error but deploy a warning.
    """
    if not isinstance(moon, str):
        raise TypeError("Positional argument 'moon' must be of type string.")
    moon = moon.lower()
    if moon not in df.index.values:
        raise ValueError("'moon' must be one of the 5 major Uranian moons or 'triton'.")
    moon_dict = df.loc[moon].to_dict()
    parent = moon_dict['Parent']
    # Attach the parent planet's harmonic coefficients and uncertainties.
    # Fix: previously used opaque boolean-arithmetic tuple concatenation;
    # explicit branching is equivalent (including the empty-tuple fallback
    # for an unrecognised parent) and readable.
    if parent == 'Uranus':
        moon_dict['coeffs'], moon_dict['uncert'] = uranus, uranus_uncert
    elif parent == 'Neptune':
        moon_dict['coeffs'], moon_dict['uncert'] = neptune, neptune_uncert
    else:
        moon_dict['coeffs'], moon_dict['uncert'] = (), ()
    if not len(args):
        return moon_dict
    # Preserve argument order; silently skip unknown keys but warn below.
    out_tup = tuple(moon_dict[arg] for arg in args if arg in moon_dict)
    bad_args = [arg for arg in args if arg not in moon_dict]
    if len(bad_args):
        # Fix: corrected 'satellite_proprties' typo in the warning text.
        warnings.warn(f'The following arguments are not in satellite_properties and were not returned:\n {bad_args}')
    return out_tup
######################### TIMER #################################
import time
def functimer(func, args, n):
    """
    Times a function n times, displays and returns the average time taken.
    'args' are arguments to pass to the function being timed and should be
    a tuple to be unpacked.

    PARAMS
    ----------------------------------------------------------------------
    func - callable; the function to time.
    args - tuple; positional arguments unpacked into each call.
    n    - int; number of repetitions to average over (must be > 0).
    RETURNS
    ----------------------------------------------------------------------
    mean - float; average wall-clock seconds per call.
    """
    t = 0
    for _ in range(n):
        # Fix: use the monotonic high-resolution timer for intervals; the
        # previous time.time() can jump with system clock adjustments.
        t_0 = time.perf_counter()
        func(*args)
        t += time.perf_counter() - t_0
    mean = t/n
    print(f"{func.__name__} Time ({n} run avg):\n{mean}")
    return mean
######################## COEFFICIENT UNCERTAINTIES ######################'''''
def random_footpoints(n, moon, pos, trueTrace = False):
    """
    A function that generates random magnetic field footpoints within the bounds
    of the uncertainties of the magnetic field coefficients. New spherical harmonic
    expansion coefficients are calculated using a pseudorandom number generator,
    and n fieldlines are traced using these for a given moon-planet system at a
    fixed position. The arrays of footpoints for all fieldlines are returned.
    PARAMS
    --------------------------------------------------------------------------------
    n           -   int; number of random fieldlines to calculate.
    moon        -   str; name of the moon to calculate footpoint uncertainties for.
    pos         -   list; [r, theta, phi] start position of the fieldlines.
    trueTrace   -   bool; also trace the fieldline using the accepted g, h coeffs.
    RETURNS
    ---------------------------------------------------------------------------------
    footpoints_f, footpoints_b - lists; of (x, y, z) footpoints of the forward-
                    and backward-traced fieldlines from random coefficients.
    trueFoot_f, trueFoot_b - tuples; footpoints from the accepted coefficients
                    (only returned when trueTrace is True).
    """
    (R, coeffs, uncert) = moon_selector(moon, 'a', 'coeffs', 'uncert')
    # start_pos = [R, np.pi/2, phi]
    (a, g, h) = coeffs
    (a, g_err, h_err) = uncert
    # Trace the accepted fieldline if desired
    # NOTE(review): the accepted trace uses ds=0.0075/133000 iterations while
    # the random traces below use 0.005/200000 — confirm this is intentional.
    # field_trace can return None; the unpack below would then raise TypeError.
    if trueTrace:
        # x, y, z = field_trace(start_pos, (a, g, h), 0.005, 200000)
        x, y, z = field_trace(pos, coeffs, 0.0075, 133000)
        trueFoot_f = (x[-1], y[-1], z[-1])
        x, y, z = field_trace(pos, coeffs, 0.0075, 133000, back=True)
        trueFoot_b = (x[-1], y[-1], z[-1])
    # initialise footpoints array
    footpoints_f = [0. for _ in range(n)]
    footpoints_b = [0. for _ in range(n)]
    # with tqdm(total=n, desc=f"{moon}, phi={pos[2]}") as bar:
    for k in range(n):
        g_new = np.zeros((3,3))
        h_new = np.zeros((3,3))
        for i in range(3):
            for j in range(3):
                # Ignore null coefficients
                # NOTE(review): h[i][j] is only perturbed when g[i][j] != 0;
                # a non-zero h paired with a zero g would be skipped.  The
                # current planet tables never hit that case — confirm.
                if g[i][j] == 0.:
                    pass
                else:
                    # Generate random num between -1 and 1
                    r_1 = (np.random.random()-0.5)*2
                    # Use random num as multiplier on uncertainty, add
                    # to coefficients
                    g_new[i][j] = g[i][j] + g_err[i][j]*r_1
                    # Repeat with different randnum for h coeffs
                    r_2 = (np.random.random() - 0.5)*2
                    h_new[i][j] = h[i][j] + h_err[i][j]*r_2
        # Rebinds `coeffs` to the perturbed set (the accepted set was already
        # consumed above, so this is safe).
        coeffs = (a, g_new, h_new)
        # Trace fieldline with new set of coefficients
        x, y, z = field_trace(pos, coeffs, 0.005, 200000)
        # Take fieldline footpoint
        footpoints_f[k] = (x[-1], y[-1], z[-1])
        x, y, z = field_trace(pos, coeffs, 0.005, 200000, back=True)
        footpoints_b[k] = (x[-1], y[-1], z[-1])
        # bar.update()
    if trueTrace:
        return footpoints_f, footpoints_b, trueFoot_f, trueFoot_b
    else:
        return footpoints_f, footpoints_b
######################## ORBIT CALCULATION ########################
def orbit(moon, num, num_orbits, period = 'relative', relative = False): #num_orbits is how many sidereal orbits #num gives num of points in one sidereal orbit
    """
    Function to generate coordinates of an orbital path of a given satellite around its parent.
    Can calculate orbits in the sidereal rest frame or in the planet's rest frame.
    PARAMS
    -----------------------------------------------------------------------------------
    moon        -   str; name of one of the 5 Uranian moons, or Triton.
    num         -   int; number of time segments to plot per orbit, i.e - time resolution.
    num_orbits  -   float or int; number of orbits to trace. Only makes a difference for
                    inclined orbits with relative = True.
    period      -   str; 'relative' (default) spaces the samples over one synodic
                    period, 'sidereal' over one sidereal period. Anything else raises.
    relative    -   bool; if false, orbit calculated is in sidereal rest frame, i.e - no
                    consideration of planetary rotation. If true, then planetary rotation
                    is calculated and orbit given is the path seen from a frame co-rotating
                    with the parent planet.
    RETURNS
    ------------------------------------------------------------------------------------
    orbital_points  -   numpy array; array containing num + 1 points in spherical
                        coordinates, determining the orbital path. Each point is a list
                        length 3, [r, theta, phi].
    T_arr           -   numpy array; the matching time values for each point.
    """
    # Collect moon parameters (coeffs is fetched but unused here).
    (R, coeffs, period_moon, period_plan, incl) = moon_selector(moon, 'a', 'coeffs', 'T', 'parent_day', 'inc')
    incl = (np.pi/180) * incl # convert inclination to radians
    omega_moon = (2*np.pi)/period_moon # period -> frequency
    omega_plan = (2*np.pi)/period_plan
    if period.lower() == 'relative':
        # Synodic period of the moon as seen from the rotating planet.
        p = period_moon*period_plan/abs(period_moon-period_plan)
    elif period.lower() == 'sidereal':
        p = period_moon
    else:
        raise Exception("Orbit: period arg must be 'relative' or 'sidereal'; relative by default.")
    t_step = p/num
    n = int(num*num_orbits) # number of points to plot - int() covers non-whole num_orbits.
    orbital_points= [0 for i in range(n+1)] # initialise output list
    T_arr = [i*t_step for i in range(n+1)]
    for i, t in enumerate(T_arr):
        # angular argument of satellite in the plane of its orbit, more correctly called the 'argument of latitude'.
        phi_moon_orbit = omega_moon * t
        # from Adam's eqns:
        theta = np.arccos(np.cos(phi_moon_orbit)*np.sin(np.pi-incl))
        phi_moon_eq = np.arctan2(-1*np.sin(phi_moon_orbit), np.cos(phi_moon_orbit)*np.cos(np.pi - incl))
        # phi_moon_eq is latitude coordinate in equatorial plane.
        if phi_moon_eq < 0:
            # handles negative arctan2 output
            phi_moon_eq += 2*np.pi
        if relative:
            # changes to planet rest frame
            phi = phi_moon_eq - omega_plan * t
        else:
            phi = phi_moon_eq
        # append point to list
        pos = [R, theta, phi]
        orbital_points[i] = pos
    return np.array(orbital_points), np.array(T_arr)
###### Calculating mean angular error ######
def _mean_devs_(footpoints_arr, trueFoot_arr):
    """
    Helper: mean angular, latitudinal and longitudinal deviation of each
    footpoint cloud from its accepted footpoint.

    footpoints_arr - list of (pos, [footpoint, ...]) per orbital position.
    trueFoot_arr   - list of (pos, trueFoot) per orbital position.

    Returns three lists of (pos, mean) tuples (angles in radians, lat/long
    in degrees), one entry per orbital position.
    """
    mean_ang_dev = []
    mean_lat_dev = []
    mean_long_dev = []
    for i, (pos, trueFoot) in enumerate(trueFoot_arr):
        trueLat, trueLongt = cartesian2latlong(*trueFoot)
        ang_dev = []
        lat_dev = []
        long_dev = []
        # pos is rebound from the footpoints entry, matching prior behaviour.
        (pos, fp_arr) = footpoints_arr[i]
        for fp in fp_arr:
            lat, longt = cartesian2latlong(*fp)
            # Absolute angle between footpoint vectors via the dot product.
            ang_dev.append(np.arccos(np.dot(fp, trueFoot)/(norm(fp)*norm(trueFoot))))
            lat_dev.append(trueLat - lat)
            long_dev.append(trueLongt - longt)
        mean_ang_dev.append((pos, np.mean(ang_dev)))
        mean_lat_dev.append((pos, np.mean(lat_dev)))
        mean_long_dev.append((pos, np.mean(long_dev)))
    return mean_ang_dev, mean_lat_dev, mean_long_dev

def angular_deviation(footpoints_f_arr, trueFoot_f_arr, footpoints_b_arr, trueFoot_b_arr):
    """
    Mean deviations of forward- and backward-traced random footpoints from
    the accepted footpoints, per orbital position.

    Fix: the original duplicated an identical loop body for the forward and
    backward sets; both now share _mean_devs_.

    RETURNS
    -------
    Six lists of (pos, mean) tuples:
    mean_ang_dev_f, mean_lat_dev_f, mean_long_dev_f,
    mean_ang_dev_b, mean_lat_dev_b, mean_long_dev_b.
    """
    f_devs = _mean_devs_(footpoints_f_arr, trueFoot_f_arr)
    b_devs = _mean_devs_(footpoints_b_arr, trueFoot_b_arr)
    return (*f_devs, *b_devs)
################## TRACING & SAVING ############################
def trace_full_orbit(moon, num_orbit_points, num_orbits, num_fieldlines):
    """Trace random and true footpoints from every point of *moon*'s orbit.

    Generates the orbit in the planet rest frame, then for each orbital
    position traces ``num_fieldlines`` random field lines plus the true
    (noise-free) line in both directions.

    Returns five sequences: forward footpoints, backward footpoints, true
    forward footpoints, true backward footpoints (each a list of
    ``(pos, result)`` tuples) and the orbit time array.

    Fix: removed the unused duplicate length variable and the
    preallocate-then-index pattern in favour of plain appends.
    """
    orbital_points_arr, T_arr = orbit(moon, num_orbit_points, num_orbits, relative=True)
    footpoints_f_arr = []
    footpoints_b_arr = []
    trueFoot_f_arr = []
    trueFoot_b_arr = []
    for pos in orbital_points_arr:
        footpoints_f, footpoints_b, trueFoot_f, trueFoot_b = random_footpoints(
            num_fieldlines, moon, pos, trueTrace=True)
        footpoints_f_arr.append((pos, footpoints_f))
        footpoints_b_arr.append((pos, footpoints_b))
        trueFoot_f_arr.append((pos, trueFoot_f))
        trueFoot_b_arr.append((pos, trueFoot_b))
    return footpoints_f_arr, footpoints_b_arr, trueFoot_f_arr, trueFoot_b_arr, T_arr
def save_moon_trace(moon, num_orbit_points, num_orbits, num_fieldlines):
    """Trace a full orbit for *moon* and save the arrays under Finals/<moon>/.

    With ``num_fieldlines == 0`` only the true footpoints and times are kept;
    otherwise the random footpoint arrays are saved as well.  File names encode
    the orbit sampling: ``<label>_<num_orbit_points>_<num_orbits>.npy``.

    Fixes: discarded unpacked values now use ``_``; the ``file`` name no longer
    shadows the builtin; output paths are built from the same ``dest`` that is
    created, instead of a parallel hard-coded relative string.
    """
    if num_fieldlines != 0:
        paths = ['footpoints_f', 'footpoints_b', 'trueFoot_f', 'trueFoot_b', 'time']
        all_footpoints = trace_full_orbit(moon, num_orbit_points, num_orbits, num_fieldlines)
    else:
        paths = ['trueFoot_f', 'trueFoot_b', 'time']
        # no random field lines requested: drop the (empty) footpoint arrays
        _, _, *all_footpoints = trace_full_orbit(moon, num_orbit_points, num_orbits, num_fieldlines)
    dest = os.path.join(os.getcwd(), 'Finals', moon)
    os.makedirs(dest, exist_ok=True)
    for path, footpoint in zip(paths, all_footpoints):
        fpath = os.path.join(dest, f"{path}_{num_orbit_points}_{num_orbits}.npy")
        with open(fpath, 'wb') as out:
            np.save(out, footpoint)
def trace_and_save(moon, num_orbit_points, num_orbits, num_fieldlines, t_s = None, t_f = None):
    """Trace random footpoints along *moon*'s orbit and save each orbit
    point's arrays individually under Finals/<moon>/.

    ``t_s``/``t_f`` are meant to restrict tracing to a time window, but that
    path was never implemented: the old code fell through and crashed with a
    ``NameError`` on the unbound ``orbital_points``.  It now raises an
    explicit ``NotImplementedError`` instead.

    Also renames the loop variable that shadowed the ``list`` builtin.
    """
    if (t_s is not None) and (t_f is not None):
        # Partial-orbit tracing is not implemented yet; fail loudly rather
        # than with a confusing NameError further down.
        raise NotImplementedError("partial-orbit tracing (t_s/t_f) is not implemented")
    # Arrays of orbit coordinates and the matching time series
    orbital_points, _time = orbit(moon, num_orbit_points, num_orbits, relative=True)
    dest = os.path.join(os.getcwd(), 'Finals', moon)
    os.makedirs(dest, exist_ok=True)
    paths = ['footpoints_f', 'footpoints_b', 'trueFoot_f', 'trueFoot_b']
    for i, pos in enumerate(orbital_points):
        results = random_footpoints(num_fieldlines, moon, pos, trueTrace=True)
        for path, arr in zip(paths, results):
            fpath = f"Finals/{moon}/{path}_{num_orbit_points}_{num_orbits}_{num_fieldlines}_{i}.npy"
            with open(fpath, mode='wb') as out:
                np.save(out, arr)
|
{"/high_res_all_moons.py": ["/all_funcs.py"], "/footpoint_analysis.py": ["/all_funcs.py"], "/planet_testing.py": ["/all_funcs.py"], "/unit_speed_tests.py": ["/all_funcs.py"], "/convergence_tests.py": ["/all_funcs.py"], "/dipole_testing.py": ["/all_funcs.py"], "/satellite_footpoints.py": ["/all_funcs.py"], "/quadrupole_testing.py": ["/all_funcs.py"]}
|
11,418
|
jjwindow/PyFields
|
refs/heads/main
|
/dipole_testing.py
|
"""
PyFields dipole_testing.py
28.10.2020
Testing model using a dipole
"""
from all_funcs import *
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib as mat
import numpy as np
from tqdm import tqdm
import os.path
########################## DIPOLE TEST ###############################
# multiline_plot(50)
# params = {
# 'axes.labelsize': 14,
# 'font.size': 14,
# 'legend.fontsize': 14,
# 'xtick.labelsize': 12,
# 'ytick.labelsize': 12,
# 'figure.figsize': [8,6]
# }
# plt.rcParams.update(params)
# plt.legend((mat.lines.Line2D([0,0], [1,1], color = 'r'),), ('Traced Dipole, ds = 0.01',))
# plt.xlabel("Distance in x")
# plt.ylabel("Distance in y")
# plt.show()
########################## ERROR CALCULATION ##########################
def dipole_error(num, th_min, th_max, ds, max_iter):
    """Trace dipole field lines from ``num`` starting colatitudes in
    ``[th_min, th_max)`` and measure the angular discrepancy between each
    traced end point and the analytic return colatitude.

    Returns ``(th_values, deltas, lengths)``; entries for failed traces are
    set to NaN (including the corresponding starting angle).
    """
    th_values = np.linspace(th_min, th_max, num, endpoint=False)
    # Analytic return colatitude: a line started at th comes back at
    # pi - th (northern start) or -pi - th (southern start).
    th_returns = np.where(th_values < 0.0, -np.pi - th_values, np.pi - th_values)
    deltas = []
    lengths = []
    th_finals = []
    with tqdm(total=len(th_values), desc="Tracing Fields...") as bar:
        for i, th in enumerate(th_values):
            traced = field_trace([1., th, 0.], dipole, ds, max_iter, axes=None)
            if traced is None:
                # Trace failed: mark this starting angle as invalid throughout.
                th_values[i] = np.nan
                th_finals.append(np.nan)
                lengths.append(np.nan)
                deltas.append(np.nan)
            else:
                p_arr, B_arr = traced
                th_final = p_arr[-1][1]
                th_finals.append(th_final)
                lengths.append(len(p_arr))
                deltas.append(abs(th_final - th_returns[i]))
            bar.update()
    return th_values, np.array(deltas), np.array(lengths)
def multi_step_size(num, th_min, th_max, stepsizes):
    """For each step size, trace the dipole field lines, compute the tracing
    errors and save both result sets under Testing/Dipole/.

    BUG FIX: the output paths previously began with '/', i.e. pointed at the
    filesystem root and failed (or wrote outside the project) on any normal
    setup.  They are now relative to the working directory, and the target
    directories are created if missing.
    """
    for ds in stepsizes:
        # Cap iterations so the total traced arc length is comparable per ds.
        max_iter = int(1e4 / ds)
        field_lines = multiline_plot(num, th_min, th_max, ds=ds, maxits=max_iter, plot=False)
        th_values, deltas, lengths = dipole_error(num, th_min, th_max, ds, max_iter)
        os.makedirs('Testing/Dipole/Fieldlines', exist_ok=True)
        os.makedirs('Testing/Dipole/Errors', exist_ok=True)
        with open(f'Testing/Dipole/Fieldlines/Dipole_fieldlines_ds_{ds}.npy', 'wb') as f:
            np.save(f, field_lines)
        with open(f'Testing/Dipole/Errors/Dipole_errors_ds_{ds}.npy', 'wb') as f:
            np.save(f, [th_values, deltas, lengths])
# fpath = 'dipole_errors_0.01.npy'
"""
Below is how you save and load numpy arrays. If you're reading this, then you don't have
to run the dipole_error function again! The file will have pulled into your local respository
so you can just run this file and it will access the data straight away. :)
"""
### RUN THIS BLOCK TO GENERATE DATA AND SAVE IT ###
### DO THIS ONCE THEN ACCESS SAVED FILE TO SAVE TIME ###
# th_values, th_returns, deltas, lengths = dipole_error(50, -np.pi/2, np.pi/2, 0.01, 100000)
# with open(fpath, 'wb') as f:
# np.save(f, [th_values, th_returns, deltas, lengths])
### RUN THIS BLOCK TO RETRIEVE SAVED DATA ###
# with open(fpath, 'rb') as f:
# th_deltas = np.load(f, allow_pickle=True)
# th_values, th_returns, deltas, lengths = th_deltas
#################### PLOTTING #######################
# Global matplotlib style shared by the plots produced in this script.
params = {
    'axes.labelsize': 14,
    'font.size': 14,
    'legend.fontsize': 14,
    'xtick.labelsize': 12,
    'ytick.labelsize': 12,
    'figure.figsize': [8,6]
}
plt.rcParams.update(params)
#l = int(len(th_values)/2)
"""Adjacent plot of angular error (scaled by angular separation) and field line length vs starting theta value, with mean. Only plotted one half """
# fig, ax = plt.subplots(2,1, sharex=True)
# ax[0].plot(th_values[l:], deltas[l:]/th_gap, label="Step Size = 0.01")
# ax[0].plot(th_values[l:], [mean_gap for _ in th_values[l:]], label="Mean")
# ax[0].set_ylabel(r"(Angular Discrepancy)/$\Delta\theta$", fontsize = 'medium', labelpad = 17)
# ax[0].legend()
# ax[1].plot(th_values[l:], lengths[l:], label = "Step Size = 0.01")
# ax[1].set_ylabel("Fieldline Length (no. points)", fontsize='medium')
# ax[1].set_xlabel(r"$\theta$ (rad)", fontsize = 'medium')
# plt.legend()
# plt.rcParams.update(params)
# plt.show()
"""Plot of angular error vs starting theta value, with mean. Only plotted one half """
# plt.plot(th_values[l:], deltas[l:], label="Step Size = 0.01")
# plt.plot(th_values[l:], [mean for _ in th_values[l:]], label="Mean")
# plt.ylabel("Angular Discrepancy", fontsize = 'medium')
# plt.xlabel(r"$\theta$ (rad)", fontsize = 'medium')
# plt.legend()
# plt.rcParams.update(params)
# plt.show()
"""Plot of angular error (scaled by the angular separation) vs starting theta value, with mean. Only plotted one half """
# plt.plot(th_values[l:], deltas[l:]/th_gap, label="Step Size = 0.01")
# plt.plot(th_values[l:], [mean_gap for _ in th_values[l:]], label="Mean")
# plt.ylabel(r"(Angular Discrepancy)/$\Delta\theta$", fontsize = 'medium')
# plt.xlabel(r"$\theta$ (rad)", fontsize = 'medium')
# plt.legend()
# plt.rcParams.update(params)
# plt.show()
"""Plot of angular error (scaled by the angular separation) vs field line length (i.e. how many steps taken). Only plotted one half """
# plt.rcParams.update(params)
# plt.plot(lengths[l:], deltas[l:]/th_gap, label = "Step Size = 0.01")
# plt.xscale('log')
# plt.xlabel("Log(Fieldline Length) [num. points]")
# plt.ylabel(r"(Angular Error)/$\Delta\theta$")
# plt.legend()
# plt.show()
#################### ANALYTICAL VS PLOTTED #######################
def analytic_dipole_plot(numlines):
    """Overlay ``numlines`` analytic dipole field lines (black dash-dot) on
    the current matplotlib axes, for starting colatitudes in [0, pi/2]."""
    # NOTE(review): the first starting angle is 0, where sin(th_i)**2 == 0 and
    # the analytic expressions blow up -- numpy yields inf/nan for that line.
    def x_of(th, th_i):
        return np.sin(th)**3 / np.sin(th_i)**2
    def y_of(th, th_i):
        return np.sin(th)**2 * np.cos(th) / np.sin(th_i)**2
    for th_i in np.linspace(0, np.pi/2, numlines):
        th_range = np.linspace(th_i, np.pi - th_i, 200)
        x_arr = [x_of(th, th_i) for th in th_range]
        y_arr = [y_of(th, th_i) for th in th_range]
        plt.plot(x_arr, y_arr, '-.', color='k')
# multiline_plot(25, th_max = np.pi/2)
# ls=(0, (3, 10, 1, 10, 1, 10))
#################### 3D DIPOLE PLOT #######################
# Trace and display 10 field lines in 3D at three azimuth angles (0, 60, 120 deg).
multiline_3D(10, [0., np.pi/3, 2*np.pi/3])
plt.show()
|
{"/high_res_all_moons.py": ["/all_funcs.py"], "/footpoint_analysis.py": ["/all_funcs.py"], "/planet_testing.py": ["/all_funcs.py"], "/unit_speed_tests.py": ["/all_funcs.py"], "/convergence_tests.py": ["/all_funcs.py"], "/dipole_testing.py": ["/all_funcs.py"], "/satellite_footpoints.py": ["/all_funcs.py"], "/quadrupole_testing.py": ["/all_funcs.py"]}
|
11,419
|
jjwindow/PyFields
|
refs/heads/main
|
/satellite_footpoints.py
|
from all_funcs import *
from all_funcs import _B_rad_
def trace_partial_orbit(moon, orbit_points, num_fieldlines):
    """Trace random and true footpoints for each position in *orbit_points*.

    Returns four lists of ``(position, result)`` tuples: forward footpoints,
    backward footpoints, true forward footpoints and true backward footpoints.
    """
    fwd = []
    bwd = []
    true_fwd = []
    true_bwd = []
    for pos in orbit_points:
        fp_f, fp_b, tf_f, tf_b = random_footpoints(num_fieldlines, moon, pos, trueTrace=True)
        fwd.append((pos, fp_f))
        bwd.append((pos, fp_b))
        true_fwd.append((pos, tf_f))
        true_bwd.append((pos, tf_b))
    return fwd, bwd, true_fwd, true_bwd
def save_partial_moon_trace(moon, orbit_points, t_s, t_f, num_fieldlines):
    """Trace a partial orbit and save each footpoint array to <moon>/.

    File names encode the time window ``(t_s, t_f)``, the number of orbit
    points and the number of field lines per point.

    Fixes: ``trace_partial_orbit`` returns exactly four arrays, so the old
    fifth ``'time'`` label was dead code (``zip`` stopped at the shorter
    sequence) and is removed; the output directory is now created if missing.
    """
    import os  # local import: this module does not import os at top level
    paths = ['footpoints_f', 'footpoints_b', 'trueFoot_f', 'trueFoot_b']
    all_footpoints = trace_partial_orbit(moon, orbit_points, num_fieldlines)
    os.makedirs(moon, exist_ok=True)
    for path, footpoint in zip(paths, all_footpoints):
        fpath = f'{moon}/{path}_partial_{t_s}_{t_f}_{len(orbit_points)}_{num_fieldlines}.npy'
        with open(fpath, 'wb') as out:
            np.save(out, footpoint)
def all_moon_footpoints():
    """Scatter the true forward and backward footpoints of the five major
    Uranian moons on a latitude/longitude map, one colour per moon."""
    all_moons = ['Miranda', 'Ariel', 'Umbriel', 'Titania', 'Oberon']
    ax = plt.subplot(1,1,1)
    for i, moon in enumerate(all_moons):
        # Miranda's saved trace used a slightly longer orbit fraction.
        orb = 1.05 if moon == 'Miranda' else 1
        colour = Cavalcanti_5.mpl_colors[i]
        for direction in ('f', 'b'):
            with open(f'{moon}/trueFoot_{direction}_40_{orb}_100.npy', 'rb') as file:
                foot_arr = np.load(file, allow_pickle=True)
            for (pos, fp) in foot_arr:
                lat, longt = cartesian2latlong(*fp)
                ax.plot(longt + 180, lat, 'x', color=colour)
    ax.set_xlabel(r"Longitude ($^{\circ}$)")
    ax.set_ylabel(r"Latitude ($^{\circ}$)")
    ax.set_xlim(360, 0)
    ax.set_ylim(-90, 90)
    # One legend handle per moon, coloured to match the scatter markers.
    handles = [mat.lines.Line2D([0,0], [1,1], color=Cavalcanti_5.mpl_colors[i]) for i in range(5)]
    ax.legend(handles, ('Miranda', 'Ariel', 'Umbriel', 'Titania', 'Oberon'))
    plt.show()
def time_filter(t_s, t_f, time_array, orbit_points_array):
    """Restrict paired time/orbit-point arrays to the open window (t_s, t_f).

    Parameters
    ----------
    t_s, t_f : start and end times (exclusive bounds).
    time_array : sequence of times, same length as ``orbit_points_array``.
    orbit_points_array : sequence of orbit positions.

    Returns ``(new_time_array, new_orbit_points_array)`` as lists, preserving
    order.  BUG FIX: the old implementation round-tripped through a dict keyed
    by time, silently dropping entries with duplicate timestamps; a simple
    paired filter keeps every entry.
    """
    pairs = [(t, pos) for t, pos in zip(time_array, orbit_points_array)
             if t_s < t < t_f]
    new_time_array = [t for t, _ in pairs]
    new_orbit_points_array = [pos for _, pos in pairs]
    return new_time_array, new_orbit_points_array
def surface_radial_field(planet):
    """Plot the radial magnetic field on the surface (r = 1) of *planet*
    ('Uranus' or 'Neptune') as a latitude/longitude image.

    Fix: explicit coefficient lookup replaces the bool-arithmetic trick
    ``(planet == 'Uranus')*uranus + ...``, which silently produced zero
    coefficients for an unrecognised planet name; an unknown name now raises
    ``KeyError``.
    """
    coeffs = {'Uranus': uranus, 'Neptune': neptune}[planet]
    # 500 x 500 grid of (theta, phi) surface points covering the full sphere.
    points = [[(th, ph) for ph in np.linspace(0, 2*np.pi, 500)] for th in np.linspace(0, np.pi, 500)]
    B_r_mat = [[_B_rad_(1, *p, *coeffs) for p in row] for row in points]
    plt.imshow(B_r_mat, cmap='magma', extent=[360, 0, -90, 90])
    plt.colorbar()
    plt.show()
# surface_radial_field('Uranus')
def surface_radial_field_contour(planet):
    """Filled-contour plot of the surface (r = 1) radial field of *planet*
    ('Uranus' or 'Neptune'), offset so the field minimum is zero.

    Fix: explicit coefficient lookup replaces the bool-arithmetic selection,
    which silently produced zero coefficients for an unknown planet name.
    """
    coeffs = {'Uranus': uranus, 'Neptune': neptune}[planet]
    # 500 x 500 grid of (theta, phi) surface points covering the full sphere.
    points = [[(th, ph) for ph in np.linspace(0, 2*np.pi, 500)] for th in np.linspace(0, np.pi, 500)]
    B_r_mat = np.asarray([[_B_rad_(1, *p, *coeffs) for p in row] for row in points])
    print(np.amin(B_r_mat))  # debug: field minimum before offset
    # Shift so the minimum is zero before contouring.
    B_r_mat += abs(np.amin(B_r_mat))
    print(np.amin(B_r_mat))  # debug: should now be 0
    fig, ax = plt.subplots()
    cs = ax.contourf(np.linspace(360, 0, 500), np.linspace(-90, 90, 500), B_r_mat)
    ax.invert_xaxis()
    plt.colorbar(cs)
    plt.show()
# surface_radial_field('Neptune')
|
{"/high_res_all_moons.py": ["/all_funcs.py"], "/footpoint_analysis.py": ["/all_funcs.py"], "/planet_testing.py": ["/all_funcs.py"], "/unit_speed_tests.py": ["/all_funcs.py"], "/convergence_tests.py": ["/all_funcs.py"], "/dipole_testing.py": ["/all_funcs.py"], "/satellite_footpoints.py": ["/all_funcs.py"], "/quadrupole_testing.py": ["/all_funcs.py"]}
|
11,420
|
jjwindow/PyFields
|
refs/heads/main
|
/quadrupole_testing.py
|
from all_funcs import *
import matplotlib.pyplot as plt
import matplotlib as mat
import numpy as np
from collections import Counter
########################## QUADRUPOLE TEST ###############################
# Shared matplotlib style for the figures below.
params = {
    'axes.labelsize': 14,
    'font.size': 14,
    'legend.fontsize': 14,
    'xtick.labelsize': 12,
    'ytick.labelsize': 12,
    'figure.figsize': [8,6]
}
plt.rcParams.update(params)
# Trace 50 field lines using the quadrupole coefficients (from all_funcs).
multiline_plot(50, coeffs=quadrupole)
plt.xlabel("Distance in x")
plt.ylabel("Distance in y")
#plt.annotate("Traced Quadropole, ds=0.01", xy=(1,1))
# Red lines travel south, blue lines travel north.
plt.legend((mat.lines.Line2D([0,0], [1,1], color = 'r'),mat.lines.Line2D([0,0], [1,1], color = 'b')), ('Southbound Line','Northbound Line'))
plt.show()
########################## ANALYTICAL FIELD COMPARISON ##########################
# Re-apply the plot style (kept separate so this block can run stand-alone).
params = {
    'axes.labelsize': 14,
    'font.size': 14,
    'legend.fontsize': 14,
    'xtick.labelsize': 12,
    'ytick.labelsize': 12,
    'figure.figsize': [8,6]
}
plt.rcParams.update(params)
field = 'Quadrupole'
# Overlay 25 numerically traced quadrupole lines with the analytic solution.
multiline_plot(25, th_max = np.pi/2, coeffs = quadrupole)
_analytic_field_plot(0, np.pi/2, 25, 0.001, 'quad')
plt.legend((mat.lines.Line2D([0,0], [1,1], color = 'r'),mat.lines.Line2D([0,0], [1,1], color = 'k', linestyle='--')), (f'Traced {field}, ds = 0.01',f'Analytical {field}'))
plt.show()
|
{"/high_res_all_moons.py": ["/all_funcs.py"], "/footpoint_analysis.py": ["/all_funcs.py"], "/planet_testing.py": ["/all_funcs.py"], "/unit_speed_tests.py": ["/all_funcs.py"], "/convergence_tests.py": ["/all_funcs.py"], "/dipole_testing.py": ["/all_funcs.py"], "/satellite_footpoints.py": ["/all_funcs.py"], "/quadrupole_testing.py": ["/all_funcs.py"]}
|
11,501
|
inzaghian/anzhu
|
refs/heads/master
|
/ui/comset.py
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'comset.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_comsetform(object):
    """Qt Designer-generated UI class for the serial-port settings dialog.

    Auto-generated from comset.ui by the PyQt5 UI code generator (5.11.3);
    edits here are normally lost when the .ui file is recompiled, so only
    comments are added.  Widgets: combo boxes for port, baud rate, data bits,
    parity and stop bits, plus save/default/cancel push buttons.
    """
    def setupUi(self, comsetform):
        """Build the widget tree and geometry on the given form widget."""
        comsetform.setObjectName("comsetform")
        comsetform.resize(213, 193)
        # Serial-port selector (populated elsewhere at runtime).
        self.label = QtWidgets.QLabel(comsetform)
        self.label.setGeometry(QtCore.QRect(24, 26, 54, 16))
        self.label.setObjectName("label")
        self.cb_com = QtWidgets.QComboBox(comsetform)
        self.cb_com.setGeometry(QtCore.QRect(90, 26, 101, 20))
        self.cb_com.setObjectName("cb_com")
        # Baud-rate selector with six preset rates (texts set in retranslateUi).
        self.cb_bsp = QtWidgets.QComboBox(comsetform)
        self.cb_bsp.setGeometry(QtCore.QRect(90, 52, 101, 20))
        self.cb_bsp.setFrame(True)
        self.cb_bsp.setObjectName("cb_bsp")
        self.cb_bsp.addItem("")
        self.cb_bsp.addItem("")
        self.cb_bsp.addItem("")
        self.cb_bsp.addItem("")
        self.cb_bsp.addItem("")
        self.cb_bsp.addItem("")
        self.label_2 = QtWidgets.QLabel(comsetform)
        self.label_2.setGeometry(QtCore.QRect(24, 52, 54, 16))
        self.label_2.setObjectName("label_2")
        # Data-bits selector (8 or 7).
        self.cb_data = QtWidgets.QComboBox(comsetform)
        self.cb_data.setGeometry(QtCore.QRect(90, 78, 101, 20))
        self.cb_data.setObjectName("cb_data")
        self.cb_data.addItem("")
        self.cb_data.addItem("")
        self.label_3 = QtWidgets.QLabel(comsetform)
        self.label_3.setGeometry(QtCore.QRect(24, 78, 54, 16))
        self.label_3.setObjectName("label_3")
        # Stop-bits selector (1 / 1.5 / 2).
        self.cb_stop = QtWidgets.QComboBox(comsetform)
        self.cb_stop.setGeometry(QtCore.QRect(90, 130, 101, 20))
        self.cb_stop.setObjectName("cb_stop")
        self.cb_stop.addItem("")
        self.cb_stop.addItem("")
        self.cb_stop.addItem("")
        self.label_4 = QtWidgets.QLabel(comsetform)
        self.label_4.setGeometry(QtCore.QRect(24, 130, 54, 16))
        self.label_4.setObjectName("label_4")
        # Parity selector (NONE / ODD / EVEN).
        self.cb_p = QtWidgets.QComboBox(comsetform)
        self.cb_p.setGeometry(QtCore.QRect(90, 104, 101, 20))
        self.cb_p.setObjectName("cb_p")
        self.cb_p.addItem("")
        self.cb_p.addItem("")
        self.cb_p.addItem("")
        self.label_5 = QtWidgets.QLabel(comsetform)
        self.label_5.setGeometry(QtCore.QRect(24, 104, 54, 16))
        self.label_5.setObjectName("label_5")
        # Bottom row: save ("确定"), default ("默认") and cancel ("取消") buttons.
        self.btn_default = QtWidgets.QPushButton(comsetform)
        self.btn_default.setGeometry(QtCore.QRect(84, 160, 50, 23))
        self.btn_default.setObjectName("btn_default")
        self.btn_save = QtWidgets.QPushButton(comsetform)
        self.btn_save.setGeometry(QtCore.QRect(24, 160, 50, 23))
        self.btn_save.setObjectName("btn_save")
        self.btn_esc = QtWidgets.QPushButton(comsetform)
        self.btn_esc.setGeometry(QtCore.QRect(144, 160, 50, 23))
        self.btn_esc.setObjectName("btn_esc")
        self.retranslateUi(comsetform)
        # Default baud-rate selection: first entry (115200).
        self.cb_bsp.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(comsetform)
    def retranslateUi(self, comsetform):
        """Install the (translatable) display texts for every widget."""
        _translate = QtCore.QCoreApplication.translate
        comsetform.setWindowTitle(_translate("comsetform", "串口设置"))
        self.label.setText(_translate("comsetform", "串口"))
        self.cb_bsp.setCurrentText(_translate("comsetform", "115200"))
        self.cb_bsp.setItemText(0, _translate("comsetform", "115200"))
        self.cb_bsp.setItemText(1, _translate("comsetform", "4800"))
        self.cb_bsp.setItemText(2, _translate("comsetform", "9600"))
        self.cb_bsp.setItemText(3, _translate("comsetform", "19200"))
        self.cb_bsp.setItemText(4, _translate("comsetform", "38400"))
        self.cb_bsp.setItemText(5, _translate("comsetform", "57600"))
        self.label_2.setText(_translate("comsetform", "波特率"))
        self.cb_data.setCurrentText(_translate("comsetform", "8"))
        self.cb_data.setItemText(0, _translate("comsetform", "8"))
        self.cb_data.setItemText(1, _translate("comsetform", "7"))
        self.label_3.setText(_translate("comsetform", "数据位"))
        self.cb_stop.setItemText(0, _translate("comsetform", "1"))
        self.cb_stop.setItemText(1, _translate("comsetform", "1.5"))
        self.cb_stop.setItemText(2, _translate("comsetform", "2"))
        self.label_4.setText(_translate("comsetform", "停止位"))
        self.cb_p.setItemText(0, _translate("comsetform", "NONE"))
        self.cb_p.setItemText(1, _translate("comsetform", "ODD"))
        self.cb_p.setItemText(2, _translate("comsetform", "EVEN"))
        self.label_5.setText(_translate("comsetform", "校验位"))
        self.btn_default.setText(_translate("comsetform", "默认"))
        self.btn_save.setText(_translate("comsetform", "确定"))
        self.btn_esc.setText(_translate("comsetform", "取消"))
|
{"/testform.py": ["/uartform.py"], "/main.py": ["/uartform.py"], "/comsetform.py": ["/ui/comset.py", "/uartform.py"], "/pinsetform.py": ["/uartform.py"], "/uartform.py": ["/ui/uart.py", "/comsetform.py", "/pinsetform.py", "/testform.py", "/com.py", "/xmlreadandwrite.py"]}
|
11,502
|
inzaghian/anzhu
|
refs/heads/master
|
/xmlreadandwrite.py
|
#coding:utf-8
import xml.etree.ElementTree as ET
#{'com': 'COM1', 'bsp': '115200', 'd': '8', 'p': 'NONE', 's': '1'}
def WriteXml(sl):
    """Serialise the settings dict *sl* to setmsg.xml.

    Each key/value pair becomes a ``<set name="..." value="..."/>`` element
    under a single ``<com>`` root, e.g. ``{'com': 'COM1'}`` ->
    ``<com><set name="com" value="COM1" /></com>``.

    Fix: iterate ``items()`` instead of ``keys()`` + per-key lookup, and pass
    the attributes to ``SubElement`` rather than overwriting ``attrib``.
    """
    root = ET.Element("com")
    for name, value in sl.items():
        ET.SubElement(root, "set", attrib={'name': name, 'value': value})
    tree = ET.ElementTree(root)
    tree.write("setmsg.xml")
def ReadXml(spath):
    """Parse the settings file at *spath* back into a ``{name: value}`` dict.

    Expects a ``<com>`` root whose children are ``<set name=... value=.../>``
    elements, as produced by ``WriteXml``.

    BUG FIX: ``Element.getchildren()`` was removed in Python 3.9; iterating
    the element directly is the supported equivalent.
    """
    tree = ET.parse(spath)
    xmllist = {}
    for child in tree.getroot():
        xmllist[child.attrib['name']] = child.attrib['value']
    return xmllist
#sl={'com': 'COM1', 'bsp': '115200', 'd': '8', 'p': 'NONE', 's': '1'}
#WriteXml(sl)
#spath="setmsg.xml"
#print(ReadXml(spath))
|
{"/testform.py": ["/uartform.py"], "/main.py": ["/uartform.py"], "/comsetform.py": ["/ui/comset.py", "/uartform.py"], "/pinsetform.py": ["/uartform.py"], "/uartform.py": ["/ui/uart.py", "/comsetform.py", "/pinsetform.py", "/testform.py", "/com.py", "/xmlreadandwrite.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.