| code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int64, 3-1.05M) |
|---|---|---|---|---|---|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio-Records-REST configuration."""
from __future__ import absolute_import, print_function
from flask import request
from invenio_indexer.api import RecordIndexer
from invenio_search import RecordsSearch
from .facets import terms_filter
from .utils import allow_all, check_elasticsearch, deny_all
def _(x):
"""Identity function for string extraction."""
return x
RECORDS_REST_ENDPOINTS = dict(
recid=dict(
pid_type='recid',
pid_minter='recid',
pid_fetcher='recid',
search_class=RecordsSearch,
indexer_class=RecordIndexer,
search_index=None,
search_type=None,
record_serializers={
'application/json': ('invenio_records_rest.serializers'
':json_v1_response'),
},
search_serializers={
'application/json': ('invenio_records_rest.serializers'
':json_v1_search'),
},
list_route='/records/',
item_route='/records/<pid(recid):pid_value>',
default_media_type='application/json',
max_result_window=10000,
error_handlers=dict(),
),
)
"""Default REST endpoints loaded.
This option can be overwritten to describe the endpoints of different
record types. Each endpoint is in charge of managing all its CRUD operations
(GET, POST, PUT, DELETE, ...).
The structure of the dictionary is as follows:
.. code-block:: python
from flask import abort, jsonify, make_response
from flask_security import current_user
from invenio_records_rest.query import es_search_factory
from invenio_records_rest.errors import PIDDeletedRESTError
def search_factory(*args, **kwargs):
if not current_user.is_authenticated:
abort(401)
return es_search_factory(*args, **kwargs)
def permission_check_factory():
def check_title(record, *args, **kwargs):
def can(self):
if record['title'] == 'Hello World':
return True
return type('Check', (), {'can': can})()
def deleted_pid_error_handler(error):
record = error.pid_error.record or {}
return make_response(jsonify({
'status': 410,
'message': error.description,
'removal_reason': record.get('removal_reason')}), 410)
RECORDS_REST_ENDPOINTS = {
'endpoint-prefix': {
'create_permission_factory_imp': permission_check_factory(),
'default_endpoint_prefix': True,
'default_media_type': 'application/json',
'delete_permission_factory_imp': permission_check_factory(),
'item_route': ('/records/<pid(record-pid-type, '
'record_class="mypackage.api:MyRecord"):pid_value>'),
'links_factory_imp': ('invenio_records_rest.links:'
'default_links_factory'),
'list_route': '/records/',
'max_result_window': 10000,
'pid_fetcher': '<registered-pid-fetcher>',
'pid_minter': '<registered-minter-name>',
'pid_type': '<record-pid-type>',
'list_permission_factory_imp': permission_check_factory(),
'read_permission_factory_imp': permission_check_factory(),
'record_class': 'mypackage.api:MyRecord',
'record_loaders': {
'application/json': 'mypackage.loaders:json_loader'
},
'record_serializers': {
'application/json': 'mypackage.utils:my_json_serializer'
},
'record_serializers_aliases': {
'json': 'application/json'
},
'search_class': 'mypackage.utils:mysearchclass',
'search_factory_imp': search_factory(),
'search_index': 'elasticsearch-index-name',
'search_serializers': {
'application/json': 'mypackage.utils:my_json_search_serializer'
},
'search_serializers_aliases': {
'json': 'application/json'
},
'search_type': 'elasticsearch-doc-type',
'suggesters': {
'my_url_param_to_complete': {
'_source': ['specified_source_filtered_field'],
'completion': {
'field': 'suggest_byyear_elasticsearch_field',
'size': 10,
'context': 'year'
}
},
},
'update_permission_factory_imp': permission_check_factory(),
'use_options_view': True,
'error_handlers': {
PIDDeletedRESTError: deleted_pid_error_handler,
},
},
}
:param create_permission_factory_imp: Import path to factory that creates a
permission object for a given record.
:param default_endpoint_prefix: Declare the current endpoint as the default
when building endpoints for the defined ``pid_type``. By default, the
prefix is the value of ``pid_type``.
:param default_media_type: Default media type for both records and search.
:param delete_permission_factory_imp: Import path to factory that creates a
delete permission object for a given record.
:param item_route: URL rule for a single record.
:param links_factory_imp: Factory for record links generation.
:param list_route: Base URL for the records endpoint.
:param max_result_window: Maximum total number of records retrieved from a
query.
:param pid_type: It specifies the record pid type. Required.
You can generate a URL to list all records of the given ``pid_type`` by
calling ``url_for('invenio_records_rest.{0}_list'.format(
current_records_rest.default_endpoint_prefixes[pid_type]))``.
:param pid_fetcher: It identifies the registered fetcher name. Required.
:param pid_minter: It identifies the registered minter name. Required.
:param list_permission_factory_imp: Import path to factory that creates a
list permission object for a given index / list.
:param read_permission_factory_imp: Import path to factory that creates a
read permission object for a given record.
:param record_class: A record API class or importable string.
:param record_loaders: It contains the list of record deserializers for
supported formats.
:param record_serializers: It contains the list of record serializers for
supported formats.
:param record_serializers_aliases: A mapping of values of the defined query arg
(see `config.REST_MIMETYPE_QUERY_ARG_NAME`) to valid mimetypes for record
item serializers: dict(alias -> mimetype).
:param search_class: Import path or class object for the class in charge of
executing the search queries. The default search class is
:class:`invenio_search.api.RecordsSearch`.
For more information, see the `Search
<http://elasticsearch-dsl.readthedocs.io/en/latest/search_dsl.html>`_
documentation of the Elasticsearch DSL library.
:param search_factory_imp: Factory to parse queries.
:param search_index: Name of the search index used when searching records.
:param search_serializers: It contains the list of record serializers for all
supported formats. This configuration differs from ``record_serializers``
because it handles lists of records returned by a search query instead of
a single record.
:param search_serializers_aliases: A mapping of values of the defined query arg
(see `config.REST_MIMETYPE_QUERY_ARG_NAME`) to valid mimetypes for records
search serializers: dict(alias -> mimetype).
:param search_type: Name of the search type used when searching records.
:param suggesters: Suggester fields configuration. Each element of the
dictionary represents a suggestion field. For each suggestion field you can
optionally specify source filtering (appropriate for ES5) by using
``_source``. The key of the dictionary element is used to identify the URL
query parameter. The ``field`` parameter identifies the suggester field
name in your Elasticsearch schema. For more information about suggester
configuration, see the suggesters section of the Elasticsearch
documentation.
.. note:: Only completion suggesters are supported.
:param update_permission_factory_imp: Import path to factory that creates an
update permission object for a given record.
:param use_options_view: Determines if a special option view should be
installed.
:param error_handlers: Error handlers configuration for the endpoint. The
dictionary has an exception type or HTTP status code as a key and a
function or an import path to a function as a value. The function will be
passed as an argument to :meth:`flask.Blueprint.register_error_handler`, so
it should take the handled exception/code as its single argument.
"""
RECORDS_REST_DEFAULT_LOADERS = {
'application/json': lambda: request.get_json(),
'application/json-patch+json': lambda: request.get_json(force=True),
}
"""Default data loaders per request mime type.
This option can be overridden in each REST endpoint as follows:
.. code-block:: python
RECORDS_REST_ENDPOINTS = {
'recid': {
...
'record_loaders': {
'application/json': 'mypackage.utils:myloader',
},
...
}
}
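A loader is a callable returning the deserialized request payload. A minimal
sketch of a custom loader (the ``myloader`` name and the ``control_number``
field are hypothetical, for illustration only):
.. code-block:: python
    from flask import request
    def myloader():
        data = request.get_json()
        # Illustrative: drop a field that clients may not set themselves.
        data.pop('control_number', None)
        return data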
"""
RECORDS_REST_SORT_OPTIONS = dict(
records=dict(
bestmatch=dict(
title=_('Best match'),
fields=['_score'],
default_order='desc',
order=1,
),
mostrecent=dict(
title=_('Most recent'),
fields=['-_created'],
default_order='asc',
order=2,
),
)
)
"""Sort options for default sorter factory.
The structure of the dictionary is as follows:
.. code-block:: python
RECORDS_REST_SORT_OPTIONS = {
'<index or index alias>': {
'<sort-field-name>': {
'fields': ['<search_field>', '<search_field>', ...],
'title': '<title displayed to end user in search-ui>',
'default_order': '<default sort order in search-ui>',
}
}
}
Each search field can be either:
- A string of the form ``'<field name>'`` (ascending) or ``'-<field name>'``
(descending).
- A dictionary with Elasticsearch sorting syntax (e.g.
``{'price' : {'order' : 'asc', 'mode' : 'avg'}}``).
- A callable taking one boolean parameter (``True`` for ascending and ``False``
for descending) and returning a dictionary like above. This is useful if you
need to extract extra sorting parameters (e.g. for geo location searches).
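For example, a minimal sketch of a callable search field using the
Elasticsearch ``_geo_distance`` sort syntax (the ``pin.location`` field and
the coordinates are hypothetical):
.. code-block:: python
    def geo_distance_sort(asc):
        # Sort by distance from a fixed point, ascending or descending.
        return {
            '_geo_distance': {
                'pin.location': [5.2, 4.7],
                'order': 'asc' if asc else 'desc',
                'unit': 'km',
            }
        }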
"""
RECORDS_REST_DEFAULT_SORT = dict(
records=dict(
query='bestmatch',
noquery='mostrecent',
)
)
"""Default sort option per index with/without query string.
The structure of the dictionary is as follows:
.. code-block:: python
RECORDS_REST_DEFAULT_SORT = {
'<index or index alias>': {
'query': '<default-sort-if-a-query-is-passed-from-url>',
'noquery': '<default-sort-if-no-query-in-passed-from-url>',
}
}
"""
RECORDS_REST_FACETS = dict(
records=dict(
aggs=dict(
type=dict(terms=dict(field='type'))
),
post_filters=dict(
type=terms_filter('type'),
)
)
)
"""Facets per index for the default facets factory.
The structure of the dictionary is as follows:
.. code-block:: python
RECORDS_REST_FACETS = {
'<index or index alias>': {
'aggs': {
'<key>': <aggregation definition>,
...
}
'filters': {
'<key>': <filter func>,
...
}
'post_filters': {
'<key>': <filter func>,
...
}
}
}
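A filter function takes the list of values parsed from the query string and
returns an Elasticsearch DSL query object. A minimal sketch in the style of
``terms_filter`` (assuming ``elasticsearch_dsl`` is installed):
.. code-block:: python
    from elasticsearch_dsl import Q
    def terms_filter(field):
        # Build a closure that turns the given values into a terms query.
        def inner(values):
            return Q('terms', **{field: values})
        return inner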
"""
RECORDS_REST_DEFAULT_CREATE_PERMISSION_FACTORY = deny_all
"""Default create permission factory: reject any request."""
RECORDS_REST_DEFAULT_LIST_PERMISSION_FACTORY = allow_all
"""Default list permission factory: allow all requests"""
RECORDS_REST_DEFAULT_READ_PERMISSION_FACTORY = check_elasticsearch
"""Default read permission factory: check if the record exists."""
RECORDS_REST_DEFAULT_UPDATE_PERMISSION_FACTORY = deny_all
"""Default update permission factory: reject any request."""
RECORDS_REST_DEFAULT_DELETE_PERMISSION_FACTORY = deny_all
"""Default delete permission factory: reject any request."""
RECORDS_REST_ELASTICSEARCH_ERROR_HANDLERS = {
'query_parsing_exception': (
'invenio_records_rest.views'
':elasticsearch_query_parsing_exception_handler'
),
'query_shard_exception': (
'invenio_records_rest.views'
':elasticsearch_query_parsing_exception_handler'
),
}
"""Handlers for ElasticSearch error codes."""
RECORDS_REST_DEFAULT_RESULTS_SIZE = 10
"""Default search results size."""
| inveniosoftware/invenio-records-rest | invenio_records_rest/config.py | Python | mit | 13,214 |
import unittest2
from google.appengine.ext import testbed
from models.account import Account
from models.suggestion import Suggestion
from helpers.suggestions.suggestion_fetcher import SuggestionFetcher
class TestSuggestionFetcher(unittest2.TestCase):
def setUp(self):
self.testbed = testbed.Testbed()
self.testbed.activate()
self.testbed.init_datastore_v3_stub()
self.testbed.init_memcache_stub()
account = Account.get_or_insert(
"123",
email="user@example.com",
registered=True).put()
suggestion = Suggestion(
author=account,
review_state=Suggestion.REVIEW_PENDING,
target_key="2012cmp",
target_model="event").put()
def testCount(self):
self.assertEqual(SuggestionFetcher.count(Suggestion.REVIEW_PENDING, "event"), 1)
self.assertEqual(SuggestionFetcher.count(Suggestion.REVIEW_PENDING, "media"), 0)
| synth3tk/the-blue-alliance | tests/test_suggestion_fetcher.py | Python | mit | 965 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.engine import template
def _identity(resource_name, definition):
return definition
def member_definitions(old_resources, new_definition,
num_resources, num_new,
get_new_id, customise=_identity):
"""Iterate over resource definitions for a scaling group
Generates the definitions for the next change to the scaling group. Each
item is a (name, definition) tuple.
The input is a list of (name, definition) tuples for existing resources in
the group, sorted in the order that they should be replaced or removed
(i.e. the resource that should be the first to be replaced (on update) or
removed (on scale down) appears at the beginning of the list.) New
resources are added or old resources removed as necessary to ensure a total
of num_resources.
The number of resources to have their definition changed to the new one is
controlled by num_new. This value includes any new resources to be added,
with any shortfall made up by modifying the definitions of existing
resources.
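An illustrative example (with the default identity ``customise`` and a
trivial ``get_new_id`` stub; resource names are hypothetical):
    >>> list(member_definitions(
    ...     [('r0', 'old'), ('r1', 'old')], 'new',
    ...     num_resources=3, num_new=2,
    ...     get_new_id=lambda: 'r2'))
    [('r0', 'new'), ('r1', 'old'), ('r2', 'new')]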
"""
old_resources = old_resources[-num_resources:]
num_create = num_resources - len(old_resources)
num_replace = num_new - num_create
for i in range(num_resources):
if i < len(old_resources):
old_name, old_definition = old_resources[i]
custom_definition = customise(old_name, new_definition)
if old_definition != custom_definition and num_replace > 0:
num_replace -= 1
yield old_name, custom_definition
else:
yield old_name, old_definition
else:
new_name = get_new_id()
yield new_name, customise(new_name, new_definition)
def make_template(resource_definitions,
version=('heat_template_version', '2015-04-30'),
child_env=None):
"""Return a Template object containing the given resource definitions.
By default, the template will be in the HOT format. A different format
can be specified by passing a (version_type, version_string) tuple matching
any of the available template format plugins.
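An illustrative sketch (``server_defn`` stands in for a real resource
definition object):
    tmpl = make_template([('server0', server_defn), ('server1', server_defn)])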
"""
tmpl = template.Template(dict([version]), env=child_env)
for name, defn in resource_definitions:
tmpl.add_resource(defn, name)
return tmpl
| dragorosson/heat | heat/scaling/template.py | Python | apache-2.0 | 2,936 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-12 15:44
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Community', '0030_auto_20171112_1025'),
('Community', '0030_auto_20171105_1836'),
]
operations = [
]
| ByrdOfAFeather/AlphaTrion | Community/migrations/0031_merge_20171112_1044.py | Python | mit | 342 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
from frappe.utils import add_days, cint, cstr, date_diff, flt, getdate, nowdate, \
get_first_day, get_last_day
from frappe.model.naming import make_autoname
from frappe import _, msgprint, throw
from erpnext.accounts.party import get_party_account, get_due_date
from erpnext.controllers.stock_controller import update_gl_entries_after
from frappe.model.mapper import get_mapped_doc
from erpnext.controllers.recurring_document import *
from erpnext.controllers.selling_controller import SellingController
from tools.tools_management.custom_methods import get_merchandise_item_details, get_item_details
from erpnext.accounts.accounts_custom_methods import update_serial_noInto
from erpnext.stock.stock_custom_methods import split_serial_no, count_serial_no
from erpnext.accounts.accounts_custom_methods import generate_serial_no, release_work_order
form_grid_templates = {
"entries": "templates/form_grid/item_grid.html"
}
class SalesInvoice(SellingController):
tname = 'Sales Invoice Item'
fname = 'entries'
def __init__(self, arg1, arg2=None):
super(SalesInvoice, self).__init__(arg1, arg2)
self.status_updater = [{
'source_dt': 'Sales Invoice Item',
'target_field': 'billed_amt',
'target_ref_field': 'amount',
'target_dt': 'Sales Order Item',
'join_field': 'so_detail',
'target_parent_dt': 'Sales Order',
'target_parent_field': 'per_billed',
'source_field': 'amount',
'join_field': 'so_detail',
'percent_join_field': 'sales_order',
'status_field': 'billing_status',
'keyword': 'Billed',
'overflow_type': 'billing'
}]
def validate(self):
super(SalesInvoice, self).validate()
self.validate_posting_time()
self.so_dn_required()
self.validate_proj_cust()
self.validate_with_previous_doc()
self.validate_uom_is_integer("stock_uom", "qty")
self.check_stop_sales_order("sales_order")
self.validate_customer_account()
self.validate_debit_acc()
self.validate_fixed_asset_account()
self.clear_unallocated_advances("Sales Invoice Advance", "advance_adjustment_details")
self.add_remarks()
if cint(self.is_pos):
self.validate_pos()
self.validate_write_off_account()
if cint(self.update_stock):
self.validate_item_code()
self.update_current_stock()
self.validate_delivery_note()
if not self.is_opening:
self.is_opening = 'No'
self.set_aging_date()
frappe.get_doc("Account", self.debit_to).validate_due_date(self.posting_date, self.due_date)
self.set_against_income_account()
self.validate_c_form()
self.validate_time_logs_are_submitted()
validate_recurring_document(self)
self.validate_multiple_billing("Delivery Note", "dn_detail", "amount",
"delivery_note_details")
def on_submit(self):
if cint(self.update_stock) == 1:
self.update_stock_ledger()
else:
# Check for Approving Authority
if not self.recurring_id:
frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype,
self.company, self.grand_total, self)
self.check_prev_docstatus()
self.update_status_updater_args()
self.update_prevdoc_status()
self.update_billing_status_for_zero_amount_refdoc("Sales Order")
# this sequence because outstanding may get -ve
self.make_gl_entries()
self.check_credit_limit(self.debit_to)
if not cint(self.is_pos) == 1:
self.update_against_document_in_jv()
self.update_c_form()
self.update_time_log_batch(self.name)
update_serial_noInto(self)
convert_to_recurring(self, "RECINV.#####", self.posting_date)
self.release_workOrder()
def release_workOrder(self):
if self.get('work_order_distribution'):
for data in self.get('work_order_distribution'):
if data.tailor_work_order:
obj = frappe.get_doc('Work Order', data.tailor_work_order)
release_work_order(obj)
def before_cancel(self):
self.update_time_log_batch(None)
def on_cancel(self):
if cint(self.update_stock) == 1:
self.update_stock_ledger()
self.check_stop_sales_order("sales_order")
from erpnext.accounts.utils import remove_against_link_from_jv
remove_against_link_from_jv(self.doctype, self.name, "against_invoice")
self.update_status_updater_args()
self.update_prevdoc_status()
self.update_billing_status_for_zero_amount_refdoc("Sales Order")
self.make_gl_entries_on_cancel()
def update_status_updater_args(self):
if cint(self.update_stock):
self.status_updater.append({
'source_dt':'Sales Invoice Item',
'target_dt':'Sales Order Item',
'target_parent_dt':'Sales Order',
'target_parent_field':'per_delivered',
'target_field':'delivered_qty',
'target_ref_field':'qty',
'source_field':'qty',
'join_field':'so_detail',
'percent_join_field':'sales_order',
'status_field':'delivery_status',
'keyword':'Delivered',
'second_source_dt': 'Delivery Note Item',
'second_source_field': 'qty',
'second_join_field': 'prevdoc_detail_docname',
'overflow_type': 'delivery'
})
def on_update_after_submit(self):
validate_recurring_document(self)
convert_to_recurring(self, "RECINV.#####", self.posting_date)
def get_portal_page(self):
return "invoice" if self.docstatus==1 else None
def set_missing_values(self, for_validate=False):
self.set_pos_fields(for_validate)
if not self.debit_to:
self.debit_to = get_party_account(self.company, self.customer, "Customer")
if not self.due_date:
self.due_date = get_due_date(self.posting_date, self.customer, "Customer",
self.debit_to, self.company)
super(SalesInvoice, self).set_missing_values(for_validate)
def update_time_log_batch(self, sales_invoice):
for d in self.get(self.fname):
if d.time_log_batch:
tlb = frappe.get_doc("Time Log Batch", d.time_log_batch)
tlb.sales_invoice = sales_invoice
tlb.ignore_validate_update_after_submit = True
tlb.save()
def validate_time_logs_are_submitted(self):
for d in self.get(self.fname):
if d.time_log_batch:
status = frappe.db.get_value("Time Log Batch", d.time_log_batch, "status")
if status!="Submitted":
frappe.throw(_("Time Log Batch {0} must be 'Submitted'").format(d.time_log_batch))
def set_pos_fields(self, for_validate=False):
"""Set retail related fields from pos settings"""
if cint(self.is_pos) != 1:
return
from erpnext.stock.get_item_details import get_pos_settings_item_details, get_pos_settings
pos = get_pos_settings(self.company)
if pos:
if not for_validate and not self.customer:
self.customer = pos.customer
# self.set_customer_defaults()
for fieldname in ('territory', 'naming_series', 'currency', 'taxes_and_charges', 'letter_head', 'tc_name',
'selling_price_list', 'company', 'select_print_heading', 'cash_bank_account'):
if (not for_validate) or (for_validate and not self.get(fieldname)):
self.set(fieldname, pos.get(fieldname))
if not for_validate:
self.update_stock = cint(pos.get("update_stock"))
# set pos values in items
for item in self.get("entries"):
if item.get('item_code'):
for fname, val in get_pos_settings_item_details(pos,
frappe._dict(item.as_dict()), pos).items():
if (not for_validate) or (for_validate and not item.get(fname)):
item.set(fname, val)
# fetch terms
if self.tc_name and not self.terms:
self.terms = frappe.db.get_value("Terms and Conditions", self.tc_name, "terms")
# fetch charges
if self.taxes_and_charges and not len(self.get("other_charges")):
self.set_taxes("other_charges", "taxes_and_charges")
def get_advances(self):
super(SalesInvoice, self).get_advances(self.debit_to,
"Sales Invoice Advance", "advance_adjustment_details", "credit")
def get_company_abbr(self):
return frappe.db.sql("select abbr from tabCompany where name=%s", self.company)[0][0]
def update_against_document_in_jv(self):
"""
Links invoice and advance voucher:
1. cancel advance voucher
2. split into multiple rows if partially adjusted, assign against voucher
3. submit advance voucher
"""
lst = []
for d in self.get('advance_adjustment_details'):
if flt(d.allocated_amount) > 0:
args = {
'voucher_no' : d.journal_voucher,
'voucher_detail_no' : d.jv_detail_no,
'against_voucher_type' : 'Sales Invoice',
'against_voucher' : self.name,
'account' : self.debit_to,
'is_advance' : 'Yes',
'dr_or_cr' : 'credit',
'unadjusted_amt' : flt(d.advance_amount),
'allocated_amt' : flt(d.allocated_amount)
}
lst.append(args)
if lst:
from erpnext.accounts.utils import reconcile_against_document
reconcile_against_document(lst)
def validate_customer_account(self):
"""Validates Debit To Account and Customer Matches"""
if self.customer and self.debit_to and not cint(self.is_pos):
acc_head = frappe.db.sql("select master_name from `tabAccount` where name = %s and docstatus != 2", self.debit_to)
if (acc_head and cstr(acc_head[0][0]) != cstr(self.customer)) or \
(not acc_head and (self.debit_to != cstr(self.customer) + " - " + self.get_company_abbr())):
msgprint("Debit To: %s do not match with Customer: %s for Company: %s.\n If both correctly entered, please select Master Type \
and Master Name in account master." %(self.debit_to, self.customer,self.company), raise_exception=1)
def validate_debit_acc(self):
if frappe.db.get_value("Account", self.debit_to, "report_type") != "Balance Sheet":
frappe.throw(_("Account must be a balance sheet account"))
def validate_fixed_asset_account(self):
"""Validate Fixed Asset and whether Income Account Entered Exists"""
for d in self.get('entries'):
item = frappe.db.sql("""select name,is_asset_item,is_sales_item from `tabItem`
where name = %s""", d.item_code)
acc = frappe.db.sql("""select account_type from `tabAccount`
where name = %s and docstatus != 2""", d.income_account)
if item and item[0][1] == 'Yes' and acc and acc[0][0] != 'Fixed Asset':
msgprint(_("Account {0} must be of type 'Fixed Asset' as Item {1} is an Asset Item").format(acc[0][0], d.item_code), raise_exception=True)
def validate_with_previous_doc(self):
super(SalesInvoice, self).validate_with_previous_doc(self.tname, {
"Sales Order": {
"ref_dn_field": "sales_order",
"compare_fields": [["customer", "="], ["company", "="], ["project_name", "="],
["currency", "="]],
},
"Delivery Note": {
"ref_dn_field": "delivery_note",
"compare_fields": [["customer", "="], ["company", "="], ["project_name", "="],
["currency", "="]],
},
})
if cint(frappe.defaults.get_global_default('maintain_same_sales_rate')):
super(SalesInvoice, self).validate_with_previous_doc(self.tname, {
"Sales Order Item": {
"ref_dn_field": "so_detail",
"compare_fields": [["rate", "="]],
"is_child_table": True,
"allow_duplicate_prev_row_id": True
},
"Delivery Note Item": {
"ref_dn_field": "dn_detail",
"compare_fields": [["rate", "="]],
"is_child_table": True
}
})
def set_aging_date(self):
if self.is_opening != 'Yes':
self.aging_date = self.posting_date
elif not self.aging_date:
throw(_("Ageing Date is mandatory for opening entry"))
def set_against_income_account(self):
"""Set against account for debit to account"""
against_acc = []
for d in self.get('entries'):
if d.income_account not in against_acc:
against_acc.append(d.income_account)
self.against_income_account = ','.join(against_acc)
def add_remarks(self):
if not self.remarks: self.remarks = 'No Remarks'
def so_dn_required(self):
"""check in manage account if sales order / delivery note required or not."""
dic = {'Sales Order':'so_required','Delivery Note':'dn_required'}
for i in dic:
if frappe.db.get_value('Selling Settings', None, dic[i]) == 'Yes':
for d in self.get('entries'):
if frappe.db.get_value('Item', d.item_code, 'is_stock_item') == 'Yes' \
and not d.get(i.lower().replace(' ','_')):
msgprint(_("{0} is mandatory for Item {1}").format(i,d.item_code), raise_exception=1)
def validate_proj_cust(self):
"""check for does customer belong to same project as entered.."""
if self.project_name and self.customer:
res = frappe.db.sql("""select name from `tabProject`
where name = %s and (customer = %s or
ifnull(customer,'')='')""", (self.project_name, self.customer))
if not res:
throw(_("Customer {0} does not belong to project {1}").format(self.customer,self.project_name))
def validate_pos(self):
if not self.cash_bank_account and flt(self.paid_amount):
frappe.throw(_("Cash or Bank Account is mandatory for making payment entry"))
if flt(self.paid_amount) + flt(self.write_off_amount) \
- flt(self.grand_total) > 1/(10**(self.precision("grand_total") + 1)):
frappe.throw(_("""Paid amount + Write Off Amount can not be greater than Grand Total"""))
def validate_item_code(self):
for d in self.get('entries'):
if not d.item_code:
msgprint(_("Item Code required at Row No {0}").format(d.idx), raise_exception=True)
def validate_delivery_note(self):
for d in self.get("entries"):
if d.delivery_note:
msgprint(_("Stock cannot be updated against Delivery Note {0}").format(d.delivery_note), raise_exception=1)
def validate_write_off_account(self):
if flt(self.write_off_amount) and not self.write_off_account:
msgprint(_("Please enter Write Off Account"), raise_exception=1)
def validate_c_form(self):
""" Blank C-form no if C-form applicable marked as 'No'"""
if self.amended_from and self.c_form_applicable == 'No' and self.c_form_no:
frappe.db.sql("""delete from `tabC-Form Invoice Detail` where invoice_no = %s
and parent = %s""", (self.amended_from, self.c_form_no))
frappe.db.set(self, 'c_form_no', '')
def update_current_stock(self):
for d in self.get('entries'):
if d.item_code and d.warehouse:
bin = frappe.db.sql("select actual_qty from `tabBin` where item_code = %s and warehouse = %s", (d.item_code, d.warehouse), as_dict = 1)
d.actual_qty = bin and flt(bin[0]['actual_qty']) or 0
for d in self.get('packing_details'):
bin = frappe.db.sql("select actual_qty, projected_qty from `tabBin` where item_code = %s and warehouse = %s", (d.item_code, d.warehouse), as_dict = 1)
d.actual_qty = bin and flt(bin[0]['actual_qty']) or 0
d.projected_qty = bin and flt(bin[0]['projected_qty']) or 0
def get_warehouse(self):
user_pos_setting = frappe.db.sql("""select name, warehouse from `tabPOS Setting`
where ifnull(user,'') = %s and company = %s""", (frappe.session['user'], self.company))
warehouse = user_pos_setting[0][1] if user_pos_setting else None
if not warehouse:
global_pos_setting = frappe.db.sql("""select name, warehouse from `tabPOS Setting`
where ifnull(user,'') = '' and company = %s""", self.company)
if global_pos_setting:
warehouse = global_pos_setting[0][1]
elif not user_pos_setting:
msgprint(_("POS Setting required to make POS Entry"), raise_exception=True)
return warehouse
def on_update(self):
if cint(self.update_stock) == 1:
# Set default warehouse from pos setting
if cint(self.is_pos) == 1:
w = self.get_warehouse()
if w:
for d in self.get('entries'):
if not d.warehouse:
d.warehouse = cstr(w)
from erpnext.stock.doctype.packed_item.packed_item import make_packing_list
make_packing_list(self, 'entries')
else:
self.set('packing_details', [])
if cint(self.is_pos) == 1:
if flt(self.paid_amount) == 0:
if self.cash_bank_account:
frappe.db.set(self, 'paid_amount',
(flt(self.grand_total) - flt(self.write_off_amount)))
else:
# show message that the amount is not paid
frappe.db.set(self,'paid_amount',0)
frappe.msgprint(_("Note: Payment Entry will not be created since 'Cash or Bank Account' was not specified"))
else:
frappe.db.set(self,'paid_amount',0)
def check_prev_docstatus(self):
for d in self.get('entries'):
if d.sales_order:
submitted = frappe.db.sql("""select name from `tabSales Order`
where docstatus = 1 and name = %s""", d.sales_order)
if not submitted:
frappe.throw(_("Sales Order {0} is not submitted").format(d.sales_order))
if d.delivery_note:
submitted = frappe.db.sql("""select name from `tabDelivery Note`
where docstatus = 1 and name = %s""", d.delivery_note)
if not submitted:
throw(_("Delivery Note {0} is not submitted").format(d.delivery_note))
def update_stock_ledger(self):
sl_entries = []
for d in self.get_item_list():
if frappe.db.get_value("Item", d.item_code, "is_stock_item") == "Yes" \
and d.warehouse:
sl_entries.append(self.get_sl_entries(d, {
"actual_qty": -1*flt(d.qty),
"stock_uom": frappe.db.get_value("Item", d.item_code, "stock_uom")
}))
self.make_sl_entries(sl_entries)
def make_gl_entries(self, repost_future_gle=True):
gl_entries = self.get_gl_entries()
if gl_entries:
from erpnext.accounts.general_ledger import make_gl_entries
update_outstanding = cint(self.is_pos) and self.write_off_account \
and 'No' or 'Yes'
make_gl_entries(gl_entries, cancel=(self.docstatus == 2),
update_outstanding=update_outstanding, merge_entries=False)
if update_outstanding == "No":
from erpnext.accounts.doctype.gl_entry.gl_entry import update_outstanding_amt
update_outstanding_amt(self.debit_to, self.doctype, self.name)
if repost_future_gle and cint(self.update_stock) \
and cint(frappe.defaults.get_global_default("auto_accounting_for_stock")):
items, warehouse_account = self.get_items_and_warehouse_accounts()
update_gl_entries_after(self.posting_date, self.posting_time,
warehouse_account, items)
def get_gl_entries(self, warehouse_account=None):
from erpnext.accounts.general_ledger import merge_similar_entries
gl_entries = []
self.make_customer_gl_entry(gl_entries)
self.make_tax_gl_entries(gl_entries)
self.make_item_gl_entries(gl_entries)
# merge gl entries before adding pos entries
gl_entries = merge_similar_entries(gl_entries)
self.make_pos_gl_entries(gl_entries)
return gl_entries
def make_customer_gl_entry(self, gl_entries):
if self.grand_total:
gl_entries.append(
self.get_gl_dict({
"account": self.debit_to,
"against": self.against_income_account,
"debit": self.grand_total,
"remarks": self.remarks,
"against_voucher": self.name,
"against_voucher_type": self.doctype,
})
)
def make_tax_gl_entries(self, gl_entries):
for tax in self.get("other_charges"):
if flt(tax.tax_amount_after_discount_amount):
gl_entries.append(
self.get_gl_dict({
"account": tax.account_head,
"against": self.debit_to,
"credit": flt(tax.tax_amount_after_discount_amount),
"remarks": self.remarks,
"cost_center": tax.cost_center
})
)
def make_item_gl_entries(self, gl_entries):
# income account gl entries
for item in self.get("entries"):
if flt(item.base_amount):
gl_entries.append(
self.get_gl_dict({
"account": item.income_account,
"against": self.debit_to,
"credit": item.base_amount,
"remarks": self.remarks,
"cost_center": item.cost_center
})
)
# expense account gl entries
if cint(frappe.defaults.get_global_default("auto_accounting_for_stock")) \
and cint(self.update_stock):
gl_entries += super(SalesInvoice, self).get_gl_entries()
def make_pos_gl_entries(self, gl_entries):
if cint(self.is_pos) and self.cash_bank_account and self.paid_amount:
# POS, make payment entries
gl_entries.append(
self.get_gl_dict({
"account": self.debit_to,
"against": self.cash_bank_account,
"credit": self.paid_amount,
"remarks": self.remarks,
"against_voucher": self.name,
"against_voucher_type": self.doctype,
})
)
gl_entries.append(
self.get_gl_dict({
"account": self.cash_bank_account,
"against": self.debit_to,
"debit": self.paid_amount,
"remarks": self.remarks,
})
)
# write off entries, applicable if only pos
if self.write_off_account and self.write_off_amount:
gl_entries.append(
self.get_gl_dict({
"account": self.debit_to,
"against": self.write_off_account,
"credit": self.write_off_amount,
"remarks": self.remarks,
"against_voucher": self.name,
"against_voucher_type": self.doctype,
})
)
gl_entries.append(
self.get_gl_dict({
"account": self.write_off_account,
"against": self.debit_to,
"debit": self.write_off_amount,
"remarks": self.remarks,
"cost_center": self.write_off_cost_center
})
)
def update_c_form(self):
"""Update amended id in C-form"""
if self.c_form_no and self.amended_from:
frappe.db.sql("""update `tabC-Form Invoice Detail` set invoice_no = %s,
invoice_date = %s, territory = %s, net_total = %s,
grand_total = %s where invoice_no = %s and parent = %s""",
(self.name, self.posting_date, self.territory, self.net_total,
self.grand_total, self.amended_from, self.c_form_no))
def get_details(self, item):
if item:
get_item_details(self,item)
return "Done"
def get_merchandise_details(self,item):
if item:
get_merchandise_item_details(self,item)
return "Done"
def get_size_details(self, index):
for d in self.get('sales_invoice_items_one'):
if cint(d.idx) == index:
if d.tailoring_item_code and d.tailoring_size and d.width:
d.fabric_qty = frappe.db.get_value('Size Item',{'parent':d.tailoring_item_code,'size':d.tailoring_size,'width':d.width},'fabric_qty')
return True
@frappe.whitelist()
def get_bank_cash_account(mode_of_payment):
val = frappe.db.get_value("Mode of Payment", mode_of_payment, "default_account")
if not val:
frappe.msgprint(_("Please set default Cash or Bank account in Mode of Payment {0}").format(mode_of_payment))
return {
"cash_bank_account": val
}
@frappe.whitelist()
def get_income_account(doctype, txt, searchfield, start, page_len, filters):
from erpnext.controllers.queries import get_match_cond
# income account can be any Credit account,
# but can also be a Asset account with account_type='Income Account' in special circumstances.
# Hence the first condition is an "OR"
return frappe.db.sql("""select tabAccount.name from `tabAccount`
where (tabAccount.report_type = "Profit and Loss"
or tabAccount.account_type = "Income Account")
and tabAccount.group_or_ledger="Ledger"
and tabAccount.docstatus!=2
and ifnull(tabAccount.master_type, "")=""
and ifnull(tabAccount.master_name, "")=""
and tabAccount.company = '%(company)s'
and tabAccount.%(key)s LIKE '%(txt)s'
%(mcond)s""" % {'company': filters['company'], 'key': searchfield,
'txt': "%%%s%%" % txt, 'mcond':get_match_cond(doctype)})
@frappe.whitelist()
def make_delivery_note(source_name, target_doc=None):
def set_missing_values(source, target):
target.ignore_pricing_rule = 1
target.run_method("set_missing_values")
target.run_method("calculate_taxes_and_totals")
def update_item(source_doc, target_doc, source_parent):
target_doc.base_amount = (flt(source_doc.qty) - flt(source_doc.delivered_qty)) * \
flt(source_doc.base_rate)
target_doc.amount = (flt(source_doc.qty) - flt(source_doc.delivered_qty)) * \
flt(source_doc.rate)
# target_doc.qty = flt(source_doc.qty) - flt(source_doc.delivered_qty)
target_doc.serial_no = split_serial_no(source_doc) if frappe.db.get_value('Item', source_doc.item_code, 'is_stock_item') == 'Yes' and frappe.db.get_value('Item', source_doc.item_code, 'has_serial_no') == 'Yes' else ''
target_doc.qty = count_serial_no(target_doc.serial_no) if frappe.db.get_value('Item', source_doc.item_code, 'is_stock_item') == 'Yes' and frappe.db.get_value('Item', source_doc.item_code, 'has_serial_no') == 'Yes' else flt(source_doc.qty) - flt(source_doc.delivered_qty)
doclist = get_mapped_doc("Sales Invoice", source_name, {
"Sales Invoice": {
"doctype": "Delivery Note",
"validation": {
"docstatus": ["=", 1]
}
},
"Sales Invoice Item": {
"doctype": "Delivery Note Item",
"field_map": {
"name": "prevdoc_detail_docname",
"parent": "against_sales_invoice",
"serial_no": "serial_no"
},
"postprocess": update_item,
"condition" : lambda doc: len(split_serial_no(doc)) > 0 if frappe.db.get_value('Item', doc.item_code, 'is_stock_item') == 'Yes' and frappe.db.get_value('Item', doc.item_code, 'has_serial_no') == 'Yes' else doc.item_code
},
"Sales Taxes and Charges": {
"doctype": "Sales Taxes and Charges",
"add_if_empty": True
},
"Sales Team": {
"doctype": "Sales Team",
"field_map": {
"incentives": "incentives"
},
"add_if_empty": True
}
}, target_doc, set_missing_values)
return doclist
| rohitwaghchaure/New_Theme_Erp | erpnext/accounts/doctype/sales_invoice/sales_invoice.py | Python | agpl-3.0 | 24,951 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
import unittest
import pbtest
PB_CHROME_QUNIT_TEST_URL = pbtest.PB_EXT_BG_URL_BASE + "tests/index.html"
class Test(pbtest.PBSeleniumTest):
def test_run_qunit_tests(self):
# First load a dummy URL to make sure the extension is activated.
# Otherwise, we ran into a race condition where Qunit runs (& fails)
# while chrome.extension is undefined.
# Probably related to Chromium bugs 129181 & 132148
self.driver.get(pbtest.PB_CHROME_BG_URL) # load a dummy page
self.driver.get(PB_CHROME_QUNIT_TEST_URL)
failed = self.txt_by_css("#qunit-testresult > span.failed")
passed = self.txt_by_css("#qunit-testresult > span.passed")
total = self.txt_by_css("#qunit-testresult > span.total")
print "User agent:", self.txt_by_css("#qunit-userAgent")
print "QUnits tests: Failed: %s Passed: %s Total: %s" %\
(failed, passed, total)
self.assertEqual("0", failed)
# TODO: Report failed QUnit tests
if __name__ == "__main__":
unittest.main()
| cynthiatekwe/privacybadgerchrome | tests/selenium/qunit_test.py | Python | gpl-3.0 | 1,129 |
from sympy.vector.coordsysrect import CoordSys3D
from sympy.vector.deloperator import Del
from sympy.vector.scalar import BaseScalar
from sympy.vector.vector import Vector, BaseVector
from sympy.vector.operators import gradient, curl, divergence
from sympy import diff, integrate, S, simplify
from sympy.core import sympify
from sympy.vector.dyadic import Dyadic
def express(expr, system, system2=None, variables=False):
"""
Global function for 'express' functionality.
Re-expresses a Vector, Dyadic or scalar(sympyfiable) in the given
coordinate system.
If 'variables' is True, then the coordinate variables (base scalars)
of other coordinate systems present in the vector/scalar field or
dyadic are also substituted in terms of the base scalars of the
given system.
Parameters
==========
expr : Vector/Dyadic/scalar(sympyfiable)
The expression to re-express in CoordSys3D 'system'
system: CoordSys3D
The coordinate system the expr is to be expressed in
system2: CoordSys3D
The other coordinate system required for re-expression
(only for a Dyadic Expr)
variables : boolean
Specifies whether to substitute the coordinate variables present
in expr, in terms of those of parameter system
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy import Symbol, cos, sin
>>> N = CoordSys3D('N')
>>> q = Symbol('q')
>>> B = N.orient_new_axis('B', q, N.k)
>>> from sympy.vector import express
>>> express(B.i, N)
(cos(q))*N.i + (sin(q))*N.j
>>> express(N.x, B, variables=True)
B.x*cos(q) - B.y*sin(q)
>>> d = N.i.outer(N.i)
>>> express(d, B, N) == (cos(q))*(B.i|N.i) + (-sin(q))*(B.j|N.i)
True
"""
if expr == 0 or expr == Vector.zero:
return expr
if not isinstance(system, CoordSys3D):
raise TypeError("system should be a CoordSys3D \
instance")
if isinstance(expr, Vector):
if system2 is not None:
raise ValueError("system2 should not be provided for \
Vectors")
# Given expr is a Vector
if variables:
# If variables attribute is True, substitute
# the coordinate variables in the Vector
system_list = []
for x in expr.atoms(BaseScalar, BaseVector):
if x.system != system:
system_list.append(x.system)
system_list = set(system_list)
subs_dict = {}
for f in system_list:
subs_dict.update(f.scalar_map(system))
expr = expr.subs(subs_dict)
# Re-express in this coordinate system
outvec = Vector.zero
parts = expr.separate()
for x in parts:
if x != system:
temp = system.rotation_matrix(x) * parts[x].to_matrix(x)
outvec += matrix_to_vector(temp, system)
else:
outvec += parts[x]
return outvec
elif isinstance(expr, Dyadic):
if system2 is None:
system2 = system
if not isinstance(system2, CoordSys3D):
raise TypeError("system2 should be a CoordSys3D \
instance")
outdyad = Dyadic.zero
var = variables
for k, v in expr.components.items():
outdyad += (express(v, system, variables=var) *
(express(k.args[0], system, variables=var) |
express(k.args[1], system2, variables=var)))
return outdyad
else:
if system2 is not None:
raise ValueError("system2 should not be provided for \
Vectors")
if variables:
# Given expr is a scalar field
system_set = set([])
expr = sympify(expr)
# Substitute all the coordinate variables
for x in expr.atoms(BaseScalar):
if x.system != system:
system_set.add(x.system)
subs_dict = {}
for f in system_set:
subs_dict.update(f.scalar_map(system))
return expr.subs(subs_dict)
return expr
def directional_derivative(field, direction_vector):
"""
Returns the directional derivative of a scalar or vector field computed
along a given vector, in the coordinate system in which the field's
parameters are expressed.
Parameters
==========
field : Vector or Scalar
The scalar or vector field to compute the directional derivative of
direction_vector : Vector
The vector along which the directional derivative is computed.
Examples
========
>>> from sympy.vector import CoordSys3D, directional_derivative
>>> R = CoordSys3D('R')
>>> f1 = R.x*R.y*R.z
>>> v1 = 3*R.i + 4*R.j + R.k
>>> directional_derivative(f1, v1)
R.x*R.y + 4*R.x*R.z + 3*R.y*R.z
>>> f2 = 5*R.x**2*R.z
>>> directional_derivative(f2, v1)
5*R.x**2 + 30*R.x*R.z
"""
from sympy.vector.operators import _get_coord_sys_from_expr
coord_sys = _get_coord_sys_from_expr(field)
if len(coord_sys) > 0:
# TODO: This gets a random coordinate system in case of multiple ones:
coord_sys = next(iter(coord_sys))
field = express(field, coord_sys, variables=True)
i, j, k = coord_sys.base_vectors()
x, y, z = coord_sys.base_scalars()
out = Vector.dot(direction_vector, i) * diff(field, x)
out += Vector.dot(direction_vector, j) * diff(field, y)
out += Vector.dot(direction_vector, k) * diff(field, z)
if out == 0 and isinstance(field, Vector):
out = Vector.zero
return out
elif isinstance(field, Vector):
return Vector.zero
else:
return S.Zero
def laplacian(expr):
"""
Return the laplacian of the given field computed in terms of
the base scalars of the given coordinate system.
Parameters
==========
expr : SymPy Expr or Vector
expr denotes a scalar or vector field.
Examples
========
>>> from sympy.vector import CoordSys3D, laplacian
>>> R = CoordSys3D('R')
>>> f = R.x**2*R.y**5*R.z
>>> laplacian(f)
20*R.x**2*R.y**3*R.z + 2*R.y**5*R.z
>>> f = R.x**2*R.i + R.y**3*R.j + R.z**4*R.k
>>> laplacian(f)
2*R.i + 6*R.y*R.j + 12*R.z**2*R.k
"""
delop = Del()
if expr.is_Vector:
return (gradient(divergence(expr)) - curl(curl(expr))).doit()
return delop.dot(delop(expr)).doit()
def is_conservative(field):
"""
Checks if a field is conservative.
Parameters
==========
field : Vector
The field to check for conservative property
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy.vector import is_conservative
>>> R = CoordSys3D('R')
>>> is_conservative(R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k)
True
>>> is_conservative(R.z*R.j)
False
"""
# Field is conservative irrespective of system
# Take the first coordinate system in the result of the
# separate method of Vector
if not isinstance(field, Vector):
raise TypeError("field should be a Vector")
if field == Vector.zero:
return True
return curl(field).simplify() == Vector.zero
def is_solenoidal(field):
"""
Checks if a field is solenoidal.
Parameters
==========
field : Vector
The field to check for solenoidal property
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy.vector import is_solenoidal
>>> R = CoordSys3D('R')
>>> is_solenoidal(R.y*R.z*R.i + R.x*R.z*R.j + R.x*R.y*R.k)
True
>>> is_solenoidal(R.y * R.j)
False
"""
# Field is solenoidal irrespective of system
# Take the first coordinate system in the result of the
# separate method in Vector
if not isinstance(field, Vector):
raise TypeError("field should be a Vector")
if field == Vector.zero:
return True
return divergence(field).simplify() is S.Zero
def scalar_potential(field, coord_sys):
"""
Returns the scalar potential function of a field in a given
coordinate system (without the added integration constant).
Parameters
==========
field : Vector
The vector field whose scalar potential function is to be
calculated
coord_sys : CoordSys3D
The coordinate system to do the calculation in
Examples
========
>>> from sympy.vector import CoordSys3D
>>> from sympy.vector import scalar_potential, gradient
>>> R = CoordSys3D('R')
>>> scalar_potential(R.k, R) == R.z
True
>>> scalar_field = 2*R.x**2*R.y*R.z
>>> grad_field = gradient(scalar_field)
>>> scalar_potential(grad_field, R)
2*R.x**2*R.y*R.z
"""
# Check whether field is conservative
if not is_conservative(field):
raise ValueError("Field is not conservative")
if field == Vector.zero:
return S.Zero
# Express the field entirely in coord_sys
# Substitute coordinate variables also
if not isinstance(coord_sys, CoordSys3D):
raise TypeError("coord_sys must be a CoordSys3D")
field = express(field, coord_sys, variables=True)
dimensions = coord_sys.base_vectors()
scalars = coord_sys.base_scalars()
# Calculate scalar potential function
temp_function = integrate(field.dot(dimensions[0]), scalars[0])
for i, dim in enumerate(dimensions[1:]):
partial_diff = diff(temp_function, scalars[i + 1])
partial_diff = field.dot(dim) - partial_diff
temp_function += integrate(partial_diff, scalars[i + 1])
return temp_function
def scalar_potential_difference(field, coord_sys, point1, point2):
"""
Returns the scalar potential difference between two points in a
certain coordinate system, wrt a given field.
If a scalar field is provided, its values at the two points are
considered. If a conservative vector field is provided, the values
of its scalar potential function at the two points are used.
Returns (potential at point2) - (potential at point1)
The position vectors of the two Points are calculated wrt the
origin of the coordinate system provided.
Parameters
==========
field : Vector/Expr
The field to calculate wrt
coord_sys : CoordSys3D
The coordinate system to do the calculations in
point1 : Point
The initial Point in given coordinate system
position2 : Point
The second Point in the given coordinate system
Examples
========
>>> from sympy.vector import CoordSys3D, Point
>>> from sympy.vector import scalar_potential_difference
>>> R = CoordSys3D('R')
>>> P = R.origin.locate_new('P', R.x*R.i + R.y*R.j + R.z*R.k)
>>> vectfield = 4*R.x*R.y*R.i + 2*R.x**2*R.j
>>> scalar_potential_difference(vectfield, R, R.origin, P)
2*R.x**2*R.y
>>> Q = R.origin.locate_new('O', 3*R.i + R.j + 2*R.k)
>>> scalar_potential_difference(vectfield, R, P, Q)
-2*R.x**2*R.y + 18
"""
if not isinstance(coord_sys, CoordSys3D):
raise TypeError("coord_sys must be a CoordSys3D")
if isinstance(field, Vector):
# Get the scalar potential function
scalar_fn = scalar_potential(field, coord_sys)
else:
# Field is a scalar
scalar_fn = field
# Express positions in required coordinate system
origin = coord_sys.origin
position1 = express(point1.position_wrt(origin), coord_sys,
variables=True)
position2 = express(point2.position_wrt(origin), coord_sys,
variables=True)
# Get the two positions as substitution dicts for coordinate variables
subs_dict1 = {}
subs_dict2 = {}
scalars = coord_sys.base_scalars()
for i, x in enumerate(coord_sys.base_vectors()):
subs_dict1[scalars[i]] = x.dot(position1)
subs_dict2[scalars[i]] = x.dot(position2)
return scalar_fn.subs(subs_dict2) - scalar_fn.subs(subs_dict1)
def matrix_to_vector(matrix, system):
"""
Converts a vector in matrix form to a Vector instance.
It is assumed that the elements of the Matrix represent the
measure numbers of the components of the vector along basis
vectors of 'system'.
Parameters
==========
matrix : SymPy Matrix, Dimensions: (3, 1)
The matrix to be converted to a vector
system : CoordSys3D
The coordinate system the vector is to be defined in
Examples
========
>>> from sympy import ImmutableMatrix as Matrix
>>> m = Matrix([1, 2, 3])
>>> from sympy.vector import CoordSys3D, matrix_to_vector
>>> C = CoordSys3D('C')
>>> v = matrix_to_vector(m, C)
>>> v
C.i + 2*C.j + 3*C.k
>>> v.to_matrix(C) == m
True
"""
outvec = Vector.zero
vects = system.base_vectors()
for i, x in enumerate(matrix):
outvec += x * vects[i]
return outvec
def _path(from_object, to_object):
"""
Calculates the 'path' of objects starting from 'from_object'
to 'to_object', along with the index of the first common
ancestor in the tree.
Returns (index, list) tuple.
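An illustrative example (frames created with ``locate_new``; the index
points at the first common ancestor in the returned path):
    >>> from sympy.vector import CoordSys3D
    >>> N = CoordSys3D('N')
    >>> A = N.locate_new('A', N.i)
    >>> B = N.locate_new('B', N.j)
    >>> idx, path = _path(A, B)
    >>> idx, [str(f) for f in path]
    (1, ['A', 'N', 'B'])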
"""
if from_object._root != to_object._root:
raise ValueError("No connecting path found between " +
str(from_object) + " and " + str(to_object))
other_path = []
obj = to_object
while obj._parent is not None:
other_path.append(obj)
obj = obj._parent
other_path.append(obj)
object_set = set(other_path)
from_path = []
obj = from_object
while obj not in object_set:
from_path.append(obj)
obj = obj._parent
index = len(from_path)
i = other_path.index(obj)
while i >= 0:
from_path.append(other_path[i])
i -= 1
return index, from_path
def orthogonalize(*vlist, **kwargs):
"""
Takes a sequence of independent vectors and orthogonalizes them
using the Gram - Schmidt process. Returns a list of
orthogonal or orthonormal vectors.
Parameters
==========
vlist : sequence of independent vectors to be made orthogonal.
orthonormal : Optional parameter
Set to True if the vectors returned should be
orthonormal.
Default: False
Examples
========
>>> from sympy.vector.coordsysrect import CoordSys3D
>>> from sympy.vector.vector import Vector, BaseVector
>>> from sympy.vector.functions import orthogonalize
>>> C = CoordSys3D('C')
>>> i, j, k = C.base_vectors()
>>> v1 = i + 2*j
>>> v2 = 2*i + 3*j
>>> orthogonalize(v1, v2)
[C.i + 2*C.j, 2/5*C.i + (-1/5)*C.j]
References
==========
.. [1] https://en.wikipedia.org/wiki/Gram-Schmidt_process
"""
orthonormal = kwargs.get('orthonormal', False)
if not all(isinstance(vec, Vector) for vec in vlist):
raise TypeError('Each element must be of Type Vector')
ortho_vlist = []
for i, term in enumerate(vlist):
for j in range(i):
term -= ortho_vlist[j].projection(vlist[i])
# TODO : The following line introduces a performance issue
# and needs to be changed once a good solution for issue #10279 is
# found.
if simplify(term).equals(Vector.zero):
raise ValueError("Vector set not linearly independent")
ortho_vlist.append(term)
if orthonormal:
ortho_vlist = [vec.normalize() for vec in ortho_vlist]
return ortho_vlist
| kaushik94/sympy | sympy/vector/functions.py | Python | bsd-3-clause | 15,689 |
import os,sys
import argparse
import pickle
parser = argparse.ArgumentParser()
parser.add_argument('-b','--bed',help='Original bed file of alignments')
parser.add_argument('-a','--agp',help='AGP file for the final scaffolds')
parser.add_argument('-l','--length',help="Length of input unitigs")
args = parser.parse_args()
scaffolds_current = {}
with open(args.agp,'r') as f:
for line in f:
attrs = line.split()
if attrs[4] == 'N':
continue
if attrs[0] not in scaffolds_current:
scaffolds_current[attrs[0]] = []
contig = attrs[5]
if attrs[-1] == '+':
scaffolds_current[attrs[0]].append(contig+':B')
scaffolds_current[attrs[0]].append(contig+':E')
else:
scaffolds_current[attrs[0]].append(contig+':E')
scaffolds_current[attrs[0]].append(contig+':B')
#print breakpoints
scaff_id = 1
scaffolds_new = {}
unitig_length = {}
old2new = {}
def update_bed(expanded_scaffold):
contig2scaffold = {}
contig2info = {}
scaffold_length = {}
#print re_counts
for key in expanded_scaffold:
path = expanded_scaffold[key]
scaffold_length[key] = 0
offset = 0
for i in xrange(0,len(path)-1,2):
contig = path[i].split(':')[0]
contig2scaffold[contig] = key
ori = path[i].split(':')[1] + path[i+1].split(':')[1]
if ori == 'BE':
contig2info[contig] = (offset,offset+unitig_length[contig],'FOW')
else:
contig2info[contig] = (offset,offset+unitig_length[contig],'REV')
offset += unitig_length[contig]
scaffold_length[key] += unitig_length[contig]
o_lines = ""
count = 0
prev_line = ""
#print contig2scaffold
#sys.exit(1)
if True:
#output = open(args.directory+'/alignment_iteration_'+str(iteration)+'.bed','w')
with open(args.bed,'r') as f:
olines = ""
prev_scaffold = ""
for line in f:
if prev_line == "":
prev_line = line
continue
else:
prev_attrs = prev_line.split()
curr_attrs = line.split()
try:
prev_contig = prev_attrs[0]
curr_contig = curr_attrs[0]
prev_read = prev_attrs[3].split('/')[0]
curr_read = curr_attrs[3].split('/')[0]
first = prev_attrs[3].split('/')[1]
second = curr_attrs[3].split('/')[1]
except:
continue
if prev_contig in contig2scaffold and curr_contig in contig2scaffold:
prev_scaffold = contig2scaffold[prev_contig]
curr_scaffold = contig2scaffold[curr_contig]
if prev_read == curr_read and first == '1' and second == '2':
# if prev_read == curr_read and first == '1' and second == '2':
prev_info = contig2info[prev_contig]
prev_start = int(prev_attrs[1])
prev_end = int(prev_attrs[2])
new_prev_start = prev_start + prev_info[0]
new_prev_end = prev_end + prev_info[0]
olines += "0\t"+prev_scaffold+'\t'+str(new_prev_start)+"\t0\t"
#o_lines += prev_scaffold+'\t'+str(new_prev_start)+'\t'+str(new_prev_end)+'\t'+prev_attrs[3]+'\n'
count += 1
curr_info = contig2info[curr_contig]
curr_start = int(curr_attrs[1])
curr_end = int(curr_attrs[2])
new_curr_start = curr_start + curr_info[0]
new_curr_end = curr_end + curr_info[0]
olines += "1\t"+curr_scaffold+'\t'+str(new_curr_start)+"\t1\n"
#o_lines += curr_scaffold+'\t'+str(new_curr_start)+'\t'+str(new_curr_end)+'\t'+curr_attrs[3]+'\n'
count += 1
if count == 1000000:
print olines
#output.write(o_lines)
count = 0
olines = ""
prev_line = line
#write remaining lines
print olines
#output.write(o_lines)
#output.close()
with open(args.length,'r') as f:
for line in f:
attrs = line.split()
unitig_length[attrs[0]] = int(attrs[1])
update_bed(scaffolds_current)
| machinegun/SALSA | alignments2txt.py | Python | mit | 4,812 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Simple command line interface to pyluxafor.
"""
from __future__ import division, print_function, absolute_import
import argparse
import sys
import logging
from pyluxafor import Devices
from pyluxafor import Wave, Pattern, Leds
from pyluxafor import __version__
__author__ = 'Magnus Isaksson'
__copyright__ = 'Magnus Isaksson'
__license__ = 'gpl3'
_logger = logging.getLogger(__name__)
def add_jump2color_parser(subparsers):
parser = subparsers.add_parser('jump2color',
description='Switches color on your Luxafor device.',
help='Switches color on your Luxafor device.')
parser.set_defaults(runner=jump2color)
parser.add_argument('-c',
'--color',
required=True,
help='Color in 3 hex codes (e.g. #00FF00 for green).')
def jump2color(args):
with Devices().first as d:
d.jump2color(args.color, leds=Leds.all)
return 'Jumping to color: {}'.format(args.color)
def add_fade2color_parser(subparsers):
parser = subparsers.add_parser('fade2color',
description='Fade to color on your Luxafor device.',
help='Fade to color on your Luxafor device.')
parser.set_defaults(runner=fade2color)
parser.add_argument('-c',
'--color',
required=True,
                        help='Color as an RGB hex triplet (e.g. #00FF00 for green).')
parser.add_argument('-s',
'--speed',
required=False,
default=100,
type=int,
                        help='Fading speed [0-255]; lower values are faster.')
def fade2color(args):
if args.speed < 0 or args.speed > 255:
return 'Error: Speed needs to be an integer between 0 and 255.'
with Devices().first as d:
d.fade2color(args.color, leds=Leds.all, speed=args.speed)
return 'Fading to color: {} with speed: {}'.format(args.color, args.speed)
def add_blink_parser(subparsers):
parser = subparsers.add_parser('blink',
description='Blink color on your Luxafor device.',
help='Blink color on your Luxafor device.')
parser.set_defaults(runner=blink)
parser.add_argument('-c',
'--color',
required=True,
                        help='Color as an RGB hex triplet (e.g. #00FF00 for green).')
parser.add_argument('-s',
'--speed',
required=False,
default=100,
type=int,
                        help='Blink speed [0-255]; lower values are faster.')
parser.add_argument('-r',
'--repeats',
required=False,
default=2,
type=int,
help='Repeats [1-255].')
def blink(args):
if args.speed < 0 or args.speed > 255:
    return 'Error: Speed needs to be an integer between 0 and 255.'
if args.repeats < 1 or args.repeats > 255:
return 'Error: Repeats needs to be an integer between 1 and 255.'
with Devices().first as d:
d.blink(args.color, leds=Leds.all, speed=args.speed, repeats=args.repeats)
return 'Blinking color: {}, {} times with speed: {}'.format(args.color, args.repeats, args.speed)
def add_pattern_parser(subparsers):
parser = subparsers.add_parser('pattern',
description='Run pattern on your Luxafor device.',
help='Run pattern on your Luxafor device.')
parser.set_defaults(runner=pattern)
parser.add_argument('-p',
'--pattern',
required=True,
help=', '.join([p for p in Pattern._fields]))
parser.add_argument('-r',
'--repeats',
type=int,
default=2,
required=False,
help='Repeats [1-255].')
def pattern(args):
args.pattern = args.pattern.lower()
if args.pattern not in Pattern._fields:
return 'Error: {} is not a valid pattern.'.format(args.pattern)
if args.repeats < 1 or args.repeats > 255:
    return 'Error: Repeats needs to be an integer between 1 and 255.'
with Devices().first as d:
d.pattern(pattern_type=getattr(Pattern, args.pattern), repeats=args.repeats)
return 'Running pattern {} {} times.'.format(args.pattern, args.repeats)
def add_wave_parser(subparsers):
parser = subparsers.add_parser('wave',
description='Run wave on your Luxafor device.',
help='Run wave on your Luxafor device.')
parser.set_defaults(runner=wave)
parser.add_argument('-c',
'--color',
required=True,
                        help='Color as an RGB hex triplet (e.g. #00FF00 for green).')
parser.add_argument('-w',
'--wave',
required=True,
help=', '.join([p for p in Wave._fields]))
parser.add_argument('-s',
'--speed',
required=False,
default=100,
type=int,
                        help='Wave speed [0-255]; lower values are faster.')
parser.add_argument('-r',
'--repeats',
required=False,
default=2,
type=int,
help='Repeats [1-255].')
def wave(args):
    wave_name = args.wave.lower()
    if wave_name not in Wave._fields:
        return 'Error: {} is not a valid wave type.'.format(wave_name)
if args.speed < 0 or args.speed > 255:
return 'Error: Speed needs to be an integer between 0 and 255.'
if args.repeats < 1 or args.repeats > 255:
        return 'Error: Repeats needs to be an integer between 1 and 255.'
with Devices().first as d:
        d.wave(color=args.color, wave_type=getattr(Wave, wave_name), speed=args.speed, repeats=args.repeats)
return 'Running a {} wave with color {} and speed {}, {} times.'.format(args.wave, args.color,
args.speed, args.repeats)
def add_off_parser(subparsers):
parser = subparsers.add_parser('off',
                                   description='Turn off all LEDs on your Luxafor device.',
                                   help='Turn off all LEDs on your Luxafor device.')
parser.set_defaults(runner=off)
def off(args):
with Devices().first as d:
d.off()
    return 'Turning off all LEDs on your device.'
def add_list_devices_parser(subparsers):
parser = subparsers.add_parser('devices',
description='List all Luxafor devices found on your system.',
help='List all Luxafor devices found on your system.')
parser.set_defaults(runner=list_devices)
def list_devices(args):
ans_str = 'Sorry, no Luxafor device found in the system.'
devices = ['Product: {}, Manufacturer: {}, Serial #: {}'.format(d.conn.product,
d.conn.manufacturer,
d.conn.serial_number.encode('utf-8')) for d in Devices().list]
if devices:
ans_str = '\n'.join(devices)
return '\nFound {} devices connected to your system.\n{}'.format(len(devices), ans_str)
def parse_args(args):
"""
Parse command line parameters
:param args: command line parameters as list of strings
:return: command line parameters as :obj:`argparse.Namespace`
"""
parser = argparse.ArgumentParser(
description="Simple command line interface using pyluxafor.")
# Global arguments
parser.add_argument('-v',
'--version',
action='version',
version='pyluxafor {ver}'.format(ver=__version__))
# Sub parsers
subparsers = parser.add_subparsers(title='subcommands')
add_list_devices_parser(subparsers)
add_jump2color_parser(subparsers)
add_fade2color_parser(subparsers)
add_blink_parser(subparsers)
add_pattern_parser(subparsers)
add_wave_parser(subparsers)
add_off_parser(subparsers)
return parser
def main(args):
parser = parse_args(args)
args = parser.parse_args(args)
# Do we know what to run?
if 'runner' not in args:
parser.print_help()
else:
ans = args.runner(args)
print(ans)
def run():
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
main(sys.argv[1:])
if __name__ == "__main__":
run()
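# Example invocations (assuming this module is installed as a console
# script; the entry point name below is an assumption, not defined here):
#
#   $ luxafor devices
#   $ luxafor jump2color -c '#00FF00'
#   $ luxafor blink -c '#FF0000' -s 50 -r 5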
|
mais4719/PyLuxafor
|
pyluxafor/cli.py
|
Python
|
gpl-3.0
| 9,126
|
import sys
import logging
import rds_config
import pymysql
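# rds_config is assumed to be a small local module, deployed alongside this
# handler, that defines db_username, db_password and db_name (the values
# below are illustrative placeholders):
#
#   db_username = "admin"
#   db_password = "secret"
#   db_name = "ExampleDB"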
#rds settings
rds_host = "mysqlforlambdatest.crrx8guwm61o.us-east-1.rds.amazonaws.com"
name = rds_config.db_username
password = rds_config.db_password
db_name = rds_config.db_name
logger = logging.getLogger()
logger.setLevel(logging.INFO)
try:
    conn = pymysql.connect(host=rds_host, user=name, password=password, db=db_name, connect_timeout=5)
except pymysql.MySQLError as e:
    logger.error("ERROR: Unexpected error: Could not connect to MySQL instance: %s", e)
sys.exit()
logger.info("SUCCESS: Connection to RDS mysql instance succeeded")
def handler(event, context):
"""
This function fetches content from mysql RDS instance
"""
item_count = 0
with conn.cursor() as cur:
cur.execute("create table Employee3 ( EmpID int NOT NULL, Name varchar(255) NOT NULL, PRIMARY KEY (EmpID))")
cur.execute('insert into Employee3 (EmpID, Name) values(1, "Joe")')
cur.execute('insert into Employee3 (EmpID, Name) values(2, "Bob")')
cur.execute('insert into Employee3 (EmpID, Name) values(3, "Mary")')
conn.commit()
cur.execute("select * from Employee3")
for row in cur:
item_count += 1
logger.info(row)
#print(row)
return "Added %d items from RDS MySQL table" %(item_count)
|
arunwagle/DemoRepo
|
AWS/Lambda/python-mysql/app.py
|
Python
|
apache-2.0
| 1,317
|
# -*- coding: utf-8 -*-
"""
Abstract interface to bounce windows and moratoria.
"""
__author__ = 'Jathan McCollum, Mark Thomas, Michael Shields'
__maintainer__ = 'Jathan McCollum'
__email__ = 'jathan.mccollum@teamaol.com'
__copyright__ = 'Copyright 2006-2012, AOL Inc.'
# Imports
from datetime import datetime, timedelta
from pytz import timezone, UTC
from trigger.conf import settings
from trigger import exceptions
# Constants
BOUNCE_VALUES = ('green', 'yellow', 'red')
BOUNCE_DEFAULT_TZ = timezone(settings.BOUNCE_DEFAULT_TZ)
BOUNCE_DEFAULT_COLOR = settings.BOUNCE_DEFAULT_COLOR
BOUNCE_VALUE_MAP = {
'red': 3,
'yellow': 2,
'green': 1,
}
# Exports
__all__ = ('BounceStatus', 'BounceWindow', 'bounce')
# Classes
class BounceStatus(object):
"""
An object that represents a bounce window risk-level status.
+ green: Low risk
+ yellow: Medium risk
+ red: High risk
Objects stringify to 'red', 'green', or 'yellow', and can be compared
against those strings. Objects can also be compared against each other.
'red' > 'yellow' > 'green'.
>>> green = BounceStatus('green')
>>> yellow = BounceStatus('yellow')
>>> print green
green
>>> yellow > green
True
:param status_name:
The colored risk-level status name.
"""
def __init__(self, status_name):
self.status_name = status_name
self.value = BOUNCE_VALUES.index(status_name)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.status_name)
def __str__(self):
return self.status_name
def __cmp__(self, other):
try:
return self.value.__cmp__(other.value)
except AttributeError:
# Other object is not a BounceStatus; maybe it's a string.
return self.value.__cmp__(BounceStatus(other).value)
class BounceWindow(object):
"""
Build a bounce window of 24 `~trigger.changemgmt.BounceStatus` objects.
You may either specify your own list of 24
`~trigger.changemgmt.BounceStatus` objects using ``status_by_hour``, or you
may omit this argument and specify your 'green', 'yellow', and 'red'
risk levels by using hyphenated and comma-separated text strings.
You may use digits ("14") or hyphenated ranges ("0-5") and may join these
together using a comma (",") with or without spacing separating them. For
example "0-5, 14" will be parsed into ``[0, 1, 2, 3, 4, 5, 14]``.
    The `default` color is used to fill in the gaps between the other colors,
    so that the resulting list always contains exactly 24 status objects.
>>> b = BounceWindow(green='0-3, 23', red='10', default='yellow')
>>> b.status()
<BounceStatus: yellow>
>>> b.next_ok('green')
datetime.datetime(2012, 12, 5, 4, 0, tzinfo=<UTC>)
>>> b.dump()
{0: <BounceStatus: green>,
1: <BounceStatus: green>,
2: <BounceStatus: green>,
3: <BounceStatus: green>,
4: <BounceStatus: yellow>,
5: <BounceStatus: yellow>,
6: <BounceStatus: yellow>,
7: <BounceStatus: yellow>,
8: <BounceStatus: yellow>,
9: <BounceStatus: yellow>,
10: <BounceStatus: red>,
11: <BounceStatus: yellow>,
12: <BounceStatus: yellow>,
13: <BounceStatus: yellow>,
14: <BounceStatus: yellow>,
15: <BounceStatus: yellow>,
16: <BounceStatus: yellow>,
17: <BounceStatus: yellow>,
18: <BounceStatus: yellow>,
19: <BounceStatus: yellow>,
20: <BounceStatus: yellow>,
21: <BounceStatus: yellow>,
22: <BounceStatus: yellow>,
23: <BounceStatus: green>}
You may modify the global default fallback color by setting
:setting:`BOUNCE_DEFAULT_COLOR` in your ``settings.py``.
    Although the query API is generic and could accommodate any sort of bounce
window policy, this constructor knows only about AOL's bounce windows,
which operate on "US/Eastern" time (worldwide), always change on hour
boundaries, and are the same every day. If that ever changes, only this
class will need to be updated.
End-users are not expected to create new ``BounceWindow`` objects;
instead, use `~trigger.changemgmt.bounce()` or
`~trigger.netdevices.NetDevice.bounce` to get an object,
then query its methods.
:param status_by_hour:
(Optional) A list of 24 `~trigger.changemgmt.BounceStatus` objects.
:param green:
Representative string of hours.
:param yellow:
Representative string of hours.
:param red:
Representative string of hours.
:param default:
The color used to fill in the gaps between other risk levels.
"""
# Prepopulate these objects to save a little horsepower
BOUNCE_STATUS = dict([(n, BounceStatus(n)) for n in BOUNCE_VALUES])
def __init__(self, status_by_hour=None, green=None, yellow=None, red=None,
default=BOUNCE_DEFAULT_COLOR):
# Parse the hours specified into BounceWindows
self._green = green
self._yellow = yellow
self._red = red
self.default = default
hours = {
'green': self._parse_hours(green),
'yellow': self._parse_hours(yellow),
'red': self._parse_hours(red),
}
self.hours = hours
self.hour_map = self._map_bounces(self.hours, default=default)
# Allow for providing status_by_hour, but don't rely on it
if status_by_hour is None:
status_by_hour = self.hour_map.values()
if not len(status_by_hour) == 24:
msg = 'There must be exactly 24 hours defined for this BounceWindow.'
raise exceptions.InvalidBounceWindow(msg)
# Make sure each status occurs at least once, or next_ok()
# might never return.
for status in BOUNCE_VALUE_MAP:
if status not in status_by_hour:
msg = '%s risk-level must be defined!' % status
raise exceptions.InvalidBounceWindow(msg)
self._status_by_hour = status_by_hour
def __repr__(self):
return "%s(green=%r, yellow=%r, red=%r, default=%r)" % (self.__class__.__name__,
self._green,
self._yellow,
self._red,
self.default)
def status(self, when=None):
"""
Return a `~trigger.changemgmt.BounceStatus` object for the specified
time or now.
:param when:
A ``datetime`` object.
"""
when_et = (when or datetime.now(tz=UTC)).astimezone(BOUNCE_DEFAULT_TZ)
# Return default during weekend moratorium, otherwise look it up.
        if (when_et.weekday() >= 5 or
            (when_et.weekday() == 0 and when_et.hour < 4) or
            (when_et.weekday() == 4 and when_et.hour >= 12)):
return BounceStatus(BOUNCE_DEFAULT_COLOR)
else:
return self._status_by_hour[when_et.hour]
def next_ok(self, status, when=None):
"""
        Return the next time at or after the specified time (default now) at
        which the bounce status will be equal to or less than the given status.
For example, ``next_ok('yellow')`` will return the time that the bounce
window becomes 'yellow' or 'green'. Returns UTC time.
:param status:
The colored risk-level status name.
:param when:
A ``datetime`` object.
"""
when = when or datetime.now(tz=UTC)
if self.status(when) <= status:
return when.astimezone(UTC)
when = datetime(when.year, when.month, when.day, when.hour, tzinfo=UTC)
when += timedelta(hours=1)
while self.status(when) > status:
when += timedelta(hours=1)
return when
def dump(self):
"""Dump a mapping of hour to status"""
return self.hour_map
def _get_bounces(self, hours, color):
"""
Return a list of hours mapped to bounce objects
:param hours:
A list of integers representing hours
:param color:
The risk-level color name.
"""
return zip(hours, [self.BOUNCE_STATUS[color]] * len(hours))
def _map_bounces(self, hdict, default=None):
"""
Map a dictionary of colors and hours into a dictionary keyed by hour and
the appropriate BounceStatus object.
        :param hdict:
            Dictionary mapping color names to lists of hours.
:param default:
The default bounce status name.
"""
if default is None:
default = self.default
status = []
for color, hours in hdict.iteritems():
status.extend(self._get_bounces(hours, color))
# Fill in missing keys with the default color
missing = [i for i in range(24) if i not in dict(status)]
if missing:
status.extend(self._get_bounces(missing, default))
return dict(status)
def _parse_hours(self, hs):
"""
Parse hour strings into lists of hours. Or if a list of hours is passed
in, just return it as is.
>>> parse_hours('0-3, 23')
[0, 1, 2, 3, 23]
        >>> parse_hours(range(3))
[0, 1, 2]
:param hs:
A string representation of hours.
"""
myhours = []
if hs is None:
return myhours
        # If it's already a list of hour integers, return it as-is.
if isinstance(hs, list):
return hs
# Split the pattern by ',' and then trim whitespace, carve hyphenated
# ranges out and then return a list of hours. More error-checking
# Coming "Soon".
blocks = hs.split(',')
for block in blocks:
# Clean whitespace and split on hyphens
parts = block.strip().split('-')
parts = [int(p) for p in parts] # make ints
if len(parts) == 1: # no hyphen
parts.append(parts[0] + 1)
elif len(parts) == 2:
parts[1] += 1
else:
raise RuntimeError("This should not have happened!")
# Return the individual hours
for i in range(*parts):
myhours.append(i)
return myhours
# Load ``bounce()`` from the location of ``bounce.py`` or provide a dummy that
# returns a hard-coded bounce window
from .bounce import bounce
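# A minimal usage sketch (hours are illustrative, mirroring the class
# docstring above; not taken from any production bounce policy):
#
#   bw = BounceWindow(green='0-3, 23', red='10', default='yellow')
#   bw.status()            # BounceStatus for the current hour (US/Eastern)
#   bw.next_ok('green')    # next UTC datetime at which the status is green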
|
eludom/trigger
|
trigger/changemgmt/__init__.py
|
Python
|
bsd-3-clause
| 10,715
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-02 12:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api', '0006_auto_20170413_1923'),
]
operations = [
migrations.AddField(
model_name='trips',
name='level_0',
field=models.IntegerField(blank=True, default=0, null=True),
),
]
|
NiJeLorg/paratransit_api
|
paratransit/api/migrations/0007_trips_level_0.py
|
Python
|
mit
| 465
|
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
'''Test cases for Virtual functions with wrong return type'''
import unittest
import py3kcompat as py3k
from PySide2 import QtWidgets
from helper import UsesQApplication
import warnings
warnings.simplefilter('error')
class MyWidget(QtWidgets.QWidget):
def __init__(self, parent=None):
super(MyWidget, self).__init__(parent)
def sizeHint(self):
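        # QWidget.sizeHint() is expected to return a QSize; this override
        # implicitly returns None, the wrong-return-type case this test
        # exercises (surfaced as a RuntimeWarning when the widget is shown).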
pass
class testCase(UsesQApplication):
def testVirtualReturn(self):
w = MyWidget()
if py3k.IS_PY3K:
self.assertWarns(RuntimeWarning, w.show)
else:
self.assertRaises(RuntimeWarning, w.show)
if __name__ == '__main__':
unittest.main()
|
qtproject/pyside-pyside
|
tests/QtWidgets/wrong_return_test.py
|
Python
|
lgpl-2.1
| 1,930
|
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test descendant package tracking code."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import COIN
MAX_ANCESTORS = 25
MAX_DESCENDANTS = 25
class MempoolPackagesTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [["-maxorphantx=1000"], ["-maxorphantx=1000", "-limitancestorcount=5"]]
# Build a transaction that spends parent_txid:vout
# Return amount sent
def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs):
send_value = satoshi_round((value - fee)/num_outputs)
inputs = [ {'txid' : parent_txid, 'vout' : vout} ]
outputs = {}
for i in range(num_outputs):
outputs[node.getnewaddress()] = send_value
rawtx = node.createrawtransaction(inputs, outputs)
signedtx = node.signrawtransaction(rawtx)
txid = node.sendrawtransaction(signedtx['hex'])
fulltx = node.getrawtransaction(txid, 1)
assert(len(fulltx['vout']) == num_outputs) # make sure we didn't generate a change output
return (txid, send_value)
def run_test(self):
''' Mine some blocks and have them mature. '''
self.nodes[0].generate(101)
utxo = self.nodes[0].listunspent(10)
txid = utxo[0]['txid']
vout = utxo[0]['vout']
value = utxo[0]['amount']
fee = Decimal("0.0001")
# MAX_ANCESTORS transactions off a confirmed tx should be fine
chain = []
for i in range(MAX_ANCESTORS):
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1)
value = sent_value
chain.append(txid)
# Check mempool has MAX_ANCESTORS transactions in it, and descendant
# count and fees should look correct
mempool = self.nodes[0].getrawmempool(True)
assert_equal(len(mempool), MAX_ANCESTORS)
descendant_count = 1
descendant_fees = 0
descendant_size = 0
descendants = []
ancestors = list(chain)
for x in reversed(chain):
# Check that getmempoolentry is consistent with getrawmempool
entry = self.nodes[0].getmempoolentry(x)
assert_equal(entry, mempool[x])
# Check that the descendant calculations are correct
assert_equal(mempool[x]['descendantcount'], descendant_count)
descendant_fees += mempool[x]['fee']
assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee'])
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN)
descendant_size += mempool[x]['size']
assert_equal(mempool[x]['descendantsize'], descendant_size)
descendant_count += 1
# Check that getmempooldescendants is correct
assert_equal(sorted(descendants), sorted(self.nodes[0].getmempooldescendants(x)))
descendants.append(x)
# Check that getmempoolancestors is correct
ancestors.remove(x)
assert_equal(sorted(ancestors), sorted(self.nodes[0].getmempoolancestors(x)))
# Check that getmempoolancestors/getmempooldescendants correctly handle verbose=true
v_ancestors = self.nodes[0].getmempoolancestors(chain[-1], True)
assert_equal(len(v_ancestors), len(chain)-1)
for x in v_ancestors.keys():
assert_equal(mempool[x], v_ancestors[x])
assert(chain[-1] not in v_ancestors.keys())
v_descendants = self.nodes[0].getmempooldescendants(chain[0], True)
assert_equal(len(v_descendants), len(chain)-1)
for x in v_descendants.keys():
assert_equal(mempool[x], v_descendants[x])
assert(chain[0] not in v_descendants.keys())
# Check that ancestor modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=1000)
mempool = self.nodes[0].getrawmempool(True)
ancestor_fees = 0
for x in chain:
ancestor_fees += mempool[x]['fee']
assert_equal(mempool[x]['ancestorfees'], ancestor_fees * COIN + 1000)
# Undo the prioritisetransaction for later tests
self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=-1000)
# Check that descendant modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=1000)
mempool = self.nodes[0].getrawmempool(True)
descendant_fees = 0
for x in reversed(chain):
descendant_fees += mempool[x]['fee']
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 1000)
# Adding one more transaction on to the chain should fail.
assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], txid, vout, value, fee, 1)
# Check that prioritising a tx before it's added to the mempool works
# First clear the mempool by mining a block.
self.nodes[0].generate(1)
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
# Prioritise a transaction that has been mined, then add it back to the
# mempool by using invalidateblock.
self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=2000)
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Keep node1's tip synced with node0
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
# Now check that the transaction is in the mempool, with the right modified fee
mempool = self.nodes[0].getrawmempool(True)
descendant_fees = 0
for x in reversed(chain):
descendant_fees += mempool[x]['fee']
if (x == chain[-1]):
assert_equal(mempool[x]['modifiedfee'], mempool[x]['fee']+satoshi_round(0.00002))
assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 2000)
# TODO: check that node1's mempool is as expected
# TODO: test ancestor size limits
# Now test descendant chain limits
txid = utxo[1]['txid']
value = utxo[1]['amount']
vout = utxo[1]['vout']
transaction_package = []
# First create one parent tx with 10 children
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 10)
parent_transaction = txid
for i in range(10):
transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value})
        # Sign and send up to MAX_DESCENDANTS transactions chained off the parent tx
for i in range(MAX_DESCENDANTS - 1):
utxo = transaction_package.pop(0)
(txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
for j in range(10):
transaction_package.append({'txid': txid, 'vout': j, 'amount': sent_value})
mempool = self.nodes[0].getrawmempool(True)
assert_equal(mempool[parent_transaction]['descendantcount'], MAX_DESCENDANTS)
# Sending one more chained transaction will fail
utxo = transaction_package.pop(0)
assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
# TODO: check that node1's mempool is as expected
# TODO: test descendant size limits
# Test reorg handling
# First, the basics:
self.nodes[0].generate(1)
sync_blocks(self.nodes)
self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash())
self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash())
# Now test the case where node1 has a transaction T in its mempool that
# depends on transactions A and B which are in a mined block, and the
# block containing A and B is disconnected, AND B is not accepted back
# into node1's mempool because its ancestor count is too high.
# Create 8 transactions, like so:
# Tx0 -> Tx1 (vout0)
# \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7
#
# Mine them in the next block, then generate a new tx8 that spends
# Tx1 and Tx7, and add to node1's mempool, then disconnect the
# last block.
# Create tx0 with 2 outputs
utxo = self.nodes[0].listunspent()
txid = utxo[0]['txid']
value = utxo[0]['amount']
vout = utxo[0]['vout']
send_value = satoshi_round((value - fee)/2)
inputs = [ {'txid' : txid, 'vout' : vout} ]
outputs = {}
for i in range(2):
outputs[self.nodes[0].getnewaddress()] = send_value
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransaction(rawtx)
txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
tx0_id = txid
value = send_value
# Create tx1
(tx1_id, tx1_value) = self.chain_transaction(self.nodes[0], tx0_id, 0, value, fee, 1)
# Create tx2-7
vout = 1
txid = tx0_id
for i in range(6):
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
vout = 0
value = sent_value
# Mine these in a block
self.nodes[0].generate(1)
self.sync_all()
# Now generate tx8, with a big fee
inputs = [ {'txid' : tx1_id, 'vout': 0}, {'txid' : txid, 'vout': 0} ]
outputs = { self.nodes[0].getnewaddress() : send_value + value - 4*fee }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransaction(rawtx)
txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
sync_mempools(self.nodes)
# Now try to disconnect the tip on each node...
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
sync_blocks(self.nodes)
if __name__ == '__main__':
MempoolPackagesTest().main()
|
romanornr/viacoin
|
test/functional/mempool_packages.py
|
Python
|
mit
| 10,536
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'DiskOffering.available_size_kb'
db.add_column(u'physical_diskoffering', 'available_size_kb',
self.gf('django.db.models.fields.PositiveIntegerField')(default=0),
keep_default=False)
def backwards(self, orm):
# Deleting field 'DiskOffering.available_size_kb'
db.delete_column(u'physical_diskoffering', 'available_size_kb')
models = {
u'physical.databaseinfra': {
'Meta': {'object_name': 'DatabaseInfra'},
'capacity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'endpoint_dns': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Engine']"}),
'environment': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '406', 'blank': 'True'}),
'per_database_size_mbytes': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'databaseinfras'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
},
u'physical.diskoffering': {
'Meta': {'object_name': 'DiskOffering'},
'available_size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'size_kb': ('django.db.models.fields.PositiveIntegerField', [], {}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.engine': {
'Meta': {'unique_together': "((u'version', u'engine_type'),)", 'object_name': 'Engine'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'engines'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.EngineType']"}),
'engine_upgrade_option': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_engine'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Engine']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'user_data_script': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'physical.enginetype': {
'Meta': {'object_name': 'EngineType'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'equivalent_environment': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Environment']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.host': {
'Meta': {'object_name': 'Host'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'future_host': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'monitor_url': ('django.db.models.fields.URLField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.instance': {
'Meta': {'unique_together': "((u'address', u'port'),)", 'object_name': 'Instance'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'databaseinfra': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'instances'", 'to': u"orm['physical.DatabaseInfra']"}),
'dns': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'future_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Instance']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
'hostname': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_arbiter': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'port': ('django.db.models.fields.IntegerField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.plan': {
'Meta': {'object_name': 'Plan'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'disk_offering': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'null': 'True', 'on_delete': 'models.PROTECT', 'to': u"orm['physical.DiskOffering']"}),
'engine': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plans'", 'to': u"orm['physical.Engine']"}),
'engine_equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'backwards_plan'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['physical.Plan']"}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['physical.Environment']", 'symmetrical': 'False'}),
'equivalent_plan': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['physical.Plan']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_ha': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_db_size': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'provider': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'replication_topology': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'replication_topology'", 'null': 'True', 'to': u"orm['physical.ReplicationTopology']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.planattribute': {
'Meta': {'object_name': 'PlanAttribute'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'plan': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'plan_attributes'", 'to': u"orm['physical.Plan']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'physical.replicationtopology': {
'Meta': {'object_name': 'ReplicationTopology'},
'class_path': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'engine': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'replication_topologies'", 'symmetrical': 'False', 'to': u"orm['physical.Engine']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['physical']
|
globocom/database-as-a-service
|
dbaas/physical/migrations/0025_auto__add_field_diskoffering_available_size_kb.py
|
Python
|
bsd-3-clause
| 11,926
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals
import gpg
import sys
from groups import group_lists
# Copyright (C) 2018 Ben McGinnes <ben@gnupg.org>
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License and the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU General Public License and the GNU
# Lesser General Public along with this program; if not, see
# <https://www.gnu.org/licenses/>.
"""
Uses the groups module to encrypt to multiple recipients.
"""
c = gpg.Context(armor=True)
if len(sys.argv) > 3:
group_id = sys.argv[1]
filepath = sys.argv[2:]
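    # NOTE: in this branch filepath is a *list* of paths, but the single
    # open(filepath) call below expects one path, so passing more than two
    # arguments will raise a TypeError as written.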
elif len(sys.argv) == 3:
group_id = sys.argv[1]
filepath = sys.argv[2]
elif len(sys.argv) == 2:
group_id = sys.argv[1]
filepath = input("Enter the filename to encrypt: ")
else:
group_id = input("Enter the group name to encrypt to: ")
filepath = input("Enter the filename to encrypt: ")
with open(filepath, "rb") as f:
text = f.read()
klist = None
for group in group_lists:
    if group[0] == group_id:
        klist = group[1]
        break
logrus = []
if klist is not None:
for i in range(len(klist)):
apattern = list(c.keylist(pattern=klist[i], secret=False))
if apattern[0].can_encrypt == 1:
logrus.append(apattern[0])
else:
pass
try:
ciphertext, result, sign_result = c.encrypt(text, recipients=logrus,
add_encrypt_to=True)
except gpg.errors.InvalidRecipients as e:
for i in range(len(e.recipients)):
for n in range(len(logrus)):
if logrus[n].fpr == e.recipients[i].fpr:
logrus.remove(logrus[n])
else:
pass
try:
ciphertext, result, sign_result = c.encrypt(text,
recipients=logrus,
add_encrypt_to=True,
always_trust=True)
except:
pass
with open("{0}.asc".format(filepath), "wb") as f:
f.write(ciphertext)
else:
pass
# EOF
|
gpg/gpgme
|
lang/python/examples/howto/encrypt-to-group.py
|
Python
|
lgpl-2.1
| 2,992
|
# Copyright 2015 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo_log import log as logging
from oslo_service import periodic_task
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.guestagent.datastore.experimental.couchdb import service
from trove.guestagent import dbaas
from trove.guestagent import volume
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
MANAGER = CONF.datastore_manager
class Manager(periodic_task.PeriodicTasks):
"""
    This is the CouchDB Manager class. It is dynamically loaded
    based on the datastore of the Trove instance.
"""
def __init__(self):
self.appStatus = service.CouchDBAppStatus()
self.app = service.CouchDBApp(self.appStatus)
super(Manager, self).__init__(CONF)
def rpc_ping(self, context):
LOG.debug("Responding to RPC ping.")
return True
def prepare(self, context, packages, databases, memory_mb, users,
device_path=None, mount_point=None, backup_info=None,
config_contents=None, root_password=None, overrides=None,
cluster_config=None, snapshot=None):
"""
This is called when the Trove instance first comes online.
It is the first RPC message passed from the task manager.
prepare handles all the base configuration of the CouchDB instance.
"""
self.appStatus.begin_install()
self.app.install_if_needed(packages)
if device_path:
self.app.stop_db()
device = volume.VolumeDevice(device_path)
# unmount if device is already mounted
device.unmount_device(device_path)
device.format()
if os.path.exists(mount_point):
device.migrate_data(mount_point)
device.mount(mount_point)
LOG.debug('Mounted the volume (%s).' % device_path)
self.app.start_db()
self.app.change_permissions()
self.app.make_host_reachable()
self.app.complete_install_or_restart()
LOG.info(_('Completed setup of CouchDB database instance.'))
@periodic_task.periodic_task
def update_status(self, context):
"""Update the status of the CouchDB service."""
self.appStatus.update()
def get_filesystem_stats(self, context, fs_path):
"""Gets the filesystem stats for the path given."""
LOG.debug("In get_filesystem_stats: fs_path= %s" % fs_path)
mount_point = CONF.get(
'mysql' if not MANAGER else MANAGER).mount_point
return dbaas.get_filesystem_volume_stats(mount_point)
def stop_db(self, context, do_not_start_on_reboot=False):
"""
Stop this CouchDB instance.
This method is called when the guest agent
gets a stop message from the taskmanager.
"""
LOG.debug("Stopping the CouchDB instance.")
self.app.stop_db(do_not_start_on_reboot=do_not_start_on_reboot)
def restart(self, context):
"""
Restart this CouchDB instance.
This method is called when the guest agent
gets a restart message from the taskmanager.
"""
LOG.debug("Restarting the CouchDB instance.")
self.app.restart()
def reset_configuration(self, context, configuration):
"""
        Currently this method does nothing. It needs to be implemented
        to enable rollback of flavor-resize on the guestagent side.
"""
LOG.debug("Resetting CouchDB configuration.")
pass
def change_passwords(self, context, users):
LOG.debug("Changing password.")
raise exception.DatastoreOperationNotSupported(
operation='change_passwords', datastore=MANAGER)
def update_attributes(self, context, username, hostname, user_attrs):
LOG.debug("Updating database attributes.")
raise exception.DatastoreOperationNotSupported(
operation='update_attributes', datastore=MANAGER)
def create_database(self, context, databases):
LOG.debug("Creating database.")
raise exception.DatastoreOperationNotSupported(
operation='create_database', datastore=MANAGER)
def create_user(self, context, users):
LOG.debug("Creating user.")
raise exception.DatastoreOperationNotSupported(
operation='create_user', datastore=MANAGER)
def delete_database(self, context, database):
LOG.debug("Deleting database.")
raise exception.DatastoreOperationNotSupported(
operation='delete_database', datastore=MANAGER)
def delete_user(self, context, user):
LOG.debug("Deleting user.")
raise exception.DatastoreOperationNotSupported(
operation='delete_user', datastore=MANAGER)
def get_user(self, context, username, hostname):
LOG.debug("Getting user.")
raise exception.DatastoreOperationNotSupported(
operation='get_user', datastore=MANAGER)
def grant_access(self, context, username, hostname, databases):
LOG.debug("Granting acccess.")
raise exception.DatastoreOperationNotSupported(
operation='grant_access', datastore=MANAGER)
def revoke_access(self, context, username, hostname, database):
LOG.debug("Revoking access.")
raise exception.DatastoreOperationNotSupported(
operation='revoke_access', datastore=MANAGER)
def list_access(self, context, username, hostname):
LOG.debug("Listing access.")
raise exception.DatastoreOperationNotSupported(
operation='list_access', datastore=MANAGER)
def list_databases(self, context, limit=None, marker=None,
include_marker=False):
LOG.debug("Listing databases.")
raise exception.DatastoreOperationNotSupported(
operation='list_databases', datastore=MANAGER)
def list_users(self, context, limit=None, marker=None,
include_marker=False):
LOG.debug("Listing users.")
raise exception.DatastoreOperationNotSupported(
operation='list_users', datastore=MANAGER)
def enable_root(self, context):
LOG.debug("Enabling root.")
raise exception.DatastoreOperationNotSupported(
operation='enable_root', datastore=MANAGER)
def is_root_enabled(self, context):
LOG.debug("Checking if root is enabled.")
raise exception.DatastoreOperationNotSupported(
operation='is_root_enabled', datastore=MANAGER)
def create_backup(self, context, backup_info):
LOG.debug("Creating backup.")
raise exception.DatastoreOperationNotSupported(
operation='create_backup', datastore=MANAGER)
def start_db_with_conf_changes(self, context, config_contents):
LOG.debug("Starting CouchDB with configuration changes.")
self.app.start_db_with_conf_changes(config_contents)
def mount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.mount(mount_point, write_to_fstab=False)
LOG.debug("Mounted the device %s at the mount_point %s." %
(device_path, mount_point))
def unmount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.unmount(mount_point)
LOG.debug("Unmounted the device %s from the mount point %s." %
(device_path, mount_point))
def resize_fs(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.resize_fs(mount_point)
LOG.debug("Resized the filesystem at %s." % mount_point)
|
cp16net/trove
|
trove/guestagent/datastore/experimental/couchdb/manager.py
|
Python
|
apache-2.0
| 8,342
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tjson_python documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this one.
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import tjson_python
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TJSON Python'
copyright = u"2016, Mario Idival"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = tjson_python.__version__
# The full version, including alpha/beta/rc tags.
release = tjson_python.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to
# some non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as
# html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the
# top of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon
# of the docs. This file should be a Windows icon file (.ico) being
# 16x16 or 32x32 pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin
# static files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names
# to template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer.
# Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer.
# Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages
# will contain a <link> tag referring to it. The value of this option
# must be the base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'tjson_pythondoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index', 'tjson_python.tex',
u'TJSON Python Documentation',
u'Mario Idival', 'manual'),
]
# The name of an image file (relative to this directory) to place at
# the top of the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings
# are parts, not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'tjson_python',
u'TJSON Python Documentation',
[u'Mario Idival'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'tjson_python',
u'TJSON Python Documentation',
u'Mario Idival',
'tjson_python',
'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
marioidival/tjson-python
|
docs/conf.py
|
Python
|
mit
| 8,452
|
#######################################################################
# Copyright 2012 Mark Wolf
#
# This file is part of OrgWolf.
#
# OrgWolf is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#######################################################################
from django.contrib import admin
from wolfmail.models import Message
admin.site.register(Message)
|
m3wolf/orgwolf
|
wolfmail/admin.py
|
Python
|
gpl-3.0
| 936
|
# -*- coding: utf-8 -*-
"""
Test pelican-gist
=================
Test stuff in pelican_gist.
"""
from __future__ import unicode_literals
import os
from pelican_gist import plugin as gistplugin
from mock import patch
import requests.models
def test_gist_url():
gist_id = str(3254906)
filename = 'brew-update-notifier.sh'
# Test without a filename
url = gistplugin.gist_url(gist_id)
assert gist_id in url
# Test with filename
url = gistplugin.gist_url(gist_id, filename)
assert url.endswith(filename)
assert gist_id in url
def test_script_url():
gist_id = str(3254906)
filename = 'brew-update-notifier.sh'
# Test without a filename
url = gistplugin.script_url(gist_id)
assert url.endswith('.js')
assert gist_id in url
# Test with filename
url = gistplugin.script_url(gist_id, filename)
assert url.endswith(filename)
assert 'file={}'.format(filename) in url
assert gist_id in url
def test_cache_filename():
path_base = '/tmp'
gist_id = str(3254906)
filename = 'brew-update-notifier.sh'
# Test without a filename
path = gistplugin.cache_filename(path_base, gist_id)
assert path.startswith(path_base)
assert path.endswith('.cache')
# Test with filename
path = gistplugin.cache_filename(path_base, gist_id, filename)
assert path.startswith(path_base)
assert path.endswith('.cache')
def test_set_get_cache():
path_base = '/tmp'
gist_id = str(3254906)
filename = 'brew-update-notifier.sh'
body = """Some gist body"""
# Make sure there is no cache
for f in (gistplugin.cache_filename(path_base, gist_id),
gistplugin.cache_filename(path_base, gist_id, filename)):
if os.path.exists(f):
os.remove(f)
# Get an empty cache
cache_file = gistplugin.get_cache(path_base, gist_id)
assert cache_file is None
cache_file = gistplugin.get_cache(path_base, gist_id, filename)
assert cache_file is None
# Set a cache file
gistplugin.set_cache(path_base, gist_id, body)
# Fetch the same file
cached = gistplugin.get_cache(path_base, gist_id)
assert cached == body
# Set a cache file
gistplugin.set_cache(path_base, gist_id, body, filename)
# Fetch the same file
cached = gistplugin.get_cache(path_base, gist_id, filename)
assert cached == body
def test_fetch_gist():
"""Ensure fetch_gist returns the response content as a string."""
CODE_BODY = "code"
with patch('requests.get') as get:
return_response = requests.models.Response()
return_response.status_code = 200
        return_response._content = CODE_BODY.encode()
get.return_value = return_response
assert gistplugin.fetch_gist(1) == CODE_BODY
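# --- Editor's illustrative sketch (not part of the original test suite): how
# the helpers exercised above might be combined by the plugin. The control
# flow is an assumption; only the function names and signatures come from the
# tests.
def get_gist_body(path_base, gist_id, filename=None):
    body = gistplugin.get_cache(path_base, gist_id, filename)
    if body is None:
        # Cache miss: fetch the gist and store it for the next run.
        body = gistplugin.fetch_gist(gist_id)
        gistplugin.set_cache(path_base, gist_id, body, filename)
    return body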
|
kura/kura.io
|
plugins/pelican_gist/test_plugin.py
|
Python
|
mit
| 2,786
|
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from pycadf.openstack.common import lockutils
class LockFixture(fixtures.Fixture):
"""External locking fixture.
    This fixture is basically an alternative to the synchronized decorator with
    the external flag so that tearDowns and addCleanups will be included in
    the lock context for locking between tests. The fixture is recommended to
    be the first line in a test method, like so::

        def test_method(self):
            self.useFixture(LockFixture('lock_name'))
            ...

    or the first line in setUp if all the test methods in the class are
    required to be serialized. Something like::

        class TestCase(testtools.TestCase):
            def setUp(self):
                self.useFixture(LockFixture('lock_name'))
                super(TestCase, self).setUp()
            ...

    This is because addCleanups are put on a LIFO queue that gets run after the
    test method exits (either by completing or raising an exception).
    """
def __init__(self, name, lock_file_prefix=None):
self.mgr = lockutils.lock(name, lock_file_prefix, True)
def setUp(self):
super(LockFixture, self).setUp()
self.addCleanup(self.mgr.__exit__, None, None, None)
self.mgr.__enter__()
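# --- Editor's illustrative sketch (not part of the original module):
# serializing every test in a case with the fixture above, as the class
# docstring recommends. Assumes testtools is available and that lockutils is
# configured with a usable lock directory.
#
# import testtools
#
# class SerializedTestCase(testtools.TestCase):
#     def setUp(self):
#         self.useFixture(LockFixture('shared-resource'))
#         super(SerializedTestCase, self).setUp()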
|
varunarya10/pycadf
|
pycadf/openstack/common/fixture/lockutils.py
|
Python
|
apache-2.0
| 1,889
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Records configuration."""
from __future__ import unicode_literals
from .models import RecordMetadata as RecordMetadataModel
RECORDS_BREADCRUMB_TITLE_KEY = 'title.title'
"""Key used to extract the breadcrumb title from the record."""
RECORD_DOCUMENT_NAME_GENERATOR = ('invenio.modules.records.utils:'
'default_name_generator')
RECORD_DOCUMENT_VIEWRESTR_POLICY = 'ANY'
"""When a document belongs to more than one record, and this policy is set to
`ALL` the user must be authorized to view all the records to continue checking
the access rights of the document. If the policy is set to `ANY` (default),
then the user needs to be authorized to view at least one record in order to
continue checking the document specific access rights."""
RECORD_KEY_ALIASSES = {
'recid': 'control_number',
'980': 'collections',
'980__a': 'collections.primary',
'980__b': 'collections.secondary',
}
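# --- Editor's illustrative note (not part of the original file): with the
# default 'ANY' policy, a document attached to records A and B is checked as
# soon as the user may view A *or* B; requiring authorization on both would
# be configured in an overlay as:
#
# RECORD_DOCUMENT_VIEWRESTR_POLICY = 'ALL'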
|
jmartinm/invenio-records
|
invenio_records/config.py
|
Python
|
gpl-2.0
| 1,717
|
# Copyright 2017 The UAI-SDK Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import sys
import json
import tarfile
from uai.utils.logger import uai_logger
from uai.operation.base_operation import BaseUaiServiceOp
class UaiServiceTarOp(BaseUaiServiceOp):
""" The Base Tar Tool Class with UAI
"""
def __init__(self, parser):
super(UaiServiceTarOp, self).__init__(parser)
self.conf_params = {}
self.filelist = []
def _add_code_args(self, tar_parser):
code_parse = tar_parser.add_argument_group(
'Code-Params', 'User Code Storage Info Parameters')
code_parse.add_argument(
'--pack_file_path',
type=str,
required=True,
            help='the relative directory of the files to pack')
        if hasattr(self, 'pack_source'):
code_parse.add_argument(
'--upload_name',
type=str,
required=True,
help='the packed tar file name')
else:
code_parse.add_argument(
'--tar_name',
type=str,
required=True,
help='the packed tar file name')
code_parse.add_argument(
'--main_module',
type=str,
required=True,
help='the main module of the user program')
code_parse.add_argument(
'--main_class',
type=str,
required=True,
help='the main class name of the user program')
code_parse.add_argument(
'--model_dir',
type=str,
required=True,
            help='the directory of models, relative to the pack_file_path')
code_parse.add_argument(
'--code_files',
type=str,
required=True,
            help='comma-separated list of all python files to pack')
def _add_args(self):
self._add_code_args(self.parser)
def _parse_code_args(self, args):
self.pack_file_path = args['pack_file_path']
        if hasattr(self, 'pack_source'):
self.upload_name = args['upload_name']
self.tar_name = self.upload_name
else:
self.tar_name = args['tar_name']
self.main_file = args['main_module']
        self.main_class = args['main_class']
self.model_dir = args['model_dir']
self.code_files = args['code_files']
def _parse_args(self, args):
self._parse_code_args(args)
def _get_filelist(self):
self._get_code_list()
self._get_model_list()
def _get_code_list(self):
code_filelist = self.code_files.strip().split(',')
for i in code_filelist:
self.filelist.append(i)
self.filelist.append('ufile.json')
def _get_model_list(self):
        raise NotImplementedError("UaiServiceTarOp._get_model_list is unimplemented")
def _gen_jsonfile(self):
with open(os.path.join(os.getcwd(), self.pack_file_path, 'ufile.json'), 'w') as f:
json.dump(self.conf_params, f)
def _pack_file(self):
        uai_logger.info('Start packing files into the target tar file.')
os.chdir(os.path.join(os.getcwd(), self.pack_file_path))
tar = tarfile.open(self.tar_name, 'w')
for i in self.filelist:
try:
tar.add(i)
except OSError as e:
uai_logger.info('{0} : {1}'.format(OSError, e))
                uai_logger.info('The packing process was interrupted.')
                sys.exit(1)
tar.close()
uai_logger.info('Finish packing the files.')
def _tar(self):
self._get_filelist()
self._gen_jsonfile()
self._pack_file()
def cmd_run(self, args):
self._parse_args(args)
self._tar()
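# --- Editor's illustrative sketch (not part of the original file): a minimal
# subclass filling in the _get_model_list() hook that the base class leaves
# unimplemented. The directory walk is an assumption about what a concrete
# tool would package.
#
# class MyModelTarOp(UaiServiceTarOp):
#     def _get_model_list(self):
#         model_path = os.path.join(os.getcwd(), self.pack_file_path,
#                                   self.model_dir)
#         for name in os.listdir(model_path):
#             # Entries are relative to pack_file_path, which is where
#             # _pack_file() chdirs before creating the tar archive.
#             self.filelist.append(os.path.join(self.model_dir, name))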
|
ucloud/uai-sdk
|
uai/operation/tar/base_tar_op.py
|
Python
|
apache-2.0
| 4,390
|
import logging
from swiftclient.service import SwiftService, SwiftError
from sys import argv
logging.basicConfig(level=logging.ERROR)
logging.getLogger("requests").setLevel(logging.CRITICAL)
logging.getLogger("swiftclient").setLevel(logging.CRITICAL)
logger = logging.getLogger(__name__)
def is_png(obj):
return (
obj["name"].lower().endswith('.png') or
obj["content_type"] == 'image/png'
)
container = argv[1]
with SwiftService() as swift:
try:
        list_options = {"prefix": "archive_2016-01-01/"}
        list_parts_gen = swift.list(container=container, options=list_options)
for page in list_parts_gen:
if page["success"]:
objects = [
obj["name"] for obj in page["listing"] if is_png(obj)
]
for down_res in swift.download(
container=container,
objects=objects):
if down_res['success']:
print("'%s' downloaded" % down_res['object'])
else:
print("'%s' download failed" % down_res['object'])
else:
raise page["error"]
except SwiftError as e:
logger.error(e.value)
|
jeseem/python-swiftclient
|
examples/download.py
|
Python
|
apache-2.0
| 1,242
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------------
# Open Asset Import Library (ASSIMP)
# ---------------------------------------------------------------------------
#
# Copyright (c) 2006-2010, ASSIMP Development Team
#
# All rights reserved.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# * Neither the name of the ASSIMP team, nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior
# written permission of the ASSIMP Development Team.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ---------------------------------------------------------------------------
"""Shared stuff for the gen_db and run scripts """
# -------------------------------------------------------------------------------
def hashing(file, pp):
    """ Map an input file and a postprocessing config to a unique hash.
    The hash is used to store the item in the database. It
    needs to be persistent across different python implementations
    and platforms, so we implement the hashing manually.
    """
def myhash(instring):
# sdbm hash
res = 0
for t in instring:
res = (ord(t) + (res<<6) + (res<<16) - res) % 2**32
return res
return hex(myhash(file.replace('\\','/')+":"+pp))
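# --- Editor's illustrative note (not part of the original file): the manual
# sdbm hash keeps database keys stable across interpreters and platforms,
# which the built-in hash() does not guarantee (string hashing is randomized
# per process on modern CPython). The path separator is also normalized, so
# (with hypothetical inputs):
#
# >>> hashing("models/box.obj", "-ptv") == hashing("models\\box.obj", "-ptv")
# True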
assimp_bin_path = None
# -------------------------------------------------------------------------------
def find_assimp_or_die():
"""Find assimp_cmd's binary for the current platform.
The path to the binary is stored in assimp_bin_path, the process
is aborted if it can't be found.
"""
import os
import platform
import sys
def locate_file(f_list):
for f in f_list:
try:
fl = open(f,"rb")
except IOError:
continue
fl.close()
return f
return None
global assimp_bin_path
if os.name == "nt":
if platform.machine() == "x86":
search = [os.path.join("..","..","bin","assimpcmd_release-dll_Win32","assimp.exe"),
os.path.join("..","..","bin","x86","assimp")]
else: # amd64, hopefully
search = [os.path.join("..","..","bin","assimpcmd_release-dll_x64","assimp.exe"),
os.path.join("..","..","bin","x64","assimp")]
assimp_bin_path = locate_file(search)
if assimp_bin_path is None:
print("Can't locate assimp_cmd binary")
sys.exit(-5)
print("Located assimp/assimp_cmd binary at ",assimp_bin_path)
elif os.name == "posix":
#search = [os.path.join("..","..","bin","gcc","assimp"),
# os.path.join("/usr","local","bin",'assimp')]
assimp_bin_path = "assimp"
print("Taking system-wide assimp binary")
else:
print("Unsupported operating system")
sys.exit(-5)
if __name__ == '__main__':
find_assimp_or_die()
# vim: ai ts=4 sts=4 et sw=4
|
ivansoban/ILEngine
|
thirdparty/assimp/test/regression/utils.py
|
Python
|
mit
| 4,276
|
import SocketServer
from BaseHTTPServer import BaseHTTPRequestHandler

port = 8000

class FunHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        # Log the requested path, then echo it back to the client.
        print self.path
        self.send_response(200)
        self.end_headers()
        self.wfile.write(self.path)

httpd = SocketServer.TCPServer(("", port), FunHandler)
httpd.serve_forever()
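# --- Editor's illustrative usage (not part of the original file): with the
# server running, `curl http://localhost:8000/hello` prints "/hello" on the
# server console and echoes it back in the response body.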
|
kevkruemp/HRI_Plant_Monitor
|
simple_server.py
|
Python
|
mit
| 414
|
def parse_nick_modes(mode_string):
"""Parse a nick mode string.
The function returns a list of lists with three members: sign,
mode and argument. The sign is "+" or "-". The argument is
always None.
Example:
>>> parse_nick_modes("+ab-c")
[['+', 'a', None], ['+', 'b', None], ['-', 'c', None]]
"""
return _parse_modes(mode_string, "")
def parse_channel_modes(mode_string):
"""Parse a channel mode string.
The function returns a list of lists with three members: sign,
mode and argument. The sign is "+" or "-". The argument is
None if mode isn't one of "b", "k", "l", "v" or "o".
Example:
>>> parse_channel_modes("+ab-c foo")
[['+', 'a', None], ['+', 'b', 'foo'], ['-', 'c', None]]
"""
return _parse_modes(mode_string, "bklvo")
def _parse_modes(mode_string, unary_modes=""):
"""
Parse the mode_string and return a list of triples.
If no string is supplied return an empty list.
>>> _parse_modes('')
[]
If no sign is supplied, return an empty list.
>>> _parse_modes('ab')
[]
Discard unused args.
>>> _parse_modes('+a foo bar baz')
[['+', 'a', None]]
    Return None for unary modes when no argument is provided:
>>> _parse_modes('+abc foo', unary_modes='abc')
[['+', 'a', 'foo'], ['+', 'b', None], ['+', 'c', None]]
    This function never raises an error:
>>> import random
>>> def random_text(min_len = 3, max_len = 80):
... len = random.randint(min_len, max_len)
... chars_to_choose = [unichr(x) for x in range(0,1024)]
... chars = (random.choice(chars_to_choose) for x in xrange(len))
... return u''.join(chars)
>>> def random_texts(min_len = 3, max_len = 80):
... while True:
... yield random_text(min_len, max_len)
>>> import itertools
>>> texts = itertools.islice(random_texts(), 1000)
>>> set(type(_parse_modes(text)) for text in texts)
set([<type 'list'>])
"""
# mode_string must be non-empty and begin with a sign
if not mode_string or not mode_string[0] in '+-':
return []
modes = []
parts = mode_string.split()
mode_part, args = parts[0], parts[1:]
for ch in mode_part:
if ch in "+-":
sign = ch
continue
arg = args.pop(0) if ch in unary_modes and args else None
modes.append([sign, ch, arg])
return modes
|
gordon-/naobot
|
irc/modes.py
|
Python
|
agpl-3.0
| 2,417
|
"""Tests for Samsung TV config flow."""
import socket
from unittest.mock import Mock, call, patch
import pytest
from samsungctl.exceptions import AccessDenied, UnhandledResponse
from samsungtvws import SamsungTVWS
from samsungtvws.exceptions import ConnectionFailure, HttpApiError
from websocket import WebSocketException, WebSocketProtocolException
from homeassistant import config_entries
from homeassistant.components import dhcp, ssdp, zeroconf
from homeassistant.components.samsungtv.const import (
CONF_MANUFACTURER,
CONF_MODEL,
DEFAULT_MANUFACTURER,
DOMAIN,
LEGACY_PORT,
METHOD_LEGACY,
METHOD_WEBSOCKET,
RESULT_AUTH_MISSING,
RESULT_CANNOT_CONNECT,
RESULT_NOT_SUPPORTED,
RESULT_UNKNOWN_HOST,
TIMEOUT_REQUEST,
TIMEOUT_WEBSOCKET,
)
from homeassistant.components.ssdp import (
ATTR_UPNP_FRIENDLY_NAME,
ATTR_UPNP_MANUFACTURER,
ATTR_UPNP_MODEL_NAME,
ATTR_UPNP_UDN,
)
from homeassistant.const import (
CONF_HOST,
CONF_ID,
CONF_IP_ADDRESS,
CONF_MAC,
CONF_METHOD,
CONF_NAME,
CONF_PORT,
CONF_TOKEN,
)
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from .const import SAMPLE_APP_LIST
from tests.common import MockConfigEntry
RESULT_ALREADY_CONFIGURED = "already_configured"
RESULT_ALREADY_IN_PROGRESS = "already_in_progress"
MOCK_IMPORT_DATA = {
CONF_HOST: "fake_host",
CONF_NAME: "fake",
CONF_PORT: 55000,
}
MOCK_IMPORT_DATA_WITHOUT_NAME = {
CONF_HOST: "fake_host",
}
MOCK_IMPORT_WSDATA = {
CONF_HOST: "fake_host",
CONF_NAME: "fake",
CONF_PORT: 8002,
}
MOCK_USER_DATA = {CONF_HOST: "fake_host", CONF_NAME: "fake_name"}
MOCK_SSDP_DATA = ssdp.SsdpServiceInfo(
ssdp_usn="mock_usn",
ssdp_st="mock_st",
ssdp_location="https://fake_host:12345/test",
upnp={
ATTR_UPNP_FRIENDLY_NAME: "[TV] fake_name",
ATTR_UPNP_MANUFACTURER: "Samsung fake_manufacturer",
ATTR_UPNP_MODEL_NAME: "fake_model",
ATTR_UPNP_UDN: "uuid:0d1cef00-00dc-1000-9c80-4844f7b172de",
},
)
MOCK_SSDP_DATA_NOPREFIX = ssdp.SsdpServiceInfo(
ssdp_usn="mock_usn",
ssdp_st="mock_st",
ssdp_location="http://fake2_host:12345/test",
upnp={
ATTR_UPNP_FRIENDLY_NAME: "fake2_name",
ATTR_UPNP_MANUFACTURER: "Samsung fake2_manufacturer",
ATTR_UPNP_MODEL_NAME: "fake2_model",
ATTR_UPNP_UDN: "uuid:0d1cef00-00dc-1000-9c80-4844f7b172df",
},
)
MOCK_SSDP_DATA_WRONGMODEL = ssdp.SsdpServiceInfo(
ssdp_usn="mock_usn",
ssdp_st="mock_st",
ssdp_location="http://fake2_host:12345/test",
upnp={
ATTR_UPNP_FRIENDLY_NAME: "fake2_name",
ATTR_UPNP_MANUFACTURER: "fake2_manufacturer",
ATTR_UPNP_MODEL_NAME: "HW-Qfake",
ATTR_UPNP_UDN: "uuid:0d1cef00-00dc-1000-9c80-4844f7b172df",
},
)
MOCK_DHCP_DATA = dhcp.DhcpServiceInfo(
ip="fake_host", macaddress="aa:bb:cc:dd:ee:ff", hostname="fake_hostname"
)
EXISTING_IP = "192.168.40.221"
MOCK_ZEROCONF_DATA = zeroconf.ZeroconfServiceInfo(
host="fake_host",
addresses=["fake_host"],
hostname="mock_hostname",
name="mock_name",
port=1234,
properties={
"deviceid": "aa:bb:cc:dd:ee:ff",
"manufacturer": "fake_manufacturer",
"model": "fake_model",
"serialNumber": "fake_serial",
},
type="mock_type",
)
MOCK_OLD_ENTRY = {
CONF_HOST: "fake_host",
CONF_ID: "0d1cef00-00dc-1000-9c80-4844f7b172de_old",
CONF_IP_ADDRESS: EXISTING_IP,
CONF_METHOD: "legacy",
CONF_PORT: None,
}
MOCK_LEGACY_ENTRY = {
CONF_HOST: EXISTING_IP,
CONF_ID: "0d1cef00-00dc-1000-9c80-4844f7b172de_old",
CONF_METHOD: "legacy",
CONF_PORT: None,
}
MOCK_WS_ENTRY = {
CONF_HOST: "fake_host",
CONF_METHOD: METHOD_WEBSOCKET,
CONF_PORT: 8002,
CONF_MODEL: "any",
CONF_NAME: "any",
}
MOCK_DEVICE_INFO = {
"device": {
"type": "Samsung SmartTV",
"name": "fake_name",
"modelName": "fake_model",
},
"id": "123",
}
MOCK_DEVICE_INFO_2 = {
"device": {
"type": "Samsung SmartTV",
"name": "fake2_name",
"modelName": "fake2_model",
},
"id": "345",
}
AUTODETECT_LEGACY = {
"name": "HomeAssistant",
"description": "HomeAssistant",
"id": "ha.component.samsung",
"method": "legacy",
"port": None,
"host": "fake_host",
"timeout": TIMEOUT_REQUEST,
}
AUTODETECT_WEBSOCKET_PLAIN = {
"host": "fake_host",
"name": "HomeAssistant",
"port": 8001,
"timeout": TIMEOUT_REQUEST,
"token": None,
}
AUTODETECT_WEBSOCKET_SSL = {
"host": "fake_host",
"name": "HomeAssistant",
"port": 8002,
"timeout": TIMEOUT_REQUEST,
"token": None,
}
DEVICEINFO_WEBSOCKET_SSL = {
"host": "fake_host",
"name": "HomeAssistant",
"port": 8002,
"timeout": TIMEOUT_WEBSOCKET,
"token": "123456789",
}
@pytest.mark.usefixtures("remote")
async def test_user_legacy(hass: HomeAssistant) -> None:
"""Test starting a flow by user."""
# show form
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
# entry was added
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=MOCK_USER_DATA
)
# legacy tv entry created
assert result["type"] == "create_entry"
assert result["title"] == "fake_name"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_NAME] == "fake_name"
assert result["data"][CONF_METHOD] == "legacy"
assert result["data"][CONF_MANUFACTURER] == DEFAULT_MANUFACTURER
assert result["data"][CONF_MODEL] is None
assert result["result"].unique_id is None
@pytest.mark.usefixtures("remotews")
async def test_user_websocket(hass: HomeAssistant) -> None:
"""Test starting a flow by user."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote", side_effect=OSError("Boom")
):
# show form
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
# entry was added
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=MOCK_USER_DATA
)
# websocket tv entry created
assert result["type"] == "create_entry"
assert result["title"] == "Living Room (82GXARRS)"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_NAME] == "Living Room"
assert result["data"][CONF_METHOD] == "websocket"
assert result["data"][CONF_MANUFACTURER] == "Samsung"
assert result["data"][CONF_MODEL] == "82GXARRS"
assert result["result"].unique_id == "be9554b9-c9fb-41f4-8920-22da015376a4"
@pytest.mark.usefixtures("remotews")
async def test_user_legacy_missing_auth(hass: HomeAssistant) -> None:
"""Test starting a flow by user with authentication."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=AccessDenied("Boom"),
):
# legacy device missing authentication
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_AUTH_MISSING
async def test_user_legacy_not_supported(hass: HomeAssistant) -> None:
"""Test starting a flow by user for not supported device."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=UnhandledResponse("Boom"),
):
# legacy device not supported
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_NOT_SUPPORTED
async def test_user_websocket_not_supported(hass: HomeAssistant) -> None:
"""Test starting a flow by user for not supported device."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=OSError("Boom"),
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS.open",
side_effect=WebSocketProtocolException("Boom"),
):
# websocket device not supported
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_NOT_SUPPORTED
async def test_user_not_successful(hass: HomeAssistant) -> None:
"""Test starting a flow by user but no connection found."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=OSError("Boom"),
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS.open",
side_effect=OSError("Boom"),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_CANNOT_CONNECT
async def test_user_not_successful_2(hass: HomeAssistant) -> None:
"""Test starting a flow by user but no connection found."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=OSError("Boom"),
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS.open",
side_effect=ConnectionFailure("Boom"),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_CANNOT_CONNECT
@pytest.mark.usefixtures("remote")
async def test_ssdp(hass: HomeAssistant, no_mac_address: Mock) -> None:
"""Test starting a flow from discovery."""
no_mac_address.return_value = "aa:bb:cc:dd:ee:ff"
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWSBridge.async_device_info",
return_value=MOCK_DEVICE_INFO,
):
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
# entry was added
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input="whatever"
)
assert result["type"] == "create_entry"
assert result["title"] == "fake_name (fake_model)"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_NAME] == "fake_name"
assert result["data"][CONF_MANUFACTURER] == "Samsung fake_manufacturer"
assert result["data"][CONF_MODEL] == "fake_model"
assert result["result"].unique_id == "0d1cef00-00dc-1000-9c80-4844f7b172de"
@pytest.mark.usefixtures("remote")
async def test_ssdp_noprefix(hass: HomeAssistant, no_mac_address: Mock) -> None:
"""Test starting a flow from discovery without prefixes."""
no_mac_address.return_value = "aa:bb:cc:dd:ee:ff"
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWSBridge.async_device_info",
return_value=MOCK_DEVICE_INFO_2,
):
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data=MOCK_SSDP_DATA_NOPREFIX,
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
with patch(
"homeassistant.components.samsungtv.bridge.Remote.__enter__",
return_value=True,
):
# entry was added
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input="whatever"
)
assert result["type"] == "create_entry"
assert result["title"] == "fake2_name (fake2_model)"
assert result["data"][CONF_HOST] == "fake2_host"
assert result["data"][CONF_NAME] == "fake2_name"
assert result["data"][CONF_MANUFACTURER] == "Samsung fake2_manufacturer"
assert result["data"][CONF_MODEL] == "fake2_model"
assert result["result"].unique_id == "0d1cef00-00dc-1000-9c80-4844f7b172df"
@pytest.mark.usefixtures("remotews")
async def test_ssdp_legacy_missing_auth(hass: HomeAssistant) -> None:
"""Test starting a flow from discovery with authentication."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=AccessDenied("Boom"),
):
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
# missing authentication
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVLegacyBridge.async_try_connect",
return_value=RESULT_AUTH_MISSING,
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input="whatever"
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_AUTH_MISSING
@pytest.mark.usefixtures("remote", "remotews")
async def test_ssdp_legacy_not_supported(hass: HomeAssistant) -> None:
"""Test starting a flow from discovery for not supported device."""
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVLegacyBridge.async_try_connect",
return_value=RESULT_NOT_SUPPORTED,
):
# device not supported
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input="whatever"
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_NOT_SUPPORTED
@pytest.mark.usefixtures("remote", "remotews")
async def test_ssdp_websocket_success_populates_mac_address(
hass: HomeAssistant,
) -> None:
"""Test starting a flow from ssdp for a supported device populates the mac."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input="whatever"
)
assert result["type"] == "create_entry"
assert result["title"] == "Living Room (82GXARRS)"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_NAME] == "Living Room"
assert result["data"][CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert result["data"][CONF_MANUFACTURER] == "Samsung fake_manufacturer"
assert result["data"][CONF_MODEL] == "82GXARRS"
assert result["result"].unique_id == "0d1cef00-00dc-1000-9c80-4844f7b172de"
async def test_ssdp_websocket_not_supported(hass: HomeAssistant) -> None:
"""Test starting a flow from discovery for not supported device."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=OSError("Boom"),
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS",
) as remotews, patch.object(
remotews, "open", side_effect=WebSocketProtocolException("Boom")
):
# device not supported
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_NOT_SUPPORTED
@pytest.mark.usefixtures("remote")
async def test_ssdp_model_not_supported(hass: HomeAssistant) -> None:
"""Test starting a flow from discovery."""
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data=MOCK_SSDP_DATA_WRONGMODEL,
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_NOT_SUPPORTED
@pytest.mark.usefixtures("no_mac_address")
async def test_ssdp_not_successful(hass: HomeAssistant) -> None:
"""Test starting a flow from discovery but no device found."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=OSError("Boom"),
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS.open",
side_effect=OSError("Boom"),
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWSBridge.async_device_info",
return_value=MOCK_DEVICE_INFO,
):
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
# device not found
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input="whatever"
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_CANNOT_CONNECT
@pytest.mark.usefixtures("no_mac_address")
async def test_ssdp_not_successful_2(hass: HomeAssistant) -> None:
"""Test starting a flow from discovery but no device found."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=OSError("Boom"),
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS.open",
side_effect=ConnectionFailure("Boom"),
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWSBridge.async_device_info",
return_value=MOCK_DEVICE_INFO,
):
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
# device not found
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input="whatever"
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_CANNOT_CONNECT
@pytest.mark.usefixtures("remote")
async def test_ssdp_already_in_progress(
hass: HomeAssistant, no_mac_address: Mock
) -> None:
"""Test starting a flow from discovery twice."""
no_mac_address.return_value = "aa:bb:cc:dd:ee:ff"
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWSBridge.async_device_info",
return_value=MOCK_DEVICE_INFO,
):
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "form"
assert result["step_id"] == "confirm"
# failed as already in progress
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_ALREADY_IN_PROGRESS
@pytest.mark.usefixtures("remote")
async def test_ssdp_already_configured(
hass: HomeAssistant, no_mac_address: Mock
) -> None:
"""Test starting a flow from discovery when already configured."""
no_mac_address.return_value = "aa:bb:cc:dd:ee:ff"
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWSBridge.async_device_info",
return_value=MOCK_DEVICE_INFO,
):
# entry was added
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "create_entry"
entry = result["result"]
assert entry.data[CONF_MANUFACTURER] == DEFAULT_MANUFACTURER
assert entry.data[CONF_MODEL] is None
assert entry.unique_id is None
# failed as already configured
result2 = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result2["type"] == "abort"
assert result2["reason"] == RESULT_ALREADY_CONFIGURED
# check updated device info
assert entry.unique_id == "0d1cef00-00dc-1000-9c80-4844f7b172de"
@pytest.mark.usefixtures("remote")
async def test_import_legacy(hass: HomeAssistant, no_mac_address: Mock) -> None:
"""Test importing from yaml with hostname."""
no_mac_address.return_value = "aa:bb:cc:dd:ee:ff"
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=MOCK_IMPORT_DATA,
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "fake"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_NAME] == "fake"
assert result["data"][CONF_MANUFACTURER] == "Samsung"
assert result["result"].unique_id is None
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 1
assert entries[0].data[CONF_METHOD] == METHOD_LEGACY
assert entries[0].data[CONF_PORT] == LEGACY_PORT
@pytest.mark.usefixtures("remote", "remotews_no_device_info", "no_mac_address")
async def test_import_legacy_without_name(hass: HomeAssistant) -> None:
"""Test importing from yaml without a name."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=MOCK_IMPORT_DATA_WITHOUT_NAME,
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "fake_host"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_MANUFACTURER] == "Samsung"
assert result["result"].unique_id is None
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 1
assert entries[0].data[CONF_METHOD] == METHOD_LEGACY
assert entries[0].data[CONF_PORT] == LEGACY_PORT
@pytest.mark.usefixtures("remotews")
async def test_import_websocket(hass: HomeAssistant) -> None:
"""Test importing from yaml with hostname."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=MOCK_IMPORT_WSDATA,
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "fake"
assert result["data"][CONF_METHOD] == METHOD_WEBSOCKET
assert result["data"][CONF_PORT] == 8002
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_NAME] == "fake"
assert result["data"][CONF_MANUFACTURER] == "Samsung"
assert result["result"].unique_id is None
@pytest.mark.usefixtures("remotews")
async def test_import_websocket_without_port(hass: HomeAssistant) -> None:
    """Test importing from yaml with hostname but no port."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=MOCK_IMPORT_WSDATA,
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "fake"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_NAME] == "fake"
assert result["data"][CONF_MANUFACTURER] == "Samsung"
assert result["result"].unique_id is None
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 1
assert entries[0].data[CONF_METHOD] == METHOD_WEBSOCKET
assert entries[0].data[CONF_PORT] == 8002
@pytest.mark.usefixtures("remotews")
async def test_import_unknown_host(hass: HomeAssistant) -> None:
"""Test importing from yaml with hostname that does not resolve."""
with patch(
"homeassistant.components.samsungtv.config_flow.socket.gethostbyname",
side_effect=socket.gaierror,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=MOCK_IMPORT_DATA,
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == RESULT_UNKNOWN_HOST
@pytest.mark.usefixtures("remote", "remotews")
async def test_dhcp(hass: HomeAssistant) -> None:
"""Test starting a flow from dhcp."""
# confirm to add the entry
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=MOCK_DHCP_DATA,
)
await hass.async_block_till_done()
assert result["type"] == "form"
assert result["step_id"] == "confirm"
# entry was added
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input="whatever"
)
assert result["type"] == "create_entry"
assert result["title"] == "Living Room (82GXARRS)"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_NAME] == "Living Room"
assert result["data"][CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert result["data"][CONF_MANUFACTURER] == "Samsung"
assert result["data"][CONF_MODEL] == "82GXARRS"
assert result["result"].unique_id == "be9554b9-c9fb-41f4-8920-22da015376a4"
@pytest.mark.usefixtures("remote", "remotews")
async def test_zeroconf(hass: HomeAssistant) -> None:
"""Test starting a flow from zeroconf."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=MOCK_ZEROCONF_DATA,
)
await hass.async_block_till_done()
assert result["type"] == "form"
assert result["step_id"] == "confirm"
# entry was added
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input="whatever"
)
assert result["type"] == "create_entry"
assert result["title"] == "Living Room (82GXARRS)"
assert result["data"][CONF_HOST] == "fake_host"
assert result["data"][CONF_NAME] == "Living Room"
assert result["data"][CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert result["data"][CONF_MANUFACTURER] == "Samsung"
assert result["data"][CONF_MODEL] == "82GXARRS"
assert result["result"].unique_id == "be9554b9-c9fb-41f4-8920-22da015376a4"
@pytest.mark.usefixtures("remotews_soundbar")
async def test_zeroconf_ignores_soundbar(hass: HomeAssistant) -> None:
"""Test starting a flow from zeroconf where the device is actually a soundbar."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=MOCK_ZEROCONF_DATA,
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "not_supported"
@pytest.mark.usefixtures("remote", "remotews_no_device_info")
async def test_zeroconf_no_device_info(hass: HomeAssistant) -> None:
"""Test starting a flow from zeroconf where device_info returns None."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=MOCK_ZEROCONF_DATA,
)
await hass.async_block_till_done()
assert result["type"] == "abort"
assert result["reason"] == "not_supported"
@pytest.mark.usefixtures("remotews")
async def test_zeroconf_and_dhcp_same_time(hass: HomeAssistant) -> None:
"""Test starting a flow from zeroconf and dhcp."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=MOCK_DHCP_DATA,
)
await hass.async_block_till_done()
assert result["type"] == "form"
assert result["step_id"] == "confirm"
result2 = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=MOCK_ZEROCONF_DATA,
)
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "already_in_progress"
async def test_autodetect_websocket(hass: HomeAssistant) -> None:
"""Test for send key with autodetection of protocol."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=OSError("Boom"),
), patch("homeassistant.components.samsungtv.bridge.SamsungTVWS") as remotews:
remote = Mock(SamsungTVWS)
remote.__enter__ = Mock(return_value=remote)
remote.__exit__ = Mock(return_value=False)
remote.app_list.return_value = SAMPLE_APP_LIST
remote.rest_device_info.return_value = {
"id": "uuid:be9554b9-c9fb-41f4-8920-22da015376a4",
"device": {
"modelName": "82GXARRS",
"networkType": "wireless",
"wifiMac": "aa:bb:cc:dd:ee:ff",
"udn": "uuid:be9554b9-c9fb-41f4-8920-22da015376a4",
"mac": "aa:bb:cc:dd:ee:ff",
"name": "[TV] Living Room",
"type": "Samsung SmartTV",
},
}
remote.token = "123456789"
remotews.return_value = remote
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "create_entry"
assert result["data"][CONF_METHOD] == "websocket"
assert result["data"][CONF_TOKEN] == "123456789"
assert remotews.call_count == 2
assert remotews.call_args_list == [
call(**AUTODETECT_WEBSOCKET_SSL),
call(**DEVICEINFO_WEBSOCKET_SSL),
]
await hass.async_block_till_done()
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 1
assert entries[0].data[CONF_MAC] == "aa:bb:cc:dd:ee:ff"
async def test_websocket_no_mac(hass: HomeAssistant) -> None:
"""Test for send key with autodetection of protocol."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=OSError("Boom"),
), patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS"
) as remotews, patch(
"getmac.get_mac_address", return_value="gg:hh:ii:ll:mm:nn"
):
remote = Mock(SamsungTVWS)
remote.__enter__ = Mock(return_value=remote)
remote.__exit__ = Mock(return_value=False)
remote.app_list.return_value = SAMPLE_APP_LIST
remote.rest_device_info.return_value = {
"id": "uuid:be9554b9-c9fb-41f4-8920-22da015376a4",
"device": {
"modelName": "82GXARRS",
"networkType": "lan",
"udn": "uuid:be9554b9-c9fb-41f4-8920-22da015376a4",
"name": "[TV] Living Room",
"type": "Samsung SmartTV",
},
}
remote.token = "123456789"
remotews.return_value = remote
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "create_entry"
assert result["data"][CONF_METHOD] == "websocket"
assert result["data"][CONF_TOKEN] == "123456789"
assert result["data"][CONF_MAC] == "gg:hh:ii:ll:mm:nn"
assert remotews.call_count == 2
assert remotews.call_args_list == [
call(**AUTODETECT_WEBSOCKET_SSL),
call(**DEVICEINFO_WEBSOCKET_SSL),
]
await hass.async_block_till_done()
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 1
assert entries[0].data[CONF_MAC] == "gg:hh:ii:ll:mm:nn"
async def test_autodetect_auth_missing(hass: HomeAssistant) -> None:
"""Test for send key with autodetection of protocol."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=[AccessDenied("Boom")],
) as remote:
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_AUTH_MISSING
assert remote.call_count == 1
assert remote.call_args_list == [call(AUTODETECT_LEGACY)]
async def test_autodetect_not_supported(hass: HomeAssistant) -> None:
"""Test for send key with autodetection of protocol."""
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=[UnhandledResponse("Boom")],
) as remote:
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_NOT_SUPPORTED
assert remote.call_count == 1
assert remote.call_args_list == [call(AUTODETECT_LEGACY)]
@pytest.mark.usefixtures("remote")
async def test_autodetect_legacy(hass: HomeAssistant) -> None:
"""Test for send key with autodetection of protocol."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "create_entry"
assert result["data"][CONF_METHOD] == "legacy"
assert result["data"][CONF_NAME] == "fake_name"
assert result["data"][CONF_MAC] is None
assert result["data"][CONF_PORT] == LEGACY_PORT
async def test_autodetect_none(hass: HomeAssistant) -> None:
"""Test for send key with autodetection of protocol."""
mock_remotews = Mock()
mock_remotews.__enter__ = Mock(return_value=mock_remotews)
mock_remotews.__exit__ = Mock()
mock_remotews.open = Mock(side_effect=OSError("Boom"))
with patch(
"homeassistant.components.samsungtv.bridge.Remote",
side_effect=OSError("Boom"),
) as remote, patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS",
return_value=mock_remotews,
) as remotews:
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=MOCK_USER_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_CANNOT_CONNECT
assert remote.call_count == 1
assert remote.call_args_list == [
call(AUTODETECT_LEGACY),
]
assert remotews.call_count == 2
assert remotews.call_args_list == [
call(**AUTODETECT_WEBSOCKET_SSL),
call(**AUTODETECT_WEBSOCKET_PLAIN),
]
@pytest.mark.usefixtures("remotews")
async def test_update_old_entry(hass: HomeAssistant) -> None:
"""Test update of old entry."""
with patch("homeassistant.components.samsungtv.bridge.Remote") as remote:
remote().rest_device_info.return_value = {
"device": {
"modelName": "fake_model2",
"name": "[TV] Fake Name",
"udn": "uuid:fake_serial",
}
}
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_OLD_ENTRY)
entry.add_to_hass(hass)
config_entries_domain = hass.config_entries.async_entries(DOMAIN)
assert len(config_entries_domain) == 1
assert entry is config_entries_domain[0]
assert entry.data[CONF_ID] == "0d1cef00-00dc-1000-9c80-4844f7b172de_old"
assert entry.data[CONF_IP_ADDRESS] == EXISTING_IP
assert not entry.unique_id
assert await async_setup_component(hass, DOMAIN, {}) is True
await hass.async_block_till_done()
# failed as already configured
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_SSDP}, data=MOCK_SSDP_DATA
)
assert result["type"] == "abort"
assert result["reason"] == RESULT_ALREADY_CONFIGURED
config_entries_domain = hass.config_entries.async_entries(DOMAIN)
assert len(config_entries_domain) == 1
entry2 = config_entries_domain[0]
# check updated device info
assert entry2.data.get(CONF_ID) is not None
assert entry2.data.get(CONF_IP_ADDRESS) is not None
assert entry2.unique_id == "0d1cef00-00dc-1000-9c80-4844f7b172de"
@pytest.mark.usefixtures("remotews")
async def test_update_missing_mac_unique_id_added_from_dhcp(
hass: HomeAssistant,
) -> None:
"""Test missing mac and unique id added."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_OLD_ENTRY, unique_id=None)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.samsungtv.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.samsungtv.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=MOCK_DHCP_DATA,
)
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data[CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert entry.unique_id == "be9554b9-c9fb-41f4-8920-22da015376a4"
@pytest.mark.usefixtures("remotews")
async def test_update_missing_mac_unique_id_added_from_zeroconf(
hass: HomeAssistant,
) -> None:
"""Test missing mac and unique id added."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_OLD_ENTRY, unique_id=None)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.samsungtv.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.samsungtv.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=MOCK_ZEROCONF_DATA,
)
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data[CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert entry.unique_id == "be9554b9-c9fb-41f4-8920-22da015376a4"
@pytest.mark.usefixtures("remotews")
async def test_update_missing_mac_unique_id_added_from_ssdp(
hass: HomeAssistant,
) -> None:
"""Test missing mac and unique id added via ssdp."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_OLD_ENTRY, unique_id=None)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.samsungtv.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.samsungtv.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_SSDP},
data=MOCK_SSDP_DATA,
)
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data[CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert entry.unique_id == "0d1cef00-00dc-1000-9c80-4844f7b172de"
@pytest.mark.usefixtures("remotews")
async def test_update_missing_mac_added_unique_id_preserved_from_zeroconf(
hass: HomeAssistant,
) -> None:
"""Test missing mac and unique id added."""
entry = MockConfigEntry(
domain=DOMAIN,
data=MOCK_OLD_ENTRY,
unique_id="0d1cef00-00dc-1000-9c80-4844f7b172de",
)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.samsungtv.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.samsungtv.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data=MOCK_ZEROCONF_DATA,
)
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data[CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert entry.unique_id == "0d1cef00-00dc-1000-9c80-4844f7b172de"
@pytest.mark.usefixtures("remote")
async def test_update_legacy_missing_mac_from_dhcp(hass: HomeAssistant) -> None:
"""Test missing mac added."""
entry = MockConfigEntry(
domain=DOMAIN,
data=MOCK_LEGACY_ENTRY,
unique_id="0d1cef00-00dc-1000-9c80-4844f7b172de",
)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.samsungtv.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.samsungtv.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=dhcp.DhcpServiceInfo(
ip=EXISTING_IP, macaddress="aa:bb:cc:dd:ee:ff", hostname="fake_hostname"
),
)
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data[CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert entry.unique_id == "0d1cef00-00dc-1000-9c80-4844f7b172de"
@pytest.mark.usefixtures("remote")
async def test_update_legacy_missing_mac_from_dhcp_no_unique_id(
hass: HomeAssistant,
) -> None:
"""Test missing mac added when there is no unique id."""
entry = MockConfigEntry(
domain=DOMAIN,
data=MOCK_LEGACY_ENTRY,
)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS.rest_device_info",
side_effect=HttpApiError,
), patch(
"homeassistant.components.samsungtv.bridge.Remote.__enter__",
return_value=True,
), patch(
"homeassistant.components.samsungtv.async_setup",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.samsungtv.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_DHCP},
data=dhcp.DhcpServiceInfo(
ip=EXISTING_IP, macaddress="aa:bb:cc:dd:ee:ff", hostname="fake_hostname"
),
)
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
assert result["type"] == "abort"
assert result["reason"] == "not_supported"
assert entry.data[CONF_MAC] == "aa:bb:cc:dd:ee:ff"
assert entry.unique_id is None
@pytest.mark.usefixtures("remote")
async def test_form_reauth_legacy(hass: HomeAssistant) -> None:
"""Test reauthenticate legacy."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_OLD_ENTRY)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"entry_id": entry.entry_id, "source": config_entries.SOURCE_REAUTH},
data=entry.data,
)
assert result["type"] == "form"
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "reauth_successful"
@pytest.mark.usefixtures("remotews")
async def test_form_reauth_websocket(hass: HomeAssistant) -> None:
"""Test reauthenticate websocket."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_WS_ENTRY)
entry.add_to_hass(hass)
assert entry.state == config_entries.ConfigEntryState.NOT_LOADED
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"entry_id": entry.entry_id, "source": config_entries.SOURCE_REAUTH},
data=entry.data,
)
assert result["type"] == "form"
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "reauth_successful"
assert entry.state == config_entries.ConfigEntryState.LOADED
async def test_form_reauth_websocket_cannot_connect(
hass: HomeAssistant, remotews: Mock
) -> None:
"""Test reauthenticate websocket when we cannot connect on the first attempt."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_WS_ENTRY)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"entry_id": entry.entry_id, "source": config_entries.SOURCE_REAUTH},
data=entry.data,
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch.object(remotews, "open", side_effect=ConnectionFailure):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result2["type"] == "form"
assert result2["errors"] == {"base": RESULT_AUTH_MISSING}
result3 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result3["type"] == "abort"
assert result3["reason"] == "reauth_successful"
async def test_form_reauth_websocket_not_supported(hass: HomeAssistant) -> None:
"""Test reauthenticate websocket when the device is not supported."""
entry = MockConfigEntry(domain=DOMAIN, data=MOCK_WS_ENTRY)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"entry_id": entry.entry_id, "source": config_entries.SOURCE_REAUTH},
data=entry.data,
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.samsungtv.bridge.SamsungTVWS.open",
side_effect=WebSocketException,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result2["type"] == "abort"
assert result2["reason"] == "not_supported"
|
rohitranjan1991/home-assistant
|
tests/components/samsungtv/test_config_flow.py
|
Python
|
mit
| 48,107
|
"""Uses Newt connector and connects to a remote host and deploys the SREMOTE
code and an environment for it. It also installs the QDO library in that
environment.
An application using SREMOTE should check if SREMOTE is deployed and use
code similar to this to deploy it.
It does not require superuser privileges on the remote machine. Everything
is installed in the context of the user.
Usage: python do_install_newt.py (edison|hopper|carver) username password
"""
import sremote.api as remote
import sremote.connector.newt as newt
from sys import argv
#TODO(gonzalorodrigo): Do better parsing of the input parameters.
connector = newt.ClientNEWTConnector(argv[1])
if not connector.auth(argv[2], argv[3]):
print "Auth error", argv[2], argv[3]
exit()
client = remote.RemoteClient(connector)
# Creates the remote environment and installs the QDO library in it.
client.do_bootstrap_install()
client.do_install_git_module(
"https://gonzalorodrigo@bitbucket.org/berkeleylab/qdo.git",
"master",
"qdo")
|
gonzalorodrigo/qdo_interpreter
|
bin/installer/do_install_newt.py
|
Python
|
bsd-3-clause
| 998
|
#----------------------------------------------------------------------
# Copyright (c) 1999-2001, Digital Creations, Fredericksburg, VA, USA
# and Andrew Kuchling. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# o Redistributions of source code must retain the above copyright
# notice, this list of conditions, and the disclaimer that follows.
#
# o Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions, and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# o Neither the name of Digital Creations nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS AND CONTRIBUTORS *AS
# IS* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL
# CREATIONS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#----------------------------------------------------------------------
"""Support for BerkeleyDB 3.2 through 4.2.
"""
try:
if __name__ == 'bsddb3':
        # import the _pybsddb binary, as it should be a more recent version
        # from the standalone pybsddb addon package than the version
        # included with python as bsddb._bsddb.
import _pybsddb
_bsddb = _pybsddb
else:
import _bsddb
except ImportError:
# Remove ourselves from sys.modules
import sys
del sys.modules[__name__]
raise
# bsddb3 calls it db, but provide _db for backwards compatibility
db = _db = _bsddb
__version__ = db.__version__
error = db.DBError # So bsddb.error will mean something...
#----------------------------------------------------------------------
import sys, os
# for backwards compatibility with python versions older than 2.3, the
# iterator interface is dynamically defined and added using a mixin
# class. old python can't tokenize it due to the yield keyword.
if sys.version >= '2.3':
import UserDict
from weakref import ref
exec """
class _iter_mixin(UserDict.DictMixin):
def _make_iter_cursor(self):
cur = self.db.cursor()
key = id(cur)
self._cursor_refs[key] = ref(cur, self._gen_cref_cleaner(key))
return cur
def _gen_cref_cleaner(self, key):
        # generate the function for the weakref callback here to ensure
        # that we do not hold a strong reference to cur in the callback.
return lambda ref: self._cursor_refs.pop(key, None)
def __iter__(self):
try:
cur = self._make_iter_cursor()
# FIXME-20031102-greg: race condition. cursor could
# be closed by another thread before this call.
# since we're only returning keys, we call the cursor
# methods with flags=0, dlen=0, dofs=0
key = cur.first(0,0,0)[0]
yield key
next = cur.next
while 1:
try:
key = next(0,0,0)[0]
yield key
except _bsddb.DBCursorClosedError:
cur = self._make_iter_cursor()
# FIXME-20031101-greg: race condition. cursor could
# be closed by another thread before this call.
cur.set(key,0,0,0)
next = cur.next
except _bsddb.DBNotFoundError:
return
except _bsddb.DBCursorClosedError:
# the database was modified during iteration. abort.
return
def iteritems(self):
try:
try:
cur = self._make_iter_cursor()
except AttributeError:
return
# FIXME-20031102-greg: race condition. cursor could
# be closed by another thread before this call.
kv = cur.first()
key = kv[0]
yield kv
next = cur.next
while 1:
try:
kv = next()
key = kv[0]
yield kv
except _bsddb.DBCursorClosedError:
cur = self._make_iter_cursor()
# FIXME-20031101-greg: race condition. cursor could
# be closed by another thread before this call.
cur.set(key,0,0,0)
next = cur.next
except _bsddb.DBNotFoundError:
return
except _bsddb.DBCursorClosedError:
# the database was modified during iteration. abort.
return
"""
else:
class _iter_mixin: pass
class _DBWithCursor(_iter_mixin):
"""
A simple wrapper around DB that makes it look like the bsddbobject in
the old module. It uses a cursor as needed to provide DB traversal.
"""
def __init__(self, db):
self.db = db
self.db.set_get_returns_none(0)
# FIXME-20031101-greg: I believe there is still the potential
# for deadlocks in a multithreaded environment if someone
        # attempts to use any of the cursor interfaces in one
# thread while doing a put or delete in another thread. The
# reason is that _checkCursor and _closeCursors are not atomic
# operations. Doing our own locking around self.dbc,
# self.saved_dbc_key and self._cursor_refs could prevent this.
# TODO: A test case demonstrating the problem needs to be written.
# self.dbc is a DBCursor object used to implement the
# first/next/previous/last/set_location methods.
self.dbc = None
self.saved_dbc_key = None
# a collection of all DBCursor objects currently allocated
# by the _iter_mixin interface.
self._cursor_refs = {}
def __del__(self):
self.close()
def _checkCursor(self):
if self.dbc is None:
self.dbc = self.db.cursor()
if self.saved_dbc_key is not None:
self.dbc.set(self.saved_dbc_key)
self.saved_dbc_key = None
# This method is needed for all non-cursor DB calls to avoid
# BerkeleyDB deadlocks (due to being opened with DB_INIT_LOCK
# and DB_THREAD to be thread safe) when intermixing database
# operations that use the cursor internally with those that don't.
def _closeCursors(self, save=1):
if self.dbc:
c = self.dbc
self.dbc = None
if save:
try:
self.saved_dbc_key = c.current(0,0,0)[0]
except db.DBError:
pass
c.close()
del c
for cref in self._cursor_refs.values():
c = cref()
if c is not None:
c.close()
def _checkOpen(self):
if self.db is None:
raise error, "BSDDB object has already been closed"
def isOpen(self):
return self.db is not None
def __len__(self):
self._checkOpen()
return len(self.db)
def __getitem__(self, key):
self._checkOpen()
return self.db[key]
def __setitem__(self, key, value):
self._checkOpen()
self._closeCursors()
self.db[key] = value
def __delitem__(self, key):
self._checkOpen()
self._closeCursors()
del self.db[key]
def close(self):
self._closeCursors(save=0)
if self.dbc is not None:
self.dbc.close()
v = 0
if self.db is not None:
v = self.db.close()
self.dbc = None
self.db = None
return v
def keys(self):
self._checkOpen()
return self.db.keys()
def has_key(self, key):
self._checkOpen()
return self.db.has_key(key)
def set_location(self, key):
self._checkOpen()
self._checkCursor()
return self.dbc.set_range(key)
def next(self):
self._checkOpen()
self._checkCursor()
rv = self.dbc.next()
return rv
def previous(self):
self._checkOpen()
self._checkCursor()
rv = self.dbc.prev()
return rv
def first(self):
self._checkOpen()
self._checkCursor()
rv = self.dbc.first()
return rv
def last(self):
self._checkOpen()
self._checkCursor()
rv = self.dbc.last()
return rv
def sync(self):
self._checkOpen()
return self.db.sync()
#----------------------------------------------------------------------
# Compatibility object factory functions
def hashopen(file, flag='c', mode=0666, pgsize=None, ffactor=None, nelem=None,
cachesize=None, lorder=None, hflags=0):
flags = _checkflag(flag, file)
e = _openDBEnv(cachesize)
d = db.DB(e)
d.set_flags(hflags)
if pgsize is not None: d.set_pagesize(pgsize)
if lorder is not None: d.set_lorder(lorder)
if ffactor is not None: d.set_h_ffactor(ffactor)
if nelem is not None: d.set_h_nelem(nelem)
d.open(file, db.DB_HASH, flags, mode)
return _DBWithCursor(d)
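# Illustrative use of hashopen above (a sketch, assuming the current
# directory is writable, since _openDBEnv opens its private environment
# in '.'):
#   d = hashopen('/tmp/example.db', 'c')
#   d['key'] = 'value'
#   print d['key']
#   d.close()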
#----------------------------------------------------------------------
def btopen(file, flag='c', mode=0666,
btflags=0, cachesize=None, maxkeypage=None, minkeypage=None,
pgsize=None, lorder=None):
flags = _checkflag(flag, file)
e = _openDBEnv(cachesize)
d = db.DB(e)
if pgsize is not None: d.set_pagesize(pgsize)
if lorder is not None: d.set_lorder(lorder)
d.set_flags(btflags)
if minkeypage is not None: d.set_bt_minkey(minkeypage)
if maxkeypage is not None: d.set_bt_maxkey(maxkeypage)
d.open(file, db.DB_BTREE, flags, mode)
return _DBWithCursor(d)
#----------------------------------------------------------------------
def rnopen(file, flag='c', mode=0666,
rnflags=0, cachesize=None, pgsize=None, lorder=None,
rlen=None, delim=None, source=None, pad=None):
flags = _checkflag(flag, file)
e = _openDBEnv(cachesize)
d = db.DB(e)
if pgsize is not None: d.set_pagesize(pgsize)
if lorder is not None: d.set_lorder(lorder)
d.set_flags(rnflags)
if delim is not None: d.set_re_delim(delim)
if rlen is not None: d.set_re_len(rlen)
if source is not None: d.set_re_source(source)
if pad is not None: d.set_re_pad(pad)
d.open(file, db.DB_RECNO, flags, mode)
return _DBWithCursor(d)
#----------------------------------------------------------------------
def _openDBEnv(cachesize):
e = db.DBEnv()
if cachesize is not None:
if cachesize >= 20480:
e.set_cachesize(0, cachesize)
else:
raise error, "cachesize must be >= 20480"
e.open('.', db.DB_PRIVATE | db.DB_CREATE | db.DB_THREAD | db.DB_INIT_LOCK | db.DB_INIT_MPOOL)
return e
def _checkflag(flag, file):
if flag == 'r':
flags = db.DB_RDONLY
elif flag == 'rw':
flags = 0
elif flag == 'w':
flags = db.DB_CREATE
elif flag == 'c':
flags = db.DB_CREATE
elif flag == 'n':
flags = db.DB_CREATE
#flags = db.DB_CREATE | db.DB_TRUNCATE
# we used db.DB_TRUNCATE flag for this before but BerkeleyDB
        # 4.2.52 changed to disallow truncate with txn environments.
if os.path.isfile(file):
os.unlink(file)
else:
raise error, "flags should be one of 'r', 'w', 'c' or 'n'"
return flags | db.DB_THREAD
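# Illustrative mapping (sketch): _checkflag('r', path) returns
# db.DB_RDONLY | db.DB_THREAD, while 'w', 'c' and 'n' all resolve to
# db.DB_CREATE | db.DB_THREAD.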
#----------------------------------------------------------------------
# This is a silly little hack that allows apps to continue to use the
# DB_THREAD flag even on systems without threads without freaking out
# BerkeleyDB.
#
# This assumes that if Python was built with thread support then
# BerkeleyDB was too.
try:
import thread
del thread
if db.version() < (3, 3, 0):
db.DB_THREAD = 0
except ImportError:
db.DB_THREAD = 0
#----------------------------------------------------------------------
|
xbmc/atv2
|
xbmc/lib/libPython/Python/Lib/bsddb/__init__.py
|
Python
|
gpl-2.0
| 12,752
|
"""Location services."""
import csv
import editdistance
import json
import settings
TOWN_CITY = 3
TOWN_LONGITUDE = 6
TOWN_LATITUDE = 5
TOWN_COUNTRY = 1
ALL_TOWN_BUFFER = None
class CountryCsvParser(object):
"""Parse a country CSV row."""
def __init__(self, data):
"""Parse a single line of CSV."""
self.data = data
self.code = data[4]
self.name = data[5]
def country(code):
"""Return a country code."""
with open(settings.COUNTRY_DATA, newline='', encoding='latin-1') as csvfile:
town_data_reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for loop_item in town_data_reader:
country_parsed = CountryCsvParser(loop_item)
if country_parsed.code == code.upper():
return country_parsed.name
return None
def read_town_data():
"""Read town data from CSV."""
result = []
print('Reading town data')
with open(settings.TOWN_DATA, newline='', encoding='latin-1') as csvfile:
town_data_reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for loop_item in town_data_reader:
result.append(loop_item)
print(len(result))
print('Finished')
return result
def read_towns():
"""Read town data."""
    with open('public/data/towns.json', 'rt') as town_file:
        town_data = json.load(town_file)
    return town_data
def read_all_towns():
"""Read town data."""
global ALL_TOWN_BUFFER
if ALL_TOWN_BUFFER:
return ALL_TOWN_BUFFER
    with open('public/data/towns.json', 'rt') as town_file:
        town_data = json.load(town_file)
#
# sanitise data
#
for town in town_data:
town[5] = town[5].replace('-', ' ')
town[6] = town[6].replace('-', ' ')
if town[5] == "":
town[5] = town[6]
for town in town_data:
town[8] = 'GB'
for additional_town in read_town_data():
town_data.append([
'', # 0 junk
'', # 1 junk
'', # 2 junk
additional_town[5], # 3 latitude
additional_town[6], # 4 longitude
additional_town[3], # 5 town
additional_town[3], # 6 town
'', # 7 junk
additional_town[1], # 8 country
])
ALL_TOWN_BUFFER = town_data
return town_data
def lookup_town_from_json(search):
"""Lookup towns from json."""
towns = read_towns()
found = []
for town in towns:
compare_name = town[5].lower().strip()
if compare_name == search:
found.append({
'latitude': town[3],
'longitude': town[4],
'name': town[5],
'country': town[8]
})
compare_name = town[6].lower().strip()
if compare_name == search:
found.append({
'latitude': town[3],
'longitude': town[4],
'name': town[6],
'country': town[8]
})
return found
def lookup_town(search):
"""Look for a town."""
if search.find(',') != -1:
search = search.split(',')[0]
found = []
print('Looking up %s' % search)
found = lookup_town_from_json(search)
if len(found) == 0:
with open(settings.TOWN_DATA, newline='', encoding='latin-1') as csvfile:
town_data_reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for town in town_data_reader:
compare_name = town[TOWN_CITY].lower().strip()
if compare_name == search:
found.append({
'latitude': town[TOWN_LATITUDE],
'longitude': town[TOWN_LONGITUDE],
'name': town[TOWN_CITY],
'country': town[TOWN_COUNTRY]
})
print('Found %s hits %s' % (len(found), found))
return found
def match_weight(compare_name, search):
"""Match with weight."""
compare_parts = compare_name.split()
compare_parts = set(compare_parts)
score = 0
fails = 0
for word in search:
if word in compare_parts:
score += 100
if word not in compare_parts:
fails += 1
# if fails == 0:
# score = 1000
# input()
# else:
# score = (len(search) - fails) * 1000
# if score > 0 and fails != 0:
# print('Score: %s fails %s' % (score, fails))
# print(compare_parts, search)
# input()
fail_negative = ((float(fails) / float(len(search))) * 1000)
return 1000 - fail_negative
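# Illustrative scoring for match_weight above (sketch): with
# search == ['new', 'york'], 'new york city' has 0 fails -> weight 1000,
# while plain 'york' has 1 fail -> 1000 - (1/2) * 1000 = 500.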
def lookup_town_levenshtein(search, country=None):
"""Use levenshtein."""
search = search.lower().strip()
towns = read_all_towns()
found = []
zero_match = False
for town in towns:
match = False
compare_name = town[5].lower().strip()
score = editdistance.eval(search, compare_name)
if score < 3:
#
# only be lenient until we get a direct hit
#
if not zero_match:
match = True
if score == 0:
match = True
zero_match = True
if match and country:
if town[8] != country.upper().strip():
match = False
if match:
print('%s %s ' % (score, compare_name))
found.append({
'latitude': town[3],
'longitude': town[4],
'name': town[5],
'country': town[8],
'weight': score
})
# if we do have zeroes delete everything else
if zero_match:
found = [x for x in found if x['weight'] == 0]
found.sort(key=lambda item: item['weight'])
return found
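# Illustrative matching for lookup_town_levenshtein above (sketch):
# editdistance.eval('london', 'londn') == 1, so the typo 'londn' still
# matches 'london' with weight 1, but once any exact (weight 0) hit is
# seen, only weight-0 entries are kept.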
def lookup_words(search):
"""Lookup multiple words."""
towns = read_all_towns()
found = []
for town in towns:
match = False
compare_name = town[5].lower().strip()
compare_name += ' ' + town[6].lower().strip()
compare_name += ' ' + town[8].lower().strip()
score = match_weight(compare_name, search)
breakpoint = 100
if score >= breakpoint:
match = True
if match:
found.append({
'latitude': town[3],
'longitude': town[4],
'name': town[6],
'country': town[8],
'weight': score
})
found.sort(key=lambda item: item['weight'])
found.reverse()
return found
def ignore_word(word):
"""Work out whether to ignore word."""
ignore = False
if word.startswith('#'):
ignore = True
if word.startswith('@'):
ignore = True
if word == ',':
ignore = True
if word == '.':
ignore = True
return ignore
def sanitize_words(words):
"""Return a list of words we're going to search on."""
result = []
for word in words:
if not ignore_word(word):
result.append(word.lower().strip())
return result
def lookup_location(text):
"""Lookup the town."""
split_words = text.split(' ')
search = sanitize_words(split_words)
town_list = []
lookup_towns = lookup_words(search)
town_list = town_list + lookup_towns
# for word in search:
# town_list = town_list + lookup_town(word)
return town_list
|
CornerstoneLabs/twittermap
|
search/location.py
|
Python
|
mit
| 7,462
|
"""Analyse top npm popular packages."""
import bs4
import requests
from .base import AnalysesBaseHandler
try:
import xmlrpclib
except ImportError:
import xmlrpc.client as xmlrpclib
class PythonPopularAnalyses(AnalysesBaseHandler):
"""Analyse top npm popular packages."""
_URL = 'http://pypi-ranking.info'
_PACKAGES_PER_PAGE = 50
@staticmethod
def _parse_version_stats(html_version_stats, sort_by_popularity=True):
"""Parse version statistics from HTML definition.
Parse version statistics from HTML definition and return ordered
versions based on downloads
:param html_version_stats: tr-like representation of version statistics
:param sort_by_popularity: whether or not to return versions sorted by popularity
:return: sorted versions based on downloads
"""
result = []
for version_definition in html_version_stats:
# Access nested td
version_name = version_definition.text.split('\n')[1]
version_downloads = version_definition.text.split('\n')[4]
            # The download counts use comma thousands separators; strip them
result.append((version_name, int(version_downloads.replace(',', ''))))
if sort_by_popularity:
return sorted(result, key=lambda x: x[1], reverse=True)
return result
def _use_pypi_xml_rpc(self):
"""Schedule analyses of packages based on PyPI index using XML-RPC.
https://wiki.python.org/moin/PyPIXmlRpc
"""
client = xmlrpclib.ServerProxy('https://pypi.python.org/pypi')
# get a list of package names
packages = sorted(client.list_packages())
for idx, package in enumerate(packages[self.count.min:self.count.max]):
releases = client.package_releases(package, True) # True for show_hidden arg
self.log.debug("Scheduling #%d. (number versions: %d)",
self.count.min + idx, self.nversions)
for version in releases[:self.nversions]:
self.analyses_selinon_flow(package, version)
def _use_pypi_ranking(self):
"""Schedule analyses of packages based on PyPI ranking."""
to_schedule_count = self.count.max - self.count.min
packages_count = 0
page = int((self.count.min / self._PACKAGES_PER_PAGE) + 1)
page_offset = self.count.min % self._PACKAGES_PER_PAGE
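        # Illustrative arithmetic (sketch): with count.min == 125 and 50
        # packages per page, scheduling starts on page 3 and skips the
        # first 25 entries listed there.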
while True:
pop = requests.get('{url}/alltime?page={page}'.format(url=self._URL, page=page))
pop.raise_for_status()
poppage = bs4.BeautifulSoup(pop.text, 'html.parser')
page += 1
for package_name in poppage.find_all('span', class_='list_title'):
if page_offset:
page_offset -= 1
continue
packages_count += 1
if packages_count > to_schedule_count:
return
pop = requests.get('{url}/module/{pkg}'.format(url=self._URL,
pkg=package_name.text))
poppage = bs4.BeautifulSoup(pop.text, 'html.parser')
table = poppage.find('table', id='release_list')
if table is None:
self.log.warning('No releases in %s', pop.url)
continue
versions = self._parse_version_stats(table.find_all('tr'),
sort_by_popularity=self.nversions > 1)
self.log.debug("Scheduling #%d. (number versions: %d)",
self.count.min + packages_count, self.nversions)
for version in versions[:self.nversions]:
self.analyses_selinon_flow(package_name.text, version[0])
def do_execute(self, popular=True):
"""Run core analyse on Python packages.
:param popular: boolean, sort index by popularity
"""
if popular:
self._use_pypi_ranking()
else:
self._use_pypi_xml_rpc()
|
fabric8-analytics/fabric8-analytics-jobs
|
f8a_jobs/handlers/python_popular_analyses.py
|
Python
|
apache-2.0
| 4,111
|
import os
from shlex import quote as shell_quote
import string
def shell_quote_cmd(cmdlist):
return ' '.join(map(shell_quote, cmdlist))
def format_cmdline(args, maxwidth=80):
'''Format args into a shell-quoted command line.
The result will be wrapped to maxwidth characters where possible,
not breaking a single long argument.
'''
# Leave room for the space and backslash at the end of each line
maxwidth -= 2
def lines():
line = ''
for a in (shell_quote(a) for a in args):
# If adding this argument will make the line too long,
# yield the current line, and start a new one.
if len(line) + len(a) + 1 > maxwidth:
yield line
line = ''
# Append this argument to the current line, separating
# it by a space from the existing arguments.
if line:
line += ' ' + a
else:
line = a
yield line
return ' \\\n'.join(lines())
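# Illustrative behaviour (a sketch; a tiny maxwidth chosen only to force a wrap):
#   >>> print(format_cmdline(['aaaa', 'bbbb', 'cccc'], maxwidth=12))
#   aaaa bbbb \
#   cccc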
def parse_env_var(s):
"""Parse an environment variable string
Returns a key-value tuple
Apply the same logic as `docker run -e`:
"If the operator names an environment variable without specifying a value,
then the current value of the named variable is propagated into the
container's environment
"""
parts = s.split('=', 1)
if len(parts) == 2:
k, v = parts
return (k, v)
k = parts[0]
return (k, os.getenv(k, ''))
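# Illustrative behaviour (sketch; assumes FOO is not set in the environment):
#   parse_env_var('FOO=bar')  -> ('FOO', 'bar')
#   parse_env_var('FOO')      -> ('FOO', '')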
def flatten_list(x):
if not isinstance(x, list):
raise ValueError("argument is not a list")
result = []
for i in x:
if isinstance(i, list):
for j in flatten_list(i):
result.append(j)
else:
result.append(i)
return result
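# Illustrative behaviour (sketch):
#   flatten_list([1, [2, [3]], 4])  -> [1, 2, 3, 4]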
def get_umask():
# Same logic as bash/builtins/umask.def
val = os.umask(0o22)
os.umask(val)
return val
def writeln(f, line):
f.write(line + '\n')
def expand_env_vars(in_str):
"""Expand environment variables in a string
Can raise `KeyError` if a variable is referenced but not defined, similar to
bash's nounset (set -u) option"""
return string.Template(in_str).substitute(os.environ)
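# Illustrative behaviour (sketch; assumes HOME=/home/user in the environment):
#   expand_env_vars('$HOME/.scuba')  -> '/home/user/.scuba'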
|
JonathonReinhart/scuba
|
scuba/utils.py
|
Python
|
mit
| 2,252
|
import urllib, time, urlparse, random, string, uuid, re
from django.db import models
from django.conf import settings
from django.db.models.signals import post_save, post_delete
from django.core.mail import send_mail, mail_admins
from localflavor.us.us_states import US_STATES
from django.utils.translation import ugettext_lazy as _
from localflavor.us.models import PhoneNumberField, USPostalCodeField
from django.contrib.auth.models import (BaseUserManager, AbstractBaseUser,
PermissionsMixin, UserManager)
from django.core import validators
from django.utils import timezone
def genereate_vid():
    return str(random.randint(100000000000000, 999999999999999))
class flangioUserManager(BaseUserManager):
def create_user(self, username, email=None, password=None, **extra_fields):
now = timezone.now()
if not email:
raise ValueError('The given email must be set')
email = UserManager.normalize_email(email)
user = self.model(username=username, email=email,
is_staff=False, is_active=True, is_superuser=False,
last_login=now, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, username, email, password, **extra_fields):
u = self.create_user(username, email, password, **extra_fields)
u.is_staff = True
u.is_active = True
u.is_superuser = True
u.save(using=self._db)
return u
#All allowed methods plus standard email method.
SOCIAL_CHOICES =( ('email','Email'),
('twitter','Twitter'),
('facebook','Facebook'),
('google', 'Google'),
('instagram','Instagram'),
)
#You may only register with these
REGISTER_CHOICES =( ('VERIFY-EMAIL','Verify Email'),
('facebook','Facebook'),
('twitter','Twitter'),
)
class flangioUser(AbstractBaseUser, PermissionsMixin):
username = models.CharField(_('username'), max_length=30, unique=True,
help_text=_('Required. 30 characters or fewer. Letters, numbers and '
'@/./+/-/_ characters'),
validators=[
validators.RegexValidator(re.compile('^[\w.@+-]+$'), _('Enter a valid username.'), 'invalid')
])
first_name = models.CharField(_('first name'), max_length=30, blank=True)
last_name = models.CharField(_('last name'), max_length=30, blank=True)
email = models.EmailField(_('email address'), max_length=254, unique=True)
is_staff = models.BooleanField(_('staff status'), default=False,
help_text=_('Designates whether the user can log into this admin '
'site.'))
is_active = models.BooleanField(_('active'), default=True,
help_text=_('Designates whether this user should be treated as '
'active. Unselect this instead of deleting accounts.'))
date_joined = models.DateTimeField(_('date joined'), default=timezone.now)
user_type = models.CharField(max_length=10,
choices=settings.USERTYPE_CHOICES,
blank=True
)
vid = models.CharField(max_length=15, blank=True,
#default = genereate_vid
)
anonymous_patient_id = models.CharField(max_length=36, blank=True,
verbose_name=u'Anonymous ID',
#default = genereate_vid,
)
pin = models.PositiveIntegerField(max_length=4,
verbose_name='PIN', blank=True,
default="1994"
)
gender = models.CharField(max_length=40, blank=True,
choices=settings.GENDER_CHOICES)
#daily_request_max = models.PositiveIntegerField(blank=True, default =1000, max_length=10)
year_of_birth = models.CharField(blank=True, default ="", max_length=4)
height_inches = models.CharField(blank=True, default ="", max_length=3)
weight_lbs = models.CharField(blank=True, default ="", max_length=4)
state = models.CharField(blank=True, max_length=2,
choices=US_STATES,)
city = models.CharField(max_length=256, blank=True, default="")
postal_code = models.CharField(blank=True, default="", max_length=10)
mobile_phone_number = PhoneNumberField(max_length=15, blank=True, default="")
fax_number = PhoneNumberField(max_length=15, blank=True, default="")
organization = models.CharField(max_length=100, blank=True, default="")
photo_image = models.ImageField(blank = True, null=False, default='',
max_length=255L, upload_to="avatars",
verbose_name= "Profile Photo")
last_login_via = models.CharField(max_length=10, choices=SOCIAL_CHOICES,
default="email")
email_verified = models.BooleanField(default=False)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['username']
objects = UserManager()
    class Meta:
        verbose_name = _('user')
        verbose_name_plural = _('users')
def __unicode__(self):
return self.email
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between.
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"Returns the short name for the user."
return self.first_name
def email_user(self, subject, message, from_email=None, **kwargs):
"""
Sends an email to this User.
"""
send_mail(subject, message, from_email, [self.email], **kwargs)
def has_perm(self, perm, obj=None):
"Does the user have a specific permission?"
# Simplest possible answer: Yes, always
return True
def has_module_perms(self, app_label):
"Does the user have permissions to view the app `app_label`?"
# Simplest possible answer: Yes, always
return True
# This is merely an example UserProfile.
# More than likely you'll want to create your own in your app.
# If you are not using flangio for data collection (i.e. you are merely
# importing), then you might not need to use a UserProfile at all.
#class UserProfile(models.Model):
# user = models.ForeignKey(User, unique=True, related_name="BuiltInflangioUserProfile")
# user_type = models.CharField(max_length=10,
# choices=USERTYPE_CHOICES,
# default="patient")
# vid = models.CharField(max_length=15, unique=True, blank=True,
# default = genereate_vid)
# anonymous_patient_id = models.CharField(max_length=36,
# unique=True,
# verbose_name=u'Anonymous Patient ID',
# default = genereate_vid,blank=True,)
# pin = models.PositiveIntegerField(max_length=4, verbose_name='PIN', blank=True,)
# gender = models.CharField(max_length=40, choices=gender_choices)
# year_of_birth = models.IntegerField(blank=True, default="", max_length=4)
# height_inches = models.IntegerField(blank=True, default="", max_length=3)
# weight_lbs = models.IntegerField(blank=True, default="", max_length=4)
# state = models.CharField(blank=True, max_length=2,
# choices=US_STATES,)
# city = models.CharField(max_length=256)
# postal_code = models.CharField(blank=True, default="", max_length=10)
# mobile_phone_number = PhoneNumberField(max_length=15, blank=True, default="")
# fax_number = PhoneNumberField(max_length=15, blank=True, default="")
# organization = models.CharField(max_length=100, blank=True, default="")
#
#
#
#
#
#
#
# def __unicode__(self):
# return '%s %s (%s)' % (self.user.first_name,
# self.user.last_name,
# self.vid)
#
# def save(self, **kwargs):
# randcode=random.randint(1000,9999)
# if not self.pin:
# self.pin=randcode
#
# if not self.anonymous_patient_id:
# self.anonymous_patient_id=str(uuid.uuid4())
#
#
# if not self.vid:
# self.vid = str(random.randint(100000000000000,
# 999999999999999))
#
# super(UserProfile, self).save(**kwargs)
#
class Permission(models.Model):
user = models.ForeignKey(flangioUser)
permission_name = models.CharField(max_length=50,
choices=settings.PERMISSION_CHOICES)
def __unicode__(self):
return '%s has the %s permission.' % (self.user.email, self.permission_name)
class Meta:
unique_together = (("user", "permission_name"),)
|
aviars/flangio
|
apps/accounts/models.py
|
Python
|
gpl-2.0
| 9,796
|
# Copyright (c) 2017, John Skinner
import unittest
import unittest.mock as mock
import bson
import pymongo.collection
import database.client
import batch_analysis.task_manager as manager
import batch_analysis.tasks.import_dataset_task as import_dataset_task
import batch_analysis.tasks.generate_dataset_task as generate_dataset_task
import batch_analysis.tasks.train_system_task as train_system_task
import batch_analysis.tasks.run_system_task as run_system_task
import batch_analysis.tasks.benchmark_trial_task as benchmark_task
# TODO: Tests for these two as well
# import batch_analysis.tasks.compare_trials_task as compare_trials_task
# import batch_analysis.tasks.compare_benchmarks_task as compare_benchmarks_task
class TestTaskManager(unittest.TestCase):
def test_get_import_dataset_task_checks_for_existing_task(self):
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
subject = manager.TaskManager(mock_collection, mock_db_client)
module_name = 'test_module'
path = '/tmp/dataset/thisisadataset'
additional_args = {'foo': 'bar'}
subject.get_import_dataset_task(module_name, path, additional_args)
self.assertTrue(mock_collection.find_one.called)
query = mock_collection.find_one.call_args[0][0]
self.assertIn('module_name', query)
self.assertEqual(module_name, query['module_name'])
self.assertIn('path', query)
self.assertEqual(path, query['path'])
self.assertIn('additional_args.foo', query)
self.assertEqual('bar', query['additional_args.foo'])
def test_get_import_dataset_task_returns_deserialized_existing(self):
s_task = {'_type': 'ImportDatasetTask', '_id': bson.ObjectId()}
mock_entity = mock.MagicMock()
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_collection.find_one.return_value = s_task
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
mock_db_client.deserialize_entity.return_value = mock_entity
subject = manager.TaskManager(mock_collection, mock_db_client)
result = subject.get_import_dataset_task('lol no', '/tmp/lolno')
self.assertTrue(mock_db_client.deserialize_entity.called)
self.assertEqual(s_task, mock_db_client.deserialize_entity.call_args[0][0])
self.assertEqual(mock_entity, result)
def test_get_import_dataset_task_returns_new_instance_if_no_existing(self):
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_collection.find_one.return_value = None
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
subject = manager.TaskManager(mock_collection, mock_db_client)
module_name = 'test_module'
path = '/tmp/dataset/thisisadataset'
result = subject.get_import_dataset_task(module_name, path)
self.assertIsInstance(result, import_dataset_task.ImportDatasetTask)
self.assertIsNone(result.identifier)
def test_get_generate_dataset_task_checks_for_existing_task(self):
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
subject = manager.TaskManager(mock_collection, mock_db_client)
controller_id = bson.ObjectId()
simulator_id = bson.ObjectId()
simulator_config = {
'stereo_offset': 0.15,
'provide_rgb': True,
'provide_depth': True,
'provide_labels': False,
'provide_world_normals': False
}
repeat = 170
subject.get_generate_dataset_task(controller_id, simulator_id, simulator_config, repeat)
self.assertTrue(mock_collection.find_one.called)
query = mock_collection.find_one.call_args[0][0]
self.assertIn('controller_id', query)
self.assertEqual(controller_id, query['controller_id'])
self.assertIn('simulator_id', query)
self.assertEqual(simulator_id, query['simulator_id'])
for key, value in simulator_config.items():
self.assertIn('simulator_config.{}'.format(key), query)
self.assertEqual(value, query['simulator_config.{}'.format(key)])
self.assertIn('repeat', query)
self.assertEqual(repeat, query['repeat'])
def test_get_generate_dataset_task_returns_deserialized_existing(self):
s_task = {'_type': 'GenerateDatasetTask', '_id': bson.ObjectId()}
mock_entity = mock.MagicMock()
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_collection.find_one.return_value = s_task
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
mock_db_client.deserialize_entity.return_value = mock_entity
subject = manager.TaskManager(mock_collection, mock_db_client)
result = subject.get_generate_dataset_task(bson.ObjectId(), bson.ObjectId(), {'provide_rgb': True})
self.assertTrue(mock_db_client.deserialize_entity.called)
self.assertEqual(s_task, mock_db_client.deserialize_entity.call_args[0][0])
self.assertEqual(mock_entity, result)
def test_get_generate_dataset_task_returns_new_instance_if_no_existing(self):
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_collection.find_one.return_value = None
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
subject = manager.TaskManager(mock_collection, mock_db_client)
result = subject.get_generate_dataset_task(bson.ObjectId(), bson.ObjectId(), {'provide_rgb': True})
self.assertIsInstance(result, generate_dataset_task.GenerateDatasetTask)
self.assertIsNone(result.identifier)
def test_get_train_system_task_checks_for_existing_task(self):
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
subject = manager.TaskManager(mock_collection, mock_db_client)
trainer_id = bson.ObjectId()
trainee_id = bson.ObjectId()
subject.get_train_system_task(trainer_id, trainee_id)
self.assertTrue(mock_collection.find_one.called)
query = mock_collection.find_one.call_args[0][0]
self.assertIn('trainer_id', query)
self.assertEqual(trainer_id, query['trainer_id'])
self.assertIn('trainee_id', query)
self.assertEqual(trainee_id, query['trainee_id'])
def test_get_train_system_task_returns_deserialized_existing(self):
        s_task = {'_type': 'TrainSystemTask', '_id': bson.ObjectId()}
mock_entity = mock.MagicMock()
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_collection.find_one.return_value = s_task
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
mock_db_client.deserialize_entity.return_value = mock_entity
subject = manager.TaskManager(mock_collection, mock_db_client)
result = subject.get_train_system_task(bson.ObjectId(), bson.ObjectId())
self.assertTrue(mock_db_client.deserialize_entity.called)
self.assertEqual(s_task, mock_db_client.deserialize_entity.call_args[0][0])
self.assertEqual(mock_entity, result)
def test_get_train_system_task_returns_new_instance_if_no_existing(self):
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_collection.find_one.return_value = None
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
subject = manager.TaskManager(mock_collection, mock_db_client)
trainer_id = bson.ObjectId()
trainee_id = bson.ObjectId()
result = subject.get_train_system_task(trainer_id, trainee_id)
self.assertIsInstance(result, train_system_task.TrainSystemTask)
self.assertIsNone(result.identifier)
def test_get_run_system_task_checks_for_existing_task(self):
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
subject = manager.TaskManager(mock_collection, mock_db_client)
system_id = bson.ObjectId()
image_source_id = bson.ObjectId()
subject.get_run_system_task(system_id, image_source_id)
self.assertTrue(mock_collection.find_one.called)
query = mock_collection.find_one.call_args[0][0]
self.assertIn('system_id', query)
self.assertEqual(system_id, query['system_id'])
self.assertIn('image_source_id', query)
self.assertEqual(image_source_id, query['image_source_id'])
def test_get_run_system_task_returns_deserialized_existing(self):
        s_task = {'_type': 'RunSystemTask', '_id': bson.ObjectId()}
mock_entity = mock.MagicMock()
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_collection.find_one.return_value = s_task
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
mock_db_client.deserialize_entity.return_value = mock_entity
subject = manager.TaskManager(mock_collection, mock_db_client)
result = subject.get_run_system_task(bson.ObjectId(), bson.ObjectId())
self.assertTrue(mock_db_client.deserialize_entity.called)
self.assertEqual(s_task, mock_db_client.deserialize_entity.call_args[0][0])
self.assertEqual(mock_entity, result)
def test_get_run_system_task_returns_new_instance_if_no_existing(self):
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_collection.find_one.return_value = None
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
subject = manager.TaskManager(mock_collection, mock_db_client)
system_id = bson.ObjectId()
image_source_id = bson.ObjectId()
result = subject.get_run_system_task(system_id, image_source_id)
self.assertIsInstance(result, run_system_task.RunSystemTask)
self.assertIsNone(result.identifier)
def test_get_benchmark_task_checks_for_existing_task(self):
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
subject = manager.TaskManager(mock_collection, mock_db_client)
trial_result_id = bson.ObjectId()
benchmark_id = bson.ObjectId()
subject.get_benchmark_task(trial_result_id, benchmark_id)
self.assertTrue(mock_collection.find_one.called)
query = mock_collection.find_one.call_args[0][0]
self.assertIn('trial_result_id', query)
self.assertEqual(trial_result_id, query['trial_result_id'])
self.assertIn('benchmark_id', query)
self.assertEqual(benchmark_id, query['benchmark_id'])
def test_get_benchmark_task_returns_deserialized_existing(self):
        s_task = {'_type': 'BenchmarkTrialTask', '_id': bson.ObjectId()}
mock_entity = mock.MagicMock()
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_collection.find_one.return_value = s_task
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
mock_db_client.deserialize_entity.return_value = mock_entity
subject = manager.TaskManager(mock_collection, mock_db_client)
result = subject.get_benchmark_task(bson.ObjectId(), bson.ObjectId())
self.assertTrue(mock_db_client.deserialize_entity.called)
self.assertEqual(s_task, mock_db_client.deserialize_entity.call_args[0][0])
self.assertEqual(mock_entity, result)
def test_get_benchmark_returns_new_instance_if_no_existing(self):
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_collection.find_one.return_value = None
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
subject = manager.TaskManager(mock_collection, mock_db_client)
trial_result_id = bson.ObjectId()
benchmark_id = bson.ObjectId()
result = subject.get_benchmark_task(trial_result_id, benchmark_id)
self.assertIsInstance(result, benchmark_task.BenchmarkTrialTask)
self.assertIsNone(result.identifier)
    def test_do_task_checks_import_dataset_task_is_unique(self):
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
subject = manager.TaskManager(mock_collection, mock_db_client)
module_name = 'test_module'
path = '/tmp/dataset/thisisadataset'
task = import_dataset_task.ImportDatasetTask(module_name, path)
subject.do_task(task)
self.assertTrue(mock_collection.find.called)
query = mock_collection.find.call_args[0][0]
self.assertIn('module_name', query)
self.assertEqual(module_name, query['module_name'])
self.assertIn('path', query)
self.assertEqual(path, query['path'])
def test_do_task_checks_train_system_task_is_unique(self):
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
subject = manager.TaskManager(mock_collection, mock_db_client)
trainer_id = bson.ObjectId()
trainee_id = bson.ObjectId()
task = train_system_task.TrainSystemTask(trainer_id, trainee_id)
subject.do_task(task)
self.assertTrue(mock_collection.find.called)
query = mock_collection.find.call_args[0][0]
self.assertIn('trainer_id', query)
self.assertEqual(trainer_id, query['trainer_id'])
self.assertIn('trainee_id', query)
self.assertEqual(trainee_id, query['trainee_id'])
def test_do_task_checks_run_system_task_is_unique(self):
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
subject = manager.TaskManager(mock_collection, mock_db_client)
system_id = bson.ObjectId()
image_source_id = bson.ObjectId()
task = run_system_task.RunSystemTask(system_id, image_source_id)
subject.do_task(task)
self.assertTrue(mock_collection.find.called)
query = mock_collection.find.call_args[0][0]
self.assertIn('system_id', query)
self.assertEqual(system_id, query['system_id'])
self.assertIn('image_source_id', query)
self.assertEqual(image_source_id, query['image_source_id'])
def test_do_task_checks_benchmark_task_is_unique(self):
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
subject = manager.TaskManager(mock_collection, mock_db_client)
trial_result_id = bson.ObjectId()
benchmark_id = bson.ObjectId()
task = benchmark_task.BenchmarkTrialTask(trial_result_id, benchmark_id)
subject.do_task(task)
self.assertTrue(mock_collection.find.called)
query = mock_collection.find.call_args[0][0]
self.assertIn('trial_result_id', query)
self.assertEqual(trial_result_id, query['trial_result_id'])
self.assertIn('benchmark_id', query)
self.assertEqual(benchmark_id, query['benchmark_id'])
def test_do_task_saves_new_task(self):
        # Mock the method chain on the pymongo cursor
mock_cursor = mock.MagicMock()
mock_cursor.limit.return_value = mock_cursor
mock_cursor.count.return_value = 0
mock_collection = mock.create_autospec(pymongo.collection.Collection)
mock_collection.find.return_value = mock_cursor
mock_db_client = mock.create_autospec(database.client.DatabaseClient)
subject = manager.TaskManager(mock_collection, mock_db_client)
system_id = bson.ObjectId()
image_source_id = bson.ObjectId()
task = run_system_task.RunSystemTask(system_id, image_source_id)
subject.do_task(task)
self.assertTrue(mock_collection.insert.called)
s_task = task.serialize()
del s_task['_id'] # This gets set after the insert call, clear it again
self.assertEqual(s_task, mock_collection.insert.call_args[0][0])
|
jskinn/robot-vision-experiment-framework
|
batch_analysis/tests/test_task_manager.py
|
Python
|
bsd-2-clause
| 16,573
|
# -*- coding: utf-8 -*-
"""
Extract Table of Contents
=========================
A Pelican plugin to extract table of contents (ToC) from `article.content` and
place it in its own `article.toc` variable for use in templates.
"""
from os import path
from bs4 import BeautifulSoup
from pelican import signals, readers, contents
import logging
logger = logging.getLogger(__name__)
def extract_toc(content):
if isinstance(content, contents.Static):
return
soup = BeautifulSoup(content._content, 'html.parser')
filename = content.source_path
extension = path.splitext(filename)[1][1:]
toc = None
# default Markdown reader
if not toc and readers.MarkdownReader.enabled and extension in readers.MarkdownReader.file_extensions:
toc = soup.find('div', class_='toc')
if toc:
toc.extract()
# default reStructuredText reader
if not toc and readers.RstReader.enabled and extension in readers.RstReader.file_extensions:
toc = soup.find('div', class_='contents topic')
if toc:
toc.extract()
tag = BeautifulSoup(str(toc), 'html.parser')
tag.div['class'] = 'toc'
tag.div['id'] = ''
p = tag.find('p', class_='topic-title first')
if p:
p.extract()
toc = tag
# Pandoc reader (markdown and other formats)
if 'pandoc_reader' in content.settings['PLUGINS']:
try:
from pandoc_reader import PandocReader
except ImportError:
PandocReader = False
if not toc and PandocReader and PandocReader.enabled and extension in PandocReader.file_extensions:
toc = soup.find('nav', id='TOC')
if toc:
toc.extract()
    content._content = soup.decode()
    if toc:
        content.toc = toc.decode()
        if content.toc.startswith('<html>'):
            content.toc = content.toc[12:-14]
def register():
signals.content_object_init.connect(extract_toc)
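# Illustrative template usage (a sketch; `article.toc` is the variable this
# plugin sets, per the module docstring above):
#   {% if article.toc %}<nav class="toc">{{ article.toc }}</nav>{% endif %}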
|
tijptjik/thegodsproject
|
plugins/extract_toc/extract_toc.py
|
Python
|
mit
| 1,988
|
from hashlib import sha1
from datetime import datetime
import logging
import mimetypes
import re
import urllib
from django.db import models
from django.db.models import Q
from django.contrib.auth.models import User
from django.contrib.markup.templatetags import markup
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.conf import settings
from django.template.defaultfilters import slugify, striptags
from django.utils.translation import ugettext_lazy as _
from django.template import Context
from decorators import logtime, once_per_instance
# cms
from cms.models import CMSPlugin, Page
from cms.models.fields import PlaceholderField
# filer
from filer.fields.folder import FilerFolderField
from filer.models.foldermodels import Folder
WORD_LIMIT = getattr(settings, 'ARTICLES_TEASER_LIMIT', 75)
AUTO_TAG = getattr(settings, 'ARTICLES_AUTO_TAG', False)
DEFAULT_DB = getattr(settings, 'ARTICLES_DEFAULT_DB', 'default')
LOOKUP_LINK_TITLE = getattr(settings, 'ARTICLES_LOOKUP_LINK_TITLE', True)
MARKUP_HTML = 'h'
MARKUP_MARKDOWN = 'm'
MARKUP_REST = 'r'
MARKUP_TEXTILE = 't'
MARKUP_OPTIONS = getattr(settings, 'ARTICLE_MARKUP_OPTIONS', (
(MARKUP_HTML, _('HTML/Plain Text')),
(MARKUP_MARKDOWN, _('Markdown')),
(MARKUP_REST, _('ReStructured Text')),
(MARKUP_TEXTILE, _('Textile'))
))
MARKUP_DEFAULT = getattr(settings, 'ARTICLE_MARKUP_DEFAULT', MARKUP_HTML)
USE_ADDTHIS_BUTTON = getattr(settings, 'USE_ADDTHIS_BUTTON', True)
ADDTHIS_USE_AUTHOR = getattr(settings, 'ADDTHIS_USE_AUTHOR', True)
DEFAULT_ADDTHIS_USER = getattr(settings, 'DEFAULT_ADDTHIS_USER', None)
# regex used to find links in an article
LINK_RE = re.compile('<a.*?href="(.*?)".*?>(.*?)</a>', re.I|re.M)
TITLE_RE = re.compile('<title.*?>(.*?)</title>', re.I|re.M)
TAG_RE = re.compile('[^a-z0-9\-_\+\:\.]?', re.I)
log = logging.getLogger('articles.models')
def get_name(user):
"""
Provides a way to fall back to a user's username if their full name has not
been entered.
"""
key = 'username_for_%s' % user.id
log.debug('Looking for "%s" in cache (%s)' % (key, user))
name = cache.get(key)
if not name:
log.debug('Name not found')
if len(user.get_full_name().strip()):
log.debug('Using full name')
name = user.get_full_name()
else:
log.debug('Using username')
name = user.username
log.debug('Caching %s as "%s" for a while' % (key, name))
cache.set(key, name, 86400)
return name
User.get_name = get_name
class Tag(models.Model):
name = models.CharField(max_length=64, unique=True)
slug = models.CharField(max_length=64, unique=True, null=True, blank=True)
def __unicode__(self):
return self.name
@staticmethod
def clean_tag(name):
"""Replace spaces with dashes, in case someone adds such a tag manually"""
name = name.replace(' ', '-').encode('ascii', 'ignore')
name = TAG_RE.sub('', name)
clean = name.lower().strip()
log.debug('Cleaned tag "%s" to "%s"' % (name, clean))
return clean
def save(self, *args, **kwargs):
"""Cleans up any characters I don't want in a URL"""
log.debug('Ensuring that tag "%s" has a slug' % (self,))
self.slug = Tag.clean_tag(self.name)
super(Tag, self).save(*args, **kwargs)
@models.permalink
def get_absolute_url(self):
return ('articles_display_tag', (self.cleaned,))
@property
def cleaned(self):
"""Returns the clean version of the tag"""
return self.slug or Tag.clean_tag(self.name)
@property
def rss_name(self):
return self.cleaned
class Meta:
ordering = ('name',)
class ArticleStatusManager(models.Manager):
def default(self):
default = self.all()[:1]
if len(default) == 0:
return None
else:
return default[0]
class ArticleStatus(models.Model):
name = models.CharField(max_length=50)
ordering = models.IntegerField(default=0)
is_live = models.BooleanField(default=False, blank=True)
objects = ArticleStatusManager()
class Meta:
ordering = ('ordering', 'name')
verbose_name_plural = _('Article statuses')
def __unicode__(self):
if self.is_live:
return u'%s (live)' % self.name
else:
return self.name
class ArticleManager(models.Manager):
def active(self):
"""
Retrieves all active articles which have been published and have not
yet expired.
"""
now = datetime.now()
return self.get_query_set().filter(
Q(expiration_date__isnull=True) |
Q(expiration_date__gte=now),
publish_date__lte=now,
is_active=True)
def live(self, user=None):
"""Retrieves all live articles"""
qs = self.active()
if user is not None and user.is_superuser:
# superusers get to see all articles
return qs
else:
# only show live articles to regular users
return qs.filter(status__is_live=True)
MARKUP_HELP = _("""Select the type of markup you are using in this article.
<ul>
<li><a href="http://daringfireball.net/projects/markdown/basics" target="_blank">Markdown Guide</a></li>
<li><a href="http://docutils.sourceforge.net/docs/user/rst/quickref.html" target="_blank">ReStructured Text Guide</a></li>
<li><a href="http://thresholdstate.com/articles/4312/the-textile-reference-manual" target="_blank">Textile Guide</a></li>
</ul>""")
class Article(models.Model):
title = models.CharField(max_length=100)
slug = models.SlugField(unique_for_year='publish_date')
status = models.ForeignKey(ArticleStatus, default=ArticleStatus.objects.default)
author = models.ForeignKey(User)
sites = models.ManyToManyField(Site, blank=True)
keywords = models.TextField(blank=True, help_text=_("If omitted, the keywords will be the same as the article tags."))
description = models.TextField(blank=True, help_text=_("If omitted, the description will be determined by the first bit of the article's content."))
markup = models.CharField(max_length=1, choices=MARKUP_OPTIONS, default=MARKUP_DEFAULT, help_text=MARKUP_HELP)
content = models.TextField(blank=True, null=True)
rendered_content = models.TextField(blank=True, null=True)
tags = models.ManyToManyField(Tag, help_text=_('Tags that describe this article'), blank=True)
auto_tag = models.BooleanField(default=AUTO_TAG, blank=True, help_text=_('Check this if you want to automatically assign any existing tags to this article based on its content.'))
followup_for = models.ManyToManyField('self', symmetrical=False, blank=True, help_text=_('Select any other articles that this article follows up on.'), related_name='followups')
related_articles = models.ManyToManyField('self', blank=True)
publish_date = models.DateTimeField(default=datetime.now, help_text=_('The date and time this article shall appear online.'))
expiration_date = models.DateTimeField(blank=True, null=True, help_text=_('Leave blank if the article does not expire.'))
is_active = models.BooleanField(default=True, blank=True)
login_required = models.BooleanField(blank=True, help_text=_('Enable this if users must login before they can read this article.'))
use_addthis_button = models.BooleanField(_('Show AddThis button'), blank=True, default=USE_ADDTHIS_BUTTON, help_text=_('Check this to show an AddThis bookmark button when viewing an article.'))
addthis_use_author = models.BooleanField(_("Use article author's username"), blank=True, default=ADDTHIS_USE_AUTHOR, help_text=_("Check this if you want to use the article author's username for the AddThis button. Respected only if the username field is left empty."))
addthis_username = models.CharField(_('AddThis Username'), max_length=50, blank=True, default=DEFAULT_ADDTHIS_USER, help_text=_('The AddThis username to use for the button.'))
# cms field
placeholder_1 = PlaceholderField('placeholder_1')
# other relations
folder = FilerFolderField(blank=True, null=True, related_name='article_folder', on_delete=models.SET_NULL)
objects = ArticleManager()
def __init__(self, *args, **kwargs):
"""Makes sure that we have some rendered content to use"""
super(Article, self).__init__(*args, **kwargs)
self._next = None
self._previous = None
self._teaser = None
if self.id:
# mark the article as inactive if it's expired and still active
if self.expiration_date and self.expiration_date <= datetime.now() and self.is_active:
self.is_active = False
self.save()
if not self.rendered_content or not len(self.rendered_content.strip()):
self.save()
def __unicode__(self):
return self.title
@property
def name(self):
return self.title
def save(self, *args, **kwargs):
"""Renders the article using the appropriate markup language."""
using = kwargs.get('using', DEFAULT_DB)
self.do_render_markup()
self.do_addthis_button()
self.do_meta_description()
self.do_unique_slug(using)
super(Article, self).save(*args, **kwargs)
# do some things that require an ID first
requires_save = self.do_auto_tag(using)
requires_save |= self.do_tags_to_keywords()
requires_save |= self.do_default_site(using)
"""
if not self.folder:
folder_name = '%s - %s' % (self.publish_date, self.name)
parent, created = Folder.objects.get_or_create(name='Articles')
folder, created = Folder.objects.get_or_create(name=folder_name, parent=parent)
self.folder = folder
requires_save = True
"""
if requires_save:
# bypass the other processing
super(Article, self).save()
def do_render_markup(self):
"""Turns any markup into HTML"""
original = self.rendered_content
if self.markup == MARKUP_MARKDOWN:
self.rendered_content = markup.markdown(self.content)
elif self.markup == MARKUP_REST:
self.rendered_content = markup.restructuredtext(self.content)
elif self.markup == MARKUP_TEXTILE:
self.rendered_content = markup.textile(self.content)
else:
self.rendered_content = self.content
return (self.rendered_content != original)
def do_addthis_button(self):
"""Sets the AddThis username for this post"""
# if the author wishes to have an "AddThis" button on this article,
# make sure we have a username to go along with it.
if self.use_addthis_button and self.addthis_use_author and not self.addthis_username:
self.addthis_username = self.author.username
return True
return False
def do_unique_slug(self, using=DEFAULT_DB):
"""
Ensures that the slug is always unique for the year this article was
posted
"""
if not self.id:
# make sure we have a slug first
if not len(self.slug.strip()):
self.slug = slugify(self.title)
self.slug = self.get_unique_slug(self.slug, using)
return True
return False
def do_tags_to_keywords(self):
"""
If meta keywords is empty, sets them using the article tags.
Returns True if an additional save is required, False otherwise.
"""
if len(self.keywords.strip()) == 0:
self.keywords = ', '.join([t.name for t in self.tags.all()])
return True
return False
def do_meta_description(self):
"""
If meta description is empty, sets it to the article's teaser.
Returns True if an additional save is required, False otherwise.
"""
if len(self.description.strip()) == 0:
self.description = self.teaser
return True
return False
@logtime
@once_per_instance
def do_auto_tag(self, using=DEFAULT_DB):
"""
Performs the auto-tagging work if necessary.
Returns True if an additional save is required, False otherwise.
"""
if not self.auto_tag:
log.debug('Article "%s" (ID: %s) is not marked for auto-tagging. Skipping.' % (self.title, self.pk))
return False
# don't clobber any existing tags!
existing_ids = [t.id for t in self.tags.all()]
log.debug('Article %s already has these tags: %s' % (self.pk, existing_ids))
unused = Tag.objects.all()
if hasattr(unused, 'using'):
unused = unused.using(using)
unused = unused.exclude(id__in=existing_ids)
found = False
to_search = (self.content, self.title, self.description, self.keywords)
for tag in unused:
            regex = re.compile(r'\b%s\b' % re.escape(tag.name), re.I)  # escape so tag names with regex metacharacters match literally
if any(regex.search(text) for text in to_search):
log.debug('Applying Tag "%s" (%s) to Article %s' % (tag, tag.pk, self.pk))
self.tags.add(tag)
found = True
return found
def do_default_site(self, using=DEFAULT_DB):
"""
If no site was selected, selects the site used to create the article
as the default site.
Returns True if an additional save is required, False otherwise.
"""
if not len(self.sites.all()):
sites = Site.objects.all()
if hasattr(sites, 'using'):
sites = sites.using(using)
self.sites.add(sites.get(pk=settings.SITE_ID))
return True
return False
def get_unique_slug(self, slug, using=DEFAULT_DB):
"""Iterates until a unique slug is found"""
# we need a publish date before we can do anything meaningful
if type(self.publish_date) is not datetime:
return slug
orig_slug = slug
year = self.publish_date.year
counter = 1
while True:
not_unique = Article.objects.all()
if hasattr(not_unique, 'using'):
not_unique = not_unique.using(using)
not_unique = not_unique.filter(publish_date__year=year, slug=slug)
if len(not_unique) == 0:
return slug
slug = '%s-%s' % (orig_slug, counter)
counter += 1
def _get_article_links(self):
"""
Find all links in this article. When a link is encountered in the
article text, this will attempt to discover the title of the page it
links to. If there is a problem with the target page, or there is no
title (ie it's an image or other binary file), the text of the link is
used as the title. Once a title is determined, it is cached for a week
before it will be requested again.
"""
links = []
# find all links in the article
log.debug('Locating links in article: %s' % (self,))
for link in LINK_RE.finditer(self.rendered_content):
url = link.group(1)
log.debug('Do we have a title for "%s"?' % (url,))
key = 'href_title_' + sha1(url).hexdigest()
# look in the cache for the link target's title
title = cache.get(key)
if title is None:
log.debug('Nope... Getting it and caching it.')
title = link.group(2)
if LOOKUP_LINK_TITLE:
try:
log.debug('Looking up title for URL: %s' % (url,))
# open the URL
c = urllib.urlopen(url)
html = c.read()
c.close()
# try to determine the title of the target
title_m = TITLE_RE.search(html)
if title_m:
title = title_m.group(1)
log.debug('Found title: %s' % (title,))
                    except Exception:
                        # if anything goes wrong (e.g. IOError), use the link's text
                        log.warn('Failed to retrieve the title for "%s"; using link text "%s"' % (url, title))
# cache the page title for a week
log.debug('Using "%s" as title for "%s"' % (title, url))
cache.set(key, title, 604800)
# add it to the list of links and titles
if url not in (l[0] for l in links):
links.append((url, title))
return tuple(links)
links = property(_get_article_links)
def _get_word_count(self):
"""Stupid word counter for an article."""
return len(striptags(self.rendered_content).split(' '))
word_count = property(_get_word_count)
@models.permalink
def get_absolute_url(self):
return ('articles_display_article', (self.publish_date.year, self.slug))
    def _get_teaser(self):
        """
        Retrieve some part of the article or the article's description.
        """
if not self._teaser:
if len(self.description.strip()):
text = self.description
else:
text = self.rendered_content
words = text.split(' ')
if len(words) > WORD_LIMIT:
text = '%s...' % ' '.join(words[:WORD_LIMIT])
self._teaser = text
return self._teaser
teaser = property(_get_teaser)
def get_next_article(self):
"""Determines the next live article"""
if not self._next:
try:
qs = Article.objects.live().exclude(id__exact=self.id)
article = qs.filter(publish_date__gte=self.publish_date).order_by('publish_date')[0]
except (Article.DoesNotExist, IndexError):
article = None
self._next = article
return self._next
def get_previous_article(self):
"""Determines the previous live article"""
if not self._previous:
try:
qs = Article.objects.live().exclude(id__exact=self.id)
article = qs.filter(publish_date__lte=self.publish_date).order_by('-publish_date')[0]
except (Article.DoesNotExist, IndexError):
article = None
self._previous = article
return self._previous
class Meta:
ordering = ('-publish_date', 'title')
get_latest_by = 'publish_date'
class Attachment(models.Model):
upload_to = lambda inst, fn: 'attach/%s/%s/%s' % (datetime.now().year, inst.article.slug, fn)
article = models.ForeignKey(Article, related_name='attachments')
attachment = models.FileField(upload_to=upload_to)
caption = models.CharField(max_length=255, blank=True)
class Meta:
ordering = ('-article', 'id')
def __unicode__(self):
return u'%s: %s' % (self.article, self.caption)
@property
def filename(self):
return self.attachment.name.split('/')[-1]
@property
def content_type_class(self):
mt = mimetypes.guess_type(self.attachment.path)[0]
if mt:
content_type = mt.replace('/', '_')
else:
# assume everything else is text/plain
content_type = 'text_plain'
return content_type
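
# A standalone sketch (not part of the original module) of the year-scoped
# unique-slug strategy implemented by Article.get_unique_slug() above:
# candidates are 'slug', 'slug-1', 'slug-2', ... until none collides within
# the same publish year. `existing` stands in for the database query.
def _unique_slug_sketch(slug, year, existing):
    """existing: set of (year, slug) pairs that are already taken."""
    orig_slug = slug
    counter = 1
    while (year, slug) in existing:
        slug = '%s-%s' % (orig_slug, counter)
        counter += 1
    return slug

# e.g. _unique_slug_sketch('hello', 2012, {(2012, 'hello')}) -> 'hello-1'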
|
hzlf/openbroadcast
|
website/apps/articles/models.py
|
Python
|
gpl-3.0
| 19,681
|
# Run this in the Django shell
from clinicalsearch.models import ClinicalTrial
import csv
with open('clinicalsearch/trials_ranked.csv', 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter=',')
for row in reader:
print row
t = ClinicalTrial(id=row[0], sponsor=row[1], published=(row[2]=="TRUE"), state=row[3], url=row[4], ongoing=(row[5]=="TRUE"), title=row[6], condition=row[7], intervention=row[8], locations=row[9], last_changed=row[10], min_age=int(row[11]), max_age=int(row[12]), genders=row[13], health=(row[14] == "True"), ranking=int(row[15]))
t.save()
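
# A hedged alternative sketch (not from the original repo): for large CSVs,
# bulk_create() issues batched INSERTs instead of one query per row. The
# fields mirror the loop above, and batch_size=500 is an assumed tuning value.
def populate_bulk(path='clinicalsearch/trials_ranked.csv'):
    trials = []
    with open(path, 'rU') as csvfile:
        for row in csv.reader(csvfile, delimiter=','):
            trials.append(ClinicalTrial(
                id=row[0], sponsor=row[1], published=(row[2] == "TRUE"),
                state=row[3], url=row[4], ongoing=(row[5] == "TRUE"),
                title=row[6], condition=row[7], intervention=row[8],
                locations=row[9], last_changed=row[10], min_age=int(row[11]),
                max_age=int(row[12]), genders=row[13],
                health=(row[14] == "True"), ranking=int(row[15])))
    ClinicalTrial.objects.bulk_create(trials, batch_size=500)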
|
adam2392/clinicaltrials
|
populatedb.py
|
Python
|
apache-2.0
| 574
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: CMRRestFrameConversion.py
# Created: Sat Dec 15 17:03:04 2012
# Last Change: Sat Dec 15 17:03:50 2012
#
# Calculates (U-V)z slopes, scatters and intercepts of a given color--magnitude
# relation.
# Follows the procedure in Appendix II of Mei et al. 2009 (ApJ, 690, 42),
# except here we convert
# mags to apparent mags at the distance of the Coma cluster.
from astLib import astSED
from astLib import astStats
from astLib import astCalc
from scipy import stats
from scipy import optimize
from scipy import interpolate
import numpy
import pylab
import os
import sys
import random
import math
import pickle
import string
random.seed()
#-----------------------------------------------------------------------------
# Constants etc.
# number of bootstrap samples, for estimating errors
BOOTSTRAPS = 1000
# number of age draws; the sample size is NGAL * number of models, with the
# same age used in each metallicity bin
NGAL = 25
FILTER_DIR = "../../../../testingData/filters/"
# Map between short filter names on command line and paths, labels etc..
filterMap=[]
filterMap.append({'shortName': 'r625', 'filePath': FILTER_DIR+'F625W_WFC.res',
'plotLabel': 'r625'})
filterMap.append({'shortName': 'i775', 'filePath': FILTER_DIR+'F775W_WFC.res',
'plotLabel': 'i775'})
filterMap.append({'shortName': 'z850', 'filePath': FILTER_DIR+'F850LP_WFC.res',
'plotLabel': 'z850'})
filterMap.append({'shortName': 'U', 'filePath': FILTER_DIR+'U_Johnson.res',
'plotLabel': 'U'})
filterMap.append({'shortName': 'V', 'filePath': FILTER_DIR+'V_Johnson.res',
'plotLabel': 'V'})
# Literature CMR results we want to convert, in their native format
litCMRs = []
litCMRs.append({'name': 'RX J0152.7-1357 (Mei et al. 2009)',
'redshift': 0.83,
'colour': 'r625-z850',
'mag': 'i775',
'slope': -0.040,
'slopeErr': 0.017,
'intercept': 1.93,
'interceptErr': 0.02,
'zeroMag': 22.5,
'scatter': 0.079,
'scatterErr': 0.008,
'magType': "AB"})
#-----------------------------------------------------------------------------
def GetCMR(nameFragment, dictList):
"""Finds the CMR dictionary in the litCMRs or results list, by looking for
nameFragment in name.
"""
    foundCMR = None
    for d in dictList:
        if nameFragment in d['name']:
            foundCMR = d
    return foundCMR
#------------------------------------------------------------------------------
def CalcTransformedCMRWithZM(obsCMR, fitMags, fitCols):
"""Calculates the CMR transformed to the rest frame, using the results of
the magnitude and colour
conversion fits. See handwritten notes for the tedious algebra involved.
"""
# we use the following notation s, zp for slope, zeropoint, append Err for
# errors
# CMR for CMR, Mag for fitMags, Col for fitCols
sCMR = obsCMR['slope']
zpCMR = obsCMR['intercept']
zmCMR = obsCMR['zeroMag']
sMag = fitMags['slope']
zpMag=fitMags['intercept']
sCol = fitCols['slope']
zpCol = fitCols['intercept']
transformedZeroMag = obsCMR['transformedZeroMag']
a = sCMR**-1+sMag
b = sCol/a
c = zpCMR/sCMR
d = c-zpMag-zmCMR
e = d/a
f = sCol*e
# this last term is if we want to transform to e.g. match Mei et al.
g = zpCol+f+(b*transformedZeroMag)
restCMRSlope = b
restCMRIntercept = g
restCMRScatter = obsCMR['scatter']*fitCols['slope']
return ({'slope': restCMRSlope, 'intercept': restCMRIntercept,
'scatter': restCMRScatter})
#-----------------------------------------------------------------------------
def BootstrapTransformedCMRErrorsWithZM(obsCMR, fitMags, fitCols):
"""Estimates errors on transformed CMR fit (i.e., into rest frame), by
assuming the errors on the observed CMR fit, colour transformation fit, and
mag. transformation fit have Gaussian
distributions.
"""
# we use the following notation s, zp for slope, zeropoint, append Err for
# errors
# CMR for CMR, Mag for fitMags, Col for fitCols
sCMR = obsCMR['slope']
sCMRErr = obsCMR['slopeErr']
zpCMR = obsCMR['intercept']
zpCMRErr = obsCMR['interceptErr']
sMag = fitMags['slope']
sMagErr = fitMags['slopeError']
zpMag = fitMags['intercept']
zpMagErr = fitMags['interceptError']
sCol = fitCols['slope']
sColErr = fitCols['slopeError']
zpCol = fitCols['intercept']
zpColErr = fitCols['interceptError']
bsFitResults = []
for n in range(BOOTSTRAPS):
bsCMR = {}
bsMag = {}
bsCol = {}
bsCMR['slope'] = random.normalvariate(sCMR, sCMRErr)
bsCMR['intercept'] = random.normalvariate(zpCMR, zpCMRErr)
bsCMR['zeroMag'] = obsCMR['zeroMag']
bsMag['slope'] = random.normalvariate(sMag, sMagErr)
bsMag['intercept'] = random.normalvariate(zpMag, zpMagErr)
bsCol['slope'] = random.normalvariate(sCol, sColErr)
bsCol['intercept'] = random.normalvariate(zpCol, zpColErr)
bsCMR['scatter'] = random.normalvariate(obsCMR['scatter'],
obsCMR['scatterErr'])
bsCMR['transformedZeroMag'] = obsCMR['transformedZeroMag']
bsFitResults.append(CalcTransformedCMRWithZM(bsCMR, bsMag, bsCol))
bsSlopes = []
bsIntercepts = []
bsScatters = []
for bsResult in bsFitResults:
bsSlopes.append(bsResult['slope'])
bsIntercepts.append(bsResult['intercept'])
bsScatters.append(bsResult['scatter'])
bsSlopes = numpy.array(bsSlopes)
bsIntercepts = numpy.array(bsIntercepts)
bsScatters = numpy.array(bsScatters)
restCMRSlopeErr = numpy.std(bsSlopes)
restCMRInterceptErr = numpy.std(bsIntercepts)
restCMRScatterErr = numpy.std(bsScatters)
return ({'slopeErr': restCMRSlopeErr, 'interceptErr':restCMRInterceptErr,
'scatterErr': restCMRScatterErr})
#-----------------------------------------------------------------------------
def LoadModels(fileNameList, modelType = "bc03"):
"""Creates a list of stellar population models from the given list of model
file names.
"""
models = []
for f in fileNameList:
if modelType == "bc03":
models.append(astSED.BC03Model(f))
elif modelType == "m05":
models.append(astSED.M05Model(f))
return models
#-----------------------------------------------------------------------------
def GetPassbandFileNames(inputColour):
"""Given a mag (e.g. i775) or colour string e.g. r625-z850, lookup the
appropriate file name(s) in the filterMap, and return the paths in a list.
"""
bands = inputColour.split("-")
p = []
for b in bands:
p.append(None)
for row in filterMap:
for i in range(len(bands)):
if row['shortName'] == bands[i]:
p[i]=row['filePath']
if None in p:
print("ERROR : couldn't parse colour using filterMap")
sys.exit()
else:
return p
#-----------------------------------------------------------------------------
def LoadPassbands(fileNameList, redshift = None, redshiftPassbands = False):
"""Creates a list of passband objects from the given list of passband file
names.
"""
passbands = []
for f in fileNameList:
p = astSED.Passband(f)
        if redshiftPassbands and redshift is not None:
p.wavelength=p.wavelength*(1.0+redshift)
passbands.append(p)
return passbands
#-----------------------------------------------------------------------------
def CalcColourMagTransformation(cmr, restColPassbands, restMagPassband):
"""Calculates the transformation equations needed to convert the given cmr
into the rest frame at Coma, for the given passbands.
"""
print((">>> Calculating colour, mag transform for CMR " +
litCMR['name']+"..."))
inputColour = cmr['colour']
inputMag = cmr['mag']
zCluster = cmr['redshift']
# Range of formation zs to match Mei et al. 2008
zfMax = 7.0
zfMin = 2.0
observedColPassbandFileNames = GetPassbandFileNames(inputColour)
observedMagPassbandFileName = GetPassbandFileNames(inputMag)
observedColLabel = inputColour
observedMagLabel = inputMag
# Load stuff
observedColPassbands = LoadPassbands(observedColPassbandFileNames)
observedMagPassband = LoadPassbands(observedMagPassbandFileName)[0]
# Generate galaxy models, we'll hold them all in memory here and use them
# all in a bit
print("--> Generating simulated galaxy sample ...")
restGalaxies = []
observedGalaxies = []
for n in range(NGAL):
print("... n = "+str(n+1)+"/"+str(NGAL)+" ...")
zfChoice = random.uniform(zfMin, zfMax)
ageChoice = astCalc.tl(zfChoice)-astCalc.tl(zCluster)
for i in range(len(models)):
modelChoice = i
restGalaxies.append(models[modelChoice].getSED(ageChoice, z=0.02))
observedGalaxies.append(models[modelChoice].getSED(ageChoice,
z=zCluster))
# Fit for colour conversion
observedColours = []
restColours = []
for o, r in zip(observedGalaxies, restGalaxies):
restColours.append(r.calcColour(restColPassbands[0],
restColPassbands[1], magType="Vega"))
observedColours.append(o.calcColour(observedColPassbands[0],
observedColPassbands[1], magType=cmr['magType']))
restColours = numpy.array(restColours)
observedColours = numpy.array(observedColours)
fitColData = []
for x, y in zip(observedColours, restColours):
fitColData.append([x, y])
fitCols=astStats.OLSFit(fitColData)
res = restColours-(fitCols['slope']*observedColours+fitCols['intercept'])
# scatter of residuals, use as fit error
scatter = astStats.biweightScale(res, 6.0)
# Fit for mag conversion
restMinusObservedAppMags = []
for o, r, obsCol, restCol in zip(observedGalaxies, restGalaxies,
observedColours, restColours):
restMinusObservedAppMags.append(r.calcMag(restMagPassband,
magType="Vega")-o.calcMag(observedMagPassband,
magType=cmr['magType']))
restMinusObservedAppMags = numpy.array(restMinusObservedAppMags)
fitMagData = []
for x, y in zip(observedColours, restMinusObservedAppMags):
fitMagData.append([x, y])
fitMags = astStats.OLSFit(fitMagData)
trans = {'name': cmr['name'],
'fitCols': fitCols,
'fitMags': fitMags,
'observedColours': observedColours,
'restColours': restColours,
'restMinusObservedAppMags': restMinusObservedAppMags}
return trans
#-----------------------------------------------------------------------------
def GetCSPModel(labelToFind, models, modelLabels):
"""Given a list of models and a matching list of labels, returns the model
matching the given label.
"""
foundModel = None
for m, l in zip(models, modelLabels):
if l == labelToFind:
foundModel = m
return foundModel
#-----------------------------------------------------------------------------
def ApplyCMRTransformation(cmr, trans):
"""Applies the magnitude and colour transformations stored in trans to the
CMR.
"""
print((">>> Transforming CMR "+cmr['name']+" to Coma rest frame"+
restColour+" ..."))
# Check that col, mag transformations and cmrs match up, otherwise
# something is seriously screwed up
if cmr['name'] != trans['name']:
print("ERROR: cmrs and transformation lists not paired!")
sys.exit()
fitMags = trans['fitMags']
fitCols = trans['fitCols']
transformedCMR = CalcTransformedCMRWithZM(cmr, fitMags, fitCols)
transformedCMRErrs = BootstrapTransformedCMRErrorsWithZM(cmr, fitMags,
fitCols)
result = {'name': cmr['name'],
'redshift': cmr['redshift'],
'colour': restColour,
'mag': restMag,
'slope': transformedCMR['slope'],
'slopeErr': transformedCMRErrs['slopeErr'],
'intercept': transformedCMR['intercept'],
'interceptErr': transformedCMRErrs['interceptErr'],
'zeroMag': cmr['transformedZeroMag'],
'scatter': transformedCMR['scatter'],
'scatterErr': transformedCMRErrs['scatterErr']}
return result
#-----------------------------------------------------------------------------
# Main
# Input parameters
MODEL_TYPE="bc03"
modelFileNames = ["../../../../testingData/models/tau0p1Gyr_m42.20",
"../../../../testingData/models/tau0p1Gyr_m52.20",
"../../../../testingData/models/tau0p1Gyr_m62.20",
"../../../../testingData/models/tau0p1Gyr_m72.20"]
models = LoadModels(modelFileNames, modelType = MODEL_TYPE)
# Target colour and mag bands
restColPassbandFileNames = [FILTER_DIR+"U_Johnson.res",
FILTER_DIR+"B_Johnson.res"]
restMagPassbandFileName = [FILTER_DIR+"B_Johnson.res"]
restColour = "U-B" # component of output file name
restMag = "B"
restColLabel = "(U-B)rest"
restMagLabel = "B"
restColPassbands = LoadPassbands(restColPassbandFileNames)
restMagPassband = LoadPassbands(restMagPassbandFileName)[0]
# Evaluate CMR zero point in the rest frame of Coma at intercept of zero
for litCMR in litCMRs:
litCMR['transformedZeroMag'] = 0.0
# Calculate the colour, mag transformations to take each CMR to the Coma rest frame
transformations = []
for litCMR in litCMRs:
trans = CalcColourMagTransformation(litCMR, restColPassbands,
restMagPassband)
transformations.append(trans)
# Transform the literature CMRs to the rest frame passbands at Coma
results = []
for litCMR, trans in zip(litCMRs, transformations):
result = ApplyCMRTransformation(litCMR, trans)
results.append(result)
# Write results to a text file
outFile = open("output_CMRRestConversion.txt", "w")
# Colour, mag transformation
outFile.write("# Color, mag %s rest frame transformation fit coeffs:\n" %
(restColour))
for t in transformations:
outFile.write("name = %s\n" % (t['name']))
outFile.write("# Color transformation:\n")
outFile.write("slope = %.5f\n" % (t['fitCols']['slope']))
outFile.write("slopeError = %.5f\n" % (t['fitCols']['slopeError']))
outFile.write("intercept = %.5f\n" % (t['fitCols']['intercept']))
outFile.write("interceptError = %.5f\n" % (t['fitCols']['interceptError']))
outFile.write("# Mag transformation:\n")
outFile.write("slope = %.5f\n" % (t['fitMags']['slope']))
outFile.write("slopeError = %.5f\n" % (t['fitMags']['slopeError']))
outFile.write("intercept = %.5f\n" % (t['fitMags']['intercept']))
outFile.write("interceptError = %.5f\n" % (t['fitMags']['interceptError']))
# Transformed CMR
outFile.write("# Transformed CMR:\n")
keyOrder = ["name", "redshift", "colour", "mag", "zeroMag", "slope",
"slopeErr", "intercept", "interceptErr", "scatter", "scatterErr"]
for r in results:
for k in keyOrder:
for key in list(r.keys()):
if str(key) == k:
if type(r[key]) == str:
outFile.write("%s = %s\n" % (key, r[key]))
else:
outFile.write("%s = %.3f\n" % (key, float(r[key])))
outFile.close()
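
# Standalone illustration (an added sketch, not part of the original script)
# of the bootstrap recipe used in BootstrapTransformedCMRErrorsWithZM() above:
# perturb a fitted parameter with a Gaussian of width equal to its quoted
# error, recompute the derived quantity, and take the standard deviation over
# many realisations as the derived quantity's error.
def bootstrap_error(value, err, derived, n=BOOTSTRAPS):
    samples = [derived(random.normalvariate(value, err)) for i in range(n)]
    return numpy.std(numpy.array(samples))

# e.g. the error on slope**2 given slope = -0.040 +/- 0.017:
# bootstrap_error(-0.040, 0.017, lambda s: s ** 2)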
|
boada/astLib
|
examples/CMRRestFrameConversion/CMRRestFrameConversion.py
|
Python
|
lgpl-2.1
| 15,480
|
#!/usr/local/bin/python
# Manual.py
# add per-function documentation to Section_functions.txt in tabular HTML format
# extract the comment lines from OINK source files between C-style /* ... */
# invoked by PDFgen.sh when PDF of doc pages is created
# Syntax: Manual.py
import glob, re
# mtxt, etc = contents of source files
def read_sources(pattern):
    files = sorted(glob.glob(pattern))
    return "".join(open(f, "r").read() for f in files)

mtxt = read_sources("../oink/map_*.cpp")
rtxt = read_sources("../oink/reduce_*.cpp")
ctxt = read_sources("../oink/compare_*.cpp")
htxt = read_sources("../oink/hash_*.cpp")
stxt = read_sources("../oink/scan_*.cpp")
# mcomm, etc = comments in source files, with /* and */ lines
pattern = re.compile(r"(/\*.*?\*/)", re.DOTALL)
mcomm = re.findall(pattern,mtxt)
rcomm = re.findall(pattern,rtxt)
ccomm = re.findall(pattern,ctxt)
hcomm = re.findall(pattern,htxt)
scomm = re.findall(pattern,stxt)
# mpair, etc = comments in source files, without /* and */ lines and whitespace
def to_pairs(comments):
    pairs = []
    for comm in comments:
        lines = [line.strip() for line in comm.split("\n")]
        pairs.append([lines[1], lines[2:-1]])
    return pairs

mpair = to_pairs(mcomm)
rpair = to_pairs(rcomm)
cpair = to_pairs(ccomm)
hpair = to_pairs(hcomm)
spair = to_pairs(scomm)
# re-create Section_functions.txt file below double :line location
# txt2html does not know how to create multiline table entries
# so write tabular HTML format directly with <TR> and <TD>
txt = open("Section_functions.txt","r").read()
separator = "\n:line\n:line\n"
halves = txt.split(separator)
half2 = ""
half2 += "\nMap() functions :link(3_1),h4\n\n"
half2 += '<DIV ALIGN=center><TABLE WIDTH="0%" BORDER=1>'
for pair in mpair:
half2 += "<TR>\n"
half2 += "<TD>%s</TD>\n" % pair[0]
half2 += "<TD>\n"
for line in pair[1]:
half2 += "%s<BR>\n" % line
half2 += "</TD>\n"
half2 += "</TR>\n"
half2 += "</TABLE></DIV>\n"
half2 += "\n:line\n"
half2 += "\nReduce() functions :link(3_1),h4\n\n"
half2 += '<DIV ALIGN=center><TABLE WIDTH="0%" BORDER=1>'
for pair in rpair:
half2 += "<TR>\n"
half2 += "<TD>%s</TD>\n" % pair[0]
half2 += "<TD>\n"
for line in pair[1]:
half2 += "%s<BR>\n" % line
half2 += "</TD>\n"
half2 += "</TR>\n"
half2 += "</TABLE></DIV>\n"
half2 += "\n:line\n"
half2 += "\nCompare() functions :link(3_1),h4\n\n"
half2 += '<DIV ALIGN=center><TABLE WIDTH="0%" BORDER=1>'
for pair in cpair:
half2 += "<TR>\n"
half2 += "<TD>%s</TD>\n" % pair[0]
half2 += "<TD>\n"
for line in pair[1]:
half2 += "%s<BR>\n" % line
half2 += "</TD>\n"
half2 += "</TR>\n"
half2 += "</TABLE></DIV>\n"
half2 += "\n:line\n"
half2 += "\nHash() functions :link(3_1),h4\n\n"
half2 += '<DIV ALIGN=center><TABLE WIDTH="0%" BORDER=1>'
for pair in hpair:
half2 += "<TR>\n"
half2 += "<TD>%s</TD>\n" % pair[0]
half2 += "<TD>\n"
for line in pair[1]:
half2 += "%s<BR>\n" % line
half2 += "</TD>\n"
half2 += "</TR>\n"
half2 += "</TABLE></DIV>\n"
half2 += "\n:line\n"
half2 += "\nScan() functions :link(3_1),h4\n\n"
half2 += '<DIV ALIGN=center><TABLE WIDTH="0%" BORDER=1>'
for pair in spair:
half2 += "<TR>\n"
half2 += "<TD>%s</TD>\n" % pair[0]
half2 += "<TD>\n"
for line in pair[1]:
half2 += "%s<BR>\n" % line
half2 += "</TD>\n"
half2 += "</TR>\n"
half2 += "</TABLE></DIV>\n"
half2 += "\n:line\n"
txt = halves[0] + separator + half2
open("Section_functions.txt","w").write(txt)
|
ravikanthreddy89/MR-MPI
|
oinkdoc/Manual.py
|
Python
|
bsd-3-clause
| 4,105
|
# Copyright 2008 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import test
import os
import shutil
from shutil import rmtree
from os import mkdir
from glob import glob
from os.path import join, dirname, exists
import re
import json
FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
FILES_PATTERN = re.compile(r"//\s+Files:(.*)")
class SimpleTestCase(test.TestCase):
def __init__(self, path, file, arch, mode, nwdir, context, config, additional=[]):
super(SimpleTestCase, self).__init__(context, path, arch, mode, nwdir)
self.file = file
self.config = config
self.arch = arch
self.mode = mode
self.tmpdir = join(dirname(self.config.root), 'tmp')
self.additional_flags = additional
self.expected_quit_dir = ''
def GetTmpDir(self):
return "%s.%d" % (self.tmpdir, self.thread_id)
def AfterRun(self, result):
# delete the whole tmp dir
try:
rmtree(self.GetTmpDir())
except:
pass
# make it again.
try:
mkdir(self.GetTmpDir())
except:
pass
if self.expected_quit_dir :
if not os.path.exists(os.path.join(self.file, self.expected_quit_dir)):
print "expected_quit_dir:", self.expected_quit_dir
self.expected_quit_dir_fail = True
else:
try:
rmtree(os.path.join(self.file, self.expected_quit_dir))
except:
pass
def BeforeRun(self):
# delete the whole tmp dir
try:
rmtree(self.GetTmpDir())
except:
pass
# make it again.
# intermittently fails on win32, so keep trying
while not os.path.exists(self.GetTmpDir()):
try:
mkdir(self.GetTmpDir())
except:
pass
manifest = json.loads(open(os.path.join(self.file, 'package.json')).read(), 'utf-8')
if manifest.get('expect_dir_exists'):
self.expected_quit_dir = manifest['expect_dir_exists']
try:
rmtree(os.path.join(self.file, self.expected_quit_dir))
except:
pass
else:
self.expected_quit_dir = ''
def GetLabel(self):
return "%s %s" % (self.mode, self.GetName())
def GetName(self):
return self.path[-1]
def GetCommand(self):
result = [self.config.context.GetVm(self.arch, self.mode, self.nwdir)]
manifest = json.loads(open(os.path.join(self.file, 'package.json')).read(), 'utf-8')
if manifest.get('expect_exit_code'):
self.expected_exit_code = manifest['expect_exit_code']
else:
self.expected_exit_code = 0
result += [self.file]
return result
def IsFailureOutput(self, output):
return output.exit_code != self.expected_exit_code or hasattr(self, 'expected_quit_dir_fail')
def GetSource(self):
return open(self.file).read()
class SimpleTestConfiguration(test.TestConfiguration):
def __init__(self, context, root, section, additional=[]):
super(SimpleTestConfiguration, self).__init__(context, root)
self.section = section
self.additional_flags = additional
def Ls(self, path):
def SelectTest(name):
return os.path.isdir(os.path.join(path, name))
        return [f for f in os.listdir(path) if SelectTest(f)]
def ListTests(self, current_path, path, arch, mode, nwdir):
all_tests = [current_path + [t] for t in self.Ls(join(self.root))]
result = []
for test in all_tests:
if self.Contains(path, test):
file_path = join(self.root, reduce(join, test[1:], ""))
result.append(SimpleTestCase(test, file_path, arch, mode, nwdir, self.context,
self, self.additional_flags))
return result
def GetBuildRequirements(self):
return ['sample', 'sample=shell']
def GetTestStatus(self, sections, defs):
status_file = join(self.root, '%s.status' % (self.section))
if exists(status_file):
test.ReadConfigurationInto(status_file, sections, defs)
class ParallelTestConfiguration(SimpleTestConfiguration):
def __init__(self, context, root, section, additional=[]):
super(ParallelTestConfiguration, self).__init__(context, root, section,
additional)
def ListTests(self, current_path, path, arch, mode, nwdir):
result = super(ParallelTestConfiguration, self).ListTests(
current_path, path, arch, mode, nwdir)
for test in result:
test.parallel = True
return result
class AddonTestConfiguration(SimpleTestConfiguration):
def __init__(self, context, root, section, additional=[]):
super(AddonTestConfiguration, self).__init__(context, root, section)
def Ls(self, path):
def SelectTest(name):
return name.endswith('.js')
result = []
for subpath in os.listdir(path):
if os.path.isdir(join(path, subpath)):
for f in os.listdir(join(path, subpath)):
if SelectTest(f):
result.append([subpath, f[:-3]])
return result
def ListTests(self, current_path, path, arch, mode, nwdir):
all_tests = [current_path + t for t in self.Ls(join(self.root))]
result = []
for test in all_tests:
if self.Contains(path, test):
file_path = join(self.root, reduce(join, test[1:], "") + ".js")
result.append(
SimpleTestCase(test, file_path, arch, mode, nwdir, self.context, self))
return result
|
280455936/nw.js
|
test/testpy/__init__.py
|
Python
|
mit
| 6,774
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Reads the HTML page from ASDA's shopping basket
# Writes to an excel spreadsheet in three columns item, quantity, price
# TODO: Obtain the HTML page by making a GET request.
from html.parser import HTMLParser
from optparse import OptionParser
import logging
from openpyxl import Workbook
HTML_PAGE = "shopping_basket.htm"
LOG_FILE = "htmlparser.log"
logger = logging.getLogger(__name__)
def convert_to_pounds(string):
    """
    Utility function which extracts the amount in pounds from an
    appropriately formatted string.
    :param string: A string in one of the following formats:
        '£a' - a '£' in the string; the amount is read from the third
               character onwards (allowing for the two-character prefix
               these price strings carry before the digits).
        'ap' - the last character of the string is 'p'; the amount is
               then divided by 100 to make the result in pounds.
    :return: string - the money value expressed in pounds.
    """
# TODO: Return a number rather than a string!
rc = string
# Determines if '£' is in the string.
if "£" in string:
# Returns from the third character.
# TODO: ASDA prices start like £. I assume this is to do with the
# format of the string. We could improve the logic here to read
# the string in a better way, so that it start with '£'.
# TODO: Just because the string has a '£' in it doesn't mean that the
# string is in the correct format. Extra checks should be made
# here.
rc = string[2:]
# Determines if the string ends in 'p'
elif string[-1] == "p":
# Returns except the last character in pounds.
# TODO: Should check the format of the string more carefully, items that
# are 5p will come out as £0.5.
rc = "0." + string[:-1]
else:
logger.error("Incorrect format for string: %s", string)
return rc
class Product():
"""
Provides a container for each product. Stores name, quantity, price and
amount.
"""
def __init__(self):
# Initialise the member variables.
self.name = ""
self.quantity = ""
self.price = ""
self.amount = ""
# Getters and setters.
def add_name(self, name):
self.name = name
def add_amount(self, amount):
self.amount = amount
def add_quantity(self, amount):
self.quantity = amount
def add_price(self, price):
self.price = price
def get_name(self):
return self.name
def to_string(self):
return ", ".join(self.get_attrs())
def get_attrs(self):
return [self.name, self.amount, self.quantity, self.price]
class ShoppingParser(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
        # the handler methods below expect a logger on the instance
        self.logger = logging.getLogger(__name__)
        self.product_list = []
        self.product = None
        self.product_title = False
        self.is_quantity = False
        self.is_price = False
        self.is_sub_title = False
def handle_starttag(self, tag, attrs):
self.logger.debug("Encountered a start tag: %s with attrs: %s", tag, attrs)
if attrs:
attrs = dict(attrs)
if attrs.get('class') == 'product':
self.logger.info("Creating new product")
self.product = Product()
elif tag == 'span' and attrs.get('class') and attrs.get('class') == 'title productTitle':
self.logger.debug("Starting product title")
self.product_title = True
elif tag == 'span' and attrs.get('class') and attrs.get('class') == 'qtyTxt':
self.logger.debug("Starting quantity")
self.is_quantity = True
elif tag == 'span' and attrs.get('class') and attrs.get('class') == 'price':
self.logger.debug("Starting price")
self.is_price = True
elif tag == 'span' and attrs.get('class') and attrs.get('class') == 'subTitle':
self.logger.debug("Starting subtitle")
self.is_sub_title = True
elif tag == 'div' and attrs.get('class') and attrs.get('class') == 'delSubs column':
self.logger.info("End of product: %s", self.product.get_name())
self.product_list.append(self.product)
self.product = Product()
def handle_endtag(self, tag):
self.logger.debug("Encountered an end tag : %s", tag)
if tag == 'span':
if self.is_quantity:
self.is_quantity = False
if self.product_title:
self.product_title = False
if self.is_price:
self.is_price = False
if self.is_sub_title:
self.is_sub_title = False
def handle_data(self, data):
self.logger.debug("Encountered some data : %s", data)
if self.product_title:
newname = self.product.get_name() + " " + data
self.logger.info("Product name = %s", newname)
self.product.add_name(newname)
if self.is_quantity:
self.product.add_quantity(data)
if self.is_price and data != "Now":
self.product.add_price(convert_to_pounds(data))
if self.is_sub_title:
self.product.add_amount(data)
    def to_string(self):
        result = ""
        for product in self.product_list:
            result += product.to_string()
            result += "\n"
        return result
def create_workbook(self):
wb = Workbook()
ws = wb.active
for row_num in range(len(self.product_list)):
attrs = self.product_list[row_num].get_attrs()
for col_num in range(len(attrs)):
cell = ws.cell(row=row_num+1, column=col_num+1, value=attrs[col_num])
wb.save("shoppinglist.xlsx")
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-l", "--log_file", dest="filename", help="The file name "
"that the logs should be written to")
(options, args) = parser.parse_args()
print(options)
print(args)
logging.basicConfig(filename=logfile, level=logging.INFO, filemode="w")
with open(HTML_PAGE, "r") as myfile:
data = myfile.read().replace('\n','')
parser = ShoppingParser()
parser.feed(data)
print(parser.to_string())
parser.create_workbook()
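
# Sanity-check sketch (added for illustration): the pound-denominated input
# uses the two-character prefix that convert_to_pounds() assumes before the
# digits.
def _demo_convert_to_pounds():
    assert convert_to_pounds("Â£1.50") == "1.50"
    assert convert_to_pounds("50p") == "0.50"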
|
tiy1807/PythonUtils
|
PythonUtils/read_asda_shopping.py
|
Python
|
apache-2.0
| 6,546
|
# Copyright (C) 2013 Statoil ASA, Norway.
#
# The file 'time_map.py' is part of ERT - Ensemble based Reservoir Tool.
#
# ERT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ERT is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE.
#
# See the GNU General Public License at <http://www.gnu.org/licenses/gpl.html>
# for more details.
import os
import errno
from cwrap import BaseCClass
from ert.enkf import EnkfPrototype
from ert.util import CTime
class TimeMap(BaseCClass):
TYPE_NAME = "time_map"
_fread_alloc_readonly = EnkfPrototype("void* time_map_fread_alloc_readonly(char*)", bind = False)
_alloc = EnkfPrototype("void* time_map_alloc()", bind = False)
_load = EnkfPrototype("bool time_map_fread(time_map , char*)")
_save = EnkfPrototype("void time_map_fwrite(time_map , char*)")
_fload = EnkfPrototype("bool time_map_fscanf(time_map , char*)")
_iget_sim_days = EnkfPrototype("double time_map_iget_sim_days(time_map, int)")
_iget = EnkfPrototype("time_t time_map_iget(time_map, int)")
_size = EnkfPrototype("int time_map_get_size(time_map)")
_try_update = EnkfPrototype("bool time_map_try_update(time_map , int , time_t)")
_is_strict = EnkfPrototype("bool time_map_is_strict( time_map )")
_set_strict = EnkfPrototype("void time_map_set_strict( time_map , bool)")
_lookup_time = EnkfPrototype("int time_map_lookup_time( time_map , time_t)")
_lookup_time_with_tolerance = EnkfPrototype("int time_map_lookup_time_with_tolerance( time_map , time_t , int , int)")
_lookup_days = EnkfPrototype("int time_map_lookup_days( time_map , double)")
_last_step = EnkfPrototype("int time_map_get_last_step( time_map )")
_upgrade107 = EnkfPrototype("void time_map_summary_upgrade107( time_map , ecl_sum )")
_free = EnkfPrototype("void time_map_free( time_map )")
def __init__(self, filename = None):
c_ptr = self._alloc()
super(TimeMap, self).__init__(c_ptr)
if filename:
self.load(filename)
def load(self, filename):
if os.path.isfile( filename ):
self._load(filename)
else:
raise IOError(( errno.ENOENT , "File not found: %s" % filename))
def fwrite(self, filename):
self._save(filename)
def fload(self , filename):
"""
Will load a timemap as a formatted file consisting of a list of dates: DD/MM/YYYY
"""
if os.path.isfile( filename ):
OK = self._fload(filename)
if not OK:
raise Exception("Error occured when loading timemap from:%s" % filename)
else:
raise IOError(( errno.ENOENT , "File not found: %s" % filename))
def isStrict(self):
return self._is_strict()
def setStrict(self , strict):
return self._set_strict(strict)
def getSimulationDays(self, step):
""" @rtype: double """
if not isinstance(step, int):
raise TypeError("Expected an integer")
size = len(self)
if step < 0 or step >= size:
raise IndexError("Index out of range: 0 <= %d < %d" % (step, size))
return self._iget_sim_days(step)
def __getitem__(self, index):
""" @rtype: CTime """
if not isinstance(index, int):
raise TypeError("Expected an integer")
size = len(self)
if index < 0 or index >= size:
raise IndexError("Index out of range: 0 <= %d < %d" % (index, size))
return self._iget(index)
def __setitem__(self , index , time):
self.update( index , time )
def update(self , index , time):
if self._try_update(index , CTime(time)):
return True
else:
if self.isStrict():
raise Exception("Tried to update with inconsistent value")
else:
return False
def __iter__(self):
cur = 0
while cur < len(self):
yield self[cur]
cur += 1
def __contains__(self , time):
index = self._lookup_time(CTime(time))
if index >= 0:
return True
else:
return False
def lookupTime(self , time , tolerance_seconds_before = 0, tolerance_seconds_after = 0):
"""Will look up the report step corresponding to input @time.
If the tolerance arguments tolerance_seconds_before and
tolerance_seconds_after have the default value zero we require
an exact match between input time argument and the content of
the time map.
If the tolerance arguments are supplied the function will
search through the time_map for the report step closest to the
time argument, which satisfies the tolerance criteria.
With the call:
lookupTime( datetime.date(2010,1,10) , 3600*24 , 3600*7)
We will find the report step in the date interval 2010,1,9 -
2010,1,17 which is closest to 2010,1,10. The tolerance limits
are inclusive.
If no report step satisfying the criteria is found a
ValueError exception will be raised.
"""
if tolerance_seconds_before == 0 and tolerance_seconds_after == 0:
index = self._lookup_time(CTime(time))
else:
index = self._lookup_time_with_tolerance(CTime(time) , tolerance_seconds_before , tolerance_seconds_after)
if index >= 0:
return index
else:
raise ValueError("The time:%s was not found in the time_map instance" % time)
def lookupDays(self , days):
index = self._lookup_days(days)
if index >= 0:
return index
else:
raise ValueError("The days: %s was not found in the time_map instance" % days)
def __len__(self):
""" @rtype: int """
return self._size()
def free(self):
self._free()
def __repr__(self):
ls = len(self)
la = self.getLastStep()
st = 'strict' if self.isStrict() else 'not strict'
cnt = 'size = %d, last_step = %d, %s' % (ls, la, st)
return self._create_repr(cnt)
def dump(self):
"""
Will return a list of tuples (step , CTime , days).
"""
step_list = []
for step,t in enumerate(self):
step_list.append( (step , t , self.getSimulationDays( step )) )
return step_list
def getLastStep(self):
return self._last_step()
def upgrade107(self, refcase):
self._upgrade107(refcase)
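
# Usage sketch (commented out: requires a working ERT install and an existing
# time map file; the file name and dates here are illustrative only):
#
#   tm = TimeMap("case/time_map")
#   step = tm.lookupTime(datetime.date(2010, 1, 10),
#                        tolerance_seconds_before=24 * 3600,
#                        tolerance_seconds_after=7 * 24 * 3600)
#   print(tm.getSimulationDays(step))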
|
Ensembles/ert
|
python/python/ert/enkf/util/time_map.py
|
Python
|
gpl-3.0
| 7,225
|
"""Fetch PATH (NJ/NYC) feed."""
from datetime import datetime
import logging
from bs4 import BeautifulSoup
import requests
from FeedSource import FeedSource, TIMECHECK_FMT
URL = 'http://trilliumtransit.com/transit_feeds/path-nj-us/'
FILE_NAME = 'path.zip'
LAST_UPDATED_FMT = '%Y-%m-%d %H:%M'
LOG = logging.getLogger(__name__)
class Path(FeedSource):
"""Fetch PATH feed."""
def __init__(self):
super(Path, self).__init__()
# The name of the download file changes on occasion.
# Go scrape the directory listing to find out what it is now, and update url if found.
self.urls = {FILE_NAME: URL + 'path-nj-us.zip'}
response = requests.get(URL)
if response.ok:
soup = BeautifulSoup(response.text, 'html.parser')
anchors = soup.findAll('a')
if len(anchors):
                # last link on the page should be our download
                lastlink = anchors[-1]
# last updated time is in next column in table (last-modified header not set)
last_updated_str = lastlink.findParent().findNextSibling().text.strip()
self.last_updated = datetime.strptime(last_updated_str, LAST_UPDATED_FMT)
filename = lastlink.text
LOG.debug('Found PATH download file named %s, last updated: %s',
filename,
self.last_updated)
download_url = URL + filename
self.urls = {FILE_NAME: download_url}
else:
LOG.error('Could not parse directory listing for PATH.')
else:
LOG.error('Could not get directory listing for PATH.')
def fetch(self):
"""No last-modified header set; check update time here."""
stat = self.status.get(FILE_NAME)
if stat:
got_last = datetime.strptime(stat['posted_date'], TIMECHECK_FMT)
if self.last_updated:
if got_last >= self.last_updated:
LOG.info('No new download found for PATH.')
self.update_existing_status(FILE_NAME)
return
else:
LOG.info('New download found for PATH posted: %s; last retrieved: %s',
self.last_updated,
got_last)
else:
LOG.error('No last updated time found for PATH.')
self.last_updated = datetime.utcnow()
else:
LOG.info('No previous download found for PATH.')
# Download it and verify
self.fetchone(FILE_NAME, self.urls.get(FILE_NAME))
self.set_posted_date(FILE_NAME, self.last_updated.strftime(TIMECHECK_FMT))
self.write_status()
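
# Standalone sketch of the directory-listing scrape in __init__ above: take
# the last <a> in an Apache-style listing as the download link and read the
# last-modified text from the next table cell. The HTML literal is an assumed
# illustration, not a captured listing.
def parse_listing(html):
    soup = BeautifulSoup(html, 'html.parser')
    anchors = soup.findAll('a')
    if not anchors:
        return None, None
    lastlink = anchors[-1]
    updated = lastlink.findParent().findNextSibling().text.strip()
    return lastlink.text, datetime.strptime(updated, LAST_UPDATED_FMT)

# parse_listing('<table><tr><td><a href="x.zip">x.zip</a></td>'
#               '<td>2016-01-01 12:00</td></tr></table>')
# -> ('x.zip', datetime.datetime(2016, 1, 1, 12, 0))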
|
azavea/gtfs-feed-fetcher
|
feed_sources/Path.py
|
Python
|
mit
| 2,786
|
# https://leetcode.com/problems/repeated-substring-pattern/
class Solution(object):
def repeatedSubstringPattern(self, str):
return str in (2 * str)[1:-1]
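
# Why this works (added note): if str == p * k for some substring p repeated
# k >= 2 times, then str equals itself rotated by len(p), so str occurs inside
# str + str at an offset other than 0 or len(str). Trimming one character from
# each end of 2 * str removes exactly those two trivial occurrences.
#   Solution().repeatedSubstringPattern("abab")  -> True
#   Solution().repeatedSubstringPattern("aba")   -> False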
|
menghanY/LeetCode-Python
|
String/RepeatedSubstringPattern.py
|
Python
|
mit
| 168
|
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gst', '1.0')
from gi.repository import Gtk
from gi.repository import Gst
class GLSink:
def __init__(self):
self.element = Gst.ElementFactory.make("glimagesink", None)
self.widget = Gtk.DrawingArea()
self.widget.set_double_buffered(True)
def xid(self):
return self.widget.get_window().get_xid()
def set_handle(self):
self.element.set_window_handle(self.xid())
class GtkGLSink:
def __init__(self):
self.element = Gst.ElementFactory.make("gtkglsink", None)
self.widget = self.element.get_property("widget")
def set_handle(self):
pass
|
lubosz/gst-plugins-vr
|
sphvr/sinks.py
|
Python
|
lgpl-2.1
| 652
|
# -*- coding: utf-8-*-
"""
Author: Marco Dinacci <dev@dinointeractive.com>
Copyright © 2008-2009
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
class MVCLCDNumber(QLCDNumber):
def __init__(self, parent=None):
super(MVCLCDNumber, self).__init__(parent)
self.setSegmentStyle(QLCDNumber.Flat)
def setModel(self, model):
self.model = model
def update(self):
self.display(self.model.getTileCount())
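
# Usage sketch (TileModel is an assumed stand-in; any object exposing
# getTileCount() works):
#
#   class TileModel(object):
#       def getTileCount(self):
#           return 42
#
#   lcd = MVCLCDNumber()
#   lcd.setModel(TileModel())
#   lcd.update()  # displays 42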
|
mdinacci/rtw
|
track-editor/src/gui/qt/plugins/mvclcdnumber.py
|
Python
|
mit
| 462
|
# robotframework-tools
#
# Python Tools for Robot Framework and Test Libraries.
#
# Copyright (C) 2013-2016 Stefan Zimmermann <zimmermann.code@gmail.com>
#
# robotframework-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# robotframework-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with robotframework-tools. If not, see <http://www.gnu.org/licenses/>.
"""robottools.utils
.. moduleauthor:: Stefan Zimmermann <zimmermann.code@gmail.com>
"""
from .normbool import normboolclass, normbooltype
from .normstr import normstringclass, normstringtype
from .normdict import NormalizedDict, normdictclass, normdicttype, \
normdictkeys, normdictitems, normdictdata
RobotBool = normbooltype('RobotBool', true=['true'], false=['false'])
|
userzimmermann/robotframework-tools
|
robottools/utils/__init__.py
|
Python
|
gpl-3.0
| 1,192
|
"""
sentry.plugins.sentry_interface_types.models
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
import sentry
from sentry.plugins import register
from sentry.plugins.bases.tag import TagPlugin
class InterfaceTypePlugin(TagPlugin):
    """
    Automatically adds an 'interface_type' tag to events, referencing the
    class name of each interface present (e.g. Http, Stacktrace, Exception).
    """
    description = __doc__
slug = 'interface_types'
title = 'Auto Tag: Interface Types'
version = sentry.VERSION
author = "Sentry Team"
author_url = "https://github.com/getsentry/sentry"
tag = 'interface_type'
project_default_enabled = False
def get_tag_values(self, event):
return [i.rsplit('.', 1)[-1] for i in six.iterkeys(event.interfaces)]
register(InterfaceTypePlugin)
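
# Illustration (assumed event shape): interface dicts are keyed by dotted
# paths, so the rsplit in get_tag_values() keeps only the class name:
#   ['sentry.interfaces.Http'.rsplit('.', 1)[-1]]  -> ['Http']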
|
ifduyue/sentry
|
src/sentry/plugins/sentry_interface_types/models.py
|
Python
|
bsd-3-clause
| 998
|
# -*- coding: utf-8 -*-
#
#
# Authors: Adrien Peiffer
# Copyright (c) 2014 Acsone SA/NV (http://www.acsone.eu)
# All Rights Reserved
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contact a Free Software
# Service Company.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
import openerp.tests.common as common
from datetime import datetime
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from openerp import workflow
def create_simple_invoice(self):
partner_id = self.ref('base.res_partner_2')
product_id = self.ref('product.product_product_4')
today = datetime.now()
journal_id = self.ref('account.sales_journal')
date = today.strftime(DEFAULT_SERVER_DATE_FORMAT)
return self.env['account.invoice']\
.create({'partner_id': partner_id,
'account_id':
self.ref('account.a_recv'),
'journal_id':
journal_id,
'date_invoice': date,
'invoice_line': [(0, 0, {'name': 'test',
'account_id':
self.ref('account.a_sale'),
'price_unit': 2000.00,
'quantity': 1,
'product_id': product_id,
}
)
],
})
class TestAccountDefaultDraftMove(common.TransactionCase):
def setUp(self):
super(TestAccountDefaultDraftMove, self).setUp()
def test_draft_move_invoice(self):
invoice = create_simple_invoice(self)
workflow.trg_validate(self.uid, 'account.invoice', invoice.id,
'invoice_open', self.cr)
self.assertEqual(invoice.move_id.state, 'draft', 'State error!')
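
# Note (added): the (0, 0, {...}) triple passed in 'invoice_line' above is
# Odoo's one2many "create" command; a list such as
#   [(0, 0, vals1), (0, 0, vals2)]
# creates two invoice lines attached to the new invoice in a single create().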
|
DarkoNikolovski/account-financial-tools
|
account_default_draft_move/tests/test_account_default_draft_move.py
|
Python
|
agpl-3.0
| 2,861
|
import core, connect
import gplus
##import pubmed
##import pickle
import doi
from bson import ObjectId
import apptree
import incoming
from datetime import datetime
import time
import sessioninfo
import bulk
def destroy_db_and_test():
'''tests progressively building an spnet db starting from a blank
slate, adding papers, people, posts, topics, etc. and verifying
the expected results. NB: this is a destructive test, i.e.
it FLUSHES whatever is in the spnet database and fills it with
its own test data.'''
dbconn = connect.init_connection()
dbconn._conn.drop_database('spnet') # start test from a blank slate
rootColl = apptree.get_collections()
lorem = '''Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.'''
jojo = core.Person(docData=dict(name='jojo', age=37))
    assert jojo is not None
assert jojo.force_reload(delay=1) is False # set timer
assert jojo.force_reload() is False # timer still waiting
time.sleep(2)
assert jojo.force_reload() # timer done
a1 = core.EmailAddress(docData=dict(address='jojo@nowhere.edu', current=True),
parent=jojo)
fred = core.Person(docData=dict(name='fred', age=56))
a2 = core.EmailAddress(docData=dict(address='fred@dotzler.com',
authenticated=False), parent=fred)
a3 = core.EmailAddress(docData=dict(address='fred@gmail.com',
note='personal account'), parent=fred)
paper1 = core.ArxivPaperData('1302.4871', insertNew='findOrInsert').parent
paper1.update(dict(authors=[jojo._id]))
paper2 = core.ArxivPaperData('1205.6541', insertNew='findOrInsert').parent
paper2.update(dict(authors=[fred._id, jojo._id]))
assert paper1.arxiv.id == '1302.4871'
assert paper2.arxiv.id == '1205.6541'
jojoGplus = core.GplusPersonData(docData=dict(id=1234, displayName='Joseph Nye', image={'url':'http://www.nobelprize.org/nobel_prizes/physics/laureates/1921/einstein.jpg'}),
parent=jojo)
jojoGplus.update(dict(etag='oldversion'))
sig1 = core.SIG.find_or_insert('cosmology')
sig2 = core.SIG.find_or_insert('lambdaCDMmodel')
topicWords = incoming.get_topicIDs(['cosmology', 'astrophysics'],
1, datetime.utcnow(), 'test')
assert topicWords == ['cosmology', 'astrophysics']
astroSIG = core.SIG('astrophysics')
assert astroSIG.name == '#astrophysics'
assert astroSIG.origin == dict(source='test', id=1)
int1 = core.PaperInterest(docData=dict(author=jojo._id, topics=[sig1._id]),
parent=paper1)
assert core.Paper(paper1._id).interests == [int1]
assert core.Paper(paper1._id).get_interests() == {sig1._id:[jojo]}
assert core.Person(jojo._id).interests == [int1]
assert core.Person(jojo._id).topics == [sig1._id]
assert core.SIG(sig1._id).interests == [int1]
assert core.SIG(sig1._id).get_interests() == {paper1:[jojo]}
intAgain = core.PaperInterest((paper1._id, jojo._id))
assert intAgain == int1
try:
intAgain.remove_topic(sig2._id)
except KeyError:
pass
else:
raise AssertionError('failed to catch bad remove_topic()')
assert intAgain.remove_topic(sig1._id) is None
assert core.Paper(paper1._id).interests == []
# test creation via POST
paperLikes = rootColl['papers'].likes
sessioninfo.get_session.sessionDict = dict(person=fred)
int2 = paperLikes._POST(fred._id, sig2._id, '1',
parents=dict(paper=paper2))
assert int2.parent == paper2
assert int2.author == fred
assert int2.topics == [sig2]
assert core.Paper(paper2._id).interests == [int2]
assert core.Person(fred._id).interests == [int2]
assert core.Person(fred._id).topics == [sig2._id]
assert core.SIG(sig2._id).interests == [int2]
try:
paperLikes._POST(fred._id, 'this is not allowed', '1',
parents=dict(paper=paper2))
except KeyError:
pass
else:
raise AssertionError('failed to trap bad topic string')
# test removal via POST
assert paperLikes._POST(fred._id, sig2._id, '0',
parents=dict(paper=core.Paper(paper2._id))) == int2
assert core.Paper(paper2._id).interests == []
int3 = paperLikes._POST(fred._id, '#silicene', '1',
parents=dict(paper=paper2))
assert core.SIG('silicene').interests == [int3]
assert set(core.Person(fred._id).topics) == set([sig2._id, 'silicene'])
gplus2 = core.GplusPersonData(docData=dict(id=1234, displayName='Joseph Nye'),
insertNew='findOrInsert')
assert gplus2 == jojoGplus
gplus3 = core.GplusPersonData(docData=dict(id=5678, displayName='Fred Eiserling'),
insertNew='findOrInsert')
assert gplus3.parent.name == 'Fred Eiserling'
rec1 = core.Post(docData=dict(author=fred._id, citationType='recommend', id='1',
title='Why You Need to Read This Important Extension of the CDM Model',
text=lorem),
parent=paper1)
rec2 = core.Post(docData=dict(author=jojo._id, text='must read!',
citationType='mustread', id='2',
sigs=[sig1._id, sig2._id]),
parent=paper2._id)
assert set(core.Person(jojo._id).topics) == set([sig1._id, sig2._id])
post1 = core.Post(docData=dict(author=fred._id, text='interesting paper!',
id=98765, sigs=[sig1._id]), parent=paper1)
assert set(core.Person(fred._id).topics) == set([sig1._id, sig2._id, 'silicene'])
reply1 = core.Reply(docData=dict(author=jojo._id, text='I disagree with Fred.',
id=7890, replyTo=98765), parent=paper1)
issue1 = core.Issue(docData=dict(paper=paper1, title='The claims are garbage',
category='validity', author=jojo._id,
description='there is a major flaw in the first step of your proof'))
vote1 = core.IssueVote(docData=dict(person=jojo, rating='crucial',
status='open'),
parent=issue1)
assert core.Person(jojo._id).email == [a1]
assert core.Person(jojo._id).replies == [reply1]
jgp = core.GplusPersonData(1234)
assert jgp.parent == jojo
assert jgp.etag == 'oldversion'
assert len(rec1.parent.authors) == 1
assert rec1.parent.authors[0] == jojo
assert len(rec2.parent.authors) == 2
assert jojo in rec2.parent.authors
assert fred in rec2.parent.authors
assert len(rec2.parent.recommendations) == 1
assert len(jojo.recommendations) == 1
assert jojo.recommendations[0] == rec2
assert len(jojo.papers) == 2
assert len(fred.papers) == 1
assert len(paper2.authors[0].email) == 2
assert issue1.author == jojo
p = core.Paper(paper1._id)
assert len(p.issues) == 1
posts1 = p.get_all_posts()
assert len(posts1) == 1
assert posts1 == [post1]
assert posts1[0].text == 'interesting paper!'
assert list(posts1[0].get_replies()) == [reply1]
assert core.Post(98765).author == fred
assert core.Reply(7890).replyTo == post1
assert core.Reply(7890).parent == paper1
assert filter(lambda x:not x.is_rec(), core.Person(fred._id).posts) == [post1]
assert filter(lambda x:not x.is_rec(), core.SIG(sig1._id).posts) == [post1]
assert core.Post(98765).sigs == [sig1]
replyAgain = core.Reply(docData=dict(author=fred._id, text='interesting paper!',
id=7890, replyTo=98765), parent=paper1,
insertNew='findOrInsert')
assert replyAgain == reply1
assert core.Paper(paper1._id).replies == [reply1]
reply2 = core.Reply(docData=dict(author=jojo._id, text='This paper really made me think.',
id=7891, replyTo=98765), parent=paper1,
insertNew='findOrInsert')
assert core.Paper(paper1._id).replies == [reply1, reply2]
assert core.Paper(str(paper1._id)) == paper1, 'auto ID conversion failed'
assert p.issues[0] == issue1
assert len(p.issues[0].votes) == 1
assert len(rec2.sigs) == 2
assert rec2.sigs[0] == sig1
assert sig1.recommendations == [rec2]
rec1.array_append('sigs', sig2)
assert len(sig2.recommendations) == 2
assert core.Post(rec1.id).sigs == [sig2]
rec2.update(dict(text='totally fascinating!', score=27))
rec3 = core.Post(rec2.id)
assert rec3.score == 27
a4 = core.EmailAddress('fred@dotzler.com')
assert a4._parent_link == fred._id
assert a4.parent == fred
try:
p = core.Person('abcdefg')
except KeyError:
pass
else:
raise AssertionError('failed to trap bad personID')
try:
a = core.EmailAddress('bob@yoyo.com')
except KeyError:
pass
else:
raise AssertionError('failed to trap bad email')
try:
jojo = core.Person(docData=dict(name2='jojo', age=37))
except ValueError:
pass
else:
raise AssertionError('failed to trap Person w/o name')
fred.array_append('numbers', 17)
assert core.Person(fred._id).numbers == [17]
fred.array_append('numbers', 6)
assert core.Person(fred._id).numbers == [17, 6]
fred.array_del('numbers', 17)
assert core.Person(fred._id).numbers == [6]
a4.array_append('numbers', 17)
assert core.EmailAddress(a4.address).numbers == [17]
a4.array_append('numbers', 6)
assert core.EmailAddress(a4.address).numbers == [17, 6]
a4.array_del('numbers', 17)
assert core.EmailAddress(a4.address).numbers == [6]
rec3 = core.Post(docData=dict(author=fred._id, citationType='recommend',
text='I think this is a major breakthrough.',
sigs=[sig2._id], id=3456),
parent=paper2._id)
assert core.SIG(sig1._id).recommendations == [rec2]
assert len(core.SIG(sig2._id).recommendations) == 3
it = gplus.publicAccess.get_person_posts('107295654786633294692')
testPosts = list(gplus.publicAccess.find_or_insert_posts(it))
assert len(testPosts) > 0
nposts = len(core.Paper(paper1._id).posts)
nreplies = len(core.Paper(paper1._id).replies)
it = gplus.publicAccess.get_person_posts('107295654786633294692')
testPosts2 = list(gplus.publicAccess.find_or_insert_posts(it))
assert testPosts == testPosts2
assert nposts == len(core.Paper(paper1._id).posts)
assert nreplies == len(core.Paper(paper1._id).replies)
gpd = core.GplusPersonData('112634568601116338347',
insertNew='findOrInsert')
assert gpd.displayName == 'Meenakshi Roy'
gpd.update_subscriptions(dict(etag='foo', totalItems=1),
[dict(id='114744049040264263224')])
gps = gpd.subscriptions
assert gps.gplusPerson == gpd
mrID = gpd.parent._id
subscriptions = core.Person(mrID).subscriptions
assert len(subscriptions) == 0
gpd2 = core.GplusPersonData('114744049040264263224',
insertNew='findOrInsert')
time.sleep(2)
subscriptions = core.Person(mrID).subscriptions
assert len(subscriptions) == 1
assert subscriptions[0].author == gpd2.parent
cjlposts = gpd2.update_posts(999) # retrieve some recs
assert len(cjlposts) > 0 # got some
assert len(core.Person(mrID).received) > 0 # and they were delivered
assert len(core.Person(mrID).get_deliveries()) > 0 # and UI can retrieve them
recReply = core.Reply(docData=dict(author=jojo._id, id=78901, replyTo=3456,
text='Fred, thanks for your comments! Your insights are really helpful.'),
parent=paper2._id)
# make sure timestamps present on all recs
l = [r.published for r in core.Post.find_obj()]
l = [r.published for r in core.Reply.find_obj()]
assert recReply.replyTo == rec3
assert list(recReply.replyTo.get_replies()) == [recReply]
# pubmed eutils network server constantly failing now??
## pubmedDict = pubmed.get_pubmed_dict('23482246')
## with open('../pubmed/test1.pickle') as ifile:
## correctDict = pickle.load(ifile)
## assert pubmedDict == correctDict
## paper3 = core.PubmedPaperData('23482246', insertNew='findOrInsert').parent
## paper3.update(dict(authors=[fred._id]))
## ppd = core.PubmedPaperData('23139441', insertNew='findOrInsert')
## assert ppd.doi.upper() == '10.1016/J.MSEC.2012.05.020'
## assert paper3.pubmed.id == '23482246'
## assert paper3.title[:40] == correctDict['title'][:40]
s = 'aabbe'
t = doi.map_to_doi(s)
assert t == '10.1002/(SICI)1097-0258(19980815/30)17:15/16<1661::AID-SIM968>3.0.CO;2-2'
assert s == doi.map_to_shortdoi(t)
paper4 = core.DoiPaperData(DOI=t, insertNew='findOrInsert').parent
paper4.update(dict(authors=[fred._id]))
assert paper4.doi.id == s
assert paper4.doi.doi == t
assert paper4.doi.DOI == t.upper()
paper5 = core.DoiPaperData(s, insertNew='findOrInsert').parent
assert paper4 == paper5
assert rootColl['shortDOI']._GET(s) == paper4
txt = 'some text ' + paper4.doi.get_hashtag()
refs, topics, primary = incoming.get_citations_types_and_topics(txt, spnetworkOnly=False)
assert incoming.get_paper(primary,refs[primary][1]) == paper4
spnetPaper = core.DoiPaperData(DOI='10.3389/fncom.2012.00001',
insertNew='findOrInsert').parent
assert spnetPaper.title.lower() == 'open peer review by a selected-papers network'
txt = 'a long comment ' + spnetPaper.doi.get_doctag() + ', some more text'
refs, topics, primary = incoming.get_citations_types_and_topics(txt, spnetworkOnly=False)
assert incoming.get_paper(primary,refs[primary][1]) == spnetPaper
topics, subs = bulk.get_people_subs()
bulk.deliver_recs(topics, subs)
assert len(core.Person(jojo._id).received) == 4
assert len(core.Person(fred._id).received) == 2
| cjlee112/spnet | spnet/test.py | Python | gpl-2.0 | 14,741 |
#
# commands.py - the GraalVM specific commands
#
# ----------------------------------------------------------------------------------------------------
#
# Copyright (c) 2007, 2012, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 only, as
# published by the Free Software Foundation.
#
# This code is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# version 2 for more details (a copy is included in the LICENSE file that
# accompanied this code).
#
# You should have received a copy of the GNU General Public License version
# 2 along with this work; if not, write to the Free Software Foundation,
# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
# or visit www.oracle.com if you need additional information or have any
# questions.
#
# ----------------------------------------------------------------------------------------------------
import os, sys, shutil, zipfile, tempfile, re, time, datetime, platform, subprocess, multiprocessing
from os.path import join, exists, dirname, basename, getmtime
from argparse import ArgumentParser, REMAINDER
import mx
import sanitycheck
import itertools
import json, textwrap
# This works because when mx loads this file, it makes sure __file__ gets an absolute path
_graal_home = dirname(dirname(__file__))
""" Used to distinguish an exported GraalVM (see 'mx export'). """
_vmSourcesAvailable = exists(join(_graal_home, 'make')) and exists(join(_graal_home, 'src'))
""" The VMs that can be built and run along with an optional description. Only VMs with a
description are listed in the dialogue for setting the default VM (see _get_vm()). """
_vmChoices = {
'graal' : 'All compilation is performed with Graal. This includes bootstrapping Graal itself unless -XX:-BootstrapGraal is used.',
'server' : 'Normal compilation is performed with the tiered system (i.e., client + server), Truffle compilation is performed with Graal. Use this for optimal Truffle performance.',
'client' : None, # normal compilation with client compiler, explicit compilation (e.g., by Truffle) with Graal
'server-nograal' : None, # all compilation with tiered system (i.e., client + server), Graal omitted
'client-nograal' : None, # all compilation with client compiler, Graal omitted
'original' : None, # default VM copied from bootstrap JDK
}
""" The VM that will be run by the 'vm' command and built by default by the 'build' command.
This can be set via the global '--vm' option or the DEFAULT_VM environment variable.
It can also be temporarily set by using a VM context manager object in a 'with' statement. """
_vm = None
""" The VM builds that will be run by the 'vm' command - default is first in list """
_vmbuildChoices = ['product', 'fastdebug', 'debug', 'optimized']
""" The VM build that will be run by the 'vm' command.
This can be set via the global '--vmbuild' option.
It can also be temporarily set by using a VM context manager object in a 'with' statement. """
_vmbuild = _vmbuildChoices[0]
_jacoco = 'off'
""" The current working directory to switch to before running the VM. """
_vm_cwd = None
""" The base directory in which the JDKs cloned from $JAVA_HOME exist. """
_installed_jdks = None
""" Prefix for running the VM. """
_vm_prefix = None
_make_eclipse_launch = False
_minVersion = mx.JavaVersion('1.7.0_04')
def _get_vm():
"""
Gets the configured VM, presenting a dialogue if there is no currently configured VM.
"""
global _vm
if _vm:
return _vm
vm = mx.get_env('DEFAULT_VM')
if vm is None:
if not sys.stdout.isatty():
mx.abort('Need to specify VM with --vm option or DEFAULT_VM environment variable')
envPath = join(_graal_home, 'mx', 'env')
mx.log('Please select the VM to be executed from the following: ')
items = [k for k in _vmChoices.keys() if _vmChoices[k] is not None]
descriptions = [_vmChoices[k] for k in _vmChoices.keys() if _vmChoices[k] is not None]
vm = mx.select_items(items, descriptions, allowMultiple=False)
answer = raw_input('Persist this choice by adding "DEFAULT_VM=' + vm + '" to ' + envPath + '? [Yn]: ')
if not answer.lower().startswith('n'):
with open(envPath, 'a') as fp:
print >> fp, 'DEFAULT_VM=' + vm
_vm = vm
return vm
"""
A context manager that can be used with the 'with' statement to set the VM
used by all VM executions within the scope of the 'with' statement. For example:
with VM('server'):
dacapo(['pmd'])
"""
class VM:
def __init__(self, vm=None, build=None):
assert vm is None or vm in _vmChoices.keys()
assert build is None or build in _vmbuildChoices
self.vm = vm if vm else _vm
self.build = build if build else _vmbuild
self.previousVm = _vm
self.previousBuild = _vmbuild
def __enter__(self):
global _vm, _vmbuild
_vm = self.vm
_vmbuild = self.build
def __exit__(self, exc_type, exc_value, traceback):
global _vm, _vmbuild
_vm = self.previousVm
_vmbuild = self.previousBuild
def _chmodDir(chmodFlags, dirname, fnames):
os.chmod(dirname, chmodFlags)
for name in fnames:
os.chmod(os.path.join(dirname, name), chmodFlags)
def chmodRecursive(dirname, chmodFlags):
os.path.walk(dirname, _chmodDir, chmodFlags)
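# os.path.walk above is Python 2 only; a minimal sketch of the same recursive
# chmod for Python 3, where os.walk is the assumed replacement (unused here):
def _chmodRecursivePy3(dirname, chmodFlags):
    for root, _, fnames in os.walk(dirname):
        os.chmod(root, chmodFlags)
        for name in fnames:
            os.chmod(os.path.join(root, name), chmodFlags)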
def clean(args):
"""clean the GraalVM source tree"""
opts = mx.clean(args, parser=ArgumentParser(prog='mx clean'))
if opts.native:
def rmIfExists(name):
if os.path.isdir(name):
shutil.rmtree(name)
elif os.path.isfile(name):
os.unlink(name)
rmIfExists(join(_graal_home, 'build'))
rmIfExists(join(_graal_home, 'build-nograal'))
rmIfExists(_jdksDir())
rmIfExists(mx.distribution('GRAAL').path)
def export(args):
"""create a GraalVM zip file for distribution"""
    parser = ArgumentParser(prog='mx export')
parser.add_argument('--omit-vm-build', action='store_false', dest='vmbuild', help='omit VM build step')
parser.add_argument('--omit-dist-init', action='store_false', dest='distInit', help='omit class files and IDE configurations from distribution')
parser.add_argument('zipfile', nargs=REMAINDER, metavar='zipfile')
args = parser.parse_args(args)
tmp = tempfile.mkdtemp(prefix='tmp', dir=_graal_home)
if args.vmbuild:
# Make sure the product VM binary is up to date
        with VM(build='product'):
build([])
mx.log('Copying Java sources and mx files...')
mx.run(('hg archive -I graal -I mx -I mxtool -I mx.sh ' + tmp).split())
# Copy the GraalVM JDK
mx.log('Copying GraalVM JDK...')
src = _jdk()
dst = join(tmp, basename(src))
shutil.copytree(src, dst)
    # create class files and IDE configurations
    if args.distInit:
        mx.log('Creating class files...')
        mx.run('mx build'.split(), cwd=tmp)
        mx.log('Creating IDE configurations...')
        mx.run('mx ideinit'.split(), cwd=tmp)
    # zip the result; this must happen after the dist-init steps so their
    # output is included, and the archive must be closed before cleanup
    zfName = join(_graal_home, 'graalvm-' + mx.get_os() + '.zip')
    zf = zipfile.ZipFile(zfName, 'w')
    for root, _, files in os.walk(tmp):
        for f in files:
            name = join(root, f)
            arcname = name[len(tmp) + 1:]
            zf.write(name, arcname)
    zf.close()
    # clean up temp directory
    mx.log('Cleaning up...')
    shutil.rmtree(tmp)
mx.log('Created distribution in ' + zfName)
def _run_benchmark(args, availableBenchmarks, runBenchmark):
vmOpts, benchmarksAndOptions = _extract_VM_args(args, useDoubleDash=availableBenchmarks is None)
if availableBenchmarks is None:
harnessArgs = benchmarksAndOptions
return runBenchmark(None, harnessArgs, vmOpts)
if len(benchmarksAndOptions) == 0:
mx.abort('at least one benchmark name or "all" must be specified')
benchmarks = list(itertools.takewhile(lambda x: not x.startswith('-'), benchmarksAndOptions))
harnessArgs = benchmarksAndOptions[len(benchmarks):]
if 'all' in benchmarks:
benchmarks = availableBenchmarks
else:
for bm in benchmarks:
if bm not in availableBenchmarks:
mx.abort('unknown benchmark: ' + bm + '\nselect one of: ' + str(availableBenchmarks))
failed = []
for bm in benchmarks:
if not runBenchmark(bm, harnessArgs, vmOpts):
failed.append(bm)
if len(failed) != 0:
mx.abort('Benchmark failures: ' + str(failed))
def dacapo(args):
"""run one or more DaCapo benchmarks"""
def launcher(bm, harnessArgs, extraVmOpts):
return sanitycheck.getDacapo(bm, harnessArgs).test(_get_vm(), extraVmOpts=extraVmOpts)
_run_benchmark(args, sanitycheck.dacapoSanityWarmup.keys(), launcher)
def scaladacapo(args):
"""run one or more Scala DaCapo benchmarks"""
def launcher(bm, harnessArgs, extraVmOpts):
return sanitycheck.getScalaDacapo(bm, harnessArgs).test(_get_vm(), extraVmOpts=extraVmOpts)
_run_benchmark(args, sanitycheck.dacapoScalaSanityWarmup.keys(), launcher)
def _arch():
machine = platform.uname()[4]
if machine in ['amd64', 'AMD64', 'x86_64', 'i86pc']:
return 'amd64'
if machine in ['sun4v']:
return 'sparc'
if machine == 'i386' and mx.get_os() == 'darwin':
try:
# Support for Snow Leopard and earlier version of MacOSX
if subprocess.check_output(['sysctl', '-n', 'hw.cpu64bit_capable']).strip() == '1':
return 'amd64'
except OSError:
# sysctl is not available
pass
mx.abort('unknown or unsupported architecture: os=' + mx.get_os() + ', machine=' + machine)
def _vmLibDirInJdk(jdk):
"""
    Get the directory within a JDK where the server and client
    subdirectories are located (e.g. jre/lib/amd64 on Linux, jre/lib on
    Darwin, jre/bin on Windows).
"""
if platform.system() == 'Darwin':
return join(jdk, 'jre', 'lib')
if platform.system() == 'Windows':
return join(jdk, 'jre', 'bin')
return join(jdk, 'jre', 'lib', _arch())
def _vmCfgInJdk(jdk):
"""
Get the jvm.cfg file.
"""
if platform.system() == 'Windows':
return join(jdk, 'jre', 'lib', _arch(), 'jvm.cfg')
return join(_vmLibDirInJdk(jdk), 'jvm.cfg')
def _jdksDir():
return os.path.abspath(join(_installed_jdks if _installed_jdks else _graal_home, 'jdk' + str(mx.java().version)))
def _handle_missing_VM(bld, vm):
mx.log('The ' + bld + ' ' + vm + ' VM has not been created')
if sys.stdout.isatty():
answer = raw_input('Build it now? [Yn]: ')
if not answer.lower().startswith('n'):
with VM(vm, bld):
build([])
return
mx.abort('You need to run "mx --vm ' + vm + '--vmbuild ' + bld + ' build" to build the selected VM')
def _jdk(build='product', vmToCheck=None, create=False, installGraalJar=True):
"""
Get the JDK into which Graal is installed, creating it first if necessary.
"""
jdk = join(_jdksDir(), build)
if create:
srcJdk = mx.java().jdk
jdkContents = ['bin', 'include', 'jre', 'lib']
if exists(join(srcJdk, 'db')):
jdkContents.append('db')
if mx.get_os() != 'windows' and exists(join(srcJdk, 'man')):
jdkContents.append('man')
if not exists(jdk):
mx.log('Creating ' + jdk + ' from ' + srcJdk)
os.makedirs(jdk)
for d in jdkContents:
src = join(srcJdk, d)
dst = join(jdk, d)
if not exists(src):
mx.abort('Host JDK directory is missing: ' + src)
shutil.copytree(src, dst)
# Make a copy of the default VM so that this JDK can be
# reliably used as the bootstrap for a HotSpot build.
jvmCfg = _vmCfgInJdk(jdk)
if not exists(jvmCfg):
mx.abort(jvmCfg + ' does not exist')
defaultVM = None
jvmCfgLines = []
with open(jvmCfg) as f:
for line in f:
if line.startswith('-') and defaultVM is None:
parts = line.split()
assert len(parts) == 2, parts
assert parts[1] == 'KNOWN', parts[1]
defaultVM = parts[0][1:]
jvmCfgLines += ['# default VM is a copy of the unmodified ' + defaultVM + ' VM\n']
jvmCfgLines += ['-original KNOWN\n']
else:
jvmCfgLines += [line]
assert defaultVM is not None, 'Could not find default VM in ' + jvmCfg
if mx.get_os() != 'windows':
chmodRecursive(jdk, 0755)
shutil.move(join(_vmLibDirInJdk(jdk), defaultVM), join(_vmLibDirInJdk(jdk), 'original'))
with open(jvmCfg, 'w') as fp:
for line in jvmCfgLines:
fp.write(line)
# Install a copy of the disassembler library
try:
hsdis([], copyToDir=_vmLibDirInJdk(jdk))
except SystemExit:
pass
else:
if not exists(jdk):
if _installed_jdks and mx._opts.verbose:
mx.log("Could not find JDK directory at " + jdk)
_handle_missing_VM(build, vmToCheck if vmToCheck else 'graal')
if installGraalJar:
_installGraalJarInJdks(mx.distribution('GRAAL'))
if vmToCheck is not None:
jvmCfg = _vmCfgInJdk(jdk)
found = False
with open(jvmCfg) as f:
for line in f:
if line.strip() == '-' + vmToCheck + ' KNOWN':
found = True
break
if not found:
_handle_missing_VM(build, vmToCheck)
return jdk
def _installGraalJarInJdks(graalDist):
graalJar = graalDist.path
graalOptions = join(_graal_home, 'graal.options')
jdks = _jdksDir()
if exists(jdks):
for e in os.listdir(jdks):
jreLibDir = join(jdks, e, 'jre', 'lib')
if exists(jreLibDir):
# do a copy and then a move to get atomic updating (on Unix) of graal.jar in the JRE
fd, tmp = tempfile.mkstemp(suffix='', prefix='graal.jar', dir=jreLibDir)
shutil.copyfile(graalJar, tmp)
os.close(fd)
shutil.move(tmp, join(jreLibDir, 'graal.jar'))
if exists(graalOptions):
shutil.copy(graalOptions, join(jreLibDir, 'graal.options'))
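# The copy-then-move above is the standard trick for atomic replacement; a
# standalone sketch of the idiom (hypothetical helper, not called by mx):
def _atomic_copy_sketch(src, dst):
    fd, tmp = tempfile.mkstemp(prefix=basename(dst), dir=dirname(dst) or '.')
    os.close(fd)
    shutil.copyfile(src, tmp)
    shutil.move(tmp, dst)  # a same-filesystem rename is atomic on POSIX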
# run a command in the Windows SDK Debug Shell
def _runInDebugShell(cmd, workingDir, logFile=None, findInOutput=None, respondTo={}):
newLine = os.linesep
STARTTOKEN = 'RUNINDEBUGSHELL_STARTSEQUENCE'
ENDTOKEN = 'RUNINDEBUGSHELL_ENDSEQUENCE'
winSDK = mx.get_env('WIN_SDK', 'C:\\Program Files\\Microsoft SDKs\\Windows\\v7.1\\')
if not exists(winSDK):
mx.abort("Could not find Windows SDK : '" + winSDK + "' does not exist")
if not exists(join(winSDK, 'Bin', 'SetEnv.cmd')):
mx.abort("Invalid Windows SDK path (" + winSDK + ") : could not find Bin/SetEnv.cmd (you can use the WIN_SDK environment variable to specify an other path)")
p = subprocess.Popen('cmd.exe /E:ON /V:ON /K ""' + winSDK + '/Bin/SetEnv.cmd" & echo ' + STARTTOKEN + '"', \
shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
stdout = p.stdout
stdin = p.stdin
if logFile:
log = open(logFile, 'w')
ret = False
while True:
        # encoding may be None on Windows platforms
if sys.stdout.encoding is None:
encoding = 'utf-8'
else:
encoding = sys.stdout.encoding
line = stdout.readline().decode(encoding)
if logFile:
log.write(line.encode('utf-8'))
line = line.strip()
mx.log(line)
if line == STARTTOKEN:
stdin.write('cd /D ' + workingDir + ' & ' + cmd + ' & echo ' + ENDTOKEN + newLine)
for regex in respondTo.keys():
match = regex.search(line)
if match:
stdin.write(respondTo[regex] + newLine)
if findInOutput:
match = findInOutput.search(line)
if match:
ret = True
if line == ENDTOKEN:
if not findInOutput:
stdin.write('echo ERRXXX%errorlevel%' + newLine)
else:
break
if line.startswith('ERRXXX'):
if line == 'ERRXXX0':
ret = True
            break
stdin.write('exit' + newLine)
if logFile:
log.close()
return ret
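# Illustrative call of _runInDebugShell (hypothetical command, paths and
# pattern; meaningful only on a Windows host with the SDK installed):
#   ok = _runInDebugShell('msbuild jvm.vcxproj /p:Configuration=product',
#                         r'C:\graal', logFile=r'C:\graal\build.log',
#                         findInOutput=re.compile('Build succeeded'))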
def jdkhome(vm=None):
"""return the JDK directory selected for the 'vm' command"""
build = _vmbuild if _vmSourcesAvailable else 'product'
return _jdk(build, installGraalJar=False)
def print_jdkhome(args, vm=None):
"""print the JDK directory selected for the 'vm' command"""
print jdkhome(vm)
def buildvars(args):
"""describe the variables that can be set by the -D option to the 'mx build' commmand"""
buildVars = {
'ALT_BOOTDIR' : 'The location of the bootstrap JDK installation (default: ' + mx.java().jdk + ')',
'ALT_OUTPUTDIR' : 'Build directory',
'HOTSPOT_BUILD_JOBS' : 'Number of CPUs used by make (default: ' + str(multiprocessing.cpu_count()) + ')',
'INSTALL' : 'Install the built VM into the JDK? (default: y)',
'ZIP_DEBUGINFO_FILES' : 'Install zipped debug symbols file? (default: 0)',
}
mx.log('HotSpot build variables that can be set by the -D option to "mx build":')
mx.log('')
for n in sorted(buildVars.iterkeys()):
mx.log(n)
mx.log(textwrap.fill(buildVars[n], initial_indent=' ', subsequent_indent=' ', width=200))
mx.log('')
mx.log('Note that these variables can be given persistent values in the file ' + join(_graal_home, 'mx', 'env') + ' (see \'mx about\').')
def build(args, vm=None):
"""build the VM binary
The global '--vm' and '--vmbuild' options select which VM type and build target to build."""
# Override to fail quickly if extra arguments are given
# at the end of the command line. This allows for a more
# helpful error message.
class AP(ArgumentParser):
def __init__(self):
ArgumentParser.__init__(self, prog='mx build')
def parse_args(self, args):
result = ArgumentParser.parse_args(self, args)
if len(result.remainder) != 0:
firstBuildTarget = result.remainder[0]
mx.abort('To specify the ' + firstBuildTarget + ' VM build target, you need to use the global "--vmbuild" option. For example:\n' +
' mx --vmbuild ' + firstBuildTarget + ' build')
return result
# Call mx.build to compile the Java sources
    parser = AP()
parser.add_argument('--export-dir', help='directory to which graal.jar and graal.options will be copied', metavar='<path>')
parser.add_argument('-D', action='append', help='set a HotSpot build variable (run \'mx buildvars\' to list variables)', metavar='name=value')
opts2 = mx.build(['--source', '1.7'] + args, parser=parser)
assert len(opts2.remainder) == 0
if opts2.export_dir is not None:
if not exists(opts2.export_dir):
os.makedirs(opts2.export_dir)
else:
assert os.path.isdir(opts2.export_dir), '{} is not a directory'.format(opts2.export_dir)
shutil.copy(mx.distribution('GRAAL').path, opts2.export_dir)
graalOptions = join(_graal_home, 'graal.options')
if exists(graalOptions):
shutil.copy(graalOptions, opts2.export_dir)
if not _vmSourcesAvailable or not opts2.native:
return
builds = [_vmbuild]
if vm is None:
vm = _get_vm()
if vm == 'original':
pass
elif vm.startswith('server'):
buildSuffix = ''
elif vm.startswith('client'):
buildSuffix = '1'
else:
assert vm == 'graal', vm
buildSuffix = 'graal'
for build in builds:
if build == 'ide-build-target':
build = os.environ.get('IDE_BUILD_TARGET', None)
if build is None or len(build) == 0:
continue
jdk = _jdk(build, create=True)
if vm == 'original':
if build != 'product':
mx.log('only product build of original VM exists')
continue
vmDir = join(_vmLibDirInJdk(jdk), vm)
if not exists(vmDir):
if mx.get_os() != 'windows':
chmodRecursive(jdk, 0755)
mx.log('Creating VM directory in JDK7: ' + vmDir)
os.makedirs(vmDir)
def filterXusage(line):
            if 'Xusage.txt' not in line:
sys.stderr.write(line + os.linesep)
# Check if a build really needs to be done
timestampFile = join(vmDir, '.build-timestamp')
if opts2.force or not exists(timestampFile):
mustBuild = True
else:
mustBuild = False
timestamp = os.path.getmtime(timestampFile)
sources = []
for d in ['src', 'make']:
for root, dirnames, files in os.walk(join(_graal_home, d)):
# ignore <graal>/src/share/tools
if root == join(_graal_home, 'src', 'share'):
dirnames.remove('tools')
sources += [join(root, name) for name in files]
for f in sources:
if len(f) != 0 and os.path.getmtime(f) > timestamp:
mustBuild = True
break
if not mustBuild:
mx.logv('[all files in src and make directories are older than ' + timestampFile[len(_graal_home) + 1:] + ' - skipping native build]')
continue
if platform.system() == 'Windows':
compilelogfile = _graal_home + '/graalCompile.log'
mksHome = mx.get_env('MKS_HOME', 'C:\\cygwin\\bin')
variant = {'client': 'compiler1', 'server': 'compiler2'}.get(vm, vm)
project_config = variant + '_' + build
_runInDebugShell('msbuild ' + _graal_home + r'\build\vs-amd64\jvm.vcproj /p:Configuration=' + project_config + ' /target:clean', _graal_home)
winCompileCmd = r'set HotSpotMksHome=' + mksHome + r'& set OUT_DIR=' + jdk + r'& set JAVA_HOME=' + jdk + r'& set path=%JAVA_HOME%\bin;%path%;%HotSpotMksHome%& cd /D "' +_graal_home + r'\make\windows"& call create.bat ' + _graal_home
print(winCompileCmd)
winCompileSuccess = re.compile(r"^Writing \.vcxproj file:")
if not _runInDebugShell(winCompileCmd, _graal_home, compilelogfile, winCompileSuccess):
mx.log('Error executing create command')
return
winBuildCmd = 'msbuild ' + _graal_home + r'\build\vs-amd64\jvm.vcxproj /p:Configuration=' + project_config + ' /p:Platform=x64'
if not _runInDebugShell(winBuildCmd, _graal_home, compilelogfile):
mx.log('Error building project')
return
else:
cpus = multiprocessing.cpu_count()
runCmd = [mx.gmake_cmd()]
runCmd.append(build + buildSuffix)
env = os.environ.copy()
if opts2.D:
for nv in opts2.D:
name, value = nv.split('=', 1)
env[name.strip()] = value
env.setdefault('ARCH_DATA_MODEL', '64')
env.setdefault('LANG', 'C')
env.setdefault('HOTSPOT_BUILD_JOBS', str(cpus))
env.setdefault('ALT_BOOTDIR', mx.java().jdk)
if not mx._opts.verbose:
runCmd.append('MAKE_VERBOSE=')
env['JAVA_HOME'] = jdk
if vm.endswith('nograal'):
env['INCLUDE_GRAAL'] = 'false'
env.setdefault('ALT_OUTPUTDIR', join(_graal_home, 'build-nograal', mx.get_os()))
else:
env['INCLUDE_GRAAL'] = 'true'
env.setdefault('INSTALL', 'y')
            if mx.get_os() == 'solaris':
# If using sparcWorks, setup flags to avoid make complaining about CC version
cCompilerVersion = subprocess.Popen('CC -V', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).stderr.readlines()[0]
                if cCompilerVersion.startswith('CC: Sun C++'):
compilerRev = cCompilerVersion.split(' ')[3]
env.setdefault('ENFORCE_COMPILER_REV', compilerRev)
env.setdefault('ENFORCE_CC_COMPILER_REV', compilerRev)
if build == 'jvmg':
# I want ALL the symbols when I'm debugging on Solaris
                        # Some Makefile variables are overloaded by environment variables, so we need to explicitly
                        # pass them down on the command line. This one is an example of that.
runCmd.append('STRIP_POLICY=no_strip')
# This removes the need to unzip the *.diz files before debugging in gdb
env.setdefault('ZIP_DEBUGINFO_FILES', '0')
# Clear these 2 variables as having them set can cause very confusing build problems
env.pop('LD_LIBRARY_PATH', None)
env.pop('CLASSPATH', None)
mx.run(runCmd, cwd=join(_graal_home, 'make'), err=filterXusage, env=env)
jvmCfg = _vmCfgInJdk(jdk)
if not exists(jvmCfg):
mx.abort(jvmCfg + ' does not exist')
prefix = '-' + vm + ' '
vmKnown = prefix + 'KNOWN\n'
lines = []
found = False
with open(jvmCfg) as f:
for line in f:
if line.strip() == vmKnown.strip():
found = True
lines.append(line)
if not found:
mx.log('Appending "' + prefix + 'KNOWN" to ' + jvmCfg)
if mx.get_os() != 'windows':
os.chmod(jvmCfg, 0755)
with open(jvmCfg, 'w') as f:
for line in lines:
if line.startswith(prefix):
line = vmKnown
found = True
f.write(line)
if not found:
f.write(vmKnown)
if exists(timestampFile):
os.utime(timestampFile, None)
else:
            open(timestampFile, 'a').close()  # touch the timestamp file
def vmg(args):
"""run the debug build of VM selected by the '--vm' option"""
return vm(args, vmbuild='debug')
def vmfg(args):
"""run the fastdebug build of VM selected by the '--vm' option"""
return vm(args, vmbuild='fastdebug')
def vm(args, vm=None, nonZeroIsFatal=True, out=None, err=None, cwd=None, timeout=None, vmbuild=None):
"""run the VM selected by the '--vm' option"""
if vm is None:
vm = _get_vm()
if cwd is None:
cwd = _vm_cwd
elif _vm_cwd is not None and _vm_cwd != cwd:
mx.abort("conflicting working directories: do not set --vmcwd for this command")
build = vmbuild if vmbuild is not None else _vmbuild if _vmSourcesAvailable else 'product'
jdk = _jdk(build, vmToCheck=vm, installGraalJar=False)
mx.expand_project_in_args(args)
if _make_eclipse_launch:
mx.make_eclipse_launch(args, 'graal-' + build, name=None, deps=mx.project('com.oracle.graal.hotspot').all_deps([], True))
if len([a for a in args if 'PrintAssembly' in a]) != 0:
hsdis([], copyToDir=_vmLibDirInJdk(jdk))
if mx.java().debug_port is not None:
args = ['-Xdebug', '-Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=' + str(mx.java().debug_port)] + args
if _jacoco == 'on' or _jacoco == 'append':
jacocoagent = mx.library("JACOCOAGENT", True)
# Exclude all compiler tests and snippets
excludes = ['com.oracle.graal.compiler.tests.*', 'com.oracle.graal.jtt.*']
for p in mx.projects():
excludes += _find_classes_with_annotations(p, None, ['@Snippet', '@ClassSubstitution', '@Test'], includeInnerClasses=True).keys()
excludes += p.find_classes_with_matching_source_line(None, lambda line: 'JaCoCo Exclude' in line, includeInnerClasses=True).keys()
includes = ['com.oracle.graal.*']
agentOptions = {
'append' : 'true' if _jacoco == 'append' else 'false',
'bootclasspath' : 'true',
'includes' : ':'.join(includes),
'excludes' : ':'.join(excludes),
'destfile' : 'jacoco.exec'
}
args = ['-javaagent:' + jacocoagent.get_path(True) + '=' + ','.join([k + '=' + v for k, v in agentOptions.items()])] + args
if '-d64' not in args:
args = ['-d64'] + args
exe = join(jdk, 'bin', mx.exe_suffix('java'))
pfx = _vm_prefix.split() if _vm_prefix is not None else []
if '-version' in args:
ignoredArgs = args[args.index('-version')+1:]
if len(ignoredArgs) > 0:
mx.log("Warning: The following options will be ignored by the vm because they come after the '-version' argument: " + ' '.join(ignoredArgs))
return mx.run(pfx + [exe, '-' + vm] + args, nonZeroIsFatal=nonZeroIsFatal, out=out, err=err, cwd=cwd, timeout=timeout)
def _find_classes_with_annotations(p, pkgRoot, annotations, includeInnerClasses=False):
"""
Scan the sources of project 'p' for Java source files containing a line starting with 'annotation'
(ignoring preceding whitespace) and return the fully qualified class name for each Java
source file matched in a list.
"""
matches = lambda line : len([a for a in annotations if line == a or line.startswith(a + '(')]) != 0
return p.find_classes_with_matching_source_line(pkgRoot, matches, includeInnerClasses)
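# Illustrative match (hypothetical source lines): with annotations=['@Test'],
# both '@Test' and '@Test(timeout=1000)' match, while '@TestHelper' does not.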
def _extract_VM_args(args, allowClasspath=False, useDoubleDash=False):
"""
Partitions a command line into a leading sequence of HotSpot VM options and the rest.
"""
for i in range(0, len(args)):
if useDoubleDash:
if args[i] == '--':
vmArgs = args[:i]
remainder = args[i + 1:]
return vmArgs, remainder
else:
if not args[i].startswith('-'):
if i != 0 and (args[i - 1] == '-cp' or args[i - 1] == '-classpath'):
if not allowClasspath:
mx.abort('Cannot supply explicit class path option')
else:
continue
vmArgs = args[:i]
remainder = args[i:]
return vmArgs, remainder
return args, []
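# Illustrative partitioning (hypothetical argument lists):
#   _extract_VM_args(['-Xmx1g', '-esa', 'Bench', '10'])
#       -> (['-Xmx1g', '-esa'], ['Bench', '10'])
#   _extract_VM_args(['-Xmx1g', '--', '-esa'], useDoubleDash=True)
#       -> (['-Xmx1g'], ['-esa'])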
def _run_tests(args, harness, annotations, testfile):
vmArgs, tests = _extract_VM_args(args)
for t in tests:
if t.startswith('-'):
mx.abort('VM option ' + t + ' must precede ' + tests[0])
def containsAny(c, substrings):
for s in substrings:
if s in c:
return True
return False
candidates = []
for p in mx.projects():
if mx.java().javaCompliance < p.javaCompliance:
continue
candidates += _find_classes_with_annotations(p, None, annotations).keys()
classes = []
if len(tests) == 0:
classes = candidates
else:
for t in tests:
found = False
for c in candidates:
if t in c:
found = True
classes.append(c)
if not found:
mx.log('warning: no tests matched by substring "' + t)
projectscp = mx.classpath([pcp.name for pcp in mx.projects() if pcp.javaCompliance <= mx.java().javaCompliance])
if len(classes) != 0:
f_testfile = open(testfile, 'w')
for c in classes:
f_testfile.write(c + '\n')
f_testfile.close()
harness(projectscp, vmArgs)
def _unittest(args, annotations):
mxdir = dirname(__file__)
name = 'JUnitWrapper'
javaSource = join(mxdir, name + '.java')
javaClass = join(mxdir, name + '.class')
testfile = os.environ.get('MX_TESTFILE', None)
if testfile is None:
(_, testfile) = tempfile.mkstemp(".testclasses", "graal")
os.close(_)
def harness(projectscp, vmArgs):
if not exists(javaClass) or getmtime(javaClass) < getmtime(javaSource):
subprocess.check_call([mx.java().javac, '-cp', projectscp, '-d', mxdir, javaSource])
if not isGraalEnabled(_get_vm()):
prefixArgs = ['-esa', '-ea']
else:
prefixArgs = ['-XX:-BootstrapGraal', '-esa', '-ea']
with open(testfile) as fp:
testclasses = [l.rstrip() for l in fp.readlines()]
if len(testclasses) == 1:
# Execute Junit directly when one test is being run. This simplifies
# replaying the VM execution in a native debugger (e.g., gdb).
vm(prefixArgs + vmArgs + ['-cp', projectscp, 'org.junit.runner.JUnitCore'] + testclasses)
else:
vm(prefixArgs + vmArgs + ['-cp', projectscp + os.pathsep + mxdir, name] + [testfile])
try:
_run_tests(args, harness, annotations, testfile)
finally:
if os.environ.get('MX_TESTFILE') is None:
os.remove(testfile)
_unittestHelpSuffix = """
If filters are supplied, only tests whose fully qualified name
includes a filter as a substring are run.
For example, this command line:
mx unittest -G:Dump= -G:MethodFilter=BC_aload.* -G:+PrintCFG BC_aload
will run all JUnit test classes that contain 'BC_aload' in their
fully qualified name and will pass these options to the VM:
-G:Dump= -G:MethodFilter=BC_aload.* -G:+PrintCFG
To get around command line length limitations on some OSes, the
JUnit class names to be executed are written to a file that a
custom JUnit wrapper reads and passes onto JUnit proper. The
MX_TESTFILE environment variable can be set to specify a
file which will not be deleted once the unittests are done
(unlike the temporary file otherwise used).
As with all other commands, using the global '-v' before 'unittest'
command will cause mx to show the complete command line
it uses to run the VM.
"""
def unittest(args):
"""run the JUnit tests (all testcases){0}"""
_unittest(args, ['@Test', '@LongTest', '@Parameters'])
def shortunittest(args):
"""run the JUnit tests (short testcases only){0}"""
_unittest(args, ['@Test'])
def longunittest(args):
"""run the JUnit tests (long testcases only){0}"""
_unittest(args, ['@LongTest', '@Parameters'])
def buildvms(args):
"""build one or more VMs in various configurations"""
vmsDefault = ','.join(_vmChoices.keys())
vmbuildsDefault = ','.join(_vmbuildChoices)
    parser = ArgumentParser(prog='mx buildvms')
parser.add_argument('--vms', help='a comma separated list of VMs to build (default: ' + vmsDefault + ')', metavar='<args>', default=vmsDefault)
parser.add_argument('--builds', help='a comma separated list of build types (default: ' + vmbuildsDefault + ')', metavar='<args>', default=vmbuildsDefault)
parser.add_argument('-n', '--no-check', action='store_true', help='omit running "java -version" after each build')
parser.add_argument('-c', '--console', action='store_true', help='send build output to console instead of log file')
args = parser.parse_args(args)
vms = args.vms.split(',')
builds = args.builds.split(',')
allStart = time.time()
for v in vms:
for vmbuild in builds:
if v == 'original' and vmbuild != 'product':
continue
if not args.console:
logFile = join(v + '-' + vmbuild + '.log')
log = open(join(_graal_home, logFile), 'wb')
start = time.time()
mx.log('BEGIN: ' + v + '-' + vmbuild + '\t(see: ' + logFile + ')')
# Run as subprocess so that output can be directed to a file
subprocess.check_call([sys.executable, '-u', join('mxtool', 'mx.py'), '--vm', v, '--vmbuild', vmbuild, 'build'], cwd=_graal_home, stdout=log, stderr=subprocess.STDOUT)
duration = datetime.timedelta(seconds=time.time() - start)
mx.log('END: ' + v + '-' + vmbuild + '\t[' + str(duration) + ']')
else:
with VM(v, vmbuild):
build([])
if not args.no_check:
vmargs = ['-version']
if v == 'graal':
vmargs.insert(0, '-XX:-BootstrapGraal')
vm(vmargs, vm=v, vmbuild=vmbuild)
allDuration = datetime.timedelta(seconds=time.time() - allStart)
mx.log('TOTAL TIME: ' + '[' + str(allDuration) + ']')
def gate(args):
"""run the tests used to validate a push
If this command exits with a 0 exit code, then the source code is in
a state that would be accepted for integration into the main repository."""
class Task:
def __init__(self, title):
self.start = time.time()
self.title = title
self.end = None
self.duration = None
mx.log(time.strftime('gate: %d %b %Y %H:%M:%S: BEGIN: ') + title)
def stop(self):
self.end = time.time()
self.duration = datetime.timedelta(seconds=self.end - self.start)
mx.log(time.strftime('gate: %d %b %Y %H:%M:%S: END: ') + self.title + ' [' + str(self.duration) + ']')
return self
def abort(self, codeOrMessage):
self.end = time.time()
self.duration = datetime.timedelta(seconds=self.end - self.start)
mx.log(time.strftime('gate: %d %b %Y %H:%M:%S: ABORT: ') + self.title + ' [' + str(self.duration) + ']')
mx.abort(codeOrMessage)
return self
    parser = ArgumentParser(prog='mx gate')
parser.add_argument('-j', '--omit-java-clean', action='store_false', dest='cleanJava', help='omit cleaning Java native code')
parser.add_argument('-n', '--omit-native-clean', action='store_false', dest='cleanNative', help='omit cleaning and building native code')
parser.add_argument('-g', '--only-build-graalvm', action='store_false', dest='buildNonGraal', help='only build the Graal VM')
parser.add_argument('--jacocout', help='specify the output directory for jacoco report')
args = parser.parse_args(args)
global _jacoco
tasks = []
total = Task('Gate')
try:
t = Task('Clean')
cleanArgs = []
if not args.cleanNative:
cleanArgs.append('--no-native')
if not args.cleanJava:
cleanArgs.append('--no-java')
clean(cleanArgs)
tasks.append(t.stop())
t = Task('IDEConfigCheck')
mx.ideclean([])
mx.ideinit([])
tasks.append(t.stop())
eclipse_exe = os.environ.get('ECLIPSE_EXE')
if eclipse_exe is not None:
t = Task('CodeFormatCheck')
if mx.eclipseformat(['-e', eclipse_exe]) != 0:
t.abort('Formatter modified files - run "mx eclipseformat", check in changes and repush')
tasks.append(t.stop())
t = Task('Canonicalization Check')
mx.log(time.strftime('%d %b %Y %H:%M:%S - Ensuring mx/projects files are canonicalized...'))
if mx.canonicalizeprojects([]) != 0:
t.abort('Rerun "mx canonicalizeprojects" and check-in the modified mx/projects files.')
tasks.append(t.stop())
t = Task('BuildJava')
build(['--no-native', '--jdt-warning-as-error'])
tasks.append(t.stop())
t = Task('Checkstyle')
if mx.checkstyle([]) != 0:
t.abort('Checkstyle warnings were found')
tasks.append(t.stop())
if exists('jacoco.exec'):
os.unlink('jacoco.exec')
if args.jacocout is not None:
_jacoco = 'append'
else:
_jacoco = 'off'
t = Task('BuildHotSpotGraal: fastdebug,product')
buildvms(['--vms', 'graal,server', '--builds', 'fastdebug,product'])
tasks.append(t.stop())
with VM('graal', 'fastdebug'):
t = Task('BootstrapWithSystemAssertions:fastdebug')
vm(['-esa', '-version'])
tasks.append(t.stop())
with VM('graal', 'product'):
t = Task('BootstrapWithGCVerification:product')
vm(['-XX:+UnlockDiagnosticVMOptions', '-XX:+VerifyBeforeGC', '-XX:+VerifyAfterGC', '-version'])
tasks.append(t.stop())
with VM('graal', 'product'):
t = Task('BootstrapWithG1GCVerification:product')
vm(['-XX:+UnlockDiagnosticVMOptions', '-XX:-UseSerialGC','-XX:+UseG1GC','-XX:+UseNewCode','-XX:+VerifyBeforeGC', '-XX:+VerifyAfterGC', '-version'])
tasks.append(t.stop())
with VM('graal', 'product'):
t = Task('BootstrapWithRegisterPressure:product')
vm(['-G:RegisterPressure=rbx,r11,r10,r14,xmm3,xmm11,xmm14', '-esa', '-version'])
tasks.append(t.stop())
with VM('graal', 'product'):
t = Task('BootstrapWithAOTConfiguration:product')
vm(['-G:+AOTCompilation', '-G:+VerifyPhases', '-esa', '-version'])
tasks.append(t.stop())
with VM('server', 'product'): # hosted mode
t = Task('UnitTests:hosted-product')
unittest([])
tasks.append(t.stop())
for vmbuild in ['fastdebug', 'product']:
for test in sanitycheck.getDacapos(level=sanitycheck.SanityCheckLevel.Gate, gateBuildLevel=vmbuild):
t = Task(str(test) + ':' + vmbuild)
if not test.test('graal'):
t.abort(test.name + ' Failed')
tasks.append(t.stop())
if args.jacocout is not None:
jacocoreport([args.jacocout])
_jacoco = 'off'
t = Task('CleanAndBuildGraalVisualizer')
mx.run(['ant', '-f', join(_graal_home, 'visualizer', 'build.xml'), '-q', 'clean', 'build'])
tasks.append(t.stop())
# Prevent Graal modifications from breaking the standard builds
if args.buildNonGraal:
t = Task('BuildHotSpotVarieties')
buildvms(['--vms', 'client,server', '--builds', 'fastdebug,product'])
buildvms(['--vms', 'server-nograal', '--builds', 'product'])
buildvms(['--vms', 'server-nograal', '--builds', 'optimized'])
tasks.append(t.stop())
for vmbuild in ['product', 'fastdebug']:
for theVm in ['client', 'server']:
with VM(theVm, vmbuild):
t = Task('DaCapo_pmd:' + theVm + ':' + vmbuild)
dacapo(['pmd'])
tasks.append(t.stop())
t = Task('UnitTests:' + theVm + ':' + vmbuild)
unittest(['-XX:CompileCommand=exclude,*::run*', 'graal.api'])
tasks.append(t.stop())
except KeyboardInterrupt:
total.abort(1)
except BaseException as e:
import traceback
traceback.print_exc()
total.abort(str(e))
total.stop()
mx.log('Gate task times:')
for t in tasks:
mx.log(' ' + str(t.duration) + '\t' + t.title)
mx.log(' =======')
mx.log(' ' + str(total.duration))
def deoptalot(args):
"""bootstrap a fastdebug Graal VM with DeoptimizeALot and VerifyOops on
If the first argument is a number, the process will be repeated
this number of times. All other arguments are passed to the VM."""
count = 1
if len(args) > 0 and args[0].isdigit():
count = int(args[0])
del args[0]
for _ in range(count):
if not vm(['-XX:+DeoptimizeALot', '-XX:+VerifyOops'] + args + ['-version'], vmbuild='fastdebug') == 0:
mx.abort("Failed")
def longtests(args):
deoptalot(['15', '-Xmx48m'])
dacapo(['100', 'eclipse', '-esa'])
def gv(args):
"""run the Graal Visualizer"""
with open(join(_graal_home, '.graal_visualizer.log'), 'w') as fp:
mx.logv('[Graal Visualizer log is in ' + fp.name + ']')
if not exists(join(_graal_home, 'visualizer', 'build.xml')):
mx.logv('[This initial execution may take a while as the NetBeans platform needs to be downloaded]')
mx.run(['ant', '-f', join(_graal_home, 'visualizer', 'build.xml'), '-l', fp.name, 'run'])
def igv(args):
"""run the Ideal Graph Visualizer"""
with open(join(_graal_home, '.ideal_graph_visualizer.log'), 'w') as fp:
mx.logv('[Ideal Graph Visualizer log is in ' + fp.name + ']')
if not exists(join(_graal_home, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'nbplatform')):
mx.logv('[This initial execution may take a while as the NetBeans platform needs to be downloaded]')
mx.run(['ant', '-f', join(_graal_home, 'src', 'share', 'tools', 'IdealGraphVisualizer', 'build.xml'), '-l', fp.name, 'run'])
def bench(args):
"""run benchmarks and parse their output for results
    Results are JSON formatted: {group : {benchmark : score}}."""
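    # Illustrative shape of the printed results (hypothetical numbers):
    #   {"dacapo": {"pmd": 3456.0}, "bootstrap": {"BootstrapTime": 12.3}}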
resultFile = None
if '-resultfile' in args:
index = args.index('-resultfile')
if index + 1 < len(args):
resultFile = args[index + 1]
del args[index]
del args[index]
else:
mx.abort('-resultfile must be followed by a file name')
vm = _get_vm()
    if len(args) == 0:
args = ['all']
vmArgs = [arg for arg in args if arg.startswith('-')]
def benchmarks_in_group(group):
prefix = group + ':'
return [a[len(prefix):] for a in args if a.startswith(prefix)]
results = {}
benchmarks = []
    # DaCapo
if ('dacapo' in args or 'all' in args):
benchmarks += sanitycheck.getDacapos(level=sanitycheck.SanityCheckLevel.Benchmark)
else:
dacapos = benchmarks_in_group('dacapo')
for dacapo in dacapos:
if dacapo not in sanitycheck.dacapoSanityWarmup.keys():
mx.abort('Unknown DaCapo : ' + dacapo)
iterations = sanitycheck.dacapoSanityWarmup[dacapo][sanitycheck.SanityCheckLevel.Benchmark]
if (iterations > 0):
benchmarks += [sanitycheck.getDacapo(dacapo, iterations)]
if ('scaladacapo' in args or 'all' in args):
benchmarks += sanitycheck.getScalaDacapos(level=sanitycheck.SanityCheckLevel.Benchmark)
else:
scaladacapos = benchmarks_in_group('scaladacapo')
for scaladacapo in scaladacapos:
if scaladacapo not in sanitycheck.dacapoScalaSanityWarmup.keys():
mx.abort('Unknown Scala DaCapo : ' + scaladacapo)
iterations = sanitycheck.dacapoScalaSanityWarmup[scaladacapo][sanitycheck.SanityCheckLevel.Benchmark]
if (iterations > 0):
benchmarks += [sanitycheck.getScalaDacapo(scaladacapo, ['-n', str(iterations)])]
    # Bootstrap
if ('bootstrap' in args or 'all' in args):
benchmarks += sanitycheck.getBootstraps()
    # SPECjvm2008
if ('specjvm2008' in args or 'all' in args):
benchmarks += [sanitycheck.getSPECjvm2008(['-ikv', '-wt', '120', '-it', '120'])]
else:
specjvms = benchmarks_in_group('specjvm2008')
for specjvm in specjvms:
benchmarks += [sanitycheck.getSPECjvm2008(['-ikv', '-wt', '120', '-it', '120', specjvm])]
if ('specjbb2005' in args or 'all' in args):
benchmarks += [sanitycheck.getSPECjbb2005()]
    if ('specjbb2013' in args):  # not currently part of the default 'all' set
benchmarks += [sanitycheck.getSPECjbb2013()]
if ('ctw-full' in args):
benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.Full))
if ('ctw-noinline' in args):
benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.NoInline))
if ('ctw-nocomplex' in args):
benchmarks.append(sanitycheck.getCTW(vm, sanitycheck.CTWMode.NoComplex))
for test in benchmarks:
for (groupName, res) in test.bench(vm, extraVmOpts=vmArgs).items():
group = results.setdefault(groupName, {})
group.update(res)
mx.log(json.dumps(results))
if resultFile:
with open(resultFile, 'w') as f:
f.write(json.dumps(results))
def specjvm2008(args):
"""run one or more SPECjvm2008 benchmarks"""
def launcher(bm, harnessArgs, extraVmOpts):
return sanitycheck.getSPECjvm2008(harnessArgs + [bm]).bench(_get_vm(), extraVmOpts=extraVmOpts)
availableBenchmarks = set(sanitycheck.specjvm2008Names)
for name in sanitycheck.specjvm2008Names:
parts = name.rsplit('.', 1)
if len(parts) > 1:
assert len(parts) == 2
group = parts[0]
print group
availableBenchmarks.add(group)
_run_benchmark(args, sorted(availableBenchmarks), launcher)
def specjbb2013(args):
"""runs the composite SPECjbb2013 benchmark"""
def launcher(bm, harnessArgs, extraVmOpts):
assert bm is None
return sanitycheck.getSPECjbb2013(harnessArgs).bench(_get_vm(), extraVmOpts=extraVmOpts)
_run_benchmark(args, None, launcher)
def specjbb2005(args):
"""runs the composite SPECjbb2005 benchmark"""
def launcher(bm, harnessArgs, extraVmOpts):
assert bm is None
return sanitycheck.getSPECjbb2005(harnessArgs).bench(_get_vm(), extraVmOpts=extraVmOpts)
_run_benchmark(args, None, launcher)
def hsdis(args, copyToDir=None):
"""download the hsdis library
This is needed to support HotSpot's assembly dumping features.
By default it downloads the Intel syntax version, use the 'att' argument to install AT&T syntax."""
flavor = 'intel'
if 'att' in args:
flavor = 'att'
lib = mx.add_lib_suffix('hsdis-' + _arch())
path = join(_graal_home, 'lib', lib)
if not exists(path):
mx.download(path, ['http://lafo.ssw.uni-linz.ac.at/hsdis/' + flavor + "/" + lib])
if copyToDir is not None and exists(copyToDir):
shutil.copy(path, copyToDir)
def hcfdis(args):
"""disassemble HexCodeFiles embedded in text files
Run a tool over the input files to convert all embedded HexCodeFiles
to a disassembled format."""
    parser = ArgumentParser(prog='mx hcfdis')
parser.add_argument('-m', '--map', help='address to symbol map applied to disassembler output')
parser.add_argument('files', nargs=REMAINDER, metavar='files...')
args = parser.parse_args(args)
path = join(_graal_home, 'lib', 'hcfdis-1.jar')
if not exists(path):
mx.download(path, ['http://lafo.ssw.uni-linz.ac.at/hcfdis-1.jar'])
mx.run_java(['-jar', path] + args.files)
if args.map is not None:
addressRE = re.compile(r'0[xX]([A-Fa-f0-9]+)')
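        # The map file is assumed to hold one 'address symbol' pair per line,
        # e.g. '0x7f3a2c001000 StubRoutines::call_stub' (hypothetical values).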
with open(args.map) as fp:
lines = fp.read().splitlines()
symbols = dict()
for l in lines:
addressAndSymbol = l.split(' ', 1)
if len(addressAndSymbol) == 2:
                address, symbol = addressAndSymbol
if address.startswith('0x'):
address = long(address, 16)
symbols[address] = symbol
for f in args.files:
with open(f) as fp:
lines = fp.read().splitlines()
updated = False
for i in range(0, len(lines)):
l = lines[i]
for m in addressRE.finditer(l):
sval = m.group(0)
val = long(sval, 16)
sym = symbols.get(val)
if sym:
l = l.replace(sval, sym)
updated = True
lines[i] = l
if updated:
mx.log('updating ' + f)
with open('new_' + f, "w") as fp:
for l in lines:
print >> fp, l
def jacocoreport(args):
"""create a JaCoCo coverage report
Creates the report from the 'jacoco.exec' file in the current directory.
Default output directory is 'coverage', but an alternative can be provided as an argument."""
jacocoreport = mx.library("JACOCOREPORT", True)
out = 'coverage'
if len(args) == 1:
out = args[0]
elif len(args) > 1:
mx.abort('jacocoreport takes only one argument : an output directory')
mx.run_java(['-jar', jacocoreport.get_path(True), '-in', 'jacoco.exec', '-g', join(_graal_home, 'graal'), out])
def isGraalEnabled(vm):
return vm != 'original' and not vm.endswith('nograal')
def site(args):
"""create a website containing javadoc and the project dependency graph"""
return mx.site(['--name', 'Graal',
'--jd', '@-tag', '--jd', '@test:X',
'--jd', '@-tag', '--jd', '@run:X',
'--jd', '@-tag', '--jd', '@bug:X',
'--jd', '@-tag', '--jd', '@summary:X',
'--jd', '@-tag', '--jd', '@vmoption:X',
'--overview', join(_graal_home, 'graal', 'overview.html'),
'--title', 'Graal OpenJDK Project Documentation',
'--dot-output-base', 'projects'] + args)
def mx_init(suite):
commands = {
'build': [build, ''],
'buildvars': [buildvars, ''],
'buildvms': [buildvms, '[-options]'],
'clean': [clean, ''],
'hsdis': [hsdis, '[att]'],
'hcfdis': [hcfdis, ''],
'igv' : [igv, ''],
'jdkhome': [print_jdkhome, ''],
'dacapo': [dacapo, '[VM options] benchmarks...|"all" [DaCapo options]'],
'scaladacapo': [scaladacapo, '[VM options] benchmarks...|"all" [Scala DaCapo options]'],
'specjvm2008': [specjvm2008, '[VM options] benchmarks...|"all" [SPECjvm2008 options]'],
'specjbb2013': [specjbb2013, '[VM options] [-- [SPECjbb2013 options]]'],
'specjbb2005': [specjbb2005, '[VM options] [-- [SPECjbb2005 options]]'],
'gate' : [gate, '[-options]'],
'gv' : [gv, ''],
'bench' : [bench, '[-resultfile file] [all(default)|dacapo|specjvm2008|bootstrap]'],
'unittest' : [unittest, '[VM options] [filters...]', _unittestHelpSuffix],
'longunittest' : [longunittest, '[VM options] [filters...]', _unittestHelpSuffix],
'shortunittest' : [shortunittest, '[VM options] [filters...]', _unittestHelpSuffix],
'jacocoreport' : [jacocoreport, '[output directory]'],
'site' : [site, '[-options]'],
'vm': [vm, '[-options] class [args...]'],
'vmg': [vmg, '[-options] class [args...]'],
'vmfg': [vmfg, '[-options] class [args...]'],
'deoptalot' : [deoptalot, '[n]'],
'longtests' : [longtests, '']
}
mx.add_argument('--jacoco', help='instruments com.oracle.* classes using JaCoCo', default='off', choices=['off', 'on', 'append'])
mx.add_argument('--vmcwd', dest='vm_cwd', help='current directory will be changed to <path> before the VM is executed', default=None, metavar='<path>')
mx.add_argument('--installed-jdks', help='the base directory in which the JDKs cloned from $JAVA_HOME exist. ' +
'The VM selected by --vm and --vmbuild options is under this directory (i.e., ' +
join('<path>', '<vmbuild>', 'jre', 'lib', '<vm>', mx.add_lib_prefix(mx.add_lib_suffix('jvm'))) + ')', default=None, metavar='<path>')
if (_vmSourcesAvailable):
mx.add_argument('--vm', action='store', dest='vm', choices=_vmChoices.keys(), help='the VM type to build/run')
mx.add_argument('--vmbuild', action='store', dest='vmbuild', choices=_vmbuildChoices, help='the VM build to build/run (default: ' + _vmbuildChoices[0] +')')
mx.add_argument('--ecl', action='store_true', dest='make_eclipse_launch', help='create launch configuration for running VM execution(s) in Eclipse')
mx.add_argument('--vmprefix', action='store', dest='vm_prefix', help='prefix for running the VM (e.g. "/usr/bin/gdb --args")', metavar='<prefix>')
mx.add_argument('--gdb', action='store_const', const='/usr/bin/gdb --args', dest='vm_prefix', help='alias for --vmprefix "/usr/bin/gdb --args"')
commands.update({
'export': [export, '[-options] [zipfile]'],
})
mx.update_commands(suite, commands)
def mx_post_parse_cmd_line(opts):
# TODO _minVersion check could probably be part of a Suite in mx?
    if (mx.java().version < _minVersion):
mx.abort('Requires Java version ' + str(_minVersion) + ' or greater, got version ' + str(mx.java().version))
if (_vmSourcesAvailable):
if hasattr(opts, 'vm') and opts.vm is not None:
global _vm
_vm = opts.vm
if hasattr(opts, 'vmbuild') and opts.vmbuild is not None:
global _vmbuild
_vmbuild = opts.vmbuild
global _make_eclipse_launch
_make_eclipse_launch = getattr(opts, 'make_eclipse_launch', False)
global _jacoco
_jacoco = opts.jacoco
global _vm_cwd
_vm_cwd = opts.vm_cwd
global _installed_jdks
_installed_jdks = opts.installed_jdks
global _vm_prefix
_vm_prefix = opts.vm_prefix
mx.distribution('GRAAL').add_update_listener(_installGraalJarInJdks)
| kevinmcain/graal | mx/commands.py | Python | gpl-2.0 | 58,280 |
import amino_acid
from amino_acid import peptide_to_indices
import reduced_alphabet
from features import (
make_ngram_dataset, transform_rows, toxin_features
)
import iedb
import imma2
import calis
import toxin
import fritsch_neoepitopes
| cpcloud/pepdata | pepdata/__init__.py | Python | apache-2.0 | 242 |
'''
Tests for fpformat module
Nick Mathewson
'''
from test.test_support import run_unittest, import_module
import unittest
fpformat = import_module('fpformat', deprecated=True)
fix, sci, NotANumber = fpformat.fix, fpformat.sci, fpformat.NotANumber
StringType = type('')
# Test the old and obsolescent fpformat module.
#
# (It's obsolescent because fix(n,d) == "%.*f"%(d,n) and
# sci(n,d) == "%.*e"%(d,n)
# for all reasonable numeric n and d, except that sci gives 3 exponent
# digits instead of 2.
#
# Differences only occur for unreasonable n and d. <.2 wink>)
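# For instance (values mirrored in the tests below): fix(100.0/3, 3) yields
# '33.333', matching "%.3f" % (100.0/3); sci(100.0/3, 3) yields '3.333e+001'
# whereas "%.3e" % (100.0/3) yields '3.333e+01' (two exponent digits).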
class FpformatTest(unittest.TestCase):
def checkFix(self, n, digits):
result = fix(n, digits)
if isinstance(n, StringType):
n = repr(n)
expected = "%.*f" % (digits, float(n))
self.assertEquals(result, expected)
def checkSci(self, n, digits):
result = sci(n, digits)
if isinstance(n, StringType):
n = repr(n)
expected = "%.*e" % (digits, float(n))
# add the extra 0 if needed
num, exp = expected.split("e")
if len(exp) < 4:
exp = exp[0] + "0" + exp[1:]
expected = "%se%s" % (num, exp)
self.assertEquals(result, expected)
def test_basic_cases(self):
self.assertEquals(fix(100.0/3, 3), '33.333')
self.assertEquals(sci(100.0/3, 3), '3.333e+001')
def test_reasonable_values(self):
for d in range(7):
for val in (1000.0/3, 1000, 1000.0, .002, 1.0/3, 1e10):
for realVal in (val, 1.0/val, -val, -1.0/val):
self.checkFix(realVal, d)
self.checkSci(realVal, d)
def test_failing_values(self):
# Now for 'unreasonable n and d'
self.assertEquals(fix(1.0, 1000), '1.'+('0'*1000))
self.assertEquals(sci("1"+('0'*1000), 0), '1e+1000')
# This behavior is inconsistent. sci raises an exception; fix doesn't.
yacht = "Throatwobbler Mangrove"
self.assertEquals(fix(yacht, 10), yacht)
try:
sci(yacht, 10)
except NotANumber:
pass
else:
self.fail("No exception on non-numeric sci")
def test_main():
run_unittest(FpformatTest)
if __name__ == "__main__":
test_main()
| leighpauls/k2cro4 | third_party/python_26/Lib/test/test_fpformat.py | Python | bsd-3-clause | 2,316 |
from __future__ import print_function
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.utils.typechecks import assert_is_type
from h2o.frame import H2OFrame
def h2o_H2OFrame_ascharacter():
"""
Python API test: h2o.frame.H2OFrame.ascharacter()
Copied from pyunit_ascharacter.py
"""
h2oframe = h2o.import_file(path=pyunit_utils.locate("smalldata/junit/cars.csv"))
newFrame = h2oframe['cylinders'].ascharacter()
assert_is_type(newFrame, H2OFrame)
assert newFrame.isstring()[0], "h2o.H2OFrame.ascharacter() command is not working."
pyunit_utils.standalone_test(h2o_H2OFrame_ascharacter)
|
h2oai/h2o-3
|
h2o-py/tests/testdir_apis/Data_Manipulation/pyunit_h2oH2OFrame_ascharacter.py
|
Python
|
apache-2.0
| 665
|
#!/usr/bin/env python
#coding:utf-8
# Created: 10.02.2010
# Copyright (C) 2010, Manfred Moitzi
# License: MIT License
__author__ = "mozman <mozman@gmx.at>"
import unittest
from dxfwrite.tableentries import Style
from dxfwrite import dxfstr, DXFEngine
class TestStyleTableEntry(unittest.TestCase):
expected = " 0\nSTYLE\n 2\nARIAL\n 70\n0\n 40\n0.0\n 41\n1.0\n 42\n1.0\n 50\n" \
"75.0\n 71\n4\n 3\nArial.ttf\n 4\n\n"
def set_params(self, style):
style['oblique'] = 75
style['generation_flags'] = 4
def test_create_table_entry(self):
style = Style("ARIAL", font='Arial.ttf')
self.set_params(style)
self.assertEqual(dxfstr(style), self.expected)
def test_style_by_factory(self):
style = DXFEngine.style("ARIAL",
font='Arial.ttf')
self.set_params(style)
self.assertEqual(dxfstr(style), self.expected)
if __name__ == '__main__':
unittest.main()
|
sbarton272/AcousticBarcodes-Explorations
|
barcodes/dxfwrite/tests/test_style.py
|
Python
|
mit
| 978
|
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gcloud supplementary help topic command group."""
from googlecloudsdk.calliope import base
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Topic(base.Group):
"""gcloud supplementary help.
The {command} command group provides supplementary help for topics not
directly associated with individual commands.
More information on {command} can be found by running:
$ gcloud topic [TOPIC_NAME]
  For a list of available topics, run:
$ gcloud topic --help
"""
|
Sorsly/subtle
|
google-cloud-sdk/lib/surface/topic/__init__.py
|
Python
|
mit
| 1,077
|
import redis
from flask import Flask
from flask_sqlalchemy import SQLAlchemy  # the flask.ext.* import shim was removed in Flask 1.0
from rq import Queue
app = Flask(__name__)
app.config.from_object('config')
db = SQLAlchemy(app)
conn = redis.from_url(app.config['REDIS_URL'])
q = Queue(connection=conn)
import tracker.views
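# Illustrative usage (editor's sketch, not part of the original module): other
# modules can enqueue background work on the shared RQ queue, e.g.
#     from tracker import q
#     job = q.enqueue(some_function, arg)   # some_function is hypothetical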
|
gnarula/fedora-patch-tracker
|
tracker/__init__.py
|
Python
|
gpl-2.0
| 278
|
from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HUnitR04c_CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HUnitR04c_CompleteLHS
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HUnitR04c_CompleteLHS, self).__init__(name='HUnitR04c_CompleteLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """return True"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HUnitR04c_CompleteLHS')
self["equations"] = []
# Set the node attributes
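        # Editor's note: add_node() appends in call order, so self.vs[i] below
        # is the i-th node; indices 0-17 are the class nodes and 18-34 are the
        # association/trace nodes wired together by the edge list at the end.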
# match class State(State) node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """return True"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__State"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'State')
# apply class ProcDef(4.2.a.0ProcDef) node
self.add_node()
self.vs[1]["MT_pre__attr1"] = """return True"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["mm__"] = """MT_pre__ProcDef"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.0ProcDef')
# apply class LocalDef(4.2.a.1LocalDef) node
self.add_node()
self.vs[2]["MT_pre__attr1"] = """return True"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["mm__"] = """MT_pre__LocalDef"""
self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.1LocalDef')
# apply class Name(4.2.a.2Name) node
self.add_node()
self.vs[3]["MT_pre__attr1"] = """return True"""
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["mm__"] = """MT_pre__Name"""
self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.2Name')
# apply class New(4.2.a.3New) node
self.add_node()
self.vs[4]["MT_pre__attr1"] = """return True"""
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["mm__"] = """MT_pre__New"""
self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.3New')
# apply class Name(4.2.a.4Name) node
self.add_node()
self.vs[5]["MT_pre__attr1"] = """return True"""
self.vs[5]["MT_label__"] = """6"""
self.vs[5]["mm__"] = """MT_pre__Name"""
self.vs[5]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.4Name')
# apply class Name(4.2.a.5Name) node
self.add_node()
self.vs[6]["MT_pre__attr1"] = """return True"""
self.vs[6]["MT_label__"] = """7"""
self.vs[6]["mm__"] = """MT_pre__Name"""
self.vs[6]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.5Name')
# apply class Name(4.2.a.6Name) node
self.add_node()
self.vs[7]["MT_pre__attr1"] = """return True"""
self.vs[7]["MT_label__"] = """8"""
self.vs[7]["mm__"] = """MT_pre__Name"""
self.vs[7]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.6Name')
# apply class Par(4.2.a.7Par) node
self.add_node()
self.vs[8]["MT_pre__attr1"] = """return True"""
self.vs[8]["MT_label__"] = """9"""
self.vs[8]["mm__"] = """MT_pre__Par"""
self.vs[8]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.7Par')
# apply class Inst(4.2.a.8Inst) node
self.add_node()
self.vs[9]["MT_pre__attr1"] = """return True"""
self.vs[9]["MT_label__"] = """10"""
self.vs[9]["mm__"] = """MT_pre__Inst"""
self.vs[9]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.8Inst')
# apply class Inst(4.2.a.9Inst) node
self.add_node()
self.vs[10]["MT_pre__attr1"] = """return True"""
self.vs[10]["MT_label__"] = """11"""
self.vs[10]["mm__"] = """MT_pre__Inst"""
self.vs[10]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.9Inst')
# apply class Name(4.2.a.10Name) node
self.add_node()
self.vs[11]["MT_pre__attr1"] = """return True"""
self.vs[11]["MT_label__"] = """12"""
self.vs[11]["mm__"] = """MT_pre__Name"""
self.vs[11]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.10Name')
# apply class Name(4.2.a.11Name) node
self.add_node()
self.vs[12]["MT_pre__attr1"] = """return True"""
self.vs[12]["MT_label__"] = """13"""
self.vs[12]["mm__"] = """MT_pre__Name"""
self.vs[12]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.11Name')
# apply class Name(4.2.a.12Name) node
self.add_node()
self.vs[13]["MT_pre__attr1"] = """return True"""
self.vs[13]["MT_label__"] = """14"""
self.vs[13]["mm__"] = """MT_pre__Name"""
self.vs[13]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.12Name')
# apply class Name(4.2.a.13Name) node
self.add_node()
self.vs[14]["MT_pre__attr1"] = """return True"""
self.vs[14]["MT_label__"] = """15"""
self.vs[14]["mm__"] = """MT_pre__Name"""
self.vs[14]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.13Name')
# apply class Name(4.2.a.14Name) node
self.add_node()
self.vs[15]["MT_pre__attr1"] = """return True"""
self.vs[15]["MT_label__"] = """16"""
self.vs[15]["mm__"] = """MT_pre__Name"""
self.vs[15]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.14Name')
# apply class Name(4.2.a.15Name) node
self.add_node()
self.vs[16]["MT_pre__attr1"] = """return True"""
self.vs[16]["MT_label__"] = """17"""
self.vs[16]["mm__"] = """MT_pre__Name"""
self.vs[16]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.15Name')
# apply class Name(4.2.a.16Name) node
self.add_node()
self.vs[17]["MT_pre__attr1"] = """return True"""
self.vs[17]["MT_label__"] = """18"""
self.vs[17]["mm__"] = """MT_pre__Name"""
self.vs[17]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.16Name')
# apply association ProcDef--p-->LocalDefnode
self.add_node()
self.vs[18]["MT_pre__attr1"] = """return attr_value == "p" """
self.vs[18]["MT_label__"] = """19"""
self.vs[18]["mm__"] = """MT_pre__directLink_T"""
self.vs[18]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.0ProcDefassoc184.2.a.1LocalDef')
# apply association ProcDef--channelNames-->Namenode
self.add_node()
self.vs[19]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[19]["MT_label__"] = """20"""
self.vs[19]["mm__"] = """MT_pre__directLink_T"""
self.vs[19]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.0ProcDefassoc194.2.a.2Name')
# apply association LocalDef--p-->Newnode
self.add_node()
self.vs[20]["MT_pre__attr1"] = """return attr_value == "p" """
self.vs[20]["MT_label__"] = """21"""
self.vs[20]["mm__"] = """MT_pre__directLink_T"""
self.vs[20]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.1LocalDefassoc204.2.a.3New')
# apply association New--channelNames-->Namenode
self.add_node()
self.vs[21]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[21]["MT_label__"] = """22"""
self.vs[21]["mm__"] = """MT_pre__directLink_T"""
self.vs[21]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.3Newassoc214.2.a.4Name')
# apply association New--channelNames-->Namenode
self.add_node()
self.vs[22]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[22]["MT_label__"] = """23"""
self.vs[22]["mm__"] = """MT_pre__directLink_T"""
self.vs[22]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.3Newassoc224.2.a.5Name')
# apply association New--channelNames-->Namenode
self.add_node()
self.vs[23]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[23]["MT_label__"] = """24"""
self.vs[23]["mm__"] = """MT_pre__directLink_T"""
self.vs[23]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.3Newassoc234.2.a.6Name')
# apply association New--p-->Parnode
self.add_node()
self.vs[24]["MT_pre__attr1"] = """return attr_value == "p" """
self.vs[24]["MT_label__"] = """25"""
self.vs[24]["mm__"] = """MT_pre__directLink_T"""
self.vs[24]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.3Newassoc244.2.a.7Par')
# apply association Par--p-->Instnode
self.add_node()
self.vs[25]["MT_pre__attr1"] = """return attr_value == "p" """
self.vs[25]["MT_label__"] = """26"""
self.vs[25]["mm__"] = """MT_pre__directLink_T"""
self.vs[25]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.7Parassoc254.2.a.9Inst')
# apply association Par--p-->Instnode
self.add_node()
self.vs[26]["MT_pre__attr1"] = """return attr_value == "p" """
self.vs[26]["MT_label__"] = """27"""
self.vs[26]["mm__"] = """MT_pre__directLink_T"""
self.vs[26]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.7Parassoc264.2.a.8Inst')
# apply association Inst--channelNames-->Namenode
self.add_node()
self.vs[27]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[27]["MT_label__"] = """28"""
self.vs[27]["mm__"] = """MT_pre__directLink_T"""
self.vs[27]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.8Instassoc274.2.a.10Name')
# apply association Inst--channelNames-->Namenode
self.add_node()
self.vs[28]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[28]["MT_label__"] = """29"""
self.vs[28]["mm__"] = """MT_pre__directLink_T"""
self.vs[28]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.8Instassoc284.2.a.11Name')
# apply association Inst--channelNames-->Namenode
self.add_node()
self.vs[29]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[29]["MT_label__"] = """30"""
self.vs[29]["mm__"] = """MT_pre__directLink_T"""
self.vs[29]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.8Instassoc294.2.a.12Name')
# apply association Inst--channelNames-->Namenode
self.add_node()
self.vs[30]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[30]["MT_label__"] = """31"""
self.vs[30]["mm__"] = """MT_pre__directLink_T"""
self.vs[30]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.8Instassoc304.2.a.13Name')
# apply association Inst--channelNames-->Namenode
self.add_node()
self.vs[31]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[31]["MT_label__"] = """32"""
self.vs[31]["mm__"] = """MT_pre__directLink_T"""
self.vs[31]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.9Instassoc314.2.a.14Name')
# apply association Inst--channelNames-->Namenode
self.add_node()
self.vs[32]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[32]["MT_label__"] = """33"""
self.vs[32]["mm__"] = """MT_pre__directLink_T"""
self.vs[32]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.9Instassoc324.2.a.15Name')
# apply association Inst--channelNames-->Namenode
self.add_node()
self.vs[33]["MT_pre__attr1"] = """return attr_value == "channelNames" """
self.vs[33]["MT_label__"] = """34"""
self.vs[33]["mm__"] = """MT_pre__directLink_T"""
self.vs[33]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.9Instassoc334.2.a.16Name')
# trace association ProcDef--trace-->nullnode
self.add_node()
self.vs[34]["MT_label__"] = """35"""
self.vs[34]["mm__"] = """MT_pre__trace_link"""
self.vs[34]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'4.2.a.0ProcDefassoc34State')
self['equations'].append(((3,'literal'),('constant','sh')))
self['equations'].append(((5,'literal'),('constant','exit_in')))
self['equations'].append(((6,'literal'),('constant','exack_in')))
self['equations'].append(((7,'literal'),('constant','sh_in')))
self['equations'].append(((9,'name'),('constant','C')))
self['equations'].append(((10,'name'),('constant','H')))
self['equations'].append(((11,'literal'),('constant','enp')))
self['equations'].append(((12,'literal'),('constant','exit_in')))
self['equations'].append(((13,'literal'),('constant','exack_in')))
self['equations'].append(((14,'literal'),('constant','sh_in')))
self['equations'].append(((15,'literal'),('constant','exit_in')))
self['equations'].append(((16,'literal'),('constant','exack_in')))
self['equations'].append(((17,'literal'),('constant','sh_in')))
# Add the edges
self.add_edges([
(1,18), # apply class ProcDef(4.2.a.0ProcDef) -> association p
(18,2), # association LocalDef -> apply class LocalDef(4.2.a.1LocalDef)
(1,19), # apply class ProcDef(4.2.a.0ProcDef) -> association channelNames
(19,3), # association Name -> apply class Name(4.2.a.2Name)
(2,20), # apply class LocalDef(4.2.a.1LocalDef) -> association p
(20,4), # association New -> apply class New(4.2.a.3New)
(4,21), # apply class New(4.2.a.3New) -> association channelNames
(21,5), # association Name -> apply class Name(4.2.a.4Name)
(4,22), # apply class New(4.2.a.3New) -> association channelNames
(22,6), # association Name -> apply class Name(4.2.a.5Name)
(4,23), # apply class New(4.2.a.3New) -> association channelNames
(23,7), # association Name -> apply class Name(4.2.a.6Name)
(4,24), # apply class New(4.2.a.3New) -> association p
(24,8), # association Par -> apply class Par(4.2.a.7Par)
(8,25), # apply class Par(4.2.a.7Par) -> association p
(25,10), # association Inst -> apply class Inst(4.2.a.9Inst)
(8,26), # apply class Par(4.2.a.7Par) -> association p
(26,9), # association Inst -> apply class Inst(4.2.a.8Inst)
(9,27), # apply class Inst(4.2.a.8Inst) -> association channelNames
(27,11), # association Name -> apply class Name(4.2.a.10Name)
(9,28), # apply class Inst(4.2.a.8Inst) -> association channelNames
(28,12), # association Name -> apply class Name(4.2.a.11Name)
(9,29), # apply class Inst(4.2.a.8Inst) -> association channelNames
(29,13), # association Name -> apply class Name(4.2.a.12Name)
(9,30), # apply class Inst(4.2.a.8Inst) -> association channelNames
(30,14), # association Name -> apply class Name(4.2.a.13Name)
(10,31), # apply class Inst(4.2.a.9Inst) -> association channelNames
(31,15), # association Name -> apply class Name(4.2.a.14Name)
(10,32), # apply class Inst(4.2.a.9Inst) -> association channelNames
(32,16), # association Name -> apply class Name(4.2.a.15Name)
(10,33), # apply class Inst(4.2.a.9Inst) -> association channelNames
(33,17), # association Name -> apply class Name(4.2.a.16Name)
(1,34), # apply class ProcDef(State) -> backward_association
(34,0), # backward_associationnull -> match_class null(State)
])
# define evaluation methods for each match class.
def eval_attr11(self, attr_value, this):
return True
# define evaluation methods for each apply class.
def eval_attr12(self, attr_value, this):
return True
def eval_attr13(self, attr_value, this):
return True
def eval_attr14(self, attr_value, this):
return True
def eval_attr15(self, attr_value, this):
return True
def eval_attr16(self, attr_value, this):
return True
def eval_attr17(self, attr_value, this):
return True
def eval_attr18(self, attr_value, this):
return True
def eval_attr19(self, attr_value, this):
return True
def eval_attr110(self, attr_value, this):
return True
def eval_attr111(self, attr_value, this):
return True
def eval_attr112(self, attr_value, this):
return True
def eval_attr113(self, attr_value, this):
return True
def eval_attr114(self, attr_value, this):
return True
def eval_attr115(self, attr_value, this):
return True
def eval_attr116(self, attr_value, this):
return True
def eval_attr117(self, attr_value, this):
return True
def eval_attr118(self, attr_value, this):
return True
# define evaluation methods for each match association.
# define evaluation methods for each apply association.
def eval_attr119(self, attr_value, this):
return attr_value == "p"
def eval_attr120(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr121(self, attr_value, this):
return attr_value == "p"
def eval_attr122(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr123(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr124(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr125(self, attr_value, this):
return attr_value == "p"
def eval_attr126(self, attr_value, this):
return attr_value == "p"
def eval_attr127(self, attr_value, this):
return attr_value == "p"
def eval_attr128(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr129(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr130(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr131(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr132(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr133(self, attr_value, this):
return attr_value == "channelNames"
def eval_attr134(self, attr_value, this):
return attr_value == "channelNames"
def constraint(self, PreNode, graph):
return True
|
levilucio/SyVOLT
|
UMLRT2Kiltera_MM/Properties/unit_contracts/HUnitR04c_CompleteLHS.py
|
Python
|
mit
| 16,499
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'ManyToManyValue.object_ids'
db.delete_column('philo_manytomanyvalue', 'object_ids')
# Adding M2M table for field values on 'ManyToManyValue'
db.create_table('philo_manytomanyvalue_values', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('manytomanyvalue', models.ForeignKey(orm['philo.manytomanyvalue'], null=False)),
('foreignkeyvalue', models.ForeignKey(orm['philo.foreignkeyvalue'], null=False))
))
db.create_unique('philo_manytomanyvalue_values', ['manytomanyvalue_id', 'foreignkeyvalue_id'])
def backwards(self, orm):
# Adding field 'ManyToManyValue.object_ids'
db.add_column('philo_manytomanyvalue', 'object_ids', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(max_length=300, null=True, blank=True), keep_default=False)
# Removing M2M table for field values on 'ManyToManyValue'
db.delete_table('philo_manytomanyvalue_values')
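    # Editor's note: forwards() replaces the old comma-separated id column with
    # a real M2M join table (philo_manytomanyvalue_values); backwards()
    # restores the column and drops the table.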
models = {
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'philo.attribute': {
'Meta': {'unique_together': "(('key', 'entity_content_type', 'entity_object_id'), ('value_content_type', 'value_object_id'))", 'object_name': 'Attribute'},
'entity_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_entity_set'", 'to': "orm['contenttypes.ContentType']"}),
'entity_object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attribute_value_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'value_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'philo.collection': {
'Meta': {'object_name': 'Collection'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'philo.collectionmember': {
'Meta': {'object_name': 'CollectionMember'},
'collection': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['philo.Collection']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'member_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'member_object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'philo.contentlet': {
'Meta': {'object_name': 'Contentlet'},
'content': ('philo.models.fields.TemplateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contentlets'", 'to': "orm['philo.Page']"})
},
'philo.contentreference': {
'Meta': {'object_name': 'ContentReference'},
'content_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'contentreferences'", 'to': "orm['philo.Page']"})
},
'philo.file': {
'Meta': {'object_name': 'File'},
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mimetype': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'philo.foreignkeyvalue': {
'Meta': {'object_name': 'ForeignKeyValue'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'foreign_key_value_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'philo.jsonvalue': {
'Meta': {'object_name': 'JSONValue'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('philo.models.fields.JSONField', [], {})
},
'philo.manytomanyvalue': {
'Meta': {'object_name': 'ManyToManyValue'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'many_to_many_value_set'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'values': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['philo.ForeignKeyValue']", 'null': 'True', 'blank': 'True'})
},
'philo.node': {
'Meta': {'object_name': 'Node'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['philo.Node']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'}),
'view_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'node_view_set'", 'to': "orm['contenttypes.ContentType']"}),
'view_object_id': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'philo.page': {
'Meta': {'object_name': 'Page'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pages'", 'to': "orm['philo.Template']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'philo.redirect': {
'Meta': {'object_name': 'Redirect'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'status_code': ('django.db.models.fields.IntegerField', [], {'default': '302'}),
'target': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'philo.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'})
},
'philo.template': {
'Meta': {'object_name': 'Template'},
'code': ('philo.models.fields.TemplateField', [], {}),
'documentation': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mimetype': ('django.db.models.fields.CharField', [], {'default': "'text/html'", 'max_length': '255'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['philo.Template']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'db_index': 'True'})
}
}
complete_apps = ['philo']
|
ithinksw/philo
|
philo/migrations/0008_auto__del_field_manytomanyvalue_object_ids.py
|
Python
|
isc
| 9,212
|
"""Test providers performance."""
import time
import gc
import dependency_injector.providers
class Tester(object):
"""Performance tester for provider module implementations."""
def __init__(self, provider_modules, duration_factor):
"""Initializer."""
self.provider_modules = provider_modules
self.tests = [getattr(self, name)
for name in dir(self)
if name.startswith("test")]
self.total_time = 0
self.duration_factor = duration_factor
def run(self):
"""Run all tests for all provider modules."""
for module in self.provider_modules:
print("\n")
print("Running tests for module - \"{module}\":"
.format(module=module.__name__))
gc.disable()
for test in self.tests:
start_time = time.time()
test(module)
                elapsed = time.time() - start_time
                self.total_time += elapsed
                print("Test \"{test}\" took - {time}"
                      .format(test=test.__name__,
                              time=elapsed))
gc.collect()
gc.enable()
print("\n")
def test_raw_3_kw_injections(self, providers):
"""Test 3 keyword argument injections."""
class A(object):
pass
class B(object):
pass
class C(object):
pass
class Test(object):
def __init__(self, a, b, c):
pass
for x in range(int(5000000 * self.duration_factor)):
Test(a=A(), b=B(), c=C())
def test_factory_3_factory_kw_injections(self, providers):
"""Test factory with 3 keyword argument injections via factories."""
class A(object):
pass
class B(object):
pass
class C(object):
pass
class Test(object):
def __init__(self, a, b, c):
pass
a_factory = providers.Factory(A)
b_factory = providers.Factory(B)
c_factory = providers.Factory(C)
test_factory = providers.Factory(Test,
a=a_factory,
b=b_factory,
c=c_factory)
for x in range(int(5000000 * self.duration_factor)):
test_factory()
def test_abstract_factory_3_factory_kw_injections(self, providers):
"""Test factory with 3 keyword argument injections via factories."""
class A(object):
pass
class B(object):
pass
class C(object):
pass
class Test(object):
def __init__(self, a, b, c):
pass
a_factory = providers.Factory(A)
b_factory = providers.Factory(B)
c_factory = providers.Factory(C)
test_factory = providers.AbstractFactory(object)
test_factory.override(providers.Factory(Test,
a=a_factory,
b=b_factory,
c=c_factory))
for x in range(int(5000000 * self.duration_factor)):
test_factory()
def test_factory_6_factory_kw_injections_0_context(self, providers):
"""Test factory with 6 keyword argument injections."""
class Test(object):
def __init__(self, a, b, c, d, e, f):
pass
test_factory = providers.Factory(Test, a=1, b=2, c=3, d=4, e=5, f=6)
for x in range(int(5000000 * self.duration_factor)):
test_factory()
def test_factory_6_factory_kw_injections_1_context(self, providers):
"""Test factory with 6 keyword argument injections."""
class Test(object):
def __init__(self, a, b, c, d, e, f):
pass
test_factory = providers.Factory(Test, f=6)
for x in range(int(5000000 * self.duration_factor)):
test_factory(a=1, b=2, c=3, d=4, e=5)
def test_factory_6_factory_kw_injections_3_context(self, providers):
"""Test factory with 6 keyword argument injections."""
class Test(object):
def __init__(self, a, b, c, d, e, f):
pass
test_factory = providers.Factory(Test, a=1, b=2, c=3)
for x in range(int(5000000 * self.duration_factor)):
test_factory(d=4, e=5, f=6)
if __name__ == "__main__":
tester = Tester(
provider_modules=[
dependency_injector.providers,
],
duration_factor=0.5)
tester.run()
|
ets-labs/python-dependency-injector
|
tests/performance/test.py
|
Python
|
bsd-3-clause
| 4,662
|
#!/usr/bin/env python3
VERSION = "beta0.1"
AUTHOR = "Daniel Laube <mail@dlaube.de>"
import sys
import os
import gconlib
def __main__(args):
gcon = gconlib.Gcon(os.path.dirname(os.path.realpath(__file__)) + "/", VERSION)
gcon.run()
if __name__ == "__main__":
__main__(sys.argv)
|
laubed/gcon
|
gcon.py
|
Python
|
mit
| 295
|
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2017 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
"""Module with utility functions used by several Python functions."""
from __future__ import print_function
import os
import ast
import sys
import pickle
import collections
import inspect
from .exceptions import *
from . import p4regex
from psi4 import core  # editor's addition: `core` is used throughout below
if sys.version_info[0] > 2:
basestring = str
def kwargs_lower(kwargs):
"""Function to rebuild and return *kwargs* dictionary
with all keys made lowercase. Should be called by every
function that could be called directly by the user.
Also turns boolean-like values into actual booleans.
Also turns values lowercase if sensible.
"""
caseless_kwargs = {}
# items() inefficient on Py2 but this is small dict
for key, value in kwargs.items():
lkey = key.lower()
if lkey in ['subset']: # only kw for which case matters
lvalue = value
else:
try:
lvalue = value.lower()
except (AttributeError, KeyError):
lvalue = value
if lkey in ['irrep', 'check_bsse', 'linkage', 'bsse_type']:
caseless_kwargs[lkey] = lvalue
elif 'dertype' in lkey:
if p4regex.der0th.match(str(lvalue)):
caseless_kwargs[lkey] = 0
elif p4regex.der1st.match(str(lvalue)):
caseless_kwargs[lkey] = 1
elif p4regex.der2nd.match(str(lvalue)):
caseless_kwargs[lkey] = 2
else:
raise KeyError('Derivative type key %s was not recognized' % str(key))
elif p4regex.yes.match(str(lvalue)):
caseless_kwargs[lkey] = True
elif p4regex.no.match(str(lvalue)):
caseless_kwargs[lkey] = False
else:
caseless_kwargs[lkey] = lvalue
return caseless_kwargs
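# Illustrative call (editor's sketch; assumes p4regex.yes matches 'yes'-style
# strings): kwargs_lower(Bsse_Type='CP', Return_Wfn='yes') would return
# {'bsse_type': 'cp', 'return_wfn': True}.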
def get_psifile(fileno, pidspace=str(os.getpid())):
"""Function to return the full path and filename for psi file
*fileno* (e.g., psi.32) in current namespace *pidspace*.
"""
psioh = core.IOManager.shared_object()
psio = core.IO.shared_object()
filepath = psioh.get_file_path(fileno)
namespace = psio.get_default_namespace()
targetfile = filepath + 'psi' + '.' + pidspace + '.' + namespace + '.' + str(fileno)
return targetfile
def format_molecule_for_input(mol, name='', forcexyz=False):
"""Function to return a string of the output of
:py:func:`inputparser.process_input` applied to the XYZ
format of molecule, passed as either fragmented
geometry string *mol* or molecule instance *mol*.
Used to capture molecule information from database
modules and for distributed (sow/reap) input files.
For the reverse, see :py:func:`molutil.geometry`.
"""
# when mol is already a string
if isinstance(mol, basestring):
mol_string = mol
mol_name = name
# when mol is core.Molecule or qcdb.Molecule object
else:
# save_string_for_psi4 is the more detailed choice as it includes fragment
# (and possibly no_com/no_reorient) info. but this is only available
# for qcdb Molecules. Since save_string_xyz was added to libmints just
# for the sow/reap purpose, may want to unify these fns sometime.
# the time for unification is nigh
if forcexyz:
mol_string = mol.save_string_xyz()
else:
mol_string = mol.create_psi4_string_from_molecule()
mol_name = mol.name() if name == '' else name
commands = """\nmolecule %s {\n%s%s\n}\n""" % (mol_name, mol_string,
'\nno_com\nno_reorient' if forcexyz else '')
return commands
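# Illustrative result (editor's sketch): format_molecule_for_input(
# '0 1\nHe 0 0 0', name='helium') returns
# "\nmolecule helium {\n0 1\nHe 0 0 0\n}\n".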
def format_options_for_input(molecule=None, **kwargs):
"""Function to return a string of commands to replicate the
current state of user-modified options. Used to capture C++
options information for distributed (sow/reap) input files.
.. caution:: Some features are not yet implemented. Buy a developer a coffee.
- Does not cover local (as opposed to global) options.
"""
if molecule is not None:
symmetry = molecule.find_point_group(0.00001).symbol()
commands = ''
commands += """\ncore.set_memory_bytes(%s)\n\n""" % (core.get_memory())
for chgdopt in core.get_global_option_list():
if core.has_global_option_changed(chgdopt):
chgdoptval = core.get_global_option(chgdopt)
if molecule is not None:
if chgdopt.lower() in kwargs:
if symmetry in kwargs[chgdopt.lower()]:
chgdoptval = kwargs[chgdopt.lower()][symmetry]
if isinstance(chgdoptval, basestring):
commands += """core.set_global_option('%s', '%s')\n""" % (chgdopt, chgdoptval)
# Next four lines were conflict between master and roa branches (TDC, 10/29/2014)
elif isinstance(chgdoptval, int) or isinstance(chgdoptval, float):
commands += """core.set_global_option('%s', %s)\n""" % (chgdopt, chgdoptval)
elif isinstance(chgdoptval, list):
commands += """core.set_global_option('%s', %s)\n""" % (chgdopt, chgdoptval)
else:
commands += """core.set_global_option('%s', %s)\n""" % (chgdopt, chgdoptval)
return commands
def format_kwargs_for_input(filename, lmode=1, **kwargs):
"""Function to pickle to file *filename* the options dictionary
*kwargs*. Mode *lmode* =2 pickles appropriate settings for
reap mode. Used to capture Python options information for
distributed (sow/reap) input files.
"""
if lmode == 2:
kwargs['mode'] = 'reap'
kwargs['linkage'] = os.getpid()
filename.write('''\npickle_kw = ("""'''.encode('utf-8'))
pickle.dump(kwargs, filename)
filename.write('''""")\n'''.encode('utf-8'))
filename.write("""\nkwargs = pickle.loads(pickle_kw)\n""".encode('utf-8'))
if lmode == 2:
kwargs['mode'] = 'sow'
del kwargs['linkage']
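# Editor's note: despite the name, *filename* here is an open binary file
# handle; the pickled kwargs are wrapped in a pickle_kw triple-quoted block so
# the sown input can rebuild them via pickle.loads(pickle_kw).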
def drop_duplicates(seq):
"""Function that given an array *seq*, returns an array without any duplicate
entries. There is no guarantee of which duplicate entry is dropped.
"""
    noDupes = []
    for i in seq:
        if i not in noDupes:
            noDupes.append(i)
    return noDupes
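# For example, drop_duplicates([1, 2, 1, 3, 2]) returns [1, 2, 3]; with this
# left-to-right scan the first occurrence of each entry is the one kept.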
def all_casings(input_string):
"""Function to return a generator of all lettercase permutations
of *input_string*.
"""
if not input_string:
yield ""
else:
first = input_string[:1]
if first.lower() == first.upper():
for sub_casing in all_casings(input_string[1:]):
yield first + sub_casing
else:
for sub_casing in all_casings(input_string[1:]):
yield first.lower() + sub_casing
yield first.upper() + sub_casing
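# For example, list(all_casings('ab')) == ['ab', 'Ab', 'aB', 'AB']; characters
# without distinct cases (digits, punctuation) contribute a single branch.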
def getattr_ignorecase(module, attr):
"""Function to extract attribute *attr* from *module* if *attr*
is available in any possible lettercase permutation. Returns
attribute if available, None if not.
"""
array = None
for per in list(all_casings(attr)):
try:
getattr(module, per)
except AttributeError:
pass
else:
array = getattr(module, per)
break
return array
def import_ignorecase(module):
"""Function to import *module* in any possible lettercase
permutation. Returns module object if available, None if not.
"""
modobj = None
for per in list(all_casings(module)):
try:
modobj = __import__(per)
except ImportError:
pass
else:
break
return modobj
def extract_sowreap_from_output(sowout, quantity, sownum, linkage, allvital=False, label='electronic energy'):
"""Function to examine file *sowout* from a sow/reap distributed job
for formatted line with electronic energy information about index
*sownum* to be used for construction of *quantity* computations as
    directed by master input file with *linkage* kwarg. When file *sowout*
    is missing or incomplete, the function will either return zero
(*allvital* is ``False``) or terminate (*allvital* is ``True``) since
some sow/reap procedures can produce meaningful results (database)
from an incomplete set of sown files, while others cannot (gradient,
hessian).
"""
E = 0.0
try:
freagent = open('%s.out' % (sowout), 'r')
except IOError:
if allvital:
raise ValidationError('Aborting upon output file \'%s.out\' not found.\n' % (sowout))
        else:
            # missing file is tolerated when not all sown jobs are vital
            return 0.0
else:
while True:
line = freagent.readline()
if not line:
if E == 0.0:
if allvital:
raise ValidationError('Aborting upon output file \'%s.out\' has no %s RESULT line.\n' % (sowout, quantity))
                    else:
                        # missing RESULT line tolerated when not all vital; E stays 0.0
                        pass
break
s = line.strip().split(None, 10)
if (len(s) != 0) and (s[0:3] == [quantity, 'RESULT:', 'computation']):
if int(s[3]) != linkage:
raise ValidationError('Output file \'%s.out\' has linkage %s incompatible with master.in linkage %s.'
% (sowout, str(s[3]), str(linkage)))
if s[6] != str(sownum + 1):
raise ValidationError('Output file \'%s.out\' has nominal affiliation %s incompatible with item %s.'
% (sowout, s[6], str(sownum + 1)))
if label == 'electronic energy' and s[8:10] == ['electronic', 'energy']:
E = float(s[10])
core.print_out('%s RESULT: electronic energy = %20.12f\n' % (quantity, E))
if label == 'electronic gradient' and s[8:10] == ['electronic', 'gradient']:
E = ast.literal_eval(s[-1])
core.print_out('%s RESULT: electronic gradient = %r\n' % (quantity, E))
freagent.close()
return E
def prepare_options_for_modules(changedOnly=False, commandsInsteadDict=False):
"""Function to return a string of commands to replicate the
current state of user-modified options. Used to capture C++
options information for distributed (sow/reap) input files.
.. caution:: Some features are not yet implemented. Buy a developer a coffee.
- Need some option to get either all or changed
- Need some option to either get dict or set string or psimod command list
- command return doesn't revoke has_changed setting for unchanged with changedOnly=False
"""
modules = [
# PSI4 Modules
"ADC", "CCENERGY", "CCEOM", "CCDENSITY", "CCLAMBDA", "CCHBAR",
"CCRESPONSE", "CCSORT", "CCTRIPLES", "CLAG", "CPHF", "CIS",
"DCFT", "DETCI", "DFMP2", "DFTSAPT", "FINDIF", "FNOCC", "LMP2",
"MCSCF", "MINTS", "MRCC", "OCC", "OPTKING", "PSIMRCC", "RESPONSE",
"SAPT", "SCF", "STABILITY", "THERMO", "TRANSQT", "TRANSQT2",
# External Modules
"CFOUR",
]
options = {'GLOBALS': {}}
commands = ''
for opt in core.get_global_option_list():
if core.has_global_option_changed(opt) or not changedOnly:
if opt in ['DFT_CUSTOM_FUNCTIONAL', 'EXTERN']: # Feb 2017 hack
continue
val = core.get_global_option(opt)
options['GLOBALS'][opt] = {'value': val,
'has_changed': core.has_global_option_changed(opt)}
if isinstance(val, basestring):
commands += """core.set_global_option('%s', '%s')\n""" % (opt, val)
else:
commands += """core.set_global_option('%s', %s)\n""" % (opt, val)
#if changedOnly:
# print('Appending module %s option %s value %s has_changed %s.' % \
# ('GLOBALS', opt, core.get_global_option(opt), core.has_global_option_changed(opt)))
for module in modules:
try:
if core.has_option_changed(module, opt) or not changedOnly:
if not module in options:
options[module] = {}
val = core.get_option(module, opt)
options[module][opt] = {'value': val,
'has_changed': core.has_option_changed(module, opt)}
if isinstance(val, basestring):
commands += """core.set_local_option('%s', '%s', '%s')\n""" % (module, opt, val)
else:
commands += """core.set_local_option('%s', '%s', %s)\n""" % (module, opt, val)
#if changedOnly:
# print('Appending module %s option %s value %s has_changed %s.' % \
# (module, opt, core.get_option(module, opt), core.has_option_changed(module, opt)))
except RuntimeError:
pass
if commandsInsteadDict:
return commands
else:
return options
def mat2arr(mat):
"""Function to convert core.Matrix *mat* to Python array of arrays.
Expects core.Matrix to be flat with respect to symmetry.
"""
if mat.rowdim().n() != 1:
raise ValidationError('Cannot convert Matrix with symmetry.')
arr = []
for row in range(mat.rowdim()[0]):
temp = []
for col in range(mat.coldim()[0]):
temp.append(mat.get(row, col))
arr.append(temp)
return arr
def format_currentstate_for_input(func, name, allButMol=False, **kwargs):
"""Function to return an input file in preprocessed psithon.
Captures memory, molecule, options, function, method, and kwargs.
Used to write distributed (sow/reap) input files.
"""
commands = """\n# This is a psi4 input file auto-generated from the %s() wrapper.\n\n""" % (inspect.stack()[1][3])
commands += """memory %d mb\n\n""" % (int(0.000001 * core.get_memory()))
if not allButMol:
molecule = core.get_active_molecule()
molecule.update_geometry()
commands += format_molecule_for_input(molecule)
commands += '\n'
commands += prepare_options_for_modules(changedOnly=True, commandsInsteadDict=True)
commands += """\n%s('%s', """ % (func.__name__, name.lower())
for key in kwargs.keys():
commands += """%s=%r, """ % (key, kwargs[key])
commands += ')\n\n'
return commands
def expand_psivars(pvdefs):
"""Dictionary *pvdefs* has keys with names of PsiVariables to be
created and values with dictionary of two keys: 'args', the
PsiVariables that contribute to the key and 'func', a function (or
lambda) to combine them. This function builds those PsiVariables if
    all the contributors are available. Helpful printing is available when
    PRINT >= 2.
"""
verbose = core.get_global_option('PRINT')
for pvar, action in pvdefs.items():
if verbose >= 2:
print("""building %s %s""" % (pvar, '.' * (50 - len(pvar))), end='')
psivars = core.get_variables()
data_rich_args = []
for pv in action['args']:
if isinstance(pv, basestring):
if pv in psivars:
data_rich_args.append(psivars[pv])
else:
if verbose >= 2:
print("""EMPTY, missing {}""".format(pv))
break
else:
data_rich_args.append(pv)
else:
result = action['func'](data_rich_args)
core.set_variable(pvar, result)
if verbose >= 2:
print("""SUCCESS""")
|
rmcgibbo/psi4public
|
psi4/driver/p4util/procutil.py
|
Python
|
lgpl-3.0
| 16,764
|
import pytest
import time
from rancher import ApiError
from .common import wait_for_template_to_be_created, \
wait_for_template_to_be_deleted, random_str, wait_for_atleast_workload
from .conftest import set_server_version, wait_for, DEFAULT_CATALOG
def test_catalog(admin_mc, remove_resource):
client = admin_mc.client
name1 = random_str()
name2 = random_str()
url1 = "https://github.com/StrongMonkey/charts-1.git"
url2 = "HTTP://github.com/StrongMonkey/charts-1.git"
catalog1 = client.create_catalog(name=name1,
branch="test",
url=url1,
)
remove_resource(catalog1)
catalog2 = client.create_catalog(name=name2,
branch="test",
url=url2,
)
remove_resource(catalog2)
wait_for_template_to_be_created(client, name1)
wait_for_template_to_be_created(client, name2)
client.delete(catalog1)
client.delete(catalog2)
wait_for_template_to_be_deleted(client, name1)
wait_for_template_to_be_deleted(client, name2)
def test_invalid_catalog_chars(admin_mc, remove_resource):
client = admin_mc.client
name = random_str()
url = "https://github.com/%0dStrongMonkey%0A/charts-1.git"
with pytest.raises(ApiError) as e:
catalog = client.create_catalog(name=name,
branch="test",
url=url,
)
remove_resource(catalog)
assert e.value.error.status == 422
assert e.value.error.message == "Invalid characters in catalog URL"
url = "https://github.com/StrongMonkey\t/charts-1.git"
with pytest.raises(ApiError) as e:
catalog = client.create_catalog(name=name,
branch="test",
url=url,
)
remove_resource(catalog)
assert e.value.error.status == 422
assert e.value.error.message == "Invalid characters in catalog URL"
def test_global_catalog_template_access(admin_mc, user_factory,
remove_resource):
client = admin_mc.client
user1 = user_factory()
remove_resource(user1)
name = random_str()
# Get all templates from library catalog that is enabled by default
updated = False
start = time.time()
interval = 0.5
while not updated:
time.sleep(interval)
interval *= 2
c = client.list_catalog(name="library").data[0]
if c.transitioning == "no":
updated = True
continue
if time.time() - start > 90:
raise AssertionError(
"Timed out waiting for catalog to stop transitioning")
existing = client.list_template(catalogId="library").data
templates = []
for t in existing:
templates.append("library-" + t.name)
url = "https://github.com/mrajashree/charts.git"
catalog = client.create_catalog(name=name,
branch="onlyOne",
url=url,
)
wait_for_template_to_be_created(client, name)
updated = False
start = time.time()
interval = 0.5
while not updated:
time.sleep(interval)
interval *= 2
c = client.list_catalog(name=name).data[0]
if c.transitioning == "no":
updated = True
continue
if time.time() - start > 90:
raise AssertionError(
"Timed out waiting for catalog to stop transitioning")
# Now list all templates of this catalog
new_templates = client.list_template(catalogId=name).data
for t in new_templates:
templates.append(name + "-" + t.name)
all_templates = existing + new_templates
# User should be able to list all these templates
user_client = user1.client
user_lib_templates = user_client.list_template(catalogId="library").data
user_new_templates = user_client.list_template(catalogId=name).data
user_templates = user_lib_templates + user_new_templates
assert len(user_templates) == len(all_templates)
client.delete(catalog)
wait_for_template_to_be_deleted(client, name)
def test_user_can_list_global_catalog(user_factory, remove_resource):
user1 = user_factory()
remove_resource(user1)
user_client = user1.client
c = user_client.list_catalog(name="library")
assert len(c) == 1
@pytest.mark.nonparallel
def test_template_version_links(admin_mc, admin_pc, custom_catalog,
remove_resource, restore_rancher_version):
"""Test that template versionLinks are being updated based off the rancher
version set on the server and the query paramater 'rancherVersion' being
set.
"""
# 1.6.0 uses 2.0.0-2.2.0
# 1.6.2 uses 2.1.0-2.3.0
client = admin_mc.client
c_name = random_str()
custom_catalog(name=c_name)
# Set the server expecting both versions
set_server_version(client, "2.1.0")
templates = client.list_template(
rancherVersion='2.1.0', catalogId=c_name)
assert len(templates.data[0]['versionLinks']) == 2
assert '1.6.0' in templates.data[0]['versionLinks']
assert '1.6.2' in templates.data[0]['versionLinks']
# Set the server expecting only the older version
set_server_version(client, "2.0.0")
templates = client.list_template(
rancherVersion='2.0.0', catalogId=c_name)
assert len(templates.data[0]['versionLinks']) == 1
assert '1.6.0' in templates.data[0]['versionLinks']
# Set the server expecting only the newer version
set_server_version(client, "2.3.0")
templates = client.list_template(
rancherVersion='2.3.0', catalogId=c_name)
assert len(templates.data[0]['versionLinks']) == 1
assert '1.6.2' in templates.data[0]['versionLinks']
# Set the server expecting no versions, this should be outside both
# versions acceptable ranges
set_server_version(client, "2.4.0")
templates = client.list_template(
rancherVersion='2.4.0', catalogId=c_name)
assert len(templates.data[0]['versionLinks']) == 0
def test_relative_paths(admin_mc, admin_pc, remove_resource):
""" This test adds a catalog's index.yaml with a relative chart url
and ensures that rancher can resolve the relative url"""
client = admin_mc.client
catalogname = "cat-" + random_str()
url = "https://raw.githubusercontent.com/rancher/integration-test-charts"\
"/relative-path"
catalog = client.create_catalog(catalogName=catalogname, branch="master",
url=url)
remove_resource(catalog)
catalog = client.reload(catalog)
assert catalog['url'] == url
# now deploy the app in the catalog to ensure we can resolve the tarball
ns = admin_pc.cluster.client.create_namespace(
catalogName="ns-" + random_str(),
projectId=admin_pc.project.id)
remove_resource(ns)
wait_for_template_to_be_created(client, catalog.id)
mysqlha = admin_pc.client.create_app(name="app-" + random_str(),
externalId="catalog://?catalog=" +
catalog.id +
"&template=mysql"
"&version=1.6.2",
targetNamespace=ns.name,
projectId=admin_pc.project.id)
remove_resource(mysqlha)
wait_for_atleast_workload(pclient=admin_pc.client, nsid=ns.id, timeout=60,
count=1)
def test_cannot_delete_system_catalog(admin_mc):
"""This test asserts that the system catalog cannot be delete"""
client = admin_mc.client
system_catalog = client.by_id_catalog("system-library")
with pytest.raises(ApiError) as e:
client.delete(system_catalog)
assert e.value.error.status == 422
assert e.value.error.message == 'not allowed to delete system-library' \
' catalog'
def test_system_catalog_missing_remove_link(admin_mc):
"""This test asserts that the remove link is missing from system-catalog's
links"""
client = admin_mc.client
system_catalog = client.by_id_catalog("system-library")
assert "remove" not in system_catalog.links
def test_cannot_update_system_if_embedded(admin_mc):
"""This test asserts that the system catalog cannot be updated if
system-catalog setting is set to 'bundled'"""
client = admin_mc.client
system_catalog_setting = client.by_id_setting("system-catalog")
# this could potentially interfere with other tests if they were to rely
# on system-catalog setting
client.update_by_id_setting(id=system_catalog_setting.id, value="bundled")
system_catalog = client.by_id_catalog("system-library")
with pytest.raises(ApiError) as e:
client.update_by_id_catalog(id=system_catalog.id, branch="asd")
assert e.value.error.status == 422
assert e.value.error.message == 'not allowed to edit system-library' \
' catalog'
def test_embedded_system_catalog_missing_edit_link(admin_mc):
"""This test asserts that the system catalog is missing the 'update' link
if system-catalog setting is set to 'bundled'"""
client = admin_mc.client
system_catalog_setting = client.by_id_setting("system-catalog")
# this could potentially interfere with other tests if they were to rely
# on system-catalog setting
client.update_by_id_setting(id=system_catalog_setting.id, value="bundled")
system_catalog = client.by_id_catalog("system-library")
assert "update" not in system_catalog.links
@pytest.mark.nonparallel
def test_catalog_refresh(admin_mc):
"""Test that on refresh the response includes the names of the catalogs
that are being refreshed"""
client = admin_mc.client
catalog = client.by_id_catalog("library")
out = client.action(obj=catalog, action_name="refresh")
assert out['catalogs'][0] == "library"
catalogs = client.list_catalog()
out = client.action(obj=catalogs, action_name="refresh")
    # It just needs to be more than zero; other tests can add/remove catalogs,
    # so a hard count would break
assert len(out['catalogs']) > 0, 'no catalogs in response'
def test_invalid_catalog_chart_names(admin_mc, remove_resource):
"""Test chart with invalid name in catalog error properly
and test that a chart names are truncated and processed without
error"""
client = admin_mc.client
name = random_str()
catalog = client.create_catalog(name=name,
branch="broke-charts",
url=DEFAULT_CATALOG,
)
remove_resource(catalog)
wait_for_template_to_be_created(client, catalog.id)
def get_errored_catalog(catalog):
catalog = client.reload(catalog)
if catalog.transitioning == "error":
return catalog
return None
catalog = wait_for(lambda: get_errored_catalog(catalog),
fail_handler=lambda:
"catalog was not found in error state")
templates = client.list_template(catalogId=catalog.id).data
templatesString = ','.join([str(i) for i in templates])
assert "areallylongname" not in templatesString
assert "bad-chart_name" not in templatesString
assert catalog.state == "processed"
assert catalog.transitioning == "error"
assert "Error in chart(s):" in catalog.transitioningMessage
assert "bad-chart_name" in catalog.transitioningMessage
assert "areallylongname" in catalog.transitioningMessage
# this will break if github repo changes
assert len(templates) == 6
# checking that the errored catalog can be deleted successfully
client.delete(catalog)
wait_for_template_to_be_deleted(client, name)
assert not client.list_catalog(name=name).data
def test_invalid_catalog_chart_urls(admin_mc, remove_resource):
"""Test chart with file:// and local:// url paths"""
client = admin_mc.client
name = random_str()
catalog = client.create_catalog(name=name,
branch="invalid-urls",
url=DEFAULT_CATALOG,
)
remove_resource(catalog)
wait_for_template_to_be_created(client, catalog.id)
def get_errored_catalog(catalog):
catalog = client.reload(catalog)
if catalog.transitioning == "error":
return catalog
return None
catalog = wait_for(lambda: get_errored_catalog(catalog),
fail_handler=lambda:
"catalog was not found in error state")
templates = client.list_template(catalogId=catalog.id).data
templatesString = ','.join([str(i) for i in templates])
# url in index.yaml:
# local://azure-samples.github.io/helm-charts/aks-helloworld-0.1.0.tgz
assert "aks-goodbyeworld" not in templatesString
# url in index.yaml:
# file://azure-samples.github.io/helm-charts/aks-helloworld-0.1.0.tgz
assert "aks-helloworld" not in templatesString
assert catalog.state == "processed"
assert catalog.transitioning == "error"
assert "Error in chart(s):" in catalog.transitioningMessage
assert "aks-goodbyeworld" in catalog.transitioningMessage
assert "aks-helloworld" in catalog.transitioningMessage
# this will break if github repo changes
# valid url in index.yaml:
# https://azure-samples.github.io/helm-charts/azure-vote-0.1.0.tgz
assert len(templates) == 1
# checking that the errored catalog can be deleted successfully
client.delete(catalog)
wait_for_template_to_be_deleted(client, name)
assert not client.list_catalog(name=name).data
def test_catalog_has_helmversion(admin_mc, remove_resource):
"""Test to see that the helm version can be added to a catalog
on create and that the value is passed to the template"""
client = admin_mc.client
name1 = random_str()
name2 = random_str()
catalog1 = client.create_catalog(name=name1,
branch="master",
url=DEFAULT_CATALOG,
)
remove_resource(catalog1)
catalog2 = client.create_catalog(name=name2,
branch="master",
url=DEFAULT_CATALOG,
helmVersion="helm_v3"
)
remove_resource(catalog2)
wait_for_template_to_be_created(client, name1)
wait_for_template_to_be_created(client, name2)
assert "helm_v3" not in catalog1
assert catalog2.helmVersion == "helm_v3"
templates1 = client.list_template(catalogId=catalog1.id).data
for template in templates1:
assert "helmVersion" not in template.status
templates2 = client.list_template(catalogId=catalog2.id).data
for template in templates2:
assert "helmVersion" in template.status
assert template.status.helmVersion == "helm_v3"
def test_refresh_catalog_access(admin_mc, user_mc):
"""Tests that a user with standard access is not
able to refresh a catalog.
"""
catalog = admin_mc.client.by_id_catalog("library")
out = admin_mc.client.action(obj=catalog, action_name="refresh")
assert out['catalogs'][0] == "library"
# use catalog obj from admin client to get action not available to user
with pytest.raises(ApiError) as e:
user_mc.client.action(obj=catalog, action_name="refresh")
assert e.value.error.status == 404
|
cjellick/rancher
|
tests/integration/suite/test_catalog.py
|
Python
|
apache-2.0
| 16,016
|
# Natural Language Toolkit: Aligned Sentences
#
# Copyright (C) 2001-2010 NLTK Project
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
import logging
from collections import defaultdict
import nltk.metrics
class AlignedSent(object):
"""
Aligned sentence object. Encapsulates two sentences along with
an C{Alignment} between them.
"""
    def __init__(self, words=[], mots=[], alignment='', encoding='latin-1'):
"""
Initialize a new C{AlignedSent}.
@param words: source language words
@type words: C{list} of C{str}
@param mots: target language words
@type mots: C{list} of C{str}
@param alignment: the word-level alignments between the source
and target language
@type alignment: C{Alignment}
"""
if not isinstance(alignment, Alignment):
alignment = Alignment(alignment)
self._words = words
self._mots = mots
self._check_align(alignment)
self._alignment = alignment
@property
def words(self):
return self._words
@property
def mots(self):
return self._mots
@property
def alignment(self):
return self._alignment
@alignment.setter
def alignment(self, alignment):
if not isinstance(alignment, Alignment):
alignment = Alignment(alignment)
self._check_align(alignment)
self._alignment = alignment
def _check_align(self, a):
"""
@param a: alignment to be checked
@raise IndexError: if alignment is out of sentence boundary
@return: True if passed alignment check
@rtype: boolean
"""
if not all([0 <= p[0] < len(self._words) for p in a]):
raise IndexError("Alignment is outside boundary of words")
if not all([0 <= p[1] < len(self._mots) for p in a]):
raise IndexError("Alignment is outside boundary of mots")
return True
def __repr__(self):
"""
@return: A string representation for this C{AlignedSent}.
@rtype: C{string}
"""
return "AlignedSent(%r, %r, %r)" % (self._words, self._mots, self._alignment)
def __str__(self):
"""
@return: A string representation for this C{AlignedSent}.
@rtype: C{string}
"""
source = " ".join(self._words)[:20] + "..."
target = " ".join(self._mots)[:20] + "..."
return "<AlignedSent: '%s' -> '%s'>" % (source, target)
def invert(self):
"""
        @return: the inverted C{AlignedSent} (words and mots swapped)
@rtype: AlignedSent
"""
return AlignedSent(self._mots, self._words,
self._alignment.invert())
def precision(self, reference):
"""Calculates the precision of an aligned sentence with respect to a
"gold standard" reference C{AlignedSent}.
The "possible" precision is used since it doesn't penalise for finding
an alignment that was marked as "possible".
@type reference: C{AlignedSent} or C{Alignment}
@param reference: A "gold standard" reference aligned sentence.
@rtype: C{float} or C{None}
"""
# Get alignments in set of 2-tuples form
align = self.alignment
if isinstance(reference, AlignedSent):
possible = reference.alignment
else:
possible = Alignment(reference)
        # Call NLTK's existing function for precision
return nltk.metrics.scores.precision(possible, align)
def recall(self, reference):
"""Calculates the recall of an aligned sentence with respect to a
"gold standard" reference C{AlignedSent}.
The "sure" recall is used so we don't penalise for missing an
alignment that was only marked as "possible".
@type reference: C{AlignedSent} or C{Alignment}
@param reference: A "gold standard" reference aligned sentence.
@rtype: C{float} or C{None}
"""
# Get alignments in set of 2-tuples form
align = self.alignment
if isinstance(reference, AlignedSent):
sure = reference.alignment
else:
sure = Alignment(reference)
        # Call NLTK's existing function for recall
return nltk.metrics.scores.recall(sure, align)
def alignment_error_rate(self, reference, possible=None):
"""Calculates the Alignment Error Rate (AER) of an aligned sentence
with respect to a "gold standard" reference C{AlignedSent}.
Return an error rate between 0.0 (perfect alignment) and 1.0 (no
alignment).
@type reference: C{AlignedSent} or C{Alignment}
@param reference: A "gold standard" reference aligned sentence.
@type possible: C{AlignedSent} or C{Alignment} or C{None}
@param possible: A "gold standard" reference of possible alignments
(defaults to I{reference} if C{None})
@rtype: C{float} or C{None}
"""
# Get alignments in set of 2-tuples form
align = self.alignment
if isinstance(reference, AlignedSent):
sure = reference.alignment
else:
sure = Alignment(reference)
if possible is not None:
# Set possible alignment
if isinstance(possible, AlignedSent):
possible = possible.alignment
else:
possible = Alignment(possible)
else:
# Possible alignment is just sure alignment
possible = sure
# Sanity check
assert(sure.issubset(possible))
# Return the Alignment Error Rate
return (1.0 - float(len(align & sure) + len(align & possible)) /
float(len(align) + len(sure)))
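    # Hedged worked example (illustrative numbers, not from the original
    # module): with A = {(0, 0), (1, 1)}, S = {(0, 0)} and P = S,
    # AER = 1 - (|A & S| + |A & P|) / (|A| + |S|) = 1 - (1 + 1)/(2 + 1) = 1/3.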
class Alignment(frozenset):
"""
A storage class for representing alignment between two sequences, s1, s2.
In general, an alignment is a set of tuples of the form (i, j, ...)
representing an alignment between the i-th element of s1 and the
j-th element of s2. Tuples are extensible (they might contain
additional data, such as a boolean to indicate sure vs possible alignments).
"""
def __new__(cls, string_or_pairs):
if isinstance(string_or_pairs, basestring):
string_or_pairs = [_giza2pair(p) for p in string_or_pairs.split()]
self = frozenset.__new__(cls, string_or_pairs)
if self == frozenset([]):
self._len = 0
else:
self._len = max(p[0] for p in self)
self._index = None
return self
def __getitem__(self, key):
"""
Look up the alignments that map from a given index or slice.
"""
if not self._index:
self._build_index()
return self._index.__getitem__(key)
def invert(self):
"""
Return an Alignment object, being the inverted mapping.
"""
return Alignment(((p[1], p[0]) + p[2:]) for p in self)
def range(self, positions=None):
"""
Work out the range of the mapping from the given positions.
If no positions are specified, compute the range of the entire mapping.
"""
image = set()
if not self._index:
self._build_index()
if not positions:
positions = range(len(self._index))
for p in positions:
image.update(f for _,f in self._index[p])
return sorted(image)
def __repr__(self):
"""
Produce a Giza-formatted string representing the alignment.
"""
return "Alignment(%r)" % sorted(self)
def __str__(self):
"""
Produce a Giza-formatted string representing the alignment.
"""
return " ".join("%d-%d" % p[:2] for p in sorted(self))
def _build_index(self):
"""
Build a list self._index such that self._index[i] is a list
of the alignments originating from word i.
"""
self._index = [[] for _ in range(self._len + 1)]
for p in self:
self._index[p[0]].append(p)
class EMIBMModel1(object):
'''
This class contains implementations of the Expectation Maximization
algorithm for IBM Model 1. The algorithm runs upon a sentence-aligned
parallel corpus and generates word alignments in aligned sentence pairs.
The process is divided into 2 main stages.
    Stage 1: Learns word-to-word translation probabilities by collecting
    evidence of an English word being the translation of a foreign word
    from the parallel corpus.
Stage 2: Based on the translation probabilities from Stage 1, generates
word alignments for aligned sentence pairs.
'''
def __init__(self, aligned_sents, convergent_threshold=1e-2, debug=False):
'''
Initialize a new C{EMIBMModel1}.
        @param aligned_sents: The parallel text corpus: an iterable
            containing AlignedSent instances of aligned sentence pairs
            from the corpus.
@type aligned_sents: C{list} of L{AlignedSent} objects
@param convergent_threshold: The threshold value of convergence. An
entry is considered converged if the delta from old_t to new_t
is less than this value. The algorithm terminates when all entries
are converged. This parameter is optional, default is 0.01
@type convergent_threshold: C{float}
'''
self.aligned_sents = aligned_sents
self.convergent_threshold = convergent_threshold
# Dictionary of translation probabilities t(e,f).
self.probabilities = None
def train(self):
'''
The train() function implements Expectation Maximization training
stage that learns word-to-word translation probabilities.
@return: Number of iterations taken to converge
'''
# Collect up sets of all English and foreign words
english_words = set()
foreign_words = set()
for aligned_sent in self.aligned_sents:
english_words.update(aligned_sent.words)
foreign_words.update(aligned_sent.mots)
# add the NULL token to the foreign word set.
foreign_words.add(None)
num_probs = len(english_words)*len(foreign_words)
# Initialise t(e|f) uniformly
t = defaultdict(lambda: float(1)/len(english_words))
s_total = defaultdict(float)
        # Touch every (e, f) pair once so the defaultdict is fully
        # populated; the convergence count below assumes num_probs entries.
        for e in english_words:
            for f in foreign_words:
                t[e, f]
globally_converged = False
iteration_count = 0
while not globally_converged:
# count(e|f)
count = defaultdict(float)
# total(f)
total = defaultdict(float)
for aligned_sent in self.aligned_sents:
# Compute normalization
for e_w in aligned_sent.words:
s_total[e_w] = 0.0
for f_w in aligned_sent.mots+[None]:
s_total[e_w] += t[e_w, f_w]
# Collect counts
for e_w in aligned_sent.words:
for f_w in aligned_sent.mots+[None]:
cnt = t[e_w, f_w] / s_total[e_w]
count[e_w, f_w] += cnt
total[f_w] += cnt
# Estimate probabilities
num_converged = 0
for f_w in foreign_words:
for e_w in english_words:
new_prob = count[e_w, f_w] / total[f_w]
delta = abs(t[e_w, f_w] - new_prob)
if delta < self.convergent_threshold:
num_converged += 1
t[e_w, f_w] = new_prob
# Have we converged
iteration_count += 1
if num_converged == num_probs:
globally_converged = True
logging.debug("%d/%d (%.2f%%) converged"%(
num_converged, num_probs, 100.0*num_converged/num_probs))
self.probabilities = dict(t)
return iteration_count
def aligned(self):
'''
Returns a list of AlignedSents with Alignments calculated using
IBM-Model 1.
'''
        if self.probabilities is None:
raise ValueError("No probabilities calculated")
aligned = []
# Alignment Learning from t(e|f)
for aligned_sent in self.aligned_sents:
alignment = []
# for every English word
for j, e_w in enumerate(aligned_sent.words):
# find the French word that gives maximized t(e|f)
# NULL token is the initial candidate
f_max = (self.probabilities[e_w, None], None)
for i, f_w in enumerate(aligned_sent.mots):
f_max = max(f_max, (self.probabilities[e_w, f_w], i))
# only output alignment with non-NULL mapping
if f_max[1] is not None:
alignment.append((j, f_max[1]))
# substitute the alignment of AlignedSent with the yielded one
aligned.append(AlignedSent(aligned_sent.words,
aligned_sent.mots, alignment))
return aligned
def _giza2pair(pair_string):
i, j = pair_string.split("-")
return int(i), int(j)
def _naacl2pair(pair_string):
i, j, p = pair_string.split("-")
return int(i), int(j)
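# --- Illustrative usage sketch (added; not part of the original module).
# A minimal toy corpus, invented for the example, showing AlignedSent
# scoring and EMIBMModel1 training end to end.
if __name__ == '__main__':
    corpus = [AlignedSent(['the', 'house'], ['das', 'haus']),
              AlignedSent(['the', 'book'], ['das', 'buch'])]
    model = EMIBMModel1(corpus, convergent_threshold=1e-2)
    iterations = model.train()
    print "EM converged after %d iterations" % iterations
    for sent in model.aligned():
        print sent.alignment
    # Scoring a hypothesis against a (toy) gold alignment:
    hypothesis = AlignedSent(['the', 'house'], ['das', 'haus'],
                             Alignment([(0, 0), (1, 1)]))
    gold = Alignment([(0, 0), (1, 1)])
    print hypothesis.precision(gold), hypothesis.recall(gold)
    print hypothesis.alignment_error_rate(gold)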
|
markgw/jazzparser
|
lib/nltk/align.py
|
Python
|
gpl-3.0
| 13,500
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-02 16:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('statisticscore', '0024_session_session_gender_enabled'),
]
operations = [
migrations.AlterField(
model_name='session',
name='session_picture',
field=models.ImageField(upload_to=b'session_pictures/'),
),
]
|
eyp-developers/statistics
|
statisticscore/migrations/0025_auto_20160202_1742.py
|
Python
|
gpl-3.0
| 456
|
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2014 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2015-2016 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
"""Unittest for the logging checker."""
import unittest
from astroid import test_utils
from pylint.checkers import logging
from pylint.testutils import CheckerTestCase, Message, set_config
class LoggingModuleDetectionTest(CheckerTestCase):
CHECKER_CLASS = logging.LoggingChecker
def test_detects_standard_logging_module(self):
stmts = test_utils.extract_node("""
import logging #@
logging.warn('%s' % '%s') #@
""")
self.checker.visit_module(None)
self.checker.visit_import(stmts[0])
with self.assertAddsMessages(Message('logging-not-lazy', node=stmts[1])):
self.checker.visit_call(stmts[1])
def test_detects_renamed_standard_logging_module(self):
stmts = test_utils.extract_node("""
import logging as blogging #@
blogging.warn('%s' % '%s') #@
""")
self.checker.visit_module(None)
self.checker.visit_import(stmts[0])
with self.assertAddsMessages(Message('logging-not-lazy', node=stmts[1])):
self.checker.visit_call(stmts[1])
@set_config(logging_modules=['logging', 'my.logging'])
def test_nonstandard_logging_module(self):
stmts = test_utils.extract_node("""
from my import logging as blogging #@
blogging.warn('%s' % '%s') #@
""")
self.checker.visit_module(None)
self.checker.visit_import(stmts[0])
with self.assertAddsMessages(Message('logging-not-lazy', node=stmts[1])):
self.checker.visit_call(stmts[1])
if __name__ == '__main__':
unittest.main()
|
axbaretto/beam
|
sdks/python/.tox/lint/lib/python2.7/site-packages/pylint/test/unittest_checker_logging.py
|
Python
|
apache-2.0
| 2,000
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: ‘wujn‘
@file: orm.py
@time: 2017/9/28 11:23
"""
import db
import time
import logging
_triggers = frozenset(['pre_insert', 'pre_update', 'pre_delete'])
def _gen_sql(table_name, mappings):
"""
类 ==> 表时 生成创建表的sql
"""
p_key = None
    sql = ['-- generating SQL for %s:' % table_name, 'create table `%s` (' % table_name]
for f in sorted(mappings.values(), lambda x, y: cmp(x._order, y._order)):
if not hasattr(f, 'ddl'):
raise StandardError('no ddl in field "%s".' % f)
ddl = f.ddl
nullable = f.nullable
if f.primary_key:
p_key = f.name
sql.append(' `%s` %s,' % (f.name, ddl) if nullable else ' `%s` %s not null,' % (f.name, ddl))
sql.append(' primary key(`%s`)' % p_key)
sql.append(');')
return '\n'.join(sql)
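# Illustrative note (mirrors the doctest in Module below): for a mapping with
# an IntegerField primary key `id` and a StringField `name`,
# _gen_sql('user', mappings) returns:
#   -- generating SQL for user:
#   create table `user` (
#     `id` bigint not null,
#     `name` varchar(255) not null,
#     primary key(`id`)
#   );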
class Field(object):
"""
保存数据库表的字段名和字段类型
_count:Field每实例化一次就加一
self._order:表示是该类的第几个实例
这样,在定义字段时,每个字段(field实例)都有order属性
最后生成__sql时(见_gen_sql 函数),这些字段就是按序排列
self._default: 用于让orm自己填入缺省值,缺省值可以是 可调用对象,比如函数
其他实例的属性都用来描述字段属性,例如名字、是否主键、能否为空等
"""
_count = 0
def __init__(self, **kw):
self.name = kw.get('name', None)
self._default = kw.get('default', None)
self.primary_key = kw.get('primary_key', False)
self.nullable = kw.get('nullable', False)
self.updatable = kw.get('updatable', True)
self.insertable = kw.get('insertable', True)
self.ddl = kw.get('ddl', '')
self._order = Field._count
Field._count += 1
def __str__(self):
"""
:return:实例对象的描述信息
如:
<IntegerField:id,bigint,default(0),UI>
分别对应 <类:实例:实例ddl属性:实例default信息,3种标志位:N U I>
"""
s = ['<%s:%s,%s,default(%s),' % (self.__class__, self.name, self.ddl, self._default)]
self.nullable and s.append('N')
self.updatable and s.append('U')
self.insertable and s.append('I')
s.append('>')
return ''.join(s)
@property
def default(self):
"""
使用property装饰器快速访问缺省的对象
:return: 如果可以执行就返回default(),否则返回default的值
"""
d = self._default
return d() if callable(d) else d
class StringField(Field):
"""
保存string类型字段的类
"""
def __init__(self, **kw):
if 'default' not in kw:
kw['default'] = ''
if 'ddl' not in kw:
kw['ddl'] = 'varchar(255)'
super(StringField, self).__init__(**kw)
class IntegerField(Field):
"""
保存int类型字段的值
"""
def __init__(self, **kw):
if 'default' not in kw:
kw['default'] = 0
if 'ddl' not in kw:
kw['ddl'] = 'bigint'
super(IntegerField, self).__init__(**kw)
class FloatField(Field):
"""
保存float类型字段的值
"""
def __init__(self, **kw):
if 'default' not in kw:
kw['default'] = 0.0
if 'ddl' not in kw:
kw['ddl'] = 'real'
super(FloatField, self).__init__(**kw)
class BoolenField(Field):
"""
保存布尔类型字段的值
"""
def __init__(self, **kw):
if 'default' not in kw:
kw['default'] = False
if 'ddl' not in kw:
kw['ddl'] = 'bool'
super(BoolenField, self).__init__(**kw)
class TextField(Field):
"""
保存Text类型字段的属性
"""
def __init__(self, **kw):
if 'default' not in kw:
kw['default'] = ''
if 'ddl' not in kw:
kw['ddl'] = 'text'
super(TextField, self).__init__(**kw)
class BlobField(Field):
"""
保存Blob类型字段的属性
"""
def __init__(self, **kw):
if 'default' not in kw:
kw['default'] = ''
if 'ddl' not in kw:
kw['ddl'] = 'blob'
super(BlobField, self).__init__(**kw)
class VersionField(Field):
"""
保存Version类型字段的属性
"""
def __init__(self, name=None):
super(VersionField, self).__init__(name=name, default=0, ddl='bigint')
class ModelMetaClass(type):
"""
是一个元类,主要有以下作用:
1、防止对Module类的修改
2、属性与字段的mapping
迭代类的属性字典,判断是不是Field类,添加name,进行标志位的检查,提取类属性和字段类的mapping
提取完成后删除这些类属性,避免冲突
添加__mappings__属性
3、类与表的mapping
添加__table__属性,即表名(为类名的小写)
"""
def __new__(cls, name, bases, attrs):
if name == 'Module':
return type.__new__(cls, name, bases, attrs)
# store all subclasses info:
if not hasattr(cls, 'subclasses'):
cls.subclasses = {}
if not name in cls.subclasses:
cls.subclasses[name] = name
else:
logging.warning('Redefine class: %s' % name)
logging.info('Scan ORMapping %s...' % name)
mappings = dict()
primary_key = None
for k, v in attrs.iteritems():
if isinstance(v, Field):
if not v.name:
v.name = k
if v.primary_key:
if primary_key:
raise TypeError('Cannot define more than 1 primary key in class: %s' % name)
if v.updatable:
logging.warning('NOTE: change primary key to non-updatable.')
v.updatable = False
if v.nullable:
                        logging.warning('NOTE: change primary key to non-nullable.')  # a pk must not be nullable
v.nullable = False
primary_key = v
mappings[k] = v
if not primary_key:
raise TypeError('Primary key not defined in class: %s' % name)
for k in mappings.iterkeys():
attrs.pop(k)
        if '__table__' not in attrs:
            attrs['__table__'] = name.lower()
attrs['__mappings__'] = mappings
attrs['__primary_key__'] = primary_key
attrs['__sql__'] = lambda self: _gen_sql(attrs['__table__'], mappings)
for trigger in _triggers:
if not trigger in attrs:
attrs[trigger] = None
return type.__new__(cls, name, bases, attrs)
class Module(dict):
"""
    Base class for ORM models.
    Heavy use of @classmethod lets the query helpers be called on the class
    itself, without creating an instance first.
    >>> class User(Module):
... id = IntegerField(primary_key=True)
... name = StringField()
... email = StringField(updatable=False)
... passwd = StringField(default=lambda: '******')
... last_modified = FloatField()
... def pre_insert(self):
... self.last_modified = time.time()
>>> u = User(id=10190, name='Michael', email='orm@db.org')
>>> r = u.insert()
>>> u.email
'orm@db.org'
>>> u.passwd
'******'
>>> u.last_modified > (time.time() - 2)
True
>>> f = User.get(10190)
>>> f.name
u'Michael'
>>> f.email
u'orm@db.org'
>>> f.email = 'changed@db.org'
>>> r = f.update() # change email but email is non-updatable!
>>> len(User.find_all())
1
>>> g = User.get(10190)
>>> g.email
u'orm@db.org'
>>> r = g.delete()
>>> len(db.select('select * from user where id=10190'))
0
>>> import json
>>> print User().__sql__()
-- generating SQL for user:
create table `user` (
`id` bigint not null,
`name` varchar(255) not null,
`email` varchar(255) not null,
`passwd` varchar(255) not null,
`last_modified` real not null,
primary key(`id`)
);
"""
__metaclass__ = ModelMetaClass
def __init__(self, **kw):
super(Module, self).__init__(**kw)
def __getattr__(self, key):
"""
get时生效,比如 a[key], a.get(key)
get时 返回属性的值
"""
try:
return self[key]
except KeyError:
raise AttributeError(r"'Dict' object has no attribute '%s'" % key)
def __setattr__(self, key, value):
"""
set时生效,比如 a[key] = value, a = {'key1': value1, 'key2': value2}
set时添加属性
"""
self[key] = value
@classmethod
def get(cls, pk):
"""
Get by primary key.
"""
d = db.select_one('select * from %s where %s=?' % (cls.__table__, cls.__primary_key__.name), pk)
return cls(**d) if d else None
@classmethod
def find_first(cls, where, *args):
"""
通过where语句进行条件查询,返回1个查询结果。如果有多个查询结果
仅取第一个,如果没有结果,则返回None
"""
d = db.select_one('select * from %s %s' % (cls.__table__, where), *args)
return cls(**d) if d else None
@classmethod
def find_all(cls, *args):
"""
查询所有字段, 将结果以一个列表返回
"""
L = db.select('select * from `%s`' % cls.__table__)
return [cls(**d) for d in L]
@classmethod
def find_by(cls, where, *args):
"""
通过where语句进行条件查询,将结果以一个列表返回
"""
L = db.select('select * from `%s` %s' % (cls.__table__, where), *args)
return [cls(**d) for d in L]
@classmethod
def count_all(cls):
"""
执行 select count(pk) from table语句,返回一个数值
"""
return db.select('select count(`%s`) from `%s`' % (cls.__primay_key__.name, cls.__table__))
@classmethod
def count_by(cls, where, *args):
"""
通过select count(pk) from table where ...语句进行查询, 返回一个数值
"""
return db.select_int('select count(`%s`) from `%s` %s' % (cls.__primary_key__.name, cls.__table__, where), *args)
def update(self):
"""
如果该行的字段属性有 updatable,代表该字段可以被更新
用于定义的表(继承Model的类)是一个 Dict对象,键值会变成实例的属性
所以可以通过属性来判断 用户是否定义了该字段的值
如果有属性, 就使用用户传入的值
如果无属性, 则调用字段对象的 default属性传入
具体见 Field类 的 default 属性
通过的db对象的update接口执行SQL
SQL: update `user` set `passwd`=%s,`last_modified`=%s,`name`=%s where id=%s,
ARGS: (u'******', 1441878476.202391, u'Michael', 10190
"""
self.pre_update and self.pre_update()
L = []
args = []
for k, v in self.__mappings__.iteritems():
if v.updatable:
if hasattr(self, k):
arg = getattr(self, k)
else:
arg = v.default
setattr(self, k, arg)
L.append('`%s`=?' % k)
args.append(arg)
pk = self.__primary_key__.name
args.append(getattr(self, pk))
db.update('update `%s` set %s where %s=?' % (self.__table__, ','.join(L), pk), *args)
return self
def delete(self):
"""
        Executes the SQL through the db object's update interface, e.g.
SQL: delete from `user` where `id`=%s, ARGS: (10190,)
"""
self.pre_delete and self.pre_delete()
pk = self.__primary_key__.name
args = (getattr(self, pk),)
db.update('delete from `%s` where `%s`=?' % (self.__table__, pk), *args)
return self
def insert(self):
"""
        Executes the SQL through the db object's insert interface, e.g.
SQL: insert into `user` (`passwd`,`last_modified`,`id`,`name`,`email`) values (%s,%s,%s,%s,%s),
ARGS: ('******', 1441878476.202391, 10190, 'Michael', 'orm@db.org')
"""
self.pre_insert and self.pre_insert()
params = {}
for k, v in self.__mappings__.iteritems():
if v.insertable:
if not hasattr(self, k):
setattr(self, k, v.default)
params[v.name] = getattr(self, k)
db.insert('%s' % self.__table__, **params)
return self
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
db.create_engine('root', 'SERVYOU', 'test')
db.update('drop table if exists user')
db.update('create table user (id int primary key, name text, email text, passwd text, last_modified real)')
import doctest
doctest.testmod()
|
burgerWhoo/first-webapp
|
www/transwrap/orm.py
|
Python
|
gpl-2.0
| 13,198
|
import json
from .oauth import OAuth2Test
class DigitalOceanOAuthTest(OAuth2Test):
backend_path = 'social_core.backends.digitalocean.DigitalOceanOAuth'
user_data_url = 'https://api.digitalocean.com/v2/account'
expected_username = 'sammy@digitalocean.com'
access_token_body = json.dumps({
'access_token': '547cac21118ae7',
'token_type': 'bearer',
'expires_in': 2592000,
'refresh_token': '00a3aae641658d',
'scope': 'read write',
'info': {
'name': 'Sammy Shark',
'email': 'sammy@digitalocean.com'
}
})
user_data_body = json.dumps({
"account": {
'droplet_limit': 25,
'email': 'sammy@digitalocean.com',
'uuid': 'b6fr89dbf6d9156cace5f3c78dc9851d957381ef',
'email_verified': True
}
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
|
tobias47n9e/social-core
|
social_core/tests/backends/test_digitalocean.py
|
Python
|
bsd-3-clause
| 977
|
# coding: utf-8
"""
ORCID Member
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SourceNameV30(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'value': 'str'
}
attribute_map = {
'value': 'value'
}
def __init__(self, value=None): # noqa: E501
"""SourceNameV30 - a model defined in Swagger""" # noqa: E501
self._value = None
self.discriminator = None
if value is not None:
self.value = value
@property
def value(self):
"""Gets the value of this SourceNameV30. # noqa: E501
:return: The value of this SourceNameV30. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this SourceNameV30.
:param value: The value of this SourceNameV30. # noqa: E501
:type: str
"""
self._value = value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(SourceNameV30, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SourceNameV30):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
Royal-Society-of-New-Zealand/NZ-ORCID-Hub
|
orcid_api_v3/models/source_name_v30.py
|
Python
|
mit
| 3,047
|
from .enkf_field_file_format_enum import EnkfFieldFileFormatEnum
from .load_fail_type_enum import LoadFailTypeEnum
from .enkf_var_type_enum import EnkfVarType
from .enkf_run_enum import EnkfRunType
from .enkf_obs_impl_type_enum import EnkfObservationImplementationType
from .ert_impl_type_enum import ErtImplType
from .enkf_init_modes_enum import EnkfInitModeEnum
from .realization_state_enum import RealizationStateEnum
from .enkf_truncation_type import EnkfTruncationType
from .enkf_fs_type_enum import EnKFFSType
from .gen_data_file_type_enum import GenDataFileType
from .active_mode_enum import ActiveMode
from .hook_runtime_enum import HookRuntime
__all__ = ["EnkfFieldFileFormatEnum",
"LoadFailTypeEnum",
"EnkfVarType",
"EnkfRunType",
"EnkfObservationImplementationType",
"ErtImplType",
"EnkfInitModeEnum",
"RealizationStateEnum",
"EnkfTruncationType",
"EnKFFSType",
"GenDataFileType",
"ActiveMode",
"HookRuntime"]
|
Statoil/libres
|
python/res/enkf/enums/__init__.py
|
Python
|
gpl-3.0
| 1,054
|
from __future__ import absolute_import, unicode_literals
from datetime import datetime
from itertools import count
from time import time
from celery import states
from celery.events import Event as _Event
from celery.events.state import State, Worker, Task
from celery.utils import gen_unique_id
from django.test.utils import override_settings
from django.utils import timezone
from djcelery import celery
from djcelery import snapshot
from djcelery import models
from djcelery.utils import make_aware
from djcelery.tests.utils import unittest
from djcelery.compat import unicode
_ids = count(0)
_clock = count(1)
def Event(*args, **kwargs):
kwargs.setdefault('clock', next(_clock))
kwargs.setdefault('local_received', time())
return _Event(*args, **kwargs)
def create_task(worker, **kwargs):
d = dict(uuid=gen_unique_id(),
name='djcelery.unittest.task{0}'.format(next(_ids)),
worker=worker)
return Task(**dict(d, **kwargs))
class test_Camera(unittest.TestCase):
Camera = snapshot.Camera
def setUp(self):
self.state = State()
self.cam = self.Camera(self.state)
def test_constructor(self):
cam = self.Camera(State())
self.assertTrue(cam.state)
self.assertTrue(cam.freq)
self.assertTrue(cam.cleanup_freq)
self.assertTrue(cam.logger)
def test_get_heartbeat(self):
worker = Worker(hostname='fuzzie')
self.assertIsNone(self.cam.get_heartbeat(worker))
t1 = time()
t2 = time()
t3 = time()
for t in t1, t2, t3:
worker.event('heartbeat', t, t, {})
self.state.workers[worker.hostname] = worker
self.assertEqual(self.cam.get_heartbeat(worker),
make_aware(datetime.fromtimestamp(t3)))
def test_handle_worker(self):
worker = Worker(hostname='fuzzie')
worker.event('online', time(), time(), {})
self.cam._last_worker_write.clear()
m = self.cam.handle_worker((worker.hostname, worker))
self.assertTrue(m)
self.assertTrue(m.hostname)
self.assertTrue(m.last_heartbeat)
self.assertTrue(m.is_alive())
self.assertEqual(unicode(m), unicode(m.hostname))
self.assertTrue(repr(m))
def test_handle_task_received(self):
worker = Worker(hostname='fuzzie')
worker.event('online', time(), time(), {})
self.cam.handle_worker((worker.hostname, worker))
task = create_task(worker)
task.event('received', time(), time(), {})
self.assertEqual(task.state, 'RECEIVED')
mt = self.cam.handle_task((task.uuid, task))
self.assertEqual(mt.name, task.name)
self.assertTrue(unicode(mt))
self.assertTrue(repr(mt))
mt.eta = celery.now()
self.assertIn('eta', unicode(mt))
self.assertIn(mt, models.TaskState.objects.active())
def test_handle_task(self):
worker1 = Worker(hostname='fuzzie')
worker1.event('online', time(), time(), {})
mw = self.cam.handle_worker((worker1.hostname, worker1))
task1 = create_task(worker1)
task1.event('received', time(), time(), {})
mt = self.cam.handle_task((task1.uuid, task1))
self.assertEqual(mt.worker, mw)
worker2 = Worker(hostname=None)
task2 = create_task(worker2)
task2.event('received', time(), time(), {})
mt = self.cam.handle_task((task2.uuid, task2))
self.assertIsNone(mt.worker)
task1.event('succeeded', time(), time(), {'result': 42})
self.assertEqual(task1.state, states.SUCCESS)
self.assertEqual(task1.result, 42)
mt = self.cam.handle_task((task1.uuid, task1))
self.assertEqual(mt.name, task1.name)
self.assertEqual(mt.result, 42)
task3 = create_task(worker1, name=None)
task3.event('revoked', time(), time(), {})
mt = self.cam.handle_task((task3.uuid, task3))
self.assertIsNone(mt)
def test_handle_task_timezone(self):
worker = Worker(hostname='fuzzie')
worker.event('online', time(), time(), {})
self.cam.handle_worker((worker.hostname, worker))
tstamp = 1464793200.0 # 2016-06-01T15:00:00Z
with override_settings(USE_TZ=True, TIME_ZONE='Europe/Helsinki'):
task = create_task(worker,
eta='2016-06-01T15:16:17.654321+00:00',
expires='2016-07-01T15:16:17.765432+03:00')
task.event('received', tstamp, tstamp, {})
mt = self.cam.handle_task((task.uuid, task))
self.assertEqual(
mt.tstamp,
datetime(2016, 6, 1, 15, 0, 0, tzinfo=timezone.utc),
)
self.assertEqual(
mt.eta,
datetime(2016, 6, 1, 15, 16, 17, 654321, tzinfo=timezone.utc),
)
self.assertEqual(
mt.expires,
datetime(2016, 7, 1, 12, 16, 17, 765432, tzinfo=timezone.utc),
)
task = create_task(worker, eta='2016-06-04T15:16:17.654321')
task.event('received', tstamp, tstamp, {})
mt = self.cam.handle_task((task.uuid, task))
self.assertEqual(
mt.eta,
datetime(2016, 6, 4, 15, 16, 17, 654321, tzinfo=timezone.utc),
)
with override_settings(USE_TZ=False, TIME_ZONE='Europe/Helsinki'):
task = create_task(worker,
eta='2016-06-01T15:16:17.654321+00:00',
expires='2016-07-01T15:16:17.765432+03:00')
task.event('received', tstamp, tstamp, {})
mt = self.cam.handle_task((task.uuid, task))
self.assertEqual(mt.tstamp, datetime(2016, 6, 1, 18, 0, 0))
self.assertEqual(mt.eta, datetime(2016, 6, 1, 18, 16, 17, 654321))
self.assertEqual(mt.expires,
datetime(2016, 7, 1, 15, 16, 17, 765432))
task = create_task(worker, eta='2016-06-04T15:16:17.654321')
task.event('received', tstamp, tstamp, {})
mt = self.cam.handle_task((task.uuid, task))
self.assertEqual(mt.eta, datetime(2016, 6, 4, 15, 16, 17, 654321))
def assertExpires(self, dec, expired, tasks=10):
# Cleanup leftovers from previous tests
self.cam.on_cleanup()
worker = Worker(hostname='fuzzie')
worker.event('online', time(), time(), {})
for total in range(tasks):
task = create_task(worker)
task.event('received', time() - dec, time() - dec, {})
task.event('succeeded', time() - dec, time() - dec, {'result': 42})
self.assertTrue(task.name)
self.assertTrue(self.cam.handle_task((task.uuid, task)))
self.assertEqual(self.cam.on_cleanup(), expired)
def test_on_cleanup_expires(self, dec=332000):
self.assertExpires(dec, 10)
def test_on_cleanup_does_not_expire_new(self, dec=0):
self.assertExpires(dec, 0)
def test_on_shutter(self):
state = self.state
cam = self.cam
ws = ['worker1.ex.com', 'worker2.ex.com', 'worker3.ex.com']
uus = [gen_unique_id() for i in range(50)]
events = [Event('worker-online', hostname=ws[0]),
Event('worker-online', hostname=ws[1]),
Event('worker-online', hostname=ws[2]),
Event('task-received',
uuid=uus[0], name='A', hostname=ws[0]),
Event('task-started',
uuid=uus[0], name='A', hostname=ws[0]),
Event('task-received',
uuid=uus[1], name='B', hostname=ws[1]),
Event('task-revoked',
uuid=uus[2], name='C', hostname=ws[2])]
for event in events:
event['local_received'] = time()
state.event(event)
cam.on_shutter(state)
for host in ws:
worker = models.WorkerState.objects.get(hostname=host)
self.assertTrue(worker.is_alive())
t1 = models.TaskState.objects.get(task_id=uus[0])
self.assertEqual(t1.state, 'STARTED')
self.assertEqual(t1.name, 'A')
t2 = models.TaskState.objects.get(task_id=uus[1])
self.assertEqual(t2.state, 'RECEIVED')
t3 = models.TaskState.objects.get(task_id=uus[2])
self.assertEqual(t3.state, 'REVOKED')
events = [Event('task-succeeded',
uuid=uus[0], hostname=ws[0], result=42),
Event('task-failed',
uuid=uus[1], exception="KeyError('foo')",
hostname=ws[1]),
Event('worker-offline', hostname=ws[0])]
list(map(state.event, events))
cam._last_worker_write.clear()
cam.on_shutter(state)
w1 = models.WorkerState.objects.get(hostname=ws[0])
self.assertFalse(w1.is_alive())
t1 = models.TaskState.objects.get(task_id=uus[0])
self.assertEqual(t1.state, 'SUCCESS')
self.assertEqual(t1.result, '42')
self.assertEqual(t1.worker, w1)
t2 = models.TaskState.objects.get(task_id=uus[1])
self.assertEqual(t2.state, 'FAILURE')
self.assertEqual(t2.result, "KeyError('foo')")
self.assertEqual(t2.worker.hostname, ws[1])
cam.on_shutter(state)
|
sivaprakashniet/push_pull
|
p2p/lib/python2.7/site-packages/djcelery/tests/test_snapshot.py
|
Python
|
bsd-3-clause
| 9,460
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-05-24 10:22
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('ip', '0030_auto_20170328_1545'),
]
operations = [
migrations.AlterModelOptions(
name='informationpackage',
options={'ordering': ['id'], 'permissions': (('can_upload', 'Can upload files to IP'), ('set_uploaded', 'Can set IP as uploaded'), ('create_sip', 'Can create SIP'), ('submit_sip', 'Can submit SIP'), ('transfer_sip', 'Can transfer SIP'), ('change_sa', 'Can change SA connected to IP'), ('lock_sa', 'Can lock SA to IP'), ('unlock_profile', 'Can unlock profile connected to IP'), ('can_receive_remote_files', 'Can receive remote files')), 'verbose_name': 'Information Package'},
),
]
|
ESSolutions/ESSArch_Core
|
ESSArch_Core/ip/migrations/0031_auto_20170524_1222.py
|
Python
|
gpl-3.0
| 818
|
#!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2017
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the classes that represent Telegram
InputTextMessageContent"""
from telegram import InputMessageContent
class InputTextMessageContent(InputMessageContent):
"""Base class for Telegram InputTextMessageContent Objects"""
def __init__(self, message_text, parse_mode=None, disable_web_page_preview=None, **kwargs):
# Required
self.message_text = message_text
# Optionals
self.parse_mode = parse_mode
self.disable_web_page_preview = disable_web_page_preview
@staticmethod
def de_json(data, bot):
return InputTextMessageContent(**data)
|
thonkify/thonkify
|
src/lib/telegram/inputtextmessagecontent.py
|
Python
|
mit
| 1,431
|
import unittest
from main import Solution
class SolutionTest(unittest.TestCase):
def setUp(self):
self.sol = Solution()
def test_1(self):
self.assertEqual(
self.sol.findSubstring(
"barfoothefoobarman",
["foo", "bar"]
),
[0, 9]
)
def test_2(self):
self.assertEqual(
self.sol.findSubstring(
"wordgoodgoodgoodbestword",
["word", "good", "best", "word"]
),
[]
)
def test_3(self):
self.assertEqual(
self.sol.findSubstring(
"barfoofoobarthefoobarman",
["bar","foo","the"]
),
[6, 9, 12]
)
def test_4(self):
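        # "aa" * 3 = "aaaaaa" occurs in "aaaaaaaa" at overlapping
        # start indices 0, 1 and 2, hence the expected [0, 1, 2].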
self.assertEqual(
self.sol.findSubstring(
"aaaaaaaa",
["aa","aa","aa"]
),
[0, 1, 2]
)
|
y-usuzumi/survive-the-course
|
leetcode/30.Substring_with_Concatenation_of_All_Words/test.py
|
Python
|
bsd-3-clause
| 956
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 24 09:28:16 2014
@author: kshmirko
"""
import netCDF4 as nc
import numpy as np
import pandas as pds
DB = '../sage-reader/data/SAGE-II-6.20.nc'
Lon0 = 131.9
Lat0 = 43.1
print('Station location: Lon = %5.2f\tLat = %5.2f\n'%(Lon0, Lat0))
Radius = float(input('Enter radius to search profiles:> '))
Radius2 = Radius**2
F=nc.Dataset(DB,'r')
VAR = F.variables
print(VAR['O3'].shape)
# select profiles inside Radius
Lon = VAR['Lon'][...]
Lat = VAR['Lat'][...]
Distance = ((Lon-Lon0)**2+(Lat-Lat0)**2)
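# NOTE: squared planar distance in degrees (no great-circle correction),
# which is adequate only for small search radii around the station.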
ok = np.where(Distance<Radius2)[0]
# and corresponding timepoints
TP = VAR['TP']
Tp = nc.num2date(TP[ok],TP.units, TP.calendar)
O3_profiles = VAR['O3'][ok,:]
O3_profiles_Err = VAR['O3_Err'][ok,:]
Tropo_Height = VAR['Trop_Height'][ok]
Ext386 = VAR['Ext386'][ok,:]
Ext386_Err = VAR['Ext386_Err'][ok,:]
Ext452 = VAR['Ext452'][ok,:]
Ext452_Err = VAR['Ext452_Err'][ok,:]
Ext525 = VAR['Ext525'][ok,:]
Ext525_Err = VAR['Ext525_Err'][ok,:]
Ext1020 = VAR['Ext1020'][ok,:]
Ext1020_Err = VAR['Ext1020_Err'][ok,:]
Df_O3 = pds.DataFrame(O3_profiles, index=Tp)
Df_O3_Err = pds.DataFrame(O3_profiles_Err, index=Tp)
Df_TH = pds.DataFrame(Tropo_Height, index=Tp)
Df_Ext386 = pds.DataFrame(Ext386, index=Tp)
Df_Ext452 = pds.DataFrame(Ext452, index=Tp)
Df_Ext525 = pds.DataFrame(Ext525, index=Tp)
Df_Ext1020 = pds.DataFrame(Ext1020, index=Tp)
Df_Ext386_Err = pds.DataFrame(Ext386_Err, index=Tp)
Df_Ext452_Err = pds.DataFrame(Ext452_Err, index=Tp)
Df_Ext525_Err = pds.DataFrame(Ext525_Err, index=Tp)
Df_Ext1020_Err = pds.DataFrame(Ext1020_Err, index=Tp)
H5FileName = 'DS-%5.1f-%4.1f-%3.1f.h5'%(Lon0, Lat0, Radius)
H5FileName1 = 'DT-%5.1f-%4.1f-%3.1f.h5'%(Lon0, Lat0, Radius)
Df_O3.to_hdf(H5FileName,'O3',append=True)
Df_O3_Err.to_hdf(H5FileName,'O3Err',append=True)
Df_TH.to_hdf(H5FileName,'TH',append=True)
Df_Ext386.to_hdf(H5FileName,'Ext386',append=True)
Df_Ext452.to_hdf(H5FileName,'Ext452',append=True)
Df_Ext525.to_hdf(H5FileName,'Ext525',append=True)
Df_Ext1020.to_hdf(H5FileName,'Ext1020',append=True)
Df_Ext386_Err.to_hdf(H5FileName,'Ext386_Err',append=True)
Df_Ext452_Err.to_hdf(H5FileName,'Ext452_Err',append=True)
Df_Ext525_Err.to_hdf(H5FileName,'Ext525_Err',append=True)
Df_Ext1020_Err.to_hdf(H5FileName,'Ext1020_Err',append=True)
#O3_mean = np.mean(O3_profiles,0)
#O3_mean_Err = np.mean(O3_profiles_Err,0)
#import pylab as plt
#X = np.linspace(0.5, 70, 140)
#plt.figure()
#ax = plt.subplot(111)
#ax.plot(X, O3_mean)
#ax2=ax.twinx()
#ax2.plot(X, O3_mean_Err/100.0)
#plt.show()
|
kshmirko/sageII-stat
|
prepare.py
|
Python
|
gpl-2.0
| 2,551
|
#!/usr/bin/env python
# PYTHON_ARGCOMPLETE_OK
# coding=utf-8
from copy import deepcopy
from distutils.spawn import find_executable
from getpass import getuser
from kvirt.config import Kconfig
from kvirt.examples import plandatacreate, vmdatacreate, hostcreate, _list, plancreate, planinfo, productinfo, start
from kvirt.examples import repocreate, isocreate, kubegenericcreate, kubek3screate, kubeopenshiftcreate, kubekindcreate
from kvirt.examples import dnscreate, diskcreate, diskdelete, vmcreate, vmconsole, vmexport, niccreate, nicdelete
from kvirt.examples import disconnectercreate, appopenshiftcreate, plantemplatecreate, kubehypershiftcreate
from kvirt.examples import workflowcreate
from kvirt.examples import changelog
from kvirt.baseconfig import Kbaseconfig
from kvirt.containerconfig import Kcontainerconfig
from kvirt import version
from kvirt.defaults import IMAGES, VERSION, LOCAL_OPENSHIFT_APPS, SSH_PUB_LOCATIONS
from prettytable import PrettyTable
import argcomplete
import argparse
from argparse import RawDescriptionHelpFormatter as rawhelp
from ipaddress import ip_address
from glob import glob
from kvirt import common
from kvirt.common import error, pprint, success, warning, ssh, _ssh_credentials, container_mode
from kvirt import nameutils
import os
import random
import requests
from subprocess import call
import sys
from tempfile import TemporaryDirectory
from urllib.parse import urlparse
import yaml
def cache_vms(baseconfig, region, zone, namespace):
cache_file = "%s/.kcli/%s_vms.yml" % (os.environ['HOME'], baseconfig.client)
if os.path.exists(cache_file):
with open(cache_file, 'r') as vms:
_list = yaml.safe_load(vms)
pprint("Using cache information...")
else:
config = Kconfig(client=baseconfig.client, debug=baseconfig.debug, region=region, zone=zone,
namespace=namespace)
_list = config.k.list()
with open(cache_file, 'w') as c:
pprint(f"Caching results for {baseconfig.client}...")
            try:
                yaml.safe_dump(_list, c, default_flow_style=False, encoding='utf-8', allow_unicode=True,
                               sort_keys=False)
            except TypeError:
                # older PyYAML releases don't support the sort_keys argument
                yaml.safe_dump(_list, c, default_flow_style=False, encoding='utf-8', allow_unicode=True)
return _list
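# Usage note (hedged): when caching is enabled for a client, list/info
# calls below read ~/.kcli/<client>_vms.yml instead of querying the
# provider; the cache file is removed via delete_cache() further down.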
def valid_fqdn(name):
if name is not None and '/' in name:
msg = "Vm name can't include /"
raise argparse.ArgumentTypeError(msg)
return name
def valid_url(url):
if url is not None:
parsed_url = urlparse(url)
if parsed_url.scheme == '' or parsed_url.netloc == '':
msg = "Malformed url"
raise argparse.ArgumentTypeError(msg)
return url
def valid_members(members):
try:
return members[1:-1].split(',')
except:
msg = "Incorrect members list"
raise argparse.ArgumentTypeError(msg)
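# valid_members above expects a bracketed, comma-separated list,
# e.g. valid_members("[m1,m2]") -> ['m1', 'm2'].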
def valid_cluster(name):
if name is not None:
if '/' in name:
msg = "Cluster name can't include /"
raise argparse.ArgumentTypeError(msg)
return name
def alias(text):
return "Alias for %s" % text
def get_subparser_print_help(parser, subcommand):
subparsers_actions = [
action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)]
for subparsers_action in subparsers_actions:
for choice, subparser in subparsers_action.choices.items():
if choice == subcommand:
subparser.print_help()
return
def get_subparser(parser, subcommand):
subparsers_actions = [
action for action in parser._actions
if isinstance(action, argparse._SubParsersAction)]
for subparsers_action in subparsers_actions:
for choice, subparser in subparsers_action.choices.items():
if choice == subcommand:
return subparser
def get_version(args):
full_version = f"version: {VERSION}"
versiondir = os.path.dirname(version.__file__)
git_file = f'{versiondir}/git'
git_version = 'N/A'
git_date = ''
if os.path.exists(git_file) and os.stat(git_file).st_size > 0:
git_version, git_date = open(git_file).read().rstrip().split(' ')
git_date = f'({git_date})'
full_version += f" commit: {git_version} {git_date}"
update = 'N/A'
if git_version != 'N/A':
try:
upstream_version = requests.get("https://api.github.com/repos/karmab/kcli/commits/master").json()['sha'][:7]
update = True if upstream_version != git_version else False
except:
pass
full_version += f" Available Updates: {update}"
print(full_version)
def get_changelog(args):
if find_executable('git') is None:
error("git needed for this functionality")
sys.exit(1)
diff = args.diff
if not diff:
diff = ['master']
if len(diff) > 1:
ori, dest = diff[:2]
else:
versiondir = os.path.dirname(version.__file__)
git_file = f'{versiondir}/git'
git_version = 'N/A'
if os.path.exists(git_file) and os.stat(git_file).st_size > 0:
git_version = open(git_file).read().rstrip().split(' ')[0]
if git_version != 'N/A':
ori, dest = git_version, diff[0]
else:
error("No source commit available. Use kcli changelog diff1 diff2")
sys.exit(1)
with TemporaryDirectory() as tmpdir:
cmd = f"git clone -q https://github.com/karmab/kcli {tmpdir}"
call(cmd, shell=True)
os.chdir(tmpdir)
cmd = f"git log --decorate=no --oneline {ori}..{dest}"
call(cmd, shell=True)
def delete_cache(args):
yes_top = args.yes_top
yes = args.yes
if not yes and not yes_top:
common.confirm("Are you sure?")
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
cache_file = "%s/.kcli/%s_vms.yml" % (os.environ['HOME'], baseconfig.client)
if os.path.exists(cache_file):
pprint(f"Deleting cache on {baseconfig.client}")
os.remove(cache_file)
else:
warning(f"No cache file found for {baseconfig.client}")
def start_vm(args):
"""Start vms"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
names = [common.get_lastvm(config.client)] if not args.names else args.names
k = config.k
codes = []
for name in names:
pprint(f"Starting vm {name}...")
result = k.start(name)
code = common.handle_response(result, name, element='', action='started')
codes.append(code)
sys.exit(1 if 1 in codes else 0)
def start_container(args):
"""Start containers"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
names = [common.get_lastvm(config.client)] if not args.names else args.names
cont = Kcontainerconfig(config, client=args.containerclient).cont
for name in names:
pprint(f"Starting container {name}...")
cont.start_container(name)
def stop_vm(args):
"""Stop vms"""
soft = args.soft
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
names = [common.get_lastvm(config.client)] if not args.names else args.names
if config.extraclients:
ks = config.extraclients
ks.update({config.client: config.k})
else:
ks = {config.client: config.k}
codes = []
for cli in ks:
k = ks[cli]
for name in names:
pprint(f"Stopping vm {name} in {cli}...")
result = k.stop(name, soft=soft)
code = common.handle_response(result, name, element='', action='stopped')
codes.append(code)
sys.exit(1 if 1 in codes else 0)
def stop_container(args):
"""Stop containers"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
names = [common.get_lastvm(config.client)] if not args.names else args.names
if config.extraclients:
ks = config.extraclients
ks.update({config.client: config.k})
else:
ks = {config.client: config.k}
for cli in ks:
cont = Kcontainerconfig(config, client=args.containerclient).cont
for name in names:
pprint(f"Stopping container {name} in {cli}...")
cont.stop_container(name)
def restart_vm(args):
"""Restart vms"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
names = [common.get_lastvm(config.client)] if not args.names else args.names
k = config.k
codes = []
for name in names:
pprint(f"Restarting vm {name}...")
result = k.restart(name)
code = common.handle_response(result, name, element='', action='restarted')
codes.append(code)
sys.exit(1 if 1 in codes else 0)
def restart_container(args):
"""Restart containers"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
names = [common.get_lastvm(config.client)] if not args.names else args.names
cont = Kcontainerconfig(config, client=args.containerclient).cont
for name in names:
pprint(f"Restarting container {name}...")
cont.stop_container(name)
cont.start_container(name)
def console_vm(args):
"""Vnc/Spice/Serial Vm console"""
serial = args.serial
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
name = common.get_lastvm(config.client) if not args.name else args.name
k = config.k
tunnel = config.tunnel
if serial:
k.serialconsole(name)
else:
k.console(name=name, tunnel=tunnel)
def console_container(args):
"""Container console"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
name = common.get_lastvm(config.client) if not args.name else args.name
cont = Kcontainerconfig(config, client=args.containerclient).cont
cont.console_container(name)
return
def delete_vm(args):
"""Delete vm"""
snapshots = args.snapshots
count = args.count
yes_top = args.yes_top
yes = args.yes
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if config.extraclients:
allclients = config.extraclients.copy()
allclients.update({config.client: config.k})
names = args.names
if not names:
error("Can't delete vms on multiple hosts without specifying their names")
sys.exit(1)
else:
allclients = {config.client: config.k}
names = [common.get_lastvm(config.client)] if not args.names else args.names
if count > 1:
if len(args.names) == 1:
names = ["%s-%d" % (args.names[0], number) for number in range(count)]
else:
error("Using count when deleting vms requires specifying an unique name")
sys.exit(1)
dnsclients = allclients.copy()
for cli in sorted(allclients):
k = allclients[cli]
if not yes and not yes_top:
common.confirm("Are you sure?")
codes = []
for name in names:
pprint(f"Deleting vm {name} on {cli}")
dnsclient, domain = k.dnsinfo(name)
if config.rhnunregister:
image = k.info(name).get('image')
if 'rhel' in image:
pprint(f"Removing rhel subscription for {name}")
ip, vmport = _ssh_credentials(k, name)[1:]
cmd = "subscription-manager unregister"
sshcmd = ssh(name, ip=ip, user='root', tunnel=config.tunnel,
tunnelhost=config.tunnelhost, tunnelport=config.tunnelport,
tunneluser=config.tunneluser, insecure=True, cmd=cmd, vmport=vmport)
os.system(sshcmd)
else:
warning(f"vm {name} doesnt appear as a rhel box. Skipping unregistration")
result = k.delete(name, snapshots=snapshots)
if result['result'] == 'success':
success(f"{name} deleted")
codes.append(0)
common.set_lastvm(name, cli, delete=True)
else:
reason = result['reason']
codes.append(1)
error(f"Could not delete {name} because {reason}")
common.set_lastvm(name, cli, delete=True)
if dnsclient is not None and domain is not None:
pprint(f"Deleting Dns entry for {name} in {domain}")
if dnsclient in dnsclients:
z = dnsclients[dnsclient]
else:
z = Kconfig(client=dnsclient).k
dnsclients[dnsclient] = z
z.delete_dns(name, domain)
cluster = name.split('-')[0] if '-master-' in name or '-worker-' in name else None
clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
if cluster is not None and os.path.exists(clusterdir):
os.environ['KUBECONFIG'] = "%s/auth/kubeconfig" % clusterdir
if os.path.exists("%s/kcli_parameters.yml" % clusterdir):
with open("%s/kcli_parameters.yml" % clusterdir, 'r') as install:
installparam = yaml.safe_load(install)
kubetype = installparam.get('kubetype', 'kubectl')
binary = 'oc' if kubetype == 'openshift' else 'kubectl'
domain = installparam.get('domain')
if domain is not None:
try:
pprint(f"Deleting node {name}.{domain} from your cluster")
call(f'{binary} delete node {name}.{domain}', shell=True)
except:
continue
sys.exit(1 if 1 in codes else 0)
def delete_container(args):
"""Delete container"""
yes = args.yes
yes_top = args.yes_top
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if config.extraclients:
allclients = config.extraclients.copy()
allclients.update({config.client: config.k})
names = args.names
else:
allclients = {config.client: config.k}
names = args.names
for cli in sorted(allclients):
if not yes and not yes_top:
common.confirm("Are you sure?")
codes = [0]
cont = Kcontainerconfig(config, client=args.containerclient).cont
for name in names:
pprint(f"Deleting container {name} on {cli}")
cont.delete_container(name)
sys.exit(1 if 1 in codes else 0)
def download_image(args):
"""Download Image"""
pool = args.pool
image = args.image
cmd = args.cmd
url = args.url
size = args.size
arch = args.arch
openstack = args.openstack
update_profile = not args.skip_profile
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
result = config.handle_host(pool=pool, image=image, download=True, cmd=cmd, url=url, update_profile=update_profile,
size=size, arch=arch, kvm_openstack=openstack)
if result['result'] == 'success':
sys.exit(0)
else:
sys.exit(1)
def download_iso(args):
"""Download ISO"""
pool = args.pool
url = args.url
iso = args.iso if args.iso is not None else os.path.basename(url)
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
result = config.handle_host(pool=pool, image=iso, download=True, url=url, update_profile=False)
if result['result'] == 'success':
sys.exit(0)
else:
sys.exit(1)
def delete_image(args):
images = args.images
pool = args.pool
yes = args.yes
yes_top = args.yes_top
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if config.extraclients:
allclients = config.extraclients.copy()
allclients.update({config.client: config.k})
else:
allclients = {config.client: config.k}
for cli in sorted(allclients):
k = allclients[cli]
if not yes and not yes_top:
common.confirm("Are you sure?")
codes = []
for image in images:
clientprofile = "%s_%s" % (cli, image)
imgprofiles = [p for p in config.profiles if 'image' in config.profiles[p] and
config.profiles[p]['image'] == os.path.basename(image) and
p.startswith('%s_' % cli)]
pprint(f"Deleting image {image} on {cli}")
if clientprofile in config.profiles and 'image' in config.profiles[clientprofile]:
profileimage = config.profiles[clientprofile]['image']
config.delete_profile(clientprofile, quiet=True)
result = k.delete_image(profileimage, pool=pool)
elif imgprofiles:
imgprofile = imgprofiles[0]
config.delete_profile(imgprofile, quiet=True)
result = k.delete_image(image, pool=pool)
else:
result = k.delete_image(image, pool=pool)
if result['result'] == 'success':
success(f"{image} deleted")
codes.append(0)
else:
reason = result['reason']
error(f"Could not delete image {image} because {reason}")
codes.append(1)
sys.exit(1 if 1 in codes else 0)
def create_profile(args):
"""Create profile"""
profile = args.profile
overrides = common.get_overrides(param=args.param)
baseconfig = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
result = baseconfig.create_profile(profile, overrides=overrides)
code = common.handle_response(result, profile, element='Profile', action='created', client=baseconfig.client)
return code
def delete_profile(args):
"""Delete profile"""
profile = args.profile
baseconfig = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
pprint(f"Deleting on {baseconfig.client}")
result = baseconfig.delete_profile(profile)
code = common.handle_response(result, profile, element='Profile', action='deleted', client=baseconfig.client)
return code
# sys.exit(0) if result['result'] == 'success' else sys.exit(1)
def update_profile(args):
"""Update profile"""
profile = args.profile
overrides = common.get_overrides(param=args.param)
baseconfig = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
result = baseconfig.update_profile(profile, overrides=overrides)
code = common.handle_response(result, profile, element='Profile', action='updated', client=baseconfig.client)
return code
def info_vm(args):
"""Get info on vm"""
output = args.output
fields = args.fields.split(',') if args.fields is not None else []
values = args.values
config = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if config.cache:
names = [common.get_lastvm(config.client)] if not args.names else args.names
_list = cache_vms(config, args.region, args.zone, args.namespace)
vms = {vm['name']: vm for vm in _list}
else:
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
names = [common.get_lastvm(config.client)] if not args.names else args.names
for name in names:
if config.cache and name in vms:
data = vms[name]
else:
data = config.k.info(name, debug=args.debug)
if data:
print(common.print_info(data, output=output, fields=fields, values=values, pretty=True))
def enable_host(args):
"""Enable host"""
host = args.name
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
result = baseconfig.enable_host(host)
if result['result'] == 'success':
sys.exit(0)
else:
sys.exit(1)
def disable_host(args):
"""Disable host"""
host = args.name
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
result = baseconfig.disable_host(host)
if result['result'] == 'success':
sys.exit(0)
else:
sys.exit(1)
def delete_host(args):
"""Delete host"""
common.delete_host(args.name)
def sync_host(args):
"""Handle host"""
hosts = args.names
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
result = config.handle_host(sync=hosts)
if result['result'] == 'success':
sys.exit(0)
else:
sys.exit(1)
def list_vm(args):
"""List vms"""
filters = args.filters
    if args.client == 'all':
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
args.client = ','.join(baseconfig.clients)
if args.client is not None and ',' in args.client:
vms = PrettyTable(["Name", "Host", "Status", "Ips", "Source", "Plan", "Profile"])
for client in args.client.split(','):
config = Kbaseconfig(client=client, debug=args.debug, quiet=True)
if config.cache:
_list = cache_vms(config, args.region, args.zone, args.namespace)
else:
config = Kconfig(client=client, debug=args.debug, region=args.region,
zone=args.zone, namespace=args.namespace)
_list = config.k.list()
for vm in _list:
name = vm.get('name')
status = vm.get('status')
ip = vm.get('ip', '')
source = vm.get('image', '')
plan = vm.get('plan', '')
profile = vm.get('profile', '')
vminfo = [name, client, status, ip, source, plan, profile]
if filters:
if status == filters:
vms.add_row(vminfo)
else:
vms.add_row(vminfo)
print(vms)
else:
vms = PrettyTable(["Name", "Status", "Ips", "Source", "Plan", "Profile"])
config = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if config.cache:
_list = cache_vms(config, args.region, args.zone, args.namespace)
else:
config = Kconfig(client=args.client, debug=args.debug, region=args.region,
zone=args.zone, namespace=args.namespace)
_list = config.k.list()
for vm in _list:
name = vm.get('name')
status = vm.get('status')
ip = vm.get('ip', '')
source = vm.get('image', '')
plan = vm.get('plan', '')
profile = vm.get('profile', '')
vminfo = [name, status, ip, source, plan, profile]
if filters:
if status == filters:
vms.add_row(vminfo)
else:
vms.add_row(vminfo)
print(vms)
return
def list_container(args):
"""List containers"""
filters = args.filters
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
cont = Kcontainerconfig(config, client=args.containerclient).cont
pprint("Listing containers...")
containers = PrettyTable(["Name", "Status", "Image", "Plan", "Command", "Ports", "Deploy"])
for container in cont.list_containers():
if filters:
status = container[1]
if status == filters:
containers.add_row(container)
else:
containers.add_row(container)
print(containers)
return
def profilelist_container(args):
"""List container profiles"""
short = args.short
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
profiles = baseconfig.list_containerprofiles()
if short:
profilestable = PrettyTable(["Profile"])
for profile in sorted(profiles):
profilename = profile[0]
profilestable.add_row([profilename])
else:
profilestable = PrettyTable(["Profile", "Image", "Nets", "Ports", "Volumes", "Cmd"])
for profile in sorted(profiles):
profilestable.add_row(profile)
profilestable.align["Profile"] = "l"
print(profilestable)
return
def list_containerimage(args):
"""List container images"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if config.type != 'kvm':
error("Operation not supported on this kind of client.Leaving...")
sys.exit(1)
cont = Kcontainerconfig(config, client=args.containerclient).cont
common.pprint("Listing images...")
images = PrettyTable(["Name"])
for image in cont.list_images():
images.add_row([image])
print(images)
return
def list_host(args):
"""List hosts"""
clientstable = PrettyTable(["Client", "Type", "Enabled", "Current"])
clientstable.align["Client"] = "l"
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
for client in sorted(baseconfig.clients):
enabled = baseconfig.ini[client].get('enabled', True)
_type = baseconfig.ini[client].get('type', 'kvm')
if client == baseconfig.client:
clientstable.add_row([client, _type, enabled, 'X'])
else:
clientstable.add_row([client, _type, enabled, ''])
print(clientstable)
return
def list_lb(args):
"""List lbs"""
short = args.short
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
loadbalancers = config.list_loadbalancers()
    if short:
        loadbalancerstable = PrettyTable(["Loadbalancer"])
        for lb in sorted(loadbalancers):
            loadbalancerstable.add_row([lb])
        loadbalancerstable.align["Loadbalancer"] = "l"
    else:
        loadbalancerstable = PrettyTable(["LoadBalancer", "IPAddress", "IPProtocol", "Ports", "Target"])
        for lb in sorted(loadbalancers):
            loadbalancerstable.add_row(lb)
        loadbalancerstable.align["LoadBalancer"] = "l"
    print(loadbalancerstable)
return
def info_profile(args):
"""List profiles"""
profile = args.profile
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
profiles = baseconfig.list_profiles()
for entry in profiles:
if entry[0] == profile:
profile, flavor, pool, disks, image, nets, cloudinit, nested, reservedns, reservehost = entry
print(f"profile: {profile}")
print(f"flavor: {flavor}")
print(f"pool: {pool}")
print(f"disks: {disks}")
print(f"image: {image}")
print(f"nets: {nets}")
print(f"cloudinit: {cloudinit}")
print(f"nested: {nested}")
print(f"reservedns: {reservedns}")
print(f"reservehost: {reservehost}")
            sys.exit(0)
error(f"Profile {profile} doesn't exist")
sys.exit(1)
def list_profile(args):
"""List profiles"""
short = args.short
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
profiles = baseconfig.list_profiles()
if short:
profilestable = PrettyTable(["Profile"])
for profile in sorted(profiles):
profilename = profile[0]
profilestable.add_row([profilename])
else:
profilestable = PrettyTable(["Profile", "Flavor",
"Pool", "Disks", "Image",
"Nets", "Cloudinit", "Nested",
"Reservedns", "Reservehost"])
for profile in sorted(profiles):
profilestable.add_row(profile)
profilestable.align["Profile"] = "l"
print(profilestable)
return
def list_dns(args):
"""List flavors"""
short = args.short
domain = args.domain
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
entries = k.list_dns(domain)
if short:
dnstable = PrettyTable(["Entry"])
for entry in sorted(entries):
entryname = entry[0]
dnstable.add_row([entryname])
else:
dnstable = PrettyTable(["Entry", "Type", "TTL", "Data"])
for entry in sorted(entries):
dnstable.add_row(entry)
dnstable.align["Flavor"] = "l"
print(dnstable)
return
def list_flavor(args):
"""List flavors"""
short = args.short
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
flavors = k.flavors()
if short:
flavorstable = PrettyTable(["Flavor"])
for flavor in sorted(flavors):
flavorname = flavor[0]
flavorstable.add_row([flavorname])
else:
flavorstable = PrettyTable(["Flavor", "Numcpus", "Memory"])
for flavor in sorted(flavors):
flavorstable.add_row(flavor)
flavorstable.align["Flavor"] = "l"
print(flavorstable)
return
def list_image(args):
"""List images"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if config.client != 'all':
k = config.k
imagestable = PrettyTable(["Images"])
imagestable.align["Images"] = "l"
for image in k.volumes():
imagestable.add_row([image])
print(imagestable)
return
def list_iso(args):
"""List isos"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if config.client != 'all':
k = config.k
isostable = PrettyTable(["Iso"])
isostable.align["Iso"] = "l"
for iso in k.volumes(iso=True):
isostable.add_row([iso])
print(isostable)
return
def list_network(args):
"""List networks"""
short = args.short
subnets = args.subnets
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if config.client != 'all':
k = config.k
if not subnets:
networks = k.list_networks()
pprint("Listing Networks...")
if short:
networkstable = PrettyTable(["Network"])
for network in sorted(networks):
networkstable.add_row([network])
else:
networkstable = PrettyTable(["Network", "Type", "Cidr", "Dhcp", "Domain", "Mode"])
for network in sorted(networks):
networktype = networks[network]['type']
cidr = networks[network]['cidr']
dhcp = networks[network]['dhcp']
mode = networks[network]['mode']
                domain = networks[network].get('domain', 'N/A')
networkstable.add_row([network, networktype, cidr, dhcp, domain, mode])
networkstable.align["Network"] = "l"
print(networkstable)
return
else:
subnets = k.list_subnets()
pprint("Listing Subnets...")
            if short:
                subnetstable = PrettyTable(["Subnets"])
                for subnet in sorted(subnets):
                    subnetstable.add_row([subnet])
                subnetstable.align["Subnets"] = "l"
            else:
                subnetstable = PrettyTable(["Subnet", "Az", "Cidr", "Network"])
                for subnet in sorted(subnets):
                    cidr = subnets[subnet]['cidr']
                    az = subnets[subnet]['az']
                    network = subnets[subnet].get('network', 'N/A')
                    subnetstable.add_row([subnet, az, cidr, network])
                subnetstable.align["Subnet"] = "l"
            print(subnetstable)
            return
def list_plan(args):
"""List plans"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if config.extraclients:
plans = PrettyTable(["Plan", "Host", "Vms"])
allclients = config.extraclients.copy()
allclients.update({config.client: config.k})
for cli in sorted(allclients):
currentconfig = Kconfig(client=cli, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
for plan in currentconfig.list_plans():
planname = plan[0]
planvms = plan[1]
plans.add_row([planname, cli, planvms])
else:
plans = PrettyTable(["Plan", "Vms"])
for plan in config.list_plans():
planname = plan[0]
planvms = plan[1]
plans.add_row([planname, planvms])
print(plans)
return
def choose_parameter_file(paramfile):
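    """Pick the parameter file to use, defaulting to kcli_parameters.yml and mapping paths to /workdir in container mode"""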
if container_mode():
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
return paramfile
def get_cluster_parameter_file(paramfile):
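    """When KUBECONFIG points at a cluster deployed by kcli, default to that cluster's stored kcli_parameters.yml"""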
clustersdir = os.path.expanduser("~/.kcli/clusters")
    kubeconfig = os.environ.get('KUBECONFIG', '')
    if (paramfile is None or not os.path.exists(paramfile)) and kubeconfig.startswith(clustersdir):
        cluster = kubeconfig.replace("%s/" % clustersdir, '').split('/')[0]
clusterparamfile = "%s/%s/kcli_parameters.yml" % (clustersdir, cluster)
if os.path.exists(clusterparamfile):
paramfile = clusterparamfile
return paramfile
def create_app_generic(args):
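    """Create generic kube apps"""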
apps = args.apps
outputdir = args.outputdir
if outputdir is not None:
if container_mode() and not outputdir.startswith('/'):
outputdir = "/workdir/%s" % outputdir
if os.path.exists(outputdir) and os.path.isfile(outputdir):
error("Invalid outputdir %s" % outputdir)
sys.exit(1)
elif not os.path.exists(outputdir):
os.mkdir(outputdir)
paramfile = choose_parameter_file(args.paramfile)
if find_executable('kubectl') is None:
error("You need kubectl to install apps")
sys.exit(1)
if 'KUBECONFIG' not in os.environ:
error("KUBECONFIG env variable needs to be set")
sys.exit(1)
elif not os.path.isabs(os.environ['KUBECONFIG']):
os.environ['KUBECONFIG'] = "%s/%s" % (os.getcwd(), os.environ['KUBECONFIG'])
paramfile = get_cluster_parameter_file(paramfile)
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
available_apps = baseconfig.list_apps_generic(quiet=True)
for app in apps:
if app not in available_apps:
error(f"app {app} not available. Skipping...")
continue
pprint(f"Adding app {app}")
        overrides.setdefault(f'{app}_version', 'latest')
baseconfig.create_app_generic(app, overrides, outputdir=outputdir)
def create_app_openshift(args):
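    """Create openshift kube apps"""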
apps = args.apps
outputdir = args.outputdir
if outputdir is not None:
if container_mode() and not outputdir.startswith('/'):
outputdir = "/workdir/%s" % outputdir
if os.path.exists(outputdir) and os.path.isfile(outputdir):
error(f"Invalid outputdir {outputdir}")
sys.exit(1)
elif not os.path.exists(outputdir):
os.mkdir(outputdir)
paramfile = choose_parameter_file(args.paramfile)
if find_executable('oc') is None:
error("You need oc to install apps")
sys.exit(1)
if 'KUBECONFIG' not in os.environ:
error("KUBECONFIG env variable needs to be set")
sys.exit(1)
elif not os.path.isabs(os.environ['KUBECONFIG']):
os.environ['KUBECONFIG'] = "%s/%s" % (os.getcwd(), os.environ['KUBECONFIG'])
paramfile = get_cluster_parameter_file(paramfile)
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
for app in apps:
if app in LOCAL_OPENSHIFT_APPS:
name = app
app_data = overrides.copy()
else:
name, source, channel, csv, description, namespace, channels, crd = common.olm_app(app)
if name is None:
error(f"Couldn't find any app matching {app}. Skipping...")
continue
if 'channel' in overrides:
overrides_channel = overrides['channel']
if overrides_channel not in channels:
error(f"Target channel {channel} not found in {channels}. Skipping...")
continue
else:
channel = overrides_channel
app_data = {'name': name, 'source': source, 'channel': channel, 'namespace': namespace, 'crd': crd}
app_data.update(overrides)
pprint(f"Adding app {app}")
baseconfig.create_app_openshift(name, app_data, outputdir=outputdir)
def delete_app_generic(args):
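    """Delete generic kube apps"""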
apps = args.apps
paramfile = choose_parameter_file(args.paramfile)
if find_executable('kubectl') is None:
error("You need kubectl to install apps")
sys.exit(1)
if 'KUBECONFIG' not in os.environ:
error("KUBECONFIG env variable needs to be set")
sys.exit(1)
elif not os.path.isabs(os.environ['KUBECONFIG']):
os.environ['KUBECONFIG'] = "%s/%s" % (os.getcwd(), os.environ['KUBECONFIG'])
paramfile = get_cluster_parameter_file(paramfile)
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
available_apps = baseconfig.list_apps_generic(quiet=True)
for app in apps:
if app not in available_apps:
error(f"app {app} not available. Skipping...")
continue
pprint(f"Deleting app {app}")
overrides[f'{app}_version'] = overrides[f'{app}_version'] if f'{app}_version' in overrides else 'latest'
baseconfig.delete_app_generic(app, overrides)
def delete_app_openshift(args):
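    """Delete openshift kube apps"""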
apps = args.apps
paramfile = choose_parameter_file(args.paramfile)
if find_executable('oc') is None:
error("You need oc to install apps")
sys.exit(1)
if 'KUBECONFIG' not in os.environ:
error("KUBECONFIG env variable needs to be set")
sys.exit(1)
elif not os.path.isabs(os.environ['KUBECONFIG']):
os.environ['KUBECONFIG'] = "%s/%s" % (os.getcwd(), os.environ['KUBECONFIG'])
paramfile = get_cluster_parameter_file(paramfile)
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
for app in apps:
        if app in LOCAL_OPENSHIFT_APPS:
            name = app
            app_data = overrides.copy()
        else:
            name, source, channel, csv, description, namespace, channels, crd = common.olm_app(app)
            if name is None:
                error(f"Couldn't find any app matching {app}. Skipping...")
                continue
            app_data = {'name': name, 'source': source, 'channel': channel, 'namespace': namespace, 'crd': crd}
            app_data.update(overrides)
pprint(f"Deleting app {name}")
baseconfig.delete_app_openshift(app, app_data)
def list_apps_generic(args):
"""List generic kube apps"""
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
apps = PrettyTable(["Name"])
for app in baseconfig.list_apps_generic(quiet=True):
apps.add_row([app])
print(apps)
def list_apps_openshift(args):
"""List openshift kube apps"""
if find_executable('oc') is None:
error("You need oc to list apps")
sys.exit(1)
if 'KUBECONFIG' not in os.environ:
error("KUBECONFIG env variable needs to be set")
sys.exit(1)
elif not os.path.isabs(os.environ['KUBECONFIG']):
os.environ['KUBECONFIG'] = "%s/%s" % (os.getcwd(), os.environ['KUBECONFIG'])
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
apps = PrettyTable(["Name"])
for app in baseconfig.list_apps_openshift(quiet=True, installed=args.installed):
apps.add_row([app])
print(apps)
def list_kube(args):
"""List kube"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if config.extraclients:
kubestable = PrettyTable(["Cluster", "Type", "Plan", "Host", "Vms"])
allclients = config.extraclients.copy()
allclients.update({config.client: config.k})
for cli in sorted(allclients):
currentconfig = Kconfig(client=cli, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
kubes = currentconfig.list_kubes()
for kubename in kubes:
kube = kubes[kubename]
kubetype = kube['type']
kubeplan = kube['plan']
kubevms = kube['vms']
kubestable.add_row([kubename, kubetype, kubeplan, cli, kubevms])
else:
kubestable = PrettyTable(["Cluster", "Type", "Plan", "Vms"])
kubes = config.list_kubes()
for kubename in kubes:
kube = kubes[kubename]
kubetype = kube['type']
kubevms = kube['vms']
kubeplan = kube['plan']
kubestable.add_row([kubename, kubetype, kubeplan, kubevms])
print(kubestable)
return
def list_pool(args):
"""List pools"""
short = args.short
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pools = k.list_pools()
if short:
poolstable = PrettyTable(["Pool"])
for pool in sorted(pools):
poolstable.add_row([pool])
else:
poolstable = PrettyTable(["Pool", "Path"])
for pool in sorted(pools):
poolpath = k.get_pool_path(pool)
poolstable.add_row([pool, poolpath])
poolstable.align["Pool"] = "l"
print(poolstable)
return
def list_product(args):
"""List products"""
group = args.group
repo = args.repo
search = args.search
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
    if search is not None:
        products = PrettyTable(["Repo", "Product", "Group", "Description", "Numvms", "Memory"])
        products.align["Repo"] = "l"
        productsinfo = baseconfig.list_products(repo=repo)
        for prod in sorted(productsinfo, key=lambda x: (x['repo'], x['group'], x['name'])):
            name = prod['name']
            prodrepo = prod['repo']
            prodgroup = prod.get('group', 'N/A')
            description = prod.get('description', 'N/A')
            if search.lower() not in name.lower() and search.lower() not in description.lower():
                continue
            if group is not None and prodgroup != group:
                continue
            numvms = prod.get('numvms', 'N/A')
            memory = prod.get('memory', 'N/A')
            products.add_row([prodrepo, name, prodgroup, description, numvms, memory])
else:
products = PrettyTable(["Repo", "Product", "Group", "Description", "Numvms", "Memory"])
products.align["Repo"] = "l"
productsinfo = baseconfig.list_products(group=group, repo=repo)
        for product in sorted(productsinfo, key=lambda x: (x['repo'], x['group'], x['name'])):
            name = product['name']
            prodrepo = product['repo']
            prodgroup = product.get('group', 'N/A')
            description = product.get('description', 'N/A')
            numvms = product.get('numvms', 'N/A')
            memory = product.get('memory', 'N/A')
            products.add_row([prodrepo, name, prodgroup, description, numvms, memory])
print(products)
return
def list_repo(args):
"""List repos"""
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
repos = PrettyTable(["Repo", "Url"])
repos.align["Repo"] = "l"
reposinfo = baseconfig.list_repos()
for repo in sorted(reposinfo):
url = reposinfo[repo]
repos.add_row([repo, url])
print(repos)
return
def list_vmdisk(args):
"""List vm disks"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint("Listing disks...")
diskstable = PrettyTable(["Name", "Pool", "Path"])
diskstable.align["Name"] = "l"
disks = k.list_disks()
for disk in sorted(disks):
path = disks[disk]['path']
pool = disks[disk]['pool']
diskstable.add_row([disk, pool, path])
print(diskstable)
return
def create_openshift_iso(args):
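    """Create openshift iso"""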
cluster = args.cluster
ignitionfile = args.ignitionfile
direct = args.direct
overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)
client = 'fake' if common.need_fake() else args.client
config = Kconfig(client=client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.create_openshift_iso(cluster, overrides=overrides, ignitionfile=ignitionfile, direct=direct)
def create_openshift_disconnecter(args):
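    """Create openshift disconnecter"""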
plan = args.plan
if plan is None:
plan = nameutils.get_random_name()
pprint(f"Using {plan} as name of the plan")
overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)
if 'cluster' not in overrides:
overrides['cluster'] = plan
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.create_openshift_disconnecter(plan, overrides=overrides)
def create_vm(args):
"""Create vms"""
name = args.name
    onlyassets = 'assets' in vars(args)
image = args.image
profile = args.profile
count = args.count
profilefile = args.profilefile
overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)
console = args.console
serial = args.serial
if args.wait:
overrides['wait'] = args.wait
if overrides.get('wait', False) and 'keys' not in overrides and common.get_ssh_pub_key() is None:
error("No usable public key found, which is mandatory when using wait")
sys.exit(1)
customprofile = {}
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
for key in overrides:
if key in vars(config) and vars(config)[key] is not None and type(overrides[key]) != type(vars(config)[key]):
            key_type = type(vars(config)[key]).__name__
            error(f"The provided parameter {key} has a wrong type, it should be {key_type}")
sys.exit(1)
if 'name' in overrides:
name = overrides['name']
if name is None:
name = nameutils.get_random_name()
if config.type in ['gcp', 'kubevirt']:
name = name.replace('_', '-')
if config.type != 'aws' and not onlyassets:
pprint(f"Using {name} as name of the vm")
if image is not None:
if image in config.profiles and not onlyassets:
pprint(f"Using {image} as profile")
profile = image
elif profile is not None:
if profile.endswith('.yml'):
profilefile = profile
profile = None
if not os.path.exists(profilefile):
error(f"Missing profile file {profilefile}")
sys.exit(1)
else:
with open(profilefile, 'r') as entries:
entries = yaml.safe_load(entries)
entrieskeys = list(entries.keys())
if len(entrieskeys) == 1:
profile = entrieskeys[0]
customprofile = entries[profile]
pprint(f"Using data from {profilefile} as profile")
else:
error(f"Cant' parse {profilefile} as profile file")
sys.exit(1)
elif overrides or onlyassets:
profile = 'kvirt'
config.profiles[profile] = {}
else:
error("You need to either provide a profile, an image or some parameters")
sys.exit(1)
if count == 1:
result = config.create_vm(name, profile, overrides=overrides, customprofile=customprofile,
onlyassets=onlyassets)
if not onlyassets:
if console:
config.k.console(name=name, tunnel=config.tunnel)
elif serial:
config.k.serialconsole(name)
else:
code = common.handle_response(result, name, element='', action='created', client=config.client)
return code
elif 'reason' in result:
error(result['reason'])
else:
print(result['data'])
else:
codes = []
if 'plan' not in overrides:
overrides['plan'] = name
for number in range(count):
currentname = "%s-%d" % (name, number)
currentoverrides = deepcopy(overrides)
if 'nets' in currentoverrides:
for index, net in enumerate(currentoverrides['nets']):
if not isinstance(net, dict):
continue
if 'mac' in net:
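                        # Bump the last octet of the base mac (hex) by the vm index, zero-padded to two digits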
                        suffix = hex(int(net['mac'][-2:], 16) + number)[2:].rjust(2, '0')
currentoverrides['nets'][index]['mac'] = f"{net['mac'][:-2]}{suffix}"
if 'ip' in net:
ip = str(ip_address(net['ip']) + number)
currentoverrides['nets'][index]['ip'] = ip
if 'uuid' in currentoverrides:
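                # Derive a per-vm uuid by incrementing the last dash-separated field of the base uuid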
uuid = overrides['uuid']
currentoverrides['uuid'] = '-'.join(uuid.split('-')[:-1] + [str(int(uuid.split('-')[-1]) + number)])
result = config.create_vm(currentname, profile, overrides=currentoverrides, customprofile=customprofile,
onlyassets=onlyassets)
if not onlyassets:
codes.append(common.handle_response(result, currentname, element='', action='created',
client=config.client))
return max(codes)
def clone_vm(args):
"""Clone existing vm"""
name = args.name
base = args.base
full = args.full
start = args.start
pprint(f"Cloning vm {name} from vm {base}...")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
result = k.clone(base, name, full=full, start=start)
if result['result'] == 'success' and os.access(os.path.expanduser('~/.kcli'), os.W_OK):
common.set_lastvm(name, config.client)
def update_vm(args):
"""Update ip, memory or numcpus"""
overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)
ip = overrides.get('ip')
flavor = overrides.get('flavor')
numcpus = overrides.get('numcpus')
memory = overrides.get('memory')
plan = overrides.get('plan')
autostart = overrides.get('autostart')
dns = overrides.get('dns')
host = overrides.get('host')
domain = overrides.get('domain')
cloudinit = overrides.get('cloudinit')
image = overrides.get('image')
nets = overrides.get('nets')
disks = overrides.get('disks')
information = overrides.get('information')
iso = overrides.get('iso')
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
extra_metadata = {k: overrides[k] for k in overrides if k not in config.list_keywords()}
k = config.k
names = [common.get_lastvm(config.client)] if not args.names else args.names
for name in names:
if dns:
pprint(f"Creating Dns entry for {name}...")
networks = k.vm_ports(name)
if networks and domain is None:
domain = networks[0]
            if not networks:
return
else:
k.reserve_dns(name=name, nets=networks, domain=domain, ip=ip)
if ip is not None:
pprint(f"Updating ip of vm {name} to {ip}...")
k.update_metadata(name, 'ip', ip)
if cloudinit:
pprint(f"Removing cloudinit information of vm {name}")
k.remove_cloudinit(name)
if plan is not None:
pprint(f"Updating plan of vm {name} to {plan}...")
k.update_metadata(name, 'plan', plan)
if image is not None:
pprint(f"Updating image of vm {name} to {image}...")
k.update_metadata(name, 'image', image)
if memory is not None:
pprint(f"Updating memory of vm {name} to {memory}...")
k.update_memory(name, memory)
if numcpus is not None:
pprint(f"Updating numcpus of vm {name} to {numcpus}...")
k.update_cpus(name, numcpus)
if autostart is not None:
pprint(f"Setting autostart to {autostart} for vm {name}...")
k.update_start(name, start=autostart)
if information:
pprint(f"Setting information for vm {name}...")
k.update_information(name, information)
if iso is not None:
pprint(f"Switching iso for vm {name} to {iso}...")
if iso == 'None':
iso = None
k.update_iso(name, iso)
if flavor is not None:
pprint(f"Updating flavor of vm {name} to {flavor}...")
k.update_flavor(name, flavor)
if host:
pprint(f"Creating Host entry for vm {name}...")
networks = k.vm_ports(name)
if networks:
if domain is None:
domain = networks[0]
k.reserve_host(name, networks, domain)
currentvm = k.info(name)
currentnets = currentvm.get('nets', [])
currentdisks = currentvm.get('disks', [])
if disks:
pprint(f"Updating disks of vm {name}")
for index, currentdisk in enumerate(currentdisks):
if index < len(disks):
disk = disks[index]
currentdisksize = currentdisk['size']
disksize = disk.get('size', 10) if isinstance(disk, dict) else int(disk)
if disksize > currentdisksize:
if currentvm.get('status') != 'down':
warning(f"Cant resize Disk {index} in {name} while VM is up")
break
pprint(f"Resizing Disk {index} in {name}")
diskpath = currentdisk['path']
k.resize_disk(diskpath, disksize)
if len(currentdisks) < len(disks):
pprint(f"Adding Disks to {name}")
for disk in disks[len(currentdisks):]:
if isinstance(disk, int):
size = disk
pool = config.pool
elif isinstance(disk, str) and disk.isdigit():
size = int(disk)
pool = config.pool
elif isinstance(disk, dict):
size = disk.get('size', config.disksize)
pool = disk.get('pool', config.pool)
else:
continue
k.add_disk(name=name, size=size, pool=pool)
if len(currentdisks) > len(disks):
pprint(f"Removing Disks of {name}")
            for disk in currentdisks[len(disks):]:
diskname = os.path.basename(disk['path'])
diskpool = os.path.dirname(disk['path'])
k.delete_disk(name=name, diskname=diskname, pool=diskpool)
if nets:
pprint(f"Updating nets of vm {name}")
if len(currentnets) < len(nets):
pprint(f"Adding Nics to {name}")
for net in nets[len(currentnets):]:
if isinstance(net, str):
network = net
elif isinstance(net, dict) and 'name' in net:
network = net['name']
else:
error(f"Skipping wrong nic spec for {name}")
continue
k.add_nic(name, network)
if len(currentnets) > len(nets):
pprint(f"Removing Nics of {name}")
for net in range(len(currentnets), len(nets), -1):
interface = "eth%s" % (net - 1)
k.delete_nic(name, interface)
if extra_metadata:
for key in extra_metadata:
k.update_metadata(name, key, extra_metadata[key])
if overrides.get('files', []):
newfiles = overrides['files']
pprint(f"Remediating files of {name}")
config.remediate_files(name, newfiles, overrides)
def create_vmdisk(args):
"""Add disk to vm"""
overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)
name = args.name
novm = args.novm
size = args.size
image = args.image
interface = args.interface
if interface not in ['virtio', 'ide', 'scsi']:
error("Incorrect disk interface. Choose between virtio, scsi or ide...")
sys.exit(1)
pool = args.pool
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
if size is None:
error("Missing size. Leaving...")
sys.exit(1)
if pool is None:
error("Missing pool. Leaving...")
sys.exit(1)
if novm:
pprint(f"Creating disk {name}...")
else:
pprint(f"Adding disk to {name}...")
k.add_disk(name=name, size=size, pool=pool, image=image, interface=interface, novm=novm, overrides=overrides)
def delete_vmdisk(args):
"""Delete disk of vm"""
yes_top = args.yes_top
yes = args.yes
if not yes and not yes_top:
common.confirm("Are you sure?")
name = args.vm
disknames = args.disknames
novm = args.novm
pool = args.pool
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
for diskname in disknames:
pprint(f"Deleting disk {diskname}")
k.delete_disk(name=name, diskname=diskname, pool=pool, novm=novm)
return
def create_dns(args):
"""Create dns entries"""
names = args.names
net = args.net
domain = args.domain
ip = args.ip
alias = args.alias
if alias is None:
alias = []
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
name = names[0]
if len(names) > 1:
alias.extend(names[1:])
if alias:
pprint("Creating alias entries for %s" % ' '.join(alias))
k.reserve_dns(name=name, nets=[net], domain=domain, ip=ip, alias=alias, primary=True)
def delete_dns(args):
"""Delete dns entries"""
names = args.names
net = args.net
allentries = args.all
domain = args.domain if args.domain is not None else net
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
for name in names:
pprint(f"Deleting Dns entry for {name}")
k.delete_dns(name, domain, allentries=allentries)
def export_vm(args):
"""Export a vm"""
image = args.image
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
names = [common.get_lastvm(config.client)] if not args.names else args.names
k = config.k
codes = []
for name in names:
result = k.export(name=name, image=image)
if result['result'] == 'success':
success(f"Vm {name} exported")
codes.append(0)
else:
reason = result['reason']
error(f"Could not export vm {name} because {reason}")
codes.append(1)
sys.exit(1 if 1 in codes else 0)
def create_lb(args):
"""Create loadbalancer"""
checkpath = args.checkpath
checkport = args.checkport
domain = args.domain
internal = args.internal
if args.vms is None:
vms = []
else:
good_vms = args.vms[1:-1] if args.vms.startswith('[') and args.vms.endswith(']') else args.vms
vms = [v.strip() for v in good_vms.split(',')]
good_ports = args.ports[1:-1] if args.ports.startswith('[') and args.ports.endswith(']') else args.ports
ports = [p.strip() for p in good_ports.split(',')]
name = nameutils.get_random_name().replace('_', '-') if args.name is None else args.name
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.create_loadbalancer(name, ports=ports, checkpath=checkpath, vms=vms, domain=domain, checkport=checkport,
internal=internal)
return 0
def delete_lb(args):
"""Delete loadbalancer"""
yes = args.yes
yes_top = args.yes_top
if not yes and not yes_top:
common.confirm("Are you sure?")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.delete_loadbalancer(args.name)
return 0
def create_generic_kube(args):
"""Create Generic kube"""
paramfile = args.paramfile
force = args.force
cluster = args.cluster
if container_mode():
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
if force:
config.delete_kube(cluster, overrides=overrides)
config.create_kube_generic(cluster, overrides=overrides)
def create_kind_kube(args):
"""Create K3s kube"""
paramfile = args.paramfile
force = args.force
cluster = args.cluster if args.cluster is not None else 'testk'
if container_mode():
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
if force:
config.delete_kube(cluster, overrides=overrides)
config.create_kube_kind(cluster, overrides=overrides)
def create_k3s_kube(args):
"""Create K3s kube"""
paramfile = args.paramfile
force = args.force
cluster = args.cluster if args.cluster is not None else 'testk'
if container_mode():
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
if force:
config.delete_kube(cluster, overrides=overrides)
config.create_kube_k3s(cluster, overrides=overrides)
def create_hypershift_kube(args):
"""Create Hypershift kube"""
paramfile = args.paramfile
force = args.force
cluster = args.cluster if args.cluster is not None else 'testk'
if container_mode():
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
if force:
config.delete_kube(cluster, overrides=overrides)
config.create_kube_hypershift(cluster, overrides=overrides)
def create_openshift_kube(args):
"""Create Openshift kube"""
paramfile = args.paramfile
force = args.force
cluster = args.cluster
if container_mode():
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
client = 'fake' if common.need_fake() else args.client
config = Kconfig(client=client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
if args.subcommand_create_kube == 'okd':
overrides['upstream'] = True
if force:
config.delete_kube(cluster, overrides=overrides)
config.create_kube_openshift(cluster, overrides=overrides)
def delete_kube(args):
"""Delete kube"""
yes = args.yes
yes_top = args.yes_top
cluster = args.cluster if args.cluster is not None else 'testk'
if not yes and not yes_top:
common.confirm("Are you sure?")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)
config.delete_kube(cluster, overrides=overrides)
def scale_generic_kube(args):
"""Scale generic kube"""
workers = args.workers
paramfile = args.paramfile
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
cluster = overrides.get('cluster', args.cluster)
clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
if not os.path.exists(clusterdir):
error(f"Cluster directory {clusterdir} not found...")
sys.exit(1)
if container_mode():
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if workers > 0:
overrides['workers'] = workers
config.scale_kube_generic(cluster, overrides=overrides)
def scale_k3s_kube(args):
"""Scale k3s kube"""
workers = args.workers
paramfile = args.paramfile
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
cluster = overrides.get('cluster', args.cluster)
clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
if not os.path.exists(clusterdir):
error(f"Cluster directory {clusterdir} not found...")
sys.exit(1)
if container_mode():
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
if workers > 0:
overrides['workers'] = workers
config.scale_kube_k3s(cluster, overrides=overrides)
def scale_hypershift_kube(args):
"""Scale hypershift kube"""
workers = args.workers
paramfile = args.paramfile
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
cluster = overrides.get('cluster', args.cluster)
clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
if not os.path.exists(clusterdir):
error(f"Cluster directory {clusterdir} not found...")
sys.exit(1)
if container_mode():
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if workers > 0:
overrides['workers'] = workers
config.scale_kube_hypershift(cluster, overrides=overrides)
def scale_openshift_kube(args):
"""Scale openshift kube"""
workers = args.workers
paramfile = args.paramfile
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
cluster = overrides.get('cluster', args.cluster)
clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
if not os.path.exists(clusterdir):
error(f"Cluster directory {clusterdir} not found...")
sys.exit(1)
if container_mode():
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if workers > 0:
overrides['workers'] = workers
config.scale_kube_openshift(cluster, overrides=overrides)
def update_generic_kube(args):
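    """Update generic kube"""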
args.type = 'generic'
update_kube(args)
def update_hypershift_kube(args):
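    """Update hypershift kube"""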
args.type = 'hypershift'
update_kube(args)
def update_openshift_kube(args):
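    """Update openshift kube"""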
args.type = 'openshift'
update_kube(args)
def update_kind_kube(args):
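    """Update kind kube"""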
args.type = 'kind'
update_kube(args)
def update_k3s_kube(args):
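    """Update k3s kube"""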
args.type = 'k3s'
update_kube(args)
def update_kube(args):
"""Update kube"""
cluster = args.cluster
_type = args.type
data = {'kube': cluster, 'kubetype': _type}
plan = None
paramfile = args.paramfile
if container_mode():
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
if not overrides:
warning("No parameters provided, using stored one")
if 'ipi' in overrides and overrides['ipi']:
error("Update cluster workflow not available when using ipi")
sys.exit(1)
clusterdir = os.path.expanduser("~/.kcli/clusters/%s" % cluster)
if not os.path.exists(clusterdir):
error("Cluster directory %s not found..." % clusterdir)
sys.exit(1)
if os.path.exists("%s/kcli_parameters.yml" % clusterdir):
with open("%s/kcli_parameters.yml" % clusterdir, 'r') as install:
installparam = yaml.safe_load(install)
data.update(installparam)
plan = installparam.get('plan', plan)
data.update(overrides)
data['basedir'] = '/workdir' if container_mode() else '.'
if plan is None:
plan = cluster
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.update_kube(plan, _type, overrides=data)
def create_vmnic(args):
"""Add nic to vm"""
name = args.name
network = args.network
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
if network is None:
error("Missing network. Leaving...")
sys.exit(1)
pprint(f"Adding nic to vm {name}...")
k.add_nic(name=name, network=network)
def delete_vmnic(args):
"""Delete nic of vm"""
name = args.name
interface = args.interface
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint(f"Deleting nic from vm {name}...")
k.delete_nic(name, interface)
return
def create_pool(args):
"""Create/Delete pool"""
pool = args.pool
pooltype = args.pooltype
path = args.path
thinpool = args.thinpool
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
if path is None:
error("Missing path. Leaving...")
sys.exit(1)
pprint(f"Creating pool {pool}...")
k.create_pool(name=pool, poolpath=path, pooltype=pooltype, thinpool=thinpool)
def delete_pool(args):
"""Delete pool"""
pool = args.pool
full = args.full
yes = args.yes
yes_top = args.yes_top
if not yes and not yes_top:
common.confirm("Are you sure?")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint(f"Deleting pool {pool}...")
result = k.delete_pool(name=pool, full=full)
common.handle_response(result, pool, element='Pool', action='deleted')
def create_plan(args):
"""Create plan"""
plan = args.plan
ansible = args.ansible
url = args.url
path = args.path
container = args.container
inputfile = args.inputfile
force = args.force
pre = not args.skippre
post = not args.skippost
paramfile = args.paramfile
threaded = args.threaded
if inputfile is None:
inputfile = 'kcli_plan.yml'
if container_mode():
inputfile = "/workdir/%s" % inputfile
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
_type = config.ini[config.client].get('type', 'kvm')
overrides.update({'type': _type})
if force:
if plan is None:
error("Force requires specifying a plan name")
return 1
else:
config.delete_plan(plan, unregister=config.rhnunregister)
if plan is None:
plan = nameutils.get_random_name()
pprint(f"Using {plan} as name of the plan")
result = config.plan(plan, ansible=ansible, url=url, path=path, container=container, inputfile=inputfile,
overrides=overrides, pre=pre, post=post, threaded=threaded)
if 'result' in result and result['result'] == 'success':
sys.exit(0)
else:
if 'reason' in result:
error(result['reason'])
sys.exit(1)
def create_playbook(args):
"""Create plan"""
inputfile = args.inputfile
store = args.store
paramfile = args.paramfile
if inputfile is None:
inputfile = 'kcli_plan.yml'
if container_mode():
inputfile = "/workdir/%s" % inputfile
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
_type = baseconfig.ini[baseconfig.client].get('type', 'kvm')
overrides.update({'type': _type})
baseconfig.create_playbook(inputfile, overrides=overrides, store=store)
return 0
def update_plan(args):
"""Update plan"""
autostart = args.autostart
noautostart = args.noautostart
plan = args.plan
url = args.url
path = args.path
container = args.container
inputfile = args.inputfile
paramfile = args.paramfile
if container_mode():
inputfile = "/workdir/%s" % inputfile if inputfile is not None else "/workdir/kcli_plan.yml"
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
if autostart:
config.autostart_plan(plan)
return 0
elif noautostart:
config.noautostart_plan(plan)
return 0
config.plan(plan, url=url, path=path, container=container, inputfile=inputfile, overrides=overrides, update=True)
return 0
def delete_plan(args):
"""Delete plan"""
plans = args.plans
codes = []
yes = args.yes
yes_top = args.yes_top
if not yes and not yes_top:
common.confirm("Are you sure?")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
for plan in plans:
result = config.delete_plan(plan, unregister=config.rhnunregister)
if 'result' in result and result['result'] == 'success':
codes.append(0)
else:
codes.append(4)
sys.exit(4 if 4 in codes else 0)
def expose_plan(args):
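    """Expose plan"""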
plan = args.plan
if plan is None:
plan = nameutils.get_random_name()
pprint(f"Using {plan} as name of the plan")
port = args.port
inputfile = args.inputfile
installermode = args.installermode
if inputfile is None:
inputfile = 'kcli_plan.yml'
if container_mode():
inputfile = "/workdir/%s" % inputfile
overrides = common.get_overrides(param=args.param)
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
extraconfigs = {}
for extraclient in config.extraclients:
extraconfigs[extraclient] = Kconfig(client=extraclient, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
config.expose_plan(plan, inputfile=inputfile, overrides=overrides, port=port, extraconfigs=extraconfigs,
installermode=installermode)
return 0
def start_plan(args):
"""Start plan"""
plan = args.plan
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.start_plan(plan)
return 0
def stop_plan(args):
"""Stop plan"""
plan = args.plan
soft = args.soft
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.stop_plan(plan, soft=soft)
return 0
def autostart_plan(args):
"""Autostart plan"""
plan = args.plan
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.autostart_plan(plan)
return 0
def noautostart_plan(args):
"""Noautostart plan"""
plan = args.plan
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.noautostart_plan(plan)
return 0
def restart_plan(args):
"""Restart plan"""
soft = args.soft
plan = args.plan
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.stop_plan(plan, soft=soft)
config.start_plan(plan)
return 0
def info_generic_app(args):
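    """Info generic kube app"""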
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
baseconfig.info_app_generic(args.app)
def info_openshift_disconnecter(args):
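    """Info openshift disconnecter"""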
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
baseconfig.info_openshift_disconnecter()
def info_openshift_app(args):
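    """Info openshift kube app"""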
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
baseconfig.info_app_openshift(args.app)
def info_plan(args):
"""Info plan """
doc = args.doc
quiet = args.quiet
url = args.url
path = args.path
inputfile = args.inputfile
if container_mode():
inputfile = "/workdir/%s" % inputfile if inputfile is not None else "/workdir/kcli_plan.yml"
if url is None:
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
baseconfig.info_plan(inputfile, quiet=quiet, doc=doc)
else:
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
config.plan('info', url=url, path=path, inputfile=inputfile, info=True, quiet=quiet, doc=doc)
return 0
def info_generic_kube(args):
"""Info Generic kube"""
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
baseconfig.info_kube_generic(quiet=True)
def info_kind_kube(args):
"""Info Kind kube"""
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
baseconfig.info_kube_kind(quiet=True)
def info_k3s_kube(args):
"""Info K3s kube"""
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
baseconfig.info_kube_k3s(quiet=True)
def info_hypershift_kube(args):
"""Info Hypershift kube"""
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
baseconfig.info_kube_hypershift(quiet=True)
def info_openshift_kube(args):
"""Info Openshift kube"""
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
baseconfig.info_kube_openshift(quiet=True)
def info_network(args):
"""Info network """
name = args.name
pprint(f"Providing information about network {name}...")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
networkinfo = config.k.info_network(name)
if networkinfo:
common.pretty_print(networkinfo)
def info_keyword(args):
"""Info keyword"""
keyword = args.keyword
pprint(f"Providing information about keyword {keyword}...")
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, offline=True)
default = baseconfig.default
keywords = baseconfig.list_keywords()
if keyword not in keywords:
error(f"Keyword {keyword} not found")
return 1
else:
print("Default value: %s" % default[keyword])
print("Current value: %s" % keywords[keyword])
def download_plan(args):
"""Download plan"""
plan = args.plan
url = args.url
if plan is None:
plan = nameutils.get_random_name()
pprint(f"Using {plan} as name of the plan")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.plan(plan, url=url, download=True)
return 0
def download_coreos_installer(args):
"""Download Coreos Installer"""
paramfile = args.paramfile
if container_mode():
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
common.get_coreos_installer(version=overrides.get('version', 'latest'), arch=overrides.get('arch'))
def download_kubectl(args):
"""Download Kubectl"""
paramfile = args.paramfile
if container_mode():
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
common.get_kubectl(version=overrides.get('version', 'latest'))
def download_helm(args):
"""Download Helm"""
paramfile = args.paramfile
if container_mode():
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
common.get_helm(version=overrides.get('version', 'latest'))
def download_oc(args):
"""Download Oc"""
paramfile = args.paramfile
if container_mode():
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
common.get_oc(version=overrides.get('version', 'latest'))
def download_openshift_installer(args):
"""Download Openshift Installer"""
paramfile = args.paramfile
if container_mode():
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
return baseconfig.download_openshift_installer(overrides)
def download_okd_installer(args):
"""Download Okd Installer"""
paramfile = args.paramfile
if container_mode():
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
overrides['upstream'] = True
return baseconfig.download_openshift_installer(overrides)
def download_tasty(args):
"""Download Tasty"""
paramfile = args.paramfile
if container_mode():
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
pprint("Using default parameter file kcli_parameters.yml")
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
common.get_tasty(version=overrides.get('version', 'latest'))
def create_pipeline_github(args):
"""Create Github Pipeline"""
plan = args.plan
inputfile = args.inputfile
kube = args.kube
script = args.script
paramfile = args.paramfile
if inputfile is None:
inputfile = 'kcli_plan.yml'
if container_mode():
inputfile = "/workdir/%s" % inputfile
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
overrides = common.get_overrides(param=args.param)
renderfile = baseconfig.create_github_pipeline(plan, inputfile, paramfile=paramfile, overrides=overrides,
kube=kube, script=script)
print(renderfile)
return 0
def create_pipeline_jenkins(args):
"""Create Jenkins Pipeline"""
plan = args.plan
inputfile = args.inputfile
kube = args.kube
paramfile = args.paramfile
if inputfile is None:
inputfile = 'kcli_plan.yml'
if container_mode():
inputfile = "/workdir/%s" % inputfile
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
if not kube and not os.path.exists(inputfile):
error(f"File {inputfile} not found")
return 0
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
renderfile = baseconfig.create_jenkins_pipeline(plan, inputfile, overrides=overrides, kube=kube)
print(renderfile)
return 0
def create_pipeline_tekton(args):
"""Create Tekton Pipeline"""
inputfile = args.inputfile
kube = args.kube
paramfile = args.paramfile
plan = args.plan
if inputfile is None:
inputfile = 'kcli_plan.yml'
if container_mode():
inputfile = "/workdir/%s" % inputfile
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
overrides = common.get_overrides(param=args.param)
renderfile = baseconfig.create_tekton_pipeline(plan, inputfile, paramfile=paramfile, overrides=overrides, kube=kube)
print(renderfile)
return 0
def render_file(args):
"""Render file"""
plan = None
inputfile = args.inputfile
paramfiles = args.paramfile if args.paramfile is not None else []
ignore = args.ignore
if container_mode():
inputfile = "/workdir/%s" % inputfile if inputfile is not None else "/workdir/kcli_plan.yml"
if paramfiles:
paramfiles = ["/workdir/%s" % paramfile for paramfile in paramfiles]
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfiles = ["/workdir/kcli_parameters.yml"]
elif not paramfiles and os.path.exists("kcli_parameters.yml"):
paramfiles = ["kcli_parameters.yml"]
overrides = {}
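    # Merge overrides in increasing precedence: *_default.y(a)ml files, explicit paramfiles, then -P params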
    allparamfiles = glob("*_default.y*ml")
allparamfiles.extend(paramfiles)
for paramfile in allparamfiles:
overrides.update(common.get_overrides(paramfile=paramfile))
overrides.update(common.get_overrides(param=args.param))
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
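    # Expose config defaults and per-client settings to the template as config_* variables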
default_data = {'config_%s' % k: baseconfig.default[k] for k in baseconfig.default}
client_data = {'config_%s' % k: baseconfig.ini[baseconfig.client][k] for k in baseconfig.ini[baseconfig.client]}
client_data['config_type'] = client_data.get('config_type', 'kvm')
client_data['config_host'] = client_data.get('config_host', '127.0.0.1')
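    # Local kvm connections default to the invoking user; anything else defaults to root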
default_user = getuser() if client_data['config_type'] == 'kvm'\
and client_data['config_host'] in ['localhost', '127.0.0.1'] else 'root'
client_data['config_user'] = client_data.get('config_user', default_user)
config_data = default_data.copy()
config_data.update(client_data)
overrides.update(config_data)
    if not os.path.exists(inputfile):
        error(f"File {inputfile} not found")
        return 1
renderfile = baseconfig.process_inputfile(plan, inputfile, overrides=overrides, ignore=ignore)
print(renderfile)
return 0
def create_vmdata(args):
"""Create cloudinit/ignition data for vm"""
args.assets = True
args.profile = None
args.profilefile = None
args.wait = False
args.console = None
args.serial = None
args.count = 1
create_vm(args)
return 0
def create_plandata(args):
"""Create cloudinit/ignition data"""
plan = None
inputfile = args.inputfile
pre = not args.skippre
outputdir = args.outputdir
paramfile = args.paramfile
if container_mode():
inputfile = "/workdir/%s" % inputfile
if paramfile is not None:
paramfile = "/workdir/%s" % paramfile
elif os.path.exists("/workdir/kcli_parameters.yml"):
paramfile = "/workdir/kcli_parameters.yml"
elif paramfile is None and os.path.exists("kcli_parameters.yml"):
paramfile = "kcli_parameters.yml"
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
config_data = {'config_%s' % k: config.ini[config.client][k] for k in config.ini[config.client]}
config_data['config_type'] = config_data.get('config_type', 'kvm')
overrides.update(config_data)
    if not os.path.exists(inputfile):
        error(f"File {inputfile} not found")
        return 1
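    # onlyassets=True makes config.plan return the rendered cloudinit/ignition payloads instead of creating vms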
results = config.plan(plan, inputfile=inputfile, overrides=overrides, onlyassets=True, pre=pre)
if results.get('assets'):
for num, asset in enumerate(results['assets']):
if outputdir is None:
print(asset)
else:
if not os.path.exists(outputdir):
os.mkdir(outputdir)
assetdata = yaml.safe_load(asset)
hostname = assetdata.get('hostname')
if hostname is None:
continue
pprint("Rendering %s" % hostname)
hostnamedir = "%s/%s" % (outputdir, hostname)
if not os.path.exists(hostnamedir):
os.mkdir(hostnamedir)
runcmd = assetdata.get('runcmd', [])
write_files = assetdata.get('write_files', [])
with open("%s/runcmd" % hostnamedir, 'w') as f:
f.write('\n'.join(runcmd))
                SSH_PRIV_LOCATIONS = [location.replace('.pub', '') for location in SSH_PUB_LOCATIONS]
                for _file in write_files:
                    content = _file['content']
                    path = _file['path'].replace('/root/', '')
if 'openshift_pull.json' in path or path in SSH_PRIV_LOCATIONS or path in SSH_PUB_LOCATIONS:
warning("Skipping %s" % path)
continue
if '/' in path and not os.path.exists("%s/%s" % (hostnamedir, os.path.dirname(path))):
os.makedirs("%s/%s" % (hostnamedir, os.path.dirname(path)))
with open("%s/%s/%s" % (hostnamedir, os.path.dirname(path), os.path.basename(path)), 'w') as f:
f.write(content)
else:
with open("%s/%s" % (hostnamedir, path), 'w') as f:
f.write(content)
if outputdir is not None:
renderplan = config.process_inputfile(plan, inputfile, overrides=overrides)
with open("%s/kcli_plan.yml" % outputdir, 'w') as f:
f.write(renderplan)
return 0
def create_plantemplate(args):
"""Create plan template"""
skipfiles = args.skipfiles
skipscripts = args.skipscripts
directory = args.directory
paramfile = args.paramfile
overrides = common.get_overrides(paramfile=paramfile, param=args.param)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
baseconfig.create_plan_template(directory, overrides=overrides, skipfiles=skipfiles, skipscripts=skipscripts)
def create_snapshot_plan(args):
"""Snapshot plan"""
plan = args.plan
snapshot = args.snapshot
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.snapshot_plan(plan, snapshotname=snapshot)
return 0
def delete_snapshot_plan(args):
"""Snapshot plan"""
plan = args.plan
snapshot = args.snapshot
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
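    # Delete the snapshot on every vm that belongs to the plan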
for vm in sorted(k.list(), key=lambda x: x['name']):
name = vm['name']
if vm['plan'] == plan:
pprint(f"Deleting snapshot {snapshot} of vm {name}...")
k.snapshot(snapshot, name, delete=True)
return 0
def revert_snapshot_plan(args):
"""Revert snapshot of plan"""
plan = args.plan
snapshot = args.snapshot
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
config.revert_plan(plan, snapshotname=snapshot)
return 0
def create_repo(args):
"""Create repo"""
repo = args.repo
url = args.url
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
if repo is None:
error("Missing repo. Leaving...")
sys.exit(1)
if url is None:
error("Missing url. Leaving...")
sys.exit(1)
pprint(f"Adding repo {repo}...")
baseconfig.create_repo(repo, url)
return 0
def delete_repo(args):
"""Delete repo"""
repo = args.repo
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
if repo is None:
error("Missing repo. Leaving...")
sys.exit(1)
pprint(f"Deleting repo {repo}...")
baseconfig.delete_repo(repo)
return
def update_repo(args):
"""Update repo"""
repo = args.repo
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
if repo is None:
pprint("Updating all repos...")
repos = baseconfig.list_repos()
for repo in repos:
pprint(f"Updating repo {repo}...")
baseconfig.update_repo(repo)
else:
pprint(f"Updating repo {repo}...")
baseconfig.update_repo(repo)
return
def info_product(args):
"""Info product"""
repo = args.repo
product = args.product
group = args.group
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
pprint(f"Providing information on product {product}...")
baseconfig.info_product(product, repo, group)
def create_product(args):
"""Create product"""
repo = args.repo
product = args.product
latest = args.latest
group = args.group
overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)
    plan = overrides.get('plan')
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
pprint(f"Creating product {product}...")
config.create_product(product, repo=repo, group=group, plan=plan, latest=latest, overrides=overrides)
return 0
def ssh_vm(args):
"""Ssh into vm"""
local = args.L
remote = args.R
D = args.D
X = args.X
Y = args.Y
identityfile = args.identityfile
user = args.user
vmport = args.port
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
name = [common.get_lastvm(baseconfig.client)] if not args.name else args.name
tunnel = baseconfig.tunnel
tunnelhost = baseconfig.tunnelhost
tunneluser = baseconfig.tunneluser
tunnelport = baseconfig.tunnelport
if tunnel and tunnelhost is None and baseconfig.type != 'kubevirt':
error("Tunnel requested but no tunnelhost defined")
sys.exit(1)
insecure = baseconfig.insecure
if len(name) > 1:
cmd = ' '.join(name[1:])
else:
cmd = None
name = name[0]
if '@' in name and len(name.split('@')) == 2:
user = name.split('@')[0]
name = name.split('@')[1]
if os.path.exists("/i_am_a_container") and not os.path.exists("/root/.kcli/config.yml")\
and not os.path.exists("/root/.ssh/config"):
insecure = True
sshcommand = None
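    # Try the local cache first so the ssh command can be built without querying the provider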
if baseconfig.cache:
_list = cache_vms(baseconfig, args.region, args.zone, args.namespace)
vms = [vm for vm in _list if vm['name'] == name]
if vms:
vm = vms[0]
ip = vm.get('ip')
if ip is None:
error(f"No ip found in cache for {name}...")
else:
if user is None:
user = baseconfig.vmuser if baseconfig.vmuser is not None else vm.get('user')
if vmport is None:
vmport = baseconfig.vmport if baseconfig.vmport is not None else vm.get('vmport')
sshcommand = common.ssh(name, ip=ip, user=user, local=local, remote=remote, tunnel=tunnel,
tunnelhost=tunnelhost, tunnelport=tunnelport, tunneluser=tunneluser,
insecure=insecure, cmd=cmd, X=X, Y=Y, D=D, debug=args.debug, vmport=vmport,
identityfile=identityfile)
if sshcommand is None:
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
k = config.k
u, ip, vmport = common._ssh_credentials(k, name)
if tunnel and tunnelhost is None and config.type == 'kubevirt':
info = k.info(name, debug=False)
tunnelhost = k.node_host(name=info.get('host'))
if tunnelhost is None:
error(f"No valid node ip found for {name}")
if ip is None:
return
if user is None:
user = config.vmuser if config.vmuser is not None else u
if vmport is None and config.vmport is not None:
vmport = config.vmport
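        # For kvm/packet, an "ip" with no dots or colons is really a port forwarded on the hypervisor host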
if config.type in ['kvm', 'packet'] and '.' not in ip and ':' not in ip:
vmport = ip
ip = config.host
sshcommand = common.ssh(name, ip=ip, user=user, local=local, remote=remote, tunnel=tunnel,
tunnelhost=tunnelhost, tunnelport=tunnelport, tunneluser=tunneluser,
insecure=insecure, cmd=cmd, X=X, Y=Y, D=D, debug=args.debug, vmport=vmport,
identityfile=identityfile)
if sshcommand is not None:
if find_executable('ssh') is not None:
os.system(sshcommand)
else:
print(sshcommand)
else:
error(f"Couldnt ssh to {name}")
def scp_vm(args):
"""Scp into vm"""
identityfile = args.identityfile
recursive = args.recursive
source = args.source[0]
source = "/workdir/%s" % source if container_mode() else source
destination = args.destination[0]
user = args.user
vmport = args.port
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
tunnel = baseconfig.tunnel
tunnelhost = baseconfig.tunnelhost
tunneluser = baseconfig.tunneluser
tunnelport = baseconfig.tunnelport
if tunnel and tunnelhost is None:
error("Tunnel requested but no tunnelhost defined")
sys.exit(1)
insecure = baseconfig.insecure
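    # A "name:path" spec on the source side means download; on the destination side it means upload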
if len(source.split(':')) == 2:
name, source = source.split(':')
download = True
elif len(destination.split(':')) == 2:
name, destination = destination.split(':')
download = False
else:
error("Couldn't run scp")
return
if '@' in name and len(name.split('@')) == 2:
user, name = name.split('@')
if download:
pprint(f"Retrieving file {source} from {name}")
else:
pprint(f"Copying file {source} to {name}")
scpcommand = None
if baseconfig.cache:
_list = cache_vms(baseconfig, args.region, args.zone, args.namespace)
vms = [vm for vm in _list if vm['name'] == name]
if vms:
vm = vms[0]
ip = vm.get('ip')
if ip is None:
error(f"No ip found in cache for {name}...")
else:
if user is None:
user = baseconfig.vmuser if baseconfig.vmuser is not None else vm.get('user')
if vmport is None:
vmport = baseconfig.vmport if baseconfig.vmport is not None else vm.get('vmport')
scpcommand = common.scp(name, ip=ip, user=user, source=source, destination=destination,
recursive=recursive, tunnel=tunnel, tunnelhost=tunnelhost,
tunnelport=tunnelport, tunneluser=tunneluser, debug=args.debug,
download=download, vmport=vmport, insecure=insecure, identityfile=identityfile)
if scpcommand is None:
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
k = config.k
u, ip, vmport = common._ssh_credentials(k, name)
if ip is None:
return
if user is None:
user = config.vmuser if config.vmuser is not None else u
if vmport is None and config.vmport is not None:
vmport = config.vmport
if config.type in ['kvm', 'packet'] and '.' not in ip and ':' not in ip:
vmport = ip
ip = '127.0.0.1'
scpcommand = common.scp(name, ip=ip, user=user, source=source, destination=destination, recursive=recursive,
tunnel=tunnel, tunnelhost=tunnelhost, tunnelport=tunnelport, tunneluser=tunneluser,
debug=config.debug, download=download, vmport=vmport, insecure=insecure,
identityfile=identityfile)
if scpcommand is not None:
if find_executable('scp') is not None:
os.system(scpcommand)
else:
print(scpcommand)
else:
error("Couldn't run scp")
def create_network(args):
"""Create Network"""
name = args.name
overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)
isolated = args.isolated
cidr = args.cidr
nodhcp = args.nodhcp
domain = args.domain
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
if name is None:
error("Missing Network")
sys.exit(1)
    nat = not isolated
    dhcp = not nodhcp
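    # --dual, when provided, is passed through as the secondary (dual-stack) cidr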
if args.dual is not None:
overrides['dual_cidr'] = args.dual
result = k.create_network(name=name, cidr=cidr, dhcp=dhcp, nat=nat, domain=domain, overrides=overrides)
common.handle_response(result, name, element='Network')
def delete_network(args):
"""Delete Network"""
yes = args.yes
yes_top = args.yes_top
names = args.names
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
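    # yes_top is the -y flag given on the top-level delete command, yes the one on the subcommand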
if not yes and not yes_top:
common.confirm("Are you sure?")
for name in names:
result = k.delete_network(name=name)
common.handle_response(result, name, element='Network', action='deleted')
def create_host_group(args):
"""Generate Host group"""
data = {}
data['_type'] = 'group'
data['name'] = args.name
data['algorithm'] = args.algorithm
data['members'] = args.members
common.create_host(data)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if len(baseconfig.clients) == 1:
baseconfig.set_defaults()
def create_host_kvm(args):
"""Generate Kvm Host"""
data = {}
data['_type'] = 'kvm'
data['name'] = args.name
data['host'] = args.host
data['port'] = args.port
data['user'] = args.user
data['protocol'] = args.protocol
data['url'] = args.url
data['pool'] = args.pool
common.create_host(data)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if len(baseconfig.clients) == 1:
baseconfig.set_defaults()
def create_host_ovirt(args):
"""Create Ovirt Host"""
data = {}
data['name'] = args.name
data['_type'] = 'ovirt'
data['host'] = args.host
data['datacenter'] = args.datacenter
data['ca_file'] = args.ca
data['cluster'] = args.cluster
data['org'] = args.org
data['user'] = args.user
data['password'] = args.password
if args.pool is not None:
data['pool'] = args.pool
data['client'] = args.client
common.create_host(data)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if len(baseconfig.clients) == 1:
baseconfig.set_defaults()
def create_host_gcp(args):
"""Create Gcp Host"""
data = {}
data['name'] = args.name
data['credentials'] = args.credentials
data['project'] = args.project
data['zone'] = args.zone
data['_type'] = 'gcp'
common.create_host(data)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if len(baseconfig.clients) == 1:
baseconfig.set_defaults()
def create_host_aws(args):
"""Create Aws Host"""
data = {}
data['name'] = args.name
data['_type'] = 'aws'
data['access_key_id'] = args.access_key_id
data['access_key_secret'] = args.access_key_secret
data['region'] = args.region
data['keypair'] = args.keypair
common.create_host(data)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if len(baseconfig.clients) == 1:
baseconfig.set_defaults()
def create_host_ibm(args):
""""Create IBM Cloud host"""
data = {}
data['name'] = args.name
data['_type'] = 'ibm'
data['iam_api_key'] = args.iam_api_key
data['region'] = args.region
data['vpc'] = args.vpc
data['zone'] = args.zone
data['access_key_id'] = args.access_key_id
data['secret_access_key'] = args.access_key_secret
common.create_host(data)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if len(baseconfig.clients) == 1:
baseconfig.set_defaults()
def create_host_openstack(args):
"""Create Openstack Host"""
data = {}
data['name'] = args.name
data['_type'] = 'openstack'
data['user'] = args.user
data['password'] = args.password
data['project'] = args.project
data['domain'] = args.domain
data['auth_url'] = args.auth_url
common.create_host(data)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if len(baseconfig.clients) == 1:
baseconfig.set_defaults()
def create_host_kubevirt(args):
"""Create Kubevirt Host"""
data = {}
data['name'] = args.name
data['_type'] = 'kubevirt'
if args.pool is not None:
data['pool'] = args.pool
if args.token is not None:
data['token'] = args.token
if args.ca is not None:
data['ca_file'] = args.ca
data['multus'] = args.multus
data['cdi'] = args.cdi
if args.host is not None:
data['host'] = args.host
if args.port is not None:
data['port'] = args.port
common.create_host(data)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if len(baseconfig.clients) == 1:
baseconfig.set_defaults()
def create_host_vsphere(args):
"""Create Vsphere Host"""
data = {}
data['name'] = args.name
data['_type'] = 'vsphere'
data['host'] = args.host
data['user'] = args.user
data['password'] = args.password
data['datacenter'] = args.datacenter
data['cluster'] = args.cluster
if args.pool is not None:
data['pool'] = args.pool
common.create_host(data)
baseconfig = Kbaseconfig(client=args.client, debug=args.debug, quiet=True)
if len(baseconfig.clients) == 1:
baseconfig.set_defaults()
def create_container(args):
"""Create container"""
name = args.name
image = args.image
profile = args.profile
overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
cont = Kcontainerconfig(config, client=args.containerclient).cont
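    # Container profiles are the config profiles whose type is 'container'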
containerprofiles = {k: v for k, v in config.profiles.items() if 'type' in v and v['type'] == 'container'}
if name is None:
name = nameutils.get_random_name()
if config.type == 'kubevirt':
name = name.replace('_', '-')
if image is not None:
profile = image
if image in containerprofiles:
pprint(f"Using {image} as a profile")
else:
containerprofiles[image] = {'image': image}
pprint(f"Deploying container {name} from profile {profile}...")
profile = containerprofiles[profile]
    image = profile.get('image')
if image is None:
error(f"Missing image in profile {profile}. Leaving...")
sys.exit(1)
cmd = profile.get('cmd')
ports = profile.get('ports')
environment = profile.get('environment')
volumes = next((e for e in [profile.get('volumes'), profile.get('disks')] if e is not None), None)
profile.update(overrides)
params = {'name': name, 'image': image, 'ports': ports, 'volumes': volumes, 'environment': environment,
'overrides': overrides}
if cmd is not None:
params['cmds'] = [cmd]
cont.create_container(**params)
success(f"container {name} created")
return
def snapshotcreate_vm(args):
"""Create snapshot"""
snapshot = args.snapshot
name = args.name
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint(f"Creating snapshot of {name} named {snapshot}...")
result = k.snapshot(snapshot, name)
code = common.handle_response(result, name, element='', action='snapshotted')
return code
def snapshotdelete_vm(args):
"""Delete snapshot"""
snapshot = args.snapshot
name = args.name
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint(f"Deleting snapshot {snapshot} of vm {name}...")
result = k.snapshot(snapshot, name, delete=True)
code = common.handle_response(result, name, element='', action='snapshot deleted')
return code
def snapshotrevert_vm(args):
"""Revert snapshot of vm"""
snapshot = args.snapshot
name = args.name
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint(f"Reverting snapshot {snapshot} of vm {name}...")
result = k.snapshot(snapshot, name, revert=True)
code = common.handle_response(result, name, element='', action='snapshot reverted')
return code
def snapshotlist_vm(args):
"""List snapshots of vm"""
name = args.name
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint(f"Listing snapshots of {name}...")
snapshots = k.snapshot('', name, listing=True)
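    # A dict return value is an error payload, meaning the vm was not found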
if isinstance(snapshots, dict):
error(f"Vm {name} not found")
return
else:
for snapshot in snapshots:
print(snapshot)
return
def create_bucket(args):
"""Create bucket"""
buckets = args.buckets
public = args.public
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
for bucket in buckets:
pprint(f"Creating bucket {bucket}...")
k.create_bucket(bucket, public=public)
def delete_bucket(args):
"""Delete bucket"""
yes_top = args.yes_top
yes = args.yes
if not yes and not yes_top:
common.confirm("Are you sure?")
buckets = args.buckets
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
for bucket in buckets:
pprint(f"Deleting bucket {bucket}...")
k.delete_bucket(bucket)
def list_bucket(args):
"""List buckets"""
pprint("Listing buckets...")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
buckets = k.list_buckets()
bucketstable = PrettyTable(["Bucket"])
for bucket in sorted(buckets):
bucketstable.add_row([bucket])
bucketstable.align["Bucket"] = "l"
print(bucketstable)
def list_bucketfiles(args):
"""List bucket files"""
bucket = args.bucket
pprint(f"Listing bucket files of bucket {bucket}...")
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
bucketfiles = k.list_bucketfiles(bucket)
bucketfilestable = PrettyTable(["BucketFiles"])
for bucketfile in sorted(bucketfiles):
bucketfilestable.add_row([bucketfile])
bucketfilestable.align["BucketFiles"] = "l"
print(bucketfilestable)
def create_bucketfile(args):
bucket = args.bucket
temp_url = args.temp
public = args.public
path = args.path
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint(f"Uploading file {path} to bucket {bucket}...")
result = k.upload_to_bucket(bucket, path, temp_url=temp_url, public=public)
if result is not None:
pprint(f"bucketfile available at the following url:\n\n{result}")
def delete_bucketfile(args):
bucket = args.bucket
path = args.path
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint(f"Deleting file {path} to bucket {bucket}...")
k.delete_from_bucket(bucket, path)
def download_bucketfile(args):
bucket = args.bucket
path = args.path
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
pprint(f"Downloading file {path} from bucket {bucket}...")
k.download_from_bucket(bucket, path)
def report_host(args):
"""Report info about host"""
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)
k = config.k
k.report()
def switch_host(args):
"""Handle host"""
host = args.name
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
result = baseconfig.switch_host(host)
if result['result'] == 'success':
sys.exit(0)
else:
sys.exit(1)
def list_keyword(args):
"""List keywords"""
baseconfig = Kbaseconfig(client=args.client, debug=args.debug)
default = baseconfig.default
keywordstable = PrettyTable(["Keyword", "Default Value", "Current Value"])
keywordstable.align["Client"] = "l"
keywords = baseconfig.list_keywords()
for keyword in sorted(keywords):
value = keywords[keyword]
default_value = default[keyword]
keywordstable.add_row([keyword, default_value, value])
print(keywordstable)
return
def create_workflow(args):
"""Create workflow"""
workflow = args.workflow
if workflow is None:
workflow = nameutils.get_random_name()
pprint(f"Using {workflow} as name of the workflow")
overrides = common.get_overrides(paramfile=args.paramfile, param=args.param)
config = None
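    # When target looks like a vm name (no dots or colons), resolve its ssh credentials through the provider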
if 'target' in overrides:
user = None
vmport = None
target = overrides['target']
if '@' in target:
user, hostname = target.split('@')
else:
hostname = target
if '.' not in hostname and ':' not in hostname:
config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone,
namespace=args.namespace)
vmuser, vmip, vmport = _ssh_credentials(config.k, hostname)
if vmip is not None:
overrides['target'] = {'user': user or vmuser, 'port': vmport, 'ip': vmip, 'hostname': hostname}
if config is None:
config = Kbaseconfig(client=args.client, debug=args.debug)
result = config.create_workflow(workflow, overrides)
sys.exit(0 if result['result'] == 'success' else 1)
def cli():
"""
"""
PARAMETERS_HELP = 'specify parameter or keyword for rendering (multiple can be specified)'
    parser = argparse.ArgumentParser(description='Libvirt/Ovirt/Vsphere/Gcp/Aws/Openstack/Kubevirt/Ibm Cloud Wrapper')
parser.add_argument('-C', '--client')
parser.add_argument('--containerclient', help='Containerclient to use')
parser.add_argument('--dnsclient', help='Dnsclient to use')
parser.add_argument('-d', '--debug', action='store_true')
    parser.add_argument('-n', '--namespace', help='Namespace to use. Specific to kubevirt')
    parser.add_argument('-r', '--region', help='Region to use. Specific to aws/gcp/ibm')
    parser.add_argument('-z', '--zone', help='Zone to use. Specific to gcp/ibm')
subparsers = parser.add_subparsers(metavar='', title='Available Commands')
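    # Each top-level verb (create, delete, list, ...) gets its own subparser tree below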
containerconsole_desc = 'Attach To Container'
containerconsole_parser = subparsers.add_parser('attach', description=containerconsole_desc,
help=containerconsole_desc)
containerconsole_parser.add_argument('name', metavar='CONTAINERNAME', nargs='?')
containerconsole_parser.set_defaults(func=console_container)
changelog_desc = 'Changelog'
changelog_epilog = "examples:\n%s" % changelog
changelog_parser = argparse.ArgumentParser(add_help=False)
changelog_parser.add_argument('diff', metavar='DIFF', nargs=argparse.REMAINDER)
changelog_parser.set_defaults(func=get_changelog)
subparsers.add_parser('changelog', parents=[changelog_parser], description=changelog_desc, help=changelog_desc,
epilog=changelog_epilog, formatter_class=rawhelp)
create_desc = 'Create Object'
create_parser = subparsers.add_parser('create', description=create_desc, help=create_desc, aliases=['add', 'run'])
create_subparsers = create_parser.add_subparsers(metavar='', dest='subcommand_create')
vmclone_desc = 'Clone Vm'
vmclone_epilog = None
vmclone_parser = subparsers.add_parser('clone', description=vmclone_desc, help=vmclone_desc, epilog=vmclone_epilog,
formatter_class=rawhelp)
vmclone_parser.add_argument('-b', '--base', help='Base VM', metavar='BASE')
vmclone_parser.add_argument('-f', '--full', action='store_true', help='Full Clone')
vmclone_parser.add_argument('-s', '--start', action='store_true', help='Start cloned VM')
vmclone_parser.add_argument('name', metavar='VMNAME')
vmclone_parser.set_defaults(func=clone_vm)
vmconsole_desc = 'Vm Console (vnc/spice/serial)'
vmconsole_epilog = "examples:\n%s" % vmconsole
vmconsole_parser = argparse.ArgumentParser(add_help=False)
vmconsole_parser.add_argument('-s', '--serial', action='store_true')
vmconsole_parser.add_argument('name', metavar='VMNAME', nargs='?')
vmconsole_parser.set_defaults(func=console_vm)
subparsers.add_parser('console', parents=[vmconsole_parser], description=vmconsole_desc, help=vmconsole_desc,
epilog=vmconsole_epilog, formatter_class=rawhelp)
delete_desc = 'Delete Object'
delete_parser = subparsers.add_parser('delete', description=delete_desc, help=delete_desc, aliases=['remove'])
    delete_parser.add_argument('-y', '--yes', action='store_true', help="Don't ask for confirmation", dest="yes_top")
delete_subparsers = delete_parser.add_subparsers(metavar='', dest='subcommand_delete')
disable_desc = 'Disable Host'
disable_parser = subparsers.add_parser('disable', description=disable_desc, help=disable_desc)
disable_subparsers = disable_parser.add_subparsers(metavar='', dest='subcommand_disable')
    download_desc = 'Download assets like images, plans or binaries'
download_parser = subparsers.add_parser('download', description=download_desc, help=download_desc)
download_subparsers = download_parser.add_subparsers(metavar='', dest='subcommand_download')
enable_desc = 'Enable Host'
enable_parser = subparsers.add_parser('enable', description=enable_desc, help=enable_desc)
enable_subparsers = enable_parser.add_subparsers(metavar='', dest='subcommand_enable')
vmexport_desc = 'Export Vm'
vmexport_epilog = "examples:\n%s" % vmexport
vmexport_parser = subparsers.add_parser('export', description=vmexport_desc, help=vmexport_desc,
epilog=vmexport_epilog,
formatter_class=rawhelp)
vmexport_parser.add_argument('-i', '--image', help='Name for the generated image. Uses the vm name otherwise',
metavar='IMAGE')
vmexport_parser.add_argument('names', metavar='VMNAMES', nargs='*')
vmexport_parser.set_defaults(func=export_vm)
expose_desc = 'Expose Object'
expose_parser = subparsers.add_parser('expose', description=expose_desc, help=expose_desc)
expose_subparsers = expose_parser.add_subparsers(metavar='', dest='subcommand_expose')
hostlist_desc = 'List Hosts'
info_desc = 'Info Host/Kube/Plan/Vm'
info_parser = subparsers.add_parser('info', description=info_desc, help=info_desc, aliases=['show'])
info_subparsers = info_parser.add_subparsers(metavar='', dest='subcommand_info')
list_desc = 'List Object'
list_epilog = "examples:\n%s" % _list
list_parser = subparsers.add_parser('list', description=list_desc, help=list_desc, aliases=['get'],
epilog=list_epilog,
formatter_class=rawhelp)
list_subparsers = list_parser.add_subparsers(metavar='', dest='subcommand_list')
render_desc = 'Render file'
render_parser = subparsers.add_parser('render', description=render_desc, help=render_desc)
render_parser.add_argument('-f', '--inputfile', help='Input Plan/File', default='kcli_plan.yml')
render_parser.add_argument('-i', '--ignore', action='store_true', help='Ignore missing variables')
render_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
render_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE', action='append')
render_parser.set_defaults(func=render_file)
restart_desc = 'Restart Vm/Plan/Container'
restart_parser = subparsers.add_parser('restart', description=restart_desc, help=restart_desc)
restart_subparsers = restart_parser.add_subparsers(metavar='', dest='subcommand_restart')
revert_desc = 'Revert Vm/Plan Snapshot'
revert_parser = subparsers.add_parser('revert', description=revert_desc, help=revert_desc)
revert_subparsers = revert_parser.add_subparsers(metavar='', dest='subcommand_revert')
scale_desc = 'Scale Kube'
scale_parser = subparsers.add_parser('scale', description=scale_desc, help=scale_desc)
scale_subparsers = scale_parser.add_subparsers(metavar='', dest='subcommand_scale')
vmscp_desc = 'Scp Into Vm'
vmscp_epilog = None
vmscp_parser = argparse.ArgumentParser(add_help=False)
vmscp_parser.add_argument('-i', '--identityfile', help='Identity file')
vmscp_parser.add_argument('-r', '--recursive', help='Recursive', action='store_true')
vmscp_parser.add_argument('-u', '-l', '--user', help='User for ssh')
vmscp_parser.add_argument('-p', '-P', '--port', help='Port for ssh')
vmscp_parser.add_argument('source', nargs=1)
vmscp_parser.add_argument('destination', nargs=1)
vmscp_parser.set_defaults(func=scp_vm)
subparsers.add_parser('scp', parents=[vmscp_parser], description=vmscp_desc, help=vmscp_desc, epilog=vmscp_epilog,
formatter_class=rawhelp)
vmssh_desc = 'Ssh Into Vm'
vmssh_epilog = None
vmssh_parser = argparse.ArgumentParser(add_help=False)
vmssh_parser.add_argument('-D', help='Dynamic Forwarding', metavar='LOCAL')
vmssh_parser.add_argument('-L', help='Local Forwarding', metavar='LOCAL')
vmssh_parser.add_argument('-R', help='Remote Forwarding', metavar='REMOTE')
vmssh_parser.add_argument('-X', action='store_true', help='Enable X11 Forwarding')
    vmssh_parser.add_argument('-Y', action='store_true', help='Enable X11 Forwarding (Insecure)')
vmssh_parser.add_argument('-i', '--identityfile', help='Identity file')
    vmssh_parser.add_argument('-p', '--port', help='Port for ssh')
vmssh_parser.add_argument('-u', '-l', '--user', help='User for ssh')
vmssh_parser.add_argument('name', metavar='VMNAME', nargs='*')
vmssh_parser.set_defaults(func=ssh_vm)
subparsers.add_parser('ssh', parents=[vmssh_parser], description=vmssh_desc, help=vmssh_desc, epilog=vmssh_epilog,
formatter_class=rawhelp)
start_desc = 'Start Vm/Plan/Container'
start_epilog = "examples:\n%s" % start
start_parser = subparsers.add_parser('start', description=start_desc, help=start_desc, epilog=start_epilog,
formatter_class=rawhelp)
start_subparsers = start_parser.add_subparsers(metavar='', dest='subcommand_start')
stop_desc = 'Stop Vm/Plan/Container'
stop_parser = subparsers.add_parser('stop', description=stop_desc, help=stop_desc)
stop_subparsers = stop_parser.add_subparsers(metavar='', dest='subcommand_stop')
switch_desc = 'Switch Host'
switch_parser = subparsers.add_parser('switch', description=switch_desc, help=switch_desc)
switch_subparsers = switch_parser.add_subparsers(metavar='', dest='subcommand_switch')
sync_desc = 'Sync Host'
sync_parser = subparsers.add_parser('sync', description=sync_desc, help=sync_desc)
sync_subparsers = sync_parser.add_subparsers(metavar='', dest='subcommand_sync')
update_desc = 'Update Vm/Plan/Repo'
update_parser = subparsers.add_parser('update', description=update_desc, help=update_desc)
update_subparsers = update_parser.add_subparsers(metavar='', dest='subcommand_update')
version_desc = 'Version'
version_epilog = None
version_parser = argparse.ArgumentParser(add_help=False)
version_parser.set_defaults(func=get_version)
subparsers.add_parser('version', parents=[version_parser], description=version_desc, help=version_desc,
epilog=version_epilog, formatter_class=rawhelp)
# sub subcommands
createapp_desc = 'Create Kube Apps'
createapp_parser = create_subparsers.add_parser('app', description=createapp_desc,
help=createapp_desc, aliases=['apps'])
createapp_subparsers = createapp_parser.add_subparsers(metavar='', dest='subcommand_create_app')
appgenericcreate_desc = 'Create Kube App Generic'
appgenericcreate_epilog = None
appgenericcreate_parser = createapp_subparsers.add_parser('generic', description=appgenericcreate_desc,
help=appgenericcreate_desc,
epilog=appgenericcreate_epilog, formatter_class=rawhelp)
appgenericcreate_parser.add_argument('--outputdir', '-o', help='Output directory', metavar='OUTPUTDIR')
    appgenericcreate_parser.add_argument('-P', '--param', action='append', help=PARAMETERS_HELP, metavar='PARAM')
appgenericcreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
appgenericcreate_parser.add_argument('apps', metavar='APPS', nargs='*')
appgenericcreate_parser.set_defaults(func=create_app_generic)
appopenshiftcreate_desc = 'Create Kube App Openshift'
appopenshiftcreate_epilog = "examples:\n%s" % appopenshiftcreate
appopenshiftcreate_parser = createapp_subparsers.add_parser('openshift', description=appopenshiftcreate_desc,
help=appopenshiftcreate_desc,
epilog=appopenshiftcreate_epilog,
formatter_class=rawhelp)
appopenshiftcreate_parser.add_argument('--outputdir', '-o', help='Output directory', metavar='OUTPUTDIR')
appopenshiftcreate_parser.add_argument('-P', '--param', action='append',
help=PARAMETERS_HELP, metavar='PARAM')
appopenshiftcreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
appopenshiftcreate_parser.add_argument('apps', metavar='APPS', nargs='*')
appopenshiftcreate_parser.set_defaults(func=create_app_openshift)
deleteapp_desc = 'Delete Kube App'
deleteapp_parser = delete_subparsers.add_parser('app', description=deleteapp_desc,
help=deleteapp_desc, aliases=['apps'])
deleteapp_subparsers = deleteapp_parser.add_subparsers(metavar='', dest='subcommand_delete_app')
appgenericdelete_desc = 'Delete Kube App Generic'
appgenericdelete_epilog = None
appgenericdelete_parser = deleteapp_subparsers.add_parser('generic', description=appgenericdelete_desc,
help=appgenericdelete_desc,
epilog=appgenericdelete_epilog, formatter_class=rawhelp)
appgenericdelete_parser.add_argument('-P', '--param', action='append',
help=PARAMETERS_HELP,
metavar='PARAM')
appgenericdelete_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
appgenericdelete_parser.add_argument('apps', metavar='APPS', nargs='*')
appgenericdelete_parser.set_defaults(func=delete_app_generic)
appopenshiftdelete_desc = 'Delete Kube App Openshift'
appopenshiftdelete_epilog = None
appopenshiftdelete_parser = deleteapp_subparsers.add_parser('openshift', description=appopenshiftdelete_desc,
help=appopenshiftdelete_desc,
epilog=appopenshiftdelete_epilog,
formatter_class=rawhelp)
appopenshiftdelete_parser.add_argument('-P', '--param', action='append',
help=PARAMETERS_HELP,
metavar='PARAM')
appopenshiftdelete_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
appopenshiftdelete_parser.add_argument('apps', metavar='APPS', nargs='*')
appopenshiftdelete_parser.set_defaults(func=delete_app_openshift)
appinfo_desc = 'Info App'
appinfo_parser = info_subparsers.add_parser('app', description=appinfo_desc, help=appinfo_desc)
appinfo_subparsers = appinfo_parser.add_subparsers(metavar='', dest='subcommand_info_app')
appgenericinfo_desc = 'Info Generic App'
appgenericinfo_parser = appinfo_subparsers.add_parser('generic', description=appgenericinfo_desc,
help=appgenericinfo_desc)
appgenericinfo_parser.add_argument('app', metavar='APP')
appgenericinfo_parser.set_defaults(func=info_generic_app)
appopenshiftinfo_desc = 'Info Openshift App'
appopenshiftinfo_parser = appinfo_subparsers.add_parser('openshift', description=appopenshiftinfo_desc,
help=appopenshiftinfo_desc)
appopenshiftinfo_parser.add_argument('app', metavar='APP')
appopenshiftinfo_parser.set_defaults(func=info_openshift_app)
    openshiftdisconnecterinfo_desc = 'Info Openshift Disconnecter'
openshiftdisconnecterinfo_parser = info_subparsers.add_parser('disconnecter',
description=openshiftdisconnecterinfo_desc,
help=openshiftdisconnecterinfo_desc,
aliases=['openshift-disconnecter'])
openshiftdisconnecterinfo_parser.set_defaults(func=info_openshift_disconnecter)
listapp_desc = 'List Available Kube Apps'
listapp_parser = list_subparsers.add_parser('app', description=listapp_desc,
help=listapp_desc, aliases=['apps'])
listapp_subparsers = listapp_parser.add_subparsers(metavar='', dest='subcommand_list_app')
appgenericlist_desc = 'List Available Kube Apps Generic'
appgenericlist_parser = listapp_subparsers.add_parser('generic', description=appgenericlist_desc,
help=appgenericlist_desc)
appgenericlist_parser.set_defaults(func=list_apps_generic)
appopenshiftlist_desc = 'List Available Kube Components Openshift'
appopenshiftlist_parser = listapp_subparsers.add_parser('openshift', description=appopenshiftlist_desc,
help=appopenshiftlist_desc)
appopenshiftlist_parser.add_argument('-i', '--installed', action='store_true', help='Show installed apps')
appopenshiftlist_parser.set_defaults(func=list_apps_openshift)
bucketcreate_desc = 'Create Bucket'
bucketcreate_epilog = None
bucketcreate_parser = create_subparsers.add_parser('bucket', description=bucketcreate_desc,
help=bucketcreate_desc, epilog=bucketcreate_epilog,
formatter_class=rawhelp)
bucketcreate_parser.add_argument('-p', '--public', action='store_true', help='Make the bucket public')
    bucketcreate_parser.add_argument('-P', '--param', action='append', help=PARAMETERS_HELP, metavar='PARAM')
bucketcreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
bucketcreate_parser.add_argument('buckets', metavar='BUCKETS', nargs='+')
bucketcreate_parser.set_defaults(func=create_bucket)
bucketfilecreate_desc = 'Create Bucket file'
bucketfilecreate_parser = argparse.ArgumentParser(add_help=False)
bucketfilecreate_parser.add_argument('-p', '--public', action='store_true', help='Make the file public')
bucketfilecreate_parser.add_argument('-t', '--temp', action='store_true', help='Get temp url')
bucketfilecreate_parser.add_argument('bucket', metavar='BUCKET')
bucketfilecreate_parser.add_argument('path', metavar='PATH')
bucketfilecreate_parser.set_defaults(func=create_bucketfile)
create_subparsers.add_parser('bucket-file', parents=[bucketfilecreate_parser],
description=bucketfilecreate_desc, help=bucketfilecreate_desc)
bucketfiledelete_desc = 'Delete Bucket file'
bucketfiledelete_parser = argparse.ArgumentParser(add_help=False)
bucketfiledelete_parser.add_argument('bucket', metavar='BUCKET')
bucketfiledelete_parser.add_argument('path', metavar='PATH')
bucketfiledelete_parser.set_defaults(func=delete_bucketfile)
delete_subparsers.add_parser('bucket-file', parents=[bucketfiledelete_parser],
description=bucketfiledelete_desc, help=bucketfiledelete_desc)
bucketfiledownload_desc = 'Download Bucket file'
bucketfiledownload_parser = argparse.ArgumentParser(add_help=False)
bucketfiledownload_parser.add_argument('bucket', metavar='BUCKET')
bucketfiledownload_parser.add_argument('path', metavar='PATH')
bucketfiledownload_parser.set_defaults(func=download_bucketfile)
download_subparsers.add_parser('bucket-file', parents=[bucketfiledownload_parser],
description=bucketfiledownload_desc, help=bucketfiledownload_desc)
bucketdelete_desc = 'Delete Bucket'
bucketdelete_parser = delete_subparsers.add_parser('bucket', description=bucketdelete_desc, help=bucketdelete_desc)
    bucketdelete_parser.add_argument('-y', '--yes', action='store_true', help="Don't ask for confirmation")
bucketdelete_parser.add_argument('buckets', metavar='BUCKETS', nargs='+')
bucketdelete_parser.set_defaults(func=delete_bucket)
bucketlist_desc = 'List Buckets'
bucketlist_parser = list_subparsers.add_parser('bucket', description=bucketlist_desc, help=bucketlist_desc,
aliases=['buckets'])
bucketlist_parser.set_defaults(func=list_bucket)
bucketfileslist_desc = 'List Bucket files'
bucketfileslist_parser = list_subparsers.add_parser('bucket-file', description=bucketfileslist_desc,
help=bucketfileslist_desc, aliases=['bucket-files'])
bucketfileslist_parser.add_argument('bucket', metavar='BUCKET')
bucketfileslist_parser.set_defaults(func=list_bucketfiles)
cachedelete_desc = 'Delete Cache'
cachedelete_parser = delete_subparsers.add_parser('cache', description=cachedelete_desc, help=cachedelete_desc)
    cachedelete_parser.add_argument('-y', '--yes', action='store_true', help="Don't ask for confirmation")
cachedelete_parser.set_defaults(func=delete_cache)
containercreate_desc = 'Create Container'
containercreate_epilog = None
containercreate_parser = create_subparsers.add_parser('container', description=containercreate_desc,
help=containercreate_desc, epilog=containercreate_epilog,
formatter_class=rawhelp)
containercreate_parser_group = containercreate_parser.add_mutually_exclusive_group(required=True)
containercreate_parser_group.add_argument('-i', '--image', help='Image to use', metavar='Image')
containercreate_parser_group.add_argument('-p', '--profile', help='Profile to use', metavar='PROFILE')
    containercreate_parser.add_argument('-P', '--param', action='append', help=PARAMETERS_HELP, metavar='PARAM')
containercreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
containercreate_parser.add_argument('name', metavar='NAME', nargs='?')
containercreate_parser.set_defaults(func=create_container)
containerdelete_desc = 'Delete Container'
containerdelete_parser = delete_subparsers.add_parser('container', description=containerdelete_desc,
help=containerdelete_desc)
    containerdelete_parser.add_argument('-y', '--yes', action='store_true', help="Don't ask for confirmation")
    containerdelete_parser.add_argument('names', metavar='CONTAINERNAMES', nargs='+')
containerdelete_parser.set_defaults(func=delete_container)
containerimagelist_desc = 'List Container Images'
containerimagelist_parser = list_subparsers.add_parser('container-image', description=containerimagelist_desc,
help=containerimagelist_desc,
aliases=['container-images'])
containerimagelist_parser.set_defaults(func=list_containerimage)
containerlist_desc = 'List Containers'
containerlist_parser = list_subparsers.add_parser('container', description=containerlist_desc,
help=containerlist_desc, aliases=['containers'])
containerlist_parser.add_argument('--filters', choices=('up', 'down'))
containerlist_parser.set_defaults(func=list_container)
containerprofilelist_desc = 'List Container Profiles'
containerprofilelist_parser = list_subparsers.add_parser('container-profile', description=containerprofilelist_desc,
help=containerprofilelist_desc,
aliases=['container-profiles'])
containerprofilelist_parser.add_argument('--short', action='store_true')
containerprofilelist_parser.set_defaults(func=profilelist_container)
containerrestart_desc = 'Restart Containers'
containerrestart_parser = restart_subparsers.add_parser('container', description=containerrestart_desc,
help=containerrestart_desc)
containerrestart_parser.add_argument('names', metavar='CONTAINERNAMES', nargs='*')
containerrestart_parser.set_defaults(func=restart_container)
containerstart_desc = 'Start Containers'
containerstart_parser = start_subparsers.add_parser('container', description=containerstart_desc,
help=containerstart_desc)
    containerstart_parser.add_argument('names', metavar='CONTAINERNAMES', nargs='*')
containerstart_parser.set_defaults(func=start_container)
containerstop_desc = 'Stop Containers'
containerstop_parser = stop_subparsers.add_parser('container', description=containerstop_desc,
help=containerstop_desc)
containerstop_parser.add_argument('names', metavar='CONTAINERNAMES', nargs='*')
containerstop_parser.set_defaults(func=stop_container)
dnscreate_desc = 'Create Dns Entries'
dnscreate_epilog = "examples:\n%s" % dnscreate
dnscreate_parser = create_subparsers.add_parser('dns', description=dnscreate_desc, help=dnscreate_desc,
epilog=dnscreate_epilog,
formatter_class=rawhelp)
dnscreate_parser.add_argument('-a', '--alias', action='append', help='specify alias (can specify multiple)',
metavar='ALIAS')
dnscreate_parser.add_argument('-d', '--domain', help='Domain where to create entry', metavar='DOMAIN')
dnscreate_parser.add_argument('-n', '--net', help='Network where to create entry. Defaults to default',
default='default', metavar='NET')
dnscreate_parser.add_argument('-i', '--ip', help='Ip', metavar='IP')
dnscreate_parser.add_argument('names', metavar='NAMES', nargs='*')
dnscreate_parser.set_defaults(func=create_dns)
dnsdelete_desc = 'Delete Dns Entries'
dnsdelete_parser = delete_subparsers.add_parser('dns', description=dnsdelete_desc, help=dnsdelete_desc)
dnsdelete_parser.add_argument('-a', '--all', action='store_true',
help='Whether to delete the entire host block. Libvirt specific')
dnsdelete_parser.add_argument('-d', '--domain', help='Domain of the entry', metavar='DOMAIN')
dnsdelete_parser.add_argument('-n', '--net', help='Network where to delete entry. Defaults to default',
default='default', metavar='NET')
dnsdelete_parser.add_argument('names', metavar='NAMES', nargs='*')
dnsdelete_parser.set_defaults(func=delete_dns)
dnslist_desc = 'List Dns Entries'
dnslist_parser = argparse.ArgumentParser(add_help=False)
dnslist_parser.add_argument('--short', action='store_true')
dnslist_parser.add_argument('domain', metavar='DOMAIN', help='Domain where to list entry (network for libvirt)')
dnslist_parser.set_defaults(func=list_dns)
list_subparsers.add_parser('dns', parents=[dnslist_parser], description=dnslist_desc, help=dnslist_desc)
hostcreate_desc = 'Create Host'
hostcreate_epilog = "examples:\n%s" % hostcreate
hostcreate_parser = create_subparsers.add_parser('host', help=hostcreate_desc, description=hostcreate_desc,
aliases=['client'], epilog=hostcreate_epilog,
formatter_class=rawhelp)
hostcreate_subparsers = hostcreate_parser.add_subparsers(metavar='', dest='subcommand_create_host')
awshostcreate_desc = 'Create Aws Host'
awshostcreate_parser = hostcreate_subparsers.add_parser('aws', help=awshostcreate_desc,
description=awshostcreate_desc)
awshostcreate_parser.add_argument('--access_key_id', help='Access Key Id', metavar='ACCESS_KEY_ID', required=True)
awshostcreate_parser.add_argument('--access_key_secret', help='Access Key Secret', metavar='ACCESS_KEY_SECRET',
required=True)
awshostcreate_parser.add_argument('-k', '--keypair', help='Keypair', metavar='KEYPAIR', required=True)
awshostcreate_parser.add_argument('-r', '--region', help='Region', metavar='REGION', required=True)
awshostcreate_parser.add_argument('name', metavar='NAME')
awshostcreate_parser.set_defaults(func=create_host_aws)
ibmhostcreate_desc = 'Create IBM Cloud Host'
ibmhostcreate_parser = hostcreate_subparsers.add_parser('ibm', help=ibmhostcreate_desc,
description=ibmhostcreate_desc)
ibmhostcreate_parser.add_argument('--iam_api_key', help='IAM API Key', metavar='IAM_API_KEY', required=True)
ibmhostcreate_parser.add_argument('--access_key_id', help='Access Key Id', metavar='ACCESS_KEY_ID')
ibmhostcreate_parser.add_argument('--access_key_secret', help='Access Key Secret', metavar='ACCESS_KEY_SECRET')
ibmhostcreate_parser.add_argument('--vpc', help='VPC name', metavar='VPC')
ibmhostcreate_parser.add_argument('--zone', help='Zone within the region', metavar='ZONE')
ibmhostcreate_parser.add_argument('-r', '--region', help='Region', metavar='REGION', required=True)
ibmhostcreate_parser.add_argument('name', metavar='NAME')
ibmhostcreate_parser.set_defaults(func=create_host_ibm)
gcphostcreate_desc = 'Create Gcp Host'
gcphostcreate_parser = hostcreate_subparsers.add_parser('gcp', help=gcphostcreate_desc,
description=gcphostcreate_desc)
gcphostcreate_parser.add_argument('--credentials', help='Path to credentials file', metavar='credentials')
gcphostcreate_parser.add_argument('--project', help='Project', metavar='project', required=True)
gcphostcreate_parser.add_argument('--zone', help='Zone', metavar='zone', required=True)
gcphostcreate_parser.add_argument('name', metavar='NAME')
gcphostcreate_parser.set_defaults(func=create_host_gcp)
grouphostcreate_desc = 'Create Group Host'
grouphostcreate_parser = hostcreate_subparsers.add_parser('group', help=grouphostcreate_desc,
description=grouphostcreate_desc)
grouphostcreate_parser.add_argument('-a', '--algorithm', help='Algorithm. Defaults to random',
metavar='ALGORITHM', default='random')
grouphostcreate_parser.add_argument('-m', '--members', help='Members', metavar='MEMBERS', type=valid_members)
grouphostcreate_parser.add_argument('name', metavar='NAME')
grouphostcreate_parser.set_defaults(func=create_host_group)
kvmhostcreate_desc = 'Create Kvm Host'
kvmhostcreate_parser = hostcreate_subparsers.add_parser('kvm', help=kvmhostcreate_desc,
description=kvmhostcreate_desc)
kvmhostcreate_parser_group = kvmhostcreate_parser.add_mutually_exclusive_group(required=True)
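# Either -H/--host or -U/--url must be passed explicitly: a default value does
# not satisfy a required mutually exclusive group in argparse.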
kvmhostcreate_parser_group.add_argument('-H', '--host', help='Host. Defaults to localhost', metavar='HOST',
default='localhost')
kvmhostcreate_parser.add_argument('--pool', help='Pool. Defaults to default', metavar='POOL', default='default')
kvmhostcreate_parser.add_argument('-p', '--port', help='Port', metavar='PORT')
kvmhostcreate_parser.add_argument('-P', '--protocol', help='Protocol to use', default='ssh', metavar='PROTOCOL')
kvmhostcreate_parser_group.add_argument('-U', '--url', help='URL to use', metavar='URL')
kvmhostcreate_parser.add_argument('-u', '--user', help='User. Defaults to root', default='root', metavar='USER')
kvmhostcreate_parser.add_argument('name', metavar='NAME')
kvmhostcreate_parser.set_defaults(func=create_host_kvm)
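# Note: --cdi and --multus below are store_true flags whose default is already
# True, so passing them is effectively a no-op; they document the defaults
# rather than toggle them.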
kubevirthostcreate_desc = 'Create Kubevirt Host'
kubevirthostcreate_parser = hostcreate_subparsers.add_parser('kubevirt', help=kubevirthostcreate_desc,
description=kubevirthostcreate_desc)
kubevirthostcreate_parser.add_argument('--ca', help='Ca file', metavar='CA')
kubevirthostcreate_parser.add_argument('--cdi', help='Cdi Support', action='store_true', default=True)
kubevirthostcreate_parser.add_argument('-c', '--context', help='Context', metavar='CONTEXT')
kubevirthostcreate_parser.add_argument('-H', '--host', help='Api Host', metavar='HOST')
kubevirthostcreate_parser.add_argument('-p', '--pool', help='Storage Class', metavar='POOL')
kubevirthostcreate_parser.add_argument('--port', help='Api Port', metavar='PORT')
kubevirthostcreate_parser.add_argument('--token', help='Token', metavar='TOKEN')
kubevirthostcreate_parser.add_argument('--multus', help='Multus Support', action='store_true', default=True)
kubevirthostcreate_parser.add_argument('name', metavar='NAME')
kubevirthostcreate_parser.set_defaults(func=create_host_kubevirt)
openstackhostcreate_desc = 'Create Openstack Host'
openstackhostcreate_parser = hostcreate_subparsers.add_parser('openstack', help=openstackhostcreate_desc,
description=openstackhostcreate_desc)
openstackhostcreate_parser.add_argument('--auth-url', help='Auth url', metavar='AUTH_URL', required=True)
openstackhostcreate_parser.add_argument('--domain', help='Domain', metavar='DOMAIN', default='Default')
openstackhostcreate_parser.add_argument('-p', '--password', help='Password', metavar='PASSWORD', required=True)
openstackhostcreate_parser.add_argument('--project', help='Project', metavar='PROJECT', required=True)
openstackhostcreate_parser.add_argument('-u', '--user', help='User', metavar='USER', required=True)
openstackhostcreate_parser.add_argument('name', metavar='NAME')
openstackhostcreate_parser.set_defaults(func=create_host_openstack)
ovirthostcreate_desc = 'Create Ovirt Host'
ovirthostcreate_parser = hostcreate_subparsers.add_parser('ovirt', help=ovirthostcreate_desc,
description=ovirthostcreate_desc)
ovirthostcreate_parser.add_argument('--ca', help='Path to certificate file', metavar='CA')
ovirthostcreate_parser.add_argument('-c', '--cluster', help='Cluster. Defaults to Default', default='Default',
metavar='CLUSTER')
ovirthostcreate_parser.add_argument('-d', '--datacenter', help='Datacenter. Defaults to Default', default='Default',
metavar='DATACENTER')
ovirthostcreate_parser.add_argument('-H', '--host', help='Host to use', metavar='HOST', required=True)
ovirthostcreate_parser.add_argument('-o', '--org', help='Organization', metavar='ORGANIZATION', required=True)
ovirthostcreate_parser.add_argument('-p', '--password', help='Password to use', metavar='PASSWORD', required=True)
ovirthostcreate_parser.add_argument('--pool', help='Storage Domain', metavar='POOL')
ovirthostcreate_parser.add_argument('-u', '--user', help='User. Defaults to admin@internal',
metavar='USER', default='admin@internal')
ovirthostcreate_parser.add_argument('name', metavar='NAME')
ovirthostcreate_parser.set_defaults(func=create_host_ovirt)
vspherehostcreate_desc = 'Create Vsphere Host'
vspherehostcreate_parser = hostcreate_subparsers.add_parser('vsphere', help=vspherehostcreate_desc,
description=vspherehostcreate_desc)
vspherehostcreate_parser.add_argument('-c', '--cluster', help='Cluster', metavar='CLUSTER', required=True)
vspherehostcreate_parser.add_argument('-d', '--datacenter', help='Datacenter', metavar='DATACENTER', required=True)
vspherehostcreate_parser.add_argument('-H', '--host', help='Vcenter Host', metavar='HOST', required=True)
vspherehostcreate_parser.add_argument('-p', '--password', help='Password', metavar='PASSWORD', required=True)
vspherehostcreate_parser.add_argument('-u', '--user', help='User', metavar='USER', required=True)
vspherehostcreate_parser.add_argument('--pool', help='Pool', metavar='POOL')
vspherehostcreate_parser.add_argument('name', metavar='NAME')
vspherehostcreate_parser.set_defaults(func=create_host_vsphere)
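# Host lifecycle commands (delete/disable/enable/list/info/switch/sync), all
# aliased to 'client'.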
hostdelete_desc = 'Delete Host'
hostdelete_parser = delete_subparsers.add_parser('host', description=hostdelete_desc, help=hostdelete_desc,
aliases=['client'])
hostdelete_parser.add_argument('name', metavar='NAME')
hostdelete_parser.set_defaults(func=delete_host)
hostdisable_desc = 'Disable Host'
hostdisable_parser = disable_subparsers.add_parser('host', description=hostdisable_desc, help=hostdisable_desc,
aliases=['client'])
hostdisable_parser.add_argument('name', metavar='NAME')
hostdisable_parser.set_defaults(func=disable_host)
hostenable_desc = 'Enable Host'
hostenable_parser = enable_subparsers.add_parser('host', description=hostenable_desc, help=hostenable_desc,
aliases=['client'])
hostenable_parser.add_argument('name', metavar='NAME')
hostenable_parser.set_defaults(func=enable_host)
hostlist_desc = 'List Hosts'
hostlist_parser = list_subparsers.add_parser('host', description=hostlist_desc, help=hostlist_desc,
                                             aliases=['hosts', 'client', 'clients'])
hostlist_parser.set_defaults(func=list_host)
hostreport_desc = 'Report Info About Host'
hostreport_parser = argparse.ArgumentParser(add_help=False)
hostreport_parser.set_defaults(func=report_host)
info_subparsers.add_parser('host', parents=[hostreport_parser], description=hostreport_desc, help=hostreport_desc,
aliases=['client'])
hostswitch_desc = 'Switch Host'
hostswitch_parser = argparse.ArgumentParser(add_help=False)
hostswitch_parser.add_argument('name', metavar='NAME')
hostswitch_parser.set_defaults(func=switch_host)
switch_subparsers.add_parser('host', parents=[hostswitch_parser], description=hostswitch_desc, help=hostswitch_desc,
aliases=['client'])
hostsync_desc = 'Sync Host'
hostsync_parser = sync_subparsers.add_parser('host', description=hostsync_desc, help=hostsync_desc,
aliases=['client'])
hostsync_parser.add_argument('names', metavar='NAMES', nargs='*')
hostsync_parser.set_defaults(func=sync_host)
imagedelete_desc = 'Delete Image'
imagedelete_help = "Image to delete"
imagedelete_parser = argparse.ArgumentParser(add_help=False)
imagedelete_parser.add_argument('-y', '--yes', action='store_true', help="Don't ask for confirmation")
imagedelete_parser.add_argument('-p', '--pool', help='Pool to use', metavar='POOL')
imagedelete_parser.add_argument('images', help=imagedelete_help, metavar='IMAGES', nargs='*')
imagedelete_parser.set_defaults(func=delete_image)
delete_subparsers.add_parser('image', parents=[imagedelete_parser], description=imagedelete_desc,
help=imagedelete_desc)
delete_subparsers.add_parser('iso', parents=[imagedelete_parser], description=imagedelete_desc,
help=imagedelete_desc)
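# Kube (cluster) creation: the generic/kubeadm, kind, k3s, hypershift and
# openshift/okd flavors all share the same -f/--force, -P/--param,
# --paramfile and CLUSTER arguments, differing only in the handler they call.
# Example (assuming the installed entrypoint is `kcli`):
#   kcli create kube generic -P key=value mycluster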
kubecreate_desc = 'Create Kube'
kubecreate_parser = create_subparsers.add_parser('kube', description=kubecreate_desc, help=kubecreate_desc,
aliases=['cluster'])
kubecreate_subparsers = kubecreate_parser.add_subparsers(metavar='', dest='subcommand_create_kube')
kubegenericcreate_desc = 'Create Generic Kube'
kubegenericcreate_epilog = "examples:\n%s" % kubegenericcreate
kubegenericcreate_parser = argparse.ArgumentParser(add_help=False)
kubegenericcreate_parser.add_argument('-f', '--force', action='store_true', help='Delete existing cluster first')
kubegenericcreate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (multiple can be specified)',
metavar='PARAM')
kubegenericcreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
kubegenericcreate_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster)
kubegenericcreate_parser.set_defaults(func=create_generic_kube)
kubecreate_subparsers.add_parser('generic', parents=[kubegenericcreate_parser],
description=kubegenericcreate_desc,
help=kubegenericcreate_desc,
epilog=kubegenericcreate_epilog,
formatter_class=rawhelp, aliases=['kubeadm'])
kubekindcreate_desc = 'Create Kind Kube'
kubekindcreate_epilog = "examples:\n%s" % kubekindcreate
kubekindcreate_parser = argparse.ArgumentParser(add_help=False)
kubekindcreate_parser.add_argument('-f', '--force', action='store_true', help='Delete existing cluster first')
kubekindcreate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (multiple can be specified)',
metavar='PARAM')
kubekindcreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
kubekindcreate_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster)
kubekindcreate_parser.set_defaults(func=create_kind_kube)
kubecreate_subparsers.add_parser('kind', parents=[kubekindcreate_parser],
description=kubekindcreate_desc,
help=kubekindcreate_desc,
epilog=kubekindcreate_epilog,
formatter_class=rawhelp)
kubek3screate_desc = 'Create K3s Kube'
kubek3screate_epilog = "examples:\n%s" % kubek3screate
kubek3screate_parser = argparse.ArgumentParser(add_help=False)
kubek3screate_parser.add_argument('-f', '--force', action='store_true', help='Delete existing cluster first')
kubek3screate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (multiple can be specified)',
metavar='PARAM')
kubek3screate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
kubek3screate_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster)
kubek3screate_parser.set_defaults(func=create_k3s_kube)
kubecreate_subparsers.add_parser('k3s', parents=[kubek3screate_parser],
description=kubek3screate_desc,
help=kubek3screate_desc,
epilog=kubek3screate_epilog,
formatter_class=rawhelp)
parameterhelp = "specify parameter or keyword for rendering (multiple can be specified)"
kubehypershiftcreate_desc = 'Create Hypershift Kube'
kubehypershiftcreate_epilog = "examples:\n%s" % kubehypershiftcreate
kubehypershiftcreate_parser = argparse.ArgumentParser(add_help=False)
kubehypershiftcreate_parser.add_argument('-f', '--force', action='store_true', help='Delete existing cluster first')
kubehypershiftcreate_parser.add_argument('-P', '--param', action='append', help=parameterhelp, metavar='PARAM')
kubehypershiftcreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
kubehypershiftcreate_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster)
kubehypershiftcreate_parser.set_defaults(func=create_hypershift_kube)
kubecreate_subparsers.add_parser('hypershift', parents=[kubehypershiftcreate_parser],
description=kubehypershiftcreate_desc,
help=kubehypershiftcreate_desc,
epilog=kubehypershiftcreate_epilog,
formatter_class=rawhelp)
kubeopenshiftcreate_desc = 'Create Openshift Kube'
kubeopenshiftcreate_epilog = "examples:\n%s" % kubeopenshiftcreate
kubeopenshiftcreate_parser = argparse.ArgumentParser(add_help=False)
kubeopenshiftcreate_parser.add_argument('-f', '--force', action='store_true', help='Delete existing cluster first')
kubeopenshiftcreate_parser.add_argument('-P', '--param', action='append', help=parameterhelp, metavar='PARAM')
kubeopenshiftcreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
kubeopenshiftcreate_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster)
kubeopenshiftcreate_parser.set_defaults(func=create_openshift_kube)
kubecreate_subparsers.add_parser('openshift', parents=[kubeopenshiftcreate_parser],
description=kubeopenshiftcreate_desc,
help=kubeopenshiftcreate_desc,
epilog=kubeopenshiftcreate_epilog,
formatter_class=rawhelp, aliases=['okd'])
kubedelete_desc = 'Delete Kube'
kubedelete_parser = argparse.ArgumentParser(add_help=False)
kubedelete_parser.add_argument('-y', '--yes', action='store_true', help="Don't ask for confirmation")
kubedelete_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (multiple can be specified)',
metavar='PARAM')
kubedelete_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
kubedelete_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster)
kubedelete_parser.set_defaults(func=delete_kube)
delete_subparsers.add_parser('kube', parents=[kubedelete_parser], description=kubedelete_desc, help=kubedelete_desc,
aliases=['cluster'])
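# Kube info/list: per-flavor info subcommands plus a flat 'list kube'.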
kubeinfo_desc = 'Info Kube'
kubeinfo_parser = info_subparsers.add_parser('kube', description=kubeinfo_desc, help=kubeinfo_desc,
aliases=['cluster'])
kubeinfo_subparsers = kubeinfo_parser.add_subparsers(metavar='', dest='subcommand_info_kube')
kubegenericinfo_desc = 'Info Generic Kube'
kubegenericinfo_parser = kubeinfo_subparsers.add_parser('generic', description=kubegenericinfo_desc,
help=kubegenericinfo_desc, aliases=['kubeadm'])
kubegenericinfo_parser.set_defaults(func=info_generic_kube)
kubekindinfo_desc = 'Info Kind Kube'
kubekindinfo_parser = kubeinfo_subparsers.add_parser('kind', description=kubekindinfo_desc, help=kubekindinfo_desc)
kubekindinfo_parser.set_defaults(func=info_kind_kube)
kubek3sinfo_desc = 'Info K3s Kube'
kubek3sinfo_parser = kubeinfo_subparsers.add_parser('k3s', description=kubek3sinfo_desc, help=kubek3sinfo_desc)
kubek3sinfo_parser.set_defaults(func=info_k3s_kube)
kubehypershiftinfo_desc = 'Info Hypershift Kube'
kubehypershiftinfo_parser = kubeinfo_subparsers.add_parser('hypershift', description=kubehypershiftinfo_desc,
help=kubehypershiftinfo_desc)
kubehypershiftinfo_parser.set_defaults(func=info_hypershift_kube)
kubeopenshiftinfo_desc = 'Info Openshift Kube'
kubeopenshiftinfo_parser = kubeinfo_subparsers.add_parser('openshift', description=kubeopenshiftinfo_desc,
help=kubeopenshiftinfo_desc, aliases=['okd'])
kubeopenshiftinfo_parser.set_defaults(func=info_openshift_kube)
kubelist_desc = 'List Kubes'
kubelist_parser = list_subparsers.add_parser('kube', description=kubelist_desc, help=kubelist_desc,
aliases=['kubes', 'cluster', 'clusters'])
kubelist_parser.set_defaults(func=list_kube)
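# Kube scaling: each flavor takes -w/--workers as the total desired worker
# count and falls back to the cluster name 'testk' when none is given.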
kubescale_desc = 'Scale Kube'
kubescale_parser = scale_subparsers.add_parser('kube', description=kubescale_desc, help=kubescale_desc,
aliases=['cluster'])
kubescale_subparsers = kubescale_parser.add_subparsers(metavar='', dest='subcommand_scale_kube')
kubegenericscale_desc = 'Scale Generic Kube'
kubegenericscale_parser = argparse.ArgumentParser(add_help=False)
kubegenericscale_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (multiple can be specified)',
metavar='PARAM')
kubegenericscale_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
kubegenericscale_parser.add_argument('-w', '--workers', help='Total number of workers', type=int, default=0)
kubegenericscale_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster, default='testk')
kubegenericscale_parser.set_defaults(func=scale_generic_kube)
kubescale_subparsers.add_parser('generic', parents=[kubegenericscale_parser], description=kubegenericscale_desc,
help=kubegenericscale_desc, aliases=['kubeadm'])
kubek3sscale_desc = 'Scale K3s Kube'
kubek3sscale_parser = argparse.ArgumentParser(add_help=False)
kubek3sscale_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (multiple can be specified)',
metavar='PARAM')
kubek3sscale_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
kubek3sscale_parser.add_argument('-w', '--workers', help='Total number of workers', type=int, default=0)
kubek3sscale_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster, default='testk')
kubek3sscale_parser.set_defaults(func=scale_k3s_kube)
kubescale_subparsers.add_parser('k3s', parents=[kubek3sscale_parser], description=kubek3sscale_desc,
help=kubek3sscale_desc)
kubehypershiftscale_desc = 'Scale Hypershift Kube'
kubehypershiftscale_parser = argparse.ArgumentParser(add_help=False)
kubehypershiftscale_parser.add_argument('-P', '--param', action='append', help=parameterhelp, metavar='PARAM')
kubehypershiftscale_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
kubehypershiftscale_parser.add_argument('-w', '--workers', help='Total number of workers', type=int, default=0)
kubehypershiftscale_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster, default='testk')
kubehypershiftscale_parser.set_defaults(func=scale_hypershift_kube)
kubescale_subparsers.add_parser('hypershift', parents=[kubehypershiftscale_parser],
                                description=kubehypershiftscale_desc,
                                help=kubehypershiftscale_desc)
kubeopenshiftscale_desc = 'Scale Openshift Kube'
kubeopenshiftscale_parser = argparse.ArgumentParser(add_help=False)
kubeopenshiftscale_parser.add_argument('-P', '--param', action='append', help=parameterhelp, metavar='PARAM')
kubeopenshiftscale_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
kubeopenshiftscale_parser.add_argument('-w', '--workers', help='Total number of workers', type=int, default=0)
kubeopenshiftscale_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster, default='testk')
kubeopenshiftscale_parser.set_defaults(func=scale_openshift_kube)
kubescale_subparsers.add_parser('openshift', parents=[kubeopenshiftscale_parser],
description=kubeopenshiftscale_desc,
help=kubeopenshiftscale_desc, aliases=['okd'])
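# Kube updates: same param/paramfile plumbing as scaling, per flavor.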
kubeupdate_desc = 'Update Kube'
kubeupdate_parser = update_subparsers.add_parser('kube', description=kubeupdate_desc, help=kubeupdate_desc,
aliases=['cluster'])
kubeupdate_subparsers = kubeupdate_parser.add_subparsers(metavar='', dest='subcommand_update_kube')
kubegenericupdate_desc = 'Update Generic Kube'
kubegenericupdate_parser = argparse.ArgumentParser(add_help=False)
kubegenericupdate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (multiple can be specified)',
metavar='PARAM')
kubegenericupdate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
kubegenericupdate_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster, default='testk')
kubegenericupdate_parser.set_defaults(func=update_generic_kube)
kubeupdate_subparsers.add_parser('generic', parents=[kubegenericupdate_parser], description=kubegenericupdate_desc,
help=kubegenericupdate_desc, aliases=['kubeadm'])
kubek3supdate_desc = 'Update K3s Kube'
kubek3supdate_parser = argparse.ArgumentParser(add_help=False)
kubek3supdate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (multiple can be specified)',
metavar='PARAM')
kubek3supdate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
kubek3supdate_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster, default='testk')
kubek3supdate_parser.set_defaults(func=update_k3s_kube)
kubeupdate_subparsers.add_parser('k3s', parents=[kubek3supdate_parser], description=kubek3supdate_desc,
help=kubek3supdate_desc)
kubeopenshiftupdate_desc = 'Update Openshift Kube'
kubeopenshiftupdate_parser = argparse.ArgumentParser(add_help=False)
kubeopenshiftupdate_parser.add_argument('-P', '--param', action='append', help=parameterhelp, metavar='PARAM')
kubeopenshiftupdate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
kubeopenshiftupdate_parser.add_argument('cluster', metavar='CLUSTER', nargs='?', type=valid_cluster, default='testk')
kubeopenshiftupdate_parser.set_defaults(func=update_openshift_kube)
kubeupdate_subparsers.add_parser('openshift', parents=[kubeopenshiftupdate_parser],
description=kubeopenshiftupdate_desc,
help=kubeopenshiftupdate_desc, aliases=['okd'])
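# Load balancer commands.
# Example (assuming the installed entrypoint is `kcli`):
#   kcli create lb -p 443 -v vm1 mylb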
lbcreate_desc = 'Create Load Balancer'
lbcreate_parser = create_subparsers.add_parser('lb', description=lbcreate_desc, help=lbcreate_desc,
aliases=['loadbalancer'])
lbcreate_parser.add_argument('--checkpath', default='/index.html', help="Path to check. Defaults to /index.html")
lbcreate_parser.add_argument('--checkport', default=80, help="Port to check. Defaults to 80")
lbcreate_parser.add_argument('--domain', help='Domain to create a dns entry associated to the load balancer')
lbcreate_parser.add_argument('-i', '--internal', action='store_true')
lbcreate_parser.add_argument('-p', '--ports', default='443', help='Load Balancer Ports. Defaults to 443')
lbcreate_parser.add_argument('-v', '--vms', help='Vms to add to the pool. Can also be a list of ips')
lbcreate_parser.add_argument('name', metavar='NAME', nargs='?')
lbcreate_parser.set_defaults(func=create_lb)
lbdelete_desc = 'Delete Load Balancer'
lbdelete_parser = delete_subparsers.add_parser('lb', description=lbdelete_desc, help=lbdelete_desc,
aliases=['loadbalancer'])
lbdelete_parser.add_argument('-y', '--yes', action='store_true', help="Don't ask for confirmation")
lbdelete_parser.add_argument('name', metavar='NAME')
lbdelete_parser.set_defaults(func=delete_lb)
lblist_desc = 'List Load Balancers'
lblist_parser = list_subparsers.add_parser('lb', description=lblist_desc, help=lblist_desc,
aliases=['loadbalancers', 'lbs'])
lblist_parser.add_argument('--short', action='store_true')
lblist_parser.set_defaults(func=list_lb)
keywordinfo_desc = 'Info Keyword'
keywordinfo_parser = info_subparsers.add_parser('keyword', description=keywordinfo_desc, help=keywordinfo_desc,
aliases=['parameter'])
keywordinfo_parser.add_argument('keyword', metavar='KEYWORD')
keywordinfo_parser.set_defaults(func=info_keyword)
profilecreate_desc = 'Create Profile'
profilecreate_parser = argparse.ArgumentParser(add_help=False)
profilecreate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (can specify multiple)',
metavar='PARAM')
profilecreate_parser.add_argument('profile', metavar='PROFILE')
profilecreate_parser.set_defaults(func=create_profile)
create_subparsers.add_parser('profile', parents=[profilecreate_parser], description=profilecreate_desc,
help=profilecreate_desc)
profileinfo_desc = 'Info Profile'
profileinfo_parser = info_subparsers.add_parser('profile', description=profileinfo_desc, help=profileinfo_desc)
profileinfo_parser.add_argument('profile', metavar='PROFILE')
profileinfo_parser.set_defaults(func=info_profile)
profilelist_desc = 'List Profiles'
profilelist_parser = list_subparsers.add_parser('profile', description=profilelist_desc, help=profilelist_desc,
aliases=['profiles'])
profilelist_parser.add_argument('--short', action='store_true')
profilelist_parser.set_defaults(func=list_profile)
profileupdate_desc = 'Update Profile'
profileupdate_parser = update_subparsers.add_parser('profile', description=profileupdate_desc,
help=profileupdate_desc)
profileupdate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
profileupdate_parser.add_argument('profile', metavar='PROFILE', nargs='?')
profileupdate_parser.set_defaults(func=update_profile)
flavorlist_desc = 'List Flavors'
flavorlist_parser = list_subparsers.add_parser('flavor', description=flavorlist_desc, help=flavorlist_desc,
aliases=['flavors'])
flavorlist_parser.add_argument('--short', action='store_true')
flavorlist_parser.set_defaults(func=list_flavor)
isolist_desc = 'List Isos'
isolist_parser = list_subparsers.add_parser('iso', description=isolist_desc, help=isolist_desc, aliases=['isos'])
isolist_parser.set_defaults(func=list_iso)
keywordlist_desc = 'List Keyword'
keywordlist_parser = list_subparsers.add_parser('keyword', description=keywordlist_desc, help=keywordlist_desc,
aliases=['keywords', 'parameter', 'parameters'])
keywordlist_parser.set_defaults(func=list_keyword)
networkinfo_desc = 'Info Network'
networkinfo_parser = info_subparsers.add_parser('network', description=networkinfo_desc, help=networkinfo_desc)
networkinfo_parser.add_argument('name', metavar='NETWORK')
networkinfo_parser.set_defaults(func=info_network)
networklist_desc = 'List Networks'
networklist_parser = list_subparsers.add_parser('network', description=networklist_desc, help=networklist_desc,
aliases=['networks'])
networklist_parser.add_argument('--short', action='store_true')
networklist_parser.add_argument('-s', '--subnets', action='store_true')
networklist_parser.set_defaults(func=list_network)
networkcreate_desc = 'Create Network'
networkcreate_parser = create_subparsers.add_parser('network', description=networkcreate_desc,
help=networkcreate_desc)
networkcreate_parser.add_argument('-i', '--isolated', action='store_true', help='Isolated Network')
networkcreate_parser.add_argument('-c', '--cidr', help='Cidr of the net', metavar='CIDR')
networkcreate_parser.add_argument('-d', '--dual', help='Cidr of dual net', metavar='DUAL')
networkcreate_parser.add_argument('--nodhcp', action='store_true', help='Disable dhcp on the net')
networkcreate_parser.add_argument('--domain', help='DNS domain. Defaults to network name')
networkcreate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (can specify multiple)',
metavar='PARAM')
networkcreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
networkcreate_parser.add_argument('name', metavar='NETWORK')
networkcreate_parser.set_defaults(func=create_network)
networkdelete_desc = 'Delete Network'
networkdelete_parser = delete_subparsers.add_parser('network', description=networkdelete_desc,
help=networkdelete_desc)
networkdelete_parser.add_argument('-y', '--yes', action='store_true', help="Don't ask for confirmation")
networkdelete_parser.add_argument('names', metavar='NETWORKS', nargs='+')
networkdelete_parser.set_defaults(func=delete_network)
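# Openshift helper artifacts: a disconnecter vm and a baremetal ignition iso.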
disconnectercreate_desc = 'Create a disconnecter vm for openshift'
disconnectercreate_epilog = "examples:\n%s" % disconnectercreate
disconnectercreate_parser = argparse.ArgumentParser(add_help=False)
disconnectercreate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (can specify multiple)',
metavar='PARAM')
disconnectercreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
disconnectercreate_parser.add_argument('plan', metavar='PLAN', help='Plan', nargs='?')
disconnectercreate_parser.set_defaults(func=create_openshift_disconnecter)
create_subparsers.add_parser('openshift-disconnecter', parents=[disconnectercreate_parser],
description=disconnectercreate_desc, help=disconnectercreate_desc,
epilog=disconnectercreate_epilog, formatter_class=rawhelp)
isocreate_desc = 'Create an iso ignition for baremetal install'
isocreate_epilog = "examples:\n%s" % isocreate
isocreate_parser = argparse.ArgumentParser(add_help=False)
isocreate_parser.add_argument('-d', '--direct', action='store_true', help='Directly embed the target ignition in the iso')
isocreate_parser.add_argument('-f', '--ignitionfile', help='Ignition file')
isocreate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (can specify multiple)',
metavar='PARAM')
isocreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
isocreate_parser.add_argument('cluster', metavar='CLUSTER', help='Cluster')
isocreate_parser.set_defaults(func=create_openshift_iso)
create_subparsers.add_parser('openshift-iso', parents=[isocreate_parser], description=isocreate_desc,
help=isocreate_desc, epilog=isocreate_epilog, formatter_class=rawhelp)
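# Pipeline generation (github actions, jenkins, tekton) from a plan file.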
pipelinecreate_desc = 'Create Pipeline'
pipelinecreate_parser = create_subparsers.add_parser('pipeline', description=pipelinecreate_desc,
help=pipelinecreate_desc)
pipelinecreate_subparsers = pipelinecreate_parser.add_subparsers(metavar='', dest='subcommand_create_pipeline')
githubpipelinecreate_desc = 'Create Github Pipeline'
githubpipelinecreate_parser = pipelinecreate_subparsers.add_parser('github', description=githubpipelinecreate_desc,
help=githubpipelinecreate_desc, aliases=['gha'])
githubpipelinecreate_parser.add_argument('-f', '--inputfile', help='Input Plan (or script) file')
githubpipelinecreate_parser.add_argument('-k', '--kube', action='store_true', help='Create kube pipeline')
githubpipelinecreate_parser.add_argument('-s', '--script', action='store_true', help='Create script pipeline')
githubpipelinecreate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)',
metavar='PARAM')
githubpipelinecreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
githubpipelinecreate_parser.add_argument('plan', metavar='PLAN', nargs='?')
githubpipelinecreate_parser.set_defaults(func=create_pipeline_github)
jenkinspipelinecreate_desc = 'Create Jenkins Pipeline'
jenkinspipelinecreate_parser = pipelinecreate_subparsers.add_parser('jenkins',
description=jenkinspipelinecreate_desc,
help=jenkinspipelinecreate_desc)
jenkinspipelinecreate_parser.add_argument('-f', '--inputfile', help='Input Plan file')
jenkinspipelinecreate_parser.add_argument('-k', '--kube', action='store_true', help='Create kube pipeline')
jenkinspipelinecreate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)',
metavar='PARAM')
jenkinspipelinecreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
jenkinspipelinecreate_parser.add_argument('plan', metavar='PLAN', nargs='?')
jenkinspipelinecreate_parser.set_defaults(func=create_pipeline_jenkins)
tektonpipelinecreate_desc = 'Create Tekton Pipeline'
tektonpipelinecreate_parser = pipelinecreate_subparsers.add_parser('tekton',
description=tektonpipelinecreate_desc,
help=tektonpipelinecreate_desc)
tektonpipelinecreate_parser.add_argument('-f', '--inputfile', help='Input Plan file')
tektonpipelinecreate_parser.add_argument('-k', '--kube', action='store_true', help='Create kube pipeline')
tektonpipelinecreate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)',
metavar='PARAM')
tektonpipelinecreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
tektonpipelinecreate_parser.add_argument('plan', metavar='PLAN', nargs='?')
tektonpipelinecreate_parser.set_defaults(func=create_pipeline_tekton)
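# Plan lifecycle: create/delete/expose/info/list/restart/start/stop/update,
# plus plan-data, plan-template and plan-snapshot helpers.
# Example (assuming the installed entrypoint is `kcli`):
#   kcli create plan -f kcli_plan.yml myplan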
plancreate_desc = 'Create Plan'
plancreate_epilog = "examples:\n%s" % plancreate
plancreate_parser = create_subparsers.add_parser('plan', description=plancreate_desc, help=plancreate_desc,
epilog=plancreate_epilog,
formatter_class=rawhelp)
plancreate_parser.add_argument('-A', '--ansible', help='Generate ansible inventory', action='store_true')
plancreate_parser.add_argument('-u', '--url', help='Url for plan', metavar='URL', type=valid_url)
plancreate_parser.add_argument('-p', '--path', help='Path where to download plans. Defaults to plan',
metavar='PATH')
plancreate_parser.add_argument('-c', '--container', action='store_true', help='Handle container')
plancreate_parser.add_argument('--force', action='store_true', help='Delete existing vms first')
plancreate_parser.add_argument('-f', '--inputfile', help='Input Plan file')
plancreate_parser.add_argument('-k', '--skippre', action='store_true', help='Skip pre script')
plancreate_parser.add_argument('-z', '--skippost', action='store_true', help='Skip post script')
plancreate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
plancreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
plancreate_parser.add_argument('-t', '--threaded', help='Run threaded', action='store_true')
plancreate_parser.add_argument('plan', metavar='PLAN', nargs='?')
plancreate_parser.set_defaults(func=create_plan)
plandelete_desc = 'Delete Plan'
plandelete_parser = delete_subparsers.add_parser('plan', description=plandelete_desc, help=plandelete_desc)
plandelete_parser.add_argument('-y', '--yes', action='store_true', help="Don't ask for confirmation")
plandelete_parser.add_argument('plans', metavar='PLAN', nargs='*')
plandelete_parser.set_defaults(func=delete_plan)
plansnapshotdelete_desc = 'Delete Plan Snapshot'
plansnapshotdelete_parser = delete_subparsers.add_parser('plan-snapshot', description=plansnapshotdelete_desc,
help=plansnapshotdelete_desc)
plansnapshotdelete_parser.add_argument('-p', '--plan', help='plan name', required=True, metavar='PLAN')
plansnapshotdelete_parser.add_argument('snapshot', metavar='SNAPSHOT')
plansnapshotdelete_parser.set_defaults(func=delete_snapshot_plan)
planexpose_desc = 'Expose plan'
planexpose_epilog = None
planexpose_parser = argparse.ArgumentParser(add_help=False)
planexpose_parser.add_argument('-f', '--inputfile', help='Input Plan file')
planexpose_parser.add_argument('-i', '--installermode', action='store_true', help='Filter by installervm')
planexpose_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
planexpose_parser.add_argument('--port', help='Port where to listen', type=int, default=9000, metavar='PORT')
planexpose_parser.add_argument('plan', metavar='PLAN', nargs='?')
planexpose_parser.set_defaults(func=expose_plan)
expose_subparsers.add_parser('plan', parents=[planexpose_parser], description=planexpose_desc,
                             help=planexpose_desc, epilog=planexpose_epilog, formatter_class=rawhelp)
planinfo_desc = 'Info Plan'
planinfo_epilog = "examples:\n%s" % planinfo
planinfo_parser = info_subparsers.add_parser('plan', description=planinfo_desc, help=planinfo_desc,
epilog=planinfo_epilog,
formatter_class=rawhelp)
planinfo_parser.add_argument('--doc', action='store_true', help='Render info as markdown table')
planinfo_parser.add_argument('-f', '--inputfile', help='Input Plan file')
planinfo_parser.add_argument('-p', '--path', help='Path where to download plans. Defaults to plan', metavar='PATH')
planinfo_parser.add_argument('-q', '--quiet', action='store_true', help='Provide parameter file output')
planinfo_parser.add_argument('-u', '--url', help='Url for plan', metavar='URL', type=valid_url)
planinfo_parser.set_defaults(func=info_plan)
planlist_desc = 'List Plans'
planlist_parser = list_subparsers.add_parser('plan', description=planlist_desc, help=planlist_desc,
aliases=['plans'])
planlist_parser.set_defaults(func=list_plan)
planrestart_desc = 'Restart Plan'
planrestart_parser = restart_subparsers.add_parser('plan', description=planrestart_desc, help=planrestart_desc)
planrestart_parser.add_argument('-s', '--soft', action='store_true', help='Do a soft stop')
planrestart_parser.add_argument('plan', metavar='PLAN')
planrestart_parser.set_defaults(func=restart_plan)
plandatacreate_desc = 'Create Cloudinit/Ignition from plan file'
plandatacreate_epilog = "examples:\n%s" % plandatacreate
plandatacreate_parser = create_subparsers.add_parser('plan-data', description=plandatacreate_desc,
help=plandatacreate_desc, epilog=plandatacreate_epilog,
formatter_class=rawhelp)
plandatacreate_parser.add_argument('-f', '--inputfile', help='Input Plan file', default='kcli_plan.yml')
plandatacreate_parser.add_argument('-k', '--skippre', action='store_true', help='Skip pre script')
plandatacreate_parser.add_argument('--outputdir', '-o', help='Output directory', metavar='OUTPUTDIR')
plandatacreate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
plandatacreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
plandatacreate_parser.add_argument('name', metavar='VMNAME', nargs='?', type=valid_fqdn)
plandatacreate_parser.set_defaults(func=create_plandata)
plantemplatecreate_desc = 'Create plan template'
plantemplatecreate_epilog = "examples:\n%s" % plantemplatecreate
plantemplatecreate_parser = create_subparsers.add_parser('plan-template', description=plantemplatecreate_desc,
help=plantemplatecreate_desc,
epilog=plantemplatecreate_epilog, formatter_class=rawhelp)
plantemplatecreate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)',
metavar='PARAM')
plantemplatecreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
plantemplatecreate_parser.add_argument('-x', '--skipfiles', action='store_true', help='Skip files in assets')
plantemplatecreate_parser.add_argument('-y', '--skipscripts', action='store_true', help='Skip scripts in assets')
plantemplatecreate_parser.add_argument('directory', metavar='DIR')
plantemplatecreate_parser.set_defaults(func=create_plantemplate)
planrevert_desc = 'Revert Snapshot Of Plan'
planrevert_parser = revert_subparsers.add_parser('plan-snapshot', description=planrevert_desc, help=planrevert_desc,
aliases=['plan'])
planrevert_parser.add_argument('-p', '--plan', help='Plan name', required=True, metavar='PLANNAME')
planrevert_parser.add_argument('snapshot', metavar='SNAPSHOT')
planrevert_parser.set_defaults(func=revert_snapshot_plan)
plansnapshotcreate_desc = 'Create Plan Snapshot'
plansnapshotcreate_parser = create_subparsers.add_parser('plan-snapshot', description=plansnapshotcreate_desc,
help=plansnapshotcreate_desc)
plansnapshotcreate_parser.add_argument('-p', '--plan', help='plan name', required=True, metavar='PLAN')
plansnapshotcreate_parser.add_argument('snapshot', metavar='SNAPSHOT')
plansnapshotcreate_parser.set_defaults(func=create_snapshot_plan)
planstart_desc = 'Start Plan'
planstart_parser = start_subparsers.add_parser('plan', description=planstart_desc, help=planstart_desc)
planstart_parser.add_argument('plan', metavar='PLAN')
planstart_parser.set_defaults(func=start_plan)
planstop_desc = 'Stop Plan'
planstop_parser = stop_subparsers.add_parser('plan', description=planstop_desc, help=planstop_desc)
planstop_parser.add_argument('-s', '--soft', action='store_true', help='Do a soft stop')
planstop_parser.add_argument('plan', metavar='PLAN')
planstop_parser.set_defaults(func=stop_plan)
planupdate_desc = 'Update Plan'
planupdate_parser = update_subparsers.add_parser('plan', description=planupdate_desc, help=planupdate_desc)
planupdate_parser.add_argument('--autostart', action='store_true', help='Set autostart for vms of the plan')
planupdate_parser.add_argument('--noautostart', action='store_true', help='Remove autostart for vms of the plan')
planupdate_parser.add_argument('-u', '--url', help='Url for plan', metavar='URL', type=valid_url)
planupdate_parser.add_argument('-p', '--path', help='Path where to download plans. Defaults to plan',
metavar='PATH')
planupdate_parser.add_argument('-c', '--container', action='store_true', help='Handle container')
planupdate_parser.add_argument('-f', '--inputfile', help='Input Plan file')
planupdate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
planupdate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
planupdate_parser.add_argument('plan', metavar='PLAN')
planupdate_parser.set_defaults(func=update_plan)
playbookcreate_desc = 'Create playbook from plan'
playbookcreate_parser = create_subparsers.add_parser('playbook', description=playbookcreate_desc,
help=playbookcreate_desc)
playbookcreate_parser.add_argument('-f', '--inputfile', help='Input Plan/File', default='kcli_plan.yml')
playbookcreate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
playbookcreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
playbookcreate_parser.add_argument('-s', '--store', action='store_true', help="Store results in files")
playbookcreate_parser.set_defaults(func=create_playbook)
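# Storage pool management.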
poolcreate_desc = 'Create Pool'
poolcreate_parser = create_subparsers.add_parser('pool', description=poolcreate_desc, help=poolcreate_desc)
poolcreate_parser.add_argument('-f', '--full', action='store_true')
poolcreate_parser.add_argument('-t', '--pooltype', help='Type of the pool', choices=('dir', 'lvm', 'zfs'),
default='dir')
poolcreate_parser.add_argument('-p', '--path', help='Path of the pool', metavar='PATH')
poolcreate_parser.add_argument('--thinpool', help='Existing thin pool to use with lvm', metavar='THINPOOL')
poolcreate_parser.add_argument('pool')
poolcreate_parser.set_defaults(func=create_pool)
pooldelete_desc = 'Delete Pool'
pooldelete_parser = delete_subparsers.add_parser('pool', description=pooldelete_desc, help=pooldelete_desc)
pooldelete_parser.add_argument('-d', '--delete', action='store_true')
pooldelete_parser.add_argument('-f', '--full', action='store_true')
pooldelete_parser.add_argument('-p', '--path', help='Path of the pool', metavar='PATH')
pooldelete_parser.add_argument('--thinpool', help='Existing thin pool to use with lvm', metavar='THINPOOL')
pooldelete_parser.add_argument('-y', '--yes', action='store_true', help="Don't ask for confirmation")
pooldelete_parser.add_argument('pool')
pooldelete_parser.set_defaults(func=delete_pool)
poollist_desc = 'List Pools'
poollist_parser = list_subparsers.add_parser('pool', description=poollist_desc, help=poollist_desc,
aliases=['pools'])
poollist_parser.add_argument('--short', action='store_true')
poollist_parser.set_defaults(func=list_pool)
profiledelete_desc = 'Delete Profile'
profiledelete_help = "Profile to delete"
profiledelete_parser = argparse.ArgumentParser(add_help=False)
profiledelete_parser.add_argument('profile', help=profiledelete_help, metavar='PROFILE')
profiledelete_parser.set_defaults(func=delete_profile)
delete_subparsers.add_parser('profile', parents=[profiledelete_parser], description=profiledelete_desc,
help=profiledelete_desc)
productcreate_desc = 'Create Product'
productcreate_parser = create_subparsers.add_parser('product', description=productcreate_desc,
help=productcreate_desc)
productcreate_parser.add_argument('-g', '--group', help='Group to use as a name during deployment', metavar='GROUP')
productcreate_parser.add_argument('-l', '--latest', action='store_true', help='Grab latest version of the plans')
productcreate_parser.add_argument('-P', '--param', action='append',
                                  help='Define parameter for rendering within scripts. '
                                       'Can be repeated several times', metavar='PARAM')
productcreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
productcreate_parser.add_argument('-r', '--repo',
help='Repo to use, if deploying a product present in several repos',
metavar='REPO')
productcreate_parser.add_argument('product', metavar='PRODUCT')
productcreate_parser.set_defaults(func=create_product)
productinfo_desc = 'Info Of Product'
productinfo_epilog = "examples:\n%s" % productinfo
productinfo_parser = argparse.ArgumentParser(add_help=False)
productinfo_parser.set_defaults(func=info_product)
productinfo_parser.add_argument('-g', '--group', help='Only Display products of the indicated group',
metavar='GROUP')
productinfo_parser.add_argument('-r', '--repo', help='Only Display products of the indicated repository',
metavar='REPO')
productinfo_parser.add_argument('product', metavar='PRODUCT')
info_subparsers.add_parser('product', parents=[productinfo_parser], description=productinfo_desc,
help=productinfo_desc,
epilog=productinfo_epilog, formatter_class=rawhelp)
productlist_desc = 'List Products'
productlist_parser = list_subparsers.add_parser('product', description=productlist_desc, help=productlist_desc,
aliases=['products'])
productlist_parser.add_argument('-g', '--group', help='Only Display products of the indicated group',
metavar='GROUP')
productlist_parser.add_argument('-r', '--repo', help='Only Display products of the indicated repository',
metavar='REPO')
productlist_parser.add_argument('-s', '--search', help='Search matching products')
productlist_parser.set_defaults(func=list_product)
repocreate_desc = 'Create Repo'
repocreate_epilog = "examples:\n%s" % repocreate
repocreate_parser = create_subparsers.add_parser('repo', description=repocreate_desc, help=repocreate_desc,
epilog=repocreate_epilog,
formatter_class=rawhelp)
repocreate_parser.add_argument('-u', '--url', help='URL of the repo', metavar='URL', type=valid_url)
repocreate_parser.add_argument('repo')
repocreate_parser.set_defaults(func=create_repo)
repodelete_desc = 'Delete Repo'
repodelete_parser = delete_subparsers.add_parser('repo', description=repodelete_desc, help=repodelete_desc)
repodelete_parser.add_argument('repo')
repodelete_parser.set_defaults(func=delete_repo)
repolist_desc = 'List Repos'
repolist_parser = list_subparsers.add_parser('repo', description=repolist_desc, help=repolist_desc,
aliases=['repos'])
repolist_parser.set_defaults(func=list_repo)
repoupdate_desc = 'Update Repo'
repoupdate_parser = update_subparsers.add_parser('repo', description=repoupdate_desc, help=repoupdate_desc)
repoupdate_parser.add_argument('repo')
repoupdate_parser.set_defaults(func=update_repo)
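# Download helpers for binaries, cloud images, isos and plans.
# Example (assuming the installed entrypoint is `kcli`):
#   kcli download image -p default IMAGE    # IMAGE must be one of IMAGES.keys()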
coreosinstallerdownload_desc = 'Download Coreos Installer'
coreosinstallerdownload_parser = argparse.ArgumentParser(add_help=False)
coreosinstallerdownload_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)',
metavar='PARAM')
coreosinstallerdownload_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
coreosinstallerdownload_parser.set_defaults(func=download_coreos_installer)
download_subparsers.add_parser('coreos-installer', parents=[coreosinstallerdownload_parser],
description=coreosinstallerdownload_desc,
help=coreosinstallerdownload_desc)
imagedownload_desc = 'Download Cloud Image'
imagedownload_help = "Image to download. Choose between \n%s" % '\n'.join(IMAGES.keys())
imagedownload_parser = argparse.ArgumentParser(add_help=False)
imagedownload_parser.add_argument('-a', '--arch', help='Target arch', choices=['x86_64', 'aarch64'],
default='x86_64')
imagedownload_parser.add_argument('-c', '--cmd', help='Extra command to launch after downloading', metavar='CMD')
imagedownload_parser.add_argument('-o', '--openstack', help='Use openstack variant (kvm specific)',
action='store_true')
imagedownload_parser.add_argument('-p', '--pool', help='Pool to use. Defaults to default', metavar='POOL')
imagedownload_parser.add_argument('-u', '--url', help='Url to use', metavar='URL', type=valid_url)
imagedownload_parser.add_argument('--size', help='Disk size (kubevirt specific)', type=int, metavar='SIZE')
imagedownload_parser.add_argument('-s', '--skip-profile', help='Skip Profile update', action='store_true')
imagedownload_parser.add_argument('image', help=imagedownload_help, metavar='IMAGE')
imagedownload_parser.set_defaults(func=download_image)
download_subparsers.add_parser('image', parents=[imagedownload_parser], description=imagedownload_desc,
help=imagedownload_desc)
isodownload_desc = 'Download Iso'
isodownload_help = "Iso name"
isodownload_parser = argparse.ArgumentParser(add_help=False)
isodownload_parser.add_argument('-p', '--pool', help='Pool to use. Defaults to default', metavar='POOL')
isodownload_parser.add_argument('-u', '--url', help='Url to use', metavar='URL', required=True, type=valid_url)
isodownload_parser.add_argument('iso', help=isodownload_help, metavar='ISO', nargs='?')
isodownload_parser.set_defaults(func=download_iso)
download_subparsers.add_parser('iso', parents=[isodownload_parser], description=isodownload_desc,
help=isodownload_desc)
okddownload_desc = 'Download Okd Installer'
okddownload_parser = argparse.ArgumentParser(add_help=False)
okddownload_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
okddownload_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
okddownload_parser.set_defaults(func=download_okd_installer)
download_subparsers.add_parser('okd-installer', parents=[okddownload_parser],
description=okddownload_desc,
help=okddownload_desc, aliases=['okd-install'])
openshiftdownload_desc = 'Download Openshift Installer'
openshiftdownload_parser = argparse.ArgumentParser(add_help=False)
openshiftdownload_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
openshiftdownload_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
openshiftdownload_parser.set_defaults(func=download_openshift_installer)
download_subparsers.add_parser('openshift-installer', parents=[openshiftdownload_parser],
description=openshiftdownload_desc,
help=openshiftdownload_desc, aliases=['openshift-install'])
helmdownload_desc = 'Download Helm'
helmdownload_parser = argparse.ArgumentParser(add_help=False)
helmdownload_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
helmdownload_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
helmdownload_parser.set_defaults(func=download_helm)
download_subparsers.add_parser('helm', parents=[helmdownload_parser],
description=helmdownload_desc,
help=helmdownload_desc)
kubectldownload_desc = 'Download Kubectl'
kubectldownload_parser = argparse.ArgumentParser(add_help=False)
kubectldownload_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
kubectldownload_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
kubectldownload_parser.set_defaults(func=download_kubectl)
download_subparsers.add_parser('kubectl', parents=[kubectldownload_parser],
description=kubectldownload_desc,
help=kubectldownload_desc)
ocdownload_desc = 'Download Oc'
ocdownload_parser = argparse.ArgumentParser(add_help=False)
ocdownload_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
ocdownload_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
ocdownload_parser.set_defaults(func=download_oc)
download_subparsers.add_parser('oc', parents=[ocdownload_parser],
description=ocdownload_desc,
help=ocdownload_desc)
plandownload_desc = 'Download Plan'
plandownload_parser = argparse.ArgumentParser(add_help=False)
plandownload_parser.add_argument('-u', '--url', help='Url to use', metavar='URL', required=True, type=valid_url)
plandownload_parser.add_argument('plan', metavar='PLAN', nargs='?')
plandownload_parser.set_defaults(func=download_plan)
download_subparsers.add_parser('plan', parents=[plandownload_parser], description=plandownload_desc,
help=plandownload_desc)
tastydownload_desc = 'Download Tasty'
tastydownload_parser = argparse.ArgumentParser(add_help=False)
tastydownload_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
tastydownload_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
tastydownload_parser.set_defaults(func=download_tasty)
download_subparsers.add_parser('tasty', parents=[tastydownload_parser], description=tastydownload_desc,
help=tastydownload_desc)
imagelist_desc = 'List Images'
imagelist_parser = list_subparsers.add_parser('image', description=imagelist_desc, help=imagelist_desc,
aliases=['images'])
imagelist_parser.set_defaults(func=list_image)
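# Vm commands: create/delete/info/list plus vm-data, vm-disk and vm-nic helpers.
# Example (assuming the installed entrypoint is `kcli`):
#   kcli create vm -i IMAGE -P key=value myvm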
vmcreate_desc = 'Create Vm'
vmcreate_epilog = "examples:\n%s" % vmcreate
vmcreate_parser = argparse.ArgumentParser(add_help=False)
vmcreate_parser.add_argument('-p', '--profile', help='Profile to use', metavar='PROFILE')
vmcreate_parser.add_argument('--console', help='Directly switch to console after creation', action='store_true')
vmcreate_parser.add_argument('-c', '--count', help='How many vms to create', type=int, default=1, metavar='COUNT')
vmcreate_parser.add_argument('-i', '--image', help='Image to use', metavar='IMAGE')
vmcreate_parser.add_argument('--profilefile', help='File to load profiles from', metavar='PROFILEFILE')
vmcreate_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (multiple can be specified)',
metavar='PARAM')
vmcreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
vmcreate_parser.add_argument('-s', '--serial', help='Directly switch to serial console after creation',
action='store_true')
vmcreate_parser.add_argument('-w', '--wait', action='store_true', help='Wait for cloudinit to finish')
vmcreate_parser.add_argument('name', metavar='VMNAME', nargs='?', type=valid_fqdn)
vmcreate_parser.set_defaults(func=create_vm)
create_subparsers.add_parser('vm', parents=[vmcreate_parser], description=vmcreate_desc, help=vmcreate_desc,
epilog=vmcreate_epilog, formatter_class=rawhelp)
vmdelete_desc = 'Delete Vm'
vmdelete_parser = argparse.ArgumentParser(add_help=False)
vmdelete_parser.add_argument('-c', '--count', help='How many vms to delete', type=int, default=1, metavar='COUNT')
vmdelete_parser.add_argument('-y', '--yes', action='store_true', help="Don't ask for confirmation")
vmdelete_parser.add_argument('-s', '--snapshots', action='store_true', help='Remove snapshots if needed')
vmdelete_parser.add_argument('names', metavar='VMNAMES', nargs='*')
vmdelete_parser.set_defaults(func=delete_vm)
delete_subparsers.add_parser('vm', parents=[vmdelete_parser], description=vmdelete_desc, help=vmdelete_desc)
vmdatacreate_desc = 'Create Cloudinit/Ignition for a single vm'
vmdatacreate_epilog = "examples:\n%s" % vmdatacreate
vmdatacreate_parser = create_subparsers.add_parser('vm-data', description=vmdatacreate_desc,
help=vmdatacreate_desc, epilog=vmdatacreate_epilog,
formatter_class=rawhelp)
vmdatacreate_parser.add_argument('-i', '--image', help='Image to use', metavar='IMAGE')
vmdatacreate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
vmdatacreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
vmdatacreate_parser.add_argument('name', metavar='VMNAME', nargs='?', type=valid_fqdn)
vmdatacreate_parser.set_defaults(func=create_vmdata)
vmdiskadd_desc = 'Add Disk To Vm'
diskcreate_epilog = "examples:\n%s" % diskcreate
vmdiskadd_parser = argparse.ArgumentParser(add_help=False)
vmdiskadd_parser.add_argument('-s', '--size', type=int, help='Size of the disk to add, in GB', metavar='SIZE',
default=10)
vmdiskadd_parser.add_argument('-i', '--image', help='Name or Path of a Image', metavar='IMAGE')
vmdiskadd_parser.add_argument('--interface', default='virtio', help='Disk Interface. Defaults to virtio',
metavar='INTERFACE')
vmdiskadd_parser.add_argument('-n', '--novm', action='store_true', help="Don't attach to any vm")
vmdiskadd_parser.add_argument('-p', '--pool', default='default', help='Pool', metavar='POOL')
vmdiskadd_parser.add_argument('-P', '--param', action='append',
help='specify parameter or keyword for rendering (can specify multiple)',
metavar='PARAM')
vmdiskadd_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
vmdiskadd_parser.add_argument('name', metavar='VMNAME')
vmdiskadd_parser.set_defaults(func=create_vmdisk)
create_subparsers.add_parser('vm-disk', parents=[vmdiskadd_parser], description=vmdiskadd_desc, help=vmdiskadd_desc,
aliases=['disk'], epilog=diskcreate_epilog,
formatter_class=rawhelp)
vmdiskdelete_desc = 'Delete Vm Disk'
diskdelete_epilog = "examples:\n%s" % diskdelete
vmdiskdelete_parser = argparse.ArgumentParser(add_help=False)
vmdiskdelete_parser.add_argument('-n', '--novm', action='store_true', help="Don't try to locate vm")
vmdiskdelete_parser.add_argument('--vm', help='Name of the vm', metavar='VMNAME')
vmdiskdelete_parser.add_argument('-p', '--pool', default='default', help='Pool', metavar='POOL')
vmdiskdelete_parser.add_argument('-y', '--yes', action='store_true', help="Don't ask for confirmation")
vmdiskdelete_parser.add_argument('disknames', metavar='DISKNAMES', nargs='*')
vmdiskdelete_parser.set_defaults(func=delete_vmdisk)
delete_subparsers.add_parser('vm-disk', parents=[vmdiskdelete_parser], description=vmdiskdelete_desc,
aliases=['disk'], help=vmdiskdelete_desc, epilog=diskdelete_epilog,
formatter_class=rawhelp)
vmdisklist_desc = 'List All Vm Disks'
vmdisklist_parser = argparse.ArgumentParser(add_help=False)
vmdisklist_parser.set_defaults(func=list_vmdisk)
list_subparsers.add_parser('disk', parents=[vmdisklist_parser], description=vmdisklist_desc,
help=vmdisklist_desc, aliases=['disks'])
vminfo_desc = 'Info Of Vms'
vminfo_parser = argparse.ArgumentParser(add_help=False)
vminfo_parser.add_argument('-f', '--fields', help='Display corresponding list of fields, '
                           'separated by a comma', metavar='FIELDS')
vminfo_parser.add_argument('-o', '--output', choices=['plain', 'yaml'], help='Format of the output')
vminfo_parser.add_argument('-v', '--values', action='store_true', help='Only report values')
vminfo_parser.add_argument('names', help='VMNAMES', nargs='*')
vminfo_parser.set_defaults(func=info_vm)
info_subparsers.add_parser('vm', parents=[vminfo_parser], description=vminfo_desc, help=vminfo_desc)
vmlist_desc = 'List Vms'
vmlist_parser = argparse.ArgumentParser(add_help=False)
vmlist_parser.add_argument('--filters', choices=('up', 'down'))
vmlist_parser.set_defaults(func=list_vm)
list_subparsers.add_parser('vm', parents=[vmlist_parser], description=vmlist_desc, help=vmlist_desc,
aliases=['vms'])
create_vmnic_desc = 'Add Nic To Vm'
create_vmnic_epilog = "examples:\n%s" % niccreate
create_vmnic_parser = argparse.ArgumentParser(add_help=False)
create_vmnic_parser.add_argument('-n', '--network', help='Network', metavar='NETWORK')
create_vmnic_parser.add_argument('name', metavar='VMNAME')
create_vmnic_parser.set_defaults(func=create_vmnic)
create_subparsers.add_parser('vm-nic', parents=[create_vmnic_parser], description=create_vmnic_desc,
help=create_vmnic_desc, aliases=['nic'],
epilog=create_vmnic_epilog, formatter_class=rawhelp)
delete_vmnic_desc = 'Delete Nic From Vm'
delete_vmnic_epilog = "examples:\n%s" % nicdelete
delete_vmnic_parser = argparse.ArgumentParser(add_help=False)
delete_vmnic_parser.add_argument('-i', '--interface', help='Interface name', metavar='INTERFACE')
delete_vmnic_parser.add_argument('-n', '--network', help='Network', metavar='NETWORK')
delete_vmnic_parser.add_argument('name', metavar='VMNAME')
delete_vmnic_parser.set_defaults(func=delete_vmnic)
delete_subparsers.add_parser('vm-nic', parents=[delete_vmnic_parser], description=delete_vmnic_desc,
help=delete_vmnic_desc, aliases=['nic'],
epilog=delete_vmnic_epilog, formatter_class=rawhelp)
vmrestart_desc = 'Restart Vms'
vmrestart_parser = restart_subparsers.add_parser('vm', description=vmrestart_desc, help=vmrestart_desc)
vmrestart_parser.add_argument('names', metavar='VMNAMES', nargs='*')
vmrestart_parser.set_defaults(func=restart_vm)
vmsnapshotcreate_desc = 'Create Snapshot Of Vm'
vmsnapshotcreate_parser = create_subparsers.add_parser('vm-snapshot', description=vmsnapshotcreate_desc,
help=vmsnapshotcreate_desc, aliases=['snapshot'])
vmsnapshotcreate_parser.add_argument('-n', '--name', help='vm name', required=True, metavar='VMNAME')
vmsnapshotcreate_parser.add_argument('snapshot')
vmsnapshotcreate_parser.set_defaults(func=snapshotcreate_vm)
vmsnapshotdelete_desc = 'Delete Snapshot Of Vm'
vmsnapshotdelete_parser = delete_subparsers.add_parser('vm-snapshot', description=vmsnapshotdelete_desc,
help=vmsnapshotdelete_desc)
vmsnapshotdelete_parser.add_argument('-n', '--name', help='vm name', required=True, metavar='VMNAME')
vmsnapshotdelete_parser.add_argument('snapshot')
vmsnapshotdelete_parser.set_defaults(func=snapshotdelete_vm)
vmsnapshotlist_desc = 'List Snapshots Of Vm'
vmsnapshotlist_parser = list_subparsers.add_parser('vm-snapshot', description=vmsnapshotlist_desc,
help=vmsnapshotlist_desc, aliases=['vm-snapshots'])
vmsnapshotlist_parser.add_argument('-n', '--name', help='vm name', required=True, metavar='VMNAME')
vmsnapshotlist_parser.set_defaults(func=snapshotlist_vm)
vmsnapshotrevert_desc = 'Revert Snapshot Of Vm'
vmsnapshotrevert_parser = revert_subparsers.add_parser('vm-snapshot', description=vmsnapshotrevert_desc,
help=vmsnapshotrevert_desc, aliases=['vm'])
vmsnapshotrevert_parser.add_argument('-n', '--name', help='vm name', required=True, metavar='VMNAME')
vmsnapshotrevert_parser.add_argument('snapshot')
vmsnapshotrevert_parser.set_defaults(func=snapshotrevert_vm)
vmstart_desc = 'Start Vms'
vmstart_parser = argparse.ArgumentParser(add_help=False)
vmstart_parser.add_argument('names', metavar='VMNAMES', nargs='*')
vmstart_parser.set_defaults(func=start_vm)
start_subparsers.add_parser('vm', parents=[vmstart_parser], description=vmstart_desc, help=vmstart_desc)
vmstop_desc = 'Stop Vms'
vmstop_parser = argparse.ArgumentParser(add_help=False)
vmstop_parser.add_argument('-s', '--soft', action='store_true', help='Do a soft stop')
vmstop_parser.add_argument('names', metavar='VMNAMES', nargs='*')
vmstop_parser.set_defaults(func=stop_vm)
stop_subparsers.add_parser('vm', parents=[vmstop_parser], description=vmstop_desc, help=vmstop_desc)
vmupdate_desc = 'Update Vm\'s Ip, Memory Or Numcpus'
vmupdate_parser = update_subparsers.add_parser('vm', description=vmupdate_desc, help=vmupdate_desc)
vmupdate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
vmupdate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
vmupdate_parser.add_argument('names', help='VMNAMES', nargs='*')
vmupdate_parser.set_defaults(func=update_vm)
workflowcreate_desc = 'Create Workflow'
workflowcreate_epilog = "examples:\n%s" % workflowcreate
workflowcreate_parser = create_subparsers.add_parser('workflow', description=workflowcreate_desc,
help=workflowcreate_desc, epilog=workflowcreate_epilog,
formatter_class=rawhelp)
workflowcreate_parser.add_argument('-P', '--param', action='append',
help='Define parameter for rendering (can specify multiple)', metavar='PARAM')
workflowcreate_parser.add_argument('--paramfile', '--pf', help='Parameters file', metavar='PARAMFILE')
# workflowcreate_parser.add_argument('-t', '--threaded', help='Run threaded', action='store_true')
workflowcreate_parser.add_argument('workflow', metavar='WORKFLOW', nargs='?')
workflowcreate_parser.set_defaults(func=create_workflow)
argcomplete.autocomplete(parser)
if len(sys.argv) == 1 or (len(sys.argv) == 3 and sys.argv[1] == '-C'):
parser.print_help()
sys.exit(0)
args = parser.parse_args()
if not hasattr(args, 'func'):
for attr in dir(args):
if attr.startswith('subcommand_') and getattr(args, attr) is None:
split = attr.split('_')
if len(split) == 2:
subcommand = split[1]
get_subparser_print_help(parser, subcommand)
elif len(split) == 3:
subcommand = split[1]
subsubcommand = split[2]
subparser = get_subparser(parser, subcommand)
get_subparser_print_help(subparser, subsubcommand)
sys.exit(0)
sys.exit(0)
elif args.func.__name__ == 'vmcreate' and args.client is not None and ',' in args.client:
args.client = random.choice(args.client.split(','))
pprint(f"Selecting {args.client} for creation")
args.func(args)
if __name__ == '__main__':
cli()
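# Example invocations, assuming this module is exposed as the `kcli` console
# entry point (the vm name and image below are illustrative only):
#   kcli create vm -i centos8stream -P memory=2048 myvm
#   kcli list vms
#   kcli delete vm -y myvm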
|
karmab/kcli
|
kvirt/cli.py
|
Python
|
apache-2.0
| 234,461
|
# Copyright 2020 Tensorforce Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import numpy as np
import tensorflow as tf
from tensorforce import TensorforceError, util
from tensorforce.core import layer_modules, TensorDict, TensorSpec, TensorsSpec, tf_function, \
tf_util
from tensorforce.core.distributions import Distribution
class Beta(Distribution):
"""
Beta distribution, for bounded continuous actions (specification key: `beta`).
Args:
name (string): <span style="color:#0000C0"><b>internal use</b></span>.
action_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>.
input_spec (specification): <span style="color:#0000C0"><b>internal use</b></span>.
"""
def __init__(self, *, name=None, action_spec=None, input_spec=None):
assert action_spec.type == 'float' and action_spec.min_value is not None and \
action_spec.max_value is not None
parameters_spec = TensorsSpec(
alpha=TensorSpec(type='float', shape=action_spec.shape),
beta=TensorSpec(type='float', shape=action_spec.shape),
alpha_beta=TensorSpec(type='float', shape=action_spec.shape),
log_norm=TensorSpec(type='float', shape=action_spec.shape)
)
conditions_spec = TensorsSpec()
super().__init__(
name=name, action_spec=action_spec, input_spec=input_spec,
parameters_spec=parameters_spec, conditions_spec=conditions_spec
)
if self.input_spec.rank == 1:
# Single embedding
action_size = util.product(xs=self.action_spec.shape, empty=0)
self.alpha = self.submodule(
name='alpha', module='linear', modules=layer_modules, size=action_size,
initialization_scale=0.01, input_spec=self.input_spec
)
self.beta = self.submodule(
name='beta', module='linear', modules=layer_modules, size=action_size,
initialization_scale=0.01, input_spec=self.input_spec
)
else:
# Embedding per action
if self.input_spec.rank < 1 or self.input_spec.rank > 3:
raise TensorforceError.value(
name=name, argument='input_spec.shape', value=self.input_spec.shape,
hint='invalid rank'
)
if self.input_spec.shape[:-1] == self.action_spec.shape[:-1]:
size = self.action_spec.shape[-1]
elif self.input_spec.shape[:-1] == self.action_spec.shape:
size = 0
else:
raise TensorforceError.value(
name=name, argument='input_spec.shape', value=self.input_spec.shape,
hint='not flattened and incompatible with action shape'
)
self.alpha = self.submodule(
name='alpha', module='linear', modules=layer_modules, size=size,
initialization_scale=0.01, input_spec=self.input_spec
)
self.beta = self.submodule(
name='beta', module='linear', modules=layer_modules, size=size,
initialization_scale=0.01, input_spec=self.input_spec
)
def get_architecture(self):
return 'Alpha: {}\nBeta: {}'.format(
self.alpha.get_architecture(), self.beta.get_architecture()
)
def initialize(self):
super().initialize()
prefix = 'distributions/' + self.name
names = (prefix + '-alpha', prefix + '-beta')
self.register_summary(label='distribution', name=names)
spec = self.parameters_spec['alpha']
self.register_tracking(label='distribution', name='alpha', spec=spec)
self.register_tracking(label='distribution', name='beta', spec=spec)
@tf_function(num_args=2)
def parametrize(self, *, x, conditions):
# Clipped exp(x) + 1 (rather than an actual softplus) to ensure alpha and beta > 1
one = tf_util.constant(value=1.0, dtype='float')
log_epsilon = tf_util.constant(value=np.log(util.epsilon), dtype='float')
shape = (-1,) + self.action_spec.shape
# Alpha
alpha = self.alpha.apply(x=x)
# epsilon < 1.0, hence negative
alpha = tf.clip_by_value(t=alpha, clip_value_min=log_epsilon, clip_value_max=-log_epsilon)
alpha = tf.math.exp(x=alpha) + one  # tf.math.softplus(features=alpha) ???
if self.input_spec.rank == 1:
alpha = tf.reshape(tensor=alpha, shape=shape)
# Beta
beta = self.beta.apply(x=x)
# epsilon < 1.0, hence negative
beta = tf.clip_by_value(t=beta, clip_value_min=log_epsilon, clip_value_max=-log_epsilon)
beta = tf.math.exp(x=beta) + one # tf.math.softplus(features=beta) ???
if self.input_spec.rank == 1:
beta = tf.reshape(tensor=beta, shape=shape)
# Alpha + Beta
alpha_beta = alpha + beta # > 2.0 so no +epsilon required
# Log norm
log_norm = tf.math.lgamma(x=alpha) + tf.math.lgamma(x=beta) - tf.math.lgamma(x=alpha_beta)
return TensorDict(alpha=alpha, beta=beta, alpha_beta=alpha_beta, log_norm=log_norm)
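# Note on the parametrization above: with util.epsilon a small constant
# (assumed here to be on the order of 1e-6), clipping x to
# [log(eps), -log(eps)] keeps exp(x) within [eps, 1/eps], so alpha and beta
# always lie in (1, 1 + 1/eps]. Both concentration parameters being > 1
# guarantees a unimodal Beta density with finite log-probabilities.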
@tf_function(num_args=1)
def mode(self, *, parameters, independent):
alpha, beta, alpha_beta = parameters.get(('alpha', 'beta', 'alpha_beta'))
# Distribution parameter summaries
dependencies = list()
if not independent:
def fn_summary():
a = tf.math.reduce_mean(input_tensor=alpha, axis=range(self.action_spec.rank + 1))
b = tf.math.reduce_mean(input_tensor=beta, axis=range(self.action_spec.rank + 1))
return a, b
prefix = 'distributions/' + self.name
names = (prefix + '-alpha', prefix + '-beta')
dependencies.extend(self.summary(
label='distribution', name=names, data=fn_summary, step='timesteps'
))
# Distribution parameter tracking
def fn_tracking():
return tf.math.reduce_mean(input_tensor=alpha, axis=0)
dependencies.extend(self.track(label='distribution', name='alpha', data=fn_tracking))
def fn_tracking():
return tf.math.reduce_mean(input_tensor=beta, axis=0)
dependencies.extend(self.track(label='distribution', name='beta', data=fn_tracking))
with tf.control_dependencies(control_inputs=dependencies):
action = beta / alpha_beta
min_value = tf_util.constant(value=self.action_spec.min_value, dtype='float')
max_value = tf_util.constant(value=self.action_spec.max_value, dtype='float')
return min_value + (max_value - min_value) * action
@tf_function(num_args=2)
def sample(self, *, parameters, temperature, independent):
alpha, beta, alpha_beta, log_norm = parameters.get(
('alpha', 'beta', 'alpha_beta', 'log_norm')
)
# Distribution parameter summaries
dependencies = list()
if not independent:
def fn_summary():
a = tf.math.reduce_mean(input_tensor=alpha, axis=range(self.action_spec.rank + 1))
b = tf.math.reduce_mean(input_tensor=beta, axis=range(self.action_spec.rank + 1))
return a, b
prefix = 'distributions/' + self.name
names = (prefix + '-alpha', prefix + '-beta')
dependencies.extend(self.summary(
label='distribution', name=names, data=fn_summary, step='timesteps'
))
# Distribution parameter tracking
def fn_tracking():
return tf.math.reduce_mean(input_tensor=alpha, axis=0)
dependencies.extend(self.track(label='distribution', name='alpha', data=fn_tracking))
def fn_tracking():
return tf.math.reduce_mean(input_tensor=beta, axis=0)
dependencies.extend(self.track(label='distribution', name='beta', data=fn_tracking))
epsilon = tf_util.constant(value=util.epsilon, dtype='float')
def fn_mode():
# Deterministic: mean as action
return beta / alpha_beta
def fn_sample():
# Non-deterministic: sample action using gamma distribution
alpha_sample = tf.random.gamma(shape=(), alpha=alpha, dtype=tf_util.get_dtype(type='float'))
beta_sample = tf.random.gamma(shape=(), alpha=beta, dtype=tf_util.get_dtype(type='float'))
return beta_sample / (alpha_sample + beta_sample)
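# fn_sample uses the Gamma-ratio representation: if G_a ~ Gamma(alpha, 1) and
# G_b ~ Gamma(beta, 1) independently, then G_b / (G_a + G_b) ~ Beta(beta, alpha),
# which matches the beta-as-mean convention of fn_mode above.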
action = tf.cond(pred=(temperature < epsilon), true_fn=fn_mode, false_fn=fn_sample)
min_value = tf_util.constant(value=self.action_spec.min_value, dtype='float')
max_value = tf_util.constant(value=self.action_spec.max_value, dtype='float')
with tf.control_dependencies(control_inputs=dependencies):
return min_value + (max_value - min_value) * action
@tf_function(num_args=2)
def log_probability(self, *, parameters, action):
alpha, beta, log_norm = parameters.get(('alpha', 'beta', 'log_norm'))
min_value = tf_util.constant(value=self.action_spec.min_value, dtype='float')
max_value = tf_util.constant(value=self.action_spec.max_value, dtype='float')
action = (action - min_value) / (max_value - min_value)
one = tf_util.constant(value=1.0, dtype='float')
epsilon = tf_util.constant(value=util.epsilon, dtype='float')
return tf.math.xlogy(x=(beta - one), y=(action + epsilon)) + \
(alpha - one) * tf.math.log1p(x=(-action + epsilon)) - log_norm
@tf_function(num_args=1)
def entropy(self, *, parameters):
alpha, beta, alpha_beta, log_norm = parameters.get(
('alpha', 'beta', 'alpha_beta', 'log_norm')
)
one = tf_util.constant(value=1.0, dtype='float')
digamma_alpha = tf_util.cast(x=tf.math.digamma(x=tf_util.float32(x=alpha)), dtype='float')
digamma_beta = tf_util.cast(x=tf.math.digamma(x=tf_util.float32(x=beta)), dtype='float')
digamma_alpha_beta = tf_util.cast(
x=tf.math.digamma(x=tf_util.float32(x=alpha_beta)), dtype='float'
)
return log_norm - (beta - one) * digamma_beta - (alpha - one) * digamma_alpha + \
(alpha_beta - one - one) * digamma_alpha_beta
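# The expression above is the standard Beta differential entropy:
#   H = ln B(alpha, beta) - (beta - 1) psi(beta) - (alpha - 1) psi(alpha)
#       + (alpha + beta - 2) psi(alpha + beta)
# where ln B(., .) is `log_norm` and psi is the digamma function.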
@tf_function(num_args=2)
def kl_divergence(self, *, parameters1, parameters2):
alpha1, beta1, alpha_beta1, log_norm1 = parameters1.get(
('alpha', 'beta', 'alpha_beta', 'log_norm')
)
alpha2, beta2, alpha_beta2, log_norm2 = parameters2.get(
('alpha', 'beta', 'alpha_beta', 'log_norm')
)
digamma_alpha1 = tf_util.cast(x=tf.math.digamma(x=tf_util.float32(x=alpha1)), dtype='float')
digamma_beta1 = tf_util.cast(x=tf.math.digamma(x=tf_util.float32(x=beta1)), dtype='float')
digamma_alpha_beta1 = tf_util.cast(
x=tf.math.digamma(x=tf_util.float32(x=alpha_beta1)), dtype='float'
)
return log_norm2 - log_norm1 - digamma_beta1 * (beta2 - beta1) - \
digamma_alpha1 * (alpha2 - alpha1) + digamma_alpha_beta1 * \
(alpha_beta2 - alpha_beta1)
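# The return value matches the closed-form KL divergence between Beta
# distributions:
#   KL(B(a1, b1) || B(a2, b2)) = ln B(a2, b2) - ln B(a1, b1)
#       + (a1 - a2) psi(a1) + (b1 - b2) psi(b1)
#       + ((a2 + b2) - (a1 + b1)) psi(a1 + b1)
# with ln B(., .) given by `log_norm` and psi the digamma function.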
|
reinforceio/tensorforce
|
tensorforce/core/distributions/beta.py
|
Python
|
apache-2.0
| 11,809
|
import sys
from support import RpgTestCase
from rpg.conf import Conf
class ConfTest(RpgTestCase):
def setUp(self):
self.old_argv = sys.argv
def tearDown(self):
sys.argv = self.old_argv
def test_include_dir(self):
sys.argv = ["rpg", "--plugin-dir", str(self.test_project_dir / "py")]
conf = Conf()
conf.parse_cmdline()
self.assertEqual(str(['tests/project/py']), str(conf.directories))
def test_include_dir_fail(self):
sys.argv = ["rpg", "--plugin-dir", str("NotADir")]
conf = Conf()
conf.parse_cmdline()
self.assertEqual(str(["tests/project/py"]), str(conf.directories))
def test_exclude_plug(self):
sys.argv = ["rpg", "--disable-plugin", str("TestPlugin")]
conf = Conf()
conf.parse_cmdline()
self.assertEqual(str(["TestPlugin"]), str(conf.exclude))
|
Shootervm/rpg
|
tests/test_conf.py
|
Python
|
gpl-2.0
| 889
|
# pyOCD debugger
# Copyright (c) 2015-2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from enum import Enum
import six
import copy
from functools import total_ordering
class MemoryType(Enum):
"""! @brief Known types of memory."""
OTHER = 0
RAM = 1
ROM = 2
FLASH = 3
DEVICE = 4
def check_range(start, end=None, length=None, range=None):
assert (start is not None) and ((isinstance(start, MemoryRange) or range is not None) or
((end is not None) ^ (length is not None)))
if isinstance(start, MemoryRange):
range = start
if range is not None:
start = range.start
end = range.end
elif end is None:
end = start + length - 1
return start, end
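# All of the following calls normalize to the same inclusive pair (0x1000, 0x1fff):
#   check_range(0x1000, end=0x1fff)
#   check_range(0x1000, length=0x1000)
#   check_range(MemoryRange(start=0x1000, length=0x1000))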
@total_ordering
class MemoryRangeBase(object):
"""! @brief Base class for a range of memory.
This base class provides the basic address range support and methods to test for containment
or intersection with another range.
"""
def __init__(self, start=0, end=0, length=None):
self._start = start
if length is not None:
self._end = self._start + length - 1
else:
self._end = end
assert self._end >= (self._start - 1)
@property
def start(self):
return self._start
@property
def end(self):
return self._end
@property
def length(self):
return self._end - self._start + 1
def contains_address(self, address):
return (address >= self.start) and (address <= self.end)
def contains_range(self, start, end=None, length=None, range=None):
"""! @return Whether the given range is fully contained by the region."""
start, end = check_range(start, end, length, range)
return self.contains_address(start) and self.contains_address(end)
def contained_by_range(self, start, end=None, length=None, range=None):
"""! @return Whether the region is fully within the bounds of the given range."""
start, end = check_range(start, end, length, range)
return start <= self.start and end >= self.end
def intersects_range(self, start, end=None, length=None, range=None):
"""! @return Whether the region and the given range intersect at any point."""
start, end = check_range(start, end, length, range)
return (start <= self.start and end >= self.start) or (start <= self.end and end >= self.end) \
or (start >= self.start and end <= self.end)
def __hash__(self):
return hash("%08x%08x%08x" % (self.start, self.end, self.length))
def __eq__(self, other):
return self.start == other.start and self.length == other.length
def __lt__(self, other):
return self.start < other.start or (self.start == other.start and self.length == other.length)
class MemoryRange(MemoryRangeBase):
"""! @brief A range of memory optionally tied to a region."""
def __init__(self, start=0, end=0, length=None, region=None):
super(MemoryRange, self).__init__(start=start, end=end, length=length)
self._region = region
@property
def region(self):
return self._region
def __hash__(self):
h = super(MemoryRange, self).__hash__()
if self.region is not None:
h ^= hash(self.region)
return h
def __eq__(self, other):
return self.start == other.start and self.length == other.length and self.region == other.region
def __repr__(self):
return "<%s@0x%x start=0x%x end=0x%x length=0x%x region=%s>" % (self.__class__.__name__,
id(self), self.start, self.end, self.length, self.region)
class MemoryRegion(MemoryRangeBase):
"""! @brief One contiguous range of memory.
Memory regions have attributes accessible via the normal dot syntax.
- `name`: Name of the region, which defaults to the region type in lowercase.
- `access`: Composition of r, w, x, s.
- `alias`: If set, this is the name of another region of which this region is an alias.
- `is_boot_memory`: Whether the device boots from this memory. This normally implies that the
boot NVIC vector table is placed at the base address of this region, but that is not
always the case.
- `is_default`: Whether the region should be used as a default of the given type.
- `is_powered_on_boot`: Whether the memory is powered and accessible without special configuration
at system boot. For internal memories, this will almost always be true.
- `is_cacheable`: Determines whether data should be cached from this region. True for most
memory types, except DEVICE.
- `invalidate_cache_on_run`: Whether to invalidate any cached data from the region whenever the
target resumes execution or steps. Usually true, though this can be false for regions such
as memory-mapped OTP or configuration flash.
- `is_testable`: Whether pyOCD should consider the region in its functional tests.
- `is_external`: If true, the region is backed by an external memory device such as SDRAM or QSPI.
Several attributes are available whose values are computed from other attributes. These should
not be set when creating the region.
- `is_ram`
- `is_rom`
- `is_flash`
- `is_device`
- `is_readable`
- `is_writable`
- `is_executable`
- `is_secure`
- `is_nonsecure`
"""
## Default attribute values for all memory region types.
DEFAULT_ATTRS = {
'name': lambda r: r.type.name.lower(),
'access': 'rwx',
'alias': None,
'is_boot_memory': False,
'is_default': True,
'is_powered_on_boot': True,
'is_cacheable': True,
'invalidate_cache_on_run': True,
'is_testable': True,
'is_external': False,
'is_ram': lambda r: r.type == MemoryType.RAM,
'is_rom': lambda r: r.type == MemoryType.ROM,
'is_flash': lambda r: r.type == MemoryType.FLASH,
'is_device': lambda r: r.type == MemoryType.DEVICE,
'is_readable': lambda r: 'r' in r.access,
'is_writable': lambda r: 'w' in r.access,
'is_executable': lambda r: 'x' in r.access,
'is_secure': lambda r: 's' in r.access,
'is_nonsecure': lambda r: not r.is_secure,
}
def __init__(self, type=MemoryType.OTHER, start=0, end=0, length=None, **attrs):
"""! Memory region constructor.
Memory regions are required to have non-zero lengths, unlike memory ranges.
Some common optional region attributes passed as keyword arguments:
- name: If a name is not provided, the name is set to the region type in lowercase.
- access: composition of r, w, x, s
- alias
- is_boot_memory
- is_powered_on_boot
- is_testable
"""
super(MemoryRegion, self).__init__(start=start, end=end, length=length)
assert self.length > 0, "Memory regions must have a non-zero length."
assert isinstance(type, MemoryType)
self._map = None
self._type = type
self._attributes = attrs
# Assign default values to any attributes missing from kw args.
for k, v in self.DEFAULT_ATTRS.items():
if k not in self._attributes:
self._attributes[k] = v
@property
def map(self):
return self._map
@map.setter
def map(self, theMap):
self._map = theMap
@property
def type(self):
return self._type
@property
def attributes(self):
return self._attributes
@property
def alias(self):
# Resolve alias reference.
aliasValue = self._attributes['alias']
if isinstance(aliasValue, six.string_types):
referent = self._map.get_first_matching_region(name=aliasValue)
if referent is None:
raise ValueError("unable to resolve memory region alias reference '%s'" % aliasValue)
self._attributes['alias'] = referent
return referent
else:
return aliasValue
def __getattr__(self, name):
try:
v = self._attributes[name]
except KeyError:
# Transform the KeyError from a missing attribute to the expected AttributeError.
raise AttributeError(name)
else:
if callable(v):
v = v(self)
return v
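# __getattr__ resolves region attributes lazily: callable defaults from
# DEFAULT_ATTRS are invoked with the region itself, so e.g. `region.is_ram`
# evaluates `lambda r: r.type == MemoryType.RAM` on each access.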
def __copy__(self):
# Custom copy is required due to our __getattr__() method.
return self.__class__(
# type=self.type,
start=self.start,
length=self.length,
**self._attributes
)
def __hash__(self):
# Need to redefine __hash__ since we redefine __eq__.
return super(MemoryRegion, self).__hash__()
def __eq__(self, other):
# Include type and attributes in equality comparison.
return self.start == other.start and self.length == other.length \
and self.type == other.type and self.attributes == other.attributes
def __repr__(self):
return "<%s@0x%x name=%s type=%s start=0x%x end=0x%x length=0x%x access=%s>" % (self.__class__.__name__, id(self), self.name, self.type, self.start, self.end, self.length, self.access)
class RamRegion(MemoryRegion):
"""! @brief Contiguous region of RAM."""
def __init__(self, start=0, end=0, length=None, **attrs):
attrs['type'] = MemoryType.RAM
super(RamRegion, self).__init__(start=start, end=end, length=length, **attrs)
class RomRegion(MemoryRegion):
"""! @brief Contiguous region of ROM."""
# Default attribute values for ROM regions.
DEFAULT_ATTRS = MemoryRegion.DEFAULT_ATTRS.copy()
DEFAULT_ATTRS.update({
'access': 'rx', # ROM is by definition not writable.
})
def __init__(self, start=0, end=0, length=None, **attrs):
attrs['type'] = MemoryType.ROM
super(RomRegion, self).__init__(start=start, end=end, length=length, **attrs)
class DefaultFlashWeights:
"""! @brief Default weights for flash programming operations."""
PROGRAM_PAGE_WEIGHT = 0.130
ERASE_SECTOR_WEIGHT = 0.048
ERASE_ALL_WEIGHT = 0.174
class FlashRegion(MemoryRegion):
"""! @brief Contiguous region of flash memory.
Flash regions have a number of attributes in addition to those available in all region types.
- `blocksize`: Erase sector size in bytes.
- `page_size`: Program page size in bytes. If not set, this will default to the `blocksize`.
- `phrase_size`: The minimum programming granularity in bytes. Defaults to the `page_size` if not set.
- `erase_all_weight`: Time it takes to erase the entire region.
- `erase_sector_weight`: Time it takes to erase one sector.
- `program_page_weight`: Time it takes to program a single page.
- `erased_byte_value`: The value of an erased byte of this flash. Most flash technologies erase to
all 1s, which would be an `erased_byte_value` of 0xff.
- `algo`: The flash algorithm dictionary.
- `flm`: Path to an FLM flash algorithm.
- `flash_class`: The class that manages individual flash algorithm operations. Must be either
@ref pyocd.flash.flash.Flash "Flash", which is the default, or a subclass.
- `flash`: After connection, this attribute holds the instance of `flash_class` for this region.
- `are_erased_sectors_readable`: Specifies whether the flash controller allows reads of erased
sectors, or will fault such reads. Default is True.
`sector_size` and `blocksize` are aliases of each other. If one is set via the constructor, the
other will have the same value.
"""
# Add some default attribute values for flash regions.
DEFAULT_ATTRS = MemoryRegion.DEFAULT_ATTRS.copy()
DEFAULT_ATTRS.update({
'blocksize': lambda r: r.sector_size, # Erase sector size. Alias for sector_size.
'sector_size': lambda r: r.blocksize, # Erase sector size. Alias for blocksize.
'page_size': lambda r: r.blocksize, # Program page size.
'phrase_size': lambda r: r.page_size, # Minimum programmable unit.
'erase_all_weight': DefaultFlashWeights.ERASE_ALL_WEIGHT,
'erase_sector_weight': DefaultFlashWeights.ERASE_SECTOR_WEIGHT,
'program_page_weight': DefaultFlashWeights.PROGRAM_PAGE_WEIGHT,
'erased_byte_value': 0xff,
'access': 'rx', # By default flash is not writable.
'are_erased_sectors_readable': True,
})
def __init__(self, start=0, end=0, length=None, **attrs):
# Import locally to prevent import loops.
from ..flash.flash import Flash
assert ('blocksize' in attrs) or ('sector_size' in attrs) or ('flm' in attrs)
attrs['type'] = MemoryType.FLASH
super(FlashRegion, self).__init__(start=start, end=end, length=length, **attrs)
self._algo = attrs.get('algo', None)
self._flm = attrs.get('flm', None)
self._flash = None
if 'flash_class' in attrs:
self._flash_class = attrs['flash_class']
assert issubclass(self._flash_class, Flash)
else:
self._flash_class = Flash
# Remove writable region attributes from attributes dict so there is only one copy.
try:
del self._attributes['algo']
except KeyError:
pass
try:
del self._attributes['flash_class']
except KeyError:
pass
@property
def algo(self):
return self._algo
@algo.setter
def algo(self, flash_algo):
self._algo = flash_algo
@property
def flm(self):
return self._flm
@flm.setter
def flm(self, flm_path):
self._flm = flm_path
@property
def flash_class(self):
return self._flash_class
@flash_class.setter
def flash_class(self, klass):
self._flash_class = klass
@property
def flash(self):
return self._flash
@flash.setter
def flash(self, flashInstance):
self._flash = flashInstance
def is_data_erased(self, d):
"""! @brief Helper method to check if a block of data is erased.
@param self
@param d List of data or bytearray.
@retval True The contents of d all match the erased byte value for this flash region.
@retval False At least one byte in d did not match the erased byte value.
"""
erasedByte = self.erased_byte_value
for b in d:
if b != erasedByte:
return False
return True
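# For the default erased_byte_value of 0xff:
#   region.is_data_erased(bytearray(b'\xff\xff\xff'))  # -> True
#   region.is_data_erased(bytearray(b'\xff\x00'))      # -> False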
def __copy__(self):
# Include the writable attributes in the copy.
clone = self.__class__(
# type=self.type,
start=self.start,
length=self.length,
algo=self._algo,
flash_class=self._flash_class,
**self._attributes
)
# Reference the shared FLM.
clone._flm = self._flm
return clone
def __repr__(self):
return "<%s@0x%x name=%s type=%s start=0x%x end=0x%x length=0x%x access=%s blocksize=0x%x>" % (self.__class__.__name__, id(self), self.name, self.type, self.start, self.end, self.length, self.access, self.blocksize)
class DeviceRegion(MemoryRegion):
"""! @brief Device or peripheral memory."""
# Default attribute values for device regions.
DEFAULT_ATTRS = MemoryRegion.DEFAULT_ATTRS.copy()
DEFAULT_ATTRS.update({
'access': 'rw', # By default device regions are not executable.
'is_cacheable': False,
'is_testable': False,
})
def __init__(self, start=0, end=0, length=None, **attrs):
attrs['type'] = MemoryType.DEVICE
super(DeviceRegion, self).__init__(start=start, end=end, length=length, **attrs)
## @brief Map from memory type to class.
MEMORY_TYPE_CLASS_MAP = {
MemoryType.OTHER: MemoryRegion,
MemoryType.RAM: RamRegion,
MemoryType.ROM: RomRegion,
MemoryType.FLASH: FlashRegion,
MemoryType.DEVICE: DeviceRegion,
}
class MemoryMap(object):
"""! @brief Memory map consisting of memory regions.
The normal way to create a memory map is to instantiate regions directly in the call to the
constructor.
@code
map = MemoryMap(
FlashRegion( start=0,
length=0x4000,
blocksize=0x400,
is_boot_memory=True,
algo=FLASH_ALGO),
RamRegion( start=0x10000000,
length=0x1000)
)
@endcode
The memory map can also be modified by adding and removing regions at runtime. Regardless of
the order regions are added, the list of regions contained in the memory map is always
maintained sorted by start address.
"""
def __init__(self, *more_regions):
"""! @brief Constructor.
All parameters passed to the constructor are assumed to be MemoryRegion instances, and
are passed to add_regions(). The resulting memory map is sorted by region start address.
@param self
@param more_regions Zero or more MemoryRegion objects passed as separate parameters.
"""
self._regions = []
self.add_regions(*more_regions)
@property
def regions(self):
"""! @brief List of all memory regions.
Regions in the returned list are sorted by start address.
"""
return self._regions
@property
def region_count(self):
"""! @brief Number of memory regions in the map."""
return len(self._regions)
def clone(self):
"""! @brief Create a duplicate of the memory map.
The duplicate memory map contains shallow copies of each of the regions. This is intended
to be used so that `Target` objects in different but simultaneously live sessions have
independant copies of the target's memory map.
"""
return MemoryMap(*[copy.copy(r) for r in self.regions])
def add_regions(self, *more_regions):
"""! @brief Add multiple regions to the memory map.
There are two options for passing the list of regions to be added. The first is to pass
each region as a separate parameter, similar to how the constructor is intended to be used.
The second option is to pass either a list or tuple of regions.
The region list is kept sorted. If no regions are provided, the call is a no-op.
@param self
@param more_regions Either a single tuple or list, or one or more MemoryRegion objects
passed as separate parameters.
"""
if len(more_regions):
if isinstance(more_regions[0], (list, tuple)):
regionsToAdd = more_regions[0]
else:
regionsToAdd = more_regions
for newRegion in regionsToAdd:
self.add_region(newRegion)
def add_region(self, new_region):
"""! @brief Add one new region to the map.
The region list is resorted after adding the provided region.
@param self
@param new_region An instance of MemoryRegion to add.
"""
new_region.map = self
self._regions.append(new_region)
self._regions.sort()
def remove_region(self, region):
"""! @brief Removes a memory region from the map.
@param self
@param region The region to remove. The region to remove is matched by identity, not value,
so this parameter must be the exact object that you wish to remove from the map.
"""
for i, r in enumerate(self._regions):
if r is region:
del self._regions[i]
def get_boot_memory(self):
"""! @brief Returns the first region marked as boot memory.
@param self
@return MemoryRegion or None.
"""
for r in self._regions:
if r.is_boot_memory:
return r
return None
def get_region_for_address(self, address):
"""! @brief Returns the first region containing the given address.
@param self
@param address An integer target address.
@return MemoryRegion or None.
"""
for r in self._regions:
if r.contains_address(address):
return r
return None
def is_valid_address(self, address):
"""! @brief Determines whether an address is contained by any region.
@param self
@param address An integer target address.
@return Boolean indicating whether the address was contained by a region.
"""
return self.get_region_for_address(address) is not None
def get_contained_regions(self, start, end=None, length=None, range=None):
"""! @brief Get all regions fully contained by an address range.
@param self
@param start The start address or a MemoryRange object.
@param end Optional end address.
@param length Optional length in bytes.
@param range Optional MemoryRange object.
@return List of all regions in the memory map that are fully enclosed by the specified
address range.
"""
start, end = check_range(start, end, length, range)
return [r for r in self._regions if r.contained_by_range(start, end)]
def get_intersecting_regions(self, start, end=None, length=None, range=None):
"""! @brief Get all regions intersected by an address range.
@param self
@param start The start address or a MemoryRange object.
@param end Optional end address.
@param length Optional length in bytes.
@param range Optional MemoryRange object.
@return List of all regions in the memory map that intersect with the specified address
range.
"""
start, end = check_range(start, end, length, range)
return [r for r in self._regions if r.intersects_range(start, end)]
def iter_matching_regions(self, **kwargs):
"""! @brief Iterate over regions matching given criteria.
Useful attributes to match on include 'type', 'name', 'is_default', and others.
@param self
@param kwargs Values for region attributes that must match.
"""
for r in self._regions:
# Check attributes.
mismatch = False
for k, v in kwargs.items():
try:
if getattr(r, k) != v:
mismatch = True
break
except AttributeError:
# Don't match regions without the specified attribute.
mismatch = True
if mismatch:
continue
yield r
def get_first_matching_region(self, **kwargs):
"""! @brief Get the first region matching a given memory type.
The region of given type with the lowest start address is returned. If there are no regions
with that type, None is returned instead.
@param self
@param type One of the MemoryType enums.
@return A MemoryRegion object or None.
"""
for r in self.iter_matching_regions(**kwargs):
return r
return None
def get_default_region_of_type(self, type):
"""! @brief Get the default region of a given memory type.
If there are multiple regions of the specified type marked as default, then the one with
the lowest start address will be returned. None is returned if there are no default regions
of the type.
@param self
@param type One of the MemoryType enums.
@return A MemoryRegion object or None.
"""
return self.get_first_matching_region(type=type, is_default=True)
def __eq__(self, other):
return isinstance(other, MemoryMap) and (self._regions == other._regions)
def __iter__(self):
"""! @brief Enable iteration over the memory map."""
return iter(self._regions)
def __getitem__(self, key):
"""! @brief Return a region indexed by name or number."""
if isinstance(key, six.string_types):
return self.get_first_matching_region(name=key)
else:
return self._regions[key]
def __repr__(self):
return "<MemoryMap@0x%08x regions=%s>" % (id(self), repr(self._regions))
|
mbedmicro/pyOCD
|
pyocd/core/memory_map.py
|
Python
|
apache-2.0
| 25,531
|
# -*- coding: utf-8 -*-
"""
tipfy.ext.auth.oauth
~~~~~~~~~~~~~~~~~~~~
Implementation of OAuth authentication scheme.
Ported from `tornado.auth <http://github.com/facebook/tornado/blob/master/tornado/auth.py>`_.
:copyright: 2009 Facebook.
:copyright: 2010 tipfy.org.
:license: Apache License Version 2.0, see LICENSE.txt for more details.
"""
from __future__ import absolute_import
import binascii
import cgi
import functools
import hashlib
import hmac
import logging
import time
import urllib
import urlparse
import uuid
from google.appengine.api import urlfetch
#from tipfy import abort, redirect
def abort(n):
pass
import aha
config = aha.Config()
class OAuthMixin(object):
"""A :class:`tipfy.RequestHandler` mixin that implements OAuth
authentication."""
_OAUTH_AUTHORIZE_URL = None
_OAUTH_NO_CALLBACKS = False
def redirect(self, url):
return url
def authorize_redirect(self, callback_uri = None, oauth_authorize_url = None):
"""Redirects the user to obtain OAuth authorization for this service.
Twitter and FriendFeed both require that you register a Callback
URL with your application. You should call this method to log the
user in, and then call get_authenticated_user() in the handler
you registered as your Callback URL to complete the authorization
process.
This method sets a cookie called _oauth_request_token which is
subsequently used (and cleared) in get_authenticated_user for
security purposes.
:param callback_uri:
:param oauth_authorize_url:
OAuth authorization URL. If not set, uses the value set in
:attr:`_OAUTH_AUTHORIZE_URL`.
:return:
"""
if callback_uri and getattr(self, '_OAUTH_NO_CALLBACKS', False):
raise Exception('This service does not support oauth_callback')
oauth_authorize_url = oauth_authorize_url or self._OAUTH_AUTHORIZE_URL
url = self._oauth_request_token_url()
try:
response = urlfetch.fetch(url, deadline = 10)
except urlfetch.DownloadError, e:
logging.exception(e)
response = None
return self._on_request_token(oauth_authorize_url, callback_uri,
response)
def get_authenticated_user(self, callback):
"""Gets the OAuth authorized user and access token on callback.
This method should be called from the handler for your registered
OAuth Callback URL to complete the registration process. We call
callback with the authenticated user, which in addition to standard
attributes like 'name' includes the 'access_key' attribute, which
contains the OAuth access you can use to make authorized requests
to this service on behalf of the user.
:param callback:
:return:
"""
request_key = self.request.args.get('oauth_token')
request_cookie = self.request.cookies.get('_oauth_request_token')
if not request_cookie:
logging.warning('Missing OAuth request token cookie')
return callback(None)
cookie_key, cookie_secret = request_cookie.split('|')
if cookie_key != request_key:
logging.warning('Request token does not match cookie')
return callback(None)
token = dict(key = cookie_key, secret = cookie_secret)
url = self._oauth_access_token_url(token)
try:
response = urlfetch.fetch(url, deadline = 10)
if response.status_code < 200 or response.status_code >= 300:
logging.warning('Invalid OAuth response: %s',
response.content)
response = None
except urlfetch.DownloadError, e:
logging.exception(e)
response = None
return self._on_access_token(callback, response)
def _oauth_request_token_url(self):
"""
:return:
"""
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_REQUEST_TOKEN_URL
args = dict(
oauth_consumer_key = consumer_token['key'],
oauth_signature_method = 'HMAC-SHA1',
oauth_timestamp = str(int(time.time())),
oauth_nonce = binascii.b2a_hex(uuid.uuid4().bytes),
oauth_version = '1.0',
)
signature = _oauth_signature(consumer_token, 'GET', url, args)
args['oauth_signature'] = signature
return url + '?' + urllib.urlencode(args)
def _on_request_token(self, authorize_url, callback_uri, response):
"""
:param authorize_url:
:param callback_uri:
:param response:
:return:
"""
if not response:
logging.warning('Could not get OAuth request token.')
return
#abort(500)
elif response.status_code < 200 or response.status_code >= 300:
logging.warning('Invalid OAuth response (%d): %s',
response.status_code, response.content)
return
#abort(500)
request_token = _oauth_parse_response(response.content)
data = '|'.join([request_token['key'], request_token['secret']])
self.set_cookie('_oauth_request_token', data)
args = dict(oauth_token = request_token['key'])
if callback_uri:
args['oauth_callback'] = urlparse.urljoin(
self.request.url, callback_uri)
return self.redirect(authorize_url + '?' + urllib.urlencode(args))
def _oauth_access_token_url(self, request_token):
"""
:param request_token:
:return:
"""
consumer_token = self._oauth_consumer_token()
url = self._OAUTH_ACCESS_TOKEN_URL
args = dict(
oauth_consumer_key = consumer_token['key'],
oauth_token = request_token['key'],
oauth_signature_method = 'HMAC-SHA1',
oauth_timestamp = str(int(time.time())),
oauth_nonce = binascii.b2a_hex(uuid.uuid4().bytes),
oauth_version = '1.0',
)
signature = _oauth_signature(consumer_token, 'GET', url, args,
request_token)
args['oauth_signature'] = signature
return url + '?' + urllib.urlencode(args)
def _on_access_token(self, callback, response):
"""
:param callback:
:param response:
:return:
"""
if not response:
logging.warning('Missing OAuth access token response.')
return callback(None)
elif response.status_code < 200 or response.status_code >= 300:
logging.warning('Invalid OAuth access token response (%d): %s',
response.status_code, response.content)
return callback(None)
access_token = _oauth_parse_response(response.content)
return self._oauth_get_user(access_token, functools.partial(
self._on_oauth_get_user, access_token, callback))
def _oauth_get_user(self, access_token, callback):
"""
:param access_token:
:param callback:
:return:
"""
raise NotImplementedError()
def _on_oauth_get_user(self, access_token, callback, user):
"""
:param access_token:
:param callback:
:param user:
:return:
"""
if not user:
callback(None)
return
user['access_token'] = access_token
return callback(user)
def _oauth_request_parameters(self, url, access_token, parameters = {},
method = 'GET'):
"""Returns the OAuth parameters as a dict for the given request.
parameters should include all POST arguments and query string arguments
that will be sent with the request.
:param url:
:param access_token:
:param parameters:
:param method:
:return:
"""
consumer_token = self._oauth_consumer_token()
base_args = dict(
oauth_consumer_key = consumer_token['key'],
oauth_token = access_token['key'],
oauth_signature_method = 'HMAC-SHA1',
oauth_timestamp = str(int(time.time())),
oauth_nonce = binascii.b2a_hex(uuid.uuid4().bytes),
oauth_version = '1.0',
)
args = {}
args.update(base_args)
args.update(parameters)
signature = _oauth_signature(consumer_token, method, url, args,
access_token)
base_args['oauth_signature'] = signature
return base_args
def _oauth_signature(consumer_token, method, url, parameters = {}, token = None):
"""Calculates the HMAC-SHA1 OAuth signature for the given request.
See http://oauth.net/core/1.0/#signing_process
:param consumer_token:
:param method:
:param url:
:param parameters:
:param token:
:return:
"""
parts = urlparse.urlparse(url)
scheme, netloc, path = parts[:3]
normalized_url = scheme.lower() + '://' + netloc.lower() + path
base_elems = []
base_elems.append(method.upper())
base_elems.append(normalized_url)
base_elems.append('&'.join('%s=%s' % (k, _oauth_escape(str(v)))
for k, v in sorted(parameters.items())))
base_string = '&'.join(_oauth_escape(e) for e in base_elems)
key_elems = [consumer_token['secret']]
key_elems.append(token['secret'] if token else '')
key = '&'.join(key_elems)
hash = hmac.new(key, base_string, hashlib.sha1)
return binascii.b2a_base64(hash.digest())[:-1]
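# The signature base string is built as METHOD&<escaped normalized URL>&
# <escaped sorted "k=v" parameter string>, and the HMAC key is
# "<consumer secret>&<token secret>" (the token secret is empty for the
# initial request-token call), per http://oauth.net/core/1.0/#signing_process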
def _oauth_escape(val):
"""
:param val:
:return:
"""
if isinstance(val, unicode):
val = val.encode('utf-8')
return urllib.quote(val, safe = '~')
def _oauth_parse_response(body):
"""
:param body:
:return:
"""
p = cgi.parse_qs(body, keep_blank_values = False)
token = dict(key = p['oauth_token'][0], secret = p['oauth_token_secret'][0])
# Add the extra parameters the Provider included to the token
special = ('oauth_token', 'oauth_token_secret')
token.update((k, p[k][0]) for k in p if k not in special)
return token
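# Example (hypothetical token values):
#   _oauth_parse_response('oauth_token=abc&oauth_token_secret=xyz&user_id=42')
#   # -> {'key': 'abc', 'secret': 'xyz', 'user_id': '42'}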
|
Letractively/aha-gae
|
plugins/aha.plugin.twitteroauth/twitteroauth/oauth.py
|
Python
|
bsd-3-clause
| 10,305
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 Gestion-Ressources (<http://www.gestion-ressources.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Export to Sage50',
'version': '1.0',
"category": 'Accounting & Finance',
'complexity': "easy",
'description': """
French version below
Export accounting data from OpenErp to Sage50.
====================================
Export accounting data from OpenErp to Sage50. The exportation generates the imp file to import in Sage50.
Documentation : layout of the import file (.IMP)
====================================
A PDF document (in the /doc repository) gives more details about the .IMP file layout that can be imported
into Sage 50.This document is part of the Sage 50 SDK (Software Development Kit) available for download at
the following address : http://na.sage.com/sage-simply-accounting/lp/partners/sdk/?isnow=ssa.
Sage 50: After creating .IMP file
====================================
Solution:
Importing purchase invoices, purchase quotes, sales invoices and sales orders into Sage 50
How to import purchase invoices, purchase quotes, sales invoices and sales orders?
You can import purchase invoices, purchase quotes, sales invoices and sales orders into Sage 50.
The transaction(s) details should be in a text file with extension .IMP.
After you have created the .IMP file, you can import the transaction(s) into Sage 50 by following these steps:
(Account information is not included in the .IMP format because when importing the file, you will receive
a pop-up screen to ask you "Select an Account to match".)
From the Home Window, go to File, Import/Export
Click on Import Transactions (the Import Transactions Wizard appears)
Select on 'Import purchase invoices, purchase quotes, sales invoices, sales orders or time slips' and click Next
You can now create a backup of your file
Click Next
Click on Browse and select the .IMP file previously created
If the customer (or vendor) in the transaction(s) you are trying to import does not exist in the Sage 50 company,
you will get a new window asking you if you want to add this customer (or vendor), or if you want to select
another customer (or vendor) from the existing ones
You will also get a similar window if the import file uses an inventory item which does not exist in Sage 50.
You will see a summary of the imported transactions, click OK and then Finish.
If you got any errors importing the data, open the .IMP file in Notepad and use the attached .PDF document
to verify the file format. Once the errors have been corrected, you can try the import again.
Note: View KB25664 for information about some possible errors when importing .IMP file.
Sage Business Care plan does NOT include support for SDK. Please, contact one of our partners website
for further assistance.
Possible errors when importing purchase invoices, purchase quotes, sales invoices and/or sales orders
====================================
Questions and Answers
Import started... Errors occurred while importing.
Line x does not contain compatible tax information.
Invalid date. The date must be between <date1> and <date2>.
A: These are the possible reasons for getting any of these error messages when importing transactions
into Simply Accounting:
- The import file (extension .IMP) you are using does not have the proper format.
Refer to the KB article 25659 for more information about the format of the import file.
- The transaction type is not enabled in the Simply Accounting company. To enable the feature, from the Home Window
in Simply Accounting, go to Setup, Settings, Company, Features, make sure the type of transaction you want to import
is checked
- The dates in the import file do not match the fiscal year dates in Simply Accounting.
Open the import file in Notepad and make the necessary changes
Exportation des données comptables de OpenERP vers Sage50.
==============================================
Ce module exporte des données comptables de OpenERP vers Sage50. Le module génére un fichier imp file qui peut
être importé dans Sage50.
Documentation : Format du fichier à importer (.IMP)
====================================
Un document .PDF (dans le répertoire /doc) donne plus de détails sur le format du fichier .IMP qui peut être
importé dans Sage 50. Ce document fait parti du Kit de développement - SDK de Sage 50 qui peut-être téléchargé
à l'adresse suivante : http://na.sage.com/sage-simply-accounting/lp/partners/sdk/?isnow=ssa.
Guide d'utilisation export Sage 50
========================================
Comment exporter les données comptables à partir d'OpenERP?
Un manuel d'utilisation est disponible dans le répertoire /doc du module.
""",
"author": "Gestion-Ressources,Odoo Community Association (OCA)",
"website": "http://www.gestion-ressources.com",
"license": "AGPL-3",
'images': [],
'depends': ['account'],
'update_xml': [
'wizard/exportsage50.xml',
'security/ir.model.access.csv',
],
'demo_xml': [],
'test':[],
'installable': True,
'auto_install': False,
'certificate': '',
'application': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Endika/connector-sage-50
|
exportsage50/__openerp__.py
|
Python
|
agpl-3.0
| 6,097
|
#!/usr/bin/env python
#
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys
import argparse
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if PY2:
if PYTHON_VERSION < (2, 7, 9):
raise Exception('Must use Python 2.7.9 or later')
elif PYTHON_VERSION < (3, 4):
raise Exception('Must use Python 3.4 or later')
import hpOneView as hpov
def acceptEULA(con):
# See if we need to accept the EULA before we try to log in
con.get_eula_status()
try:
if con.get_eula_status() is True:
print('EULA display needed')
con.set_eula('no')
except Exception as e:
print('EXCEPTION:')
print(e)
def login(con, credential):
# Login with given credentials
try:
con.login(credential)
except:
print('Login failed')
def del_all_ligs(net):
ligs = net.get_ligs()
for lig in ligs:
net.delete_lig(lig)
def del_lig_by_name(net, name):
ligs = net.get_ligs()
for lig in ligs:
if lig['name'] == name:
net.delete_lig(lig)
def main():
parser = argparse.ArgumentParser(add_help=True,
formatter_class=argparse.RawTextHelpFormatter,
description='''
Delete individual or ALL Logical Interconnect Groups
Usage: ''')
parser.add_argument('-a', dest='host', required=True,
help='''
HP OneView Appliance hostname or IP address''')
parser.add_argument('-u', dest='user', required=False,
default='Administrator',
help='''
HP OneView Username''')
parser.add_argument('-p', dest='passwd', required=True,
help='''
HP OneView Password''')
parser.add_argument('-c', dest='cert', required=False,
help='''
Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
parser.add_argument('-y', dest='proxy', required=False,
help='''
    Proxy (host:port format)''')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-d', dest='delete_all', action='store_true',
help='''
Delete ALL Logical Interconnect Groups''')
group.add_argument('-n', dest='name',
help='''
Name of the Logical Interconnect Group to delete''')
args = parser.parse_args()
credential = {'userName': args.user, 'password': args.passwd}
con = hpov.connection(args.host)
net = hpov.networking(con)
if args.proxy:
con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
if args.cert:
con.set_trusted_ssl_bundle(args.cert)
login(con, credential)
acceptEULA(con)
if args.delete_all:
del_all_ligs(net)
sys.exit()
del_lig_by_name(net, args.name)
if __name__ == '__main__':
sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
miqui/python-hpOneView
|
examples/scripts/del-logical-interconnect-groups.py
|
Python
|
mit
| 4,322
|
"""
DogStatsApi is a tool for collecting application metrics without hindering
performance. It collects metrics in the application thread with very little overhead
and allows flushing metrics in process, in a thread or in a greenlet, depending
on your application's needs.
"""
import logging
import socket
from functools import wraps
from contextlib import contextmanager
from time import time
from dogapi.common import get_ec2_instance_id
from dogapi.constants import MetricType
from dogapi.stats.metrics import MetricsAggregator, Counter, Gauge, Histogram
from dogapi.stats.statsd import StatsdAggregator
from dogapi.stats.reporters import HttpReporter
# Loggers
log = logging.getLogger('dd.dogapi')
class DogStatsApi(object):
def __init__(self):
""" Initialize a dogstats object. """
# Don't collect until start is called.
self._disabled = True
def start(self, api_key=None,
flush_interval=10,
roll_up_interval=10,
host=None,
device=None,
api_host=None,
use_ec2_instance_ids=False,
flush_in_thread=True,
flush_in_greenlet=False,
disabled=False,
statsd=False,
statsd_host='localhost',
statsd_port=8125):
"""
        Configure the DogStatsApi instance and, optionally, begin auto-flushing metrics.
:param api_key: Your DataDog API key.
:param flush_interval: The number of seconds to wait between flushes.
:param flush_in_thread: True if you'd like to spawn a thread to flush metrics. It will run every `flush_interval` seconds.
:param flush_in_greenlet: Set to true if you'd like to flush in a gevent greenlet.
"""
self.flush_interval = flush_interval
self.roll_up_interval = roll_up_interval
self.device = device
self._disabled = disabled
self.host = host or socket.gethostname()
if use_ec2_instance_ids:
self.host = get_ec2_instance_id()
self._is_auto_flushing = False
if statsd:
# If we're configured to send to a statsd instance, use an aggregator
# which forwards packets over UDP.
log.info("Initializing dog api to use statsd: %s, %s" % (statsd_host, statsd_port))
self._needs_flush = False
self._aggregator = StatsdAggregator(statsd_host, statsd_port)
else:
            # Otherwise, create an aggregator that rolls up metrics
            # in process.
self._needs_flush = True
self._aggregator = MetricsAggregator(self.roll_up_interval)
# The reporter is responsible for sending metrics off to their final destination.
# It's abstracted to support easy unit testing and in the near future, forwarding
# to the datadog agent.
self.reporter = HttpReporter(api_key=api_key, api_host=api_host)
self._is_flush_in_progress = False
self.flush_count = 0
if self._disabled:
log.info("dogapi is disabled. No metrics will flush.")
else:
if flush_in_greenlet:
self._start_flush_greenlet()
elif flush_in_thread:
self._start_flush_thread()
def stop(self):
if not self._is_auto_flushing:
return True
        # The flush thread only exists when flushing in a thread (not a greenlet).
        if getattr(self, '_flush_thread', None):
            self._flush_thread.end()
self._is_auto_flushing = False
return True
def gauge(self, metric_name, value, timestamp=None, tags=None, sample_rate=1, host=None):
"""
        Record the current *value* of a metric. The most recent value in
        a given flush interval will be recorded. Optionally, specify a set of
        tags to associate with the metric. This should be used for values
        such as total hard disk space, process uptime, total number of active
        users, or number of rows in a database table.
>>> dog_stats_api.gauge('process.uptime', time.time() - process_start_time)
>>> dog_stats_api.gauge('cache.bytes.free', cache.get_free_bytes(), tags=['version:1.0'])
"""
if not self._disabled:
self._aggregator.add_point(metric_name, tags, timestamp or time(), value, Gauge,
sample_rate=sample_rate, host=host)
def increment(self, metric_name, value=1, timestamp=None, tags=None, sample_rate=1, host=None):
"""
Increment the counter by the given *value*. Optionally, specify a list of
*tags* to associate with the metric. This is useful for counting things
such as incrementing a counter each time a page is requested.
>>> dog_stats_api.increment('home.page.hits')
>>> dog_stats_api.increment('bytes.processed', file.size())
"""
if not self._disabled:
self._aggregator.add_point(metric_name, tags, timestamp or time(), value, Counter,
sample_rate=sample_rate, host=host)
def histogram(self, metric_name, value, timestamp=None, tags=None, sample_rate=1, host=None):
"""
Sample a histogram value. Histograms will produce metrics that
describe the distribution of the recorded values, namely the minimum,
maximum, average, count and the 75th, 85th, 95th and 99th percentiles.
Optionally, specify a list of *tags* to associate with the metric.
>>> dog_stats_api.histogram('uploaded_file.size', uploaded_file.size())
"""
if not self._disabled:
self._aggregator.add_point(metric_name, tags, timestamp or time(), value, Histogram,
sample_rate=sample_rate, host=host)
@contextmanager
def timer(self, metric_name, sample_rate=1, tags=None, host=None):
"""
A context manager that will track the distribution of the contained code's run time.
Optionally specify a list of tags to associate with the metric.
::
def get_user(user_id):
with dog_stats_api.timer('user.query.time'):
# Do what you need to ...
pass
# Is equivalent to ...
def get_user(user_id):
start = time.time()
try:
# Do what you need to ...
pass
finally:
dog_stats_api.histogram('user.query.time', time.time() - start)
"""
start = time()
try:
yield
finally:
end = time()
self.histogram(metric_name, end - start, end, tags=tags,
sample_rate=sample_rate, host=host)
def timed(self, metric_name, sample_rate=1, tags=None, host=None):
"""
A decorator that will track the distribution of a function's run time.
Optionally specify a list of tags to associate with the metric.
::
@dog_stats_api.timed('user.query.time')
def get_user(user_id):
# Do what you need to ...
pass
# Is equivalent to ...
start = time.time()
try:
get_user(user_id)
finally:
dog_stats_api.histogram('user.query.time', time.time() - start)
"""
def wrapper(func):
@wraps(func)
def wrapped(*args, **kwargs):
with self.timer(metric_name, sample_rate, tags, host):
result = func(*args, **kwargs)
return result
return wrapped
return wrapper
def flush(self, timestamp=None):
"""
Flush and post all metrics to the server. Note that this is a blocking
        call, so it is likely not suitable for user-facing processes. In those
cases, it's probably best to flush in a thread or greenlet.
"""
try:
if not self._needs_flush:
return False
if self._is_flush_in_progress:
log.debug("A flush is already in progress. Skipping this one.")
return False
elif self._disabled:
log.info("Not flushing because we're disabled.")
return False
self._is_flush_in_progress = True
metrics = self._get_aggregate_metrics(timestamp or time())
count = len(metrics)
if count:
self.flush_count += 1
log.debug("Flush #%s sending %s metrics" % (self.flush_count, count))
self.reporter.flush(metrics)
else:
log.debug("No metrics to flush. Continuing.")
        except Exception:
            try:
                log.exception("Error flushing metrics")
            except Exception:
                pass
finally:
self._is_flush_in_progress = False
def _get_aggregate_metrics(self, flush_time=None):
# Get rolled up metrics
rolled_up_metrics = self._aggregator.flush(flush_time)
# FIXME: emit a dictionary from the aggregator
metrics = []
for timestamp, value, name, tags, host in rolled_up_metrics:
if host is None:
host = self.host
metric = {
                'metric': name,
                'points': [[timestamp, value]],
                'type': MetricType.Gauge,
                'host': host,
                'device': self.device,
                'tags': tags
}
metrics.append(metric)
return metrics
def _start_flush_thread(self):
""" Start a thread to flush metrics. """
from dogapi.stats.periodic_timer import PeriodicTimer
if self._is_auto_flushing:
log.info("Autoflushing already started.")
return
self._is_auto_flushing = True
# A small helper for logging and flushing.
def flush():
try:
log.debug("Flushing metrics in thread")
self.flush()
            except Exception:
                try:
                    log.exception("Error flushing in thread")
                except Exception:
                    pass
log.info("Starting flush thread with interval %s." % self.flush_interval)
self._flush_thread = PeriodicTimer(self.flush_interval, flush)
self._flush_thread.start()
def _start_flush_greenlet(self):
if self._is_auto_flushing:
log.info("Autoflushing already started.")
return
self._is_auto_flushing = True
import gevent
# A small helper for flushing.
        def flush():
            while True:
                try:
                    log.debug("Flushing metrics in greenlet")
                    self.flush()
                except Exception:
                    try:
                        log.exception("Error flushing in greenlet")
                    except Exception:
                        pass
                # Sleep outside the try block so a failing flush can't turn
                # the loop into a busy spin.
                gevent.sleep(self.flush_interval)
log.info("Starting flush greenlet with interval %s." % self.flush_interval)
gevent.spawn(flush)
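# A minimal usage sketch (the API key is a placeholder; everything else is
# from this module):
#
#   dog_stats_api = DogStatsApi()
#   dog_stats_api.start(api_key='YOUR_API_KEY', flush_interval=10)
#   dog_stats_api.increment('home.page.hits')
#   with dog_stats_api.timer('user.query.time'):
#       pass  # timed work goes here
#   dog_stats_api.flush()  # only needed when not auto-flushing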
|
DataDog/dogapi
|
src/dogapi/stats/dog_stats_api.py
|
Python
|
bsd-3-clause
| 11,254
|
import time
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from base.utils import meetup_signup, timestamp_to_datetime
from base.models import UserInfo
class LoginForm(forms.Form):
username = forms.CharField()
email = forms.EmailField()
password = forms.CharField(widget=forms.PasswordInput)
class SignUpForm(forms.Form):
meetup_code = forms.CharField(
widget=forms.HiddenInput
)
username = forms.CharField()
email = forms.EmailField()
password = forms.CharField(widget=forms.PasswordInput)
def save(self, *args, **kwargs):
# http://www.meetup.com/meetup_api/auth/#oauth2
        result = meetup_signup(
            self.cleaned_data['meetup_code']
        )
username = self.cleaned_data['username']
password = self.cleaned_data['password']
user = User(
username=username,
email=self.cleaned_data['email'],
)
user.set_password(password)
user.save()
user = authenticate(username=username, password=password)
        user_info_data = result['me']
        user_auth_data = result['auth']
user_data = {
'user': user,
'joined': timestamp_to_datetime(
user_info_data.get('joined')
),
'expires_in': timestamp_to_datetime(
time.time() + user_auth_data.get('expires_in')
),
'access_token': user_auth_data.get("access_token"),
'token_type': user_auth_data.get("token_type"),
'refresh_token': user_auth_data.get("refresh_token"),
}
# joined
user_info_keys = [
"city", "country",
"lat", "lon", "name", "other_services",
]
user_info = {
key: user_info_data.get(key) for key in user_info_keys
}
user_data.update(user_info)
user_info = UserInfo(**user_data)
user_info.save()
return user
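# A hedged sketch of how a signup view might use this form (the request
# handling is illustrative, not part of this module):
#
#   form = SignUpForm(request.POST)
#   if form.is_valid():
#       user = form.save()  # creates the User and UserInfo, returns an
#                           # authenticated user ready for auth.login()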
|
papaloizouc/producthunthackathon
|
prh/base/forms.py
|
Python
|
mit
| 2,038
|
#!/usr/bin/env python
print "Content-type:text/html\r\n\r\n"
print "<!DOCTYPE html>"
print "<html>"
print "<head>"
print "<title> Wookieware.com</title>"
print "<link rel=\"stylesheet\" type\"text/css\" href=\"../../css/corex.css\"/>"
print "<script src=\"http://ajax.googleapis.com/ajax/libs/jquery/1.7.2/jquery.min.js\"></script>"
print "</head>"
print "<body>"
print "<h1> <img src=\"../../images/glarn.png\" width=\"50\" height=\"50\">glarn: The dpid database</h1>"
|
xod442/sample_scripts
|
test3.py
|
Python
|
gpl-2.0
| 470
|
# -*- coding: utf-8 -*-
"""Test configs."""
from tagio.app import create_app
from tagio.settings import DevConfig, ProdConfig
def test_production_config():
"""Production config."""
app = create_app(ProdConfig)
assert app.config['ENV'] == 'prod'
assert app.config['DEBUG'] is False
assert app.config['DEBUG_TB_ENABLED'] is False
assert app.config['ASSETS_DEBUG'] is False
def test_dev_config():
"""Development config."""
app = create_app(DevConfig)
assert app.config['ENV'] == 'dev'
assert app.config['DEBUG'] is True
assert app.config['ASSETS_DEBUG'] is True
|
makerhanoi/tagio
|
tests/test_config.py
|
Python
|
bsd-3-clause
| 607
|
__author__ = 'bcarson'
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'Oversight',
'author': 'Ben Carson',
'url': 'https://github.com/hebenon/oversight',
'download_url': 'https://github.com/hebenon/oversight',
'author_email': 'ben.carson@bigpond.com',
'version': '0.2',
'install_requires': ['blinker', 'nose', 'pillow', 'requests', 'tensorflow'],
'packages': ['oversight'],
'scripts': [],
'name': 'oversight'
}
setup(**config)
|
hebenon/oversight
|
setup.py
|
Python
|
apache-2.0
| 544
|
from datetime import datetime, timedelta
from pprint import pprint
from django import forms
from utils.functions import shift_years
from .models import (
NfEntrada,
PosicaoCarga,
)
class NotafiscalChaveForm(forms.Form):
chave = forms.CharField(
widget=forms.TextInput())
class NotafiscalRelForm(forms.Form):
def data_ini():
return (datetime.now().replace(day=1)-timedelta(days=1)).replace(day=1)
data_de = forms.DateField(
label='Data do Faturamento: De',
initial=data_ini,
widget=forms.DateInput(attrs={'type': 'date',
'autofocus': 'autofocus'}))
data_ate = forms.DateField(
label='Até', required=False,
widget=forms.DateInput(attrs={'type': 'date'}))
uf = forms.CharField(
label='UF', max_length=2, min_length=2, required=False,
widget=forms.TextInput(attrs={'size': 2}))
nf = forms.CharField(
label='Número da NF', required=False,
widget=forms.TextInput(attrs={'type': 'number'}))
transportadora = forms.CharField(
label='Transportadora', required=False,
help_text='Sigla da transportadora.',
widget=forms.TextInput())
cliente = forms.CharField(
label='Cliente', required=False,
help_text='Parte do nome ou início do CNPJ.',
widget=forms.TextInput())
pedido = forms.CharField(
label='Pedido Tussor', required=False,
widget=forms.TextInput(attrs={'type': 'number'}))
ped_cliente = forms.CharField(
label='Pedido de cliente', required=False,
widget=forms.TextInput(attrs={'type': 'string'}))
CHOICES = [('N', 'Não filtra'),
('C', 'Com data de saída informada'),
('S', 'Sem data de saída')]
data_saida = forms.ChoiceField(
label='Quanto a data de saída', choices=CHOICES, initial='S')
CHOICES = [('T', 'Todos (Sim ou Não)'),
('S', 'Sim'),
('N', 'Não')]
entregue = forms.ChoiceField(
choices=CHOICES, initial='T')
CHOICES = [('N', 'Número da nota fiscal (decrescente)'),
('P', 'Número do pedido (crescente)'),
('A', 'Atraso (maior primeiro)')]
ordem = forms.ChoiceField(
label='Ordem de apresentação', choices=CHOICES, initial='A')
CHOICES = [('V', 'Apenas NF de venda e ativas (não canceladas)'),
               ('T', 'Todas as notas fiscais')]
listadas = forms.ChoiceField(
label='Notas listadas', choices=CHOICES, initial='V')
posicao = forms.ModelChoiceField(
label='Posição', required=False,
queryset=PosicaoCarga.objects.all().order_by('id'),
empty_label='--Todas--')
CHOICES = [('-', 'Todas'),
('a', 'Atacado'),
('v', 'Varejo'),
('o', 'Outras')]
tipo = forms.ChoiceField(
choices=CHOICES, initial='-')
por_pagina = forms.IntegerField(
label='NF por página', required=True, initial=100,
widget=forms.TextInput(attrs={'type': 'number'}))
page = forms.IntegerField(
required=False, widget=forms.HiddenInput())
def clean_uf(self):
uf = self.cleaned_data['uf'].upper()
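        # Copy the bound data and write the upper-cased UF back into it so a
        # re-rendered form displays the normalized value (request QueryDicts
        # are immutable, hence the copy).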
data = self.data.copy()
data['uf'] = uf
self.data = data
return uf
def clean_data_de(self):
data_de = self.cleaned_data['data_de']
if data_de:
if data_de.year < 100:
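                # e.g. a two-digit year such as 21 is shifted to 2021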
data_de = shift_years(2000, data_de)
return data_de
class NfPosicaoForm(forms.Form):
data = forms.DateField(
label='Data de movimento da carga',
        help_text='Só pode ficar vazia se a posição for "Entregue ao apoio".',
initial=datetime.now(), required=False,
widget=forms.DateInput(attrs={'type': 'date',
'autofocus': 'autofocus'}))
posicao = forms.ModelChoiceField(
label='Posição', required=False,
queryset=PosicaoCarga.objects.all().order_by('id'),
initial=2, empty_label='--Todas--')
class EntradaNfForm(forms.ModelForm):
cadastro = forms.CharField(
label='CNPJ',
widget=forms.TextInput(
attrs={'size': 20, 'autofocus': 'autofocus'}))
emissor = forms.CharField(
widget=forms.TextInput(attrs={'size': 80}))
descricao = forms.CharField(
widget=forms.TextInput(attrs={'size': 80}))
transportadora = forms.CharField(
widget=forms.TextInput(attrs={'size': 60}))
motorista = forms.CharField(
widget=forms.TextInput(attrs={'size': 60}))
class EntradaNfSemXmlForm(EntradaNfForm):
class Meta:
model = NfEntrada
fields = [
'cadastro', 'numero', 'emissor', 'descricao', 'volumes',
'chegada', 'transportadora', 'motorista', 'placa',
'responsavel'
]
class ListaForm(forms.Form):
numero = forms.CharField(
label='Número da NF', required=False,
widget=forms.TextInput(attrs={
'type': 'number',
'size': 8,
'autofocus': 'autofocus',
}))
data = forms.DateField(
label='Data de chegada', required=False,
widget=forms.DateInput(attrs={'type': 'date'}))
pagina = forms.IntegerField(
required=False, widget=forms.HiddenInput())
|
anselmobd/fo2
|
src/logistica/forms.py
|
Python
|
mit
| 5,361
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ===========================================================================
# "Para Saber más" (To Learn More) iDevice, created for the FPD by José Ramón Jiménez Reyes
# ===========================================================================
"""
"Para Saber más" (To Learn More) iDevice
"""
import logging
from exe.engine.idevice import Idevice
from exe.engine.translate import lateTranslate
from exe.engine.field import TextAreaField
import re
log = logging.getLogger(__name__)
# ===========================================================================
class ParasabermasfpdIdevice(Idevice):
"""
    The "Para Saber más" iDevice lets students voluntarily broaden their knowledge as part of their learning
"""
persistenceVersion = 7
def __init__(self, activity = "", answer = ""):
"""
Initialize
"""
Idevice.__init__(self,
x_(u"FPD - Para Saber Mas"),
x_(u"Jose Ramon Jimenez Reyes"),
x_(u"""Para saber más es un iDevice que permite al alumnado ampliar conocimientos, siendo estos voluntarios para su aprendizaje."""), u"", u"parasabermasfpd")
# self.emphasis = Idevice.SomeEmphasis
self.emphasis = "_parasabermasfpd"
self._activityInstruc = x_(u"""Introduce el texto que aparecerá en este iDevice""")
# self.systemResources += ["common.js"]
self.activityTextArea = TextAreaField(x_(u'Texto Para saber más'),
self._activityInstruc, activity)
self.activityTextArea.idevice = self
# Properties
activityInstruc = lateTranslate('activityInstruc')
def getResourcesField(self, this_resource):
"""
implement the specific resource finding mechanism for this iDevice:
"""
        # be warned that, before upgrading, this iDevice field might not exist:
if hasattr(self, 'activityTextArea')\
and hasattr(self.activityTextArea, 'images'):
for this_image in self.activityTextArea.images:
if hasattr(this_image, '_imageResource') \
and this_resource == this_image._imageResource:
return self.activityTextArea
return None
def getRichTextFields(self):
fields_list = []
if hasattr(self, 'activityTextArea'):
fields_list.append(self.activityTextArea)
return fields_list
def burstHTML(self, i):
# Parasabermasfpd Idevice:
title = i.find(name='span', attrs={'class' : 'iDeviceTitle' })
self.title = title.renderContents().decode('utf-8')
reflections = i.findAll(name='div', attrs={'id' : re.compile('^ta') })
# should be exactly two of these:
# 1st = field[0] == Activity
if len(reflections) >= 1:
self.activityTextArea.content_wo_resourcePaths = \
reflections[0].renderContents().decode('utf-8')
# and add the LOCAL resource paths back in:
self.activityTextArea.content_w_resourcePaths = \
self.activityTextArea.MassageResourceDirsIntoContent( \
self.activityTextArea.content_wo_resourcePaths)
self.activityTextArea.content = \
self.activityTextArea.content_w_resourcePaths
def upgradeToVersion1(self):
"""
Upgrades the node from version 0 to 1.
"""
log.debug(u"Upgrading iDevice")
self.icon = u"activity"
def upgradeToVersion2(self):
"""
Upgrades the node from 1 (v0.5) to 2 (v0.6).
        Old packages will lose their icons, but they will load.
"""
log.debug(u"Upgrading iDevice")
# self.emphasis = Idevice.SomeEmphasis
self.emphasis = "_parasabermasfpd"
def upgradeToVersion3(self):
"""
Upgrades v0.6 to v0.7.
"""
self.lastIdevice = False
def upgradeToVersion4(self):
"""
Upgrades to exe v0.10
"""
self._upgradeIdeviceToVersion1()
self._activityInstruc = self.__dict__['activityInstruc']
def upgradeToVersion5(self):
"""
Upgrades to exe v0.10
"""
self._upgradeIdeviceToVersion1()
def upgradeToVersion6(self):
"""
Upgrades to v0.12
"""
self._upgradeIdeviceToVersion2()
# self.systemResources += ["common.js"]
def upgradeToVersion7(self):
"""
Upgrades to somewhere before version 0.25 (post-v0.24)
Taking the old unicode string fields, and converting them
into image-enabled TextAreaFields:
"""
        self.activityTextArea = TextAreaField(x_(u'Texto Para saber más'),
self._activityInstruc, self.activity)
self.activityTextArea.idevice = self
# ===========================================================================
|
luisgg/iteexe
|
exe/engine/parasabermasfpdidevice.py
|
Python
|
gpl-2.0
| 5,051
|
#-*- coding: utf-8 -*-
#! /usr/bin/env python
'''
#------------------------------------------------------------
filename: lab10_runTFcheckReLu_spiraldata.py
    Checks the effect of the ReLU activation function on
    deep neural networks.
    This script investigates whether the ReLU activation can mitigate
    the vanishing-gradient problem in
    a fully connected neural network with multiple hidden layers.
    The example data set is two-class spiral data.
    The ReLU activation is applied to the lab7 example
    instead of the softmax activation.
    written by Jaewook Kang @ Jan 2018
#------------------------------------------------------------
'''
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
# reading data set from csv file ==========================
xsize = 2
ysize = 2
data = pd.read_csv('./data/twospirals_N5000.csv')
data.columns=['xdata1','xdata2','tdata']
permutation_index = np.random.permutation(data.index)
permutated_data = data.reindex(permutation_index)
permutated_data.columns=['xdata1','xdata2','tdata']
x_data = np.zeros([permutated_data.xdata1.size,xsize])
x_data[:,0] = permutated_data.xdata1.values
x_data[:,1] = permutated_data.xdata2.values
t_data = np.zeros([permutated_data.tdata.size,ysize])
t_data[:,0] = permutated_data.tdata.values
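# The second one-hot column is the complement of the 0/1 labels:
# np.invert maps 0 -> -1 and 1 -> -2, so adding 2 yields 1 and 0 respectively.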
t_data[:,1] = np.invert(permutated_data.tdata.values) + 2
total_size = permutated_data.xdata1.size
training_size = int(np.floor(permutated_data.xdata1.size * 0.8))
validation_size = total_size - training_size
# data dividing
x_training_data = x_data[0:training_size,:]
t_training_data = t_data[0:training_size,:]
x_validation_data = x_data[training_size:-1,:]
t_validation_data = t_data[training_size:-1,:]
# configure training parameters =====================================
# To see mitigation of vanishing gradient problem
learning_rate = 5E-3
training_epochs = 5000
batch_size = 500
display_step = 1
total_batch = int(training_size / batch_size)
weight_init_fn = tf.contrib.layers.xavier_initializer()
# weight_init_fn = tf.contrib.layers.variance_scaling_initializer()
# weight_init_fn = tf.random_normal_initializer()
## for convergence
# learning_rate = 5E-3
# training_epochs = 5000
# batch_size = 500
# display_step = 1
# total_batch = int(training_size / batch_size)
# computational TF graph construction ================================
# Network Parameters
n_hidden_1 = 10 # 1st layer number of neurons
n_hidden_2 = 7 # 2nd layer number of neurons
n_hidden_3 = 7 # 3rd layer number of neurons
n_hidden_4 = 4 # 4th layer number of neurons
n_hidden_5 = 4 # 5th layer number of neurons
num_input = xsize # two-dimensional input X = [1x2]
num_classes = ysize # 2 class
# tf Graph input
X = tf.placeholder(tf.float32, [None, num_input])
Y = tf.placeholder(tf.float32, [None, num_classes])
# Store layers weight & bias
'''
'h1': tf.Variable(tf.random_normal([num_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'h3': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_3])),
'h4': tf.Variable(tf.random_normal([n_hidden_3, n_hidden_4])),
'h5': tf.Variable(tf.random_normal([n_hidden_4, n_hidden_5])),
'out':tf.Variable(tf.random_normal([n_hidden_5, num_classes]))
'''
weights = {
'h1': tf.get_variable(name='h1_weight',
shape=[num_input, n_hidden_1],
initializer=weight_init_fn),
'h2': tf.get_variable(name='h2_weight',
shape=[n_hidden_1,n_hidden_2],
initializer=weight_init_fn),
'h3': tf.get_variable(name='h3_weight',
shape=[n_hidden_2, n_hidden_3],
initializer=weight_init_fn),
'h4': tf.get_variable(name='h4_weight',
shape=[n_hidden_3, n_hidden_4],
initializer=weight_init_fn),
'h5': tf.get_variable(name='h5_weight',
shape=[n_hidden_4, n_hidden_5],
initializer=weight_init_fn),
'out': tf.get_variable(name='out_weight',
shape=[n_hidden_5, num_classes],
initializer=weight_init_fn)
}
'''
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'b3': tf.Variable(tf.random_normal([n_hidden_3])),
'b4': tf.Variable(tf.random_normal([n_hidden_4])),
'b5': tf.Variable(tf.random_normal([n_hidden_5])),
'out': tf.Variable(tf.random_normal([num_classes]))
'''
biases = {
'b1': tf.get_variable(name='b1_bias',
shape=[n_hidden_1],
initializer= weight_init_fn),
'b2': tf.get_variable(name='b2_bias',
shape=[n_hidden_2],
initializer=weight_init_fn),
'b3': tf.get_variable(name='b3_bias',
shape=[n_hidden_3],
initializer=weight_init_fn),
'b4': tf.get_variable(name='b4_bias',
shape=[n_hidden_4],
initializer=weight_init_fn),
'b5': tf.get_variable(name='b5_bias',
shape=[n_hidden_5],
initializer=weight_init_fn),
'out': tf.get_variable(name='out_bias',
shape=[num_classes],
initializer=weight_init_fn)
}
# Create model
def neural_net(x):
# Input fully connected layer with 10 neurons
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
# Hidden fully connected layer with 7 neurons
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
# Hidden fully connected layer with 7 neurons
layer_3 = tf.add(tf.matmul(layer_2, weights['h3']), biases['b3'])
layer_3 = tf.nn.relu(layer_3)
# Hidden fully connected layer with 4 neurons
layer_4 = tf.add(tf.matmul(layer_3, weights['h4']), biases['b4'])
layer_4 = tf.nn.relu(layer_4)
# Hidden fully connected layer with 4 neurons
layer_5 = tf.add(tf.matmul(layer_4, weights['h5']), biases['b5'])
layer_5 = tf.nn.relu(layer_5)
# Output fully connected layer with a neuron for each class
out_layer = tf.matmul(layer_5, weights['out']) + biases['out']
return out_layer
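# Note: each block above is a dense layer followed by ReLU. In TF 1.x the same
# layer could also be written (a sketch only, not used by this script) as:
#   layer_1 = tf.layers.dense(x, n_hidden_1, activation=tf.nn.relu)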
# Construct model
logits = neural_net(X)
prediction = tf.nn.softmax(logits)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
# optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,momentum=0.8).minimize(cost)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Evaluate model
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
errRatebyTrainingSet = np.zeros(training_epochs)
errRatebyValidationSet = np.zeros(training_epochs)
# Initialize the variables (i.e. assign their default value)
init = tf.global_variables_initializer()
# for visualization of vanishing gradient problem
grad_wrt_weight_layer1_tensor = tf.gradients(cost,weights['h1'],\
name='grad_wrt_weight_layer1')
grad_wrt_weight_layer2_tensor = tf.gradients(cost,weights['h2'],\
name='grad_wrt_weight_layer2')
grad_wrt_weight_layer3_tensor = tf.gradients(cost,weights['h3'],\
name='grad_wrt_weight_layer3')
grad_wrt_weight_layer4_tensor = tf.gradients(cost,weights['h4'],\
name='grad_wrt_weight_layer4')
grad_wrt_weight_layer5_tensor = tf.gradients(cost,weights['h5'],\
name='grad_wrt_weight_layer5')
grad_wrt_weight_layer1_iter = np.zeros([total_batch,1])
grad_wrt_weight_layer2_iter = np.zeros([total_batch,1])
grad_wrt_weight_layer3_iter = np.zeros([total_batch,1])
grad_wrt_weight_layer4_iter = np.zeros([total_batch,1])
grad_wrt_weight_layer5_iter = np.zeros([total_batch,1])
# Start training ===============================================
with tf.Session() as sess:
# Run the initializer
sess.run(init)
print("--------------------------------------------")
for epoch in range(training_epochs):
avg_cost = 0.
for i in range(total_batch):
data_start_index = i * batch_size
data_end_index = (i + 1) * batch_size
            # feed training data ------------------------
batch_xs = x_training_data[data_start_index:data_end_index, :]
batch_ts = t_training_data[data_start_index:data_end_index, :]
#----------------------------------------------
            # Run optimization op (backprop) and cost op (to get loss value),
            # feeding the training data
_, local_batch_cost = sess.run([optimizer,cost], feed_dict={X: batch_xs,
Y: batch_ts})
if epoch == training_epochs - 1:
# print ('Gradient calculation to see gradient vanishing problem')
_, grad_wrt_weight_layer1 = sess.run([optimizer,grad_wrt_weight_layer1_tensor], feed_dict={X: batch_xs,
Y: batch_ts})
_, grad_wrt_weight_layer2 = sess.run([optimizer,grad_wrt_weight_layer2_tensor], feed_dict={X: batch_xs,
Y: batch_ts})
_, grad_wrt_weight_layer3 = sess.run([optimizer,grad_wrt_weight_layer3_tensor], feed_dict={X: batch_xs,
Y: batch_ts})
_, grad_wrt_weight_layer4 = sess.run([optimizer,grad_wrt_weight_layer4_tensor], feed_dict={X: batch_xs,
Y: batch_ts})
_, grad_wrt_weight_layer5 = sess.run([optimizer,grad_wrt_weight_layer5_tensor], feed_dict={X: batch_xs,
Y: batch_ts})
grad_wrt_weight_layer1 = np.array(grad_wrt_weight_layer1)
grad_wrt_weight_layer2 = np.array(grad_wrt_weight_layer2)
grad_wrt_weight_layer3 = np.array(grad_wrt_weight_layer3)
grad_wrt_weight_layer4 = np.array(grad_wrt_weight_layer4)
grad_wrt_weight_layer5 = np.array(grad_wrt_weight_layer5)
grad_wrt_weight_layer1 = grad_wrt_weight_layer1.reshape(grad_wrt_weight_layer1.shape[1],
grad_wrt_weight_layer1.shape[2])
grad_wrt_weight_layer2 = grad_wrt_weight_layer2.reshape(grad_wrt_weight_layer2.shape[1],
grad_wrt_weight_layer2.shape[2])
grad_wrt_weight_layer3 = grad_wrt_weight_layer3.reshape(grad_wrt_weight_layer3.shape[1],
grad_wrt_weight_layer3.shape[2])
grad_wrt_weight_layer4 = grad_wrt_weight_layer4.reshape(grad_wrt_weight_layer4.shape[1],
grad_wrt_weight_layer4.shape[2])
grad_wrt_weight_layer5 = grad_wrt_weight_layer5.reshape(grad_wrt_weight_layer5.shape[1],
grad_wrt_weight_layer5.shape[2])
grad_wrt_weight_layer1_iter[i] = grad_wrt_weight_layer1.mean()
grad_wrt_weight_layer2_iter[i] = grad_wrt_weight_layer2.mean()
grad_wrt_weight_layer3_iter[i] = grad_wrt_weight_layer3.mean()
grad_wrt_weight_layer4_iter[i] = grad_wrt_weight_layer4.mean()
grad_wrt_weight_layer5_iter[i] = grad_wrt_weight_layer5.mean()
# Compute average loss
avg_cost += local_batch_cost / total_batch
# print ("At %d-th batch in %d-epoch, avg_cost = %f" % (i,epoch,avg_cost) )
# Display logs per epoch step
if display_step == 0:
continue
elif (epoch + 1) % display_step == 0:
# print("Iteration:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
batch_train_xs = x_training_data
batch_train_ys = t_training_data
batch_valid_xs = x_validation_data
batch_valid_ys = t_validation_data
errRatebyTrainingSet[epoch] = 1.0 - accuracy.eval({X: batch_train_xs, \
Y: batch_train_ys}, session=sess)
errRatebyValidationSet[epoch] = 1.0 - accuracy.eval({X: batch_valid_xs, \
Y: batch_valid_ys}, session=sess)
print("Training set Err rate: %s" % errRatebyTrainingSet[epoch])
print("Validation set Err rate: %s" % errRatebyValidationSet[epoch])
print("--------------------------------------------")
print("Optimization Finished!")
# Training result visualization ===============================================
hfig1= plt.figure(1,figsize=[10,10])
plt.scatter(data.xdata1.values[0:int(data.xdata1.size/2)],\
data.xdata2.values[0:int(data.xdata1.size/2)], \
color='b',label='class0')
plt.scatter(data.xdata1.values[int(data.xdata1.size/2)+2:-1],\
data.xdata2.values[int(data.xdata1.size/2)+2:-1], \
color='r',label='class1')
plt.title('Two Spiral data Example')
plt.legend()
hfig2 = plt.figure(2,figsize=(10,10))
batch_index = np.array([elem for elem in range(total_batch)])
plt.plot(batch_index,grad_wrt_weight_layer1_iter,label='layer1',color='b',marker='o')
plt.plot(batch_index,grad_wrt_weight_layer4_iter,label='layer4',color='y',marker='o')
plt.plot(batch_index,grad_wrt_weight_layer5_iter,label='layer5',color='r',marker='o')
plt.legend()
plt.title('Weight Gradient with ReLu over minibatch iter @ training epoch = %s' % training_epochs)
plt.xlabel('minibatch iter')
plt.ylabel('Weight Gradient')
hfig3 = plt.figure(3,figsize=(10,10))
epoch_index = np.array([elem for elem in range(training_epochs)])
plt.plot(epoch_index,errRatebyTrainingSet,label='Training data',color='r',marker='o')
plt.plot(epoch_index,errRatebyValidationSet,label='Validation data',color='b',marker='x')
plt.legend()
plt.title('Train/Valid Err')
plt.xlabel('Iteration epoch')
plt.ylabel('error Rate')
plt.show()
|
jwkanggist/EveryBodyTensorFlow
|
lab10_runTFcheckReLu_spiraldata.py
|
Python
|
unlicense
| 14,810
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 The ProteinDF development team.
# see also AUTHORS and README if provided.
#
# This file is a part of the ProteinDF software package.
#
# The ProteinDF is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The ProteinDF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ProteinDF. If not, see <http://www.gnu.org/licenses/>.
from .atomgroup import AtomGroup      # assumed import path; the original file omits it
from .matrix import SymmetricMatrix   # assumed import path; the original file omits it


class Bond(object):
    """
    Detects bonds between the atoms of an AtomGroup from interatomic
    distances and van der Waals radii.
    """
    def __init__(self):
        self._atoms = []
        self._distmat = None
        self._bondmat = None
    def setup(self, mol):
        """
        Detect bonds in the given AtomGroup and register them via mol.add_bond().
        """
self._list_atoms(mol)
self._make_distance_matrix()
self._make_bond_matrix()
num_of_atoms = len(self._atoms)
for p in range(num_of_atoms):
for q in range(p):
b = self._bondmat.get(p, q)
if b > 0:
mol.add_bond(self._atoms[p], self._atoms[q], b)
def _make_bond_matrix(self):
"""
        Build the bond matrix.
"""
num_of_atoms = len(self._atoms)
self._bondmat = SymmetricMatrix(num_of_atoms)
for p in range(num_of_atoms):
vdw_p = self._atoms[p].vdw
for q in range(p):
vdw_q = self._atoms[q].vdw
r = self._distmat.get(p, q)
if r <= (vdw_p + vdw_q) + 0.4:
self._bondmat.set(p, q, 1)
else:
self._bondmat.set(p, q, 0)
def _make_distance_matrix(self):
"""
        Build the distance matrix.
"""
num_of_atoms = len(self._atoms)
self._distmat = SymmetricMatrix(num_of_atoms)
for p in range(num_of_atoms):
for q in range(p):
d = self._atoms[p].xyz.distance_from(self._atoms[q].xyz)
self._distmat.set(p, q, d)
def _list_atoms(self, mol):
assert(isinstance(mol, AtomGroup))
for key, atomgroup in mol.groups():
self._list_atoms(atomgroup)
for key, atom in mol.atoms():
self._atoms.append(atom)
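# A hedged usage sketch (how the AtomGroup is built varies by caller):
#
#   bond = Bond()
#   bond.setup(mol)  # mol is an AtomGroup; detected bonds are added back
#                    # onto mol via mol.add_bond(atom_p, atom_q, order)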
if __name__ == "__main__":
import doctest
doctest.testmod()
|
ProteinDF/ProteinDF_bridge
|
proteindf_bridge/bond.py
|
Python
|
gpl-3.0
| 2,693
|
"""
@file
@brief Exception raised by the automated documentation
"""
class HelpGenException(Exception):
"""
    Custom exception raised by the documentation helpers.
"""
def __init__(self, message, file=None):
"""
redefines the message sent to an exception
@param message message
@param file filename
"""
if file is None:
Exception.__init__(self, message)
else:
mes = '{0}\n File "{1}", line 1'.format(message, file)
Exception.__init__(self, mes)
class ImportErrorHelpGen(ImportError):
"""
A custom exception to detect a specific location when
*ImportError* happens in the process.
"""
pass
class HelpGenConvertError(Exception):
"""
Exception raised when a conversion failed.
"""
pass
class NotebookConvertError(Exception):
"""
Exception raised when a conversion failed.
"""
pass
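# A hedged usage sketch (the file path is a placeholder):
#
#   raise HelpGenException("unable to convert the notebook", file="doc/nb.ipynb")
#
# Passing *file* appends a 'File "...", line 1' hint to the message so consoles
# and IDEs can turn it into a clickable location.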
|
sdpython/pyquickhelper
|
src/pyquickhelper/helpgen/helpgen_exceptions.py
|
Python
|
mit
| 936
|
from .util import TestCase, load_json_fixture
from babbage.model import Model
class ModelTestCase(TestCase):
def setUp(self):
super(ModelTestCase, self).setUp()
self.simple_model_data = load_json_fixture('models/simple_model.json')
self.simple_model = Model(self.simple_model_data)
def test_model_concepts(self):
concepts = list(self.simple_model.concepts)
assert len(concepts) == 7, len(concepts)
def test_model_match(self):
concepts = list(self.simple_model.match('foo'))
assert len(concepts) == 1, len(concepts)
def test_model_match_invalid(self):
concepts = list(self.simple_model.match('fooxx'))
assert len(concepts) == 0, len(concepts)
def test_model_aggregates(self):
aggregates = list(self.simple_model.aggregates)
assert len(aggregates) == 2, aggregates
def test_model_fact_table(self):
assert self.simple_model.fact_table_name == 'simple'
assert 'simple' in repr(self.simple_model), repr(self.simple_model)
def test_deref(self):
assert self.simple_model['foo'].name == 'foo'
assert self.simple_model['foo.key'].name == 'key'
assert self.simple_model['amount'].name == 'amount'
assert 'amount' in self.simple_model
assert 'amount.sum' in self.simple_model
assert '_count' in self.simple_model
assert 'yabba' not in self.simple_model
assert 'foo.key' in self.simple_model
def test_repr(self):
assert 'amount' in repr(self.simple_model['amount'])
assert 'amount.sum' in repr(self.simple_model['amount.sum'])
assert 'foo.key' in repr(self.simple_model['foo.key'])
assert 'foo' in repr(self.simple_model['foo'])
assert 'foo' in unicode(self.simple_model['foo'])
assert self.simple_model['foo'] == 'foo'
def test_to_dict(self):
data = self.simple_model.to_dict()
assert 'measures' in data
assert 'amount' in data['measures']
assert 'amount.sum' in data['aggregates']
assert 'ref' in data['measures']['amount']
assert 'dimensions' in data
assert 'foo' in data['dimensions']
|
zejn/babbage
|
tests/test_model.py
|
Python
|
mit
| 2,196
|
from string import ascii_letters
import pytest
from nex.pydvi.TeXUnit import pt2sp
from nex.constants.codes import CatCode
from nex.constants.parameters import Parameters
from nex.constants.instructions import Instructions
from nex.banisher import (Banisher,
get_token_representation_integer,
get_str_representation_dimension,
get_token_representation_dimension)
from nex.router import (Instructioner,
make_unexpanded_control_sequence_instruction,
make_macro_token)
from nex.utils import ascii_characters, UserError
from common import DummyInstructions, ITok, char_instr_tok
test_char_to_cat = {}
for c in ascii_characters:
test_char_to_cat[c] = CatCode.other
for c in ascii_letters:
test_char_to_cat[c] = CatCode.letter
test_char_to_cat.update({
'$': CatCode.escape,
' ': CatCode.space,
'[': CatCode.begin_group,
']': CatCode.end_group,
'\n': CatCode.end_of_line,
})
class DummyCodes:
def __init__(self, char_to_cat):
if char_to_cat is None:
self.char_to_cat = test_char_to_cat.copy()
else:
self.char_to_cat = char_to_cat
def get_cat_code(self, char):
return self.char_to_cat[char]
def get_lower_case_code(self, c):
return c.lower()
def get_upper_case_code(self, c):
return c.upper()
class DummyRouter:
def __init__(self, cs_map):
self.cs_map = cs_map
def lookup_control_sequence(self, name, *args, **kwargs):
canon_token = self.cs_map[name]
return canon_token.copy(*args, **kwargs)
def name_means_start_condition(self, name):
return name in ('ifYes', 'ifNo')
def name_means_end_condition(self, name):
return name == 'endIf'
def name_means_delimit_condition(self, name):
return name in ('else', 'ooor')
class DummyParameters:
def __init__(self, param_map):
self.param_map = param_map
def get(self, name, *args, **kwargs):
return self.param_map[name]
class DummyState:
def __init__(self, char_to_cat, cs_map, param_map=None):
self.router = DummyRouter(cs_map)
self.parameters = DummyParameters(param_map)
self.codes = DummyCodes(char_to_cat)
def evaluate_if_token_to_block(self, tok):
if tok.type == Instructions.if_true.value:
return 0
elif tok.type == Instructions.if_false.value:
return 1
else:
raise Exception
def string_to_banisher(s, cs_map, char_to_cat=None, param_map=None):
state = DummyState(cs_map=cs_map,
param_map=param_map, char_to_cat=char_to_cat)
instructions = Instructioner.from_string(
resolve_cs_func=state.router.lookup_control_sequence,
s=s,
get_cat_code_func=state.codes.get_cat_code
)
return Banisher(instructions, state, instructions.lexer.reader)
def test_resolver():
cs_map = {
'hi': ITok(DummyInstructions.test),
}
b = string_to_banisher('$hi', cs_map)
out = b.get_next_output_list()
assert len(out) == 1 and out[0].matches(cs_map['hi'])
def test_empty_macro():
cs_map = {
'macro': make_macro_token(name='macro',
replacement_text=[], parameter_text=[],
parents=None),
}
b = string_to_banisher('$macro', cs_map)
out = b._iterate()
assert out is None
assert list(b.instructions.advance_to_end()) == []
def test_short_hand_def():
cs_map = {
'cd': ITok(Instructions.count_def),
}
b = string_to_banisher('$cd $myBestNumber', cs_map)
out = b.get_next_output_list()
assert len(out) == 2
assert out[0].matches(cs_map['cd'])
assert out[1].value['name'] == 'myBestNumber'
def test_def():
cs_map = {
'df': ITok(Instructions.def_),
}
b = string_to_banisher('$df $myName[$sayLola]', cs_map)
out = b._iterate()
assert len(out) == 5
def test_let():
cs_map = {
'letrill': ITok(Instructions.let),
}
b_minimal = string_to_banisher('$letrill $cheese a', cs_map)
out_minimal = b_minimal._iterate()
assert len(out_minimal) == 3
b_equals = string_to_banisher('$letrill $cheese=a', cs_map)
out_equals = b_equals._iterate()
assert len(out_equals) == 4
b_maximal = string_to_banisher('$letrill $cheese= a', cs_map)
out_maximal = b_maximal._iterate()
assert len(out_maximal) == 5
def test_toks_def_balanced():
cs_map = {
'bestToks': ITok(Instructions.token_parameter),
}
b = string_to_banisher('$bestToks [[$this] and $that]', cs_map)
out = b._iterate()
# First time, just get first token and set context to wait for balanced
# text.
assert len(out) == 1
# Second time, grab balanced text.
out = b._iterate()
assert len(out) == 2
assert out[-1].instruction == Instructions.balanced_text_and_right_brace
assert len(out[-1].value) == 9
def test_toks_assign_literal():
cs_map = {
'bestToks': ITok(Instructions.token_parameter),
}
b = string_to_banisher('$bestToks [[$this] and $that]', cs_map)
out = b._iterate()
# First time, just get first token and set context to wait for balanced
# text.
assert len(out) == 1
# Second time, grab balanced text.
out = b._iterate()
assert len(out) == 2
assert out[-1].instruction == Instructions.balanced_text_and_right_brace
assert len(out[-1].value) == 9
def test_toks_assign_variable():
cs_map = {
'bestOfToks': ITok(Instructions.token_parameter),
'worstOfToks': ITok(Instructions.token_parameter),
}
b = string_to_banisher('$bestOfToks $worstOfToks', cs_map)
out = b._iterate()
# First time, just get first token and set context to wait for balanced
# text.
assert len(out) == 1
# Second time, grab target toks.
out = b._iterate()
assert len(out) == 1
assert out[-1].instruction == Instructions.token_parameter
def test_expand_after():
def_target = make_unexpanded_control_sequence_instruction('defTarget',
parents=None)
cs_map = {
'expandAfter': ITok(Instructions.expand_after),
'defCount': ITok(Instructions.count_def),
'getTarget': make_macro_token(name='getTarget',
replacement_text=[def_target],
parameter_text=[], parents=None),
}
# Should expand $getTarget to [$defTarget], then expand $defCount, which
# should then read $defTarget as its argument.
b = string_to_banisher('$expandAfter $defCount $getTarget', cs_map)
out = b.get_next_output_list()
assert len(out) == 2
assert out[0].matches(cs_map['defCount'])
assert out[1].matches(def_target)
def test_string_control_sequence():
cs_map = {
'getString': ITok(Instructions.string),
}
param_map = {
Parameters.escape_char: ord('@'),
}
b = string_to_banisher('$getString $CS', cs_map, param_map=param_map)
out = b.get_next_output_list()
assert all(t.value['cat'] == CatCode.other for t in out)
assert ''.join(t.value['char'] for t in out) == '@CS'
def test_string_character():
cs_map = {
'getString': ITok(Instructions.string),
}
param_map = {
Parameters.escape_char: ord('@'),
}
b = string_to_banisher('$getString A', cs_map, param_map=param_map)
out = b.get_next_output_list()
assert all(t.value['cat'] == CatCode.other for t in out)
assert ''.join(t.value['char'] for t in out) == 'A'
def test_string_control_sequence_containing_space():
cs_map = {
'getString': ITok(Instructions.string),
}
param_map = {
Parameters.escape_char: ord('@'),
}
char_to_cat_weird = test_char_to_cat.copy()
char_to_cat_weird[' '] = CatCode.letter
b = string_to_banisher('$getString$CS WITH SPACES', cs_map,
char_to_cat=char_to_cat_weird,
param_map=param_map)
out = b.get_next_output_list()
for t in out:
if t.value['char'] == ' ':
correct_cat = CatCode.space
else:
correct_cat = CatCode.other
assert t.value['cat'] == correct_cat
assert ''.join(t.value['char'] for t in out) == '@CS WITH SPACES'
def test_string_control_sequence_no_escape():
cs_map = {
'getString': ITok(Instructions.string),
}
param_map = {
# Negative value should cause no escape character to be shown.
Parameters.escape_char: -1,
}
b = string_to_banisher('$getString$NoEscapeCS', cs_map,
param_map=param_map)
out = b.get_next_output_list()
assert all(t.value['cat'] == CatCode.other for t in out)
assert ''.join(t.value['char'] for t in out) == 'NoEscapeCS'
def test_cs_name():
char = 'a'
a_token = char_instr_tok(char, CatCode.letter)
make_A_token = make_macro_token(name='theA',
replacement_text=[a_token],
parameter_text=[],
parents=None)
cs_map = {
'getCSName': ITok(Instructions.cs_name),
'endCSName': ITok(Instructions.end_cs_name),
'theA': make_A_token,
}
b = string_to_banisher('$getCSName theA$endCSName', cs_map)
# In the first iteration, should make a $theA control sequence call.
# In the second iteration, should expand $theA to `a_token`.
out = b.get_next_output_list()
assert len(out) == 1
assert out[0].matches(a_token)
def test_cs_name_end_by_expansion():
# I seem to have made this test very complicated. The idea is that a macro,
# $theFThenEnd, makes the string 'theF' then '\endcsname'.
# This is then procesed by \csname, to produce a control sequence call
# '$theF'.
# This control sequence is then expanded to the string 'F'.
char = 'F'
F_token = char_instr_tok(char, CatCode.letter)
cs_name = 'theF'
the_F_then_end_token = make_macro_token(
name='theFThenEnd',
replacement_text=([char_instr_tok(c, CatCode.letter)
for c in cs_name] +
[ITok(Instructions.end_cs_name)]),
parameter_text=[],
parents=None,
)
make_F_token = make_macro_token(name='theF',
replacement_text=[F_token],
parameter_text=[],
parents=None)
cs_map = {
'getCSName': ITok(Instructions.cs_name),
'theFThenEnd': the_F_then_end_token,
'theF': make_F_token,
}
b = string_to_banisher('$getCSName $theFThenEnd', cs_map)
    # In the first iteration, should make a $theF control sequence call.
    # In the second iteration, should expand $theF to `F_token`.
out = b.get_next_output_list()
assert len(out) == 1
assert out[0].matches(F_token)
def test_cs_name_containing_non_char():
cs_map = {
'getCSName': ITok(Instructions.cs_name),
'endCSName': ITok(Instructions.end_cs_name),
'primitive': ITok(DummyInstructions.test),
}
b = string_to_banisher('$getCSName $primitive $endCSName', cs_map)
with pytest.raises(UserError):
b.get_next_output_list()
def test_change_case():
B_token = char_instr_tok('B', CatCode.letter)
make_B_token = make_macro_token(name='makeB', replacement_text=[B_token],
parameter_text=[], parents=None)
y_token = char_instr_tok('y', CatCode.letter)
make_y_token = make_macro_token(name='makey', replacement_text=[y_token],
parameter_text=[], parents=None)
cs_map = {
'upper': ITok(Instructions.upper_case),
'lower': ITok(Instructions.lower_case),
'makeB': make_B_token,
'makey': make_y_token,
}
b = string_to_banisher('$upper[abc]', cs_map)
out = b.advance_to_end()
assert ''.join(t.value['char'] for t in out) == 'ABC'
b = string_to_banisher('$lower[XYZ]', cs_map)
out = b.advance_to_end()
assert ''.join(t.value['char'] for t in out) == 'xyz'
b = string_to_banisher('$lower[A$makeB C]', cs_map)
out = b.advance_to_end()
assert ''.join(t.value['char'] for t in out) == 'aBc'
b = string_to_banisher('$upper[x$makey z]', cs_map)
out = b.advance_to_end()
assert ''.join(t.value['char'] for t in out) == 'XyZ'
def test_if():
cs_map = {
'ifYes': ITok(Instructions.if_true),
'ifNo': ITok(Instructions.if_false),
'else': ITok(Instructions.else_),
'endIf': ITok(Instructions.end_if),
}
b = string_to_banisher('$ifYes abc$else def $endIf', cs_map)
out = b.advance_to_end()
assert ''.join(t.value['char'] for t in out) == 'abc'
b = string_to_banisher('$ifNo abc$else def$endIf', cs_map)
out = b.advance_to_end()
assert ''.join(t.value['char'] for t in out) == 'def'
def test_afters():
cs_map = {
'assignThen': ITok(Instructions.after_assignment),
'groupThen': ITok(Instructions.after_group),
}
for cs in cs_map:
b = string_to_banisher(f'${cs} $something', cs_map)
out = b.get_next_output_list()
assert len(out) == 2
assert out[0].matches(cs_map[cs])
assert out[1].instruction == Instructions.arbitrary_token
target_tok = out[1].value
assert target_tok.value['name'] == 'something'
def test_input():
cs_map = {
'putIn': ITok(Instructions.input),
}
b = string_to_banisher('$putIn tests/test_files/test', cs_map)
out = list(b.advance_to_end())
print(out)
def test_integer_tokenize():
ts = get_token_representation_integer(-23, parents=None)
assert len(ts) == 3
assert ts[0].value['char'] == '-' and ts[0].value['cat'] == CatCode.other
assert ts[1].value['char'] == '2' and ts[0].value['cat'] == CatCode.other
assert ts[2].value['char'] == '3' and ts[0].value['cat'] == CatCode.other
def test_dimension_tokenize():
ts = get_token_representation_dimension(pt2sp(-12.2), parents=None)
assert len(ts) == 7
assert ''.join(t.value['char'] for t in ts) == '-12.2pt'
assert all(t.value['cat'] == CatCode.other for t in ts)
# def test_the():
# cs_map = {
# 'stringify': ITok(Instructions.the),
# 'preTolerance': ITok(Instructions.integer_parameter,
# value={'parameter': Parameters.pre_tolerance}),
# }
# b = string_to_banisher('$stringify $preTolerance', cs_map)
# out = list(b.advance_to_end())
# print(out)
|
eddiejessup/nex
|
tests/test_banisher.py
|
Python
|
mit
| 14,824
|
# ===============================================================================
# Copyright 2019 Jan Hendrickx and Gabriel Parrish
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import os
# ============= standard library imports ========================
from utils.depletions_modeling.cumulative_depletions import raster_extract, write_raster
root = '/Users/dcadol/Desktop/academic_docs_II/JPL_Data/JPL_calibration_approach/jpl_etrm_warp_PT'
output_dir = '/Volumes/Seagate_Blue/jpl_research/jpl_WE_2012/jpl_et_ratio_modified'
jpl_prism_ratio_path = '/Volumes/Seagate_Blue/jpl_research/jpl_WE_2012/qgis/jpleta_prism_ratio_2012.tif'
jpl_prism_ratio, transform, dimensions, projection, dt = raster_extract(jpl_prism_ratio_path)
for file in os.listdir(root):
print 'file: {}'.format(file)
if file.endswith('.tif'):
file_name = file[:-4]
new_fname = '{}_ratiomod.tif'.format(file_name)
data, transform, dimensions, projection, dt = raster_extract(os.path.join(root, file))
jpl_mod = data / jpl_prism_ratio
write_raster(jpl_mod, transform, output_dir, new_fname, dimensions, projection, dt)
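# Each output raster is the input divided cell-by-cell by the JPL/PRISM ratio,
# i.e. jpl_mod[i, j] = data[i, j] / jpl_prism_ratio[i, j] via NumPy broadcasting,
# assuming both grids share the same dimensions and geotransform.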
|
NMTHydro/Recharge
|
utils/depletions_modeling/multiply_rasters_with_another_raster.py
|
Python
|
apache-2.0
| 1,726
|
#!/usr/bin/env python
import os
import sys
import shutil
# Magic python path, based on http://djangosnippets.org/snippets/281/
from os.path import abspath, dirname, join
parentdir = dirname(dirname(abspath(__file__)))
# Insert our parent directory (the one containing the folder metashare/):
sys.path.insert(1, parentdir)
try:
import settings # Assumed to be in the same directory.
except ImportError:
sys.stderr.write("Error: Can't find the file 'settings.py' in the " \
"directory containing %r. It appears you've customized things.\n" \
"You'll have to run django-admin.py, passing it your settings " \
"module.\n(If the file settings.py does indeed exist, it's causing" \
" an ImportError somehow.)\n" % __file__)
sys.exit(1)
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
PROJECT_HOME = os.path.normpath(os.getcwd() + "/..")
sys.path.append(PROJECT_HOME)
USERS = "users.xml"
GROUPS = "groups.xml"
PERMISSIONS = "permissions.xml"
CONTENT_TYPES = "content-types.xml"
EDITOR_GROUPS = "editor-groups.xml"
EDITOR_GROUP_MANAGERS = "editor-group-managers.xml"
ORGANIZATIONS = "organizations.xml"
ORGANIZATION_MANAGERS = "organization-managers.xml"
USER_PROFILES = "user-profiles.xml"
LR_STATS = "lr-stats.xml"
QUERY_STATS = "query-stats.xml"
STORAGE_FOLDER = "storage"
STORAGE = "storage.xml"
METADATA = "metadata.xml"
RESOURCE = "resource.xml"
ARCHIVE_TPL = "archive.{}"
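# ARCHIVE_TPL is completed with an archive extension at runtime, e.g.
# ARCHIVE_TPL.format("zip") -> "archive.zip"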
def import_users(import_folder):
"""
Imports user related entities from XML in the given folder.
"""
# delete existing content types
    from django.contrib.contenttypes.models import ContentType
ContentType.objects.all().delete()
# import content types
_import(os.path.join(import_folder, "{}".format(CONTENT_TYPES)))
_update_pk("auth")
# delete existing permissions
from django.contrib.auth.models import Permission
Permission.objects.all().delete()
# import permissions
_import(os.path.join(import_folder, "{}".format(PERMISSIONS)))
_update_pk("auth")
# delete existing groups
from django.contrib.auth.models import Group
Group.objects.all().delete()
# import groups
_import(os.path.join(import_folder, "{}".format(GROUPS)))
_update_pk("auth")
# delete existing organizations
from metashare.accounts.models import Organization
Organization.objects.all().delete()
# import organizations
_import(os.path.join(import_folder, "{}".format(ORGANIZATIONS)))
_update_pk("accounts")
# delete existing editor groups
from metashare.accounts.models import EditorGroup
EditorGroup.objects.all().delete()
# import editor groups
_import(os.path.join(import_folder, "{}".format(EDITOR_GROUPS)))
_update_pk("accounts")
# delete existing organization manager groups
from metashare.accounts.models import OrganizationManagers
OrganizationManagers.objects.all().delete()
# import organization manager groups
_import(os.path.join(import_folder, "{}".format(ORGANIZATION_MANAGERS)))
_update_pk("accounts")
# delete existing editor group manager groups
from metashare.accounts.models import EditorGroupManagers
EditorGroupManagers.objects.all().delete()
# import editor group manager groups
_import(os.path.join(import_folder, "{}".format(EDITOR_GROUP_MANAGERS)))
_update_pk("accounts")
# delete existing users
from django.contrib.auth.models import User
User.objects.all().delete()
# import users
_import(os.path.join(import_folder, "{}".format(USERS)))
_update_pk("auth")
# delete existing user profiles
from metashare.accounts.models import UserProfile
UserProfile.objects.all().delete()
# import user profiles
_import(os.path.join(import_folder, "{}".format(USER_PROFILES)))
_update_pk("accounts")
def import_stats(import_folder):
"""
Imports statistic related entities from XML in the given folder.
"""
# import lr stats
    _import(os.path.join(import_folder, LR_STATS))
# import query stats
    _import(os.path.join(import_folder, QUERY_STATS))
_update_pk("stats")
from metashare.stats.models import LRStats
from metashare.storage.models import StorageObject, PUBLISHED
# make sure that the newly introduced `ignored` flag of all `LRStats` is
# properly set:
_change_count = LRStats.objects \
.filter(lrid__in=StorageObject.objects \
.exclude(publication_status=PUBLISHED) \
.values_list('identifier', flat=True)) \
.update(ignored=True)
print "Updated the new `ignored` flag on {} imported `LRStats` objects." \
.format(_change_count)
    # make sure that there are no stats objects left which were wrongly not
    # deleted in the old META-SHARE version:
# (1) delete stats objects for LRs which are marked as deleted
_objs_to_del = LRStats.objects \
.filter(lrid__in=StorageObject.objects.filter(deleted=True) \
.values_list('identifier', flat=True))
_change_count = _objs_to_del.count()
_objs_to_del.delete()
print "Deleted {} imported `LRStats` objects which belong to now " \
"deleted LRs.".format(_change_count)
# (2) delete stats objects with non-existing LR IDs
_objs_to_del = LRStats.objects.exclude(lrid__in=
StorageObject.objects.values_list('identifier', flat=True))
_change_count = _objs_to_del.count()
_objs_to_del.delete()
print "Deleted {} imported `LRStats` objects which refer to " \
"non-existing LR IDs.".format(_change_count)
def import_resources(import_folder):
"""
Imports resources from the given folder.
"""
# Check that SOLR is running, or else all resources will stay at status INTERNAL:
from metashare.repository import verify_at_startup
verify_at_startup() # may raise Exception, which we don't want to catch.
# Disable verbose debug output for the import process...
settings.DEBUG = False
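    # Disable search indexing while importing; the index is rebuilt once at
    # the end of the import.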
os.environ['DISABLE_INDEXING_DURING_IMPORT'] = 'True'
from metashare.repository.supermodel import OBJECT_XML_CACHE
# Clean cache before starting the import process.
OBJECT_XML_CACHE.clear()
# iterate over storage folder content
from django.core import serializers
from metashare.storage.models import MASTER, ALLOWED_ARCHIVE_EXTENSIONS
from metashare.repository.models import resourceInfoType_model
imported_resources = []
erroneous_descriptors = []
storage_path = os.path.join(import_folder, STORAGE_FOLDER)
for folder_name in os.listdir(storage_path):
folder_path = "{}/{}/".format(storage_path, folder_name)
if os.path.isdir(folder_path):
try:
print "importing from folder: '{0}'".format(folder_name)
# import storage object
                so_filename = os.path.join(folder_path, STORAGE)
                with open(so_filename, "rb") as so_in:
                    for obj in serializers.deserialize("xml", so_in):
                        print "importing storage object"
                        # storage.xml only contains a single storage object
                        storage_obj = obj.object
                        # this storage object is NOT saved!
                        # we only copy the relevant attributes from this
                        # storage object to the one at the resource!
# import resource object
                ro_filename = os.path.join(folder_path, RESOURCE)
                with open(ro_filename, "rb") as ro_in:
                    for obj in serializers.deserialize("xml", ro_in):
                        print "importing resource object"
                        # resource.xml only contains a single resource object
                        res_obj = obj
                        # the deserialized object contains the ManyToMany
                        # attributes in m2m_data
# import resource from metadata.xml
                res_filename = os.path.join(folder_path, METADATA)
                with open(res_filename, 'rb') as temp_file:
                    xml_string = temp_file.read()
result = resourceInfoType_model.import_from_string(
xml_string, copy_status=MASTER)
if not result[0]:
msg = u''
if len(result) > 2:
msg = u'{}'.format(result[2])
raise Exception(msg)
res = result[0]
# update imported resource with imported resource object
# and storage object
_update_resource(res, res_obj, storage_obj)
                # copy possible binary archives
for archive_name in [ARCHIVE_TPL.format(_ext)
for _ext in ALLOWED_ARCHIVE_EXTENSIONS]:
archive_filename = os.path.join(folder_path, archive_name)
if os.path.isfile(archive_filename):
print "copying archive"
res_storage_path = '{0}/{1}/'.format(
settings.STORAGE_PATH, res.storage_object.identifier)
shutil.copy(archive_filename,
os.path.join(res_storage_path, archive_name))
# there can be at most one binary
break
imported_resources.append(res)
except Exception as problem:
from django import db
if isinstance(problem, db.utils.DatabaseError):
# reset database connection (required for PostgreSQL)
db.close_connection()
erroneous_descriptors.append((folder_name, problem))
print "Done. Successfully imported {0} resources into the database, " \
"errors occurred in {1} cases.".format(
len(imported_resources), len(erroneous_descriptors))
if len(erroneous_descriptors) > 0:
print "The following resources could not be imported:"
for descriptor, exception in erroneous_descriptors:
print "\t{}: {}".format(descriptor, exception)
    # Be nice and clean up the cache...
    _cache_size = sum(len(x) for x in OBJECT_XML_CACHE.values())
OBJECT_XML_CACHE.clear()
print "Cleared OBJECT_XML_CACHE ({} bytes)".format(_cache_size)
from django.core.management import call_command
call_command('rebuild_index', interactive=False)
def _update_resource(res, res_obj, storage_obj):
"""
Adds information to given resource from the given resource object
and storage object.
"""
# transfer owner from old resource object
for owner in res_obj.m2m_data['owners']:
res.owners.add(owner)
# transfer editor groups from old resource object
for group in res_obj.m2m_data['editor_groups']:
res.editor_groups.add(group)
# transfer attributes from old storage object; skip attributes that were not
# available in 2.9-beta
skip_fields = ('source_node', 'id')
for field in storage_obj._meta.local_fields:
if field.attname in skip_fields:
continue
setattr(res.storage_object, field.attname, getattr(storage_obj, field.attname))
# manually set the revision to 0, so that it is 1
# when creating the storage folder
res.storage_object.revision = 0
    # source_node is left at 'None', since only MASTER copies stemming from
    # our server are migrated
# saving the resource also saves the associated storage object
res.save()
# recreate storage folder
res.storage_object.update_storage()
def _import(import_file):
"""
Imports the objects from the given import file and saves them in the database.
"""
from django.core.management import call_command
print "importing {} ...".format(import_file)
call_command('loaddata', import_file)
def _update_pk(app_name):
"""
    Updates the primary key sequences for the tables of the given app;
    required on PostgreSQL to avoid creating the next db element with a pk
    that already exists.
"""
from StringIO import StringIO
from django.db import connection
from django.db.models.loading import get_app
from django.core.management import call_command
commands = StringIO()
    cursor = connection.cursor()
    if get_app(app_name, emptyOK=True):
        call_command('sqlsequencereset', app_name, stdout=commands)
        cursor.execute(commands.getvalue())
def recreate_sync_users():
"""
    Recreates sync users as they might have been overwritten.
"""
from django.contrib.auth.models import User
from django.core.management import call_command
syncusers = getattr(settings, "SYNC_USERS", {})
for username, password in syncusers.iteritems():
try:
User.objects.get(username=username)
except User.DoesNotExist:
call_command(
"createsyncuser", username=username, password=password, verbosity=1)
if __name__ == "__main__":
# Check command line parameters first.
if len(sys.argv) < 2:
print "\n\tusage: {0} <source-folder>\n".format(sys.argv[0])
sys.exit(-1)
import_users(sys.argv[1])
import_resources(sys.argv[1])
import_stats(sys.argv[1])
recreate_sync_users()
|
JuliBakagianni/META-SHARE
|
misc/tools/migration/import_node_to_3_0_from_2_9_beta.py
|
Python
|
bsd-3-clause
| 13,449
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Runs all scripts named system_tests_*.py in the same directory as this script.
Note that each system test is an executable script; you can run them directly.
Run without any environment settings, this will test a dispatch router installed
in the standard system locations. Use run.py or config.sh to run against a
dispatch build.
"""
import os
import sys
from fnmatch import fnmatch
import system_test
from system_test import unittest
# Collect all system_tests_*.py scripts in the same directory as this script.
test_modules = [os.path.splitext(f)[0] for f in os.listdir(system_test.DIR)
if fnmatch(f, "system_tests_*.py")]
sys.path = [system_test.DIR] + sys.path # Find test modules in sys.path
# python < 2.7 unittest main won't load tests from modules, so use the loader:
all_tests = unittest.TestSuite()
for m in test_modules:
tests = unittest.defaultTestLoader.loadTestsFromModule(__import__(m))
all_tests.addTest(tests)
result = unittest.TextTestRunner(verbosity=2).run(all_tests)
sys.exit(not result.wasSuccessful())
|
ErnieAllen/qpid-dispatch
|
tests/run_system_tests.py
|
Python
|
apache-2.0
| 1,880
|
# Copyright 2012 django-compresshtml authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
from compresshtml import compress_html
from django.conf import settings
class CompressHtmlMiddleware(object):
def process_response(self, request, response):
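        # Compress only when COMPRESS_HTML is enabled; by default this is the
        # opposite of DEBUG, so development responses stay uncompressed.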
compress = getattr(settings, "COMPRESS_HTML", not settings.DEBUG)
if compress:
response.content = compress_html(response.content)
return response
|
kamagatos/django-compresshtml
|
compresshtml/middleware.py
|
Python
|
bsd-2-clause
| 515
|
#!/usr/bin/env python
"""
author: Xiaowei Huang
"""
import numpy as np
import math
import ast
import copy
import random
import time
import multiprocessing
import stopit
from z3 import *
#import display
import mnist as mm
from scipy import ndimage
from configuration import *
from basics import *
def conv_safety_solve(layer2Consider,nfeatures,nfilters,filters,bias,input,activations,pcl,pgl,span,numSpan,pk):
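    """
    Encodes, as a z3 constraint problem, whether the layer activations listed
    in `span`/`numSpan` can be moved to their target values (activation plus
    span * numSpan, within precision `pk`) by changing only selected input
    pixels of this convolutional layer.  Returns (True, modified_image) if
    satisfiable, else (False, input).  (Parameter semantics are inferred from
    the code below.)
    """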
random.seed(time.time())
# number of clauses
c = 0
# number of variables
d = 0
# variables to be used for z3
variable={}
if nfeatures == 1: images = np.expand_dims(input, axis=0)
else: images = input
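    # use z3's QF_LRA tactic (quantifier-free linear real arithmetic); all
    # constraints built below are linear over the reals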
s = Tactic('qflra').solver()
s.reset()
#print("%s\n%s\n%s\n%s"%(pcl,pgl,span,numSpan))
toBeChanged = []
if inverseFunction == "point":
if nfeatures == 1:
#print("%s\n%s"%(nfeatures,pcl.keys()))
ks = [ (0,x,y) for (x,y) in pcl.keys() ]
else: ks = copy.deepcopy(pcl.keys())
toBeChanged = toBeChanged + ks
elif inverseFunction == "area":
        for (k,x,y) in span.keys():
            toBeChanged = toBeChanged + [
                (l,x1,y1) for l in range(nfeatures)
                for x1 in range(x, x+filterSize)
                for y1 in range(y, y+filterSize)
                if x1 >= 0 and y1 >= 0
                and x1 < images.shape[1] and y1 < images.shape[2]]
toBeChanged = list(set(toBeChanged))
for (l,x,y) in toBeChanged:
variable[1,0,l+1,x,y] = Real('1_x_%s_%s_%s' % (l+1,x,y))
d += 1
        if boundOfPixelValue != [0,0] and layer2Consider == 0 and boundRestriction:
pstr = eval("variable[1,0,%s,%s,%s] <= %s"%(l+1,x,y,boundOfPixelValue[1]))
pstr = And(eval("variable[1,0,%s,%s,%s] >= %s"%(l+1,x,y,boundOfPixelValue[0])), pstr)
pstr = And(eval("variable[1,0,%s,%s,%s] != %s"%(l+1,x,y,images[l][x][y])), pstr)
s.add(pstr)
c += 1
for (k,x,y) in span.keys():
variable[1,1,k+1,x,y] = Real('1_y_%s_%s_%s' % (k+1,x,y))
d += 1
string = "variable[1,1,%s,%s,%s] == "%(k+1,x,y)
for l in range(nfeatures):
for x1 in range(filterSize):
for y1 in range(filterSize):
if (l,x+x1,y+y1) in toBeChanged:
newstr1 = " variable[1,0,%s,%s,%s] * %s + "%(l+1,x+x1,y+y1,filters[l,k][x1][y1])
elif x+x1 < images.shape[1] and y+y1 < images.shape[2] :
newstr1 = " %s + "%(images[l][x+x1][y+y1] * filters[l,k][x1][y1])
string += newstr1
string += str(bias[l,k])
s.add(eval(string))
c += 1
if enumerationMethod == "line":
pstr = eval("variable[1,1,%s,%s,%s] < %s" %(k+1,x,y,activations[k][x][y] + span[(k,x,y)] * numSpan[(k,x,y)] + pk))
pstr = And(eval("variable[1,1,%s,%s,%s] > %s "%(k+1,x,y,activations[k][x][y] + span[(k,x,y)] * numSpan[(k,x,y)] - pk)), pstr)
elif enumerationMethod == "convex" or enumerationMethod == "point":
if activations[k][x][y] + span[(k,x,y)] * numSpan[(k,x,y)] >= 0:
upper = activations[k][x][y] + span[(k,x,y)] * numSpan[(k,x,y)] + pk
lower = -1 * (activations[k][x][y] + span[(k,x,y)] * numSpan[(k,x,y)]) - pk
else:
upper = -1 * (activations[k][x][y] + span[(k,x,y)] * numSpan[(k,x,y)]) + pk
lower = activations[k][x][y] + span[(k,x,y)] * numSpan[(k,x,y)] - pk
pstr = eval("variable[1,1,%s,%s,%s] < %s"%(k+1,x,y,upper))
pstr = And(eval("variable[1,1,%s,%s,%s] > %s"%(k+1,x,y,lower)), pstr)
s.add(pstr)
c += 1
nprint("Number of variables: " + str(d))
nprint("Number of clauses: " + str(c))
p = multiprocessing.Process(target=s.check)
p.start()
# Wait for timeout seconds or until process finishes
p.join(timeout)
    # If the process is still active
if p.is_alive():
print "Solver running more than timeout seconds (default="+str(timeout)+"s)! Skip it"
p.terminate()
p.join()
else:
s_return = s.check()
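    # 's_return' only exists if the solver finished within the timeout: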
if 's_return' in locals():
if s_return == sat:
inputVars = [ (l,x,y,eval("variable[1,0,"+ str(l+1) +"," + str(x) +"," + str(y)+ "]")) for (l,x,y) in toBeChanged ]
cex = copy.deepcopy(images)
for (l,x,y,v) in inputVars:
#if cex[l][x][y] != v: print("different dimension spotted ... ")
cex[l][x][y] = getDecimalValue(s.model()[v])
#print("%s\n%s"%(images[l][x][y],cex[l][x][y]))
cex = np.squeeze(cex)
nprint("satisfiable!")
return (True, cex)
else:
nprint("unsatisfiable!")
return (False, input)
else:
print "timeout! "
return (False, input)
def getDecimalValue(v0):
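    """Converts a z3 rational model value into a Python float."""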
v = RealVal(str(v0))
return float(v.numerator_as_long())/v.denominator_as_long()
|
xiaoweih/DLV
|
safety_check/conv_safety_solve.py
|
Python
|
gpl-3.0
| 5,068
|