| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 3–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int64 3–1.05M |
# Changes I would make next time if we were to access the API like this:
# 1. mysql.connector, installed from the MySQL site, is picky about which version of Python you have.
#    I would likely use a different driver, such as MySQLdb.
# 2. Handling pagination: the GitHub events API is paginated, and the following code only uses page 1.
# 3. My database tables contain simple sums of event types, but my code doesn't check whether an event
#    was already counted on a previous run. It would need a way to avoid duplicating data, e.g. by event id / event date.
# 4. This code is vulnerable to SQL injection through the data it obtains from the GitHub API.
#    Future code using the API will need SQL injection protection.
#    (A sketch addressing #2 and #4 appears after index() below.)
#
# Some good information on the GitHub events API: https://developer.github.com/v3/activity/events/
# Some good information on GitHub API pagination: https://developer.github.com/v3/#pagination
#
# This code runs under Django and depends on the four libraries imported at the top.
# render and HttpResponse come with Django.
# requests: pip install requests
# connector:
#   I got mysql.connector for Python 3.4 through the MySQL installer, which can be downloaded here: https://dev.mysql.com/downloads/installer/
#   I found this alternate site for installation, with more Python options: https://dev.mysql.com/downloads/connector/python/
#
# The database used is a MySQL database named 'github'.
# It has the table 'githubevents'
# with three columns: an id column (unused here), 'EventName' varchar(255), and 'EventTotal' int(11).
# No initial data is needed in the table.
from django.shortcuts import render
from django.http import HttpResponse
import requests
import mysql.connector
def index(request):
#Connect to the database and create two cursors; at one point we need to
#use a second cursor while still reading SELECT results from the first.
cnx = mysql.connector.connect(user='root', password='example_password',
host='127.0.0.1',
database='github')
cursor = cnx.cursor(buffered=True)
cursor2 = cnx.cursor(buffered=True)
#Get the GitHub API's events.
#If a repo is not specified, the API returns events from all public repos.
#However, it still returns only a small number of events,
#because the events are paginated: if a page number is not specified,
#only page 1 is provided.
response = requests.get('https://api.github.com/events')
eventData = response.json()
#Build a flat list of [total, event name] pairs for this page load,
#and a SQL query which will be used later to determine UPDATE vs INSERT
#when adding the events to the database.
getMatchingEventsSQL = ("SELECT * FROM githubevents WHERE 1 = 1")
eventsWithTotals = []
needParen = False
for thisEvent in eventData:
if thisEvent['type'] in eventsWithTotals:
eventIndex = eventsWithTotals.index(thisEvent['type'])
eventsWithTotals[eventIndex - 1] += 1
else:
if not eventsWithTotals:
getMatchingEventsSQL = getMatchingEventsSQL + " AND ("
else:
getMatchingEventsSQL = getMatchingEventsSQL + " OR "
eventsWithTotals = eventsWithTotals + [1, thisEvent['type']]
getMatchingEventsSQL = (getMatchingEventsSQL + "eventName = '"
+ thisEvent['type'] + "'")
needParen = True
if needParen:
getMatchingEventsSQL = getMatchingEventsSQL + ")"
#If none of the new events match events already in the database, INSERT
#all of them. If some of them match, UPDATE the ones that do and add to
#their total, then INSERT the ones that were not already in the database.
cursor.execute(getMatchingEventsSQL)
eventsAlreadyInserted = []
if not cursor.rowcount:
if eventsWithTotals:
for key, value in enumerate(eventsWithTotals):
if key % 2 == 0:
addEventSQL = ("INSERT INTO githubevents "
"EventTotal, EventName) VALUES "
"({},'{}')".format(eventsWithTotals[key],
eventsWithTotals[key + 1]))
cursor2.execute(addEventSQL)
cnx.commit()
else:
if eventsWithTotals:
for gitHubEvent in cursor:
for key, value in enumerate(eventsWithTotals):
if key % 2 == 1:
if gitHubEvent[1] == value:
addEventSQL = ("UPDATE githubevents SET "
"EventTotal = EventTotal + {} "
"WHERE EventName = '{}'"
.format(eventsWithTotals[key-1],
eventsWithTotals[key]))
eventsAlreadyInserted = (eventsAlreadyInserted
+ [value])
cursor2.execute(addEventSQL)
cnx.commit()
break
for key, value in enumerate(eventsWithTotals):
if key % 2 == 1:
if value not in eventsAlreadyInserted:
addEventSQL = ("INSERT INTO githubevents (EventTotal,"
"EventName) VALUES ({},'{}')"
.format(eventsWithTotals[key - 1],
eventsWithTotals[key]))
cursor2.execute(addEventSQL)
cnx.commit()
#Construct the HTML output for event totals in the current page load.
myHTMLOutput = "<h1>Welcome to the GitHub Events Page!</h1>"
myHTMLOutput = (myHTMLOutput + "<h3>These are the events from the current "
"page load:</h3>")
myHTMLOutput = (myHTMLOutput + "<table><tr><th>Event Type</th><th>Total "
"for this Page Load</th></tr>")
for key,value in enumerate(eventsWithTotals):
if key % 2 == 0:
myHTMLOutput = (myHTMLOutput + "<tr><td>"
+ str(eventsWithTotals[key + 1])
+ "</td><td>"
+ str(eventsWithTotals[key]) + "</td></tr>")
myHTMLOutput = myHTMLOutput + "</table>"
#Construct the HTML output for the event totals of past page loads.
getEventOutputSQL = ("SELECT * from githubevents")
cursor2.execute(getEventOutputSQL)
myHTMLOutput = (myHTMLOutput + "<h3>These are the events from the database"
" (the sum of all page loads):</h3>")
myHTMLOutput = (myHTMLOutput + "<table><tr><th>Event Type</th><th>Total "
"for all Page Loads</th></tr>")
for gitHubEvent in cursor2:
myHTMLOutput = (myHTMLOutput + "<tr><td>" + str(gitHubEvent[1])
+ "</td><td>" + str(gitHubEvent[2]) + "</td></tr>")
myHTMLOutput = myHTMLOutput + "</table>"
#Close the cursors and the database connection.
cursor2.close()
cursor.close()
cnx.close()
#Show the HTML output.
return HttpResponse(myHTMLOutput)
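# A minimal sketch addressing points #2 and #4 from the header comments,
# assuming mysql.connector's %s placeholder style and the Events API's
# page/per_page query parameters; the helper names here are hypothetical.
def fetch_events_page(page, per_page=100):
    #Request a specific page of public events instead of only page 1.
    response = requests.get('https://api.github.com/events',
                            params={'page': page, 'per_page': per_page})
    response.raise_for_status()
    return response.json()
def upsert_event_total(cnx, cursor, event_name, count):
    #Parameterized queries let the driver escape values, which prevents
    #SQL injection from API-supplied event names.
    cursor.execute("UPDATE githubevents SET EventTotal = EventTotal + %s "
                   "WHERE EventName = %s", (count, event_name))
    if cursor.rowcount == 0:
        cursor.execute("INSERT INTO githubevents (EventTotal, EventName) "
                       "VALUES (%s, %s)", (count, event_name))
    cnx.commit()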
| Hackers-To-Engineers/ghdata-sprint1team-2 | views.py | Python | mit | 7,571 |
bg_image_modes = ('stretch', 'tile', 'center', 'right', 'left')
transitions_jquery_ui = (
'blind', 'bounce', 'clip', 'drop', 'explode', 'fade', 'fold',
'highlight', 'puff', 'pulsate', 'scale', 'shake', 'size', 'slide'
)
transitions_animatecss = (
'bounceIn',
'bounceInDown',
'bounceInLeft',
'bounceInRight',
'bounceInUp',
'fadeIn',
'fadeInDown',
'fadeInDownBig',
'fadeInLeft',
'fadeInLeftBig',
'fadeInRight',
'fadeInRightBig',
'fadeInUp',
'fadeInUpBig',
'flipInX',
'flipInY',
'lightSpeedIn',
'rotateIn',
'rotateInDownLeft',
'rotateInDownRight',
'rotateInUpLeft',
'rotateInUpRight',
'rollIn',
'zoomIn',
'zoomInDown',
'zoomInLeft',
'zoomInRight',
'zoomInUp',
'slideInDown',
'slideInLeft',
'slideInRight',
'slideInUp',
)
| alandmoore/pystump | includes/lookups.py | Python | gpl-3.0 | 856 |
import asyncio
def create_remote_signal_actor(ray):
# TODO(barakmich): num_cpus=0
@ray.remote
class SignalActor:
def __init__(self):
self.ready_event = asyncio.Event()
def send(self, clear=False):
self.ready_event.set()
if clear:
self.ready_event.clear()
async def wait(self, should_wait=True):
if should_wait:
await self.ready_event.wait()
return SignalActor
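# A minimal usage sketch, assuming an initialized `ray` client; the function
# name below is hypothetical and not part of the original module.
def _example_signal_round_trip(ray):
    SignalActor = create_remote_signal_actor(ray)
    signal = SignalActor.remote()
    ray.get(signal.wait.remote(should_wait=False))  # returns immediately
    signal.send.remote()  # unblocks any task awaiting wait()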
| pcmoritz/ray-1 | python/ray/tests/client_test_utils.py | Python | apache-2.0 | 485 |
import json
import random
import pytest
from ruamel.yaml import YAML
from great_expectations.core.batch import Batch, BatchRequest
from great_expectations.core.batch_spec import SqlAlchemyDatasourceBatchSpec
from great_expectations.data_context.util import instantiate_class_from_config
from great_expectations.datasource.data_connector import ConfiguredAssetSqlDataConnector
try:
sqlalchemy = pytest.importorskip("sqlalchemy")
except ImportError:
sqlalchemy = None
import great_expectations.exceptions as ge_exceptions
from great_expectations.validator.validator import Validator
yaml = YAML(typ="safe")
# TODO: <Alex>ALEX -- Some methods in this module are misplaced and/or provide no action; this must be repaired.</Alex>
def test_basic_self_check(test_cases_for_sql_data_connector_sqlite_execution_engine):
random.seed(0)
execution_engine = test_cases_for_sql_data_connector_sqlite_execution_engine
config = yaml.load(
"""
name: my_sql_data_connector
datasource_name: FAKE_Datasource_NAME
assets:
table_partitioned_by_date_column__A:
#table_name: events # If table_name is omitted, then the table_name defaults to the asset name
splitter_method: _split_on_column_value
splitter_kwargs:
column_name: date
""",
)
config["execution_engine"] = execution_engine
my_data_connector = ConfiguredAssetSqlDataConnector(**config)
report = my_data_connector.self_check()
print(json.dumps(report, indent=2))
assert report == {
"class_name": "ConfiguredAssetSqlDataConnector",
"data_asset_count": 1,
"example_data_asset_names": ["table_partitioned_by_date_column__A"],
"data_assets": {
"table_partitioned_by_date_column__A": {
"batch_definition_count": 30,
"example_data_references": [
{"date": "2020-01-01"},
{"date": "2020-01-02"},
{"date": "2020-01-03"},
],
}
},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
# FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
# "example_data_reference": {
# "n_rows": 8,
# "batch_spec": {
# "table_name": "table_partitioned_by_date_column__A",
# "data_asset_name": "table_partitioned_by_date_column__A",
# "batch_identifiers": {"date": "2020-01-02"},
# "splitter_method": "_split_on_column_value",
# "splitter_kwargs": {"column_name": "date"},
# },
# },
}
def test_get_batch_definition_list_from_batch_request(
test_cases_for_sql_data_connector_sqlite_execution_engine,
):
random.seed(0)
db = test_cases_for_sql_data_connector_sqlite_execution_engine
config = yaml.load(
"""
name: my_sql_data_connector
datasource_name: FAKE_Datasource_NAME
assets:
table_partitioned_by_date_column__A:
splitter_method: _split_on_column_value
splitter_kwargs:
column_name: date
""",
)
config["execution_engine"] = db
my_data_connector = ConfiguredAssetSqlDataConnector(**config)
my_data_connector._refresh_data_references_cache()
batch_definition_list = (
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=BatchRequest(
datasource_name="FAKE_Datasource_NAME",
data_connector_name="my_sql_data_connector",
data_asset_name="table_partitioned_by_date_column__A",
data_connector_query={
"batch_filter_parameters": {"date": "2020-01-01"}
},
)
)
)
assert len(batch_definition_list) == 1
batch_definition_list = (
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=BatchRequest(
datasource_name="FAKE_Datasource_NAME",
data_connector_name="my_sql_data_connector",
data_asset_name="table_partitioned_by_date_column__A",
data_connector_query={"batch_filter_parameters": {}},
)
)
)
assert len(batch_definition_list) == 30
# Note: Abe 20201109: It would be nice to put in safeguards for mistakes like this.
# In this case, "date" should go inside "batch_identifiers".
# Currently, the method ignores "date" entirely, and matches on too many partitions.
# I don't think this is unique to ConfiguredAssetSqlDataConnector.
# with pytest.raises(ge_exceptions.DataConnectorError) as e:
# batch_definition_list = my_data_connector.get_batch_definition_list_from_batch_request(
# batch_request=BatchRequest(
# datasource_name="FAKE_Datasource_NAME",
# data_connector_name="my_sql_data_connector",
# data_asset_name="table_partitioned_by_date_column__A",
# data_connector_query={
# "batch_filter_parameters": {},
# "date" : "2020-01-01",
# }
# )
# )
# assert "Unmatched key" in e.value.message
batch_definition_list = (
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=BatchRequest(
datasource_name="FAKE_Datasource_NAME",
data_connector_name="my_sql_data_connector",
data_asset_name="table_partitioned_by_date_column__A",
)
)
)
assert len(batch_definition_list) == 30
with pytest.raises(TypeError):
# noinspection PyArgumentList
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=BatchRequest(
datasource_name="FAKE_Datasource_NAME",
data_connector_name="my_sql_data_connector",
)
)
with pytest.raises(TypeError):
# noinspection PyArgumentList
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=BatchRequest(datasource_name="FAKE_Datasource_NAME")
)
with pytest.raises(TypeError):
# noinspection PyArgumentList
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=BatchRequest()
)
def test_example_A(test_cases_for_sql_data_connector_sqlite_execution_engine):
random.seed(0)
db = test_cases_for_sql_data_connector_sqlite_execution_engine
config = yaml.load(
"""
name: my_sql_data_connector
datasource_name: FAKE_Datasource_NAME
assets:
table_partitioned_by_date_column__A:
splitter_method: _split_on_column_value
splitter_kwargs:
column_name: date
""",
)
config["execution_engine"] = db
my_data_connector = ConfiguredAssetSqlDataConnector(**config)
report = my_data_connector.self_check()
print(json.dumps(report, indent=2))
assert report == {
"class_name": "ConfiguredAssetSqlDataConnector",
"data_asset_count": 1,
"example_data_asset_names": ["table_partitioned_by_date_column__A"],
"data_assets": {
"table_partitioned_by_date_column__A": {
"batch_definition_count": 30,
"example_data_references": [
{"date": "2020-01-01"},
{"date": "2020-01-02"},
{"date": "2020-01-03"},
],
}
},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
# FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
# "example_data_reference": {
# "n_rows": 8,
# "batch_spec": {
# "table_name": "table_partitioned_by_date_column__A",
# "data_asset_name": "table_partitioned_by_date_column__A",
# "batch_identifiers": {"date": "2020-01-02"},
# "splitter_method": "_split_on_column_value",
# "splitter_kwargs": {"column_name": "date"},
# },
# },
}
def test_example_B(test_cases_for_sql_data_connector_sqlite_execution_engine):
random.seed(0)
db = test_cases_for_sql_data_connector_sqlite_execution_engine
config = yaml.load(
"""
name: my_sql_data_connector
datasource_name: FAKE_Datasource_NAME
assets:
table_partitioned_by_timestamp_column__B:
splitter_method: _split_on_converted_datetime
splitter_kwargs:
column_name: timestamp
"""
)
config["execution_engine"] = db
my_data_connector = ConfiguredAssetSqlDataConnector(**config)
report = my_data_connector.self_check()
print(json.dumps(report, indent=2))
assert report == {
"class_name": "ConfiguredAssetSqlDataConnector",
"data_asset_count": 1,
"example_data_asset_names": ["table_partitioned_by_timestamp_column__B"],
"data_assets": {
"table_partitioned_by_timestamp_column__B": {
"batch_definition_count": 30,
"example_data_references": [
{"timestamp": "2020-01-01"},
{"timestamp": "2020-01-02"},
{"timestamp": "2020-01-03"},
],
}
},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
# FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
# "example_data_reference": {
# "n_rows": 8,
# "batch_spec": {
# "table_name": "table_partitioned_by_timestamp_column__B",
# "data_asset_name": "table_partitioned_by_timestamp_column__B",
# "batch_identifiers": {"timestamp": "2020-01-02"},
# "splitter_method": "_split_on_converted_datetime",
# "splitter_kwargs": {"column_name": "timestamp"},
# },
# },
}
def test_example_C(test_cases_for_sql_data_connector_sqlite_execution_engine):
random.seed(0)
db = test_cases_for_sql_data_connector_sqlite_execution_engine
config = yaml.load(
"""
name: my_sql_data_connector
datasource_name: FAKE_Datasource_NAME
assets:
table_partitioned_by_regularly_spaced_incrementing_id_column__C:
splitter_method: _split_on_divided_integer
splitter_kwargs:
column_name: id
divisor: 10
""",
)
config["execution_engine"] = db
my_data_connector = ConfiguredAssetSqlDataConnector(**config)
report = my_data_connector.self_check()
print(json.dumps(report, indent=2))
assert report == {
"class_name": "ConfiguredAssetSqlDataConnector",
"data_asset_count": 1,
"example_data_asset_names": [
"table_partitioned_by_regularly_spaced_incrementing_id_column__C"
],
"data_assets": {
"table_partitioned_by_regularly_spaced_incrementing_id_column__C": {
"batch_definition_count": 12,
"example_data_references": [
{"id": 0},
{"id": 1},
{"id": 2},
],
}
},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
# FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
# "example_data_reference": {
# "n_rows": 10,
# "batch_spec": {
# "table_name": "table_partitioned_by_regularly_spaced_incrementing_id_column__C",
# "data_asset_name": "table_partitioned_by_regularly_spaced_incrementing_id_column__C",
# "batch_identifiers": {"id": 1},
# "splitter_method": "_split_on_divided_integer",
# "splitter_kwargs": {"column_name": "id", "divisor": 10},
# },
# },
}
def test_example_E(test_cases_for_sql_data_connector_sqlite_execution_engine):
random.seed(0)
db = test_cases_for_sql_data_connector_sqlite_execution_engine
config = yaml.load(
"""
name: my_sql_data_connector
datasource_name: FAKE_Datasource_NAME
assets:
table_partitioned_by_incrementing_batch_id__E:
splitter_method: _split_on_column_value
splitter_kwargs:
column_name: batch_id
""",
)
config["execution_engine"] = db
my_data_connector = ConfiguredAssetSqlDataConnector(**config)
report = my_data_connector.self_check()
print(json.dumps(report, indent=2))
assert report == {
"class_name": "ConfiguredAssetSqlDataConnector",
"data_asset_count": 1,
"example_data_asset_names": ["table_partitioned_by_incrementing_batch_id__E"],
"data_assets": {
"table_partitioned_by_incrementing_batch_id__E": {
"batch_definition_count": 11,
"example_data_references": [
{"batch_id": 0},
{"batch_id": 1},
{"batch_id": 2},
],
}
},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
# FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
# "example_data_reference": {
# "n_rows": 9,
# "batch_spec": {
# "table_name": "table_partitioned_by_incrementing_batch_id__E",
# "data_asset_name": "table_partitioned_by_incrementing_batch_id__E",
# "batch_identifiers": {"batch_id": 1},
# "splitter_method": "_split_on_column_value",
# "splitter_kwargs": {"column_name": "batch_id"},
# },
# },
}
def test_example_F(test_cases_for_sql_data_connector_sqlite_execution_engine):
random.seed(0)
db = test_cases_for_sql_data_connector_sqlite_execution_engine
config = yaml.load(
"""
name: my_sql_data_connector
datasource_name: FAKE_Datasource_NAME
assets:
table_partitioned_by_foreign_key__F:
splitter_method: _split_on_column_value
splitter_kwargs:
column_name: session_id
""",
)
config["execution_engine"] = db
my_data_connector = ConfiguredAssetSqlDataConnector(**config)
report = my_data_connector.self_check()
print(json.dumps(report, indent=2))
assert report == {
"class_name": "ConfiguredAssetSqlDataConnector",
"data_asset_count": 1,
"example_data_asset_names": ["table_partitioned_by_foreign_key__F"],
"data_assets": {
"table_partitioned_by_foreign_key__F": {
"batch_definition_count": 49,
# TODO Abe 20201029 : These values should be sorted
"example_data_references": [
{"session_id": 2},
{"session_id": 3},
{"session_id": 4},
],
}
},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
# FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
# "example_data_reference": {
# "n_rows": 2,
# "batch_spec": {
# "table_name": "table_partitioned_by_foreign_key__F",
# "data_asset_name": "table_partitioned_by_foreign_key__F",
# "batch_identifiers": {"session_id": 2},
# "splitter_method": "_split_on_column_value",
# "splitter_kwargs": {"column_name": "session_id"},
# },
# },
}
def test_example_G(test_cases_for_sql_data_connector_sqlite_execution_engine):
random.seed(0)
db = test_cases_for_sql_data_connector_sqlite_execution_engine
config = yaml.load(
"""
name: my_sql_data_connector
datasource_name: FAKE_Datasource_NAME
assets:
table_partitioned_by_multiple_columns__G:
splitter_method: _split_on_multi_column_values
splitter_kwargs:
column_names:
- y
- m
- d
""",
)
config["execution_engine"] = db
my_data_connector = ConfiguredAssetSqlDataConnector(**config)
report = my_data_connector.self_check()
print(json.dumps(report, indent=2))
assert report == {
"class_name": "ConfiguredAssetSqlDataConnector",
"data_asset_count": 1,
"example_data_asset_names": ["table_partitioned_by_multiple_columns__G"],
"data_assets": {
"table_partitioned_by_multiple_columns__G": {
"batch_definition_count": 30,
# TODO Abe 20201029 : These values should be sorted
"example_data_references": [
{"y": 2020, "m": 1, "d": 1},
{"y": 2020, "m": 1, "d": 2},
{"y": 2020, "m": 1, "d": 3},
],
}
},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
# FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
# "example_data_reference": {
# "n_rows": 8,
# "batch_spec": {
# "table_name": "table_partitioned_by_multiple_columns__G",
# "data_asset_name": "table_partitioned_by_multiple_columns__G",
# "batch_identifiers": {
# "y": 2020,
# "m": 1,
# "d": 2,
# },
# "splitter_method": "_split_on_multi_column_values",
# "splitter_kwargs": {"column_names": ["y", "m", "d"]},
# },
# },
}
def test_example_H(test_cases_for_sql_data_connector_sqlite_execution_engine):
return
# Leaving this test commented for now, since sqlite doesn't support MD5.
# Later, we'll want to add a more thorough test harness, including other databases.
# db = test_cases_for_sql_data_connector_sqlite_execution_engine
# config = yaml.load("""
# name: my_sql_data_connector
# datasource_name: FAKE_Datasource_NAME
# assets:
# table_that_should_be_partitioned_by_random_hash__H:
# splitter_method: _split_on_hashed_column
# splitter_kwargs:
# column_name: id
# hash_digits: 1
# """)
# config["execution_engine"] = db
# my_data_connector = ConfiguredAssetSqlDataConnector(**config)
# report = my_data_connector.self_check()
# print(json.dumps(report, indent=2))
# # TODO: Flesh this out once the implementation actually works to this point
# assert report == {
# "class_name": "ConfiguredAssetSqlDataConnector",
# "data_asset_count": 1,
# "example_data_asset_names": [
# "table_that_should_be_partitioned_by_random_hash__H"
# ],
# "data_assets": {
# "table_that_should_be_partitioned_by_random_hash__H": {
# "batch_definition_count": 16,
# "example_data_references": [
# 0,
# 1,
# 2,
# ]
# }
# },
# "unmatched_data_reference_count": 0,
# "example_unmatched_data_references": []
# }
# ['table_partitioned_by_irregularly_spaced_incrementing_id_with_spacing_in_a_second_table__D',
# 'table_containing_id_spacers_for_D',
# 'table_that_should_be_partitioned_by_random_hash__H']
def test_sampling_method__limit(
test_cases_for_sql_data_connector_sqlite_execution_engine,
):
execution_engine = test_cases_for_sql_data_connector_sqlite_execution_engine
batch_data, batch_markers = execution_engine.get_batch_data_and_markers(
batch_spec=SqlAlchemyDatasourceBatchSpec(
{
"table_name": "table_partitioned_by_date_column__A",
"batch_identifiers": {},
"splitter_method": "_split_on_whole_table",
"splitter_kwargs": {},
"sampling_method": "_sample_using_limit",
"sampling_kwargs": {"n": 20},
}
)
)
batch = Batch(data=batch_data)
validator = Validator(execution_engine, batches=[batch])
assert len(validator.head(fetch_all=True)) == 20
assert not validator.expect_column_values_to_be_in_set(
"date", value_set=["2020-01-02"]
).success
def test_sampling_method__random(
test_cases_for_sql_data_connector_sqlite_execution_engine,
):
execution_engine = test_cases_for_sql_data_connector_sqlite_execution_engine
# noinspection PyUnusedLocal
batch_data, batch_markers = execution_engine.get_batch_data_and_markers(
batch_spec=SqlAlchemyDatasourceBatchSpec(
{
"table_name": "table_partitioned_by_date_column__A",
"batch_identifiers": {},
"splitter_method": "_split_on_whole_table",
"splitter_kwargs": {},
"sampling_method": "_sample_using_random",
"sampling_kwargs": {"p": 1.0},
}
)
)
# random.seed() is no good here: the random number generator is in the database, not python
# assert len(batch_data.head(fetch_all=True)) == 63
pass
def test_sampling_method__mod(
test_cases_for_sql_data_connector_sqlite_execution_engine,
):
execution_engine = test_cases_for_sql_data_connector_sqlite_execution_engine
batch_data, batch_markers = execution_engine.get_batch_data_and_markers(
batch_spec=SqlAlchemyDatasourceBatchSpec(
{
"table_name": "table_partitioned_by_date_column__A",
"batch_identifiers": {},
"splitter_method": "_split_on_whole_table",
"splitter_kwargs": {},
"sampling_method": "_sample_using_mod",
"sampling_kwargs": {
"column_name": "id",
"mod": 10,
"value": 8,
},
}
)
)
execution_engine.load_batch_data("__", batch_data)
validator = Validator(execution_engine)
assert len(validator.head(fetch_all=True)) == 12
def test_sampling_method__a_list(
test_cases_for_sql_data_connector_sqlite_execution_engine,
):
execution_engine = test_cases_for_sql_data_connector_sqlite_execution_engine
batch_data, batch_markers = execution_engine.get_batch_data_and_markers(
batch_spec=SqlAlchemyDatasourceBatchSpec(
{
"table_name": "table_partitioned_by_date_column__A",
"batch_identifiers": {},
"splitter_method": "_split_on_whole_table",
"splitter_kwargs": {},
"sampling_method": "_sample_using_a_list",
"sampling_kwargs": {
"column_name": "id",
"value_list": [10, 20, 30, 40],
},
}
)
)
execution_engine.load_batch_data("__", batch_data)
validator = Validator(execution_engine)
assert len(validator.head(fetch_all=True)) == 4
def test_sampling_method__md5(
test_cases_for_sql_data_connector_sqlite_execution_engine,
):
# noinspection PyUnusedLocal
execution_engine = test_cases_for_sql_data_connector_sqlite_execution_engine
# SQlite doesn't support MD5
# batch_data, batch_markers = execution_engine.get_batch_data_and_markers(
# batch_spec=SqlAlchemyDatasourceBatchSpec({
# "table_name": "table_partitioned_by_date_column__A",
# "batch_identifiers": {},
# "splitter_method": "_split_on_whole_table",
# "splitter_kwargs": {},
# "sampling_method": "_sample_using_md5",
# "sampling_kwargs": {
# "column_name": "index",
# }
# })
# )
def test_to_make_sure_splitter_and_sampler_methods_are_optional(
test_cases_for_sql_data_connector_sqlite_execution_engine,
):
execution_engine = test_cases_for_sql_data_connector_sqlite_execution_engine
batch_data, batch_markers = execution_engine.get_batch_data_and_markers(
batch_spec=SqlAlchemyDatasourceBatchSpec(
{
"table_name": "table_partitioned_by_date_column__A",
"batch_identifiers": {},
"sampling_method": "_sample_using_mod",
"sampling_kwargs": {
"column_name": "id",
"mod": 10,
"value": 8,
},
}
)
)
execution_engine.load_batch_data("__", batch_data)
validator = Validator(execution_engine)
assert len(validator.head(fetch_all=True)) == 12
batch_data, batch_markers = execution_engine.get_batch_data_and_markers(
batch_spec=SqlAlchemyDatasourceBatchSpec(
{
"table_name": "table_partitioned_by_date_column__A",
"batch_identifiers": {},
}
)
)
execution_engine.load_batch_data("__", batch_data)
validator = Validator(execution_engine)
assert len(validator.head(fetch_all=True)) == 120
batch_data, batch_markers = execution_engine.get_batch_data_and_markers(
batch_spec=SqlAlchemyDatasourceBatchSpec(
{
"table_name": "table_partitioned_by_date_column__A",
"batch_identifiers": {},
"splitter_method": "_split_on_whole_table",
"splitter_kwargs": {},
}
)
)
execution_engine.load_batch_data("__", batch_data)
validator = Validator(execution_engine)
assert len(validator.head(fetch_all=True)) == 120
def test_default_behavior_with_no_splitter(
test_cases_for_sql_data_connector_sqlite_execution_engine,
):
db = test_cases_for_sql_data_connector_sqlite_execution_engine
config = yaml.load(
"""
name: my_sql_data_connector
datasource_name: FAKE_Datasource_NAME
assets:
table_partitioned_by_date_column__A: {}
""",
)
config["execution_engine"] = db
my_data_connector = ConfiguredAssetSqlDataConnector(**config)
report_object = my_data_connector.self_check()
print(json.dumps(report_object, indent=2))
batch_definition_list = (
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=BatchRequest(
datasource_name="FAKE_Datasource_NAME",
data_connector_name="my_sql_data_connector",
data_asset_name="table_partitioned_by_date_column__A",
)
)
)
assert len(batch_definition_list) == 1
assert batch_definition_list[0]["batch_identifiers"] == {}
batch_definition_list = (
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=BatchRequest(
datasource_name="FAKE_Datasource_NAME",
data_connector_name="my_sql_data_connector",
data_asset_name="table_partitioned_by_date_column__A",
data_connector_query={},
)
)
)
assert len(batch_definition_list) == 1
assert batch_definition_list[0]["batch_identifiers"] == {}
batch_definition_list = (
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=BatchRequest(
datasource_name="FAKE_Datasource_NAME",
data_connector_name="my_sql_data_connector",
data_asset_name="table_partitioned_by_date_column__A",
data_connector_query={"batch_filter_parameters": {}},
)
)
)
assert len(batch_definition_list) == 1
assert batch_definition_list[0]["batch_identifiers"] == {}
def test_behavior_with_whole_table_splitter(
test_cases_for_sql_data_connector_sqlite_execution_engine,
):
db = test_cases_for_sql_data_connector_sqlite_execution_engine
config = yaml.load(
"""
name: my_sql_data_connector
datasource_name: FAKE_Datasource_NAME
assets:
table_partitioned_by_date_column__A:
splitter_method : "_split_on_whole_table"
splitter_kwargs : {}
""",
)
config["execution_engine"] = db
my_data_connector = ConfiguredAssetSqlDataConnector(**config)
report_object = my_data_connector.self_check()
print(json.dumps(report_object, indent=2))
batch_definition_list = (
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=BatchRequest(
datasource_name="FAKE_Datasource_NAME",
data_connector_name="my_sql_data_connector",
data_asset_name="table_partitioned_by_date_column__A",
)
)
)
assert len(batch_definition_list) == 1
assert batch_definition_list[0]["batch_identifiers"] == {}
batch_definition_list = (
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=BatchRequest(
datasource_name="FAKE_Datasource_NAME",
data_connector_name="my_sql_data_connector",
data_asset_name="table_partitioned_by_date_column__A",
data_connector_query={},
)
)
)
assert len(batch_definition_list) == 1
assert batch_definition_list[0]["batch_identifiers"] == {}
batch_definition_list = (
my_data_connector.get_batch_definition_list_from_batch_request(
batch_request=BatchRequest(
datasource_name="FAKE_Datasource_NAME",
data_connector_name="my_sql_data_connector",
data_asset_name="table_partitioned_by_date_column__A",
data_connector_query={"batch_filter_parameters": {}},
)
)
)
assert len(batch_definition_list) == 1
assert batch_definition_list[0]["batch_identifiers"] == {}
def test_basic_instantiation_of_InferredAssetSqlDataConnector(
test_cases_for_sql_data_connector_sqlite_execution_engine,
):
my_data_connector = instantiate_class_from_config(
config={
"class_name": "InferredAssetSqlDataConnector",
"name": "whole_table",
"data_asset_name_prefix": "prexif__",
"data_asset_name_suffix": "__xiffus",
},
runtime_environment={
"execution_engine": test_cases_for_sql_data_connector_sqlite_execution_engine,
"datasource_name": "my_test_datasource",
},
config_defaults={"module_name": "great_expectations.datasource.data_connector"},
)
report_object = my_data_connector.self_check()
# print(json.dumps(report_object, indent=4))
assert report_object == {
"class_name": "InferredAssetSqlDataConnector",
"data_asset_count": 21,
"example_data_asset_names": [
"prexif__table_containing_id_spacers_for_D__xiffus",
"prexif__table_full__I__xiffus",
"prexif__table_partitioned_by_date_column__A__xiffus",
],
"data_assets": {
"prexif__table_containing_id_spacers_for_D__xiffus": {
"batch_definition_count": 1,
"example_data_references": [{}],
},
"prexif__table_full__I__xiffus": {
"batch_definition_count": 1,
"example_data_references": [{}],
},
"prexif__table_partitioned_by_date_column__A__xiffus": {
"batch_definition_count": 1,
"example_data_references": [{}],
},
},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
# FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
# "example_data_reference": {
# "batch_spec": {
# "schema_name": "main",
# "table_name": "table_containing_id_spacers_for_D",
# "data_asset_name": "prexif__table_containing_id_spacers_for_D__xiffus",
# "batch_identifiers": {},
# },
# "n_rows": 30,
# },
}
assert my_data_connector.get_available_data_asset_names() == [
"prexif__table_containing_id_spacers_for_D__xiffus",
"prexif__table_full__I__xiffus",
"prexif__table_partitioned_by_date_column__A__xiffus",
"prexif__table_partitioned_by_foreign_key__F__xiffus",
"prexif__table_partitioned_by_incrementing_batch_id__E__xiffus",
"prexif__table_partitioned_by_irregularly_spaced_incrementing_id_with_spacing_in_a_second_table__D__xiffus",
"prexif__table_partitioned_by_multiple_columns__G__xiffus",
"prexif__table_partitioned_by_regularly_spaced_incrementing_id_column__C__xiffus",
"prexif__table_partitioned_by_timestamp_column__B__xiffus",
"prexif__table_that_should_be_partitioned_by_random_hash__H__xiffus",
"prexif__table_with_fk_reference_from_F__xiffus",
"prexif__view_by_date_column__A__xiffus",
"prexif__view_by_incrementing_batch_id__E__xiffus",
"prexif__view_by_irregularly_spaced_incrementing_id_with_spacing_in_a_second_table__D__xiffus",
"prexif__view_by_multiple_columns__G__xiffus",
"prexif__view_by_regularly_spaced_incrementing_id_column__C__xiffus",
"prexif__view_by_timestamp_column__B__xiffus",
"prexif__view_containing_id_spacers_for_D__xiffus",
"prexif__view_partitioned_by_foreign_key__F__xiffus",
"prexif__view_that_should_be_partitioned_by_random_hash__H__xiffus",
"prexif__view_with_fk_reference_from_F__xiffus",
]
batch_definition_list = my_data_connector.get_batch_definition_list_from_batch_request(
BatchRequest(
datasource_name="my_test_datasource",
data_connector_name="whole_table",
data_asset_name="prexif__table_that_should_be_partitioned_by_random_hash__H__xiffus",
)
)
assert len(batch_definition_list) == 1
def test_more_complex_instantiation_of_InferredAssetSqlDataConnector(
test_cases_for_sql_data_connector_sqlite_execution_engine,
):
my_data_connector = instantiate_class_from_config(
config={
"class_name": "InferredAssetSqlDataConnector",
"name": "whole_table",
"data_asset_name_suffix": "__whole",
"include_schema_name": True,
},
runtime_environment={
"execution_engine": test_cases_for_sql_data_connector_sqlite_execution_engine,
"datasource_name": "my_test_datasource",
},
config_defaults={"module_name": "great_expectations.datasource.data_connector"},
)
report_object = my_data_connector.self_check()
assert report_object == {
"class_name": "InferredAssetSqlDataConnector",
"data_asset_count": 21,
"data_assets": {
"main.table_containing_id_spacers_for_D__whole": {
"batch_definition_count": 1,
"example_data_references": [{}],
},
"main.table_full__I__whole": {
"batch_definition_count": 1,
"example_data_references": [{}],
},
"main.table_partitioned_by_date_column__A__whole": {
"batch_definition_count": 1,
"example_data_references": [{}],
},
},
"example_data_asset_names": [
"main.table_containing_id_spacers_for_D__whole",
"main.table_full__I__whole",
"main.table_partitioned_by_date_column__A__whole",
],
# FIXME: (Sam) example_data_reference removed temporarily in PR #2590:
# "example_data_reference": {
# "batch_spec": {
# "batch_identifiers": {},
# "schema_name": "main",
# "table_name": "table_containing_id_spacers_for_D",
# "data_asset_name": "main.table_containing_id_spacers_for_D__whole",
# },
# "n_rows": 30,
# },
"example_unmatched_data_references": [],
"unmatched_data_reference_count": 0,
}
assert my_data_connector.get_available_data_asset_names() == [
"main.table_containing_id_spacers_for_D__whole",
"main.table_full__I__whole",
"main.table_partitioned_by_date_column__A__whole",
"main.table_partitioned_by_foreign_key__F__whole",
"main.table_partitioned_by_incrementing_batch_id__E__whole",
"main.table_partitioned_by_irregularly_spaced_incrementing_id_with_spacing_in_a_second_table__D__whole",
"main.table_partitioned_by_multiple_columns__G__whole",
"main.table_partitioned_by_regularly_spaced_incrementing_id_column__C__whole",
"main.table_partitioned_by_timestamp_column__B__whole",
"main.table_that_should_be_partitioned_by_random_hash__H__whole",
"main.table_with_fk_reference_from_F__whole",
"main.view_by_date_column__A__whole",
"main.view_by_incrementing_batch_id__E__whole",
"main.view_by_irregularly_spaced_incrementing_id_with_spacing_in_a_second_table__D__whole",
"main.view_by_multiple_columns__G__whole",
"main.view_by_regularly_spaced_incrementing_id_column__C__whole",
"main.view_by_timestamp_column__B__whole",
"main.view_containing_id_spacers_for_D__whole",
"main.view_partitioned_by_foreign_key__F__whole",
"main.view_that_should_be_partitioned_by_random_hash__H__whole",
"main.view_with_fk_reference_from_F__whole",
]
batch_definition_list = my_data_connector.get_batch_definition_list_from_batch_request(
BatchRequest(
datasource_name="my_test_datasource",
data_connector_name="whole_table",
data_asset_name="main.table_that_should_be_partitioned_by_random_hash__H__whole",
)
)
assert len(batch_definition_list) == 1
def test_basic_instantiation_of_ConfiguredAssetSqlDataConnector(
test_cases_for_sql_data_connector_sqlite_execution_engine,
):
my_data_connector = instantiate_class_from_config(
config={
"class_name": "ConfiguredAssetSqlDataConnector",
"name": "my_sql_data_connector",
"assets": {"main.table_full__I__whole": {}},
},
runtime_environment={
"execution_engine": test_cases_for_sql_data_connector_sqlite_execution_engine,
"datasource_name": "my_test_datasource",
},
config_defaults={"module_name": "great_expectations.datasource.data_connector"},
)
report_object = my_data_connector.self_check()
assert report_object == {
"class_name": "ConfiguredAssetSqlDataConnector",
"data_asset_count": 1,
"example_data_asset_names": ["main.table_full__I__whole"],
"data_assets": {
"main.table_full__I__whole": {
"batch_definition_count": 1,
"example_data_references": [{}],
}
},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
}
def test_more_complex_instantiation_of_ConfiguredAssetSqlDataConnector(
test_cases_for_sql_data_connector_sqlite_execution_engine,
):
my_data_connector = instantiate_class_from_config(
config={
"class_name": "ConfiguredAssetSqlDataConnector",
"name": "my_sql_data_connector",
"assets": {
"main.table_partitioned_by_date_column__A": {
"splitter_method": "_split_on_column_value",
"splitter_kwargs": {"column_name": "date"},
},
},
},
runtime_environment={
"execution_engine": test_cases_for_sql_data_connector_sqlite_execution_engine,
"datasource_name": "my_test_datasource",
},
config_defaults={"module_name": "great_expectations.datasource.data_connector"},
)
report_object = my_data_connector.self_check()
assert report_object == {
"class_name": "ConfiguredAssetSqlDataConnector",
"data_asset_count": 1,
"example_data_asset_names": ["main.table_partitioned_by_date_column__A"],
"data_assets": {
"main.table_partitioned_by_date_column__A": {
"batch_definition_count": 30,
"example_data_references": [
{"date": "2020-01-01"},
{"date": "2020-01-02"},
{"date": "2020-01-03"},
],
}
},
"unmatched_data_reference_count": 0,
"example_unmatched_data_references": [],
}
def test_more_complex_instantiation_of_ConfiguredAssetSqlDataConnector_include_schema_name(
test_cases_for_sql_data_connector_sqlite_execution_engine,
):
my_data_connector: ConfiguredAssetSqlDataConnector = ConfiguredAssetSqlDataConnector(
name="my_sql_data_connector",
datasource_name="my_test_datasource",
execution_engine="test_cases_for_sql_data_connector_sqlite_execution_engine",
assets={
"table_partitioned_by_date_column__A": {
"splitter_method": "_split_on_column_value",
"splitter_kwargs": {"column_name": "date"},
"include_schema_name": True,
"schema_name": "main",
},
},
)
assert "main.table_partitioned_by_date_column__A" in my_data_connector.assets
# schema_name given but include_schema_name is set to False
with pytest.raises(ge_exceptions.DataConnectorError) as e:
ConfiguredAssetSqlDataConnector(
name="my_sql_data_connector",
datasource_name="my_test_datasource",
execution_engine="test_cases_for_sql_data_connector_sqlite_execution_engine",
assets={
"table_partitioned_by_date_column__A": {
"splitter_method": "_split_on_column_value",
"splitter_kwargs": {"column_name": "date"},
"include_schema_name": False,
"schema_name": "main",
},
},
)
assert (
e.value.message
== "ConfiguredAssetSqlDataConnector ran into an error while initializing Asset names. Schema main was specified, but 'include_schema_name' flag was set to False."
)
def test_more_complex_instantiation_of_ConfiguredAssetSqlDataConnector_include_schema_name_prefix_suffix(
test_cases_for_sql_data_connector_sqlite_execution_engine,
):
my_data_connector: ConfiguredAssetSqlDataConnector = ConfiguredAssetSqlDataConnector(
name="my_sql_data_connector",
datasource_name="my_test_datasource",
execution_engine="test_cases_for_sql_data_connector_sqlite_execution_engine",
assets={
"table_partitioned_by_date_column__A": {
"splitter_method": "_split_on_column_value",
"splitter_kwargs": {"column_name": "date"},
"include_schema_name": True,
"schema_name": "main",
"data_asset_name_prefix": "taxi__",
"data_asset_name_suffix": "__asset",
},
},
)
assert (
"taxi__main.table_partitioned_by_date_column__A__asset"
in my_data_connector.assets
)
# schema_name provided, but include_schema_name is set to False
with pytest.raises(ge_exceptions.DataConnectorError) as e:
ConfiguredAssetSqlDataConnector(
name="my_sql_data_connector",
datasource_name="my_test_datasource",
execution_engine="test_cases_for_sql_data_connector_sqlite_execution_engine",
assets={
"table_partitioned_by_date_column__A": {
"splitter_method": "_split_on_column_value",
"splitter_kwargs": {"column_name": "date"},
"include_schema_name": False,
"schema_name": "main",
"data_asset_name_prefix": "taxi__",
"data_asset_name_suffix": "__asset",
},
},
)
assert (
e.value.message
== "ConfiguredAssetSqlDataConnector ran into an error while initializing Asset names. Schema main was specified, but 'include_schema_name' flag was set to False."
)
# TODO
def test_ConfiguredAssetSqlDataConnector_with_sorting(
test_cases_for_sql_data_connector_sqlite_execution_engine,
):
pass
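# A possible shape for this test, assuming the connector accepts a `sorters`
# list like the file-based data connectors do (an unverified assumption):
# config["assets"]["table_partitioned_by_date_column__A"]["sorters"] = [
#     {"orderby": "desc", "class_name": "LexicographicSorter", "name": "date"},
# ]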
| great-expectations/great_expectations | tests/datasource/data_connector/test_sql_data_connector.py | Python | apache-2.0 | 44,985 |
a: int
b
c = 'no annotation'
x: int = 10
y: str = 'annotation'
z: tuple = (1, 2, 3)
confirm_subscr = {}
confirm_subscr['test'] = 'works'
| zrax/pycdc | tests/input/variable_annotations.py | Python | gpl-3.0 | 137 |
import numpy as np
import pytest
from pandas._libs.tslibs import iNaT
from pandas._libs.tslibs.period import IncompatibleFrequency
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import (
PeriodArray,
period_array,
)
@pytest.mark.parametrize(
"data, freq, expected",
[
([pd.Period("2017", "D")], None, [17167]),
([pd.Period("2017", "D")], "D", [17167]),
([2017], "D", [17167]),
(["2017"], "D", [17167]),
([pd.Period("2017", "D")], pd.tseries.offsets.Day(), [17167]),
([pd.Period("2017", "D"), None], None, [17167, iNaT]),
(pd.Series(pd.date_range("2017", periods=3)), None, [17167, 17168, 17169]),
(pd.date_range("2017", periods=3), None, [17167, 17168, 17169]),
(pd.period_range("2017", periods=4, freq="Q"), None, [188, 189, 190, 191]),
],
)
def test_period_array_ok(data, freq, expected):
result = period_array(data, freq=freq).asi8
expected = np.asarray(expected, dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
def test_period_array_readonly_object():
# https://github.com/pandas-dev/pandas/issues/25403
pa = period_array([pd.Period("2019-01-01")])
arr = np.asarray(pa, dtype="object")
arr.setflags(write=False)
result = period_array(arr)
tm.assert_period_array_equal(result, pa)
result = pd.Series(arr)
tm.assert_series_equal(result, pd.Series(pa))
result = pd.DataFrame({"A": arr})
tm.assert_frame_equal(result, pd.DataFrame({"A": pa}))
def test_from_datetime64_freq_changes():
# https://github.com/pandas-dev/pandas/issues/23438
arr = pd.date_range("2017", periods=3, freq="D")
result = PeriodArray._from_datetime64(arr, freq="M")
expected = period_array(["2017-01-01", "2017-01-01", "2017-01-01"], freq="M")
tm.assert_period_array_equal(result, expected)
@pytest.mark.parametrize(
"data, freq, msg",
[
(
[pd.Period("2017", "D"), pd.Period("2017", "A")],
None,
"Input has different freq",
),
([pd.Period("2017", "D")], "A", "Input has different freq"),
],
)
def test_period_array_raises(data, freq, msg):
with pytest.raises(IncompatibleFrequency, match=msg):
period_array(data, freq)
def test_period_array_non_period_series_raises():
ser = pd.Series([1, 2, 3])
with pytest.raises(TypeError, match="dtype"):
PeriodArray(ser, freq="D")
def test_period_array_freq_mismatch():
arr = period_array(["2000", "2001"], freq="D")
with pytest.raises(IncompatibleFrequency, match="freq"):
PeriodArray(arr, freq="M")
with pytest.raises(IncompatibleFrequency, match="freq"):
PeriodArray(arr, freq=pd.tseries.offsets.MonthEnd())
def test_from_sequence_disallows_i8():
arr = period_array(["2000", "2001"], freq="D")
msg = str(arr[0].ordinal)
with pytest.raises(TypeError, match=msg):
PeriodArray._from_sequence(arr.asi8, dtype=arr.dtype)
with pytest.raises(TypeError, match=msg):
PeriodArray._from_sequence(list(arr.asi8), dtype=arr.dtype)
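# A quick sanity note on the ordinals asserted above: day-frequency periods
# are stored as days since 1970-01-01, so pd.Period("2017", "D").ordinal == 17167.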
| rs2/pandas | pandas/tests/arrays/period/test_constructors.py | Python | bsd-3-clause | 3,116 |
import numpy as np
import pytest
import pandas as pd
from pandas import Index, PeriodIndex, date_range, period_range
import pandas.core.indexes.period as period
import pandas.util.testing as tm
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestPeriodIndex(object):
def test_joins(self, join_type):
index = period_range('1/1/2000', '1/20/2000', freq='D')
joined = index.join(index[:-5], how=join_type)
assert isinstance(joined, PeriodIndex)
assert joined.freq == index.freq
def test_join_self(self, join_type):
index = period_range('1/1/2000', '1/20/2000', freq='D')
res = index.join(index, how=join_type)
assert index is res
def test_join_does_not_recur(self):
df = tm.makeCustomDataframe(
3, 2, data_gen_f=lambda *args: np.random.randint(2),
c_idx_type='p', r_idx_type='dt')
s = df.iloc[:2, 0]
res = s.index.join(df.columns, how='outer')
expected = Index([s.index[0], s.index[1],
df.columns[0], df.columns[1]], object)
tm.assert_index_equal(res, expected)
def test_union(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
'2000-01-01 09:08'],
freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4,
expected4),
(rng5, other5, expected5), (rng6, other6,
expected6),
(rng7, other7, expected7)]:
result_union = rng.union(other)
tm.assert_index_equal(result_union, expected)
def test_union_misc(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].union(index[10:])
tm.assert_index_equal(result, index)
# not in order
result = _permute(index[:-5]).union(_permute(index[10:]))
tm.assert_index_equal(result, index)
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
with pytest.raises(period.IncompatibleFrequency):
index.union(index2)
msg = 'can only call with other PeriodIndex-ed objects'
with tm.assert_raises_regex(ValueError, msg):
index.join(index.to_timestamp())
index3 = period_range('1/1/2000', '1/20/2000', freq='2D')
with pytest.raises(period.IncompatibleFrequency):
index.join(index3)
def test_union_dataframe_index(self):
rng1 = pd.period_range('1/1/1999', '1/1/2012', freq='M')
s1 = pd.Series(np.random.randn(len(rng1)), rng1)
rng2 = pd.period_range('1/1/1980', '12/1/2001', freq='M')
s2 = pd.Series(np.random.randn(len(rng2)), rng2)
df = pd.DataFrame({'s1': s1, 's2': s2})
exp = pd.period_range('1/1/1980', '1/1/2012', freq='M')
tm.assert_index_equal(df.index, exp)
def test_intersection(self):
index = period_range('1/1/2000', '1/20/2000', freq='D')
result = index[:-5].intersection(index[10:])
tm.assert_index_equal(result, index[10:-5])
# not in order
left = _permute(index[:-5])
right = _permute(index[10:])
result = left.intersection(right).sort_values()
tm.assert_index_equal(result, index[10:-5])
# raise if different frequencies
index = period_range('1/1/2000', '1/20/2000', freq='D')
index2 = period_range('1/1/2000', '1/20/2000', freq='W-WED')
with pytest.raises(period.IncompatibleFrequency):
index.intersection(index2)
index3 = period_range('1/1/2000', '1/20/2000', freq='2D')
with pytest.raises(period.IncompatibleFrequency):
index.intersection(index3)
def test_intersection_cases(self):
base = period_range('6/1/2000', '6/30/2000', freq='D', name='idx')
# if target has the same name, it is preserved
rng2 = period_range('5/15/2000', '6/20/2000', freq='D', name='idx')
expected2 = period_range('6/1/2000', '6/20/2000', freq='D',
name='idx')
# if target name is different, it will be reset
rng3 = period_range('5/15/2000', '6/20/2000', freq='D', name='other')
expected3 = period_range('6/1/2000', '6/20/2000', freq='D',
name=None)
rng4 = period_range('7/1/2000', '7/31/2000', freq='D', name='idx')
expected4 = PeriodIndex([], name='idx', freq='D')
for (rng, expected) in [(rng2, expected2), (rng3, expected3),
(rng4, expected4)]:
result = base.intersection(rng)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == expected.freq
# non-monotonic
base = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-02',
'2011-01-03'], freq='D', name='idx')
rng2 = PeriodIndex(['2011-01-04', '2011-01-02',
'2011-02-02', '2011-02-03'],
freq='D', name='idx')
expected2 = PeriodIndex(['2011-01-04', '2011-01-02'], freq='D',
name='idx')
rng3 = PeriodIndex(['2011-01-04', '2011-01-02', '2011-02-02',
'2011-02-03'],
freq='D', name='other')
expected3 = PeriodIndex(['2011-01-04', '2011-01-02'], freq='D',
name=None)
rng4 = period_range('7/1/2000', '7/31/2000', freq='D', name='idx')
expected4 = PeriodIndex([], freq='D', name='idx')
for (rng, expected) in [(rng2, expected2), (rng3, expected3),
(rng4, expected4)]:
result = base.intersection(rng)
tm.assert_index_equal(result, expected)
assert result.name == expected.name
assert result.freq == 'D'
# empty same freq
rng = date_range('6/1/2000', '6/15/2000', freq='T')
result = rng[0:0].intersection(rng)
assert len(result) == 0
result = rng.intersection(rng[0:0])
assert len(result) == 0
def test_difference(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = rng4
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(
['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
for rng, other, expected in [(rng1, other1, expected1),
(rng2, other2, expected2),
(rng3, other3, expected3),
(rng4, other4, expected4),
(rng5, other5, expected5),
(rng6, other6, expected6),
(rng7, other7, expected7), ]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
| harisbal/pandas | pandas/tests/indexes/period/test_setops.py | Python | bsd-3-clause | 10,619 |
r"""
Parso is a Python parser that supports error recovery and round-trip parsing
for different Python versions (in multiple Python versions). Parso is also able
to list multiple syntax errors in your python file.
Parso has been battle-tested by jedi_. It was pulled out of jedi to be useful
for other projects as well.
Parso consists of a small API to parse Python and analyse the syntax tree.
.. _jedi: https://github.com/davidhalter/jedi
A simple example:
>>> import parso
>>> module = parso.parse('hello + 1', version="3.6")
>>> expr = module.children[0]
>>> expr
PythonNode(arith_expr, [<Name: hello@1,0>, <Operator: +>, <Number: 1>])
>>> print(expr.get_code())
hello + 1
>>> name = expr.children[0]
>>> name
<Name: hello@1,0>
>>> name.end_pos
(1, 5)
>>> expr.end_pos
(1, 9)
To list multiple issues:
>>> grammar = parso.load_grammar()
>>> module = grammar.parse('foo +\nbar\ncontinue')
>>> error1, error2 = grammar.iter_errors(module)
>>> error1.message
'SyntaxError: invalid syntax'
>>> error2.message
"SyntaxError: 'continue' not properly in loop"
"""
from parso.parser import ParserSyntaxError
from parso.grammar import Grammar, load_grammar
from parso.utils import split_lines, python_bytes_to_unicode
__version__ = '0.4.0'
def parse(code=None, **kwargs):
"""
A utility function to avoid loading grammars.
Params are documented in :py:meth:`parso.Grammar.parse`.
:param str version: The version used by :py:func:`parso.load_grammar`.
"""
version = kwargs.pop('version', None)
grammar = load_grammar(version=version)
return grammar.parse(code, **kwargs)
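# A short usage sketch of the helper above, mirroring the module docstring:
# >>> module = parse('hello + 1', version='3.6')
# >>> module.children[0].get_code()
# 'hello + 1'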
| lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/parso/__init__.py | Python | mit | 1,607 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Article.active'
db.add_column('feedback_article', 'active',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Article.active'
db.delete_column('feedback_article', 'active')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'feedback.article': {
'Meta': {'object_name': 'Article'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feedback.Category']"}),
'common_issue': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'content': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'feedback.category': {
'Meta': {'ordering': "['name']", 'object_name': 'Category'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '75'})
},
'feedback.feedback': {
'Meta': {'object_name': 'Feedback'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'feedback': ('django.db.models.fields.TextField', [], {'max_length': '1000'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'login_method': ('django.db.models.fields.CharField', [], {'default': "'CAC'", 'max_length': '25'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '25', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'default': "'CAC'", 'max_length': '25'}),
'referer': ('django.db.models.fields.TextField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'subject': ('django.db.models.fields.CharField', [], {'default': "'Event Page'", 'max_length': '25'}),
'user_agent': ('django.db.models.fields.TextField', [], {'max_length': '300', 'null': 'True', 'blank': 'True'})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['feedback']
|
ngageoint/geoevents
|
geoevents/feedback/migrations/0004_auto__add_field_article_active.py
|
Python
|
mit
| 7,466
|
from Components.config import ConfigSubsection, config
from Tools.LoadPixmap import LoadPixmap
config.plugins = ConfigSubsection()
class PluginDescriptor:
"""An object to describe a plugin."""
# where to list the plugin. Note that there are different call arguments,
# so you might not be able to combine them.
# supported arguments are:
# session
# servicereference
# reason
# you have to ignore unknown kwargs!
# argument: session
WHERE_EXTENSIONSMENU = 0
WHERE_MAINMENU = 1
WHERE_PLUGINMENU = 2
# argument: session, serviceref (currently selected)
WHERE_MOVIELIST = 3
    # argument: menuid. Fnc must return a list of menu items (4-tuples of name, fnc to call, entryid or None, weight or None)
WHERE_MENU = 4
# reason (0: start, 1: end)
WHERE_AUTOSTART = 5
# start as wizard. In that case, fnc must be tuple (priority,class) with class being a screen class!
WHERE_WIZARD = 6
# like autostart, but for a session. currently, only session starts are
# delivered, and only on pre-loaded plugins
WHERE_SESSIONSTART = 7
# start as teletext plugin. arguments: session, serviceref
WHERE_TELETEXT = 8
# file-scanner, fnc must return a list of Scanners
WHERE_FILESCAN = 9
    # fnc must take an interface name as parameter and return None if the plugin does not support an extended setup
# or return a function which is called with session and the interface name for extended setup of this interface
WHERE_NETWORKSETUP = 10
WHERE_NETWORKMOUNTS = 11
# show up this plugin (or a choicebox with all of them) for long INFO keypress
WHERE_EVENTINFO = 12
# reason (True: Networkconfig read finished, False: Networkconfig reload initiated )
WHERE_NETWORKCONFIG_READ = 13
WHERE_AUDIOMENU = 14
# fnc 'SoftwareSupported' or 'AdvancedSoftwareSupported' must take a parameter and return None
    # if the plugin should not be displayed inside the Softwaremanager, or return a function which is called with session
# and 'None' as parameter to call the plugin from the Softwaremanager menus. "menuEntryName" and "menuEntryDescription"
# should be provided to name and describe the new menu entry.
WHERE_SOFTWAREMANAGER = 15
WHERE_SATCONFIGCHANGED = 16
WHERE_SERVICESCAN = 17
WHERE_EXTENSIONSINGLE = 18
    def __init__(self, name="Plugin", where=None, description="", icon=None, fnc=None, wakeupfnc=None, needsRestart=None, internal=False, weight=0):
        self.name = name
        self.internal = internal
        self.needsRestart = needsRestart
        self.path = None
        # Avoid a shared mutable default argument for `where`.
        if where is None:
            where = []
        if isinstance(where, list):
            self.where = where
        else:
            self.where = [where]
self.description = description
if icon is None or isinstance(icon, str):
self.iconstr = icon
self.icon = None
else:
self.icon = icon
self.weight = weight
self.wakeupfnc = wakeupfnc
self.__call__ = fnc
def updateIcon(self, path):
if isinstance(self.iconstr, str):
self.icon = LoadPixmap('/'.join((path, self.iconstr)))
else:
self.icon = None
def getWakeupTime(self):
        # A plain conditional avoids the and/or idiom, which would wrongly
        # return -1 for a legitimate wakeup time of 0.
        return self.wakeupfnc() if self.wakeupfnc else -1
def __eq__(self, other):
return self.__call__ == other.__call__
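# Hedged example (not part of the original file): the conventional enigma2
# plugin entry point, registering a callable under WHERE_PLUGINMENU as
# documented above. "_example_main" is an illustrative name.
def _example_main(session, **kwargs):
    # unknown kwargs must be ignored, per the contract above
    pass

def Plugins(**kwargs):
    return [PluginDescriptor(name="Example",
                             description="Illustrative entry only",
                             where=PluginDescriptor.WHERE_PLUGINMENU,
                             fnc=_example_main)]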
|
kingvuplus/ee
|
lib/python/Plugins/Plugin.py
|
Python
|
gpl-2.0
| 3,227
|
"""
A driver for Icotera CPE
"""
import re
from Exscript.protocols.drivers import Driver
class IcoteraDriver(Driver):
def __init__(self):
"""
Constructor of the IcoteraDriver.
"""
Driver.__init__(self, 'icotera')
self.user_re = [re.compile(r'user ?name: ?$', re.I)]
self.password_re = [re.compile(r'(?:[\r\n]Password: ?|last resort password:)$')]
self.prompt_re = [re.compile(r'.*?>\s*$')]
self.error_re = [re.compile(r'ERROR')]
def check_head_for_os(self, string):
if 'ICOTERA' in string:
return 80
return 0
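# Hedged illustration (not part of the original driver; assumes Exscript is
# installed): the recognition patterns above match prompts such as these.
if __name__ == '__main__':
    driver = IcoteraDriver()
    assert driver.user_re[0].search('Username: ')
    assert driver.prompt_re[0].search('icotera> ')
    assert driver.check_head_for_os('ICOTERA firmware v1.0') == 80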
|
maximumG/exscript
|
Exscript/protocols/drivers/icotera.py
|
Python
|
mit
| 612
|
import logging
import warnings
import astropy
import numpy as np
import pytest
from astropy.utils.introspection import minversion
from numpy.testing import assert_allclose
from ginga import AstroImage
from ginga.util import wcsmod
# TODO: Add a test for native GWCS object.
_logger = logging.getLogger("TestWCS")
_wcsmods = ('kapteyn', 'starlink', 'astlib', 'astropy', 'astropy_ape14')
_hdr = {'2d': {'ADC-END': 6.28,
'ADC-STR': 6.16,
'ADC-TYPE': 'IN',
'AIRMASS': 1.0526,
'ALTITUDE': 72.142,
'AUTOGUID': 'ON',
'AZIMUTH': 282.679,
'BIN-FCT1': 1,
'BIN-FCT2': 1,
'BITPIX': -32,
'BLANK': -32768,
'BUNIT': 'ADU',
'CD1_1': -5.611e-05,
'CD1_2': 0.0,
'CD2_1': 0.0,
'CD2_2': 5.611e-05,
'CDELT1': -5.611e-05,
'CDELT2': 5.611e-05,
'COADD': 1,
'CRPIX1': 5276.0,
'CRPIX2': 25.0,
'CRVAL1': 299.91736667,
'CRVAL2': 22.68769444,
'CTYPE1': 'RA---TAN',
'CTYPE2': 'DEC--TAN',
'CUNIT1': 'degree',
'CUNIT2': 'degree',
'DATA-TYP': 'OBJECT',
'DATASET': 'DS000',
'DATE-OBS': '2009-08-22',
'DEC': '+22:41:15.70',
'DEC2000': '+22:41:15.70',
'DET-A01': 90.0,
'DET-ID': 6,
'DET-P101': -79.14,
'DET-P201': -0.375,
'DET-TMAX': 0.0,
'DET-TMED': 0.0,
'DET-TMIN': 0.0,
'DET-TMP': 172.74,
'DET-VER': 'spcam20080721',
'DETECTOR': 'chihiro',
'DOM-HUM': 12.4,
'DOM-PRS': 622.3,
'DOM-TMP': 276.35,
'DOM-WND': 0.6,
'EFP-MIN1': 9,
'EFP-MIN2': 49,
'EFP-RNG1': 2256,
'EFP-RNG2': 4177,
'EQUINOX': 2000.0,
'EXP-ID': 'SUPE01118760',
'EXP1TIME': 90.0,
'EXPTIME': 90.0,
'EXTEND': False,
'FILTER01': 'W-J-B',
'FOC-POS': 'Prime',
'FOC-VAL': 7.14,
'FRAMEID': 'SUPA01118766',
'GAIN': 3.73,
'HST': '23:34:25.911',
'HST-END': '23:35:55.010',
'HST-STR': '23:34:25.911',
'INR-END': -174.487,
'INR-STR': -174.239,
'INS-VER': 'Messia5/sup080721',
'INST-PA': 90.0,
'INSTRUME': 'SuprimeCam',
'LONGPOLE': 180.0,
'LST': '21:15:48.968',
'LST-END': '21:17:18.311',
'LST-STR': '21:15:48.968',
'M2-ANG1': 1.5,
'M2-ANG2': -0.0,
'M2-ANG3': 0.0,
'M2-POS1': -0.753,
'M2-POS2': -2.1,
'M2-POS3': 8.205,
'MJD': 55065.398914,
'MJD-END': 55065.399945,
'MJD-STR': 55065.398914,
'NAXIS': 2,
'NAXIS1': 2272,
'NAXIS2': 4273,
'OBJECT': 'M27',
'OBS-ALOC': 'Observation',
'OBS-MOD': 'IMAG_N_VGW',
'OBSERVAT': 'NAOJ',
'OBSERVER': 'Jeschke, Inagaki, Streeper, Yagi, Nakata',
'OUT-HUM': 13.1,
'OUT-PRS': 622.3,
'OUT-TMP': 275.95,
'OUT-WND': 6.0,
'PRD-MIN1': 1,
'PRD-MIN2': 1,
'PRD-RNG1': 2272,
'PRD-RNG2': 4273,
'PROP-ID': 'o99005',
'RA': '19:59:40.168',
'RA2000': '19:59:40.168',
'RADECSYS': 'FK5',
'SECZ-END': 1.053,
'SECZ-STR': 1.051,
'SEEING': 0.29,
'SIMPLE': True,
'S_AG-DEC': 'N/A',
'S_AG-EQN': 2000.0,
'S_AG-OBJ': 'N/A',
'S_AG-R': 999.99,
'S_AG-RA': 'N/A',
'S_AG-TH': 999.99,
'S_AG-X': 109.97,
'S_AG-Y': 19.3,
'S_BCTAVE': 999.999,
'S_BCTSD': 999.999,
'S_DELTAD': 0.0,
'S_DELTAZ': 0.0,
'S_EFMN11': 9,
'S_EFMN12': 49,
'S_EFMN21': 617,
'S_EFMN22': 49,
'S_EFMN31': 1145,
'S_EFMN32': 49,
'S_EFMN41': 1753,
'S_EFMN42': 49,
'S_EFMX11': 520,
'S_EFMX12': 4225,
'S_EFMX21': 1128,
'S_EFMX22': 4225,
'S_EFMX31': 1656,
'S_EFMX32': 4225,
'S_EFMX41': 2264,
'S_EFMX42': 4225,
'S_ETMAX': 0.0,
'S_ETMED': 273.15,
'S_ETMIN': 0.0,
'S_FRMPOS': '0001',
'S_GAIN1': 3.73,
'S_GAIN2': 2.95,
'S_GAIN3': 3.1,
'S_GAIN4': 3.17,
'S_M2OFF1': 0.0,
'S_M2OFF2': 0.0,
'S_M2OFF3': 7.14,
'S_OSMN11': 521,
'S_OSMN12': 1,
'S_OSMN21': 569,
'S_OSMN22': 1,
'S_OSMN31': 1657,
'S_OSMN32': 1,
'S_OSMN41': 1705,
'S_OSMN42': 1,
'S_OSMX11': 568,
'S_OSMX12': 48,
'S_OSMX21': 616,
'S_OSMX22': 48,
'S_OSMX31': 1704,
'S_OSMX32': 48,
'S_OSMX41': 1752,
'S_OSMX42': 48,
'S_SENT': False,
'S_UFNAME': 'object060_chihiro.fits',
'S_XFLIP': False,
'S_YFLIP': True,
'TELESCOP': 'Subaru',
'TELFOCUS': 'P_OPT',
'TIMESYS': 'UTC',
'UT': '09:34:25.911',
'UT-END': '09:35:55.010',
'UT-STR': '09:34:25.911',
'WCS-ORIG': 'SUBARU Toolkit',
'WEATHER': 'Fine',
'ZD-END': 18.2,
'ZD-STR': 17.858},
'3d': {'SIMPLE': True,
'BITPIX': 16,
'NAXIS': 3,
'NAXIS1': 100,
'NAXIS2': 100,
'NAXIS3': 101,
'BLOCKED': True,
'CDELT1': -7.165998823E-03,
'CRPIX1': 5.1E+01,
'CRVAL1': -5.12820847959E+01,
'CTYPE1': 'RA---NCP',
'CUNIT1': 'deg',
'CDELT2': 7.165998823E-03,
'CRPIX2': 5.1E+01,
'CRVAL2': 6.01538880206E+01,
'CTYPE2': 'DEC--NCP',
'CUNIT2': 'deg',
'CDELT3': 4.199999809,
'CRPIX3': -2.0E+01,
'CRVAL3': -2.43E+02,
'CTYPE3': 'VOPT',
'CUNIT3': 'km/s',
'EPOCH': 2.0E+03,
'FREQ0': 1.420405758370E+09,
'BUNIT': 'JY/BEAM ',
'BMAJ': 1.82215739042E-02,
'BMIN': 1.76625289023E-02,
'BTYPE': 'intensity',
'BPA': -7.41641769409E+01,
'NITERS': 2626643,
'LWIDTH': 4.19999980927E+00,
'LSTEP': 4.19999980927E+00,
'LSTART': -2.43E+02,
'VOBS': -1.95447368244E+00,
'LTYPE': 'velocity',
'SPECSYS': 'BARYCENT'}}
img_dict = {}
def setup_module():
"""Create objects once and re-use throughout this module."""
global img_dict
if minversion(astropy, '3.1'):
USE_APE14 = True
else:
USE_APE14 = False
img_dict = {}
for modname in _wcsmods:
if modname == 'astropy_ape14' and not USE_APE14:
continue
if not wcsmod.use(modname, raise_err=False):
continue
img_dict[modname] = {}
for dim in _hdr.keys():
w = wcsmod.WCS(_logger)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
w.load_header(_hdr[dim])
img = AstroImage.AstroImage(logger=_logger)
img.wcs = w
if dim == '2d':
img.revnaxis = []
img.naxispath = []
else: # 3d
img.revnaxis = [0]
img.naxispath = [0]
img_dict[modname][dim] = img
@pytest.mark.parametrize('modname', _wcsmods)
def test_scalar_2d(modname):
if modname not in img_dict:
pytest.skip("WCS '{}' not available".format(modname))
img = img_dict[modname]['2d']
xy_v1 = (120, 100)
radec_deg_v1 = (300.2308791294835, 22.691653517073615)
# If this works here, should already work for other cases.
assert img.wcs.has_valid_wcs()
# 0.01% agreement is good enough across different libraries.
radec = img.pixtoradec(*xy_v1)
assert_allclose(radec, radec_deg_v1, rtol=1e-4)
xy = img.radectopix(*radec_deg_v1)
if modname == 'astropy_ape14':
# TODO: Remove rtol when load_header is fixed.
assert_allclose(xy, xy_v1, rtol=0.01)
else:
assert_allclose(xy, xy_v1)
gal = img.wcs.pixtosystem(xy_v1, system='galactic')
assert_allclose(gal, (60.97030081935234, -3.9706229385605307), rtol=1e-4)
@pytest.mark.parametrize('modname', _wcsmods)
def test_vectorized_2d(modname):
if modname not in img_dict:
pytest.skip("WCS '{}' not available".format(modname))
img = img_dict[modname]['2d']
xy_v1 = [(0, 0), (120, 100)]
radec_deg_v1 = np.array([(300.2381639, 22.68602823),
(300.2308791294835, 22.691653517073615)])
gal_v1 = np.array([(60.96903325, -3.97929572),
(60.97030081935234, -3.9706229385605307)])
# 0.01% agreement is good enough across different libraries.
radec = img.wcs.datapt_to_wcspt(xy_v1)
assert_allclose(radec, radec_deg_v1, rtol=1e-4)
xy = img.wcs.wcspt_to_datapt(radec_deg_v1)
assert_allclose(xy, xy_v1, atol=7e-5)
if modname == 'astlib':
with pytest.raises(NotImplementedError):
img.wcs.datapt_to_system(xy_v1, system='galactic')
else:
gal = img.wcs.datapt_to_system(xy_v1, system='galactic')
if modname in ('astropy', 'astropy_ape14'):
assert_allclose(gal.l.degree, gal_v1[:, 0])
assert_allclose(gal.b.degree, gal_v1[:, 1])
else:
assert_allclose(gal, gal_v1, rtol=1e-4)
@pytest.mark.parametrize('modname', _wcsmods)
def test_scalar_3d(modname):
if modname not in img_dict:
pytest.skip("WCS '{}' not available".format(modname))
img = img_dict[modname]['3d']
idxs = (0, 0, 0)
ra_deg_v1 = -50.569931842112965
dec_deg_v1 = 59.79236398619401
vel_v1 = -159000.00382
if modname == 'astlib':
with pytest.raises(wcsmod.common.WCSError):
img.spectral_coord(idxs)
else:
c = img.spectral_coord(idxs)
if modname == 'starlink':
assert_allclose(c, vel_v1 * 1e-3)
elif modname == 'astropy_ape14':
            # TODO: Remove rtol when load_header() is fixed.
assert_allclose(c, vel_v1, rtol=0.03)
else:
assert_allclose(c, vel_v1)
# 0.01% agreement is good enough across different libraries.
# RA can be off by 360 degrees and still be valid.
c = img.pixtoradec(*idxs[:2])
assert (np.allclose(c, (ra_deg_v1, dec_deg_v1), rtol=1e-4) or
np.allclose(c, (ra_deg_v1 + 360, dec_deg_v1), rtol=1e-4))
px = img.radectopix(*c)
assert_allclose(px, idxs[:2], atol=1e-3)
c = img.wcs.pixtosystem(idxs, system='galactic')
assert_allclose(c, (95.62934261967311, 11.172927294480449), rtol=1e-4)
@pytest.mark.parametrize('modname', _wcsmods)
def test_vectorized_3d(modname):
if modname not in img_dict:
pytest.skip("WCS '{}' not available".format(modname))
img = img_dict[modname]['3d']
xy_v1 = [(0, 0), (120, 100)]
nxp = [0]
radec_deg_v1 = np.array([[-50.5699318, 59.7923640],
[-52.3010162, 60.5064254]])
gal_v1 = np.array([(95.62934262, 11.17292729),
(95.72174081, 12.28825976)])
if modname == 'kapteyn':
vel_v1 = -159000.00382
else:
vel_v1 = -154800.004
# 0.01% agreement is good enough across different libraries.
# RA can be off by 360 degrees and still be valid.
if modname == 'astlib':
with pytest.raises(NotImplementedError):
img.wcs.datapt_to_wcspt(xy_v1, naxispath=nxp)
else:
radec = img.wcs.datapt_to_wcspt(xy_v1, naxispath=nxp)
assert (np.allclose(radec[:, 0], radec_deg_v1[:, 0]) or
np.allclose(radec[:, 0], radec_deg_v1[:, 0] + 360))
assert_allclose(radec[:, 1], radec_deg_v1[:, 1], rtol=1e-4)
if modname == 'starlink':
with pytest.raises(IndexError):
radec[:, 2]
else:
assert_allclose(radec[:, 2], vel_v1, rtol=1e-4)
if modname == 'astlib':
with pytest.raises(NotImplementedError):
img.wcs.wcspt_to_datapt(radec_deg_v1, naxispath=nxp)
else:
xy = img.wcs.wcspt_to_datapt(radec_deg_v1, naxispath=nxp)
assert_allclose(xy[:, :2], xy_v1, atol=3e-6)
if modname == 'kapteyn':
assert_allclose(xy[:, 2], 36.85715, atol=3e-6)
if modname == 'astlib':
with pytest.raises(NotImplementedError):
img.wcs.datapt_to_system([(0, 0, 0), (120, 100, 0)],
system='galactic')
elif modname == 'kapteyn':
with pytest.raises(Exception):
img.wcs.datapt_to_system([(0, 0, 0), (120, 100, 0)],
system='galactic')
else:
gal = img.wcs.datapt_to_system([(0, 0, 0), (120, 100, 0)],
system='galactic')
if modname in ('astropy', 'astropy_ape14'):
assert_allclose(gal.l.degree, gal_v1[:, 0])
assert_allclose(gal.b.degree, gal_v1[:, 1])
else:
assert_allclose(gal, gal_v1, rtol=1e-4)
def test_fixheader():
w = wcsmod.common.BaseWCS(_logger)
w.header = {'SIMPLE': True, 'CUNIT1': 'degree', 'CUNIT2': 'Degree'}
w.fix_bad_headers()
assert w.get_keyword('SIMPLE')
assert w.get_keywords('CUNIT1', 'CUNIT2') == ['deg', 'deg']
with pytest.raises(wcsmod.common.WCSError):
w.datapt_to_system((0, 0))
@pytest.mark.parametrize('val', ['degr', 'blah'])
def test_choose_coord_units(val):
assert wcsmod.common.choose_coord_units({'CUNIT1': val}) == 'degree'
@pytest.mark.parametrize(
('hdr', 'val'),
[({'RA': 0, 'EQUINOX': 1983.9}, 'fk4'),
({'RA': 0, 'EQUINOX': 1984.0}, 'fk5'),
({'RA': 0}, 'icrs'),
({}, 'raw'),
({'CTYPE1': 'GLON-TAN'}, 'galactic'),
({'CTYPE1': 'ELON-TAN'}, 'ecliptic'),
({'CTYPE1': 'RA---TAN', 'EQUINOX': 1983.9}, 'fk4'),
({'CTYPE1': 'RA---TAN', 'EQUINOX': 1984.0}, 'fk5'),
({'CTYPE1': 'RA---TAN'}, 'icrs'),
({'CTYPE1': 'RA---TAN', 'RADECSYS': 'foo'}, 'foo'),
({'CTYPE1': 'RA---TAN', 'RADESYS': 'bar'}, 'bar'),
({'CTYPE1': 'HPLN-TAN'}, 'helioprojective'),
({'CTYPE1': 'HGLT-TAN'}, 'heliographicstonyhurst'),
({'CTYPE1': 'PIXEL'}, 'pixel'),
({'CTYPE1': 'LINEAR'}, 'pixel'),
({'CTYPE1': 'foo'}, 'icrs')])
def test_get_coord_sys_name(hdr, val):
assert wcsmod.common.get_coord_system_name(hdr) == val
# END
|
naojsoft/ginga
|
ginga/tests/test_wcs.py
|
Python
|
bsd-3-clause
| 15,731
|
# Rekall Memory Forensics
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or (at
# your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
__author__ = (
"Michael Cohen <scudette@google.com>",
"Adam Sindelar <adam.sindelar@gmail.com>")
from rekall import obj
from rekall import plugin
from rekall_lib import registry
from rekall.plugins.darwin import common
class DarwinUnpListCollector(common.AbstractDarwinProducer):
"""Walks the global list of sockets in uipc_usrreq."""
name = "unp_sockets"
type_name = "socket"
def collect(self):
for head_const in ["_unp_dhead", "_unp_shead"]:
lhead = self.session.profile.get_constant_object(
head_const,
target="unp_head")
for unp in lhead.lh_first.walk_list("unp_link.le_next"):
yield [unp.unp_socket]
class DarwinSocketsFromHandles(common.AbstractDarwinProducer):
"""Looks up handles that point to a socket and collects the socket."""
name = "open_sockets"
type_name = "socket"
def collect(self):
for fileproc in self.session.plugins.collect("fileproc"):
if fileproc.fg_type == "DTYPE_SOCKET":
yield [fileproc.autocast_fg_data()]
class DarwinNetstat(common.AbstractDarwinCommand):
"""Prints all open sockets we know about, from any source.

    Netstat will display even connections that lsof doesn't know about, because
    they were either recovered from an allocation zone, or found through a
    secondary mechanism (like the system call handler cache).

    On the other hand, netstat doesn't know the file descriptor or, really, the
    process that owns the connection (although it does know the PID of the last
    process to access the socket).
Netstat will also tell you, in the style of psxview, if a socket was only
found using some of the methods available.
"""
name = "netstat"
@classmethod
def methods(cls):
"""Return the names of available socket enumeration methods."""
        # Find all the producers that collect sockets: plugins that are both
        # Darwin commands and producers with type_name "socket".
methods = []
for subclass in common.AbstractDarwinProducer.classes.itervalues():
# We look for a plugin which is a producer and a darwin command.
if (issubclass(subclass, common.AbstractDarwinCommand) and
issubclass(subclass, plugin.Producer) and
subclass.type_name == "socket"):
methods.append(subclass.name)
methods.sort()
return methods
@registry.classproperty
@registry.memoize
def table_header(cls): # pylint: disable=no-self-argument
header = [dict(name="socket", type="socket", width=60)]
for method in cls.methods():
header.append(dict(name=method, width=12))
return plugin.PluginHeader(*header)
def collect(self):
methods = self.methods()
for socket in sorted(self.session.plugins.collect("socket"),
key=lambda socket: socket.last_pid):
row = [socket]
for method in methods:
row.append(method in socket.obj_producers)
yield row
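# Hedged illustration (hypothetical values, not produced by this file): each
# rendered row pairs one socket with a boolean per enumeration method, in the
# style of psxview, e.g.:
#
#   socket                       open_sockets  unp_sockets
#   <socket 192.168.0.5:22>      True          False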
class DarwinGetArpListHead(common.AbstractDarwinParameterHook):
"""
One version of arp_init looks like this:
void
arp_init(void)
{
VERIFY(!arpinit_done);
LIST_INIT(&llinfo_arp); // <-- This is the global we want.
llinfo_arp_zone = zinit(sizeof (struct llinfo_arp),
LLINFO_ARP_ZONE_MAX * sizeof (struct llinfo_arp), 0,
LLINFO_ARP_ZONE_NAME);
if (llinfo_arp_zone == NULL)
panic("%s: failed allocating llinfo_arp_zone", __func__);
zone_change(llinfo_arp_zone, Z_EXPAND, TRUE);
zone_change(llinfo_arp_zone, Z_CALLERACCT, FALSE);
arpinit_done = 1;
}
Disassembled, the first few instructions look like this:
0x0 55 PUSH RBP
0x1 4889e5 MOV RBP, RSP
0x4 803d65e9400001 CMP BYTE [RIP+0x40e965], 0x1
0xb 7518 JNZ 0xff80090a7f95
0xd 488d3dee802900 LEA RDI, [RIP+0x2980ee]
0x14 488d35f5802900 LEA RSI, [RIP+0x2980f5]
0x1b baf3000000 MOV EDX, 0xf3
# This is a call to kernel!panic (later kernel!assfail):
0x20 e80b6c1400 CALL 0xff80091eeba0
# This is where it starts initializing the linked list:
0x25 48c70548e94000000000 MOV QWORD [RIP+0x40e948], 0x0
00
0x30 488d0d0e812900 LEA RCX, [RIP+0x29810e]
"""
name = "disassembled_llinfo_arp"
PANIC_FUNCTIONS = (u"__kernel__!_panic", u"__kernel__!_assfail")
def calculate(self):
resolver = self.session.address_resolver
arp_init = resolver.get_constant_object("__kernel__!_arp_init",
target="Function")
instructions = iter(arp_init.Decompose(20))
# Walk down to the CALL mnemonic and use the address resolver to
# see if it calls one of the panic functions.
for instruction in instructions:
# Keep spinning until we get to the first CALL.
if instruction.mnemonic != "CALL":
continue
# This is absolute:
target = instruction.operands[0].value
_, names = resolver.get_nearest_constant_by_address(target)
if not names:
return obj.NoneObject("Could not find CALL in arp_init.")
if names[0] not in self.PANIC_FUNCTIONS:
return obj.NoneObject(
"CALL was to %r, which is not on the PANIC list."
% names)
# We verified it's the right CALL. MOV should be right after it,
# so let's just grab it.
mov_instruction = next(instructions)
if mov_instruction.mnemonic != "MOV":
return obj.NoneObject("arp_init code changed.")
offset = (mov_instruction.operands[0].disp
+ mov_instruction.address
+ mov_instruction.size)
address = self.session.profile.Object(type_name="address",
offset=offset)
llinfo_arp = self.session.profile.Object(
type_name="llinfo_arp",
offset=address.v())
if llinfo_arp.isvalid:
return llinfo_arp.obj_offset
return obj.NoneObject("llinfo_arp didn't validate.")
class DarwinArp(common.AbstractDarwinProducer):
"""Show information about arp tables."""
name = "arp"
type_name = "rtentry"
def collect(self):
llinfo_arp = self.session.address_resolver.get_constant_object(
"__kernel__!_llinfo_arp",
target="Pointer",
target_args=dict(target="llinfo_arp"))
if not llinfo_arp:
# Must not have it in the profile. Try asking the session hook
# for the address.
offset = self.session.GetParameter("disassembled_llinfo_arp")
if not offset:
self.session.logging.error(
"Could not find the address of llinfo_arp.")
return
llinfo_arp = self.session.profile.Object(
type_name="llinfo_arp", offset=offset)
for arp_hit in llinfo_arp.walk_list("la_le.le_next"):
yield [arp_hit.la_rt]
class DarwinRoute(common.AbstractDarwinCommand):
"""Show routing table."""
__name = "route"
RNF_ROOT = 2
def rn_walk_tree(self, h):
"""Walks the radix tree starting from the header h.
        This function is taken from
        xnu-2422.1.72/bsd/net/radix.c: rn_walk_tree(),
        which is why it does not conform to the style guide.
Note too that the darwin source code abuses C macros:
#define rn_dupedkey rn_u.rn_leaf.rn_Dupedkey
#define rn_key rn_u.rn_leaf.rn_Key
#define rn_mask rn_u.rn_leaf.rn_Mask
#define rn_offset rn_u.rn_node.rn_Off
#define rn_left rn_u.rn_node.rn_L
#define rn_right rn_u.rn_node.rn_R
And then the original code does:
rn = rn.rn_left
So we replace these below.
"""
rn = h.rnh_treetop
seen = set()
        # First time through node, go left
while rn.rn_bit >= 0:
rn = rn.rn_u.rn_node.rn_L
while rn and rn not in seen:
base = rn
seen.add(rn)
# If at right child go back up, otherwise, go right
while (rn.rn_parent.rn_u.rn_node.rn_R == rn and
not rn.rn_flags & self.RNF_ROOT):
rn = rn.rn_parent
# Find the next *leaf* to start from
rn = rn.rn_parent.rn_u.rn_node.rn_R
while rn.rn_bit >= 0:
rn = rn.rn_u.rn_node.rn_L
            next_rn = rn
# Process leaves
while True:
rn = base
if not rn:
break
base = rn.rn_u.rn_leaf.rn_Dupedkey
if not rn.rn_flags & self.RNF_ROOT:
yield rn
            rn = next_rn
if rn.rn_flags & self.RNF_ROOT:
return
def render(self, renderer):
renderer.table_header(
[("Source IP", "source", "20"),
("Dest IP", "dest", "20"),
("Interface", "interface", "9"),
("Sent", "sent", "8"),
("Recv", "recv", "8"),
("Time", "timestamp", "24"),
("Expires", "expires", "8"),
("Delta", "delta", "8")])
route_tables = self.profile.get_constant_object(
"_rt_tables",
target="Array",
target_args=dict(
count=32,
target="Pointer",
target_args=dict(
target="radix_node_head")))
for node in self.rn_walk_tree(route_tables[2]):
rentry = node.dereference_as("rtentry")
renderer.table_row(
rentry.source_ip,
rentry.dest_ip,
rentry.name,
rentry.sent, rentry.rx,
rentry.base_calendartime,
rentry.rt_expire,
rentry.delta)
class DarwinIfnetHook(common.AbstractDarwinParameterHook):
"""Walks the global list of interfaces.
The head of the list of network interfaces is a kernel global [1].
The struct we use [2] is just the public part of the data [3]. Addresses
are related to an interface in a N:1 relationship [4]. AF-specific data
is a normal sockaddr struct.
References:
1:
https://github.com/opensource-apple/xnu/blob/10.9/bsd/net/dlil.c#L254
2:
https://github.com/opensource-apple/xnu/blob/10.9/bsd/net/if_var.h#L528
3:
https://github.com/opensource-apple/xnu/blob/10.9/bsd/net/dlil.c#L188
4:
https://github.com/opensource-apple/xnu/blob/10.9/bsd/net/if_var.h#L816
"""
name = "ifconfig"
# ifnet_head is the actual extern holding ifnets and seems to be an
# improvement over dlil_ifnet_head, which is a static and used only in the
# dlil (stands for data link interface, I think?) module.
IFNET_HEAD_NAME = ("_ifnet_head", "_dlil_ifnet_head")
def calculate(self):
ifnet_head = obj.NoneObject("No ifnet global names given.")
for name in self.IFNET_HEAD_NAME:
ifnet_head = self.session.profile.get_constant_object(
name,
target="Pointer",
target_args=dict(
target="ifnet"))
if ifnet_head:
break
return [x.obj_offset for x in ifnet_head.walk_list("if_link.tqe_next")]
class DarwinIfnetCollector(common.AbstractDarwinCachedProducer):
name = "ifconfig"
type_name = "ifnet"
class DarwinIPFilters(common.AbstractDarwinCommand):
"""Check IP Filters for hooks."""
__name = "ip_filters"
def render(self, renderer):
renderer.table_header([
("Context", "context", "10"),
("Filter", "filter", "16"),
("Handler", "handler", "[addrpad]"),
("Symbol", "symbol", "20")])
resolver = self.session.address_resolver
for list_name in ["_ipv4_filters", "_ipv6_filters"]:
filter_list = self.profile.get_constant_object(
list_name, target="ipfilter_list")
for item in filter_list.tqh_first.walk_list("ipf_link.tqe_next"):
filter = item.ipf_filter
name = filter.name.deref()
handler = filter.ipf_input.deref()
renderer.table_row("INPUT", name, handler,
resolver.format_address(handler))
handler = filter.ipf_output.deref()
renderer.table_row("OUTPUT", name, handler,
resolver.format_address(handler))
handler = filter.ipf_detach.deref()
renderer.table_row("DETACH", name, handler,
resolver.format_address(handler))
|
dsweet04/rekall
|
rekall-core/rekall/plugins/darwin/networking.py
|
Python
|
gpl-2.0
| 13,974
|
""" Astropy coordinate class for the Magellanic Stream coordinate system """
from astropy.coordinates.matrix_utilities import (rotation_matrix,
matrix_product,
matrix_transpose)
from astropy.coordinates.baseframe import (frame_transform_graph,
BaseCoordinateFrame,
RepresentationMapping)
from astropy.coordinates.transformations import StaticMatrixTransform
from astropy.coordinates import representation as r
from astropy.coordinates import Galactic
import astropy.units as u
from gala.util import GalaDeprecationWarning
__all__ = ["MagellanicStreamNidever08", "MagellanicStream"]
class MagellanicStreamNidever08(BaseCoordinateFrame):
"""
A coordinate or frame aligned with the Magellanic Stream,
as defined by Nidever et al. (2008,
see: `<http://adsabs.harvard.edu/abs/2008ApJ...679..432N>`_).
For more information about this class, see the Astropy documentation
on coordinate frames in :mod:`~astropy.coordinates`.
Examples
--------
Converting the coordinates of the Large Magellanic Cloud:
>>> from astropy import coordinates as coord
>>> from astropy import units as u
>>> from gala.coordinates import MagellanicStreamNidever08
>>> c = coord.Galactic(l=280.4652*u.deg, b=-32.8884*u.deg)
>>> ms = c.transform_to(MagellanicStreamNidever08())
>>> print(ms)
<MagellanicStreamNidever08 Coordinate: (L, B) in deg
(-0.13686116, 2.42583948)>
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping('lon', 'L'),
RepresentationMapping('lat', 'B')
]
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
_ngp = Galactic(l=188.5*u.deg, b=-7.5*u.deg)
_lon0 = Galactic(l=280.47*u.deg, b=-32.75*u.deg)
_default_wrap_angle = 180*u.deg
def __init__(self, *args, **kwargs):
wrap = kwargs.pop('wrap_longitude', True)
super().__init__(*args, **kwargs)
if wrap and isinstance(self._data, (r.UnitSphericalRepresentation,
r.SphericalRepresentation)):
self._data.lon.wrap_angle = self._default_wrap_angle
# TODO: remove this. This is a hack required as of astropy v3.1 in order
# to have the longitude components wrap at the desired angle
def represent_as(self, base, s='base', in_frame_units=False):
r = super().represent_as(base, s=s, in_frame_units=in_frame_units)
if hasattr(r, "lon"):
r.lon.wrap_angle = self._default_wrap_angle
return r
represent_as.__doc__ = BaseCoordinateFrame.represent_as.__doc__
@frame_transform_graph.transform(StaticMatrixTransform,
Galactic, MagellanicStreamNidever08)
def gal_to_mag():
mat1 = rotation_matrix(57.275785782128686*u.deg, 'z')
mat2 = rotation_matrix(90*u.deg - MagellanicStreamNidever08._ngp.b, 'y')
mat3 = rotation_matrix(MagellanicStreamNidever08._ngp.l, 'z')
return matrix_product(mat1, mat2, mat3)
@frame_transform_graph.transform(StaticMatrixTransform,
MagellanicStreamNidever08, Galactic)
def mag_to_gal():
return matrix_transpose(gal_to_mag())
# TODO: remove this in next version
class MagellanicStream(MagellanicStreamNidever08):
def __init__(self, *args, **kwargs):
import warnings
warnings.warn("This frame is deprecated. Use MagellanicStreamNidever08 "
"instead.", GalaDeprecationWarning)
super().__init__(*args, **kwargs)
trans = frame_transform_graph.get_transform(MagellanicStreamNidever08,
Galactic).transforms[0]
frame_transform_graph.add_transform(MagellanicStream, Galactic, trans)
trans = frame_transform_graph.get_transform(Galactic,
MagellanicStreamNidever08).transforms[0]
frame_transform_graph.add_transform(Galactic, MagellanicStream, trans)
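if __name__ == "__main__":
    # Hedged sanity sketch (illustration, not part of gala): the two static
    # transforms above are mutually inverse rotations, so their product
    # should be the 3x3 identity matrix.
    import numpy as np
    assert np.allclose(matrix_product(gal_to_mag(), mag_to_gal()), np.eye(3))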
|
adrn/gala
|
gala/coordinates/magellanic_stream.py
|
Python
|
mit
| 4,229
|
# -*- coding: utf-8 -*-
import numpy as np
def longcorr(data, temp, wave):  # inputs: a two-column data array, temperature in C, excitation wavelength in nm
"""
# Long Correction
# Charles Le Losq
# CIW Washington 2014
# Long's correction of Raman spectra and normalisation
# last rev. Oct 2010, converted to Matlab and then to Python
# ensures strictly increasing values of wavenumber
# calc. e.s.e. as Long cor. norm. sqrt(n_raw) 3d output col.
# exp. format to avoid null ese.
    # program long3;
    # Original program in Pascal by J. Roux, modified for Python by C. Le Losq.
# See Shucker and Gammon, Phys. Rev. Lett. 1970; Galeener and Sen, Phys. Rev. B 1978; Neuville and Mysen, GCA 1996; Le Losq et al. AM 2012 and GCA 2014 for equations and theory
"""
    h = 6.62606896*10**-34  # J.s Planck constant
    k = 1.38066e-23  # J/K Boltzmann constant
    c = 2.9979e8  # m/s speed of light
    v = wave  # nm exciting laser line
    nu0 = 1.0/v*10**9  # laser wavenumber: nm converted to m-1
    T = temp + 273.15  # input temperature in C converted to K
x = data[:,0]
y = data[:,1]
    # Relative error on the raw counts, sqrt(y)/y; absolute values guard
    # against any spurious negative intensities.
    error = np.sqrt(np.abs(y))/np.abs(y)
# then we proceed to the correction (Neuville and Mysen, 1996; Le Losq et
# al., 2012)
nu = 100.0*x; # cm-1 -> m-1 Raman shift
rnu = nu0-nu; # nu0 is in m-1
t0 = nu0*nu0*nu0*nu/rnu/rnu/rnu/rnu;
t1 = -h*c*nu/k/T; # c in m/s : t1 dimensionless
t2 = 1 - np.exp(t1);
longsp = y*t0*t2; # for y values
#long2 = ese*t0*t2; # for errors, as comment as we use relative errors
    # normalised to maximum intensity
    # (area normalisation with np.trapz was tried, but there is an issue for now)
    #norm = np.trapz(longsp, x)
norm = np.max(longsp)
longsp = longsp/norm
eselong = error*np.abs(longsp)
spectreout = np.zeros((len(x),3))
spectreout[:,0] = x
spectreout[:,1] = longsp
spectreout[:,2] = eselong
return spectreout
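# Hedged usage sketch (synthetic spectrum, not part of the original file):
if __name__ == '__main__':
    x = np.linspace(100.0, 1500.0, 500)  # Raman shift in cm-1
    y = 1000.0*np.exp(-((x - 500.0)/50.0)**2) + 50.0  # fake band on a baseline
    spectrum = np.column_stack((x, y))
    corrected = longcorr(spectrum, 23.0, 532.0)  # 23 C, 532 nm excitation
    print(corrected[:3])  # columns: wavenumber, corrected intensity, e.s.e.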
|
charlesll/RamEau
|
gcvspl/longcorr.py
|
Python
|
gpl-2.0
| 2,286
|
"""
Django settings for Git_Issue_Tracker project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a_cc%y8)6@0+z7*h6x88eu5lxe!okk4%=2%^6+mj^8#!1oia@1'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'track',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'Git_Issue_Tracker.urls'
WSGI_APPLICATION = 'Git_Issue_Tracker.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
STATIC_PATH = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = (
STATIC_PATH,
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
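# Hedged usage note (standard Django 1.7 tooling, nothing project-specific):
# with these settings in place, create the schema and start the development
# server with:
#   python manage.py migrate
#   python manage.py runserver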
|
harshitanand/Git-Issue-Tracker
|
Git_Issue_Tracker/settings.py
|
Python
|
mit
| 2,238
|
def extractUtenatranslationsWordpressCom(item):
'''
Parser for 'utenatranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
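# Hedged illustration (synthetic input; the helper functions above come from
# the surrounding codebase): an item titled 'Utena v2c10' with tags ['PRC']
# would be emitted as a 'translated' release via the tagmap, while an item
# whose title contains "preview" (or carries no vol/chp) is dropped with None.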
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractUtenatranslationsWordpressCom.py
|
Python
|
bsd-3-clause
| 574
|
import os.path
from datetime import datetime
import uuid
from fabric.api import env, local, put, cd, run
from fabistrano.helpers import sudo_run
def prepare_for_checkout():
    # Name the release <datetime>_<short hash>_<user>: the first 7 chars of
    # the commit hash identify the revision, and the user suffix avoids
    # permission conflicts between deployers.
git_cmd = 'git ls-remote %(git_clone)s %(git_branch)s' % {
'git_clone': env.git_clone, 'git_branch': env.git_branch,
}
commit_hash = local(git_cmd, capture=True).split('\t')[0]
env.commit_hash = commit_hash
env.current_revision = datetime.now().strftime('%Y%m%d_%H%M%S_') + commit_hash[:7] + "_" + str(env.user)
env.current_release = '%(releases_path)s/%(current_revision)s' % {
'releases_path': env.releases_path, 'current_revision': env.current_revision,
}
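# Hedged illustration (hypothetical values): for user "deploy" and a commit
# starting 0a1b2c3, current_revision comes out like
# "20240101_120000_0a1b2c3_deploy", so releases sort chronologically and do
# not collide between users.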
# Git
def remote_clone():
"""Checkout code to the remote servers"""
# set new release env
prepare_for_checkout()
# start
cache_name = 'code_%s.tar.bz2' % env.commit_hash[:15]
local_cache = '/tmp/'+cache_name
sudo_run('git archive --remote=%(git_clone)s %(git_branch)s | bzip2 > %(local_cache)s' % {
'git_clone': env.git_clone,
'git_branch': env.git_branch,
'local_cache': local_cache,
})
sudo_run('mkdir -p %(current_release)s' % {
'current_release': env.current_release,
})
with cd(env.current_release):
sudo_run('tar jxf %(local_cache)s' % {
'local_cache': local_cache,
})
def local_clone():
"""Checkout code to local machine, then upload to servers"""
# set new release env
prepare_for_checkout()
# start
cache_name = 'code_%s.tar.bz2' % env.commit_hash[:15]
local_cache = '/tmp/' + cache_name
if not os.path.isfile(local_cache):
local('git archive --remote=%(git_clone)s %(git_branch)s | bzip2 > %(local_cache)s' % {
'git_clone': env.git_clone,
'git_branch': env.git_branch,
'local_cache': local_cache,
})
put(local_cache, '/tmp/')
sudo_run('mkdir -p %(current_release)s' % {
'current_release': env.current_release,
})
with cd(env.current_release):
sudo_run('tar jxf %(local_cache)s' % {
'local_cache': local_cache,
})
# SVN
def remote_export():
"""Checkout code to the remote servers"""
# set new release env
prepare_for_checkout()
# start
cache_name = 'code_%(app_name)s_%(svn_revision)s_%(current_revision)s' % {
'app_name': env.app_name,
'svn_revision': env.svn_revision,
'current_revision': env.current_revision,
}
local_cache = '/tmp/'+cache_name
# svn auth
svn_username_opt = ''
if env.svn_username:
svn_username_opt = '--username %(svn_username)s' % {'svn_username': env.svn_username}
svn_password_opt = ''
if env.svn_password:
svn_password_opt = '--password %(svn_password)s' % {'svn_password': env.svn_password}
sudo_run('svn export -r %(svn_revision)s %(svn_repo)s %(local_cache)s %(svn_username_opt)s %(svn_password_opt)s' % {
'svn_revision': env.svn_revision,
'svn_repo': env.svn_repo,
'local_cache': local_cache,
'svn_username_opt': svn_username_opt,
'svn_password_opt': svn_password_opt,
})
sudo_run('mv %(local_cache)s %(current_release)s' % {
'local_cache': local_cache,
'current_release': env.current_release,
})
def local_export():
"""Checkout code to local machine, then upload to servers"""
# set new release env
prepare_for_checkout()
# start
cache_name = 'code_%(app_name)s_%(svn_revision)s_%(current_revision)s' % {
'app_name': env.app_name,
'svn_revision': env.svn_revision,
'current_revision': env.current_revision,
}
# svn auth
svn_username_opt = ''
if env.svn_username:
svn_username_opt = '--username %(svn_username)s' % {'svn_username': env.svn_username}
svn_password_opt = ''
if env.svn_password:
svn_password_opt = '--password %(svn_password)s' % {'svn_password': env.svn_password}
cmd = ('svn export -r %(svn_revision)s %(svn_repo)s '
'/tmp/%(cache_name)s %(svn_username_opt)s %(svn_password_opt)s') % {
'svn_revision': env.svn_revision,
'svn_repo': env.svn_repo,
'cache_name': cache_name,
'svn_username_opt': svn_username_opt,
'svn_password_opt': svn_password_opt,
}
local(cmd)
local('cd /tmp/ && tar cvzf %(cache_name)s.tar.gz %(cache_name)s' % {
'cache_name': cache_name,
})
put('/tmp/%(cache_name)s.tar.gz' % {'cache_name': cache_name}, '/tmp/')
with cd('/tmp'):
sudo_run('tar -xvf %(cache_name)s.tar.gz' % {
'cache_name': cache_name,
})
sudo_run('mv %(cache_name)s %(current_release)s' % {
'cache_name': cache_name,
'current_release': env.current_release,
})
def localcopy():
""" Deploy local copy to servers """
# set new release env
prepare_for_checkout()
# start
cache_name = 'code_%s.tar.bz2' % env.commit_hash[:15]
cmd = ('cp -rf %(localcopy_path)s /tmp/%(cache_name)s && '
'cd /tmp/ && tar cvzf %(cache_name)s.tar.gz %(cache_name)s') % {
'localcopy_path': env.localcopy_path,
'cache_name': cache_name,
}
local(cmd)
    # We add a GUID to the server-side tmp folder name to avoid conflicts
    # when deploying onto localhost, mainly for testing purposes.
server_tmp_folder = '/tmp/%(guid)s' % {'guid': uuid.uuid4().hex}
sudo_run('mkdir -p %(dir)s && chmod 777 %(dir)s' % {'dir': server_tmp_folder})
put('/tmp/%(cache_name)s.tar.gz' % {'cache_name': cache_name}, server_tmp_folder)
with cd(server_tmp_folder):
sudo_run('tar -xvf %(cache_name)s.tar.gz' % {
'cache_name': cache_name,
})
sudo_run('mv %(cache_name)s %(current_release)s' % {
'cache_name': cache_name,
'current_release': env.current_release,
})
|
zhang-z/fabistrano
|
fabistrano/deploy_strategies.py
|
Python
|
bsd-2-clause
| 6,246
|
# Copyright 2010-2011 OpenStack Foundation
# Copyright 2011 Piston Cloud Computing, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
from oslo_log import log as logging
from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack.compute.views import addresses as views_addresses
from nova.api.openstack.compute.views import flavors as views_flavors
from nova.api.openstack.compute.views import images as views_images
from nova.i18n import _LW
from nova.objects import base as obj_base
from nova import utils
LOG = logging.getLogger(__name__)
class ViewBuilder(common.ViewBuilder):
"""Model a server API response as a python dictionary."""
_collection_name = "servers"
_progress_statuses = (
"ACTIVE",
"BUILD",
"REBUILD",
"RESIZE",
"VERIFY_RESIZE",
"MIGRATING",
)
_fault_statuses = (
"ERROR", "DELETED"
)
# These are the lazy-loadable instance attributes required for showing
# details about an instance. Add to this list as new things need to be
# shown.
_show_expected_attrs = ['flavor', 'info_cache', 'metadata']
def __init__(self):
"""Initialize view builder."""
super(ViewBuilder, self).__init__()
self._address_builder = views_addresses.ViewBuilder()
self._flavor_builder = views_flavors.ViewBuilder()
self._image_builder = views_images.ViewBuilder()
def create(self, request, instance):
"""View that should be returned when an instance is created."""
return {
"server": {
"id": instance["uuid"],
"links": self._get_links(request,
instance["uuid"],
self._collection_name),
},
}
def basic(self, request, instance):
"""Generic, non-detailed view of an instance."""
return {
"server": {
"id": instance["uuid"],
"name": instance["display_name"],
"links": self._get_links(request,
instance["uuid"],
self._collection_name),
},
}
def get_show_expected_attrs(self, expected_attrs=None):
"""Returns a list of lazy-loadable expected attributes used by show
This should be used when getting the instances from the database so
that the necessary attributes are pre-loaded before needing to build
the show response where lazy-loading can fail if an instance was
deleted.
:param list expected_attrs: The list of expected attributes that will
be requested in addition to what this view builder requires. This
method will merge the two lists and return what should be
ultimately used when getting an instance from the database.
:returns: merged and sorted list of expected attributes
"""
if expected_attrs is None:
expected_attrs = []
# NOTE(mriedem): We sort the list so we can have predictable test
# results.
return sorted(list(set(self._show_expected_attrs + expected_attrs)))
def show(self, request, instance):
"""Detailed view of a single instance."""
ip_v4 = instance.get('access_ip_v4')
ip_v6 = instance.get('access_ip_v6')
server = {
"server": {
"id": instance["uuid"],
"name": instance["display_name"],
"status": self._get_vm_status(instance),
"tenant_id": instance.get("project_id") or "",
"user_id": instance.get("user_id") or "",
"metadata": self._get_metadata(instance),
"hostId": self._get_host_id(instance) or "",
"image": self._get_image(request, instance),
"flavor": self._get_flavor(request, instance),
"created": utils.isotime(instance["created_at"]),
"updated": utils.isotime(instance["updated_at"]),
"addresses": self._get_addresses(request, instance),
"accessIPv4": str(ip_v4) if ip_v4 is not None else '',
"accessIPv6": str(ip_v6) if ip_v6 is not None else '',
"links": self._get_links(request,
instance["uuid"],
self._collection_name),
},
}
if server["server"]["status"] in self._fault_statuses:
_inst_fault = self._get_fault(request, instance)
if _inst_fault:
server['server']['fault'] = _inst_fault
if server["server"]["status"] in self._progress_statuses:
server["server"]["progress"] = instance.get("progress", 0)
return server
def index(self, request, instances):
"""Show a list of servers without many details."""
coll_name = self._collection_name
return self._list_view(self.basic, request, instances, coll_name)
def detail(self, request, instances):
"""Detailed view of a list of instance."""
coll_name = self._collection_name + '/detail'
return self._list_view(self.show, request, instances, coll_name)
def _list_view(self, func, request, servers, coll_name):
"""Provide a view for a list of servers.
:param func: Function used to format the server data
:param request: API request
:param servers: List of servers in dictionary format
:param coll_name: Name of collection, used to generate the next link
for a pagination query
:returns: Server data in dictionary format
"""
server_list = [func(request, server)["server"] for server in servers]
servers_links = self._get_collection_links(request,
servers,
coll_name)
servers_dict = dict(servers=server_list)
if servers_links:
servers_dict["servers_links"] = servers_links
return servers_dict
@staticmethod
def _get_metadata(instance):
# FIXME(danms): Transitional support for objects
metadata = instance.get('metadata')
if isinstance(instance, obj_base.NovaObject):
return metadata or {}
else:
return utils.instance_meta(instance)
@staticmethod
def _get_vm_status(instance):
# If the instance is deleted the vm and task states don't really matter
if instance.get("deleted"):
return "DELETED"
return common.status_from_state(instance.get("vm_state"),
instance.get("task_state"))
@staticmethod
def _get_host_id(instance):
host = instance.get("host")
project = str(instance.get("project_id"))
if host:
sha_hash = hashlib.sha224(project + host)
return sha_hash.hexdigest()
def _get_addresses(self, request, instance, extend_address=False):
context = request.environ["nova.context"]
networks = common.get_networks_for_instance(context, instance)
return self._address_builder.index(networks,
extend_address)["addresses"]
def _get_image(self, request, instance):
image_ref = instance["image_ref"]
if image_ref:
image_id = str(common.get_id_from_href(image_ref))
bookmark = self._image_builder._get_bookmark_link(request,
image_id,
"images")
return {
"id": image_id,
"links": [{
"rel": "bookmark",
"href": bookmark,
}],
}
else:
return ""
def _get_flavor(self, request, instance):
instance_type = instance.get_flavor()
if not instance_type:
LOG.warning(_LW("Instance has had its instance_type removed "
"from the DB"), instance=instance)
return {}
flavor_id = instance_type["flavorid"]
flavor_bookmark = self._flavor_builder._get_bookmark_link(request,
flavor_id,
"flavors")
return {
"id": str(flavor_id),
"links": [{
"rel": "bookmark",
"href": flavor_bookmark,
}],
}
def _get_fault(self, request, instance):
# This can result in a lazy load of the fault information
fault = instance.fault
if not fault:
return None
fault_dict = {
"code": fault["code"],
"created": utils.isotime(fault["created_at"]),
"message": fault["message"],
}
if fault.get('details', None):
is_admin = False
context = request.environ["nova.context"]
if context:
is_admin = getattr(context, 'is_admin', False)
if is_admin or fault['code'] != 500:
fault_dict['details'] = fault["details"]
return fault_dict
class ViewBuilderV21(ViewBuilder):
"""Model a server v2.1 API response as a python dictionary."""
def __init__(self):
"""Initialize view builder."""
super(ViewBuilderV21, self).__init__()
self._address_builder = views_addresses.ViewBuilderV21()
        # TODO(alex_xu): In the V3 API we corrected the image bookmark link to
        # use the glance endpoint; we revert it back to the nova endpoint for v2.1.
self._image_builder = views_images.ViewBuilder()
def show(self, request, instance, extend_address=True):
"""Detailed view of a single instance."""
server = {
"server": {
"id": instance["uuid"],
"name": instance["display_name"],
"status": self._get_vm_status(instance),
"tenant_id": instance.get("project_id") or "",
"user_id": instance.get("user_id") or "",
"metadata": self._get_metadata(instance),
"hostId": self._get_host_id(instance) or "",
                # TODO(alex_xu): '_get_image' returned {} when image_ref did
                # not exist in the V3 API; we revert it back to returning ""
                # in v2.1.
"image": self._get_image(request, instance),
"flavor": self._get_flavor(request, instance),
"created": utils.isotime(instance["created_at"]),
"updated": utils.isotime(instance["updated_at"]),
"addresses": self._get_addresses(request, instance,
extend_address),
"links": self._get_links(request,
instance["uuid"],
self._collection_name),
},
}
if server["server"]["status"] in self._fault_statuses:
_inst_fault = self._get_fault(request, instance)
if _inst_fault:
server['server']['fault'] = _inst_fault
if server["server"]["status"] in self._progress_statuses:
server["server"]["progress"] = instance.get("progress", 0)
if api_version_request.is_supported(request, min_version="2.9"):
server["server"]["locked"] = (True if instance["locked_by"]
else False)
if api_version_request.is_supported(request, min_version="2.19"):
server["server"]["description"] = instance.get(
"display_description")
return server
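# Hedged illustration (shape only; identifiers are hypothetical): the
# dictionary produced by ViewBuilder.basic() for one instance looks like
#
#   {"server": {"id": "9f3c1ab2-...",
#               "name": "test-vm",
#               "links": [{"rel": "self", "href": "http://nova/v2.1/servers/9f3c1ab2-..."},
#                         {"rel": "bookmark", "href": "http://nova/servers/9f3c1ab2-..."}]}}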
|
cyx1231st/nova
|
nova/api/openstack/compute/views/servers.py
|
Python
|
apache-2.0
| 12,638
|
#
# Copyright 2001 - 2006 Ludek Smid [http://www.ospace.net/]
#
# This file is part of IGE - Outer Space.
#
# IGE - Outer Space is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# IGE - Outer Space is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with IGE - Outer Space; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
import pygameui as ui
import re
from osci import gdata, client, res
from ige.ospace.Const import *
from ige.ospace import Utils, Rules
import string, math, copy
class ProblemsDlg:
"""Displays 'Problem locator' dialog.
"""
def __init__(self, app):
self.app = app
self.createUI()
def display(self):
self.show()
self.win.show()
# register for updates
if self not in gdata.updateDlgs:
gdata.updateDlgs.append(self)
def hide(self):
self.win.setStatus(_("Ready."))
self.win.hide()
# unregister updates
if self in gdata.updateDlgs:
gdata.updateDlgs.remove(self)
def update(self):
self.show()
def show(self):
critical = self.win.vCritical.checked
major = self.win.vMajor.checked
minor = self.win.vMinor.checked
info = self.win.vInfo.checked
disp = 1
player = client.getPlayer()
items = []
# object list (all player's objects + systems)
objects = player.fleets[:]
objects += player.planets[:]
systems = {}
for planetID in player.planets:
planet = client.get(planetID)
if planet.compOf not in systems:
systems[planet.compOf] = None
objects += systems.keys()
# go through all objects
for objID in objects:
if objID < OID_FREESTART:
continue
obj = client.get(objID, noUpdate = 1)
if not hasattr(obj, "type"):
continue
if obj.type == T_SYSTEM:
if not hasattr(obj, 'planets'):
continue
bio = 0
totalBio = 0
en = 0
totalEn = 0
buildingQuantity = {}
buildingInfo = {}
# holds modified planets
planetCopies = {}
for planetID in obj.planets:
planet = client.get(planetID, noUpdate = 1)
# copy of planet to change plSlots count
if not planetID in planetCopies:
cPlanet = copy.deepcopy(planet)
planetCopies[planetID] = cPlanet
else:
cPlanet = planetCopies[planetID]
if hasattr(planet, 'owner') and planet.owner == player.oid:
# compute bio and en for system
bio += planet.changeBio
totalBio += max(0, planet.storBio - planet.minBio)
en += planet.changeEn
totalEn += max(0, planet.storEn - planet.minEn)
if hasattr(planet, 'prodQueue') and self.win.vPlanets.checked:
totalEtc = 0
# compute length of production queue
if cPlanet.prodQueue and cPlanet.effProdProd > 0:
for task in cPlanet.prodQueue:
if task.isShip:
tech = client.getPlayer().shipDesigns[task.techID]
else:
tech = client.getFullTechInfo(task.techID)
if tech.isStructure and hasattr(task, "demolishStruct") and task.demolishStruct == 0:
# total count of constructing buildings on target
if buildingQuantity.has_key(task.targetID):
buildingQuantity[task.targetID] += task.quantity
else:
buildingQuantity[task.targetID] = task.quantity
# information about source and target of constructing
if buildingInfo.has_key((planetID, task.targetID)):
buildingInfo[(planetID, task.targetID)] += task.quantity
else:
buildingInfo[(planetID, task.targetID)] = task.quantity
elif tech.isProject and tech.id == 3802:
# we are constructing Habitable Surface Expansion
# so after construction we got some new slots on planet
if not task.targetID in planetCopies:
targetPlanet = client.get(task.targetID, noUpdate = 1)
cPlanet = copy.deepcopy(targetPlanet)
planetCopies[task.targetID] = cPlanet
planetCopies[task.targetID].plSlots += task.quantity
if task.targetID != planetID:
totalEtc += math.ceil(float(tech.buildProd * Rules.buildOnAnotherPlanetMod - task.currProd) / planet.effProdProd)
totalEtc += math.ceil((task.quantity - 1) * float(tech.buildProd * Rules.buildOnAnotherPlanetMod) / planet.effProdProd)
else:
totalEtc += math.ceil(task.quantity * float(tech.buildProd - task.currProd) / planet.effProdProd)
totalEtc += math.ceil((task.quantity - 1) * float(tech.buildProd) / planet.effProdProd)
else:
totalEtc = 99999
# check empty production queue
if len(planet.prodQueue) == 0 and planet.effProdProd > 0 and critical:
items.append(ui.Item(planet.name, tOID = planetID, tType = T_PLANET,
foreground = gdata.sevColors[gdata.CRI],
vDescription = _('Production queue is empty.')))
# check end of production queue
if totalEtc < 48:
fgColor = None
disp = minor
if totalEtc < 24:
disp = major
fgColor = gdata.sevColors[gdata.MAJ]
if disp:
items.append(ui.Item(planet.name, tOID = planetID, tType = T_PLANET, foreground = fgColor,
vDescription = _('Production queue ends in %s turns, %d item(s) on list.') % (res.formatTime(totalEtc), len(planet.prodQueue))))
# check for structures status
if hasattr(planet, 'slots') and self.win.vPlanets.checked:
for struct in planet.slots:
status = struct[STRUCT_IDX_STATUS]
problem = None
tech = client.getFullTechInfo(struct[STRUCT_IDX_TECHID])
if hasattr(player, 'techs'):
techEff = Rules.techImprEff[player.techs.get(struct[STRUCT_IDX_TECHID], Rules.techBaseImprovement)]
else:
techEff = Rules.techImprEff[Rules.techBaseImprovement]
HPturn = max(1, int(0.02 * tech.maxHP * techEff))
turnsToDestroy = math.ceil(struct[STRUCT_IDX_HP] / HPturn)
if turnsToDestroy < 48:
dispDestr = major
fgColorDestr = gdata.sevColors[gdata.MAJ]
if turnsToDestroy < 24:
dispDestr = critical
fgColorDestr = gdata.sevColors[gdata.CRI]
else:
dispDestr = minor
fgColorDestr = None
if not status & STRUCT_STATUS_ON:
# structure is off
if dispDestr:
items.append(ui.Item(planet.name, tOID = planetID, tType = T_PLANET, foreground = fgColorDestr,
vDescription = _('Structure (%s) is off and will be destroyed in %s turns.') % (tech.name, res.formatTime(turnsToDestroy))))
elif status & STRUCT_STATUS_DETER:
problem = _('is deteriorating and will be destroyed in %s turns') % res.formatTime(turnsToDestroy)
disp = major
fgColor = gdata.sevColors[gdata.MAJ]
elif status & STRUCT_STATUS_NOBIO:
problem = _('has insufficiend supply of biomatter')
disp = info
fgColor = gdata.sevColors[gdata.INFO]
elif status & STRUCT_STATUS_NOEN:
problem = _('has insufficiend supply of energy')
disp = info
fgColor = gdata.sevColors[gdata.INFO]
elif status & STRUCT_STATUS_NOPOP:
problem = _('has insufficiend supply of workers')
disp = info
fgColor = gdata.sevColors[gdata.INFO]
elif status & STRUCT_STATUS_REPAIRING:
problem = _('is repairing')
disp = info
fgColor = gdata.sevColors[gdata.INFO]
if problem and disp:
items.append(ui.Item(planet.name, tOID = planetID, tType = T_PLANET, foreground = fgColor,
vDescription = _('Structure (%s) %s.') % (tech.name, problem)))
for planetID, quantity in buildingQuantity.items():
planet = planetCopies[planetID]
# test, if there is total quantity of building as target for this planet
if planet.plSlots < len(planet.slots) + quantity and major:
# walk infos and search for all planets, that are building
# on planet founded above
for (sourceID, targetID), quantity in buildingInfo.items():
if planetID == targetID:
source = client.get(sourceID, noUpdate = 1)
items.append(ui.Item(source.name, tOID = sourceID, tType = T_PLANET, foreground = gdata.sevColors[gdata.MAJ],
vDescription = _('There is no space for all constructed buildings on %s.') % (planet.name)))
# check bio for system
if bio < 0 and self.win.vSystems.checked:
disp = minor
fgColor = None
surplusTurns = totalBio / (-bio)
if surplusTurns < 168:
disp = major
fgColor = gdata.sevColors[gdata.MAJ]
if surplusTurns < 48:
disp = critical
fgColor = gdata.sevColors[gdata.CRI]
if disp:
if totalBio > 0:
items.append(ui.Item(obj.name, tOID = obj.oid, tType = T_SYSTEM, foreground = fgColor,
vDescription = _('Bio decreasing - last turn change %d, surplus %d (%s).') % (bio, totalBio,res.formatTime(surplusTurns))))
else:
items.append(ui.Item(obj.name, tOID = obj.oid, tType = T_SYSTEM, foreground = fgColor,
vDescription = _('Bio decreasing - last turn change %d, surplus %d.') % (bio, totalBio)))
#check en for system
if en < 0 and self.win.vSystems.checked:
disp = minor
fgColor = None
surplusTurns = totalEn / (-en)
if surplusTurns < 168:
disp = major
fgColor = gdata.sevColors[gdata.MAJ]
if surplusTurns < 48:
disp = critical
fgColor = gdata.sevColors[gdata.CRI]
if disp:
if totalEn > 0:
items.append(ui.Item(obj.name, tOID = obj.oid, tType = T_SYSTEM, foreground = fgColor,
vDescription = _('Energy decreasing - last turn change %d, surplus %d (%s).') % (en, totalEn,res.formatTime(surplusTurns))))
else:
items.append(ui.Item(obj.name, tOID = obj.oid, tType = T_SYSTEM, foreground = fgColor,
vDescription = _('Energy decreasing - last turn change %d, surplus %d.') % (en, totalEn)))
# check fleets
elif obj.type == T_FLEET and self.win.vFleets.checked:
if hasattr(obj, 'owner') and obj.owner == player.oid:
energyReserve = obj.storEn * 100 / obj.maxEn
system = None
disp = major
fgColor = gdata.sevColors[gdata.MAJ]
note = _(' and IS NOT refueling')
maxRefuelMax = 0
if hasattr(obj, 'orbiting') and obj.orbiting:
system = client.get(obj.orbiting, noUpdate = 1)
if hasattr(system, 'planets'):
for planetID in system.planets:
planet = client.get(planetID, noUpdate = 1)
if hasattr(planet, 'owner') and hasattr(planet, 'refuelMax'):
if player.diplomacyRels.has_key(planet.owner):
dipl = client.getDiplomacyWith(planet.owner)
if dipl.pacts.has_key(PACT_ALLOW_TANKING) and dipl.pacts[PACT_ALLOW_TANKING][0] == PACT_ACTIVE:
maxRefuelMax = max(maxRefuelMax, planet.refuelMax)
else:
if planet.owner == player.oid:
maxRefuelMax = max(maxRefuelMax, planet.refuelMax)
if maxRefuelMax > 0:
disp = info
fgColor = gdata.sevColors[gdata.INFO]
note = _(' and IS refueling')
if maxRefuelMax <= energyReserve:
                                note = _(' and CAN refuel, but has reached the maximum refuel amount of the planet')
else:
continue
systemName = res.getUnknownName()
if system and hasattr(system, "name"):
systemName = system.name
# check fleets energy reserve
if energyReserve < 50 and maxRefuelMax == 0:
disp = major
fgColor = gdata.sevColors[gdata.MAJ]
if energyReserve < 25 and maxRefuelMax == 0:
disp = critical
fgColor = gdata.sevColors[gdata.CRI]
else:
fgColor = gdata.sevColors[gdata.INFO]
disp = info
# is fleet named?
if hasattr(obj,'customname') and obj.customname:
name = obj.customname
else:
name = getattr(obj, "name", None)
if energyReserve == 100 and info and disp:
items.append(ui.Item(systemName, tOID = obj.oid, tType = T_FLEET,foreground = gdata.sevColors[gdata.INFO],
vDescription = _('Fleet "%s" has full fuel tanks.') % (name)))
elif disp:
items.append(ui.Item(systemName, tOID = obj.oid, tType = T_FLEET,foreground = fgColor,
vDescription = _('Fleet "%s" is low on fuel [%d %%]%s.') % (name, energyReserve, note)))
# check research queue
if self.win.vResearch.checked:
totalEtc = 0
# compute length of research queue
for task in player.rsrchQueue:
tech = client.getTechInfo(task.techID)
fulltech = client.getFullTechInfo(task.techID)
researchSci = Utils.getTechRCost(player, task.techID, task.improvement)
maxImprovement = min(Rules.techMaxImprovement,fulltech.maxImprovement)
maxImpTotalSci = 0
if task.improveToMax and task.improvement < maxImprovement:
for impr in range(task.improvement+1,maxImprovement+1):
maxImpTotalSci += Utils.getTechRCost(player, task.techID, impr)
if task.changeSci > 0:
value = float(researchSci - task.currSci) / max(task.changeSci, player.effSciPoints)
totalEtc += int(value + 1)
totalEtc += float(maxImpTotalSci) / player.effSciPoints
elif task.changeSci < 0:
totalEtc -= float(task.currSci) / min(task.changeSci, player.effSciPoints)
elif player.effSciPoints > 0:
value = float(researchSci) / player.effSciPoints
totalEtc += int(value + 1)
totalEtc += float(maxImpTotalSci) / player.effSciPoints
else:
totalEtc = 99999
break
# check empty research queue
if totalEtc == 0 and len(player.rsrchQueue) == 0 and player.effSciPoints > 0 and major:
items.append(ui.Item(_('Research'), tType = T_TECHNOLOGY, foreground = gdata.sevColors[gdata.MAJ],
vDescription = _('Research queue is empty.')))
            # check short research queue
elif totalEtc < 48:
disp = minor
fgColor = None
if totalEtc < 24:
disp = major
fgColor = gdata.sevColors[gdata.MAJ]
if disp:
items.append(ui.Item(_('Research'), tType = T_TECHNOLOGY, foreground = fgColor,
vDescription = _('Research queue ends in %s turns, %d item(s) on list.') % (res.formatTime(totalEtc), len(player.rsrchQueue))))
self.win.vProblems.items = items
self.win.vProblems.itemsChanged()
def onClose(self, widget, action, data):
self.hide()
def onShowSource(self, widget, action, data):
item = self.win.vProblems.selection[0]
if item.tType == T_FLEET:
object = client.get(item.tOID, noUpdate = 1)
# center on map
if hasattr(object, "x"):
gdata.mainGameDlg.win.vStarMap.highlightPos = (object.x, object.y)
gdata.mainGameDlg.win.vStarMap.setPos(object.x, object.y)
self.hide()
return
elif item.tType in (T_SYSTEM, T_PLANET):
if item.tOID != OID_NONE:
gdata.mainGameDlg.onSelectMapObj(None, None, item.tOID)
return
elif item.tType == T_TECHNOLOGY:
gdata.mainGameDlg.researchDlg.display()
return
self.win.setStatus(_("Cannot show location."))
def onShowLocation(self, widget, action, data):
item = self.win.vProblems.selection[0]
if item.tType in (T_SYSTEM, T_PLANET, T_FLEET):
object = client.get(item.tOID, noUpdate = 1)
# center on map
if hasattr(object, "x"):
gdata.mainGameDlg.win.vStarMap.highlightPos = (object.x, object.y)
gdata.mainGameDlg.win.vStarMap.setPos(object.x, object.y)
self.hide()
return
self.win.setStatus(_("Cannot show location."))
def onToggleCondition(self, widget, action, data):
self.update()
def createUI(self):
screenWidth, screenHeight = gdata.scrnSize
# size of dialog in layout metrics (for SimpleGridLM)
cols = 40
rows = 29
# dialog width and height in pixels
isSmallWin = screenHeight == 600 and screenWidth == 800
width = cols * 20 + 4 * (not isSmallWin)
height = rows * 20 + 4 * (not isSmallWin)
#creating dialog window
self.win = ui.Window(self.app,
modal = 1,
escKeyClose = 1,
movable = 0,
title = _("Problems Locator"),
titleOnly = isSmallWin,
#rect = ui.Rect((screenWidth - width) / 2, ((screenHeight - height) / 2) * (not isSmallWin), width, height),
rect = ui.Rect((screenWidth - 800 - 4 * (not isSmallWin)) / 2, (screenHeight - 600 - 4 * (not isSmallWin)) / 2, width, height),
layoutManager = ui.SimpleGridLM(),
)
self.win.subscribeAction('*', self)
# first row is window title
rows -= 1
ui.Listbox(self.win, layout = (0, 0, cols, rows - 2), id = 'vProblems',
columns = [(_('System'), 'text', 10, ui.ALIGN_W),
(_('Problem description'), 'vDescription', 30, ui.ALIGN_W)],
columnLabels = 1, action='onShowSource', rmbAction='onShowLocation')
btnWidth = 4
ui.Check(self.win, layout = (btnWidth * 0, rows - 2, btnWidth, 1), id = 'vSystems',
text = _('Systems'), action = 'onToggleCondition', checked = 1)
ui.Check(self.win, layout = (btnWidth * 1, rows - 2, btnWidth, 1), id = 'vPlanets',
text = _('Planets'), action = 'onToggleCondition', checked = 1)
ui.Check(self.win, layout = (btnWidth * 2, rows - 2, btnWidth, 1), id = 'vFleets',
text = _('Fleets'), action = 'onToggleCondition', checked = 1)
ui.Check(self.win, layout = (btnWidth * 3, rows - 2, btnWidth, 1), id = 'vResearch',
text = _('Research'), action = 'onToggleCondition', checked = 1)
ui.Check(self.win, layout = (btnWidth * 6, rows - 2, btnWidth, 1), id = 'vCritical',
text = _('Critical'), action = 'onToggleCondition', checked = 1)
ui.Check(self.win, layout = (btnWidth * 7, rows - 2, btnWidth, 1), id = 'vMajor',
text = _('Major'), action = 'onToggleCondition', checked = 1)
ui.Check(self.win, layout = (btnWidth * 8, rows - 2, btnWidth, 1), id = 'vMinor',
text = _('Minor'), action = 'onToggleCondition', checked = 1)
ui.Check(self.win, layout = (btnWidth * 9, rows - 2, btnWidth, 1), id = 'vInfo',
text = _('Info'), action = 'onToggleCondition', checked = 0)
# dialog bottom line
ui.Title(self.win, layout = (0, rows - 1, cols - 5, 1))
ui.TitleButton(self.win, layout = (cols - 5, rows - 1, 5, 1), text = _("Close"), action = 'onClose')
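# A minimal usage sketch (assumed, not part of the original file): the dialog
# is constructed once with the running pygameui app and shown on demand.
#
#     dlg = ProblemsDlg(app)
#     dlg.display()   # also registers the dialog in gdata.updateDlgs
#     ...
#     dlg.hide()      # unregisters it and resets the status line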
|
mozts2005/OuterSpace
|
client-pygame/lib/osci/dialog/ProblemsDlg.py
|
Python
|
gpl-2.0
| 18,889
|
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import difflib
class RecommendationFinder(object):
def __init__(self, normalizer=None):
self.normalizer = normalizer or (lambda x: x)
def find_recommendations(self, name, candidates, max_matches=10):
"""Return a list of close matches to `name` from `candidates`."""
if not name or not candidates:
return []
norm_name = self.normalizer(name)
norm_candidates = self._get_normalized_candidates(candidates)
cutoff = self._calculate_cutoff(norm_name)
norm_matches = difflib.get_close_matches(norm_name,
norm_candidates,
n=max_matches,
cutoff=cutoff)
return self._get_original_candidates(norm_candidates, norm_matches)
@staticmethod
def format_recommendations(msg, recommendations):
"""Add recommendations to the given message.
The recommendation string looks like:
<msg> Did you mean:
<recommendations[0]>
<recommendations[1]>
<recommendations[2]>
"""
if recommendations:
msg += " Did you mean:"
for rec in recommendations:
msg += "\n %s" % rec
return msg
def _get_normalized_candidates(self, candidates):
norm_candidates = {}
# sort before normalization for consistent Python/Jython ordering
for cand in sorted(candidates):
norm = self.normalizer(cand)
norm_candidates.setdefault(norm, []).append(cand)
return norm_candidates
def _get_original_candidates(self, norm_candidates, norm_matches):
candidates = []
for norm_match in norm_matches:
candidates.extend(norm_candidates[norm_match])
return candidates
def _calculate_cutoff(self, string, min_cutoff=.5, max_cutoff=.85,
step=.03):
"""Calculate a cutoff depending on string length.
Default values determined by manual tuning until the results
"look right".
"""
cutoff = min_cutoff + len(string) * step
return min(cutoff, max_cutoff)
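# A usage sketch (assumed; the keyword and candidate names are made up).
# With a case-insensitive normalizer, a close misspelling is recovered:
#
#     finder = RecommendationFinder(normalizer=lambda s: s.lower())
#     matches = finder.find_recommendations(
#         'Lst Apend', ['List Append', 'List Insert', 'Log'])
#     print(finder.format_recommendations(
#         "Keyword 'Lst Apend' not found.", matches))
#     # -> Keyword 'Lst Apend' not found. Did you mean:
#     #        List Append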
|
alexandrul-ci/robotframework
|
src/robot/utils/recommendations.py
|
Python
|
apache-2.0
| 2,883
|
from __future__ import unicode_literals
from postman.models import Message
def inbox(request):
"""Provide the count of unread messages for an authenticated user."""
if request.user.is_authenticated():
return {'postman_unread_count': Message.objects.inbox_unread_count(request.user)}
else:
return {}
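# A configuration sketch (assumed, not part of the original file): the
# processor only takes effect once it is registered in the Django template
# settings, e.g.:
#
#     TEMPLATE_CONTEXT_PROCESSORS += ('postman.context_processors.inbox',)
#
# Templates can then render {{ postman_unread_count }} for logged-in users.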
|
hzlf/openbroadcast
|
website/apps/postman/context_processors.py
|
Python
|
gpl-3.0
| 330
|
__author__ = 'gavin'
from nltk import word_tokenize
from nltk.stem import PorterStemmer
class Tokenizer(object):
def __init__(self):
self.stemmer = PorterStemmer()
def __call__(self, doc):
return [self.stemmer.stem(token) for token in word_tokenize(doc)]
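# A usage sketch (assumes the NLTK 'punkt' tokenizer data is available, e.g.
# after nltk.download('punkt'); note that Porter stemming also lowercases):
#
#     >>> Tokenizer()('The foxes were running')
#     ['the', 'fox', 'were', 'run']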
|
moonbury/notebooks
|
github/MasteringMLWithScikit-learn/8365OS_04_Codes/tokenizer.py
|
Python
|
gpl-3.0
| 282
|
#!/usr/bin/env python
# Author: Shao Zhang and Phil Saltzman
# Last Updated: 2015-03-13
#
# This tutorial is intended as an initial Panda scripting lesson going over
# display initialization, loading models, placing objects, and the scene graph.
#
# Step 3: In this step, we create a function called loadPlanets, which will
# eventually be used to load all of the planets in our simulation. For now
# we will load just the sun and the sky-sphere we use to create the
# star-field.
from direct.showbase.ShowBase import ShowBase
base = ShowBase()
from panda3d.core import NodePath, TextNode
from direct.gui.DirectGui import *
import sys
class World(object):
def __init__(self):
# This is the initialization we had before
self.title = OnscreenText( # Create the title
text="Panda3D: Tutorial 1 - Solar System",
parent=base.a2dBottomRight, align=TextNode.A_right,
style=1, fg=(1, 1, 1, 1), pos=(-0.1, 0.1), scale=.07)
base.setBackgroundColor(0, 0, 0) # Set the background to black
base.disableMouse() # disable mouse control of the camera
camera.setPos(0, 0, 45) # Set the camera position (X, Y, Z)
camera.setHpr(0, -90, 0) # Set the camera orientation
#(heading, pitch, roll) in degrees
# We will now define a variable to help keep a consistent scale in
# our model. As we progress, we will continue to add variables here as we
# need them
# The value of this variable scales the size of the planets. True scale size
# would be 1
self.sizescale = 0.6
# Now that we have finished basic initialization, we call loadPlanets which
# will handle actually getting our objects in the world
self.loadPlanets()
def loadPlanets(self):
# Here, inside our class, is where we are creating the loadPlanets function
# For now we are just loading the star-field and sun. In the next step we
# will load all of the planets
# Loading objects in Panda is done via the command loader.loadModel, which
# takes one argument, the path to the model file. Models in Panda come in
# two types, .egg (which is readable in a text editor), and .bam (which is
# not readable but makes smaller files). When you load a file you leave the
# extension off so that it can choose the right version
# Load model returns a NodePath, which you can think of as an object
# containing your model
# Here we load the sky model. For all the planets we will use the same
# sphere model and simply change textures. However, even though the sky is
# a sphere, it is different from the planet model because its polygons
#(which are always one-sided in Panda) face inside the sphere instead of
# outside (this is known as a model with reversed normals). Because of
# that it has to be a separate model.
self.sky = loader.loadModel("models/solar_sky_sphere")
# After the object is loaded, it must be placed in the scene. We do this by
# changing the parent of self.sky to render, which is a special NodePath.
# Each frame, Panda starts with render and renders everything attached to
# it.
self.sky.reparentTo(render)
# You can set the position, orientation, and scale on a NodePath the same
# way that you set those properties on the camera. In fact, the camera is
# just another special NodePath
self.sky.setScale(40)
# Very often, the egg file will know what textures are needed and load them
# automatically. But sometimes we want to set our textures manually, (for
# instance we want to put different textures on the same planet model)
# Loading textures works the same way as loading models, but instead of
# calling loader.loadModel, we call loader.loadTexture
self.sky_tex = loader.loadTexture("models/stars_1k_tex.jpg")
# Finally, the following line sets our new sky texture on our sky model.
# The second argument must be one or the command will be ignored.
self.sky.setTexture(self.sky_tex, 1)
# Now we load the sun.
self.sun = loader.loadModel("models/planet_sphere")
# Now we repeat our other steps
self.sun.reparentTo(render)
self.sun_tex = loader.loadTexture("models/sun_1k_tex.jpg")
self.sun.setTexture(self.sun_tex, 1)
# The sun is really much bigger than
self.sun.setScale(2 * self.sizescale)
# this, but to be able to see the
# planets we're making it smaller
# end loadPlanets()
# end class world
# instantiate the class
w = World()
base.run()
|
brakhane/panda3d
|
samples/solar-system/step3_load_model.py
|
Python
|
bsd-3-clause
| 4,775
|
from __future__ import division
import numpy as np
#TODO: embed for FitHistogramPeaks
def poisson(x, a, b, c, d=0):
'''
Poisson function
a -> height of the curve's peak
b -> position of the center of the peak
c -> standard deviation
d -> offset
'''
    from scipy.special import factorial  # lazy import to save startup time
lamb = 1
X = (x/(2*c)).astype(int)
return a * (( lamb**X/factorial(X)) * np.exp(-lamb) ) +d
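# A usage sketch (hypothetical values):
#
#     x = np.arange(0, 10, 0.5)
#     y = poisson(x, a=5, b=0, c=1, d=0.1)
#
# Note that `b` (the peak position) is accepted for signature compatibility
# but is not used in the computation above.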
|
radjkarl/imgProcessor
|
imgProcessor/equations/poisson.py
|
Python
|
gpl-3.0
| 454
|
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_firewall_interface_policy6
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_firewall_interface_policy6.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_firewall_interface_policy6_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_interface_policy6': {
'address_type': 'ipv4',
'application_list': 'test_value_4',
'application_list_status': 'enable',
'av_profile': 'test_value_6',
'av_profile_status': 'enable',
'comments': 'test_value_8',
'dlp_sensor': 'test_value_9',
'dlp_sensor_status': 'enable',
'dsri': 'enable',
'interface': 'test_value_12',
'ips_sensor': 'test_value_13',
'ips_sensor_status': 'enable',
'label': 'test_value_15',
'logtraffic': 'all',
'policyid': '17',
'scan_botnet_connections': 'disable',
'spamfilter_profile': 'test_value_19',
'spamfilter_profile_status': 'enable',
'status': 'enable',
'webfilter_profile': 'test_value_22',
'webfilter_profile_status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_interface_policy6.fortios_firewall(input_data, fos_instance)
expected_data = {
'address-type': 'ipv4',
'application-list': 'test_value_4',
'application-list-status': 'enable',
'av-profile': 'test_value_6',
'av-profile-status': 'enable',
'comments': 'test_value_8',
'dlp-sensor': 'test_value_9',
'dlp-sensor-status': 'enable',
'dsri': 'enable',
'interface': 'test_value_12',
'ips-sensor': 'test_value_13',
'ips-sensor-status': 'enable',
'label': 'test_value_15',
'logtraffic': 'all',
'policyid': '17',
'scan-botnet-connections': 'disable',
'spamfilter-profile': 'test_value_19',
'spamfilter-profile-status': 'enable',
'status': 'enable',
'webfilter-profile': 'test_value_22',
'webfilter-profile-status': 'enable'
}
set_method_mock.assert_called_with('firewall', 'interface-policy6', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_firewall_interface_policy6_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_interface_policy6': {
'address_type': 'ipv4',
'application_list': 'test_value_4',
'application_list_status': 'enable',
'av_profile': 'test_value_6',
'av_profile_status': 'enable',
'comments': 'test_value_8',
'dlp_sensor': 'test_value_9',
'dlp_sensor_status': 'enable',
'dsri': 'enable',
'interface': 'test_value_12',
'ips_sensor': 'test_value_13',
'ips_sensor_status': 'enable',
'label': 'test_value_15',
'logtraffic': 'all',
'policyid': '17',
'scan_botnet_connections': 'disable',
'spamfilter_profile': 'test_value_19',
'spamfilter_profile_status': 'enable',
'status': 'enable',
'webfilter_profile': 'test_value_22',
'webfilter_profile_status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_interface_policy6.fortios_firewall(input_data, fos_instance)
expected_data = {
'address-type': 'ipv4',
'application-list': 'test_value_4',
'application-list-status': 'enable',
'av-profile': 'test_value_6',
'av-profile-status': 'enable',
'comments': 'test_value_8',
'dlp-sensor': 'test_value_9',
'dlp-sensor-status': 'enable',
'dsri': 'enable',
'interface': 'test_value_12',
'ips-sensor': 'test_value_13',
'ips-sensor-status': 'enable',
'label': 'test_value_15',
'logtraffic': 'all',
'policyid': '17',
'scan-botnet-connections': 'disable',
'spamfilter-profile': 'test_value_19',
'spamfilter-profile-status': 'enable',
'status': 'enable',
'webfilter-profile': 'test_value_22',
'webfilter-profile-status': 'enable'
}
set_method_mock.assert_called_with('firewall', 'interface-policy6', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_firewall_interface_policy6_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'firewall_interface_policy6': {
'address_type': 'ipv4',
'application_list': 'test_value_4',
'application_list_status': 'enable',
'av_profile': 'test_value_6',
'av_profile_status': 'enable',
'comments': 'test_value_8',
'dlp_sensor': 'test_value_9',
'dlp_sensor_status': 'enable',
'dsri': 'enable',
'interface': 'test_value_12',
'ips_sensor': 'test_value_13',
'ips_sensor_status': 'enable',
'label': 'test_value_15',
'logtraffic': 'all',
'policyid': '17',
'scan_botnet_connections': 'disable',
'spamfilter_profile': 'test_value_19',
'spamfilter_profile_status': 'enable',
'status': 'enable',
'webfilter_profile': 'test_value_22',
'webfilter_profile_status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_interface_policy6.fortios_firewall(input_data, fos_instance)
delete_method_mock.assert_called_with('firewall', 'interface-policy6', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_firewall_interface_policy6_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'firewall_interface_policy6': {
'address_type': 'ipv4',
'application_list': 'test_value_4',
'application_list_status': 'enable',
'av_profile': 'test_value_6',
'av_profile_status': 'enable',
'comments': 'test_value_8',
'dlp_sensor': 'test_value_9',
'dlp_sensor_status': 'enable',
'dsri': 'enable',
'interface': 'test_value_12',
'ips_sensor': 'test_value_13',
'ips_sensor_status': 'enable',
'label': 'test_value_15',
'logtraffic': 'all',
'policyid': '17',
'scan_botnet_connections': 'disable',
'spamfilter_profile': 'test_value_19',
'spamfilter_profile_status': 'enable',
'status': 'enable',
'webfilter_profile': 'test_value_22',
'webfilter_profile_status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_interface_policy6.fortios_firewall(input_data, fos_instance)
delete_method_mock.assert_called_with('firewall', 'interface-policy6', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_firewall_interface_policy6_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_interface_policy6': {
'address_type': 'ipv4',
'application_list': 'test_value_4',
'application_list_status': 'enable',
'av_profile': 'test_value_6',
'av_profile_status': 'enable',
'comments': 'test_value_8',
'dlp_sensor': 'test_value_9',
'dlp_sensor_status': 'enable',
'dsri': 'enable',
'interface': 'test_value_12',
'ips_sensor': 'test_value_13',
'ips_sensor_status': 'enable',
'label': 'test_value_15',
'logtraffic': 'all',
'policyid': '17',
'scan_botnet_connections': 'disable',
'spamfilter_profile': 'test_value_19',
'spamfilter_profile_status': 'enable',
'status': 'enable',
'webfilter_profile': 'test_value_22',
'webfilter_profile_status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_interface_policy6.fortios_firewall(input_data, fos_instance)
expected_data = {
'address-type': 'ipv4',
'application-list': 'test_value_4',
'application-list-status': 'enable',
'av-profile': 'test_value_6',
'av-profile-status': 'enable',
'comments': 'test_value_8',
'dlp-sensor': 'test_value_9',
'dlp-sensor-status': 'enable',
'dsri': 'enable',
'interface': 'test_value_12',
'ips-sensor': 'test_value_13',
'ips-sensor-status': 'enable',
'label': 'test_value_15',
'logtraffic': 'all',
'policyid': '17',
'scan-botnet-connections': 'disable',
'spamfilter-profile': 'test_value_19',
'spamfilter-profile-status': 'enable',
'status': 'enable',
'webfilter-profile': 'test_value_22',
'webfilter-profile-status': 'enable'
}
set_method_mock.assert_called_with('firewall', 'interface-policy6', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_firewall_interface_policy6_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'firewall_interface_policy6': {
'random_attribute_not_valid': 'tag',
'address_type': 'ipv4',
'application_list': 'test_value_4',
'application_list_status': 'enable',
'av_profile': 'test_value_6',
'av_profile_status': 'enable',
'comments': 'test_value_8',
'dlp_sensor': 'test_value_9',
'dlp_sensor_status': 'enable',
'dsri': 'enable',
'interface': 'test_value_12',
'ips_sensor': 'test_value_13',
'ips_sensor_status': 'enable',
'label': 'test_value_15',
'logtraffic': 'all',
'policyid': '17',
'scan_botnet_connections': 'disable',
'spamfilter_profile': 'test_value_19',
'spamfilter_profile_status': 'enable',
'status': 'enable',
'webfilter_profile': 'test_value_22',
'webfilter_profile_status': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_firewall_interface_policy6.fortios_firewall(input_data, fos_instance)
expected_data = {
'address-type': 'ipv4',
'application-list': 'test_value_4',
'application-list-status': 'enable',
'av-profile': 'test_value_6',
'av-profile-status': 'enable',
'comments': 'test_value_8',
'dlp-sensor': 'test_value_9',
'dlp-sensor-status': 'enable',
'dsri': 'enable',
'interface': 'test_value_12',
'ips-sensor': 'test_value_13',
'ips-sensor-status': 'enable',
'label': 'test_value_15',
'logtraffic': 'all',
'policyid': '17',
'scan-botnet-connections': 'disable',
'spamfilter-profile': 'test_value_19',
'spamfilter-profile-status': 'enable',
'status': 'enable',
'webfilter-profile': 'test_value_22',
'webfilter-profile-status': 'enable'
}
set_method_mock.assert_called_with('firewall', 'interface-policy6', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
|
thaim/ansible
|
test/units/modules/network/fortios/test_fortios_firewall_interface_policy6.py
|
Python
|
mit
| 16,051
|
#!/usr/bin/env python
"""
Usage: ./scripts/list_docs_report.sh | ./scripts/replace_labels.py --add docs --remove docs_report
"""
import argparse
import json
import sys
import requests
import ansibullbot.constants as C
HEADERS = {'Authorization': 'token %s' % C.DEFAULT_GITHUB_TOKEN}
ISSUE_URL_FMT = 'https://api.github.com/repos/ansible/ansible/issues/{}'
LABEL_URL_FMT = 'https://api.github.com/repos/ansible/ansible/issues/{}/labels{}'
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--add', dest='add_label', action='store')
parser.add_argument('-r', '--remove', dest='remove_label', action='store')
args = parser.parse_args()
return args
def main():
args = parse_args()
lines = sys.stdin.readlines()
for line in lines:
numbers = json.loads(line)
numbers = sorted(set(numbers))
for number in numbers:
print(number)
iurl = ISSUE_URL_FMT.format(number)
ir = requests.get(iurl, headers=HEADERS)
idata = ir.json()
try:
labels = [l['name'] for l in idata['labels']]
except KeyError:
continue
if args.remove_label in labels:
url = LABEL_URL_FMT.format(number, '/'+args.remove_label)
r = requests.delete(url, headers=HEADERS)
if r.status_code != 200:
import epdb; epdb.st()
if args.add_label not in labels:
url = LABEL_URL_FMT.format(number, '')
r = requests.post(url, data=json.dumps([args.add_label]), headers=HEADERS)
if r.status_code != 200:
import epdb; epdb.st()
if __name__ == "__main__":
main()
|
jctanner/ansibullbot
|
scripts/replace_labels.py
|
Python
|
gpl-3.0
| 1,793
|
#!/usr/bin/env python
"""
Unit tests for the main Battleship Algorithms functionality.
"""
import unittest
import tempfile
import logging
from battleship import main
from battleship import settings
class TestMain(unittest.TestCase): # pylint: disable=R0904
"""Unit tests for the main module."""
def test_run(self):
"""Verify simulations can be run with graph generation."""
temp = tempfile.NamedTemporaryFile()
self.assertTrue(main.run([0, 1], 2, graph_path=temp.name))
def test_run_logging(self):
"""Verify simulations can be run with sample generation."""
temp = tempfile.NamedTemporaryFile()
self.assertTrue(main.run([1], 1, sample_path=temp.name))
def test_run_invalid(self):
"""Verify sample genreation can only be performed on a single game."""
temp = tempfile.NamedTemporaryFile()
self.assertFalse(main.run([1, 2, 3], 1, sample_path=temp.name))
if __name__ == '__main__':
logging.basicConfig(format=settings.VERBOSE_LOGGING_FORMAT, level=settings.VERBOSE_LOGGING_LEVEL)
unittest.main()
|
jacebrowning/battleship
|
battleship/test/test_main.py
|
Python
|
lgpl-3.0
| 1,100
|
# -*- coding: utf-8 -*-
"""
sphinx.domains
~~~~~~~~~~~~~~
Support for domains, which are groupings of description directives
and roles describing e.g. constructs of one programming language.
:copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from sphinx.errors import SphinxError
from sphinx.locale import _
class ObjType(object):
"""
An ObjType is the description for a type of object that a domain can
document. In the object_types attribute of Domain subclasses, object type
names are mapped to instances of this class.
Constructor arguments:
- *lname*: localized name of the type (do not include domain name)
- *roles*: all the roles that can refer to an object of this type
- *attrs*: object attributes -- currently only "searchprio" is known,
which defines the object's priority in the full-text search index,
see :meth:`Domain.get_objects()`.
"""
known_attrs = {
'searchprio': 1,
}
def __init__(self, lname, *roles, **attrs):
self.lname = lname
self.roles = roles
self.attrs = self.known_attrs.copy()
self.attrs.update(attrs)
class Index(object):
"""
An Index is the description for a domain-specific index. To add an index to
a domain, subclass Index, overriding the three name attributes:
* `name` is an identifier used for generating file names.
* `localname` is the section title for the index.
* `shortname` is a short name for the index, for use in the relation bar in
HTML output. Can be empty to disable entries in the relation bar.
and providing a :meth:`generate()` method. Then, add the index class to
your domain's `indices` list. Extensions can add indices to existing
domains using :meth:`~sphinx.application.Sphinx.add_index_to_domain()`.
"""
name = None
localname = None
shortname = None
def __init__(self, domain):
if self.name is None or self.localname is None:
raise SphinxError('Index subclass %s has no valid name or localname'
% self.__class__.__name__)
self.domain = domain
def generate(self, docnames=None):
"""Return entries for the index given by *name*. If *docnames* is
given, restrict to entries referring to these docnames.
The return value is a tuple of ``(content, collapse)``, where *collapse*
is a boolean that determines if sub-entries should start collapsed (for
output formats that support collapsing sub-entries).
*content* is a sequence of ``(letter, entries)`` tuples, where *letter*
is the "heading" for the given *entries*, usually the starting letter.
*entries* is a sequence of single entries, where a single entry is a
sequence ``[name, subtype, docname, anchor, extra, qualifier, descr]``.
The items in this sequence have the following meaning:
- `name` -- the name of the index entry to be displayed
- `subtype` -- sub-entry related type:
0 -- normal entry
1 -- entry with sub-entries
2 -- sub-entry
- `docname` -- docname where the entry is located
- `anchor` -- anchor for the entry within `docname`
- `extra` -- extra info for the entry
- `qualifier` -- qualifier for the description
- `descr` -- description for the entry
Qualifier and description are not rendered e.g. in LaTeX output.
"""
return []
class Domain(object):
"""
A Domain is meant to be a group of "object" description directives for
objects of a similar nature, and corresponding roles to create references to
them. Examples would be Python modules, classes, functions etc., elements
of a templating language, Sphinx roles and directives, etc.
Each domain has a separate storage for information about existing objects
and how to reference them in `self.data`, which must be a dictionary. It
also must implement several functions that expose the object information in
a uniform way to parts of Sphinx that allow the user to reference or search
for objects in a domain-agnostic way.
About `self.data`: since all object and cross-referencing information is
stored on a BuildEnvironment instance, the `domain.data` object is also
stored in the `env.domaindata` dict under the key `domain.name`. Before the
build process starts, every active domain is instantiated and given the
environment object; the `domaindata` dict must then either be nonexistent or
a dictionary whose 'version' key is equal to the domain class'
:attr:`data_version` attribute. Otherwise, `IOError` is raised and the
pickled environment is discarded.
"""
#: domain name: should be short, but unique
name = ''
#: domain label: longer, more descriptive (used in messages)
label = ''
#: type (usually directive) name -> ObjType instance
object_types = {}
#: directive name -> directive class
directives = {}
#: role name -> role callable
roles = {}
#: a list of Index subclasses
indices = []
#: role name -> a warning message if reference is missing
dangling_warnings = {}
#: data value for a fresh environment
initial_data = {}
#: data version, bump this when the format of `self.data` changes
data_version = 0
def __init__(self, env):
self.env = env
if self.name not in env.domaindata:
assert isinstance(self.initial_data, dict)
new_data = self.initial_data.copy()
new_data['version'] = self.data_version
self.data = env.domaindata[self.name] = new_data
else:
self.data = env.domaindata[self.name]
if self.data['version'] != self.data_version:
raise IOError('data of %r domain out of date' % self.label)
self._role_cache = {}
self._directive_cache = {}
self._role2type = {}
for name, obj in self.object_types.iteritems():
for rolename in obj.roles:
self._role2type.setdefault(rolename, []).append(name)
self.objtypes_for_role = self._role2type.get
def role(self, name):
"""Return a role adapter function that always gives the registered
role its full name ('domain:name') as the first argument.
"""
if name in self._role_cache:
return self._role_cache[name]
if name not in self.roles:
return None
fullname = '%s:%s' % (self.name, name)
def role_adapter(typ, rawtext, text, lineno, inliner,
options={}, content=[]):
return self.roles[name](fullname, rawtext, text, lineno,
inliner, options, content)
self._role_cache[name] = role_adapter
return role_adapter
def directive(self, name):
"""Return a directive adapter class that always gives the registered
directive its full name ('domain:name') as ``self.name``.
"""
if name in self._directive_cache:
return self._directive_cache[name]
if name not in self.directives:
return None
fullname = '%s:%s' % (self.name, name)
BaseDirective = self.directives[name]
class DirectiveAdapter(BaseDirective):
def run(self):
self.name = fullname
return BaseDirective.run(self)
self._directive_cache[name] = DirectiveAdapter
return DirectiveAdapter
# methods that should be overwritten
def clear_doc(self, docname):
"""Remove traces of a document in the domain-specific inventories."""
pass
def process_doc(self, env, docname, document):
"""Process a document after it is read by the environment."""
pass
def resolve_xref(self, env, fromdocname, builder,
typ, target, node, contnode):
"""Resolve the pending_xref *node* with the given *typ* and *target*.
This method should return a new node, to replace the xref node,
containing the *contnode* which is the markup content of the
cross-reference.
        If no resolution can be found, None can be returned; the xref node will
        then be given to the 'missing-reference' event, and if that yields no
resolution, replaced by *contnode*.
The method can also raise :exc:`sphinx.environment.NoUri` to suppress
the 'missing-reference' event being emitted.
"""
pass
def get_objects(self):
"""Return an iterable of "object descriptions", which are tuples with
five items:
* `name` -- fully qualified name
* `dispname` -- name to display when searching/linking
* `type` -- object type, a key in ``self.object_types``
* `docname` -- the document where it is to be found
* `anchor` -- the anchor name for the object
* `priority` -- how "important" the object is (determines placement
in search results)
- 1: default priority (placed before full-text matches)
- 0: object is important (placed before default-priority objects)
- 2: object is unimportant (placed after full-text matches)
- -1: object should not show up in search at all
"""
return []
def get_type_name(self, type, primary=False):
"""Return full name for given ObjType."""
if primary:
return type.lname
return _('%s %s') % (self.label, type.lname)
from sphinx.domains.c import CDomain
from sphinx.domains.cpp import CPPDomain
from sphinx.domains.std import StandardDomain
from sphinx.domains.python import PythonDomain
from sphinx.domains.javascript import JavaScriptDomain
from sphinx.domains.rst import ReSTDomain
BUILTIN_DOMAINS = {
'std': StandardDomain,
'py': PythonDomain,
'c': CDomain,
'cpp': CPPDomain,
'js': JavaScriptDomain,
'rst': ReSTDomain,
}
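# A minimal sketch (hypothetical, not part of Sphinx) of a custom domain:
# subclasses mostly fill in the class attributes and override the
# inventory-related methods.
#
#     class RecipeDomain(Domain):
#         name = 'recipe'
#         label = 'Cooking Recipe'
#         initial_data = {'objects': {}}  # name -> (docname, anchor)
#         def clear_doc(self, docname):
#             for key, (doc, _anchor) in self.data['objects'].items():
#                 if doc == docname:
#                     del self.data['objects'][key]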
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/sphinx/domains/__init__.py
|
Python
|
agpl-3.0
| 10,141
|
from django.conf.urls import url
import zerver.views
import zerver.views.streams
import zerver.views.invite
import zerver.views.user_settings
import zerver.views.auth
import zerver.views.tutorial
import zerver.views.report
import zerver.views.upload
import zerver.views.messages
import zerver.views.muting
# Future endpoints should add to urls.py, which includes these legacy urls
legacy_urls = [
# These are json format views used by the web client. They require a logged in browser.
# We should remove this endpoint and all code related to it.
# It returns a 404 if the stream doesn't exist, which is confusing
# for devs, and I don't think we need to go to the server
# any more to find out about subscriptions, since they are already
# pushed to us via the event system.
url(r'^json/subscriptions/exists$', zerver.views.streams.json_stream_exists),
url(r'^json/fetch_api_key$', zerver.views.auth.json_fetch_api_key),
# This old-style tutorial is due to be eliminated soon.
url(r'^json/tutorial_send_message$', zerver.views.tutorial.json_tutorial_send_message),
url(r'^json/tutorial_status$', zerver.views.tutorial.json_tutorial_status),
# A version of these reporting views may make sense to support in
# the API for getting mobile analytics, but we may want something
# totally different.
url(r'^json/report_error$', zerver.views.report.json_report_error),
url(r'^json/report_send_time$', zerver.views.report.json_report_send_time),
url(r'^json/report_narrow_time$', zerver.views.report.json_report_narrow_time),
url(r'^json/report_unnarrow_time$', zerver.views.report.json_report_unnarrow_time),
]
|
vaidap/zulip
|
zproject/legacy_urls.py
|
Python
|
apache-2.0
| 1,681
|
from os.path import join
from pythonforandroid.recipe import CompiledComponentsPythonRecipe
from pythonforandroid.toolchain import current_directory
class Pygame2Recipe(CompiledComponentsPythonRecipe):
version = "2.0.0-dev7"
url = "https://github.com/pygame/pygame/archive/android-2.0.0-dev7.tar.gz"
site_packages_name = "pygame"
name = "pygame"
depends = [
"sdl2",
"sdl2_image",
"sdl2_mixer",
"sdl2_ttf",
"setuptools",
"jpeg",
"png",
]
call_hostpython_via_targetpython = False # Due to setuptools
install_in_hostpython = False
def prebuild_arch(self, arch):
super().prebuild_arch(arch)
with current_directory(self.get_build_dir(arch.arch)):
setup_template = open(join("buildconfig", "Setup.Android.SDL2.in")).read()
env = self.get_recipe_env(arch)
env["ANDROID_ROOT"] = join(self.ctx.ndk_platform, "usr")
ndk_lib_dir = join(self.ctx.ndk_platform, "usr", "lib")
png = self.get_recipe("png", self.ctx)
png_lib_dir = join(png.get_build_dir(arch.arch), ".libs")
png_inc_dir = png.get_build_dir(arch)
jpeg = self.get_recipe("jpeg", self.ctx)
jpeg_inc_dir = jpeg_lib_dir = jpeg.get_build_dir(arch.arch)
setup_file = setup_template.format(
sdl_includes=(
" -I" + join(self.ctx.bootstrap.build_dir, 'jni', 'SDL', 'include') +
" -L" + join(self.ctx.bootstrap.build_dir, "libs", str(arch)) +
" -L" + png_lib_dir + " -L" + jpeg_lib_dir + " -L" + ndk_lib_dir),
sdl_ttf_includes="-I"+join(self.ctx.bootstrap.build_dir, 'jni', 'SDL2_ttf'),
sdl_image_includes="-I"+join(self.ctx.bootstrap.build_dir, 'jni', 'SDL2_image'),
sdl_mixer_includes="-I"+join(self.ctx.bootstrap.build_dir, 'jni', 'SDL2_mixer'),
jpeg_includes="-I"+jpeg_inc_dir,
png_includes="-I"+png_inc_dir,
freetype_includes=""
)
open("Setup", "w").write(setup_file)
def get_recipe_env(self, arch):
        env = super().get_recipe_env(arch)
env["USE_SDL2"] = "1"
env["PYGAME_CROSS_COMPILE"] = "TRUE"
env["PYGAME_ANDROID"] = "TRUE"
return env
recipe = Pygame2Recipe()
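# python-for-android discovers this module through the module-level `recipe`
# instance above; listing "pygame" in a project's requirements (for example
# in a buildozer.spec, an assumed setup) is what pulls this recipe into a
# build.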
|
Tuxemon/Tuxemon
|
buildconfig/buildozer/recipes/pygame/__init__.py
|
Python
|
gpl-3.0
| 2,408
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed import DistributedObject
from direct.interval.IntervalGlobal import *
from toontown.effects import DustCloud
def getDustCloudIval(toon):
dustCloud = DustCloud.DustCloud(fBillboard=0)
dustCloud.setBillboardAxis(2.0)
dustCloud.setZ(3)
dustCloud.setScale(0.4)
dustCloud.createTrack()
if getattr(toon, 'laffMeter', None):
toon.laffMeter.color = toon.style.getBlackColor()
seq = Sequence(Wait(0.5), Func(dustCloud.reparentTo, toon), dustCloud.track, Func(dustCloud.destroy))
seq.append(Func(messenger.send, 'blackcat-transformed'))
if getattr(toon, 'laffMeter', None):
seq.append(Func(toon.laffMeter.adjustFace, toon.hp, toon.maxHp))
return seq
class DistributedBlackCatMgr(DistributedObject.DistributedObject):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBlackCatMgr')
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
def announceGenerate(self):
DistributedObject.DistributedObject.announceGenerate(self)
DistributedBlackCatMgr.notify.debug('announceGenerate')
self.acceptOnce('DistributedBlackCatMgr-activate', self.d_requestBlackCatTransformation)
self.dustCloudIval = None
def delete(self):
if self.dustCloudIval:
self.dustCloudIval.finish()
del self.dustCloudIval
self.ignore('DistributedBlackCatMgr-activate')
DistributedObject.DistributedObject.delete(self)
def d_requestBlackCatTransformation(self):
self.sendUpdate('requestBlackCatTransformation', [])
def doBlackCatTransformation(self, avId):
DistributedBlackCatMgr.notify.debug('doBlackCatTransformation')
toon = self.cr.doId2do.get(avId)
if not toon:
DistributedBlackCatMgr.notify.warning("couldn't find Toon %s" % self.avId)
return
if toon.style.getAnimal() != 'cat':
            DistributedBlackCatMgr.notify.warning('not a cat: %s' % avId)
return
self.dustCloudIval = getDustCloudIval(toon)
self.dustCloudIval.start()
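# A usage sketch (grounded in announceGenerate above): client code triggers
# the transformation request through the messenger event this manager
# listens for once:
#
#     messenger.send('DistributedBlackCatMgr-activate')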
|
Spiderlover/Toontown
|
toontown/ai/DistributedBlackCatMgr.py
|
Python
|
mit
| 2,173
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client code for requesting tasks over Cloud Pub/Sub."""
from concurrent import futures
import logging
from typing import Callable, Optional
from proto_task_queue import task_pb2
from google.cloud.pubsub_v1.publisher import client
from google.protobuf import message
from google.protobuf import text_format
class Requestor(object):
"""Client for sending tasks to background workers over Cloud Pub/Sub."""
_publisher: client.Client
def __init__(
self,
pubsub_publisher_client: Optional[client.Client] = None,
*,
task_to_string: Callable[[task_pb2.Task],
str] = text_format.MessageToString,
):
"""Constructor.
Args:
pubsub_publisher_client: Cloud Pub/Sub publisher client, or None to use
the default.
task_to_string: Function that converts a Task to a human-readable string
for logging.
"""
self._publisher = pubsub_publisher_client or client.Client()
self._task_to_string = task_to_string
def request(self, topic: str, args: message.Message) -> futures.Future:
"""Constructs a Task proto and sends it to background workers.
Most callers should use this method unless they have a reason to construct
the Task proto themselves.
Args:
topic: Resource name of the pubsub topic to send the request to.
args: Task arguments. The type of this proto determines which task to
call.
Returns:
Future for the request. The future will complete when the request is sent,
not when the task is completed.
"""
task = task_pb2.Task()
task.args.Pack(args)
return self.request_task(topic, task)
def request_task(self, topic: str, task: task_pb2.Task) -> futures.Future:
"""Sends a Task proto to background workers.
Prefer using request() above if you don't already have a Task proto.
Args:
topic: Resource name of the pubsub topic to send the request to.
task: Task to send.
Returns:
Future for the request. The future will complete when the request is sent,
not when the task is completed.
"""
task_bytes = task.SerializeToString()
logging.info('Sending background task to %s:\n%s', topic,
self._task_to_string(task))
return self._publisher.publish(topic, task_bytes)
|
google/proto-task-queue
|
proto_task_queue/requestor.py
|
Python
|
apache-2.0
| 2,897
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import date, timedelta
from decimal import Decimal, ROUND_DOWN
from django.contrib.sites.models import Site
from django.db import models
from django.db.models import Sum
from django.db.models.signals import post_save
from django.dispatch import receiver
from .utils import duration_string, duration_decimal
from conf.utils import current_site_id
from conf.managers import CurrentSiteManager
@receiver(post_save)
def add_current_site(sender, instance, **kwargs):
"""
Add the current site to a model's sites property after a save. This is
required in post save because ManyToManyField fields require an existing
key.
TODO: Don't run this on *every* post_save.
"""
if hasattr(instance, 'sites'):
if not instance.sites.all():
instance.sites = Site.objects.filter(id=current_site_id())
instance.save()
class Client(models.Model):
name = models.CharField(max_length=255)
archive = models.BooleanField(default=False)
payment_id = models.CharField(max_length=255, blank=True, null=True)
invoice_email = models.EmailField(max_length=255, blank=True, null=True)
sites = models.ManyToManyField(Site)
objects = models.Manager()
on_site = CurrentSiteManager()
class Meta:
default_permissions = ('view', 'add', 'change', 'delete')
ordering = ['-id']
def __str__(self):
return 'Client: ' + self.name
def get_total_projects(self):
return self.projects.count()
def get_total_duration(self):
return duration_string(self.projects.aggregate(
Sum('entries__duration')
)['entries__duration__sum'])
class Project(models.Model):
client = models.ForeignKey('Client', related_name='projects')
name = models.CharField(max_length=255)
archive = models.BooleanField(default=False)
estimate = models.DecimalField(max_digits=10, decimal_places=2, blank=True, null=True)
hourly_rate = models.DecimalField(max_digits=10, decimal_places=2, blank=True, null=True)
class Meta:
default_permissions = ('view', 'add', 'change', 'delete')
ordering = ['client', '-id']
def __str__(self):
return 'Project: ' + self.name
def get_total_entries(self):
return self.entries.count()
def get_total_cost(self):
total_cost = Decimal()
for entry in self.entries.iterator():
try:
if entry.task.hourly_rate:
total_cost += (
duration_decimal(entry.duration)
* entry.task.hourly_rate
)
            except AttributeError:
                # entry.task may be None; skip entries without a rated task
                continue
return total_cost.quantize(Decimal('.01'), rounding=ROUND_DOWN)
def get_total_duration(self):
return duration_string(self.entries.aggregate(
Sum('duration')
)['duration__sum'])
def get_percent_done(self):
if self.estimate is not None:
total_cost = Decimal(self.get_total_cost())
total_estimate = Decimal(self.estimate)
if total_cost != 0 and total_estimate != 0:
return int(100 * (total_cost/total_estimate))
return None
class Task(models.Model):
name = models.CharField(max_length=255)
hourly_rate = models.DecimalField(max_digits=10, decimal_places=2,
blank=True, null=True)
sites = models.ManyToManyField(Site)
objects = models.Manager()
on_site = CurrentSiteManager()
class Meta:
default_permissions = ('view', 'add', 'change', 'delete')
ordering = ['-id']
def __str__(self):
return 'Task: ' + self.name
class EntryManager(models.Manager):
def invoiced(self):
return super(EntryManager, self).get_queryset().filter(
invoices__isnull=False)
def uninvoiced(self):
return super(EntryManager, self).get_queryset().filter(
invoices__isnull=True)
class Entry(models.Model):
project = models.ForeignKey('Project', related_name='entries')
task = models.ForeignKey('core.Task', related_name='entries',
blank=True, null=True)
user = models.ForeignKey('auth.User', related_name='entries')
date = models.DateField(blank=True)
duration = models.DurationField(blank=True)
note = models.TextField(blank=True, null=True)
    site = models.ForeignKey(Site, default=current_site_id,
                             on_delete=models.CASCADE)
objects = EntryManager()
on_site = CurrentSiteManager()
class Meta:
default_permissions = ('view', 'add', 'change', 'delete')
ordering = ['-date', '-id']
verbose_name_plural = 'Entries'
def save(self, *args, **kwargs):
if not self.date:
self.date = date.today()
if not self.site:
self.site = Site.objects.get(id=current_site_id())
super(Entry, self).save(*args, **kwargs)
def __str__(self):
return 'Entry for ' + self.project.name + ' by ' + self.user.username
def is_invoiced(self):
if self.invoices.count() > 0:
return True
return False
class Invoice(models.Model):
client = models.ForeignKey('Client') # Redundant with entries?
note = models.CharField(max_length=255, blank=True, null=True)
entries = models.ManyToManyField('Entry', related_name='invoices')
created = models.DateTimeField(auto_now_add=True)
paid = models.DateTimeField(blank=True, null=True)
transaction_id = models.CharField(max_length=255, blank=True, null=True)
    site = models.ForeignKey(Site, default=current_site_id,
                             on_delete=models.CASCADE)
objects = models.Manager()
on_site = CurrentSiteManager()
class Meta:
default_permissions = ('view', 'add', 'change', 'delete')
def save(self, *args, **kwargs):
self.site = Site.objects.get(id=current_site_id())
super(Invoice, self).save(*args, **kwargs)
def __str__(self):
return 'Invoice: ' + self.client.name
    def total_duration(self):
        # Iterate the queryset, not the bare manager, and return the sum.
        total = timedelta()
        for entry in self.entries.all():
            total += entry.duration
        return total
    def total_billed(self):
        total = Decimal()
        for entry in self.entries.all():
            # The hourly rate lives on the entry's task, which may be unset.
            if entry.task and entry.task.hourly_rate:
                total += duration_decimal(entry.duration) * entry.task.hourly_rate
        return total
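# Usage sketch for the pieces above (illustrative, not part of the original
# module): the custom manager splits entries by invoicing state, which is the
# natural feed for building an Invoice.
# uninvoiced = Entry.objects.uninvoiced().filter(project__client=client)
# invoice = Invoice.objects.create(client=client)
# invoice.entries.add(*uninvoiced)
# invoice.total_duration()  # summed timedelta of the attached entries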
|
muhleder/timestrap
|
core/models.py
|
Python
|
bsd-2-clause
| 6,476
|
"""
Write an efficient algorithm that searches for a value in an m x n matrix. This matrix has the following properties:
Integers in each row are sorted from left to right.
The first integer of each row is greater than the last integer of the previous row.
"""
from typing import List
class Solution:
    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:
        # Guard against an empty matrix before binary searching.
        if not matrix or not matrix[0]:
            return False
        top = 0
        bottom = len(matrix) - 1
while top <= bottom:
middle = (top + bottom) // 2
if bottom == middle:
break
first_num = matrix[middle][0]
last_num = matrix[middle][-1]
            if target == first_num:
return True
if first_num <= target <= last_num:
break
if target < first_num:
bottom = middle - 1
elif target > first_num:
top = middle + 1
row = matrix[middle]
left = 0
right = len(row) - 1
while left <= right:
middle = (left + right) // 2
if row[middle] == target:
return True
if row[middle] < target:
left = middle + 1
elif row[middle] > target:
right = middle - 1
return False
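# Quick sanity check (not part of the original LeetCode submission): rows are
# sorted and strictly stacked, so two binary searches locate any value.
if __name__ == '__main__':
    s = Solution()
    grid = [[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 60]]
    assert s.searchMatrix(grid, 3) is True
    assert s.searchMatrix(grid, 13) is False
    assert s.searchMatrix([], 1) is False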
|
1337/yesterday-i-learned
|
leetcode/74m.py
|
Python
|
gpl-3.0
| 1,381
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
def format_long_string(string, max_length=50):
if len(string) > max_length:
string = string[:max_length - 3]
string += '...'
return string
class AutoVivification(dict):
"""Implementation of perl's autovivification feature. Checkout
http://stackoverflow.com/a/652284/280182 """
def __getitem__(self, item):
try:
return dict.__getitem__(self, item)
except KeyError:
value = self[item] = type(self)()
return value
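if __name__ == '__main__':
    # Demonstration (not in the original module): missing keys materialise as
    # nested AutoVivification instances, so deep assignment needs no setdefault.
    tree = AutoVivification()
    tree['a']['b']['c'] = 1
    assert tree == {'a': {'b': {'c': 1}}}
    # format_long_string keeps at most max_length chars, ellipsis included.
    assert format_long_string('x' * 60) == 'x' * 47 + '...'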
|
zakandrewking/cobrapy
|
cobra/util/util.py
|
Python
|
lgpl-2.1
| 568
|
from flask import send_from_directory
from flask import Flask,g,flash,render_template,redirect,request,url_for
from flask.templating import render_template_string
from wtforms import FileField, HiddenField
from flask_wtf.form import Form
from issues import app,login_manager,mail
import os
from werkzeug.utils import secure_filename
@app.route('/uploads/<filename>')
def uploaded_file(filename):
print 'uploaded_file:',filename,app.config['UPLOAD_FOLDER']
return send_from_directory(app.config['UPLOAD_FOLDER'],filename)
class UploadForm(Form):
formname=HiddenField('formname',default='UploadAttachment')
filename=FileField()
#
# @app.route('/uploads', methods=['GET', 'POST'])
# def upload_file():
# form=UploadForm()
# if form.validate_on_submit():
# file=request.files['filename']
# if file:
# filename = secure_filename(file.filename)
# file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
# return redirect(url_for('uploaded_file',filename=filename))
# return render_template_string(
# '''
# <!doctype html>
# <title>Upload new File</title>
# <h1>Upload new File</h1>
# <div class="panel-body">
#
#             <form method="POST" action="" enctype="multipart/form-data" class="form" role="form">
# {{ form.hidden_tag() }}
#
# <div class="form-inline">
#
# {{ form.filename() }}
#
# <input type="submit" class="btn btn-success btn-sm" value="Update">
#
# </div>
# </form>
#
# </div>
#
# <form >
# <p>
#
# </form>
# ''',form=form)
|
maconnell/issues
|
issues/uploads.py
|
Python
|
gpl-2.0
| 1,646
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import django.utils.timezone
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0003_auto_20151215_1537'),
]
operations = [
migrations.AlterField(
model_name='author',
name='email',
field=models.EmailField(
default=b'unknow@redhat.com', max_length=254, db_index=True),
),
migrations.AlterField(
model_name='author',
name='name',
field=models.CharField(
default=b'Unknown', max_length=255, db_index=True),
),
migrations.AlterField(
model_name='job',
name='date',
field=models.DateTimeField(
default=django.utils.timezone.now, db_index=True),
),
migrations.AlterField(
model_name='system',
name='hostname',
field=models.CharField(db_index=True, max_length=255, blank=True),
),
migrations.AlterField(
model_name='test',
name='folder',
field=models.CharField(
db_index=True, max_length=256, null=True, blank=True),
),
migrations.AlterField(
model_name='test',
name='is_enable',
field=models.BooleanField(
default=True, db_index=True, verbose_name=b'enable'),
),
migrations.AlterField(
model_name='test',
name='name',
field=models.CharField(unique=True, max_length=255, db_index=True),
),
]
|
lhellebr/GreenTea
|
apps/core/migrations/0004_auto_20160105_1533.py
|
Python
|
gpl-2.0
| 1,679
|
# -*- coding: utf-8 -*-
from datetime import datetime
import newrelic.agent
import waffle
from kuma.core.cache import memcache
from ..constants import DOCUMENT_LAST_MODIFIED_CACHE_KEY_TMPL
from ..events import EditDocumentEvent
from ..models import Document, RevisionIP
from ..tasks import send_first_edit_email
def split_slug(slug):
"""
Utility function to do basic slug splitting
"""
slug_split = slug.split('/')
length = len(slug_split)
root = None
seo_root = ''
bad_seo_roots = ['Web']
if length > 1:
root = slug_split[0]
if root in bad_seo_roots:
if length > 2:
seo_root = root + '/' + slug_split[1]
else:
seo_root = root
specific = slug_split.pop()
parent = '/'.join(slug_split)
return {
'specific': specific,
'parent': parent,
'full': slug,
'parent_split': slug_split,
'length': length,
'root': root,
'seo_root': seo_root,
}
def join_slug(parent_split, slug):
parent_split.append(slug)
return '/'.join(parent_split)
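# Worked example for the two helpers above (illustrative only): a slug under
# the 'Web' SEO root splits into its parts and reassembles losslessly.
# >>> parts = split_slug('Web/API/Document')
# >>> parts['specific'], parts['parent'], parts['seo_root']
# ('Document', 'Web/API', 'Web/API')
# >>> join_slug(parts['parent_split'], parts['specific'])
# 'Web/API/Document'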
@newrelic.agent.function_trace()
def document_last_modified(request, document_slug, document_locale):
"""
Utility function to derive the last modified timestamp of a document.
Mainly for the @condition decorator.
"""
    # Build an ad hoc natural cache key so we can skip the DB query
adhoc_natural_key = (document_locale, document_slug)
natural_key_hash = Document.natural_key_hash(adhoc_natural_key)
cache_key = DOCUMENT_LAST_MODIFIED_CACHE_KEY_TMPL % natural_key_hash
try:
last_mod = memcache.get(cache_key)
if last_mod is None:
doc = Document.objects.get(locale=document_locale,
slug=document_slug)
last_mod = doc.fill_last_modified_cache()
# Convert the cached Unix epoch seconds back to Python datetime
return datetime.fromtimestamp(float(last_mod))
except Document.DoesNotExist:
return None
def document_form_initial(document):
"""
Return a dict with the document data pertinent for the form.
"""
return {
'title': document.title,
'slug': document.slug,
'category': document.category,
'is_localizable': document.is_localizable,
'tags': list(document.tags.values_list('name', flat=True))
}
def save_revision_and_notify(rev_form, request, document):
"""
Save the given RevisionForm and send notifications.
"""
creator = request.user
# have to check for first edit before we rev_form.save
first_edit = creator.wiki_revisions().count() == 0
new_rev = rev_form.save(creator, document)
if waffle.switch_is_active('store_revision_ips'):
ip = request.META.get('REMOTE_ADDR')
RevisionIP.objects.create(revision=new_rev, ip=ip)
if first_edit:
send_first_edit_email.delay(new_rev.pk)
document.schedule_rendering('max-age=0')
# Enqueue notifications
EditDocumentEvent(new_rev).fire(exclude=new_rev.creator)
|
surajssd/kuma
|
kuma/wiki/views/utils.py
|
Python
|
mpl-2.0
| 3,100
|
#---------------------------------------------------------------------
# Introduction to Computer Programming (IPC)
# Prof. Jucimar Jr.
# Adham Lucas da Silva Oliveira 1715310001
# Erik Atilio Silva Rey 1715310059
# Enrique Leão Barbosa Izel 1715310048
# Ulisses Antonio Antonino da Costa 1515090555
# Lukas Michel Souza Mota 1715310018
# Guilherme Silva de Oliveira 1715310034
#
# Write a program that asks for a temperature in degrees Fahrenheit,
# then converts it and displays the temperature in degrees Celsius.
#
# C = (5 * (F - 32) / 9)
#----------------------------------------------------------------------
print('-Fahrenheit to Celsius conversion-')
farenheit = float(input('Enter the value in Fahrenheit: '))
celsius = (5 * (farenheit - 32) / 9)
print('The result is:', celsius, '°C')
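# Sanity check for the formula (not part of the original exercise):
# 32 °F -> (5 * (32 - 32) / 9) = 0.0 °C; 212 °F -> (5 * (212 - 32) / 9) = 100.0 °C.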
|
jucimarjr/IPC_2017-1
|
lista02/lista02_exercicio01_questao09.py
|
Python
|
apache-2.0
| 831
|
import time
from datetime import datetime, timedelta
from vFense.plugins.monit.utils import Monitor, MonitorKey
def _default_from_date():
now = datetime.now()
from_date = now - timedelta(hours=5)
return from_date
def _latest_time():
    now = datetime.now()
    return now - timedelta(minutes=2)
def get_memory_latest(agent=None):
if not agent:
return {
'pass': False,
'message': 'No agent id provided.'
}
memory = Monitor.get_memory_data_since(agent, _latest_time())
results = {}
if memory:
results['data'] = memory
results['pass'] = True
results['message'] = 'Memory status found.'
else:
results['pass'] = False
        results['message'] = 'No memory stats found.'
    return results
def get_file_system_latest(agent=None):
if not agent:
return {
'pass': False,
'message': 'No agent id provided.'
}
    # The "since" timestamp appears to have been dropped here; mirroring
    # get_memory_latest above (assumption based on the sibling call).
    fs = Monitor.get_file_system_data_since(agent, _latest_time())
results = {}
if fs:
results['data'] = fs
results['pass'] = True
results['message'] = 'File system stats found.'
else:
results['pass'] = False
        results['message'] = 'No file system stats found.'
    return results
def get_cpu_latest(agent=None):
if not agent:
return {
'pass': False,
'message': 'No agent id provided.'
}
    # Mirroring get_memory_latest: pass the "since" timestamp here as well
    # (assumption based on the sibling call above).
    cpu = Monitor.get_cpu_data_since(agent, _latest_time())
results = {}
if cpu:
results['data'] = cpu
results['pass'] = True
results['message'] = 'CPU stats found.'
else:
results['pass'] = False
        results['message'] = 'No CPU stats found.'
    return results
def get_agent_cpu_latest(agent=None):
if not agent:
return {
'pass': False,
            'message': 'No agent id provided.'
}
cpu = Monitor.get_agent_cpu_stats(agent=agent)
results = {}
if cpu:
# Special cpu data
if cpu.get('user') and cpu.get('system'):
percent = float(cpu['user']) + float(cpu['system'])
cpu['used'] = str(percent)
else:
cpu['used'] = ''
results['data'] = cpu
results['pass'] = True
results['message'] = 'Cpu stats found.'
else:
results['pass'] = False
results['message'] = 'No cpu stats found.'
return results
def get_agent_memory_latest(agent=None):
if not agent:
return {
'pass': False,
            'message': 'No agent id provided.'
}
memory = Monitor.get_agent_memory_stats(agent=agent)
results = {}
if memory:
results['data'] = memory
results['pass'] = True
results['message'] = 'Memory stats found.'
else:
results['pass'] = False
results['message'] = 'No memory stats found.'
return results
def get_agent_file_system_latest(agent=None):
if not agent:
return {
'pass': False,
            'message': 'No agent id provided.'
}
file_system = Monitor.get_agent_file_system_stats(agent=agent)
results = {}
if file_system:
results['data'] = file_system
results['pass'] = True
results['message'] = 'File system stats found.'
else:
results['pass'] = False
results['message'] = 'No file system stats found.'
return results
def get_agent_latest(agent=None):
if not agent:
return {
'pass': False,
            'message': 'No agent id provided.'
}
file_system = Monitor.get_agent_file_system_stats(agent=agent)
cpu = Monitor.get_agent_cpu_stats(agent=agent)
memory = Monitor.get_agent_memory_stats(agent=agent)
data = {}
if file_system:
data[MonitorKey.FileSystem] = file_system
else:
data[MonitorKey.FileSystem] = []
if cpu:
data[MonitorKey.Cpu] = cpu
else:
data[MonitorKey.Cpu] = {}
if memory:
data[MonitorKey.Memory] = memory
else:
data[MonitorKey.Memory] = {}
results = {}
if not file_system and not cpu and not memory:
results['pass'] = False
results['message'] = 'No agent stats found.'
else:
results['data'] = data
results['pass'] = True
results['message'] = 'Agent stats found.'
return results
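# Illustrative shape of a get_agent_latest() result when all three Monitor
# calls return data for the agent (keys come from MonitorKey):
# {
#     'pass': True,
#     'message': 'Agent stats found.',
#     'data': {
#         MonitorKey.FileSystem: [...],
#         MonitorKey.Cpu: {...},
#         MonitorKey.Memory: {...},
#     },
# }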
|
dtklein/vFense
|
tp/src/plugins/monit/api.py
|
Python
|
lgpl-3.0
| 4,777
|
#!/usr/bin/env python
#
# is_hostname_safe - determine whether a supplied string is hostname safe.
#
# Copyright (C) 2014 Michael Davies <michael@the-davies.net>
#
# By "hostname safe" we mean the whether the hostname part of a FQDN
# follows the approporiate standards for a hostname. Specifically:
# * http://en.wikipedia.org/wiki/Hostname
# * http://tools.ietf.org/html/rfc952
# * http://tools.ietf.org/html/rfc1123
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
#
import re
import sys
def is_hostname_safe(hostname):
    regex = r'^[a-z0-9]([a-z0-9\-]{0,61}[a-z0-9]|[a-z0-9]{0,62})?$'
p = re.compile(regex)
return p.match(hostname) is not None
if __name__ == '__main__':
if len(sys.argv) == 2:
print is_hostname_safe(sys.argv[1])
else:
# Otherwise just run the tests
tests = {
'spam': True,
'-spam': False,
'spam-': False,
'spam-eggs': True,
'spam eggs': False,
'9spam': True,
'spam7': True,
'br34kf4st': True,
'$pam': False,
'egg$': False,
'spam#eggs': False,
' eggs': False,
'spam ': False,
'': False,
's': True,
's' * 63: True,
's' * 64: False,
}
print "\nTesting is_hostname_safe()\n"
final_result = True
for id, name in enumerate(tests):
result = is_hostname_safe(name)
test_result = result == tests[name]
if test_result:
print "Test %s: PASSED Testing '%s', got '%s'" % \
(id, name, tests[name])
else:
print "Test %s: *** FAILED '%s' != '%s' got '%s'" % \
(id, name, tests[name], result)
final_result = final_result and test_result
print "\nAll tests passed? %s\n" % final_result
|
mrda/junkcode
|
is_hostname_safe.py
|
Python
|
gpl-2.0
| 2,582
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp import workflow
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp import tools
from openerp.report import report_sxw
import openerp
from openerp.exceptions import UserError
class account_move_line(osv.osv):
_name = "account.move.line"
_description = "Journal Items"
def _query_get(self, cr, uid, obj='l', context=None):
fiscalyear_obj = self.pool.get('account.fiscalyear')
fiscalperiod_obj = self.pool.get('account.period')
account_obj = self.pool.get('account.account')
fiscalyear_ids = []
context = dict(context or {})
initial_bal = context.get('initial_bal', False)
company_clause = " "
if context.get('company_id', False):
company_clause = " AND " +obj+".company_id = %s" % context.get('company_id', False)
if not context.get('fiscalyear', False):
if context.get('all_fiscalyear', False):
#this option is needed by the aged balance report because otherwise, if we search only the draft ones, an open invoice of a closed fiscalyear won't be displayed
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [])
else:
fiscalyear_ids = fiscalyear_obj.search(cr, uid, [('state', '=', 'draft')])
else:
#for initial balance as well as for normal query, we check only the selected FY because the best practice is to generate the FY opening entries
fiscalyear_ids = [context['fiscalyear']]
fiscalyear_clause = (','.join([str(x) for x in fiscalyear_ids])) or '0'
state = context.get('state', False)
where_move_state = ''
where_move_lines_by_date = ''
if context.get('date_from', False) and context.get('date_to', False):
if initial_bal:
where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date < '" +context['date_from']+"')"
else:
where_move_lines_by_date = " AND " +obj+".move_id IN (SELECT id FROM account_move WHERE date >= '" +context['date_from']+"' AND date <= '"+context['date_to']+"')"
if state:
if state.lower() not in ['all']:
where_move_state= " AND "+obj+".move_id IN (SELECT id FROM account_move WHERE account_move.state = '"+state+"')"
if context.get('period_from', False) and context.get('period_to', False) and not context.get('periods', False):
if initial_bal:
period_company_id = fiscalperiod_obj.browse(cr, uid, context['period_from'], context=context).company_id.id
first_period = fiscalperiod_obj.search(cr, uid, [('company_id', '=', period_company_id)], order='date_start', limit=1)[0]
context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, first_period, context['period_from'])
else:
context['periods'] = fiscalperiod_obj.build_ctx_periods(cr, uid, context['period_from'], context['period_to'])
if context.get('periods', False):
if initial_bal:
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s)) %s %s" % (fiscalyear_clause, where_move_state, where_move_lines_by_date)
period_ids = fiscalperiod_obj.search(cr, uid, [('id', 'in', context['periods'])], order='date_start', limit=1)
if period_ids and period_ids[0]:
first_period = fiscalperiod_obj.browse(cr, uid, period_ids[0], context=context)
ids = ','.join([str(x) for x in context['periods']])
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND date_start <= '%s' AND id NOT IN (%s)) %s %s" % (fiscalyear_clause, first_period.date_start, ids, where_move_state, where_move_lines_by_date)
else:
ids = ','.join([str(x) for x in context['periods']])
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s) AND id IN (%s)) %s %s" % (fiscalyear_clause, ids, where_move_state, where_move_lines_by_date)
else:
query = obj+".state <> 'draft' AND "+obj+".period_id IN (SELECT id FROM account_period WHERE fiscalyear_id IN (%s)) %s %s" % (fiscalyear_clause, where_move_state, where_move_lines_by_date)
if initial_bal and not context.get('periods', False) and not where_move_lines_by_date:
#we didn't pass any filter in the context, and the initial balance can't be computed using only the fiscalyear otherwise entries will be summed twice
#so we have to invalidate this query
raise UserError(("You have not supplied enough arguments to compute the initial balance, please select a period and a journal in the context."))
if context.get('journal_ids', False):
query += ' AND '+obj+'.journal_id IN (%s)' % ','.join(map(str, context['journal_ids']))
if context.get('chart_account_id', False):
child_ids = account_obj._get_children_and_consol(cr, uid, [context['chart_account_id']], context=context)
query += ' AND '+obj+'.account_id IN (%s)' % ','.join(map(str, child_ids))
query += company_clause
return query
def _amount_residual(self, cr, uid, ids, field_names, args, context=None):
"""
This function returns the residual amount on a receivable or payable account.move.line.
By default, it returns an amount in the currency of this journal entry (maybe different
of the company currency), but if you pass 'residual_in_company_currency' = True in the
context then the returned amount will be in company currency.
"""
res = {}
if context is None:
context = {}
cur_obj = self.pool.get('res.currency')
for move_line in self.browse(cr, uid, ids, context=context):
res[move_line.id] = {
'amount_residual': 0.0,
'amount_residual_currency': 0.0,
}
if move_line.reconcile_id:
continue
if not move_line.account_id.reconcile:
#this function does not suport to be used on move lines not related to a reconcilable account
continue
if move_line.currency_id:
move_line_total = move_line.amount_currency
sign = move_line.amount_currency < 0 and -1 or 1
else:
move_line_total = move_line.debit - move_line.credit
sign = (move_line.debit - move_line.credit) < 0 and -1 or 1
line_total_in_company_currency = move_line.debit - move_line.credit
context_unreconciled = context.copy()
if move_line.reconcile_partial_id:
for payment_line in move_line.reconcile_partial_id.line_partial_ids:
if payment_line.id == move_line.id:
continue
if payment_line.currency_id and move_line.currency_id and payment_line.currency_id.id == move_line.currency_id.id:
move_line_total += payment_line.amount_currency
else:
if move_line.currency_id:
context_unreconciled.update({'date': payment_line.date})
amount_in_foreign_currency = cur_obj.compute(cr, uid, move_line.company_id.currency_id.id, move_line.currency_id.id, (payment_line.debit - payment_line.credit), round=False, context=context_unreconciled)
move_line_total += amount_in_foreign_currency
else:
move_line_total += (payment_line.debit - payment_line.credit)
line_total_in_company_currency += (payment_line.debit - payment_line.credit)
result = move_line_total
res[move_line.id]['amount_residual_currency'] = sign * (move_line.currency_id and self.pool.get('res.currency').round(cr, uid, move_line.currency_id, result) or result)
res[move_line.id]['amount_residual'] = sign * line_total_in_company_currency
return res
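    # Worked example for _amount_residual (illustrative): a receivable line with
    # debit=100.00 partially reconciled against a payment line with credit=40.00
    # (linked via reconcile_partial_id) yields, in a single-currency company:
    #   move_line_total = 100.00 - 40.00 = 60.00, sign = +1
    #   amount_residual = amount_residual_currency = 60.00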
def default_get(self, cr, uid, fields, context=None):
data = self._default_get(cr, uid, fields, context=context)
for f in data.keys():
if f not in fields:
del data[f]
return data
def _prepare_analytic_line(self, cr, uid, obj_line, context=None):
"""
Prepare the values given at the create() of account.analytic.line upon the validation of a journal item having
an analytic account. This method is intended to be extended in other modules.
:param obj_line: browse record of the account.move.line that triggered the analytic line creation
"""
return {'name': obj_line.name,
'date': obj_line.date,
'account_id': obj_line.analytic_account_id.id,
'unit_amount': obj_line.quantity,
'product_id': obj_line.product_id and obj_line.product_id.id or False,
'product_uom_id': obj_line.product_uom_id and obj_line.product_uom_id.id or False,
'amount': (obj_line.credit or 0.0) - (obj_line.debit or 0.0),
'general_account_id': obj_line.account_id.id,
'journal_id': obj_line.journal_id.analytic_journal_id.id,
'ref': obj_line.ref,
'move_id': obj_line.id,
'user_id': uid,
}
def create_analytic_lines(self, cr, uid, ids, context=None):
acc_ana_line_obj = self.pool.get('account.analytic.line')
for obj_line in self.browse(cr, uid, ids, context=context):
if obj_line.analytic_lines:
acc_ana_line_obj.unlink(cr,uid,[obj.id for obj in obj_line.analytic_lines])
if obj_line.analytic_account_id:
if not obj_line.journal_id.analytic_journal_id:
raise UserError(("You have to define an analytic journal on the '%s' journal!") % (obj_line.journal_id.name, ))
vals_line = self._prepare_analytic_line(cr, uid, obj_line, context=context)
acc_ana_line_obj.create(cr, uid, vals_line)
return True
def _default_get_move_form_hook(self, cursor, user, data):
'''Called in the end of default_get method for manual entry in account_move form'''
        if 'analytic_account_id' in data:
            del data['analytic_account_id']
        if 'account_tax_id' in data:
            del data['account_tax_id']
return data
def convert_to_period(self, cr, uid, context=None):
if context is None:
context = {}
period_obj = self.pool.get('account.period')
#check if the period_id changed in the context from client side
if context.get('period_id', False):
period_id = context.get('period_id')
if type(period_id) == str:
ids = period_obj.search(cr, uid, [('name', 'ilike', period_id)])
context = dict(context, period_id=ids and ids[0] or False)
return context
def _default_get(self, cr, uid, fields, context=None):
#default_get should only do the following:
# -propose the next amount in debit/credit in order to balance the move
# -propose the next account from the journal (default debit/credit account) accordingly
context = dict(context or {})
account_obj = self.pool.get('account.account')
period_obj = self.pool.get('account.period')
journal_obj = self.pool.get('account.journal')
move_obj = self.pool.get('account.move')
tax_obj = self.pool.get('account.tax')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
partner_obj = self.pool.get('res.partner')
currency_obj = self.pool.get('res.currency')
if not context.get('journal_id', False):
context['journal_id'] = context.get('search_default_journal_id', False)
if not context.get('period_id', False):
context['period_id'] = context.get('search_default_period_id', False)
context = self.convert_to_period(cr, uid, context)
# Compute simple values
data = super(account_move_line, self).default_get(cr, uid, fields, context=context)
if context.get('journal_id'):
total = 0.0
#in account.move form view, it is not possible to compute total debit and credit using
#a browse record. So we must use the context to pass the whole one2many field and compute the total
if context.get('line_id'):
for move_line_dict in move_obj.resolve_2many_commands(cr, uid, 'line_id', context.get('line_id'), context=context):
data['name'] = data.get('name') or move_line_dict.get('name')
data['partner_id'] = data.get('partner_id') or move_line_dict.get('partner_id')
total += move_line_dict.get('debit', 0.0) - move_line_dict.get('credit', 0.0)
elif context.get('period_id'):
#find the date and the ID of the last unbalanced account.move encoded by the current user in that journal and period
move_id = False
cr.execute('''SELECT move_id, date FROM account_move_line
WHERE journal_id = %s AND period_id = %s AND create_uid = %s AND state = %s
ORDER BY id DESC limit 1''', (context['journal_id'], context['period_id'], uid, 'draft'))
res = cr.fetchone()
move_id = res and res[0] or False
data['date'] = res and res[1] or period_obj.browse(cr, uid, context['period_id'], context=context).date_start
data['move_id'] = move_id
if move_id:
#if there exist some unbalanced accounting entries that match the journal and the period,
#we propose to continue the same move by copying the ref, the name, the partner...
move = move_obj.browse(cr, uid, move_id, context=context)
data.setdefault('name', move.line_id[-1].name)
for l in move.line_id:
data['partner_id'] = data.get('partner_id') or l.partner_id.id
data['ref'] = data.get('ref') or l.ref
total += (l.debit or 0.0) - (l.credit or 0.0)
#compute the total of current move
data['debit'] = total < 0 and -total or 0.0
data['credit'] = total > 0 and total or 0.0
#pick the good account on the journal accordingly if the next proposed line will be a debit or a credit
journal_data = journal_obj.browse(cr, uid, context['journal_id'], context=context)
account = total > 0 and journal_data.default_credit_account_id or journal_data.default_debit_account_id
#map the account using the fiscal position of the partner, if needed
if isinstance(data.get('partner_id'), (int, long)):
part = partner_obj.browse(cr, uid, data['partner_id'], context=context)
elif isinstance(data.get('partner_id'), (tuple, list)):
part = partner_obj.browse(cr, uid, data['partner_id'][0], context=context)
else:
part = False
if account and part:
account = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, account.id)
account = account_obj.browse(cr, uid, account, context=context)
data['account_id'] = account and account.id or False
#compute the amount in secondary currency of the account, if needed
if account and account.currency_id:
data['currency_id'] = account.currency_id.id
#set the context for the multi currency change
compute_ctx = context.copy()
compute_ctx.update({
#the following 2 parameters are used to choose the currency rate, in case where the account
#doesn't work with an outgoing currency rate method 'at date' but 'average'
'res.currency.compute.account': account,
'res.currency.compute.account_invert': True,
})
if data.get('date'):
compute_ctx.update({'date': data['date']})
data['amount_currency'] = currency_obj.compute(cr, uid, account.company_id.currency_id.id, data['currency_id'], -total, context=compute_ctx)
data = self._default_get_move_form_hook(cr, uid, data)
return data
def on_create_write(self, cr, uid, id, context=None):
if not id:
return []
ml = self.browse(cr, uid, id, context=context)
domain = (context or {}).get('on_write_domain', [])
return self.pool.get('account.move.line').search(cr, uid, domain + [['id', 'in', [l.id for l in ml.move_id.line_id]]], context=context)
def _balance(self, cr, uid, ids, name, arg, context=None):
if context is None:
context = {}
c = context.copy()
        c['initial_bal'] = True
sql = """SELECT l1.id, COALESCE(SUM(l2.debit-l2.credit), 0)
FROM account_move_line l1 LEFT JOIN account_move_line l2
ON (l1.account_id = l2.account_id
AND l2.id <= l1.id
AND """ + \
self._query_get(cr, uid, obj='l2', context=c) + \
") WHERE l1.id IN %s GROUP BY l1.id"
cr.execute(sql, [tuple(ids)])
return dict(cr.fetchall())
def _invoice(self, cursor, user, ids, name, arg, context=None):
invoice_obj = self.pool.get('account.invoice')
res = {}
for line_id in ids:
res[line_id] = False
cursor.execute('SELECT l.id, i.id ' \
'FROM account_move_line l, account_invoice i ' \
'WHERE l.move_id = i.move_id ' \
'AND l.id IN %s',
(tuple(ids),))
invoice_ids = []
for line_id, invoice_id in cursor.fetchall():
res[line_id] = invoice_id
invoice_ids.append(invoice_id)
invoice_names = {}
for invoice_id, name in invoice_obj.name_get(cursor, user, invoice_ids, context=context):
invoice_names[invoice_id] = name
for line_id in res.keys():
invoice_id = res[line_id]
res[line_id] = invoice_id and (invoice_id, invoice_names[invoice_id]) or False
return res
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
result = []
for line in self.browse(cr, uid, ids, context=context):
if line.ref:
result.append((line.id, (line.move_id.name or '')+' ('+line.ref+')'))
else:
result.append((line.id, line.move_id.name))
return result
def _balance_search(self, cursor, user, obj, name, args, domain=None, context=None):
if context is None:
context = {}
if not args:
return []
where = ' AND '.join(map(lambda x: '(abs(sum(debit-credit))'+x[1]+str(x[2])+')',args))
cursor.execute('SELECT id, SUM(debit-credit) FROM account_move_line \
GROUP BY id, debit, credit having '+where)
res = cursor.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', [x[0] for x in res])]
def _invoice_search(self, cursor, user, obj, name, args, context=None):
if not args:
return []
invoice_obj = self.pool.get('account.invoice')
i = 0
while i < len(args):
fargs = args[i][0].split('.', 1)
if len(fargs) > 1:
args[i] = (fargs[0], 'in', invoice_obj.search(cursor, user,
[(fargs[1], args[i][1], args[i][2])]))
i += 1
continue
if isinstance(args[i][2], basestring):
res_ids = invoice_obj.name_search(cursor, user, args[i][2], [],
args[i][1])
args[i] = (args[i][0], 'in', [x[0] for x in res_ids])
i += 1
qu1, qu2 = [], []
for x in args:
if x[1] != 'in':
if (x[2] is False) and (x[1] == '='):
qu1.append('(i.id IS NULL)')
elif (x[2] is False) and (x[1] == '<>' or x[1] == '!='):
qu1.append('(i.id IS NOT NULL)')
else:
qu1.append('(i.id %s %s)' % (x[1], '%s'))
qu2.append(x[2])
elif x[1] == 'in':
if len(x[2]) > 0:
qu1.append('(i.id IN (%s))' % (','.join(['%s'] * len(x[2]))))
qu2 += x[2]
else:
qu1.append(' (False)')
if qu1:
qu1 = ' AND' + ' AND'.join(qu1)
else:
qu1 = ''
cursor.execute('SELECT l.id ' \
'FROM account_move_line l, account_invoice i ' \
'WHERE l.move_id = i.move_id ' + qu1, qu2)
res = cursor.fetchall()
if not res:
return [('id', '=', '0')]
return [('id', 'in', [x[0] for x in res])]
def _get_move_lines(self, cr, uid, ids, context=None):
result = []
for move in self.pool.get('account.move').browse(cr, uid, ids, context=context):
for line in move.line_id:
result.append(line.id)
return result
def _get_reconcile(self, cr, uid, ids,name, unknow_none, context=None):
res = dict.fromkeys(ids, False)
for line in self.browse(cr, uid, ids, context=context):
if line.reconcile_id:
res[line.id] = str(line.reconcile_id.name)
elif line.reconcile_partial_id:
res[line.id] = str(line.reconcile_partial_id.name)
return res
def _get_move_from_reconcile(self, cr, uid, ids, context=None):
move = {}
for r in self.pool.get('account.move.reconcile').browse(cr, uid, ids, context=context):
for line in r.line_partial_ids:
move[line.move_id.id] = True
for line in r.line_id:
move[line.move_id.id] = True
move_line_ids = []
if move:
move_line_ids = self.pool.get('account.move.line').search(cr, uid, [('move_id','in',move.keys())], context=context)
return move_line_ids
_columns = {
'name': fields.char('Name', required=True),
        'quantity': fields.float('Quantity', digits=(16,2), help="The optional quantity expressed by this line, e.g. number of products sold. The quantity is not a legal requirement but is very useful for some reports."),
'product_uom_id': fields.many2one('product.uom', 'Unit of Measure'),
'product_id': fields.many2one('product.product', 'Product'),
'debit': fields.float('Debit', digits_compute=dp.get_precision('Account')),
'credit': fields.float('Credit', digits_compute=dp.get_precision('Account')),
'account_id': fields.many2one('account.account', 'Account', required=True, ondelete="cascade", domain=[('type','<>','view'), ('type', '<>', 'closed')], select=2),
'move_id': fields.many2one('account.move', 'Journal Entry', ondelete="cascade", help="The move of this entry line.", select=2, required=True, auto_join=True),
'narration': fields.related('move_id','narration', type='text', relation='account.move', string='Internal Note'),
'ref': fields.related('move_id', 'ref', string='Reference', type='char', store=True),
'statement_id': fields.many2one('account.bank.statement', 'Statement', help="The bank statement used for bank reconciliation", select=1, copy=False),
'reconcile_id': fields.many2one('account.move.reconcile', 'Reconcile', readonly=True, ondelete='set null', select=2, copy=False),
'reconcile_partial_id': fields.many2one('account.move.reconcile', 'Partial Reconcile', readonly=True, ondelete='set null', select=2, copy=False),
'reconcile_ref': fields.function(_get_reconcile, type='char', string='Reconcile Ref', oldname='reconcile', store={
'account.move.line': (lambda self, cr, uid, ids, c={}: ids, ['reconcile_id','reconcile_partial_id'], 50),'account.move.reconcile': (_get_move_from_reconcile, None, 50)}),
'amount_currency': fields.float('Amount Currency', help="The amount expressed in an optional other currency if it is a multi-currency entry.", digits_compute=dp.get_precision('Account')),
'amount_residual_currency': fields.function(_amount_residual, string='Residual Amount in Currency', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in its currency (maybe different of the company currency)."),
'amount_residual': fields.function(_amount_residual, string='Residual Amount', multi="residual", help="The residual amount on a receivable or payable of a journal entry expressed in the company currency."),
'currency_id': fields.many2one('res.currency', 'Currency', help="The optional other currency if it is a multi-currency entry."),
'journal_id': fields.related('move_id', 'journal_id', string='Journal', type='many2one', relation='account.journal', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['journal_id'], 20)
}),
'period_id': fields.related('move_id', 'period_id', string='Period', type='many2one', relation='account.period', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['period_id'], 20)
}),
'blocked': fields.boolean('No Follow-up', help="You can check this box to mark this journal item as a litigation with the associated partner"),
'partner_id': fields.many2one('res.partner', 'Partner', select=1, ondelete='restrict'),
'date_maturity': fields.date('Due date', select=True ,help="This field is used for payable and receivable journal entries. You can put the limit date for the payment of this line."),
'date': fields.related('move_id','date', string='Effective date', type='date', required=True, select=True,
store = {
'account.move': (_get_move_lines, ['date'], 20)
}),
'date_created': fields.date('Creation date', select=True),
'analytic_lines': fields.one2many('account.analytic.line', 'move_id', 'Analytic lines'),
'centralisation': fields.selection([('normal','Normal'),('credit','Credit Centralisation'),('debit','Debit Centralisation'),('currency','Currency Adjustment')], 'Centralisation', size=8),
'balance': fields.function(_balance, fnct_search=_balance_search, string='Balance'),
'state': fields.selection([('draft','Unbalanced'), ('valid','Balanced')], 'Status', readonly=True, copy=False),
'tax_code_id': fields.many2one('account.tax.code', 'Tax Account', help="The Account can either be a base tax code or a tax code account."),
'tax_amount': fields.float('Tax/Base Amount', digits_compute=dp.get_precision('Account'), select=True, help="If the Tax account is a tax code account, this field will contain the taxed amount.If the tax account is base tax code, "\
"this field will contain the basic amount(without tax)."),
'invoice': fields.function(_invoice, string='Invoice',
type='many2one', relation='account.invoice', fnct_search=_invoice_search),
'account_tax_id':fields.many2one('account.tax', 'Tax', copy=False),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company',
string='Company', store=True, readonly=True)
}
def _get_date(self, cr, uid, context=None):
        if context is None:
            context = {}
period_obj = self.pool.get('account.period')
dt = time.strftime('%Y-%m-%d')
if context.get('journal_id') and context.get('period_id'):
cr.execute('SELECT date FROM account_move_line ' \
'WHERE journal_id = %s AND period_id = %s ' \
'ORDER BY id DESC limit 1',
(context['journal_id'], context['period_id']))
res = cr.fetchone()
if res:
dt = res[0]
else:
period = period_obj.browse(cr, uid, context['period_id'], context=context)
dt = period.date_start
return dt
def _get_currency(self, cr, uid, context=None):
if context is None:
context = {}
if not context.get('journal_id', False):
return False
cur = self.pool.get('account.journal').browse(cr, uid, context['journal_id']).currency
return cur and cur.id or False
def _get_period(self, cr, uid, context=None):
"""
Return default account period value
"""
context = context or {}
if context.get('period_id', False):
return context['period_id']
account_period_obj = self.pool.get('account.period')
ids = account_period_obj.find(cr, uid, context=context)
period_id = False
if ids:
period_id = ids[0]
return period_id
def _get_journal(self, cr, uid, context=None):
"""
Return journal based on the journal type
"""
context = context or {}
if context.get('journal_id', False):
return context['journal_id']
journal_id = False
journal_pool = self.pool.get('account.journal')
if context.get('journal_type', False):
jids = journal_pool.search(cr, uid, [('type','=', context.get('journal_type'))])
if not jids:
model, action_id = self.pool['ir.model.data'].get_object_reference(cr, uid, 'account', 'action_account_journal_form')
msg = _("""Cannot find any account journal of "%s" type for this company, You should create one.\n Please go to Journal Configuration""") % context.get('journal_type').replace('_', ' ').title()
raise openerp.exceptions.RedirectWarning(msg, action_id, _('Go to the configuration panel'))
journal_id = jids[0]
return journal_id
_defaults = {
'blocked': False,
'centralisation': 'normal',
'date': _get_date,
'date_created': fields.date.context_today,
'state': 'draft',
'currency_id': _get_currency,
'journal_id': _get_journal,
'credit': 0.0,
'debit': 0.0,
'amount_currency': 0.0,
'account_id': lambda self, cr, uid, c: c.get('account_id', False),
'period_id': _get_period,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.move.line', context=c)
}
_order = "date desc, id desc"
_sql_constraints = [
('credit_debit1', 'CHECK (credit*debit=0)', 'Wrong credit or debit value in accounting entry !'),
('credit_debit2', 'CHECK (credit+debit>=0)', 'Wrong credit or debit value in accounting entry !'),
]
def _auto_init(self, cr, context=None):
res = super(account_move_line, self)._auto_init(cr, context=context)
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = \'account_move_line_journal_id_period_id_index\'')
if not cr.fetchone():
cr.execute('CREATE INDEX account_move_line_journal_id_period_id_index ON account_move_line (journal_id, period_id)')
cr.execute('SELECT indexname FROM pg_indexes WHERE indexname = %s', ('account_move_line_date_id_index',))
if not cr.fetchone():
cr.execute('CREATE INDEX account_move_line_date_id_index ON account_move_line (date DESC, id desc)')
return res
def _check_no_view(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type in ('view', 'consolidation'):
return False
return True
def _check_no_closed(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.account_id.type == 'closed':
                raise UserError(_('You cannot create journal items on a closed account %s %s.') % (l.account_id.code, l.account_id.name))
return True
def _check_company_id(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for l in lines:
if l.company_id != l.account_id.company_id or l.company_id != l.period_id.company_id:
return False
return True
def _check_date(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.journal_id.allow_date:
if not time.strptime(l.date[:10],'%Y-%m-%d') >= time.strptime(l.period_id.date_start, '%Y-%m-%d') or not time.strptime(l.date[:10], '%Y-%m-%d') <= time.strptime(l.period_id.date_stop, '%Y-%m-%d'):
return False
return True
def _check_currency(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.account_id.currency_id:
if not l.currency_id or not l.currency_id.id == l.account_id.currency_id.id:
return False
return True
def _check_currency_and_amount(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if (l.amount_currency and not l.currency_id):
return False
return True
def _check_currency_amount(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.amount_currency:
if (l.amount_currency > 0.0 and l.credit > 0.0) or (l.amount_currency < 0.0 and l.debit > 0.0):
return False
return True
def _check_currency_company(self, cr, uid, ids, context=None):
for l in self.browse(cr, uid, ids, context=context):
if l.currency_id.id == l.company_id.currency_id.id:
return False
return True
_constraints = [
(_check_no_view, 'You cannot create journal items on an account of type view or consolidation.', ['account_id']),
(_check_no_closed, 'You cannot create journal items on closed account.', ['account_id']),
(_check_company_id, 'Account and Period must belong to the same company.', ['company_id']),
(_check_date, 'The date of your Journal Entry is not in the defined period! You should change the date or remove this constraint from the journal.', ['date']),
(_check_currency, 'The selected account of your Journal Entry forces to provide a secondary currency. You should remove the secondary currency on the account or select a multi-currency view on the journal.', ['currency_id']),
(_check_currency_and_amount, "You cannot create journal items with a secondary currency without recording both 'currency' and 'amount currency' field.", ['currency_id','amount_currency']),
(_check_currency_amount, 'The amount expressed in the secondary currency must be positive when account is debited and negative when account is credited.', ['amount_currency']),
(_check_currency_company, "You cannot provide a secondary currency if it is the same than the company one." , ['currency_id']),
]
#TODO: ONCHANGE_ACCOUNT_ID: set account_tax_id
def onchange_currency(self, cr, uid, ids, account_id, amount, currency_id, date=False, journal=False, context=None):
if context is None:
context = {}
account_obj = self.pool.get('account.account')
journal_obj = self.pool.get('account.journal')
currency_obj = self.pool.get('res.currency')
if (not currency_id) or (not account_id):
return {}
result = {}
acc = account_obj.browse(cr, uid, account_id, context=context)
if (amount>0) and journal:
x = journal_obj.browse(cr, uid, journal).default_credit_account_id
if x: acc = x
context = dict(context)
context.update({
'date': date,
'res.currency.compute.account': acc,
})
v = currency_obj.compute(cr, uid, currency_id, acc.company_id.currency_id.id, amount, context=context)
result['value'] = {
'debit': v > 0 and v or 0.0,
'credit': v < 0 and -v or 0.0
}
return result
def onchange_partner_id(self, cr, uid, ids, move_id, partner_id, account_id=None, debit=0, credit=0, date=False, journal=False, context=None):
partner_obj = self.pool.get('res.partner')
payment_term_obj = self.pool.get('account.payment.term')
journal_obj = self.pool.get('account.journal')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
val = {}
val['date_maturity'] = False
if not partner_id:
return {'value':val}
if not date:
date = datetime.now().strftime('%Y-%m-%d')
jt = False
if journal:
jt = journal_obj.browse(cr, uid, journal, context=context).type
part = partner_obj.browse(cr, uid, partner_id, context=context)
payment_term_id = False
if jt and jt in ('purchase', 'purchase_refund') and part.property_supplier_payment_term:
payment_term_id = part.property_supplier_payment_term.id
elif jt and part.property_payment_term:
payment_term_id = part.property_payment_term.id
if payment_term_id:
res = payment_term_obj.compute(cr, uid, payment_term_id, 100, date)
if res:
val['date_maturity'] = res[0][0]
if not account_id:
id1 = part.property_account_payable.id
id2 = part.property_account_receivable.id
if jt:
if jt in ('sale', 'purchase_refund'):
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id2)
elif jt in ('purchase', 'sale_refund'):
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id1)
elif jt in ('general', 'bank', 'cash'):
if part.customer:
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id2)
elif part.supplier:
val['account_id'] = fiscal_pos_obj.map_account(cr, uid, part and part.property_account_position or False, id1)
if val.get('account_id', False):
d = self.onchange_account_id(cr, uid, ids, account_id=val['account_id'], partner_id=part.id, context=context)
val.update(d['value'])
return {'value':val}
def onchange_account_id(self, cr, uid, ids, account_id=False, partner_id=False, context=None):
account_obj = self.pool.get('account.account')
partner_obj = self.pool.get('res.partner')
fiscal_pos_obj = self.pool.get('account.fiscal.position')
val = {}
if account_id:
res = account_obj.browse(cr, uid, account_id, context=context)
tax_ids = res.tax_ids
if tax_ids and partner_id:
part = partner_obj.browse(cr, uid, partner_id, context=context)
tax_id = fiscal_pos_obj.map_tax(cr, uid, part and part.property_account_position or False, tax_ids)[0]
else:
tax_id = tax_ids and tax_ids[0].id or False
val['account_tax_id'] = tax_id
return {'value': val}
#
# type: the type if reconciliation (no logic behind this field, for info)
#
# writeoff; entry generated for the difference between the lines
#
def search(self, cr, uid, args, offset=0, limit=None, order=None, context=None, count=False):
if context is None:
context = {}
if context.get('fiscalyear'):
args.append(('period_id.fiscalyear_id', '=', context.get('fiscalyear', False)))
if context and context.get('next_partner_only', False):
if not context.get('partner_id', False):
partner = self.list_partners_to_reconcile(cr, uid, context=context)
if partner:
partner = partner[0]
else:
partner = context.get('partner_id', False)
if not partner:
return []
args.append(('partner_id', '=', partner[0]))
return super(account_move_line, self).search(cr, uid, args, offset, limit, order, context, count)
def prepare_move_lines_for_reconciliation_widget(self, cr, uid, lines, target_currency=False, target_date=False, context=None):
""" Returns move lines formatted for the bank reconciliation widget
:param target_currency: curreny you want the move line debit/credit converted into
:param target_date: date to use for the monetary conversion
"""
if not lines:
return []
if context is None:
context = {}
ctx = context.copy()
currency_obj = self.pool.get('res.currency')
company_currency = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id
rml_parser = report_sxw.rml_parse(cr, uid, 'reconciliation_widget_aml', context=context)
ret = []
for line in lines:
partial_reconciliation_siblings_ids = []
if line.reconcile_partial_id:
partial_reconciliation_siblings_ids = self.search(cr, uid, [('reconcile_partial_id', '=', line.reconcile_partial_id.id)], context=context)
partial_reconciliation_siblings_ids.remove(line.id)
ret_line = {
'id': line.id,
'name': line.name != '/' and line.move_id.name + ': ' + line.name or line.move_id.name,
'ref': line.move_id.ref or '',
# For reconciliation between statement transactions and already registered payments (eg. checks)
'already_paid': line.account_id.type == 'liquidity',
'account_code': line.account_id.code,
'account_name': line.account_id.name,
'account_type': line.account_id.type,
'date_maturity': line.date_maturity,
'date': line.date,
'period_name': line.period_id.name,
'journal_name': line.journal_id.name,
'partner_id': line.partner_id.id,
'partner_name': line.partner_id.name,
'is_partially_reconciled': bool(line.reconcile_partial_id),
'partial_reconciliation_siblings_ids': partial_reconciliation_siblings_ids,
}
# Amount residual can be negative
debit = line.debit
credit = line.credit
amount = line.amount_residual
amount_currency = line.amount_residual_currency
if line.amount_residual < 0:
debit, credit = credit, debit
amount = -amount
amount_currency = -amount_currency
# For already reconciled lines, don't use amount_residual(_currency)
if line.account_id.type == 'liquidity':
amount = abs(debit - credit)
amount_currency = abs(line.amount_currency)
# Get right debit / credit:
target_currency = target_currency or company_currency
line_currency = line.currency_id or company_currency
amount_currency_str = ""
total_amount_currency_str = ""
if line_currency != company_currency:
total_amount = line.amount_currency
actual_debit = debit > 0 and amount_currency or 0.0
actual_credit = credit > 0 and amount_currency or 0.0
else:
total_amount = abs(debit - credit)
actual_debit = debit > 0 and amount or 0.0
actual_credit = credit > 0 and amount or 0.0
if line_currency != target_currency:
amount_currency_str = rml_parser.formatLang(actual_debit or actual_credit, currency_obj=line_currency)
total_amount_currency_str = rml_parser.formatLang(total_amount, currency_obj=line_currency)
ret_line['credit_currency'] = actual_credit
ret_line['debit_currency'] = actual_debit
ctx = context.copy()
if target_date:
ctx.update({'date': target_date})
total_amount = currency_obj.compute(cr, uid, line_currency.id, target_currency.id, total_amount, context=ctx)
actual_debit = currency_obj.compute(cr, uid, line_currency.id, target_currency.id, actual_debit, context=ctx)
actual_credit = currency_obj.compute(cr, uid, line_currency.id, target_currency.id, actual_credit, context=ctx)
amount_str = rml_parser.formatLang(actual_debit or actual_credit, currency_obj=target_currency)
total_amount_str = rml_parser.formatLang(total_amount, currency_obj=target_currency)
ret_line['debit'] = actual_debit
ret_line['credit'] = actual_credit
ret_line['amount_str'] = amount_str
ret_line['total_amount_str'] = total_amount_str
ret_line['amount_currency_str'] = amount_currency_str
ret_line['total_amount_currency_str'] = total_amount_currency_str
ret.append(ret_line)
return ret
def list_partners_to_reconcile(self, cr, uid, context=None, filter_domain=False):
line_ids = []
if filter_domain:
line_ids = self.search(cr, uid, filter_domain, context=context)
where_clause = filter_domain and "AND l.id = ANY(%s)" or ""
cr.execute(
"""SELECT partner_id FROM (
SELECT l.partner_id, p.last_reconciliation_date, SUM(l.debit) AS debit, SUM(l.credit) AS credit, MAX(l.create_date) AS max_date
FROM account_move_line l
RIGHT JOIN account_account a ON (a.id = l.account_id)
RIGHT JOIN res_partner p ON (l.partner_id = p.id)
WHERE a.reconcile IS TRUE
AND l.reconcile_id IS NULL
AND l.state <> 'draft'
%s
GROUP BY l.partner_id, p.last_reconciliation_date
) AS s
WHERE debit > 0 AND credit > 0 AND (last_reconciliation_date IS NULL OR max_date > last_reconciliation_date)
ORDER BY last_reconciliation_date"""
            % where_clause, (line_ids,) if filter_domain else ())
ids = [x[0] for x in cr.fetchall()]
if not ids:
return []
# To apply the ir_rules
partner_obj = self.pool.get('res.partner')
ids = partner_obj.search(cr, uid, [('id', 'in', ids)], context=context)
return partner_obj.name_get(cr, uid, ids, context=context)
def reconcile_partial(self, cr, uid, ids, type='auto', context=None, writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False):
move_rec_obj = self.pool.get('account.move.reconcile')
merges = []
unmerge = []
total = 0.0
merges_rec = []
company_list = []
if context is None:
context = {}
for line in self.browse(cr, uid, ids, context=context):
            if company_list and line.company_id.id not in company_list:
                raise UserError(_('To reconcile the entries, the company should be the same for all entries.'))
company_list.append(line.company_id.id)
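        # Pull in any lines already partially reconciled with the selected
        # ones and total the residual amounts; if the total nets out to zero,
        # a full reconciliation is performed instead of a partial one.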
for line in self.browse(cr, uid, ids, context=context):
if line.account_id.currency_id:
currency_id = line.account_id.currency_id
else:
currency_id = line.company_id.currency_id
if line.reconcile_id:
raise UserError(_("Journal Item '%s' (id: %s), Move '%s' is already reconciled!") % (line.name, line.id, line.move_id.name))
if line.reconcile_partial_id:
for line2 in line.reconcile_partial_id.line_partial_ids:
if line2.state != 'valid':
raise UserError(_("Journal Item '%s' (id: %s) cannot be used in a reconciliation as it is not balanced!") % (line2.name, line2.id))
if not line2.reconcile_id:
if line2.id not in merges:
merges.append(line2.id)
if line2.account_id.currency_id:
total += line2.amount_currency
else:
total += (line2.debit or 0.0) - (line2.credit or 0.0)
merges_rec.append(line.reconcile_partial_id.id)
else:
unmerge.append(line.id)
if line.account_id.currency_id:
total += line.amount_currency
else:
total += (line.debit or 0.0) - (line.credit or 0.0)
if self.pool.get('res.currency').is_zero(cr, uid, currency_id, total):
res = self.reconcile(cr, uid, merges+unmerge, context=context, writeoff_acc_id=writeoff_acc_id, writeoff_period_id=writeoff_period_id, writeoff_journal_id=writeoff_journal_id)
return res
# marking the lines as reconciled does not change their validity, so there is no need
# to revalidate their moves completely.
reconcile_context = dict(context, novalidate=True)
r_id = move_rec_obj.create(cr, uid, {
'type': type,
'line_partial_ids': map(lambda x: (4,x,False), merges+unmerge)
}, context=reconcile_context)
move_rec_obj.reconcile_partial_check(cr, uid, [r_id] + merges_rec, context=reconcile_context)
return r_id
def reconcile(self, cr, uid, ids, type='auto', writeoff_acc_id=False, writeoff_period_id=False, writeoff_journal_id=False, context=None):
account_obj = self.pool.get('account.account')
move_obj = self.pool.get('account.move')
move_rec_obj = self.pool.get('account.move.reconcile')
partner_obj = self.pool.get('res.partner')
currency_obj = self.pool.get('res.currency')
lines = self.browse(cr, uid, ids, context=context)
unrec_lines = filter(lambda x: not x['reconcile_id'], lines)
credit = debit = 0.0
currency = 0.0
account_id = False
partner_id = False
if context is None:
context = {}
company_list = []
for line in self.browse(cr, uid, ids, context=context):
            if company_list and line.company_id.id not in company_list:
                raise UserError(_('To reconcile the entries, the company should be the same for all entries.'))
company_list.append(line.company_id.id)
for line in unrec_lines:
            if line.state != 'valid':
                raise UserError(_('Entry "%s" is not valid!') % line.name)
credit += line['credit']
debit += line['debit']
currency += line['amount_currency'] or 0.0
account_id = line['account_id']['id']
partner_id = (line['partner_id'] and line['partner_id']['id']) or False
writeoff = debit - credit
        # If date_p is given in the context, use that date
        if context.get('date_p'):
            date = context['date_p']
else:
date = time.strftime('%Y-%m-%d')
cr.execute('SELECT account_id, reconcile_id '\
'FROM account_move_line '\
'WHERE id IN %s '\
'GROUP BY account_id,reconcile_id',
(tuple(ids), ))
r = cr.fetchall()
#TODO: move this check to a constraint in the account_move_reconcile object
if len(r) != 1:
            raise UserError(_('Entries are not of the same account, or are already reconciled!'))
if not unrec_lines:
raise UserError(_('Entry is already reconciled.'))
account = account_obj.browse(cr, uid, account_id, context=context)
if not account.reconcile:
            raise UserError(_('The account is not defined to be reconciled!'))
        if r[0][1] is not None:
            raise UserError(_('Some entries are already reconciled.'))
if (not currency_obj.is_zero(cr, uid, account.company_id.currency_id, writeoff)) or \
(account.currency_id and (not currency_obj.is_zero(cr, uid, account.currency_id, currency))):
if not writeoff_acc_id:
raise UserError(_('You have to provide an account for the write off/exchange difference entry.'))
if writeoff > 0:
debit = writeoff
credit = 0.0
self_credit = writeoff
self_debit = 0.0
else:
debit = 0.0
credit = -writeoff
self_credit = 0.0
self_debit = -writeoff
# If comment exist in context, take it
if 'comment' in context and context['comment']:
libelle = context['comment']
else:
libelle = _('Write-Off')
cur_obj = self.pool.get('res.currency')
cur_id = False
amount_currency_writeoff = 0.0
if context.get('company_currency_id',False) != context.get('currency_id',False):
cur_id = context.get('currency_id',False)
for line in unrec_lines:
if line.currency_id and line.currency_id.id == context.get('currency_id',False):
amount_currency_writeoff += line.amount_currency
else:
tmp_amount = cur_obj.compute(cr, uid, line.account_id.company_id.currency_id.id, context.get('currency_id',False), abs(line.debit-line.credit), context={'date': line.date})
amount_currency_writeoff += (line.debit > 0) and tmp_amount or -tmp_amount
writeoff_lines = [
(0, 0, {
'name': libelle,
'debit': self_debit,
'credit': self_credit,
'account_id': account_id,
'date': date,
'partner_id': partner_id,
'currency_id': cur_id or (account.currency_id.id or False),
'amount_currency': amount_currency_writeoff and -1 * amount_currency_writeoff or (account.currency_id.id and -1 * currency or 0.0)
}),
(0, 0, {
'name': libelle,
'debit': debit,
'credit': credit,
'account_id': writeoff_acc_id,
'analytic_account_id': context.get('analytic_id', False),
'date': date,
'partner_id': partner_id,
'currency_id': cur_id or (account.currency_id.id or False),
'amount_currency': amount_currency_writeoff and amount_currency_writeoff or (account.currency_id.id and currency or 0.0)
})
]
writeoff_move_id = move_obj.create(cr, uid, {
'period_id': writeoff_period_id,
'journal_id': writeoff_journal_id,
'date':date,
'state': 'draft',
'line_id': writeoff_lines
})
writeoff_line_ids = self.search(cr, uid, [('move_id', '=', writeoff_move_id), ('account_id', '=', account_id)])
if account_id == writeoff_acc_id:
writeoff_line_ids = [writeoff_line_ids[1]]
ids += writeoff_line_ids
# marking the lines as reconciled does not change their validity, so there is no need
# to revalidate their moves completely.
reconcile_context = dict(context, novalidate=True)
r_id = move_rec_obj.create(cr, uid, {
'type': type,
'line_id': map(lambda x: (4, x, False), ids),
'line_partial_ids': map(lambda x: (3, x, False), ids)
}, context=reconcile_context)
# the id of the move.reconcile is written in the move.line (self) by the create method above
# because of the way the line_id are defined: (4, x, False)
for id in ids:
workflow.trg_trigger(uid, 'account.move.line', id, cr)
if lines and lines[0]:
partner_id = lines[0].partner_id and lines[0].partner_id.id or False
if partner_id and not partner_obj.has_something_to_reconcile(cr, uid, partner_id, context=context):
partner_obj.mark_as_reconciled(cr, uid, [partner_id], context=context)
return r_id
def view_header_get(self, cr, user, view_id, view_type, context=None):
if context is None:
context = {}
context = self.convert_to_period(cr, user, context=context)
if context.get('account_id', False):
cr.execute('SELECT code FROM account_account WHERE id = %s', (context['account_id'], ))
res = cr.fetchone()
if res:
res = _('Entries: ')+ (res[0] or '')
return res
if (not context.get('journal_id', False)) or (not context.get('period_id', False)):
return False
if context.get('search_default_journal_id', False):
context['journal_id'] = context.get('search_default_journal_id')
cr.execute('SELECT code FROM account_journal WHERE id = %s', (context['journal_id'], ))
j = cr.fetchone()[0] or ''
cr.execute('SELECT code FROM account_period WHERE id = %s', (context['period_id'], ))
p = cr.fetchone()[0] or ''
if j or p:
return j + (p and (':' + p) or '')
return False
def onchange_date(self, cr, user, ids, date, context=None):
"""
Returns a dict that contains new values and context
@param cr: A database cursor
@param user: ID of the user currently logged in
@param date: latest value from user input for field date
@param args: other arguments
@param context: context arguments, like lang, time zone
@return: Returns a dict which contains new values, and context
"""
res = {}
if context is None:
context = {}
period_pool = self.pool.get('account.period')
pids = period_pool.find(cr, user, date, context=context)
if pids:
res.update({'period_id':pids[0]})
context = dict(context, period_id=pids[0])
return {
'value':res,
'context':context,
}
def _check_moves(self, cr, uid, context=None):
# use the first move ever created for this journal and period
if context is None:
context = {}
cr.execute('SELECT id, state, name FROM account_move WHERE journal_id = %s AND period_id = %s ORDER BY id limit 1', (context['journal_id'],context['period_id']))
res = cr.fetchone()
if res:
if res[1] != 'draft':
raise UserError(_('The account move (%s) for centralisation has been confirmed.') % res[2])
return res
def _remove_move_reconcile(self, cr, uid, move_ids=None, opening_reconciliation=False, context=None):
        # Remove the reconciliation records (full and partial) linked to the given moves
obj_move_line = self.pool.get('account.move.line')
obj_move_rec = self.pool.get('account.move.reconcile')
unlink_ids = []
if not move_ids:
return True
recs = obj_move_line.read(cr, uid, move_ids, ['reconcile_id', 'reconcile_partial_id'])
full_recs = filter(lambda x: x['reconcile_id'], recs)
rec_ids = [rec['reconcile_id'][0] for rec in full_recs]
part_recs = filter(lambda x: x['reconcile_partial_id'], recs)
part_rec_ids = [rec['reconcile_partial_id'][0] for rec in part_recs]
unlink_ids += rec_ids
unlink_ids += part_rec_ids
all_moves = obj_move_line.search(cr, uid, ['|',('reconcile_id', 'in', unlink_ids),('reconcile_partial_id', 'in', unlink_ids)])
all_moves = list(set(all_moves) - set(move_ids))
if unlink_ids:
if opening_reconciliation:
raise UserError(_('Opening Entries have already been generated. Please run "Cancel Closing Entries" wizard to cancel those entries and then run this wizard.'))
obj_move_rec.write(cr, uid, unlink_ids, {'opening_reconciliation': False})
obj_move_rec.unlink(cr, uid, unlink_ids)
if len(all_moves) >= 2:
obj_move_line.reconcile_partial(cr, uid, all_moves, 'auto',context=context)
return True
def unlink(self, cr, uid, ids, context=None, check=True):
if context is None:
context = {}
move_obj = self.pool.get('account.move')
self._update_check(cr, uid, ids, context)
result = False
move_ids = set()
for line in self.browse(cr, uid, ids, context=context):
move_ids.add(line.move_id.id)
localcontext = dict(context)
localcontext['journal_id'] = line.journal_id.id
localcontext['period_id'] = line.period_id.id
result = super(account_move_line, self).unlink(cr, uid, [line.id], context=localcontext)
move_ids = list(move_ids)
if check and move_ids:
move_obj.validate(cr, uid, move_ids, context=context)
return result
def write(self, cr, uid, ids, vals, context=None, check=True, update_check=True):
if context is None:
context={}
move_obj = self.pool.get('account.move')
account_obj = self.pool.get('account.account')
journal_obj = self.pool.get('account.journal')
if isinstance(ids, (int, long)):
ids = [ids]
if vals.get('account_tax_id', False):
raise UserError(_('You cannot change the tax, you should remove and recreate lines.'))
if ('account_id' in vals) and not account_obj.read(cr, uid, vals['account_id'], ['active'])['active']:
raise UserError(_('You cannot use an inactive account.'))
affects_move = any(f in vals for f in ('account_id', 'journal_id', 'period_id', 'move_id', 'debit', 'credit', 'date'))
if update_check and affects_move:
self._update_check(cr, uid, ids, context)
todo_date = None
if vals.get('date', False):
todo_date = vals['date']
del vals['date']
for line in self.browse(cr, uid, ids, context=context):
ctx = context.copy()
if not ctx.get('journal_id'):
if line.move_id:
ctx['journal_id'] = line.move_id.journal_id.id
else:
ctx['journal_id'] = line.journal_id.id
if not ctx.get('period_id'):
if line.move_id:
ctx['period_id'] = line.move_id.period_id.id
else:
ctx['period_id'] = line.period_id.id
#Check for centralisation
journal = journal_obj.browse(cr, uid, ctx['journal_id'], context=ctx)
if journal.centralisation:
self._check_moves(cr, uid, context=ctx)
result = super(account_move_line, self).write(cr, uid, ids, vals, context)
if affects_move and check and not context.get('novalidate'):
done = []
for line in self.browse(cr, uid, ids):
if line.move_id.id not in done:
done.append(line.move_id.id)
move_obj.validate(cr, uid, [line.move_id.id], context)
if todo_date:
move_obj.write(cr, uid, [line.move_id.id], {'date': todo_date}, context=context)
return result
def _update_journal_check(self, cr, uid, journal_id, period_id, context=None):
journal_obj = self.pool.get('account.journal')
period_obj = self.pool.get('account.period')
jour_period_obj = self.pool.get('account.journal.period')
cr.execute('SELECT state FROM account_journal_period WHERE journal_id = %s AND period_id = %s', (journal_id, period_id))
result = cr.fetchall()
journal = journal_obj.browse(cr, uid, journal_id, context=context)
period = period_obj.browse(cr, uid, period_id, context=context)
for (state,) in result:
if state == 'done':
                raise UserError(_('You cannot add/modify entries in a closed period %s of journal %s.') % (period.name, journal.name))
if not result:
jour_period_obj.create(cr, uid, {
'name': (journal.code or journal.name)+':'+(period.name or ''),
'journal_id': journal.id,
'period_id': period.id
})
return True
def _update_check(self, cr, uid, ids, context=None):
done = {}
for line in self.browse(cr, uid, ids, context=context):
err_msg = _('Move name (id): %s (%s)') % (line.move_id.name, str(line.move_id.id))
            if line.move_id.state != 'draft' and not line.journal_id.entry_posted:
                raise UserError(_('You cannot do this modification on a confirmed entry. You can just change some non-legal fields or you must unconfirm the journal entry first.\n%s.') % err_msg)
            if line.reconcile_id:
                raise UserError(_('You cannot do this modification on a reconciled entry. You can just change some non-legal fields or you must unreconcile first.\n%s.') % err_msg)
t = (line.journal_id.id, line.period_id.id)
if t not in done:
self._update_journal_check(cr, uid, line.journal_id.id, line.period_id.id, context)
done[t] = True
return True
def create(self, cr, uid, vals, context=None, check=True):
account_obj = self.pool.get('account.account')
tax_obj = self.pool.get('account.tax')
move_obj = self.pool.get('account.move')
cur_obj = self.pool.get('res.currency')
journal_obj = self.pool.get('account.journal')
context = dict(context or {})
if vals.get('move_id', False):
move = self.pool.get('account.move').browse(cr, uid, vals['move_id'], context=context)
if move.company_id:
vals['company_id'] = move.company_id.id
if move.date and not vals.get('date'):
vals['date'] = move.date
if ('account_id' in vals) and not account_obj.read(cr, uid, [vals['account_id']], ['active'])[0]['active']:
raise UserError(_('You cannot use an inactive account.'))
if 'journal_id' in vals and vals['journal_id']:
context['journal_id'] = vals['journal_id']
if 'period_id' in vals and vals['period_id']:
context['period_id'] = vals['period_id']
if ('journal_id' not in context) and ('move_id' in vals) and vals['move_id']:
m = move_obj.browse(cr, uid, vals['move_id'])
context['journal_id'] = m.journal_id.id
context['period_id'] = m.period_id.id
#we need to treat the case where a value is given in the context for period_id as a string
if 'period_id' in context and not isinstance(context.get('period_id', ''), (int, long)):
period_candidate_ids = self.pool.get('account.period').name_search(cr, uid, name=context.get('period_id',''))
if len(period_candidate_ids) != 1:
raise UserError(_('No period found or more than one period found for the given date.'))
context['period_id'] = period_candidate_ids[0][0]
if not context.get('journal_id', False) and context.get('search_default_journal_id', False):
context['journal_id'] = context.get('search_default_journal_id')
self._update_journal_check(cr, uid, context['journal_id'], context['period_id'], context)
move_id = vals.get('move_id', False)
journal = journal_obj.browse(cr, uid, context['journal_id'], context=context)
vals['journal_id'] = vals.get('journal_id') or context.get('journal_id')
vals['period_id'] = vals.get('period_id') or context.get('period_id')
vals['date'] = vals.get('date') or context.get('date')
if not move_id:
if journal.centralisation:
#Check for centralisation
res = self._check_moves(cr, uid, context)
if res:
vals['move_id'] = res[0]
if not vals.get('move_id', False):
if journal.sequence_id:
#name = self.pool.get('ir.sequence').next_by_id(cr, uid, journal.sequence_id.id)
v = {
'date': vals.get('date', time.strftime('%Y-%m-%d')),
'period_id': context['period_id'],
'journal_id': context['journal_id']
}
if vals.get('ref', ''):
v.update({'ref': vals['ref']})
move_id = move_obj.create(cr, uid, v, context)
vals['move_id'] = move_id
else:
raise UserError(_('Cannot create an automatic sequence for this piece.\nPut a sequence in the journal definition for automatic numbering or create a sequence manually for this piece.'))
ok = not (journal.type_control_ids or journal.account_control_ids)
if ('account_id' in vals):
account = account_obj.browse(cr, uid, vals['account_id'], context=context)
if journal.type_control_ids:
type = account.user_type
for t in journal.type_control_ids:
if type.code == t.code:
ok = True
break
if journal.account_control_ids and not ok:
for a in journal.account_control_ids:
if a.id == vals['account_id']:
ok = True
break
# Automatically convert in the account's secondary currency if there is one and
# the provided values were not already multi-currency
if account.currency_id and 'amount_currency' not in vals and account.currency_id.id != account.company_id.currency_id.id:
vals['currency_id'] = account.currency_id.id
ctx = {}
if 'date' in vals:
ctx['date'] = vals['date']
vals['amount_currency'] = cur_obj.compute(cr, uid, account.company_id.currency_id.id,
account.currency_id.id, vals.get('debit', 0.0)-vals.get('credit', 0.0), context=ctx)
if not ok:
raise UserError(_('You cannot use this general account in this journal, check the tab \'Entry Controls\' on the related journal.'))
result = super(account_move_line, self).create(cr, uid, vals, context=context)
# CREATE Taxes
if vals.get('account_tax_id', False):
tax_id = tax_obj.browse(cr, uid, vals['account_tax_id'])
total = vals['debit'] - vals['credit']
base_code = 'base_code_id'
tax_code = 'tax_code_id'
account_id = 'account_collected_id'
base_sign = 'base_sign'
tax_sign = 'tax_sign'
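            # Refund journals (and negative totals in cash/bank journals) post
            # against the refund base/tax codes and the 'paid' account instead
            # of the 'collected' one.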
if journal.type in ('purchase_refund', 'sale_refund') or (journal.type in ('cash', 'bank') and total < 0):
base_code = 'ref_base_code_id'
tax_code = 'ref_tax_code_id'
account_id = 'account_paid_id'
base_sign = 'ref_base_sign'
tax_sign = 'ref_tax_sign'
base_adjusted = False
for tax in tax_obj.compute_all(cr, uid, [tax_id], total, 1.00, force_excluded=False).get('taxes'):
#create the base movement
                if not base_adjusted:
base_adjusted = True
if tax_id.price_include:
total = tax['price_unit']
newvals = {
'tax_code_id': tax[base_code],
'tax_amount': tax[base_sign] * abs(total),
}
if tax_id.price_include:
if tax['price_unit'] < 0:
newvals['credit'] = abs(tax['price_unit'])
else:
newvals['debit'] = tax['price_unit']
self.write(cr, uid, [result], newvals, context=context)
else:
data = {
'move_id': vals['move_id'],
'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
'date': vals['date'],
'partner_id': vals.get('partner_id', False),
'ref': vals.get('ref', False),
'statement_id': vals.get('statement_id', False),
'account_tax_id': False,
'tax_code_id': tax[base_code],
'tax_amount': tax[base_sign] * abs(total),
'account_id': vals['account_id'],
'credit': 0.0,
'debit': 0.0,
}
self.create(cr, uid, data, context)
#create the Tax movement
if not tax['amount'] and not tax[tax_code]:
continue
data = {
'move_id': vals['move_id'],
'name': tools.ustr(vals['name'] or '') + ' ' + tools.ustr(tax['name'] or ''),
'date': vals['date'],
'partner_id': vals.get('partner_id',False),
'ref': vals.get('ref',False),
'statement_id': vals.get('statement_id', False),
'account_tax_id': False,
'tax_code_id': tax[tax_code],
'tax_amount': tax[tax_sign] * abs(tax['amount']),
'account_id': tax[account_id] or vals['account_id'],
'credit': tax['amount']<0 and -tax['amount'] or 0.0,
'debit': tax['amount']>0 and tax['amount'] or 0.0,
}
self.create(cr, uid, data, context)
del vals['account_tax_id']
recompute = journal.env.recompute and context.get('recompute', True)
if check and not context.get('novalidate') and (recompute or journal.entry_posted):
tmp = move_obj.validate(cr, uid, [vals['move_id']], context)
if journal.entry_posted and tmp:
move_obj.button_validate(cr,uid, [vals['move_id']], context)
return result
def list_periods(self, cr, uid, context=None):
ids = self.pool.get('account.period').search(cr,uid,[])
return self.pool.get('account.period').name_get(cr, uid, ids, context=context)
def list_journals(self, cr, uid, context=None):
ng = dict(self.pool.get('account.journal').name_search(cr,uid,'',[]))
ids = ng.keys()
result = []
for journal in self.pool.get('account.journal').browse(cr, uid, ids, context=context):
result.append((journal.id,ng[journal.id],journal.type,
bool(journal.currency),bool(journal.analytic_journal_id)))
return result
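
# A standalone sketch (plain Python, outside of Odoo; the helper name is
# illustrative) of the write-off split performed in reconcile() above: a
# positive balance becomes a credit on the reconciled account and a debit
# on the write-off account, and vice versa for a negative balance.
def _writeoff_split_sketch(debit_total, credit_total):
    writeoff = debit_total - credit_total
    if writeoff > 0:
        own = {'debit': 0.0, 'credit': writeoff}          # reconciled account
        counterpart = {'debit': writeoff, 'credit': 0.0}  # write-off account
    else:
        own = {'debit': -writeoff, 'credit': 0.0}
        counterpart = {'debit': 0.0, 'credit': -writeoff}
    return own, counterpart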
|
Manojkumar91/odoo_inresto
|
addons/account/account_move_line.py
|
Python
|
agpl-3.0
| 77,779
|
#coding=utf-8
class GeneralSpec:
def __init__(self, args):
self._paymentTerms = int(args['payment_terms'])
self._currency = str(args['currency'])
self._vat = float(args['vat'])
def paymentTerms(self):
return self._paymentTerms
def currency(self):
return self._currency
def vat(self):
return self._vat
def prettyVat(self):
vat = self.vat() * 100
        if int(vat) == vat:
vat = int(vat)
return str(vat) + " %"
if __name__ == "__main__":
config = {
"payment_terms":30,
"currency":"SEK",
"vat":0.25
}
genSpec = GeneralSpec(config)
assert(genSpec.paymentTerms() == config['payment_terms'])
assert(genSpec.currency() == config['currency'])
assert(genSpec.vat() == config['vat'])
assert(genSpec.prettyVat() == "25 %")
config['vat'] = 0.125
genSpec = GeneralSpec(config)
assert(genSpec.prettyVat() == "12.5 %")
|
SudoQ/yig
|
general_spec.py
|
Python
|
mit
| 857
|
"""Stuff to parse AIFF-C and AIFF files.
Unless explicitly stated otherwise, the description below is true
both for AIFF-C files and AIFF files.
An AIFF-C file has the following structure.
+-----------------+
| FORM |
+-----------------+
| <size> |
+----+------------+
| | AIFC |
| +------------+
| | <chunks> |
| | . |
| | . |
| | . |
+----+------------+
An AIFF file has the string "AIFF" instead of "AIFC".
A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
big endian order), followed by the data. The size field does not include
the size of the 8 byte header.
The following chunk types are recognized.
FVER
<version number of AIFF-C defining document> (AIFF-C only).
MARK
<# of markers> (2 bytes)
list of markers:
<marker ID> (2 bytes, must be > 0)
<position> (4 bytes)
<marker name> ("pstring")
COMM
<# of channels> (2 bytes)
<# of sound frames> (4 bytes)
<size of the samples> (2 bytes)
<sampling frequency> (10 bytes, IEEE 80-bit extended
floating point)
in AIFF-C files only:
<compression type> (4 bytes)
<human-readable version of compression type> ("pstring")
SSND
<offset> (4 bytes, not used by this program)
<blocksize> (4 bytes, not used by this program)
<sound data>
A pstring consists of 1 byte length, a string of characters, and 0 or 1
byte pad to make the total length even.
Usage.
Reading AIFF files:
f = aifc.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
In some types of audio files, if the setpos() method is not used,
the seek() method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for AIFF files)
getcompname() -- returns human-readable version of
compression type ('not compressed' for AIFF files)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- get the list of marks in the audio file or None
if there are no marks
getmark(id) -- get mark with the specified id (raises an error
if the mark does not exist)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell(), the position given to setpos() and
the position of marks are all compatible and have nothing to do with
the actual position in the file.
The close() method is called automatically when the class instance
is destroyed.
Writing AIFF files:
f = aifc.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
aiff() -- create an AIFF file (AIFF-C default)
aifc() -- create an AIFF-C file
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
setmark(id, pos, name)
-- add specified mark to the list of marks
tell() -- return current position in output file (useful
in combination with setmark())
writeframesraw(data)
                -- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, possibly including the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
Marks can be added anytime. If there are any marks, you must call
close() after all frames have been written.
The close() method is called automatically when the class instance
is destroyed.
When a file is opened with the extension '.aiff', an AIFF file is
written, otherwise an AIFF-C file is written. This default can be
changed by calling aiff() or aifc() before the first writeframes or
writeframesraw.
"""
import struct
import __builtin__
__all__ = ["Error","open","openfp"]
class Error(Exception):
pass
_AIFC_version = 0xA2805140L # Version 1 of AIFF-C
def _read_long(file):
try:
return struct.unpack('>l', file.read(4))[0]
except struct.error:
raise EOFError
def _read_ulong(file):
try:
return struct.unpack('>L', file.read(4))[0]
except struct.error:
raise EOFError
def _read_short(file):
try:
return struct.unpack('>h', file.read(2))[0]
except struct.error:
raise EOFError
def _read_string(file):
length = ord(file.read(1))
if length == 0:
data = ''
else:
data = file.read(length)
if length & 1 == 0:
dummy = file.read(1)
return data
_HUGE_VAL = 1.79769313486231e+308 # See <limits.h>
def _read_float(f): # 10 bytes
expon = _read_short(f) # 2 bytes
sign = 1
if expon < 0:
sign = -1
expon = expon + 0x8000
himant = _read_ulong(f) # 4 bytes
lomant = _read_ulong(f) # 4 bytes
if expon == himant == lomant == 0:
f = 0.0
elif expon == 0x7FFF:
f = _HUGE_VAL
else:
expon = expon - 16383
f = (himant * 0x100000000L + lomant) * pow(2.0, expon - 63)
return sign * f
def _write_short(f, x):
f.write(struct.pack('>h', x))
def _write_long(f, x):
f.write(struct.pack('>L', x))
def _write_string(f, s):
if len(s) > 255:
raise ValueError("string exceeds maximum pstring length")
f.write(chr(len(s)))
f.write(s)
if len(s) & 1 == 0:
f.write(chr(0))
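# Hypothetical round-trip check (not part of the public API) for the
# "pstring" encoding described in the module docstring: one length byte,
# the characters, and a pad byte when needed to keep the total length even.
def _pstring_roundtrip_sketch(s='NONE'):
    import io
    buf = io.BytesIO()
    _write_string(buf, s)
    assert buf.tell() % 2 == 0  # 1 length byte + data + optional pad
    buf.seek(0)
    return _read_string(buf) == s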
def _write_float(f, x):
import math
if x < 0:
sign = 0x8000
x = x * -1
else:
sign = 0
if x == 0:
expon = 0
himant = 0
lomant = 0
else:
fmant, expon = math.frexp(x)
if expon > 16384 or fmant >= 1: # Infinity or NaN
expon = sign|0x7FFF
himant = 0
lomant = 0
else: # Finite
expon = expon + 16382
if expon < 0: # denormalized
fmant = math.ldexp(fmant, expon)
expon = 0
expon = expon | sign
fmant = math.ldexp(fmant, 32)
fsmant = math.floor(fmant)
himant = long(fsmant)
fmant = math.ldexp(fmant - fsmant, 32)
fsmant = math.floor(fmant)
lomant = long(fsmant)
_write_short(f, expon)
_write_long(f, himant)
_write_long(f, lomant)
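# Hypothetical self-check for the 80-bit extended float codec above: write a
# value with _write_float() and read it back with _read_float(); ordinary
# sampling rates such as 44100.0 survive the round trip exactly.
def _extended_float_roundtrip_sketch(x=44100.0):
    import io
    buf = io.BytesIO()
    _write_float(buf, x)
    buf.seek(0)
    return _read_float(buf) == x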
from chunk import Chunk
class Aifc_read:
# Variables used in this class:
#
# These variables are available to the user though appropriate
# methods of this class:
# _file -- the open file with methods read(), close(), and seek()
# set through the __init__() method
# _nchannels -- the number of audio channels
# available through the getnchannels() method
# _nframes -- the number of audio frames
# available through the getnframes() method
# _sampwidth -- the number of bytes per audio sample
# available through the getsampwidth() method
# _framerate -- the sampling frequency
# available through the getframerate() method
# _comptype -- the AIFF-C compression type ('NONE' if AIFF)
# available through the getcomptype() method
# _compname -- the human-readable AIFF-C compression type
# available through the getcomptype() method
# _markers -- the marks in the audio file
# available through the getmarkers() and getmark()
# methods
# _soundpos -- the position in the audio stream
# available through the tell() method, set through the
# setpos() method
#
# These variables are used internally only:
# _version -- the AIFF-C version number
# _decomp -- the decompressor from builtin module cl
# _comm_chunk_read -- 1 iff the COMM chunk has been read
# _aifc -- 1 iff reading an AIFF-C file
# _ssnd_seek_needed -- 1 iff positioned correctly in audio
# file for readframes()
# _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
# _framesize -- size of one frame in the file
def initfp(self, file):
self._version = 0
self._decomp = None
self._convert = None
self._markers = []
self._soundpos = 0
self._file = file
chunk = Chunk(file)
if chunk.getname() != 'FORM':
raise Error, 'file does not start with FORM id'
formdata = chunk.read(4)
if formdata == 'AIFF':
self._aifc = 0
elif formdata == 'AIFC':
self._aifc = 1
else:
raise Error, 'not an AIFF or AIFF-C file'
self._comm_chunk_read = 0
while 1:
self._ssnd_seek_needed = 1
try:
chunk = Chunk(self._file)
except EOFError:
break
chunkname = chunk.getname()
if chunkname == 'COMM':
self._read_comm_chunk(chunk)
self._comm_chunk_read = 1
elif chunkname == 'SSND':
self._ssnd_chunk = chunk
dummy = chunk.read(8)
self._ssnd_seek_needed = 0
elif chunkname == 'FVER':
self._version = _read_ulong(chunk)
elif chunkname == 'MARK':
self._readmark(chunk)
chunk.skip()
if not self._comm_chunk_read or not self._ssnd_chunk:
raise Error, 'COMM chunk and/or SSND chunk missing'
if self._aifc and self._decomp:
import cl
params = [cl.ORIGINAL_FORMAT, 0,
cl.BITS_PER_COMPONENT, self._sampwidth * 8,
cl.FRAME_RATE, self._framerate]
if self._nchannels == 1:
params[1] = cl.MONO
elif self._nchannels == 2:
params[1] = cl.STEREO_INTERLEAVED
else:
raise Error, 'cannot compress more than 2 channels'
self._decomp.SetParams(params)
def __init__(self, f):
if type(f) == type(''):
f = __builtin__.open(f, 'rb')
# else, assume it is an open file object already
self.initfp(f)
#
# User visible methods.
#
def getfp(self):
return self._file
def rewind(self):
self._ssnd_seek_needed = 1
self._soundpos = 0
def close(self):
if self._decomp:
self._decomp.CloseDecompressor()
self._decomp = None
self._file.close()
def tell(self):
return self._soundpos
def getnchannels(self):
return self._nchannels
def getnframes(self):
return self._nframes
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
## def getversion(self):
## return self._version
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def getmarkers(self):
if len(self._markers) == 0:
return None
return self._markers
def getmark(self, id):
for marker in self._markers:
if id == marker[0]:
return marker
raise Error, 'marker %r does not exist' % (id,)
def setpos(self, pos):
if pos < 0 or pos > self._nframes:
raise Error, 'position not in range'
self._soundpos = pos
self._ssnd_seek_needed = 1
def readframes(self, nframes):
if self._ssnd_seek_needed:
self._ssnd_chunk.seek(0)
dummy = self._ssnd_chunk.read(8)
pos = self._soundpos * self._framesize
if pos:
self._ssnd_chunk.seek(pos + 8)
self._ssnd_seek_needed = 0
if nframes == 0:
return ''
data = self._ssnd_chunk.read(nframes * self._framesize)
if self._convert and data:
data = self._convert(data)
self._soundpos = self._soundpos + len(data) // (self._nchannels * self._sampwidth)
return data
#
# Internal methods.
#
def _decomp_data(self, data):
import cl
dummy = self._decomp.SetParam(cl.FRAME_BUFFER_SIZE,
len(data) * 2)
return self._decomp.Decompress(len(data) // self._nchannels,
data)
def _ulaw2lin(self, data):
import audioop
return audioop.ulaw2lin(data, 2)
def _adpcm2lin(self, data):
import audioop
if not hasattr(self, '_adpcmstate'):
# first time
self._adpcmstate = None
data, self._adpcmstate = audioop.adpcm2lin(data, 2,
self._adpcmstate)
return data
def _read_comm_chunk(self, chunk):
self._nchannels = _read_short(chunk)
self._nframes = _read_long(chunk)
self._sampwidth = (_read_short(chunk) + 7) // 8
self._framerate = int(_read_float(chunk))
self._framesize = self._nchannels * self._sampwidth
if self._aifc:
#DEBUG: SGI's soundeditor produces a bad size :-(
kludge = 0
if chunk.chunksize == 18:
kludge = 1
print 'Warning: bad COMM chunk size'
chunk.chunksize = 23
#DEBUG end
self._comptype = chunk.read(4)
#DEBUG start
if kludge:
length = ord(chunk.file.read(1))
if length & 1 == 0:
length = length + 1
chunk.chunksize = chunk.chunksize + length
chunk.file.seek(-1, 1)
#DEBUG end
self._compname = _read_string(chunk)
if self._comptype != 'NONE':
if self._comptype == 'G722':
try:
import audioop
except ImportError:
pass
else:
self._convert = self._adpcm2lin
self._framesize = self._framesize // 4
return
# for ULAW and ALAW try Compression Library
try:
import cl
except ImportError:
if self._comptype == 'ULAW':
try:
import audioop
self._convert = self._ulaw2lin
self._framesize = self._framesize // 2
return
except ImportError:
pass
raise Error, 'cannot read compressed AIFF-C files'
if self._comptype == 'ULAW':
scheme = cl.G711_ULAW
self._framesize = self._framesize // 2
elif self._comptype == 'ALAW':
scheme = cl.G711_ALAW
self._framesize = self._framesize // 2
else:
raise Error, 'unsupported compression type'
self._decomp = cl.OpenDecompressor(scheme)
self._convert = self._decomp_data
else:
self._comptype = 'NONE'
self._compname = 'not compressed'
def _readmark(self, chunk):
nmarkers = _read_short(chunk)
# Some files appear to contain invalid counts.
# Cope with this by testing for EOF.
try:
for i in range(nmarkers):
id = _read_short(chunk)
pos = _read_long(chunk)
name = _read_string(chunk)
if pos or name:
# some files appear to have
# dummy markers consisting of
# a position 0 and name ''
self._markers.append((id, pos, name))
except EOFError:
print 'Warning: MARK chunk contains only',
print len(self._markers),
if len(self._markers) == 1: print 'marker',
else: print 'markers',
print 'instead of', nmarkers
class Aifc_write:
# Variables used in this class:
#
# These variables are user settable through appropriate methods
# of this class:
# _file -- the open file with methods write(), close(), tell(), seek()
# set through the __init__() method
# _comptype -- the AIFF-C compression type ('NONE' in AIFF)
# set through the setcomptype() or setparams() method
# _compname -- the human-readable AIFF-C compression type
# set through the setcomptype() or setparams() method
# _nchannels -- the number of audio channels
# set through the setnchannels() or setparams() method
# _sampwidth -- the number of bytes per audio sample
# set through the setsampwidth() or setparams() method
# _framerate -- the sampling frequency
# set through the setframerate() or setparams() method
# _nframes -- the number of audio frames written to the header
# set through the setnframes() or setparams() method
# _aifc -- whether we're writing an AIFF-C file or an AIFF file
# set through the aifc() method, reset through the
# aiff() method
#
# These variables are used internally only:
# _version -- the AIFF-C version number
# _comp -- the compressor from builtin module cl
# _nframeswritten -- the number of audio frames actually written
# _datalength -- the size of the audio samples written to the header
# _datawritten -- the size of the audio samples actually written
def __init__(self, f):
if type(f) == type(''):
filename = f
f = __builtin__.open(f, 'wb')
else:
# else, assume it is an open file object already
filename = '???'
self.initfp(f)
if filename[-5:] == '.aiff':
self._aifc = 0
else:
self._aifc = 1
def initfp(self, file):
self._file = file
self._version = _AIFC_version
self._comptype = 'NONE'
self._compname = 'not compressed'
self._comp = None
self._convert = None
self._nchannels = 0
self._sampwidth = 0
self._framerate = 0
self._nframes = 0
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._markers = []
self._marklength = 0
self._aifc = 1 # AIFF-C is default
def __del__(self):
if self._file:
self.close()
#
# User visible methods.
#
def aiff(self):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
self._aifc = 0
def aifc(self):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
self._aifc = 1
def setnchannels(self, nchannels):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if nchannels < 1:
raise Error, 'bad # of channels'
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error, 'number of channels not set'
return self._nchannels
def setsampwidth(self, sampwidth):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if sampwidth < 1 or sampwidth > 4:
raise Error, 'bad sample width'
self._sampwidth = sampwidth
def getsampwidth(self):
if not self._sampwidth:
raise Error, 'sample width not set'
return self._sampwidth
def setframerate(self, framerate):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if framerate <= 0:
raise Error, 'bad frame rate'
self._framerate = framerate
def getframerate(self):
if not self._framerate:
raise Error, 'frame rate not set'
return self._framerate
def setnframes(self, nframes):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, comptype, compname):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
raise Error, 'unsupported compression type'
self._comptype = comptype
self._compname = compname
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
## def setversion(self, version):
## if self._nframeswritten:
## raise Error, 'cannot change parameters after starting to write'
## self._version = version
def setparams(self, info):
nchannels, sampwidth, framerate, nframes, comptype, compname = info
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if comptype not in ('NONE', 'ULAW', 'ALAW', 'G722'):
raise Error, 'unsupported compression type'
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
if not self._nchannels or not self._sampwidth or not self._framerate:
raise Error, 'not all parameters set'
return self._nchannels, self._sampwidth, self._framerate, \
self._nframes, self._comptype, self._compname
def setmark(self, id, pos, name):
if id <= 0:
raise Error, 'marker ID must be > 0'
if pos < 0:
raise Error, 'marker position must be >= 0'
if type(name) != type(''):
raise Error, 'marker name must be a string'
for i in range(len(self._markers)):
if id == self._markers[i][0]:
self._markers[i] = id, pos, name
return
self._markers.append((id, pos, name))
def getmark(self, id):
for marker in self._markers:
if id == marker[0]:
return marker
raise Error, 'marker %r does not exist' % (id,)
def getmarkers(self):
if len(self._markers) == 0:
return None
return self._markers
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
self._ensure_header_written(len(data))
nframes = len(data) // (self._sampwidth * self._nchannels)
if self._convert:
data = self._convert(data)
self._file.write(data)
self._nframeswritten = self._nframeswritten + nframes
self._datawritten = self._datawritten + len(data)
def writeframes(self, data):
self.writeframesraw(data)
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten:
self._patchheader()
def close(self):
self._ensure_header_written(0)
if self._datawritten & 1:
# quick pad to even size
self._file.write(chr(0))
self._datawritten = self._datawritten + 1
self._writemarkers()
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten or \
self._marklength:
self._patchheader()
if self._comp:
self._comp.CloseCompressor()
self._comp = None
# Prevent ref cycles
self._convert = None
self._file.close()
#
# Internal methods.
#
def _comp_data(self, data):
import cl
dummy = self._comp.SetParam(cl.FRAME_BUFFER_SIZE, len(data))
dummy = self._comp.SetParam(cl.COMPRESSED_BUFFER_SIZE, len(data))
return self._comp.Compress(self._nframes, data)
def _lin2ulaw(self, data):
import audioop
return audioop.lin2ulaw(data, 2)
def _lin2adpcm(self, data):
import audioop
if not hasattr(self, '_adpcmstate'):
self._adpcmstate = None
data, self._adpcmstate = audioop.lin2adpcm(data, 2,
self._adpcmstate)
return data
def _ensure_header_written(self, datasize):
if not self._nframeswritten:
if self._comptype in ('ULAW', 'ALAW'):
if not self._sampwidth:
self._sampwidth = 2
if self._sampwidth != 2:
raise Error, 'sample width must be 2 when compressing with ULAW or ALAW'
if self._comptype == 'G722':
if not self._sampwidth:
self._sampwidth = 2
if self._sampwidth != 2:
raise Error, 'sample width must be 2 when compressing with G7.22 (ADPCM)'
if not self._nchannels:
raise Error, '# channels not specified'
if not self._sampwidth:
raise Error, 'sample width not specified'
if not self._framerate:
raise Error, 'sampling rate not specified'
self._write_header(datasize)
def _init_compression(self):
if self._comptype == 'G722':
self._convert = self._lin2adpcm
return
try:
import cl
except ImportError:
if self._comptype == 'ULAW':
try:
import audioop
self._convert = self._lin2ulaw
return
except ImportError:
pass
raise Error, 'cannot write compressed AIFF-C files'
if self._comptype == 'ULAW':
scheme = cl.G711_ULAW
elif self._comptype == 'ALAW':
scheme = cl.G711_ALAW
else:
raise Error, 'unsupported compression type'
self._comp = cl.OpenCompressor(scheme)
params = [cl.ORIGINAL_FORMAT, 0,
cl.BITS_PER_COMPONENT, self._sampwidth * 8,
cl.FRAME_RATE, self._framerate,
cl.FRAME_BUFFER_SIZE, 100,
cl.COMPRESSED_BUFFER_SIZE, 100]
if self._nchannels == 1:
params[1] = cl.MONO
elif self._nchannels == 2:
params[1] = cl.STEREO_INTERLEAVED
else:
raise Error, 'cannot compress more than 2 channels'
self._comp.SetParams(params)
# the compressor produces a header which we ignore
dummy = self._comp.Compress(0, '')
self._convert = self._comp_data
def _write_header(self, initlength):
if self._aifc and self._comptype != 'NONE':
self._init_compression()
self._file.write('FORM')
if not self._nframes:
self._nframes = initlength // (self._nchannels * self._sampwidth)
self._datalength = self._nframes * self._nchannels * self._sampwidth
if self._datalength & 1:
self._datalength = self._datalength + 1
if self._aifc:
if self._comptype in ('ULAW', 'ALAW'):
self._datalength = self._datalength // 2
if self._datalength & 1:
self._datalength = self._datalength + 1
elif self._comptype == 'G722':
self._datalength = (self._datalength + 3) // 4
if self._datalength & 1:
self._datalength = self._datalength + 1
self._form_length_pos = self._file.tell()
commlength = self._write_form_length(self._datalength)
if self._aifc:
self._file.write('AIFC')
self._file.write('FVER')
_write_long(self._file, 4)
_write_long(self._file, self._version)
else:
self._file.write('AIFF')
self._file.write('COMM')
_write_long(self._file, commlength)
_write_short(self._file, self._nchannels)
self._nframes_pos = self._file.tell()
_write_long(self._file, self._nframes)
_write_short(self._file, self._sampwidth * 8)
_write_float(self._file, self._framerate)
if self._aifc:
self._file.write(self._comptype)
_write_string(self._file, self._compname)
self._file.write('SSND')
self._ssnd_length_pos = self._file.tell()
_write_long(self._file, self._datalength + 8)
_write_long(self._file, 0)
_write_long(self._file, 0)
def _write_form_length(self, datalength):
if self._aifc:
commlength = 18 + 5 + len(self._compname)
if commlength & 1:
commlength = commlength + 1
verslength = 12
else:
commlength = 18
verslength = 0
_write_long(self._file, 4 + verslength + self._marklength + \
8 + commlength + 16 + datalength)
return commlength
def _patchheader(self):
curpos = self._file.tell()
if self._datawritten & 1:
datalength = self._datawritten + 1
self._file.write(chr(0))
else:
datalength = self._datawritten
if datalength == self._datalength and \
self._nframes == self._nframeswritten and \
self._marklength == 0:
self._file.seek(curpos, 0)
return
self._file.seek(self._form_length_pos, 0)
dummy = self._write_form_length(datalength)
self._file.seek(self._nframes_pos, 0)
_write_long(self._file, self._nframeswritten)
self._file.seek(self._ssnd_length_pos, 0)
_write_long(self._file, datalength + 8)
self._file.seek(curpos, 0)
self._nframes = self._nframeswritten
self._datalength = datalength
def _writemarkers(self):
if len(self._markers) == 0:
return
self._file.write('MARK')
length = 2
for marker in self._markers:
id, pos, name = marker
length = length + len(name) + 1 + 6
if len(name) & 1 == 0:
length = length + 1
_write_long(self._file, length)
self._marklength = length + 8
_write_short(self._file, len(self._markers))
for marker in self._markers:
id, pos, name = marker
_write_short(self._file, id)
_write_long(self._file, pos)
_write_string(self._file, name)
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Aifc_read(f)
elif mode in ('w', 'wb'):
return Aifc_write(f)
else:
raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
openfp = open # B/W compatibility
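# A minimal writing sketch, assuming only this module: synthesize one second
# of a 440 Hz sine tone and store it as a mono 16-bit AIFF file. The helper
# name and file name are illustrative, not part of the module's API.
def _write_sine_example(path='sine.aiff', rate=8000):
    import math
    g = open(path, 'w')  # the module-level open(), returns an Aifc_write
    g.setnchannels(1)
    g.setsampwidth(2)
    g.setframerate(rate)
    frames = ''.join(struct.pack('>h',
                                 int(32000 * math.sin(2 * math.pi * 440 * i / rate)))
                     for i in range(rate))
    g.writeframes(frames)  # writes the data and patches up the header
    g.close()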
if __name__ == '__main__':
import sys
if not sys.argv[1:]:
sys.argv.append('/usr/demos/data/audio/bach.aiff')
fn = sys.argv[1]
f = open(fn, 'r')
print "Reading", fn
print "nchannels =", f.getnchannels()
print "nframes =", f.getnframes()
print "sampwidth =", f.getsampwidth()
print "framerate =", f.getframerate()
print "comptype =", f.getcomptype()
print "compname =", f.getcompname()
if sys.argv[2:]:
gn = sys.argv[2]
print "Writing", gn
g = open(gn, 'w')
g.setparams(f.getparams())
while 1:
data = f.readframes(1024)
if not data:
break
g.writeframes(data)
g.close()
f.close()
print "Done."
|
ktan2020/legacy-automation
|
win/Lib/aifc.py
|
Python
|
mit
| 34,203
|
import unittest
import datetime
from datetime import datetime as dt
import logging
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from . import models
from . import slides
from . import utils
from . import videos
from . import exporters
from ..accounts import models as account_models
logging.disable(logging.CRITICAL)
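# Minimal stand-in for a schedule event: the grid helpers under test only
# look at the start and end attributes.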
class MockEvent(object):
def __init__(self, start, end):
self.start = start
self.end = end
class ScheduleGeneratorTests(unittest.TestCase):
def test_number_of_rows_in_order(self):
evts = [
MockEvent(dt(2012, 6, 1, 10, 00), dt(2012, 6, 1, 10, 30)),
MockEvent(dt(2012, 6, 1, 10, 30), dt(2012, 6, 1, 11, 00)),
MockEvent(dt(2012, 6, 1, 11, 00), dt(2012, 6, 1, 11, 30)),
]
self.assertEquals(3, utils._get_number_of_rows(evts, 30))
def test_number_of_rows_in_parallel(self):
evts = [
MockEvent(dt(2012, 6, 1, 10, 00), dt(2012, 6, 1, 10, 30)),
MockEvent(dt(2012, 6, 1, 10, 30), dt(2012, 6, 1, 11, 00)),
MockEvent(dt(2012, 6, 1, 10, 30), dt(2012, 6, 1, 11, 00)),
MockEvent(dt(2012, 6, 1, 11, 00), dt(2012, 6, 1, 11, 30)),
]
self.assertEquals(3, utils._get_number_of_rows(evts, 30))
def test_number_of_rows_out_of_order(self):
evts = [
MockEvent(dt(2012, 6, 1, 11, 00), dt(2012, 6, 1, 11, 30)),
MockEvent(dt(2012, 6, 1, 10, 00), dt(2012, 6, 1, 10, 30)),
MockEvent(dt(2012, 6, 1, 10, 30), dt(2012, 6, 1, 11, 00)),
MockEvent(dt(2012, 6, 1, 10, 30), dt(2012, 6, 1, 11, 00)),
]
self.assertEquals(3, utils._get_number_of_rows(evts, 30))
def test_base_grid_creation(self):
start = dt(2012, 6, 1, 9, 0)
end = dt(2012, 6, 1, 10, 30)
self.assertEquals(3, len(utils._create_base_grid(start, end, 30)))
def test_strip_rows(self):
e1 = utils.GridCell(None, 2)
e1.end = dt(2012, 6, 1, 10, 30)
rows = [
utils.GridRow(dt(2012, 6, 1, 9, 0), dt(2012, 6, 1, 9, 30), []),
utils.GridRow(dt(2012, 6, 1, 9, 30), dt(2012, 6, 1, 10, 0), [e1]),
utils.GridRow(dt(2012, 6, 1, 10, 0), dt(2012, 6, 1, 10, 30), []),
utils.GridRow(dt(2012, 6, 1, 10, 30), dt(2012, 6, 1, 11, 00), []),
]
result = utils._strip_empty_rows(rows)
self.assertEquals(2, len(result))
self.assertEquals(rows[1:3], result)
def test_strip_rows_empty(self):
rows = [
utils.GridRow(dt(2012, 6, 1, 9, 0), dt(2012, 6, 1, 9, 30), []),
utils.GridRow(dt(2012, 6, 1, 9, 30), dt(2012, 6, 1, 10, 0), []),
utils.GridRow(dt(2012, 6, 1, 10, 0), dt(2012, 6, 1, 10, 30), []),
utils.GridRow(dt(2012, 6, 1, 10, 30), dt(2012, 6, 1, 11, 00), []),
]
result = utils._strip_empty_rows(rows)
self.assertEquals(0, len(result))
class SlideCodeGeneratorTests(unittest.TestCase):
def test_prezi_match(self):
service = slides.PreziService()
self.assertTrue(service.matches_link('http://prezi.com/mkg9y_pl1cxd/presentation-on-presentations/'))
self.assertFalse(service.matches_link("https://speakerdeck.com/u/speakerdeck/p/introduction-to-speakerdeck"))
def test_prezi_id_extraction(self):
service = slides.PreziService()
self.assertEquals('mkg9y_pl1cxd', service.extract_id('http://prezi.com/mkg9y_pl1cxd/presentation-on-presentations/'))
def test_slideshare_match(self):
service = slides.SlideShareService()
self.assertTrue(service.matches_link('http://www.slideshare.net/zeeg/pycon-2011-scaling-disqus-7251315'))
        self.assertFalse(service.matches_link("https://speakerdeck.com/u/speakerdeck/p/introduction-to-speakerdeck"))
def test_slideshare_https_match(self):
service = slides.SlideShareService()
self.assertTrue(service.matches_link('https://www.slideshare.net/zeeg/pycon-2011-scaling-disqus-7251315'))
        self.assertFalse(service.matches_link("https://speakerdeck.com/u/speakerdeck/p/introduction-to-speakerdeck"))
def test_speakerdeck_match(self):
service = slides.SpeakerDeckService()
self.assertTrue(service.matches_link("https://speakerdeck.com/u/speakerdeck/p/introduction-to-speakerdeck"))
self.assertTrue(service.matches_link("http://speakerdeck.com/u/speakerdeck/p/introduction-to-speakerdeck"))
self.assertFalse(service.matches_link("http://youtube.com"))
def test_speakerdeck_generation(self):
doc = SAMPLE_SPEAKERDECK_DOC
code = slides.SpeakerDeckService().generate_embed_code(None, doc=doc)
self.assertEquals("""<script async class="speakerdeck-embed" data-id="123" data-ratio="1.3333333333333333" src="//speakerdeck.com/assets/embed.js"></script>""",
code)
def test_slideshare_oembed_link(self):
service = slides.SlideShareService()
        self.assertEqual(
            'https://www.slideshare.net/api/oembed/2?format=json&url=http%3A%2F%2Fwww.slideshare.net%2Fzeeg%2Fpycon-2011-scaling-disqus-7251315',
            service.get_oembed_url('http://www.slideshare.net/zeeg/pycon-2011-scaling-disqus-7251315'))
class VideoServiceTests(unittest.TestCase):
def test_youtube_oembed_link(self):
"""
Internal test to make sure that the url generator for youtube works.
"""
service = videos.YouTubeService()
self.assertEquals(
r"https://www.youtube.com/oembed?url=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DcR2XilcGYOo&format=json",
service.generate_oembed_url('https://www.youtube.com/watch?v=cR2XilcGYOo')
)
class PyVideoServiceTests(unittest.TestCase):
service = videos.PyVideoService()
def test_id_extraction(self):
self.assertEquals('1436', self.service.get_video_id('http://pyvideo.org/video/1436/praktische-anwendung-von-metaklassen'))
SAMPLE_SPEAKERDECK_DOC = """<!DOCTYPE html>
<html>
<body class="signed-out">
<div id="content">
<section id="presentation" class="feature">
<div class="wrapper">
<div class="main">
<h1>Introduction to Speakerdeck</h1>
<div class="slide_frame" id="slides_container">
<div class="speakerdeck-embed"
data-id="123"
data-ratio="1.3333333333333333"
data-slide=""></div>
</div>
</div>
</div>
</section>
</div>
</body>
</html>
"""
class AttendSessionTest(TestCase):
fixtures = ['example/users.json', 'example/proposal-and-schedule.json']
def _temporary_shift_training(self, property_name, timedelta):
tomorrow = datetime.datetime.utcnow() + datetime.timedelta(days=1)
prop = getattr(self, property_name)
self._original_data['%s_start' % (property_name,)] = prop.start
self._original_data['%s_end' % (property_name,)] = prop.end
prop.start = prop.start.replace(tomorrow.year, tomorrow.month, tomorrow.day)
        prop.end = prop.end.replace(tomorrow.year, tomorrow.month, tomorrow.day)
prop.save()
def _restore_training_time(self, property_name):
prop = getattr(self, property_name)
prop.start = self._original_data['%s_start' % (property_name,)]
prop.end = self._original_data['%s_end' % (property_name,)]
prop.save()
def setUp(self):
self._original_data = {}
now = datetime.datetime.utcnow()
self.attendees = [User.objects.create_user(username='att%d' % i, password='att%d' % i) for i in range(1, 4)]
for i in range(3):
account_models.Profile.objects.create(user=self.attendees[i])
self.training = models.Session.objects.get(title='Training 15')
self.training2 = models.Session.objects.get(title='Training 16')
self._temporary_shift_training('training', datetime.timedelta(days=1))
self._temporary_shift_training('training2', datetime.timedelta(days=1))
self.attend_url = reverse('session-attend', kwargs={'session_pk': self.training.pk})
self.leave_url = reverse('session-leave', kwargs={'session_pk': self.training.pk})
def tearDown(self):
self._restore_training_time('training')
self._restore_training_time('training2')
def test_attend_no_limit(self):
self.client.login(username='att1', password='att1')
response = self.client.get(self.training.get_absolute_url())
self.assertContains(response,
'<input type="submit" class="btn btn-primary" value="Attend this session" />')
response = self.client.post(self.attend_url, follow=True)
self.assertContains(response,
'You are now attending %s.' % self.training.title)
self.assertContains(response,
'<input type="submit" class="btn btn-primary" value="Do not attend anymore" />')
att_ids = list(self.training.attendees.order_by('id').values_list('id', flat=True).all())
self.assertEqual(att_ids, [self.attendees[0].pk])
self.client.logout()
self.client.login(username='att2', password='att2')
response = self.client.get(self.training.get_absolute_url())
self.assertContains(response,
'<input type="submit" class="btn btn-primary" value="Attend this session" />')
response = self.client.post(self.attend_url, follow=True)
self.assertContains(response,
'You are now attending %s.' % self.training.title)
self.assertContains(response,
'<input type="submit" class="btn btn-primary" value="Do not attend anymore" />')
att_ids = list(self.training.attendees.order_by('id').values_list('id', flat=True).all())
self.assertEqual(att_ids, [self.attendees[0].pk, self.attendees[1].pk])
self.client.logout()
self.client.login(username='att3', password='att3')
response = self.client.get(self.training.get_absolute_url())
self.assertContains(response,
'<input type="submit" class="btn btn-primary" value="Attend this session" />')
response = self.client.post(self.attend_url, follow=True)
self.assertContains(response,
'You are now attending %s.' % self.training.title)
self.assertContains(response,
'<input type="submit" class="btn btn-primary" value="Do not attend anymore" />')
att_ids = list(self.training.attendees.order_by('id').values_list('id', flat=True).all())
self.assertEqual(att_ids, [self.attendees[0].pk, self.attendees[1].pk, self.attendees[2].pk])
self.client.logout()
def test_attend_limit(self):
self.training.max_attendees = 2
self.training.save(update_fields=['max_attendees'])
self.client.login(username='att1', password='att1')
response = self.client.get(self.training.get_absolute_url())
self.assertContains(response,
'<input type="submit" class="btn btn-primary" value="Attend this session" />')
response = self.client.post(self.attend_url, follow=True)
self.assertContains(response,
'You are now attending %s.' % self.training.title)
self.assertContains(response,
'<input type="submit" class="btn btn-primary" value="Do not attend anymore" />')
att_ids = list(self.training.attendees.order_by('id').values_list('id', flat=True).all())
self.assertEqual(att_ids, [self.attendees[0].pk])
self.client.logout()
self.client.login(username='att2', password='att2')
response = self.client.get(self.training.get_absolute_url())
self.assertContains(response,
'<input type="submit" class="btn btn-primary" value="Attend this session" />')
response = self.client.post(self.attend_url, follow=True)
self.assertContains(response,
'You are now attending %s.' % self.training.title)
self.assertContains(response,
'<input type="submit" class="btn btn-primary" value="Do not attend anymore" />')
att_ids = list(self.training.attendees.order_by('id').values_list('id', flat=True).all())
self.assertEqual(att_ids, [self.attendees[0].pk, self.attendees[1].pk])
self.client.logout()
self.client.login(username='att3', password='att3')
response = self.client.get(self.training.get_absolute_url())
self.assertContains(response,
'You cannot attend right now. There are no free seats left.')
response = self.client.post(self.attend_url, follow=True)
self.assertContains(response,
'You cannot attend right now. There are no free seats left.')
att_ids = list(self.training.attendees.order_by('id').values_list('id', flat=True).all())
self.assertEqual(att_ids, [self.attendees[0].pk, self.attendees[1].pk])
self.client.logout()
def test_leave(self):
self.training.attendees.add(self.attendees[0].profile)
att_ids = list(self.training.attendees.order_by('id').values_list('id', flat=True).all())
self.assertEqual(att_ids, [self.attendees[0].pk])
self.client.login(username='att1', password='att1')
response = self.client.get(self.training.get_absolute_url())
self.assertContains(response,
'<input type="submit" class="btn btn-primary" value="Do not attend anymore" />')
response = self.client.post(self.leave_url, follow=True)
self.assertContains(response,
'You are not attending %s anymore.' % self.training.title)
self.assertContains(response,
'<input type="submit" class="btn btn-primary" value="Attend this session" />')
att_ids = list(self.training.attendees.order_by('id').values_list('id', flat=True).all())
self.assertEqual(att_ids, [])
self.client.logout()
def test_not_attendable(self):
talk = models.Session.objects.get(title='Talk 1')
self.client.login(username='att1', password='att1')
response = self.client.post(reverse('session-leave', kwargs={'session_pk': talk.pk}))
self.assertEqual(response.status_code, 404)
def test_cannot_attend_overlapping(self):
attend_url2 = reverse('session-attend', kwargs={'session_pk': self.training2.pk})
self.client.login(username='att1', password='att1')
response = self.client.get(self.training.get_absolute_url())
self.assertContains(response,
'<input type="submit" class="btn btn-primary" value="Attend this session" />')
response = self.client.post(self.attend_url, follow=True)
self.assertContains(response,
'You are now attending %s.' % self.training.title)
att_ids = list(self.training.attendees.order_by('id').values_list('id', flat=True).all())
self.assertEqual(att_ids, [self.attendees[0].pk])
response = self.client.get(self.training2.get_absolute_url())
self.assertContains(response,
'<input type="submit" class="btn btn-primary" value="Attend this session" />')
response = self.client.post(attend_url2, follow=True)
self.assertContains(response,
'You cannot attend this session because you are already attending another one at that time.')
att_ids2 = list(self.training2.attendees.order_by('id').values_list('id', flat=True).all())
self.assertEqual(att_ids2, [])
self.client.logout()
class FrabExporterTest(TestCase):
def test_calculate_event_duration(self):
exporter = exporters.FrabExporter()
event = models.Session(
start=datetime.datetime(2014, 7, 1, 16, 0),
end=datetime.datetime(2014, 7, 1, 17, 30)
)
self.assertEqual(u'01:30', exporter._calculate_event_duration(event))
|
EuroPython/djep
|
pyconde/schedule/tests.py
|
Python
|
bsd-3-clause
| 15,838
|
from . import equipmentslot, baseequipment
class Armor(baseequipment.Equipment):
def __init__(self, name, armor_bonus, equipment_slot=equipmentslot.EquipmentSlot.CHEST):
super().__init__(name, equipment_slot)
self.armor_bonus = armor_bonus
def compute_normal_armor_class(self):
return self.armor_bonus
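# A minimal usage sketch (names and values are illustrative; EquipmentSlot.HEAD
# is assumed to be another member of the equipmentslot module's enum):
#
#   leather = Armor('Leather Armor', armor_bonus=2)
#   helmet = Armor('Iron Helmet', armor_bonus=1,
#                  equipment_slot=equipmentslot.EquipmentSlot.HEAD)
#   leather.compute_normal_armor_class()  # -> 2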
|
agingrasc/PathfinderPhorum
|
phorum/equipment/armor.py
|
Python
|
mit
| 337
|
## @file
# This file is used to define class objects of INF file [Pcds] section.
# It will be consumed by InfParser.
#
# Copyright (c) 2011 - 2014, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
'''
InfPcdObject
'''
import os
import re
from Logger import StringTable as ST
from Logger import ToolError
import Logger.Log as Logger
from Library import GlobalData
from Library import DataType as DT
from Library.Misc import Sdict
from Library.Misc import GetHelpStringByRemoveHashKey
from Library.ParserValidate import IsValidPcdType
from Library.ParserValidate import IsValidCVariableName
from Library.ParserValidate import IsValidPcdValue
from Library.ParserValidate import IsValidArch
from Library.CommentParsing import ParseComment
from Library.String import GetSplitValueList
from Library.String import IsHexDigitUINT32
from Library.ExpressionValidate import IsValidFeatureFlagExp
from Parser.InfAsBuiltProcess import GetPackageListInfo
from Parser.DecParser import Dec
from Object.Parser.InfPackagesObject import InfPackageItem
def ValidateArch(ArchItem, PcdTypeItem1, LineNo, SupArchDict, SupArchList):
#
# Validate Arch
#
    if ArchItem == '' or ArchItem is None:
ArchItem = 'COMMON'
    if PcdTypeItem1.upper() != DT.TAB_INF_FEATURE_PCD.upper():
ArchList = GetSplitValueList(ArchItem, ' ')
for ArchItemNew in ArchList:
if not IsValidArch(ArchItemNew):
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_DEFINE_FROMAT_INVALID % (ArchItemNew),
File=GlobalData.gINF_MODULE_NAME,
Line=LineNo,
ExtraData=ArchItemNew)
SupArchDict[PcdTypeItem1] = ArchList
else:
SupArchList.append(ArchItem)
return SupArchList, SupArchDict
def ParsePcdComment(CommentList, PcdTypeItem, PcdItemObj):
CommentInsList = []
PreUsage = None
PreHelpText = ''
BlockFlag = -1
FFEHelpText = ''
CommentItemHelpText = ''
Count = 0
for CommentItem in CommentList:
Count = Count + 1
CommentItemUsage, CommentType, CommentString, CommentItemHelpText = ParseComment(CommentItem,
DT.ALL_USAGE_TOKENS,
{},
[],
False)
if CommentType and CommentString:
pass
if PcdTypeItem == 'FeaturePcd':
CommentItemUsage = DT.USAGE_ITEM_CONSUMES
            if CommentItemHelpText is None:
CommentItemHelpText = ''
if Count == 1:
FFEHelpText = CommentItemHelpText
else:
FFEHelpText = FFEHelpText + DT.END_OF_LINE + CommentItemHelpText
if Count == len(CommentList):
CommentItemHelpText = FFEHelpText
BlockFlag = 4
else:
continue
        if CommentItemHelpText is None:
CommentItemHelpText = ''
if Count == len(CommentList) and CommentItemUsage == DT.ITEM_UNDEFINED:
CommentItemHelpText = DT.END_OF_LINE
if Count == len(CommentList) and (BlockFlag == 1 or BlockFlag == 2):
if CommentItemUsage == DT.ITEM_UNDEFINED:
BlockFlag = 4
else:
BlockFlag = 3
elif BlockFlag == -1 and Count == len(CommentList):
BlockFlag = 4
if BlockFlag == -1 or BlockFlag == 1 or BlockFlag == 2:
if CommentItemUsage == DT.ITEM_UNDEFINED:
if BlockFlag == -1:
BlockFlag = 1
elif BlockFlag == 1:
BlockFlag = 2
else:
if BlockFlag == 1 or BlockFlag == 2:
BlockFlag = 3
elif BlockFlag == -1:
BlockFlag = 4
#
        # Combine two comment lines if they are generic comments
#
if CommentItemUsage == PreUsage == DT.ITEM_UNDEFINED:
CommentItemHelpText = PreHelpText + DT.END_OF_LINE + CommentItemHelpText
PreHelpText = CommentItemHelpText
if BlockFlag == 4:
CommentItemIns = InfPcdItemCommentContent()
CommentItemIns.SetUsageItem(CommentItemUsage)
CommentItemIns.SetHelpStringItem(CommentItemHelpText)
CommentInsList.append(CommentItemIns)
BlockFlag = -1
PreUsage = None
PreHelpText = ''
elif BlockFlag == 3:
#
# Add previous help string
#
CommentItemIns = InfPcdItemCommentContent()
CommentItemIns.SetUsageItem(DT.ITEM_UNDEFINED)
if PreHelpText == '' or PreHelpText.endswith(DT.END_OF_LINE):
PreHelpText += DT.END_OF_LINE
CommentItemIns.SetHelpStringItem(PreHelpText)
CommentInsList.append(CommentItemIns)
#
# Add Current help string
#
CommentItemIns = InfPcdItemCommentContent()
CommentItemIns.SetUsageItem(CommentItemUsage)
CommentItemIns.SetHelpStringItem(CommentItemHelpText)
CommentInsList.append(CommentItemIns)
BlockFlag = -1
PreUsage = None
PreHelpText = ''
else:
PreUsage = CommentItemUsage
PreHelpText = CommentItemHelpText
PcdItemObj.SetHelpStringList(CommentInsList)
return PcdItemObj
class InfPcdItemCommentContent():
def __init__(self):
#
# ## SOMETIMES_CONSUMES ## HelpString
#
self.UsageItem = ''
#
# Help String
#
self.HelpStringItem = ''
def SetUsageItem(self, UsageItem):
self.UsageItem = UsageItem
def GetUsageItem(self):
return self.UsageItem
def SetHelpStringItem(self, HelpStringItem):
self.HelpStringItem = HelpStringItem
def GetHelpStringItem(self):
return self.HelpStringItem
## InfPcdItem
#
# This class defines the PCD item used in Module files
#
# @param CName: Input value for CName, default is ''
# @param Token: Input value for Token, default is ''
# @param TokenSpaceGuidCName: Input value for TokenSpaceGuidCName, default
# is ''
# @param DatumType: Input value for DatumType, default is ''
# @param MaxDatumSize: Input value for MaxDatumSize, default is ''
# @param DefaultValue: Input value for DefaultValue, default is ''
# @param ItemType: Input value for ItemType, default is ''
# @param ValidUsage: Input value for ValidUsage, default is []
# @param SkuInfoList: Input value for SkuInfoList, default is {}
# @param SupModuleList: Input value for SupModuleList, default is []
#
class InfPcdItem():
def __init__(self):
self.CName = ''
self.Token = ''
self.TokenSpaceGuidCName = ''
self.TokenSpaceGuidValue = ''
self.DatumType = ''
self.MaxDatumSize = ''
self.DefaultValue = ''
self.Offset = ''
self.ValidUsage = ''
self.ItemType = ''
self.SupModuleList = []
self.HelpStringList = []
self.FeatureFlagExp = ''
self.SupArchList = []
self.PcdErrorsList = []
def SetCName(self, CName):
self.CName = CName
def GetCName(self):
return self.CName
def SetToken(self, Token):
self.Token = Token
def GetToken(self):
return self.Token
def SetTokenSpaceGuidCName(self, TokenSpaceGuidCName):
self.TokenSpaceGuidCName = TokenSpaceGuidCName
def GetTokenSpaceGuidCName(self):
return self.TokenSpaceGuidCName
def SetTokenSpaceGuidValue(self, TokenSpaceGuidValue):
self.TokenSpaceGuidValue = TokenSpaceGuidValue
def GetTokenSpaceGuidValue(self):
return self.TokenSpaceGuidValue
def SetDatumType(self, DatumType):
self.DatumType = DatumType
def GetDatumType(self):
return self.DatumType
def SetMaxDatumSize(self, MaxDatumSize):
self.MaxDatumSize = MaxDatumSize
def GetMaxDatumSize(self):
return self.MaxDatumSize
def SetDefaultValue(self, DefaultValue):
self.DefaultValue = DefaultValue
def GetDefaultValue(self):
return self.DefaultValue
def SetPcdErrorsList(self, PcdErrorsList):
self.PcdErrorsList = PcdErrorsList
def GetPcdErrorsList(self):
return self.PcdErrorsList
def SetItemType(self, ItemType):
self.ItemType = ItemType
def GetItemType(self):
return self.ItemType
def SetSupModuleList(self, SupModuleList):
self.SupModuleList = SupModuleList
def GetSupModuleList(self):
return self.SupModuleList
def SetHelpStringList(self, HelpStringList):
self.HelpStringList = HelpStringList
def GetHelpStringList(self):
return self.HelpStringList
def SetFeatureFlagExp(self, FeatureFlagExp):
self.FeatureFlagExp = FeatureFlagExp
def GetFeatureFlagExp(self):
return self.FeatureFlagExp
def SetSupportArchList(self, ArchList):
self.SupArchList = ArchList
def GetSupportArchList(self):
return self.SupArchList
def SetOffset(self, Offset):
self.Offset = Offset
def GetOffset(self):
return self.Offset
def SetValidUsage(self, ValidUsage):
self.ValidUsage = ValidUsage
def GetValidUsage(self):
return self.ValidUsage
## InfPcdObject
#
# This class defines the PCD section object of an INF file; it is consumed
# by InfParser.
#
class InfPcdObject():
def __init__(self, FileName):
self.Pcds = Sdict()
self.FileName = FileName
def SetPcds(self, PcdContent, KeysList=None, PackageInfo=None):
if GlobalData.gIS_BINARY_INF:
self.SetAsBuildPcds(PcdContent, KeysList, PackageInfo)
return True
#
# Validate Arch
#
SupArchList = []
SupArchDict = {}
PcdTypeItem = ''
for (PcdTypeItem1, ArchItem, LineNo) in KeysList:
SupArchList, SupArchDict = ValidateArch(ArchItem, PcdTypeItem1, LineNo, SupArchDict, SupArchList)
#
# Validate PcdType
#
            if PcdTypeItem1 == '' or PcdTypeItem1 is None:
return False
else:
if not IsValidPcdType(PcdTypeItem1):
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_PCD_SECTION_TYPE_ERROR % (DT.PCD_USAGE_TYPE_LIST_OF_MODULE),
File=GlobalData.gINF_MODULE_NAME,
Line=LineNo,
ExtraData=PcdTypeItem1)
return False
PcdTypeItem = PcdTypeItem1
for PcdItem in PcdContent:
PcdItemObj = InfPcdItem()
CommentList = PcdItem[1]
CurrentLineOfPcdItem = PcdItem[2]
PcdItem = PcdItem[0]
            if CommentList is not None and len(CommentList) != 0:
PcdItemObj = ParsePcdComment(CommentList, PcdTypeItem, PcdItemObj)
else:
CommentItemIns = InfPcdItemCommentContent()
CommentItemIns.SetUsageItem(DT.ITEM_UNDEFINED)
PcdItemObj.SetHelpStringList([CommentItemIns])
if len(PcdItem) >= 1 and len(PcdItem) <= 3:
PcdItemObj = SetPcdName(PcdItem, CurrentLineOfPcdItem, PcdItemObj)
if len(PcdItem) >= 2 and len(PcdItem) <= 3:
#
                # Contains PcdName and value; validate the value.
#
if IsValidPcdValue(PcdItem[1]) or PcdItem[1].strip() == "":
PcdItemObj.SetDefaultValue(PcdItem[1])
else:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_PCD_VALUE_INVALID,
File=CurrentLineOfPcdItem[2],
Line=CurrentLineOfPcdItem[1],
ExtraData=PcdItem[1])
if len(PcdItem) == 3:
#
                # Contains PcdName, value, and a FeatureFlag expression
#
#
# Validate Feature Flag Express
#
if PcdItem[2].strip() == '':
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_MISSING,
File=CurrentLineOfPcdItem[2],
Line=CurrentLineOfPcdItem[1],
ExtraData=CurrentLineOfPcdItem[0])
#
# Validate FFE
#
FeatureFlagRtv = IsValidFeatureFlagExp(PcdItem[2].strip())
if not FeatureFlagRtv[0]:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_FEATURE_FLAG_EXP_SYNTAX_INVLID % (FeatureFlagRtv[1]),
File=CurrentLineOfPcdItem[2],
Line=CurrentLineOfPcdItem[1],
ExtraData=CurrentLineOfPcdItem[0])
PcdItemObj.SetFeatureFlagExp(PcdItem[2])
if len(PcdItem) < 1 or len(PcdItem) > 3:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_PCD_SECTION_CONTENT_ERROR,
File=CurrentLineOfPcdItem[2],
Line=CurrentLineOfPcdItem[1],
ExtraData=CurrentLineOfPcdItem[0])
return False
            if PcdTypeItem.upper() != DT.TAB_INF_FEATURE_PCD.upper():
PcdItemObj.SetSupportArchList(SupArchDict[PcdTypeItem])
else:
PcdItemObj.SetSupportArchList(SupArchList)
if self.Pcds.has_key((PcdTypeItem, PcdItemObj)):
PcdsList = self.Pcds[PcdTypeItem, PcdItemObj]
PcdsList.append(PcdItemObj)
self.Pcds[PcdTypeItem, PcdItemObj] = PcdsList
else:
PcdsList = []
PcdsList.append(PcdItemObj)
self.Pcds[PcdTypeItem, PcdItemObj] = PcdsList
return True
def SetAsBuildPcds(self, PcdContent, KeysList=None, PackageInfo=None):
for PcdItem in PcdContent:
PcdItemObj = InfPcdItem()
CommentList = PcdItem[1]
CurrentLineOfPcdItem = PcdItem[2]
PcdItem = PcdItem[0]
CommentString = ''
for CommentLine in CommentList:
CommentString = GetHelpStringByRemoveHashKey(CommentLine)
CommentItemIns = InfPcdItemCommentContent()
CommentItemIns.SetHelpStringItem(CommentString)
CommentItemIns.SetUsageItem(CommentString)
PcdItemObj.SetHelpStringList(PcdItemObj.GetHelpStringList() + [CommentItemIns])
if PcdItemObj.GetValidUsage():
PcdItemObj.SetValidUsage(PcdItemObj.GetValidUsage() + DT.TAB_VALUE_SPLIT + CommentString)
else:
PcdItemObj.SetValidUsage(CommentString)
PcdItemObj.SetItemType(KeysList[0][0])
#
# Set PcdTokenSpaceCName and CName
#
PcdItemObj = SetPcdName(PcdItem, CurrentLineOfPcdItem, PcdItemObj)
#
# Set Value/DatumType/OffSet/Token
#
PcdItemObj = SetValueDatumTypeMaxSizeToken(PcdItem,
CurrentLineOfPcdItem,
PcdItemObj,
KeysList[0][1],
PackageInfo)
PcdTypeItem = KeysList[0][0]
if self.Pcds.has_key((PcdTypeItem, PcdItemObj)):
PcdsList = self.Pcds[PcdTypeItem, PcdItemObj]
PcdsList.append(PcdItemObj)
self.Pcds[PcdTypeItem, PcdItemObj] = PcdsList
else:
PcdsList = []
PcdsList.append(PcdItemObj)
self.Pcds[PcdTypeItem, PcdItemObj] = PcdsList
def GetPcds(self):
return self.Pcds
def ParserPcdInfoInDec(String):
ValueList = GetSplitValueList(String, DT.TAB_VALUE_SPLIT, 3)
#
# DatumType, Token
#
return ValueList[2], ValueList[3]
def SetValueDatumTypeMaxSizeToken(PcdItem, CurrentLineOfPcdItem, PcdItemObj, Arch, PackageInfo=None):
#
    # Package information has not been generated yet; parse the INF file to get it.
#
if not PackageInfo:
PackageInfo = []
InfFileName = CurrentLineOfPcdItem[2]
PackageInfoList = GetPackageListInfo(InfFileName, GlobalData.gWORKSPACE, -1)
for PackageInfoListItem in PackageInfoList:
PackageInfoIns = InfPackageItem()
PackageInfoIns.SetPackageName(PackageInfoListItem)
PackageInfo.append(PackageInfoIns)
PcdInfoInDecHasFound = False
for PackageItem in PackageInfo:
if PcdInfoInDecHasFound:
break
PackageName = PackageItem.PackageName
#
# Open DEC file to get information
#
FullFileName = os.path.normpath(os.path.realpath(os.path.join(GlobalData.gWORKSPACE, PackageName)))
DecParser = None
if FullFileName not in GlobalData.gPackageDict:
DecParser = Dec(FullFileName)
GlobalData.gPackageDict[FullFileName] = DecParser
else:
DecParser = GlobalData.gPackageDict[FullFileName]
#
# Find PCD information.
#
DecPcdsDict = DecParser.GetPcdSectionObject().ValueDict
for Key in DecPcdsDict.keys():
if (Key[0] == 'PCDSDYNAMICEX' and PcdItemObj.GetItemType() == 'PcdEx') and \
(Key[1] == 'COMMON' or Key[1] == Arch):
for PcdInDec in DecPcdsDict[Key]:
if PcdInDec.TokenCName == PcdItemObj.CName and \
PcdInDec.TokenSpaceGuidCName == PcdItemObj.TokenSpaceGuidCName:
PcdItemObj.SetToken(PcdInDec.TokenValue)
PcdItemObj.SetDatumType(PcdInDec.DatumType)
PcdItemObj.SetSupportArchList([Arch])
PcdItemObj.SetDefaultValue(PcdInDec.DefaultValue)
if (Key[0] == 'PCDSPATCHABLEINMODULE' and PcdItemObj.GetItemType() == 'PatchPcd') and \
(Key[1] == 'COMMON' or Key[1] == Arch):
for PcdInDec in DecPcdsDict[Key]:
if PcdInDec.TokenCName == PcdItemObj.CName and \
PcdInDec.TokenSpaceGuidCName == PcdItemObj.TokenSpaceGuidCName:
PcdItemObj.SetToken(PcdInDec.TokenValue)
PcdItemObj.SetDatumType(PcdInDec.DatumType)
PcdItemObj.SetSupportArchList([Arch])
if PcdItemObj.GetDatumType() == 'VOID*':
if len(PcdItem) > 1:
PcdItemObj.SetMaxDatumSize('%s' % (len(GetSplitValueList(PcdItem[1], DT.TAB_COMMA_SPLIT))))
DecGuidsDict = DecParser.GetGuidSectionObject().ValueDict
for Key in DecGuidsDict.keys():
if Key == 'COMMON' or Key == Arch:
for GuidInDec in DecGuidsDict[Key]:
if GuidInDec.GuidCName == PcdItemObj.TokenSpaceGuidCName:
PcdItemObj.SetTokenSpaceGuidValue(GuidInDec.GuidString)
if PcdItemObj.GetItemType().upper() == DT.TAB_INF_PATCH_PCD.upper():
#
# Validate Value.
#
# convert the value from a decimal 0 to a formatted hex value.
if PcdItem[1] == "0":
DatumType = PcdItemObj.GetDatumType()
if DatumType == "UINT8":
PcdItem[1] = "0x00"
if DatumType == "UINT16":
PcdItem[1] = "0x0000"
if DatumType == "UINT32":
PcdItem[1] = "0x00000000"
if DatumType == "UINT64":
PcdItem[1] = "0x0000000000000000"
if ValidatePcdValueOnDatumType(PcdItem[1], PcdItemObj.GetDatumType()):
PcdItemObj.SetDefaultValue(PcdItem[1])
else:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_ASBUILD_PCD_VALUE_INVALID % ("\"" + PcdItem[1] + "\"", "\"" +
PcdItemObj.GetDatumType() + "\""),
File=CurrentLineOfPcdItem[2],
Line=CurrentLineOfPcdItem[1],
ExtraData=CurrentLineOfPcdItem[0])
#
# validate offset
#
if PcdItemObj.GetItemType().upper() == DT.TAB_INF_PATCH_PCD.upper():
if not IsHexDigitUINT32(PcdItem[2]):
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_ASBUILD_PCD_OFFSET_FORMAT_INVALID % ("\"" + PcdItem[2] + "\""),
File=CurrentLineOfPcdItem[2],
Line=CurrentLineOfPcdItem[1],
ExtraData=CurrentLineOfPcdItem[0])
PcdItemObj.SetOffset(PcdItem[2])
if PcdItemObj.GetToken() == '' or PcdItemObj.GetDatumType() == '':
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_ASBUILD_PCD_DECLARITION_MISS % ("\"" + PcdItem[0] + "\""),
File=CurrentLineOfPcdItem[2],
Line=CurrentLineOfPcdItem[1],
ExtraData=CurrentLineOfPcdItem[0])
return PcdItemObj
def ValidatePcdValueOnDatumType(Value, Type):
Value = Value.strip()
#
# Boolean type only allow 0x00 or 0x01 as value per INF spec
#
if Type == 'BOOLEAN':
if not (Value == '0x00' or Value == '0x01'):
return False
elif Type == 'VOID*':
if not Value.startswith("{"):
return False
if not Value.endswith("}"):
return False
#
# Strip "{" at head and "}" at tail.
#
Value = Value[1:-1]
ValueList = GetSplitValueList(Value, DT.TAB_COMMA_SPLIT)
ReIsValidHexByte = re.compile("^0x[0-9a-f]{1,2}$", re.IGNORECASE)
for ValueItem in ValueList:
if not ReIsValidHexByte.match(ValueItem):
return False
elif Type == 'UINT8' or Type == 'UINT16' or Type == 'UINT32' or Type == 'UINT64':
        ReIsValidUint8z = re.compile('^0[xX][a-fA-F0-9]{2}$')
        ReIsValidUint16z = re.compile('^0[xX][a-fA-F0-9]{4}$')
        ReIsValidUint32z = re.compile('^0[xX][a-fA-F0-9]{8}$')
        ReIsValidUint64z = re.compile('^0[xX][a-fA-F0-9]{16}$')
if not ReIsValidUint8z.match(Value) and Type == 'UINT8':
return False
elif not ReIsValidUint16z.match(Value) and Type == 'UINT16':
return False
elif not ReIsValidUint32z.match(Value) and Type == 'UINT32':
return False
elif not ReIsValidUint64z.match(Value) and Type == 'UINT64':
return False
else:
#
# Since we assume the DEC file always correct, should never go to here.
#
pass
return True
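# Illustrative results of the validator above (assuming DT.TAB_COMMA_SPLIT
# is ','):
#
#   ValidatePcdValueOnDatumType('0x01', 'BOOLEAN')        -> True
#   ValidatePcdValueOnDatumType('1', 'BOOLEAN')           -> False (only 0x00/0x01)
#   ValidatePcdValueOnDatumType('{0x01,0xAB}', 'VOID*')   -> True
#   ValidatePcdValueOnDatumType('0x00FF', 'UINT16')       -> True
#   ValidatePcdValueOnDatumType('0xFF', 'UINT16')         -> False (needs 4 hex digits)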
def SetPcdName(PcdItem, CurrentLineOfPcdItem, PcdItemObj):
#
# Only PCD Name specified
# <PcdName> ::= <TokenSpaceGuidCName> "." <TokenCName>
#
PcdId = GetSplitValueList(PcdItem[0], DT.TAB_SPLIT)
if len(PcdId) != 2:
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_PCD_NAME_FORMAT_ERROR,
File=CurrentLineOfPcdItem[2],
Line=CurrentLineOfPcdItem[1],
ExtraData=CurrentLineOfPcdItem[0])
else:
#
# Validate PcdTokenSpaceGuidCName
#
if not IsValidCVariableName(PcdId[0]):
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_PCD_CVAR_GUID,
File=CurrentLineOfPcdItem[2],
Line=CurrentLineOfPcdItem[1],
ExtraData=PcdId[0])
if not IsValidCVariableName(PcdId[1]):
Logger.Error("InfParser",
ToolError.FORMAT_INVALID,
ST.ERR_INF_PARSER_PCD_CVAR_PCDCNAME,
File=CurrentLineOfPcdItem[2],
Line=CurrentLineOfPcdItem[1],
ExtraData=PcdId[1])
PcdItemObj.SetTokenSpaceGuidCName(PcdId[0])
PcdItemObj.SetCName(PcdId[1])
return PcdItemObj
|
miguelinux/vbox
|
src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/Object/Parser/InfPcdObject.py
|
Python
|
gpl-2.0
| 25,992
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
GridPP and DIRAC: adding CERN@school cluster metadata.
"""
#...for operating system stuff.
import os
#...for parsing the arguments.
import argparse
#...for the logging.
import logging as lg
# Import the JSON library.
import json
# The DIRAC imports.
from DIRAC.Core.Base import Script
Script.parseCommandLine()
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
if __name__ == "__main__":
print("")
print("*###############################=########*")
print("* GridPP and DIRAC: add cluster metadata *")
print("*########################################*")
print("")
# Get the datafile path from the command line.
parser = argparse.ArgumentParser()
parser.add_argument("jsonPath", help="Path to the cluster dataset's JSON.")
parser.add_argument("outputPath", help="The path for the output files.")
parser.add_argument("dfcBaseDir", help="The name of the base directory on the DFC.")
parser.add_argument("-v", "--verbose", help="Increase output verbosity", action="store_true")
args = parser.parse_args()
## The path to the cluster JSON file.
datapath = args.jsonPath
## The output path.
outputpath = args.outputPath
# Check if the output directory exists. If it doesn't, quit.
if not os.path.isdir(outputpath):
raise IOError("* ERROR: '%s' output directory does not exist!" % (outputpath))
## The target directory on the DFC.
dfcbasedir = args.dfcBaseDir
# Set the logging level.
if args.verbose:
level=lg.DEBUG
else:
level=lg.INFO
# Configure the logging.
lg.basicConfig(filename='log_add_cluster_metadata.log', filemode='w', level=level)
print("*")
print("* Input JSON : '%s'" % (datapath))
print("* Output path : '%s'" % (outputpath))
print("* DFC base dir. : '%s'" % (dfcbasedir))
print("*")
## The frame properties JSON file - FIXME: check it exists...
kf = open(datapath, "r")
#
kd = json.load(kf)
kf.close()
## The File Catalog client object.
fc = FileCatalogClient()
# Loop over the clusters and upload the metadata to the DFC.
for k in kd:
## The cluster file name.
fn = k["id"] + ".png"
print("* Found : '%s'" % (fn))
## The full LFN for the datafile.
lfn = dfcbasedir + "/" + fn
print("*--> Adding to : '%s'" % (lfn))
metadataresult = fc.setMetadata(lfn, k)
#print("*--> '%s'" % (lfn))
        print(metadataresult)
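# Example invocation (all paths are illustrative; a configured DIRAC client
# and a valid grid proxy are assumed):
#
#   python add_cluster_metadata.py clusters.json ./out /gridpp/cernatschool/clusters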
|
gridpp/dirac-getting-started
|
add_cluster_metadata.py
|
Python
|
mit
| 2,620
|
import boto.iam
import ConfigParser
import sys
import os.path
import argparse
import idsConfig
import idsNotify
list_user=""
parser = argparse.ArgumentParser(description='IAM intrusion detection')
parser.add_argument("-l", "--list-user", action="store", dest="list_user", help="list trusted user file")
args = parser.parse_args()
try:
conn = boto.iam.connection.IAMConnection(aws_access_key_id = idsConfig.id, aws_secret_access_key = idsConfig.key)
data = conn.get_all_users()
except Exception as e:
idsNotify.send_alert("AWS IDS: \n"+str(e))
sys.exit(1)
user_list=[]
for user in data['list_users_response']['list_users_result']['users']:
user_list.append(user['user_name'])
user_list_file = ConfigParser.ConfigParser()
user_list_file.read(os.path.expanduser(args.list_user))
local_user_names = user_list_file.get("verified_users", "unames", raw=True).split(',')
diff = list(set(user_list) - set(local_user_names))
if len(diff) > 0:
idsNotify.send_alert("AWS IDS: \nUnknown user detected: "+ str(diff))
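# Example trusted-user file, as parsed by ConfigParser above (the user names
# are illustrative):
#
#   [verified_users]
#   unames = alice,bob,deploy-bot
#
# Example invocation (the path is an assumption):
#   python iam_ids.py -l ~/.aws/trusted_users.ini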
|
adimania/AWS-IDS
|
iam_ids.py
|
Python
|
gpl-2.0
| 1,032
|
"""
============
Image Module
============
Routines for images.
"""
import numpy as _np
from astropy.io import fits
from astropy import wcs
from .catalog import (read_bgps, select_bgps_field)
from .paths import all_paths as d
class Dirs(object):
"""
Object to hold directories for interactive editing of paths.
"""
def __init__(self):
self.root_dir = d.root_dir
self.bgps_dir = self.root_dir + 'BGPS/Images/{}/'
self.bgps_img_filen = '{}_{}_13pca_{}.fits'
d = Dirs()
def get_bgps_img(identifier, exten, v=210):
"""
    Retrieve a BGPS image file as an astropy.io.fits HDUList.
Parameters
----------
identifier : number
BGPS catalog number of clump or a BGPS field string
exten : string
Image file extension name. Valid types:
labelmask -> source contours, label masks
labelmap50 -> source contours, label masks for v1
map20 -> default map
map50 -> default map for v1
medmap20 -> median map 20
            noisemap20 -> rms map
        Cropped and reprojected variants (e.g. map20_crop,
        map20_reproject) are also accepted.
    v : number, default 210
        BGPS version number, valid [101, 200, 201, '2d', 210]. This only
        affects the choice of label mask.
Returns
-------
img : astropy.io.fits.HDUList
"""
if v not in [101, 200, 201, '2d', 210]:
raise ValueError('Invalid version: {0}'.format(v))
if exten not in ['labelmask', 'labelmap50', 'map20', 'map50', 'medmap20',
'noisemap20', 'map20_crop', 'map20_crop_reproject',
'map20_reproject', 'medmap20_crop', 'medmap20_reproject']:
raise ValueError('Incorrect exten: {}.'.format(exten))
if v == '2d':
exten = 'labelmask_deeper'
ver_path = {101: 'v1.0.2', 200: 'v2.0.0', 201: 'v2.0.1', '2d': 'v2.0.1d',
210: 'v2.1.0'}
ver_init = {101: 'v1.0.2', 200: 'v2.0_ds2', 201: 'v2.0_ds2',
'2d': 'v2.0_ds2', 210: 'v2.1_ds2'}
cnum_col = {101: 'cnum', 200: 'cnum', 201: 'cnum',
'2d': 'cnum', 210: 'v210cnum'}
# cnum or field
if isinstance(identifier, (float, int)):
bgps = read_bgps(exten='none', v=v)
c_index = _np.argwhere(bgps[cnum_col[v]] == identifier)[0][0]
field = bgps.ix[c_index, 'field']
elif isinstance(identifier, (str)):
field = identifier
else:
raise ValueError('Improper identifier {0}.'.format(identifier))
img = fits.open(d.bgps_dir.format(ver_path[v]) +
d.bgps_img_filen.format(ver_init[v], field, exten))
return img
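# Usage sketch (requires the local BGPS FITS files referenced by
# paths.all_paths; the catalog number and field name below are illustrative):
#
#   img = get_bgps_img(5000, exten='map20')         # by catalog number
#   img = get_bgps_img('l030', exten='labelmask')   # by field string
#   data, header = img[0].data, img[0].header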
def sample_img(img, coord):
"""
Parameters
----------
img : astropy.io.hdu.hdulist.HDUList
Fits image.
coord : tuple
Coordinates in (lon, lat) form appropriate for native coordinates in
the fits image header.
Returns
-------
sample : number
Returns `np.nan` if coordinate not present in image
"""
img_wcs = wcs.WCS(img[0].header)
# Convert coordinates to pixel values
pix = _np.round(img_wcs.wcs_world2pix(coord[0], coord[1], 1)).astype(int)
# Sample pixel value
try:
sample = img[0].data[pix[1], pix[0]]
    except Exception:  # out-of-bounds or malformed coordinates yield NaN
sample = _np.nan
finally:
return sample
def sample_bgps_img(lon, lat, exten='labelmask', v=210):
"""
Retrieve a value from the BGPS images or labelmasks at a coordinate
position.
Parameters
----------
lon : number
lat : number
Galactic coordinates in decimal degrees.
Returns
-------
sample : number
Returns `np.nan` if not contained in the BGPS bounds file.
"""
# Get field identifier at coordinates
field = select_bgps_field(lon=lon, lat=lat, coord_type='gal')
if not isinstance(field, str):
return _np.nan
img = get_bgps_img(field, exten=exten, v=v)
return sample_img(img=img, coord=(lon, lat))
class BgpsLib(object):
"""
Container for BGPS images.
"""
def __init__(self, exten='map20', v=210):
"""
Parameters
----------
exten : string, default 'map20'
Image file extension name. Valid types:
labelmask -> source contours, label masks
labelmap50 -> source contours, label masks for v1
map20 -> default map
map50 -> default map for v1
medmap20 -> median map 20
noisemap20 -> rms map
v : number, default 210
BGPS version number
"""
self.exten = exten
self.v = v
self.bgps = read_bgps()
self.fields = self.bgps.field.unique()
self._images = {}
def read_images(self):
for field in self.fields:
img = get_bgps_img(field, exten=self.exten, v=self.v)
self._images[field] = img
def get_hdu(self, field):
return self._images[field]
def get_img(self, field):
return self._images[field][0].data
def get_hdr(self, field):
return self._images[field][0].header
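# Minimal usage sketch for BgpsLib (the field name is illustrative; note that
# read_images loads every field's image up front, which can be memory heavy):
#
#   lib = BgpsLib(exten='map20', v=210)
#   lib.read_images()
#   data = lib.get_img('l030')
#   header = lib.get_hdr('l030')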
|
autocorr/besl
|
besl/image.py
|
Python
|
gpl-3.0
| 5,022
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# @Author: oesteban
# @Date: 2015-10-08 13:07:21
# @Last Modified by: oesteban
# @Last Modified time: 2015-10-08 14:53:20
if __name__ == "__main__":
    import argparse
    import logging
import os.path as op
from qap.viz.reports import workflow_report
parser = argparse.ArgumentParser()
req = parser.add_argument_group("Required Inputs")
req.add_argument('-i', '--input_csv', type=str, required=True,
help='filepath to csv file generated by qap')
req.add_argument(
'-m', '--qap_mode', type=str, help='report type',
choices=['qap_anatomical_spatial', 'qap_functional_temporal',
'qap_functional_spatial'], required=True)
args = parser.parse_args()
in_csv = args.input_csv
out_dir = op.dirname(in_csv)
qap_type = args.qap_mode
    # ``run_name`` and ``logger`` are otherwise undefined in this script;
    # deriving the run name from the input CSV is an assumption.
    run_name = op.splitext(op.basename(in_csv))[0]
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('qap_report')
    reports = workflow_report(in_csv, qap_type, run_name,
                              out_dir=out_dir)
    for k, v in reports.items():
if v['success']:
logger.info('Written report (%s) in %s' % (k, v['path']))
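# Example invocation (the CSV file name is illustrative):
#   python qap_report.py -i qap_anatomical_spatial.csv -m qap_anatomical_spatial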
|
preprocessed-connectomes-project/quality-assessment-protocol
|
scripts/qap_report.py
|
Python
|
bsd-3-clause
| 1,208
|
"""
This module handles accessing, storing, and managing the graph reference.
"""
from __future__ import absolute_import
import hashlib
import json
import os
import re
from pkg_resources import resource_string
import requests
import six
from plotly import files, utils
GRAPH_REFERENCE_PATH = '/v2/plot-schema'
GRAPH_REFERENCE_DOWNLOAD_TIMEOUT = 5 # seconds
# For backwards compat, we keep this list of previously known objects.
# Moving forward, we only add new trace names.
# {<ClassName>: {'object_name': <object_name>, 'base_type': <base-type>}
_BACKWARDS_COMPAT_CLASS_NAMES = {
'AngularAxis': {'object_name': 'angularaxis', 'base_type': dict},
'Annotation': {'object_name': 'annotation', 'base_type': dict},
'Annotations': {'object_name': 'annotations', 'base_type': list},
'Area': {'object_name': 'area', 'base_type': dict},
'Bar': {'object_name': 'bar', 'base_type': dict},
'Box': {'object_name': 'box', 'base_type': dict},
'ColorBar': {'object_name': 'colorbar', 'base_type': dict},
'Contour': {'object_name': 'contour', 'base_type': dict},
'Contours': {'object_name': 'contours', 'base_type': dict},
'Data': {'object_name': 'data', 'base_type': list},
'ErrorX': {'object_name': 'error_x', 'base_type': dict},
'ErrorY': {'object_name': 'error_y', 'base_type': dict},
'ErrorZ': {'object_name': 'error_z', 'base_type': dict},
'Figure': {'object_name': 'figure', 'base_type': dict},
'Font': {'object_name': 'font', 'base_type': dict},
'Heatmap': {'object_name': 'heatmap', 'base_type': dict},
'Histogram': {'object_name': 'histogram', 'base_type': dict},
'Histogram2d': {'object_name': 'histogram2d', 'base_type': dict},
'Histogram2dContour': {'object_name': 'histogram2dcontour',
'base_type': dict},
'Layout': {'object_name': 'layout', 'base_type': dict},
'Legend': {'object_name': 'legend', 'base_type': dict},
'Line': {'object_name': 'line', 'base_type': dict},
'Margin': {'object_name': 'margin', 'base_type': dict},
'Marker': {'object_name': 'marker', 'base_type': dict},
'RadialAxis': {'object_name': 'radialaxis', 'base_type': dict},
'Scatter': {'object_name': 'scatter', 'base_type': dict},
'Scatter3d': {'object_name': 'scatter3d', 'base_type': dict},
'Scene': {'object_name': 'scene', 'base_type': dict},
'Stream': {'object_name': 'stream', 'base_type': dict},
'Surface': {'object_name': 'surface', 'base_type': dict},
'Trace': {'object_name': None, 'base_type': dict},
'XAxis': {'object_name': 'xaxis', 'base_type': dict},
'XBins': {'object_name': 'xbins', 'base_type': dict},
'YAxis': {'object_name': 'yaxis', 'base_type': dict},
'YBins': {'object_name': 'ybins', 'base_type': dict},
'ZAxis': {'object_name': 'zaxis', 'base_type': dict}
}
def get_graph_reference():
"""
Attempts to load local copy of graph reference or makes GET request if DNE.
:return: (dict) The graph reference.
:raises: (PlotlyError) When graph reference DNE and GET request fails.
"""
default_config = files.FILE_CONTENT[files.CONFIG_FILE]
if files.check_file_permissions():
graph_reference = utils.load_json_dict(files.GRAPH_REFERENCE_FILE)
config = utils.load_json_dict(files.CONFIG_FILE)
# TODO: https://github.com/plotly/python-api/issues/293
plotly_api_domain = config.get('plotly_api_domain',
default_config['plotly_api_domain'])
else:
graph_reference = {}
plotly_api_domain = default_config['plotly_api_domain']
sha1 = hashlib.sha1(six.b(str(graph_reference))).hexdigest()
graph_reference_url = '{}{}?sha1={}'.format(plotly_api_domain,
GRAPH_REFERENCE_PATH, sha1)
try:
response = requests.get(graph_reference_url,
timeout=GRAPH_REFERENCE_DOWNLOAD_TIMEOUT)
response.raise_for_status()
except requests.exceptions.RequestException:
if not graph_reference:
path = os.path.join('graph_reference', 'default-schema.json')
s = resource_string('plotly', path).decode('utf-8')
graph_reference = json.loads(s)
else:
if six.PY3:
content = str(response.content, encoding='utf-8')
else:
content = response.content
data = json.loads(content)
if data['modified']:
graph_reference = data['schema']
return utils.decode_unicode(graph_reference)
def string_to_class_name(string):
"""
Single function to handle turning object names into class names.
GRAPH_REFERENCE has names like `error_y`, which we'll turn into `ErrorY`.
:param (str) string: A string that we'll turn into a class name string.
:return: (str)
"""
# capitalize first letter
string = re.sub(r'[A-Za-z]', lambda m: m.group().title(), string, count=1)
# replace `*_<c>` with `*<C>` E.g., `Error_x` --> `ErrorX`
string = re.sub(r'_[A-Za-z0-9]+', lambda m: m.group()[1:].title(), string)
return str(string)
def object_name_to_class_name(object_name):
"""Not all objects have classes auto-generated."""
if object_name in TRACE_NAMES:
return string_to_class_name(object_name)
if object_name in OBJECT_NAME_TO_CLASS_NAME:
return OBJECT_NAME_TO_CLASS_NAME[object_name]
if object_name in ARRAYS:
return 'list'
else:
return 'dict'
def get_attributes_dicts(object_name, parent_object_names=()):
"""
Returns *all* attribute information given the context of parents.
The response has the form:
{
('some', 'path'): {},
('some', 'other', 'path'): {},
...
'additional_attributes': {}
}
There may be any number of paths mapping to attribute dicts. There will be
one attribute dict under 'additional_attributes' which will usually be
empty.
:param (str|unicode) object_name: The object name whose attributes we want.
:param (list[str|unicode]) parent_object_names: Names of parent objects.
:return: (dict)
"""
object_dict = OBJECTS[object_name]
# If we patched this object, we may have added hard-coded attrs.
additional_attributes = object_dict['additional_attributes']
    # We should also have one or more paths where attributes are defined.
attribute_paths = list(object_dict['attribute_paths']) # shallow copy
# If we have parent_names, some of these attribute paths may be invalid.
for parent_object_name in reversed(parent_object_names):
if parent_object_name in ARRAYS:
continue
parent_object_dict = OBJECTS[parent_object_name]
parent_attribute_paths = parent_object_dict['attribute_paths']
for path in list(attribute_paths):
if not _is_valid_sub_path(path, parent_attribute_paths):
attribute_paths.remove(path)
# We return a dict mapping paths to attributes. We also add in additional
# attributes if defined.
attributes_dicts = {path: utils.get_by_path(GRAPH_REFERENCE, path)
for path in attribute_paths}
attributes_dicts['additional_attributes'] = additional_attributes
return attributes_dicts
def get_valid_attributes(object_name, parent_object_names=()):
attributes = get_attributes_dicts(object_name, parent_object_names)
# These are for documentation and quick lookups. They're just strings.
valid_attributes = set()
for attributes_dict in attributes.values():
for key, val in attributes_dict.items():
if key not in GRAPH_REFERENCE['defs']['metaKeys']:
valid_attributes.add(key)
deprecated_attributes = attributes_dict.get('_deprecated', {})
for key, val in deprecated_attributes.items():
if key not in GRAPH_REFERENCE['defs']['metaKeys']:
valid_attributes.add(key)
return valid_attributes
def get_deprecated_attributes(object_name, parent_object_names=()):
attributes = get_attributes_dicts(object_name, parent_object_names)
# These are for documentation and quick lookups. They're just strings.
deprecated_attributes = set()
for attributes_dict in attributes.values():
deprecated_attributes_dict = attributes_dict.get('_deprecated', {})
for key, val in deprecated_attributes_dict.items():
if key not in GRAPH_REFERENCE['defs']['metaKeys']:
deprecated_attributes.add(key)
return deprecated_attributes
def get_subplot_attributes(object_name, parent_object_names=()):
attributes = get_attributes_dicts(object_name, parent_object_names)
# These are for documentation and quick lookups. They're just strings.
subplot_attributes = set()
for attributes_dict in attributes.values():
for key, val in attributes_dict.items():
if key not in GRAPH_REFERENCE['defs']['metaKeys']:
if isinstance(val, dict) and val.get('_isSubplotObj'):
subplot_attributes.add(key)
deprecated_attributes = attributes_dict.get('_deprecated', {})
for key, val in deprecated_attributes.items():
if key not in GRAPH_REFERENCE['defs']['metaKeys']:
if isinstance(val, dict) and val.get('_isSubplotObj'):
subplot_attributes.add(key)
return subplot_attributes
def attribute_path_to_object_names(attribute_container_path):
"""
Return a location within a figure from a path existing in GRAPH_REFERENCE.
Users don't need to know about GRAPH_REFERENCE, so yielding information
about paths there would only be confusing. Also, the implementation and
structure there may change, but figure structure won't.
:param (tuple[str]) attribute_container_path: An object should exist here.
:return: (tuple[str]) A tuple of object names:
Example:
In: ('traces', 'pie', 'attributes', 'marker')
Out: ('figure', 'data', 'pie', 'marker')
"""
object_names = ['figure'] # this is always the case
if 'layout' in attribute_container_path:
for path_part in attribute_container_path:
if path_part in OBJECTS:
object_names.append(path_part)
if path_part in ARRAYS:
object_names.append(path_part)
object_names.append(path_part[:-1])
elif 'layoutAttributes' in attribute_container_path:
object_names.append('layout')
start_index = attribute_container_path.index('layoutAttributes')
for path_part in attribute_container_path[start_index:]:
if path_part in OBJECTS:
object_names.append(path_part)
if path_part in ARRAYS:
object_names.append(path_part)
object_names.append(path_part[:-1])
else:
# assume it's in 'traces'
object_names.append('data')
for path_part in attribute_container_path:
if path_part in OBJECTS:
object_names.append(path_part)
if path_part in ARRAYS:
object_names.append(path_part)
object_names.append(path_part[:-1])
return tuple(object_names)
def get_role(object_name, attribute, value=None, parent_object_names=()):
"""
Values have types associated with them based on graph_reference.
'data' type values are always kept
'style' values are kept if they're sequences (but not strings)
:param (str) object_name: The name of the object containing 'attribute'.
:param (str) attribute: The attribute we want the `role` of.
:param (*) value: If the value is an array, the return can be different.
:param parent_object_names: An iterable of obj names from graph reference.
:returns: (str) This will be 'data', 'style', or 'info'.
"""
if object_name in TRACE_NAMES and attribute == 'type':
return 'info'
attributes_dicts = get_attributes_dicts(object_name, parent_object_names)
matches = []
for attributes_dict in attributes_dicts.values():
for key, val in attributes_dict.items():
if key == attribute:
matches.append(val)
for key, val in attributes_dict.get('_deprecated', {}).items():
if key == attribute:
matches.append(val)
roles = []
for match in matches:
role = match['role']
array_ok = match.get('arrayOk')
if value is not None and array_ok:
iterable = hasattr(value, '__iter__')
stringy = isinstance(value, six.string_types)
dicty = isinstance(value, dict)
if iterable and not stringy and not dicty:
role = 'data'
roles.append(role)
# TODO: this is ambiguous until the figure is in place...
if 'data' in roles:
role = 'data'
else:
role = roles[0]
return role
def _is_valid_sub_path(path, parent_paths):
"""
Check if a sub path is valid given an iterable of parent paths.
:param (tuple[str]) path: The path that may be a sub path.
:param (list[tuple]) parent_paths: The known parent paths.
:return: (bool)
Examples:
* ('a', 'b', 'c') is a valid subpath of ('a', )
* ('a', 'd') is not a valid subpath of ('b', )
* ('a', ) is not a valid subpath of ('a', 'b')
* ('anything',) is a valid subpath of ()
"""
if not parent_paths:
return True
for parent_path in parent_paths:
if path[:len(parent_path)] == parent_path:
return True
return False
def _get_objects():
"""
Create a reorganization of graph reference which organizes by object name.
Each object can have *many* different definitions in the graph reference.
These possibilities get narrowed down when we have contextual information
about parent objects. For instance, Marker in Scatter has a different
definition than Marker in Pie. However, we need Marker, Scatter, and Pie
to exist on their own as well.
Each value has the form:
{
'meta_paths': [],
'attribute_paths': [],
'additional_attributes': {}
}
* meta_paths describes the top-most path where this object is defined
* attribute_paths describes all the locations where attributes exist
* additional_attributes can be used to hard-code (patch) the plot schema
:return: (dict)
"""
objects = {}
for node, path in utils.node_generator(GRAPH_REFERENCE):
if any([key in path for key in GRAPH_REFERENCE['defs']['metaKeys']]):
continue # objects don't exist under nested meta keys
if node.get('role') != 'object':
continue
if 'items' in node:
continue
object_name = path[-1]
if object_name not in objects:
objects[object_name] = {'meta_paths': [], 'attribute_paths': [],
'additional_attributes': {}}
if node.get('attributes'):
objects[object_name]['attribute_paths'].append(
path + ('attributes', )
)
else:
objects[object_name]['attribute_paths'].append(path)
objects[object_name]['meta_paths'].append(path)
return objects
def _patch_objects():
"""Things like Layout, Figure, and Data need to be included."""
layout_attribute_paths = []
for node, path in utils.node_generator(GRAPH_REFERENCE):
if any([key in path for key in GRAPH_REFERENCE['defs']['metaKeys']]):
continue # objects don't exist under nested meta keys
if path and path[-1] == 'layoutAttributes':
layout_attribute_paths.append(path)
for trace_name in TRACE_NAMES:
OBJECTS[trace_name] = {
'meta_paths': [('traces', trace_name)],
'attribute_paths': [('traces', trace_name, 'attributes')],
'additional_attributes': {}
}
OBJECTS['layout'] = {'meta_paths': [('layout', )],
'attribute_paths': layout_attribute_paths,
'additional_attributes': {}}
figure_attributes = {'layout': {'role': 'object'},
'data': {'role': 'object', '_isLinkedToArray': True}}
OBJECTS['figure'] = {'meta_paths': [],
'attribute_paths': [],
'additional_attributes': figure_attributes}
def _get_arrays():
"""Very few arrays, but this dict is the complement of OBJECTS."""
arrays = {}
for node, path in utils.node_generator(GRAPH_REFERENCE):
if any([key in path for key in GRAPH_REFERENCE['defs']['metaKeys']]):
continue # objects don't exist under nested meta keys
if node.get('role') != 'object':
continue
if 'items' not in node:
continue
object_name = path[-1]
if object_name not in arrays:
items = node['items']
# If items is a dict, it's anyOf them.
if isinstance(items, dict):
item_names = list(items.keys())
else:
item_names = [object_name[:-1]]
arrays[object_name] = {'meta_paths': [path], 'items': item_names}
return arrays
def _patch_arrays():
"""Adds information on our eventual Data array."""
ARRAYS['data'] = {'meta_paths': [('traces', )], 'items': list(TRACE_NAMES)}
def _get_classes():
"""
We eventually make classes out of the objects in GRAPH_REFERENCE.
:return: (dict) A mapping of class names to object names.
"""
classes = {}
# add all the objects we had before, but mark them if they no longer
# exist in the graph reference
for class_name, class_dict in _BACKWARDS_COMPAT_CLASS_NAMES.items():
object_name = class_dict['object_name']
base_type = class_dict['base_type']
if object_name in OBJECTS or object_name in ARRAYS:
classes[class_name] = {'object_name': object_name,
'base_type': base_type}
else:
classes[class_name] = {'object_name': None, 'base_type': base_type}
# always keep the trace dicts up to date
for object_name in TRACE_NAMES:
class_name = string_to_class_name(object_name)
classes[class_name] = {'object_name': object_name, 'base_type': dict}
return classes
# The ordering here is important.
GRAPH_REFERENCE = get_graph_reference()
# See http://blog.labix.org/2008/06/27/watch-out-for-listdictkeys-in-python-3
TRACE_NAMES = list(GRAPH_REFERENCE['traces'].keys())
OBJECTS = _get_objects()
_patch_objects()
ARRAYS = _get_arrays()
_patch_arrays()
CLASSES = _get_classes()
OBJECT_NAME_TO_CLASS_NAME = {class_dict['object_name']: class_name
for class_name, class_dict in CLASSES.items()
if class_dict['object_name'] is not None}
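# Usage sketch (results depend on the plot schema actually loaded; the names
# below exist in stock Plotly schemas but are not guaranteed here):
#
#   'scatter' in TRACE_NAMES                    # -> True
#   object_name_to_class_name('error_y')        # -> 'ErrorY'
#   attrs = get_valid_attributes('scatter')     # set of attribute name strings
#   get_role('scatter', 'x', value=[1, 2, 3])   # -> 'data' (arrayOk attribute)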
|
jeanfeydy/lddmm-ot
|
LDDMM_Python/lddmm_python/lib/plotly/graph_reference.py
|
Python
|
mit
| 19,000
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2016 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
#    the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import argparse
import os
import platform
import shutil
import subprocess
import sys
WINDOWS_MAKEFILE = """
OUTDIR = {outdir}
OBJDIR = obj
BINDIR = bin
TARGET = $(OUTDIR)\$(BINDIR)\{target}.exe
RM = del /s /f /q
CC = cl -c -nologo
MT = mt -nologo
LINK = link -nologo
CFLAGS = -GS -Zi -Fd"$(OUTDIR)\$(BINDIR)\\vc.pdb" -O2 -W2 -MDd -EHsc -D_DEBUG -D_CRT_SECURE_NO_WARNINGS -D_CRT_NONSTDC_NO_WARNINGS -DTYR_CODING=1 -wd4091 -wd4996
LDFLAGS = -INCREMENTAL -DEBUG -PDB:$(TARGET).pdb -manifest -manifestfile:$(TARGET).manifest -manifestuac:no winmm.lib Dbghelp.lib ws2_32.lib
INCLUDES= {includes}
OBJS = {objs}
all: $(TARGET)
rebuild: clean all
clean:
$(RM) $(OUTDIR)\$(BINDIR)\*.* $(OUTDIR)\$(OBJDIR)\*.*
$(TARGET): $(OBJS)
$(LINK) -out:$(TARGET) $(OBJS) $(LDFLAGS)
$(MT) -manifest $(TARGET).manifest -outputresource:$(TARGET);1
{make_objs}
"""
WINDOWS_CCOBJ = """{make_obj}: {make_src}
$(CC) -Fo:$@ $(CFLAGS) $(INCLUDES) $**
"""
POSIX_MAKEFILE = """
OUTDIR = {outdir}
OBJDIR = obj
BINDIR = bin
TARGET = $(OUTDIR)/$(BINDIR)/{target}
RM = rm -rfv
CC = {cc}
CFLAGS = -g -O2 -Wall -std=c++0x -DTYR_CODING=1
LDFLAGS = {ldflags}
INCLUDES= {includes}
OBJS = {objs}
all: $(TARGET)
rebuild: clean all
clean:
$(RM) $(OUTDIR)/$(BINDIR)/*.* $(OUTDIR)/$(OBJDIR)/*.*
$(TARGET): $(OBJS)
$(CC) -o $@ $^ $(LDFLAGS)
{make_objs}
"""
POSIX_CCOBJ = """{make_obj}: {make_src}
$(CC) -o $@ -c $(CFLAGS) $(INCLUDES) $^
"""
LINUX_MKEXT = {'cc': 'g++', 'ldflags': '-lpthread'}
DARWIN_MKEXT = {'cc': 'clang++', 'ldflags': '-lc -lpthread'}
TARGET = 'tyr.test'
OUTDIR = 'build'
INC_DIRS = ['..']
SOURCE_DIRS = {
'common': (('./', True), ('../basic', False), ('../basic/unexposed', True), ('../net', False)),
'darwin': (('../basic/posix', True), ('../basic/darwin', True), ('../net/posix', True), ('../net/darwin', True)),
'linux': (('../basic/posix', True), ('../basic/linux', True), ('../net/posix', True), ('../net/linux', True)),
'windows': (('../basic/windows', True), ('../net/windows', True)),
}
if sys.version_info.major < 3:
import codecs
def do_open(fname, mode='rb', encoding=None):
return codecs.open(fname, mode, encoding)
else:
do_open = open
def get_platform():
return platform.system().lower()
def safe_mkdir(dirpath):
if not os.path.exists(dirpath):
os.mkdir(dirpath)
def safe_rmdir(dirpath):
if os.path.exists(dirpath):
shutil.rmtree(dirpath)
def safe_rm(path):
if os.path.exists(path):
if os.path.isdir(path):
safe_rmdir(path)
else:
os.remove(path)
def get_sources_from_dir(dirpath, recursive=True):
cur_sources = os.listdir(dirpath)
all_sources = []
for source_fname in cur_sources:
source_fullpath = os.path.join(dirpath, source_fname).replace('\\', '/')
if os.path.isdir(source_fullpath) and recursive:
all_sources.extend(get_sources_from_dir(source_fullpath))
else:
if os.path.splitext(source_fname)[1][1:] in ('cpp', 'cc'):
all_sources.append(source_fullpath)
return all_sources
def get_all_sources(dirs=(('./', True),)):
all_sources = []
for path, recursive in dirs:
all_sources.extend(get_sources_from_dir(path, recursive))
return all_sources
def gen_windows_obj(source_fname):
s = source_fname.strip('./').strip('../').replace('/', '.')
return '$(OUTDIR)\$(OBJDIR)\{objname}.obj '.format(objname=os.path.splitext(s)[0])
def gen_posix_obj(source_fname):
s = source_fname.strip('./').strip('../').replace('/', '.')
return '$(OUTDIR)/$(OBJDIR)/{objname}.o '.format(objname=os.path.splitext(s)[0])
def gen_windows_make_obj(out, src):
return WINDOWS_CCOBJ.format(make_obj=out.strip(), make_src=src)
def gen_posix_make_obj(out, src):
return POSIX_CCOBJ.format(make_obj=out.strip(), make_src=src)
def gen_windows_include(inc_dirs=INC_DIRS):
inc_list = []
for inc in inc_dirs:
inc_list.append('-I"{inc}" '.format(inc=inc.replace('/', '\\')))
return ''.join(inc_list).rstrip()
def gen_posix_include(inc_dirs=INC_DIRS):
inc_list = []
for inc in inc_dirs:
inc_list.append('-I{inc} '.format(inc=inc))
return ''.join(inc_list).rstrip()
def gen_makefile(platform='linux', target='a.out', outdir='build', sources=()):
mname = sys.modules['__main__']
gen_obj = getattr(mname, 'gen_{pt}_obj'.format(pt=platform), gen_posix_obj)
gen_make_obj = getattr(mname, 'gen_{pt}_make_obj'.format(pt=platform), gen_posix_make_obj)
gen_includes = getattr(mname, 'gen_{pt}_include'.format(pt=platform), gen_posix_include)
objs_list = []
make_objs_list = []
for s in sources:
objstr = gen_obj(s)
objs_list.append(objstr)
make_objs_list.append(gen_make_obj(objstr, s))
make_dict = dict(
outdir = outdir,
target = target,
objs = ''.join(objs_list).rstrip(),
make_objs = ''.join(make_objs_list).rstrip(),
includes = gen_includes(INC_DIRS)
)
ext_dict = getattr(mname, '{pt}_MKEXT'.format(pt=platform.upper()), None)
if ext_dict:
make_dict.update(ext_dict)
makefile_temp = getattr(mname, '{pt}_MAKEFILE'.format(pt=platform.upper()), POSIX_MAKEFILE)
with do_open('Makefile', 'w', encoding='utf-8') as fp:
fp.write(makefile_temp.format(**make_dict))
def gen_outdir(outdir='build'):
safe_mkdir(outdir)
safe_mkdir('{outdir}/bin'.format(outdir=outdir))
safe_mkdir('{outdir}/obj'.format(outdir=outdir))
def get_options():
parser = argparse.ArgumentParser(description='Devil building tool')
parser.add_argument('option', nargs='?', help='config|build|rebuild|clean|remove the project')
args = parser.parse_args()
return args.option
def do_remove():
safe_rmdir(OUTDIR)
safe_rm('Makefile')
def main():
option = get_options()
if option == 'remove':
do_remove()
return
platform = get_platform()
gen_outdir(outdir=OUTDIR)
source_dirs = SOURCE_DIRS['common'] + SOURCE_DIRS.get(platform, ())
gen_makefile(platform=platform, target=TARGET, outdir=OUTDIR, sources=get_all_sources(dirs=source_dirs))
if option == 'config':
return
elif option is None:
option = 'build'
    make = 'nmake' if platform == 'windows' else 'make'
make_flags = {
'build': '',
'rebuild': 'rebuild',
'clean': 'clean'
}
flag = make_flags.get(option, None)
if flag is None:
do_remove()
else:
subprocess.check_call('{make} {flag}'.format(make=make, flag=flag), shell=True)
if __name__ == '__main__':
main()
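# Expected invocations (the option defaults to 'build' when omitted):
#   python Build.py config    # generate the Makefile only
#   python Build.py           # generate the Makefile and build
#   python Build.py rebuild   # clean, then build
#   python Build.py clean     # remove build artifacts
#   python Build.py remove    # delete the build directory and Makefile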
|
ASMlover/study
|
cplusplus/tyr2/test/Build.py
|
Python
|
bsd-2-clause
| 8,055
|
# -*- coding: utf-8 -*-
'''These are the things used in making and using controllers.
'''
import imp
import os
class Controller(object):
"""Controllers take data from some files, do stuff with it,
and write it to the build directory."""
def __init__(self, site, data, destination, templates="_templates"):
# take four arguments: the site, the source directory, the destination
# directory, and optionally a directory wherein the templates reside.
# assume these are relative pathnames.
self.site = site
self.data_directory = os.path.join(site.source, data)
self.destination_directory = os.path.join(site.source, destination)
self.templates_directory = os.path.join(site.source, templates)
# create the destination directory, if it doesn't exist
if not os.path.exists(self.destination_directory):
os.mkdir(self.destination_directory)
def __call__(self):
# do whatever needs to be done here...
pass
def template(self, name):
"""Given a name, return the name of the file in the templates directory
that fits it."""
# list of files that fit:
files = [f for f in os.listdir(self.templates_directory)
if f.startswith(name + ".")]
# return the first file that matches, with the directory prepended.
return os.path.join(self.templates_directory, files[0])
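# A minimal sketch of a concrete controller (hypothetical; the file-copying
# behaviour is an assumption for illustration, not part of cytoplasm's API):
#
#     class CopyController(Controller):
#         def __call__(self):
#             for fname in os.listdir(self.data_directory):
#                 src = os.path.join(self.data_directory, fname)
#                 dst = os.path.join(self.destination_directory, fname)
#                 with open(src) as infile, open(dst, "w") as outfile:
#                     outfile.write(infile.read())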
|
startling/cytoplasm
|
cytoplasm/controllers.py
|
Python
|
mit
| 1,437
|
import os, sqlite3, json, random, markovify, re
from colorama import init, Fore, Style
from aiotg import Bot
bot = Bot(os.environ["API_TOKEN"])
@bot.command('.')
async def msg(chat, match):
m = chat.message
if m['chat']['type'] == 'channel':
return
if chat.message['chat']['type'] in ['group', 'supergroup']:
print(Fore.CYAN + "(%s) [%s]" % (m['chat']['id'], m['chat']['title']) + Style.RESET_ALL)
await bot.api_call('forwardMessage', chat_id=m['chat']['id'], from_chat_id='@WolfebotNews', message_id=28, disable_notification='True')
    if chat.message['chat']['type'] in ['group', 'supergroup']:
        # await so the leaveChat call actually executes before the handler returns
        return await bot.api_call('leaveChat', chat_id=m['chat']['id'])
    else:
        return
#return bot.api_call('forwardMessage', chat_id='-183777017', from_chat_id=m['chat']['id'], message_id=m['message_id'], disable_notification='True')
if __name__ == '__main__':
bot.run()
|
TNTINC/WolfeBot
|
delet.py
|
Python
|
mit
| 875
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, sookido
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: zabbix_template
short_description: Create/update/delete/dump Zabbix template
description:
- This module allows you to create, modify, delete and dump Zabbix templates.
- Multiple templates can be created or modified at once if passing JSON or XML to module.
version_added: "2.5"
author:
- "sookido (@sookido)"
- "Logan Vig (@logan2211)"
- "Dusan Matejka (@D3DeFi)"
requirements:
- "python >= 2.6"
- "zabbix-api >= 0.5.3"
options:
template_name:
description:
- Name of Zabbix template.
- Required when I(template_json) or I(template_xml) are not used.
- Mutually exclusive with I(template_json) and I(template_xml).
required: false
template_json:
description:
- JSON dump of templates to import.
- Multiple templates can be imported this way.
- Mutually exclusive with I(template_name) and I(template_xml).
required: false
type: json
template_xml:
description:
- XML dump of templates to import.
- Multiple templates can be imported this way.
- You are advised to pass XML structure matching the structure used by your version of Zabbix server.
- Custom XML structure can be imported as long as it is valid, but may not yield consistent idempotent
results on subsequent runs.
- Mutually exclusive with I(template_name) and I(template_json).
required: false
version_added: '2.9'
template_groups:
description:
- List of host groups to add template to when template is created.
- Replaces the current host groups the template belongs to if the template is already present.
- Required when creating a new template with C(state=present) and I(template_name) is used.
Not required when updating an existing template.
required: false
type: list
link_templates:
description:
- List of template names to be linked to the template.
            - Templates that are not specified but are linked to the existing template will only be unlinked, not
              cleared from the template.
required: false
type: list
clear_templates:
description:
- List of template names to be unlinked and cleared from the template.
- This option is ignored if template is being created for the first time.
required: false
type: list
macros:
description:
- List of user macros to create for the template.
- Macros that are not specified and are present on the existing template will be replaced.
- See examples on how to pass macros.
required: false
type: list
suboptions:
name:
description:
- Name of the macro.
- Must be specified in {$NAME} format.
value:
description:
- Value of the macro.
dump_format:
description:
- Format to use when dumping template with C(state=dump).
required: false
choices: [json, xml]
default: "json"
version_added: '2.9'
state:
description:
- Required state of the template.
- On C(state=present) template will be created/imported or updated depending if it is already present.
- On C(state=dump) template content will get dumped into required format specified in I(dump_format).
- On C(state=absent) template will be deleted.
required: false
choices: [present, absent, dump]
default: "present"
extends_documentation_fragment:
- zabbix
'''
EXAMPLES = '''
---
- name: Create a new Zabbix template linked to groups, macros and templates
local_action:
module: zabbix_template
server_url: http://127.0.0.1
login_user: username
login_password: password
template_name: ExampleHost
template_groups:
- Role
- Role2
link_templates:
- Example template1
- Example template2
macros:
- macro: '{$EXAMPLE_MACRO1}'
value: 30000
- macro: '{$EXAMPLE_MACRO2}'
value: 3
- macro: '{$EXAMPLE_MACRO3}'
value: 'Example'
state: present
- name: Unlink and clear templates from the existing Zabbix template
local_action:
module: zabbix_template
server_url: http://127.0.0.1
login_user: username
login_password: password
template_name: ExampleHost
clear_templates:
- Example template3
- Example template4
state: present
- name: Import Zabbix templates from JSON
local_action:
module: zabbix_template
server_url: http://127.0.0.1
login_user: username
login_password: password
template_json: "{{ lookup('file', 'zabbix_apache2.json') }}"
state: present
- name: Import Zabbix templates from XML
local_action:
module: zabbix_template
server_url: http://127.0.0.1
login_user: username
login_password: password
    template_xml: "{{ lookup('file', 'zabbix_apache2.xml') }}"
state: present
- name: Import Zabbix template from Ansible dict variable
zabbix_template:
login_user: username
login_password: password
server_url: http://127.0.0.1
template_json:
zabbix_export:
version: '3.2'
templates:
- name: Template for Testing
description: 'Testing template import'
template: Test Template
groups:
- name: Templates
applications:
- name: Test Application
state: present
- name: Configure macros on the existing Zabbix template
local_action:
module: zabbix_template
server_url: http://127.0.0.1
login_user: username
login_password: password
template_name: Template
macros:
- macro: '{$TEST_MACRO}'
value: 'Example'
state: present
- name: Delete Zabbix template
local_action:
module: zabbix_template
server_url: http://127.0.0.1
login_user: username
login_password: password
template_name: Template
state: absent
- name: Dump Zabbix template as JSON
local_action:
module: zabbix_template
server_url: http://127.0.0.1
login_user: username
login_password: password
template_name: Template
state: dump
register: template_dump
- name: Dump Zabbix template as XML
local_action:
module: zabbix_template
server_url: http://127.0.0.1
login_user: username
login_password: password
template_name: Template
dump_format: xml
state: dump
register: template_dump
'''
RETURN = '''
---
template_json:
description: The JSON dump of the template
returned: when state is dump
type: str
sample: {
"zabbix_export":{
"date":"2017-11-29T16:37:24Z",
"templates":[{
"templates":[],
"description":"",
"httptests":[],
"screens":[],
"applications":[],
"discovery_rules":[],
"groups":[{"name":"Templates"}],
"name":"Test Template",
"items":[],
"macros":[],
"template":"test"
}],
"version":"3.2",
"groups":[{
"name":"Templates"
}]
}
}
template_xml:
description: dump of the template in XML representation
returned: when state is dump and dump_format is xml
type: str
sample: |-
<?xml version="1.0" ?>
<zabbix_export>
<version>4.2</version>
<date>2019-07-12T13:37:26Z</date>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<templates>
<template>
<template>test</template>
<name>Test Template</name>
<description/>
<groups>
<group>
<name>Templates</name>
</group>
</groups>
<applications/>
<items/>
<discovery_rules/>
<httptests/>
<macros/>
<templates/>
<screens/>
<tags/>
</template>
</templates>
</zabbix_export>
'''
import atexit
import json
import traceback
import xml.etree.ElementTree as ET
from distutils.version import LooseVersion
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native
try:
from zabbix_api import ZabbixAPI, ZabbixAPIException
HAS_ZABBIX_API = True
except ImportError:
ZBX_IMP_ERR = traceback.format_exc()
HAS_ZABBIX_API = False
class Template(object):
def __init__(self, module, zbx):
self._module = module
self._zapi = zbx
# check if host group exists
def check_host_group_exist(self, group_names):
for group_name in group_names:
result = self._zapi.hostgroup.get({'filter': {'name': group_name}})
if not result:
self._module.fail_json(msg="Hostgroup not found: %s" %
group_name)
return True
# get group ids by group names
def get_group_ids_by_group_names(self, group_names):
group_ids = []
if group_names is None or len(group_names) == 0:
return group_ids
if self.check_host_group_exist(group_names):
group_list = self._zapi.hostgroup.get(
{'output': 'extend',
'filter': {'name': group_names}})
for group in group_list:
group_id = group['groupid']
group_ids.append({'groupid': group_id})
return group_ids
def get_template_ids(self, template_list):
template_ids = []
if template_list is None or len(template_list) == 0:
return template_ids
for template in template_list:
            result = self._zapi.template.get(
                {'output': 'extend',
                 'filter': {'host': template}})
            if len(result) < 1:
                continue
            else:
                template_id = result[0]['templateid']
                template_ids.append(template_id)
return template_ids
def add_template(self, template_name, group_ids, link_template_ids, macros):
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.template.create({'host': template_name, 'groups': group_ids, 'templates': link_template_ids,
'macros': macros})
def check_template_changed(self, template_ids, template_groups, link_templates, clear_templates,
template_macros, template_content, template_type):
"""Compares template parameters to already existing values if any are found.
template_json - JSON structures are compared as deep sorted dictionaries,
template_xml - XML structures are compared as strings, but filtered and formatted first,
If none above is used, all the other arguments are compared to their existing counterparts
retrieved from Zabbix API."""
changed = False
# Compare filtered and formatted XMLs strings for any changes. It is expected that provided
# XML has same structure as Zabbix uses (e.g. it was optimally exported via Zabbix GUI or API)
if template_content is not None and template_type == 'xml':
existing_template = self.dump_template(template_ids, template_type='xml')
if self.filter_xml_template(template_content) != self.filter_xml_template(existing_template):
changed = True
return changed
existing_template = self.dump_template(template_ids, template_type='json')
# Compare JSON objects as deep sorted python dictionaries
if template_content is not None and template_type == 'json':
parsed_template_json = self.load_json_template(template_content)
if self.diff_template(parsed_template_json, existing_template):
changed = True
return changed
        # If neither template_json nor template_xml was used, the user provided all parameters via module options
if template_groups is not None:
existing_groups = [g['name'] for g in existing_template['zabbix_export']['groups']]
if set(template_groups) != set(existing_groups):
changed = True
# Check if any new templates would be linked or any existing would be unlinked
exist_child_templates = [t['name'] for t in existing_template['zabbix_export']['templates'][0]['templates']]
if link_templates is not None:
if set(link_templates) != set(exist_child_templates):
changed = True
# Mark that there will be changes when at least one existing template will be unlinked
if clear_templates is not None:
for t in clear_templates:
if t in exist_child_templates:
changed = True
break
if template_macros is not None:
existing_macros = existing_template['zabbix_export']['templates'][0]['macros']
if template_macros != existing_macros:
changed = True
return changed
def update_template(self, template_ids, group_ids, link_template_ids, clear_template_ids, template_macros):
template_changes = {}
if group_ids is not None:
template_changes.update({'groups': group_ids})
if link_template_ids is not None:
template_changes.update({'templates': link_template_ids})
if clear_template_ids is not None:
template_changes.update({'templates_clear': clear_template_ids})
if template_macros is not None:
template_changes.update({'macros': template_macros})
if template_changes:
# If we got here we know that only one template was provided via template_name
template_changes.update({'templateid': template_ids[0]})
self._zapi.template.update(template_changes)
def delete_template(self, templateids):
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.template.delete(templateids)
def ordered_json(self, obj):
# Deep sort json dicts for comparison
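        # e.g. ordered_json({'b': [2, 1], 'a': 0}) -> [('a', 0), ('b', [1, 2])],
        # so two structurally equal templates compare equal regardless of key
        # or list ordering in the export.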
if isinstance(obj, dict):
return sorted((k, self.ordered_json(v)) for k, v in obj.items())
if isinstance(obj, list):
return sorted(self.ordered_json(x) for x in obj)
else:
return obj
def dump_template(self, template_ids, template_type='json'):
if self._module.check_mode:
self._module.exit_json(changed=True)
try:
dump = self._zapi.configuration.export({'format': template_type, 'options': {'templates': template_ids}})
if template_type == 'xml':
return str(ET.tostring(ET.fromstring(dump.encode('utf-8')), encoding='utf-8').decode('utf-8'))
else:
return self.load_json_template(dump)
except ZabbixAPIException as e:
self._module.fail_json(msg='Unable to export template: %s' % e)
def diff_template(self, template_json_a, template_json_b):
# Compare 2 zabbix templates and return True if they differ.
template_json_a = self.filter_template(template_json_a)
template_json_b = self.filter_template(template_json_b)
if self.ordered_json(template_json_a) == self.ordered_json(template_json_b):
return False
return True
def filter_template(self, template_json):
# Filter the template json to contain only the keys we will update
keep_keys = set(['graphs', 'templates', 'triggers', 'value_maps'])
unwanted_keys = set(template_json['zabbix_export']) - keep_keys
for unwanted_key in unwanted_keys:
del template_json['zabbix_export'][unwanted_key]
# Versions older than 2.4 do not support description field within template
desc_not_supported = False
if LooseVersion(self._zapi.api_version()).version[:2] < LooseVersion('2.4').version:
desc_not_supported = True
# Filter empty attributes from template object to allow accurate comparison
for template in template_json['zabbix_export']['templates']:
for key in list(template.keys()):
if not template[key] or (key == 'description' and desc_not_supported):
template.pop(key)
return template_json
def filter_xml_template(self, template_xml):
"""Filters out keys from XML template that may wary between exports (e.g date or version) and
keys that are not imported via this module.
It is advised that provided XML template exactly matches XML structure used by Zabbix"""
# Strip last new line and convert string to ElementTree
parsed_xml_root = self.load_xml_template(template_xml.strip())
keep_keys = ['graphs', 'templates', 'triggers', 'value_maps']
# Remove unwanted XML nodes
for node in list(parsed_xml_root):
if node.tag not in keep_keys:
parsed_xml_root.remove(node)
# Filter empty attributes from template objects to allow accurate comparison
for template in list(parsed_xml_root.find('templates')):
for element in list(template):
if element.text is None and len(list(element)) == 0:
template.remove(element)
# Filter new lines and indentation
        xml_root_text = list(line.strip() for line in ET.tostring(parsed_xml_root).decode().split('\n'))
return ''.join(xml_root_text)
def load_json_template(self, template_json):
try:
return json.loads(template_json)
except ValueError as e:
self._module.fail_json(msg='Invalid JSON provided', details=to_native(e), exception=traceback.format_exc())
def load_xml_template(self, template_xml):
try:
return ET.fromstring(template_xml)
except ET.ParseError as e:
self._module.fail_json(msg='Invalid XML provided', details=to_native(e), exception=traceback.format_exc())
def import_template(self, template_content, template_type='json'):
# rules schema latest version
update_rules = {
'applications': {
'createMissing': True,
'deleteMissing': True
},
'discoveryRules': {
'createMissing': True,
'updateExisting': True,
'deleteMissing': True
},
'graphs': {
'createMissing': True,
'updateExisting': True,
'deleteMissing': True
},
'httptests': {
'createMissing': True,
'updateExisting': True,
'deleteMissing': True
},
'items': {
'createMissing': True,
'updateExisting': True,
'deleteMissing': True
},
'templates': {
'createMissing': True,
'updateExisting': True
},
'templateLinkage': {
'createMissing': True
},
'templateScreens': {
'createMissing': True,
'updateExisting': True,
'deleteMissing': True
},
'triggers': {
'createMissing': True,
'updateExisting': True,
'deleteMissing': True
},
'valueMaps': {
'createMissing': True,
'updateExisting': True
}
}
try:
# old api version support here
api_version = self._zapi.api_version()
# updateExisting for application removed from zabbix api after 3.2
if LooseVersion(api_version).version[:2] <= LooseVersion('3.2').version:
update_rules['applications']['updateExisting'] = True
import_data = {'format': template_type, 'source': template_content, 'rules': update_rules}
self._zapi.configuration.import_(import_data)
except ZabbixAPIException as e:
self._module.fail_json(msg='Unable to import template', details=to_native(e),
exception=traceback.format_exc())
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(type='str', required=True, aliases=['url']),
login_user=dict(type='str', required=True),
login_password=dict(type='str', required=True, no_log=True),
http_login_user=dict(type='str', required=False, default=None),
http_login_password=dict(type='str', required=False, default=None, no_log=True),
validate_certs=dict(type='bool', required=False, default=True),
template_name=dict(type='str', required=False),
template_json=dict(type='json', required=False),
template_xml=dict(type='str', required=False),
template_groups=dict(type='list', required=False),
link_templates=dict(type='list', required=False),
clear_templates=dict(type='list', required=False),
macros=dict(type='list', required=False),
dump_format=dict(type='str', required=False, default='json', choices=['json', 'xml']),
state=dict(default="present", choices=['present', 'absent', 'dump']),
timeout=dict(type='int', default=10)
),
required_one_of=[
['template_name', 'template_json', 'template_xml']
],
mutually_exclusive=[
['template_name', 'template_json', 'template_xml']
],
required_if=[
['state', 'absent', ['template_name']],
['state', 'dump', ['template_name']]
],
supports_check_mode=True
)
if not HAS_ZABBIX_API:
module.fail_json(msg=missing_required_lib('zabbix-api', url='https://pypi.org/project/zabbix-api/'), exception=ZBX_IMP_ERR)
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
http_login_user = module.params['http_login_user']
http_login_password = module.params['http_login_password']
validate_certs = module.params['validate_certs']
template_name = module.params['template_name']
template_json = module.params['template_json']
template_xml = module.params['template_xml']
template_groups = module.params['template_groups']
link_templates = module.params['link_templates']
clear_templates = module.params['clear_templates']
template_macros = module.params['macros']
dump_format = module.params['dump_format']
state = module.params['state']
timeout = module.params['timeout']
zbx = None
try:
zbx = ZabbixAPI(server_url, timeout=timeout, user=http_login_user, passwd=http_login_password,
validate_certs=validate_certs)
zbx.login(login_user, login_password)
atexit.register(zbx.logout)
except ZabbixAPIException as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
template = Template(module, zbx)
# Identify template names for IDs retrieval
# Template names are expected to reside in ['zabbix_export']['templates'][*]['template'] for both data types
template_content, template_type = None, None
if template_json is not None:
template_type = 'json'
template_content = template_json
json_parsed = template.load_json_template(template_content)
template_names = list(t['template'] for t in json_parsed['zabbix_export']['templates'])
elif template_xml is not None:
template_type = 'xml'
template_content = template_xml
xml_parsed = template.load_xml_template(template_content)
template_names = list(t.find('template').text for t in list(xml_parsed.find('templates')))
else:
template_names = [template_name]
template_ids = template.get_template_ids(template_names)
if state == "absent":
if not template_ids:
            module.exit_json(changed=False, msg="Template not found. No changes made: %s" % template_name)
template.delete_template(template_ids)
module.exit_json(changed=True, result="Successfully deleted template %s" % template_name)
elif state == "dump":
if not template_ids:
module.fail_json(msg='Template not found: %s' % template_name)
if dump_format == 'json':
module.exit_json(changed=False, template_json=template.dump_template(template_ids, template_type='json'))
elif dump_format == 'xml':
module.exit_json(changed=False, template_xml=template.dump_template(template_ids, template_type='xml'))
elif state == "present":
# Load all subelements for template that were provided by user
group_ids = None
if template_groups is not None:
group_ids = template.get_group_ids_by_group_names(template_groups)
link_template_ids = None
if link_templates is not None:
link_template_ids = template.get_template_ids(link_templates)
clear_template_ids = None
if clear_templates is not None:
clear_template_ids = template.get_template_ids(clear_templates)
if template_macros is not None:
# Zabbix configuration.export does not differentiate python types (numbers are returned as strings)
for macroitem in template_macros:
for key in macroitem:
macroitem[key] = str(macroitem[key])
if not template_ids:
# Assume new templates are being added when no ID's were found
if template_content is not None:
template.import_template(template_content, template_type)
module.exit_json(changed=True, result="Template import successful")
else:
if group_ids is None:
module.fail_json(msg='template_groups are required when creating a new Zabbix template')
template.add_template(template_name, group_ids, link_template_ids, template_macros)
module.exit_json(changed=True, result="Successfully added template: %s" % template_name)
else:
changed = template.check_template_changed(template_ids, template_groups, link_templates, clear_templates,
template_macros, template_content, template_type)
if module.check_mode:
module.exit_json(changed=changed)
if changed:
if template_type is not None:
template.import_template(template_content, template_type)
else:
template.update_template(template_ids, group_ids, link_template_ids, clear_template_ids,
template_macros)
module.exit_json(changed=changed, result="Template successfully updated")
if __name__ == '__main__':
main()
|
sestrella/ansible
|
lib/ansible/modules/monitoring/zabbix/zabbix_template.py
|
Python
|
gpl-3.0
| 28,122
|
# IfcOpenShell - IFC toolkit and geometry engine
# Copyright (C) 2021 Thomas Krijnen <thomas@aecgeeks.com>
#
# This file is part of IfcOpenShell.
#
# IfcOpenShell is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# IfcOpenShell is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with IfcOpenShell. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import ifcopenshell_wrapper
version = ifcopenshell_wrapper.version()
get_log = ifcopenshell_wrapper.get_log
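# Typical usage (a sketch; assumes these names are re-exported by the
# ifcopenshell package):
#   import ifcopenshell
#   print(ifcopenshell.version)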
|
IfcOpenShell/IfcOpenShell
|
src/ifcopenshell-python/ifcopenshell/main.py
|
Python
|
lgpl-3.0
| 1,031
|
# Copyright (C) 2013 Korei Klein <korei.klein1@gmail.com>
from misc import *
from calculus.enriched import formula, endofunctor, constructors
from calculus.basic import bifunctor as basicBifunctor, endofunctor as basicEndofunctor
UntransportableException = basicBifunctor.UntransportableException
class Bifunctor:
def translate(self):
raise Exception("Abstract superclass.")
def variables(self):
raise Exception("Abstract superclass.")
def onObjects(self, left, right):
raise Exception("Abstract superclass.")
# return a function representing a natural transform: F(B, .) --> F(B, And([B, .]))
def transport_duplicating(self, B):
nt = self.translate().transport_duplicating(B.translate())
return (lambda x:
formula.Arrow(src = self.onObjects(left = B, right = x),
tgt = self.onObjects(left = B, right = constructors.And([B.updateVariables(), x])),
basicArrow = nt(x.translate())))
def onArrows(self, left, right):
return formula.Arrow(src = self.onObjects(left.src, right.src),
tgt = self.onObjects(left.tgt, right.tgt),
basicArrow = self.translate().onArrows(left.translate(), right.translate()))
def precompose(self, left, right):
return PrecompositeBifunctor(bifunctor = self, left = left, right = right)
def compose(self, other):
return PostcompositeBifunctor(bifunctor = self, functor = other)
def precomposeLeft(self, left):
return self.precompose(left = left, right = endofunctor.identity_functor)
def precomposeRight(self, right):
return self.precompose(left = endofunctor.identity_functor, right = right)
class PostcompositeBifunctor(Bifunctor):
def __init__(self, bifunctor, functor):
self.bifunctor = bifunctor
self.functor = functor
def translate(self):
return self.bifunctor.translate().compose(self.functor.translate())
def onObjects(self, left, right):
return self.functor.onObject(self.bifunctor.onObjects(left, right))
def variables(self):
result = []
result.extend(self.bifunctor.variables())
result.extend(self.functor.variables())
return result
def precompose(self, left, right):
return PostcompositeBifunctor(bifunctor = self.bifunctor.precompose(left, right),
functor = self.functor)
def compose(self, other):
return PostcompositeBifunctor(bifunctor = self.bifunctor, functor = self.functor.compose(other))
class PrecompositeBifunctor(Bifunctor):
def __init__(self, bifunctor, left, right):
self.bifunctor = bifunctor
self.left = left
self.right = right
def __repr__(self):
return "%s x %s . %s"%(self.left, self.right, self.bifunctor)
def variables(self):
    result = []
    result.extend(self.left.variables())
    result.extend(self.right.variables())
    result.extend(self.bifunctor.variables())
    return result
def translate(self):
return self.bifunctor.translate().precompose(
left = self.left.translate(),
right = self.right.translate())
def onObjects(self, left, right):
return self.bifunctor.onObjects(self.left.onObject(left), self.right.onObject(right))
def precompose(self, left, right):
return PrecompositeBifunctor(bifunctor = self.bifunctor,
left = left.compose(self.left),
right = right.compose(self.right))
class Conjunction(Bifunctor):
# rightIndex is an index into the list formed after inserting at leftIndex
# e.g. Conjunction([a, b, c], 1, 1).onObjects(x, y) -> [a, x, b, c] -> [a, y, x, b, c]
# e.g. Conjunction([a, b, c], 1, 2).onObjects(x, y) -> [a, x, b, c] -> [a, x, y, b, c]
# e.g. Conjunction([a, b, c], 1, 0).onObjects(x, y) -> [a, x, b, c] -> [y, a, x, b, c]
def __init__(self, values, leftIndex, rightIndex):
self.values = values
self.leftIndex = leftIndex
self.rightIndex = rightIndex
def __repr__(self):
values = [repr(value) for value in self.values]
values.insert(self.leftIndex, " . ")
values.insert(self.rightIndex, " . ")
return self.name() + " [ " + ', '.join(values) + ' ]'
def translate(self):
lesserIndex = min(self.leftIndex, self.rightIndex)
greaterIndex = max(self.leftIndex, self.rightIndex)
# begin, (lesser), middle, (greater), end
begin = self.values[:lesserIndex]
m = greaterIndex
if self.leftIndex < self.rightIndex:
m -= 1
middle = self.values[lesserIndex:m]
end = self.values[m:]
if len(end) > 0:
result = self.basicEndofunctor()(side = left,
other = self.multiOp()(end).translate())
else:
result = basicEndofunctor.identity_functor
for value in middle[::-1]:
result = result.compose(self.basicEndofunctor()(side = right,
other = value.translate()))
result = self.basicBifunctor().precomposeRight(result)
for value in begin[::-1]:
result = result.compose(self.basicEndofunctor()(side = right,
other = value.translate()))
if self.leftIndex < self.rightIndex:
return result
else:
return result.commute()
def variables(self):
return []
def onObjects(self, left, right):
values = list(self.values)
values.insert(self.leftIndex, left)
values.insert(self.rightIndex, right)
return self.multiOp()(values)
class And(Conjunction):
def name(self):
return 'AND'
def basicEndofunctor(self):
return basicEndofunctor.And
def enrichedEndofunctor(self):
return endofunctor.And
def basicBifunctor(self):
return basicBifunctor.and_functor
def multiOp(self):
return formula.And
class Or(Conjunction):
def name(self):
return 'OR'
def basicEndofunctor(self):
return basicEndofunctor.Or
def enrichedEndofunctor(self):
return endofunctor.Or
def basicBifunctor(self):
return basicBifunctor.or_functor
def multiOp(self):
return formula.Or
|
koreiklein/fantasia
|
calculus/enriched/bifunctor.py
|
Python
|
gpl-2.0
| 5,810
|
from django.test import TestCase, Client
from django.core.files import File
from django.core.files.uploadedfile import SimpleUploadedFile
from django.contrib.auth.models import User
from .models import Admin
from .models import Professor
from .models import Student
from django.contrib.auth import authenticate
from django.contrib.auth import login
from django.shortcuts import render
class ViewTestCase(TestCase):
def setUp(self):
self.client = Client()
def test_viewStudents(self):
response = self.client.get('/viewStudents/')
self.assertEqual(response.status_code, 200)
def test_viewAdmin(self):
response = self.client.get('/viewAdmin/')
self.assertEqual(response.status_code, 200)
def test_viewPro(self):
response = self.client.get('/viewProfessor/')
self.assertEqual(response.status_code, 200)
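# The upload views exercised below expect one CSV row per account
# (inferred from the fixtures used in these tests):
#   addStudents:  email,roll_number
#   addAdmin:     email
#   addProfessor: email,name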
class AddStudentsTestCase(TestCase):
def setUp(self):
self.client = Client()
User.objects.create_user('admin', 'admin@gmail.com', 'admin.password')
Admin.objects.create(user= User.objects.get(username = 'admin'))
self.client.login(username = 'admin', password = 'admin.password')
def test_addStudents_missing_fields(self):
response = self.client.post('/addStudents/',{'CSVFile':SimpleUploadedFile(b'test.csv', b'email@email\nemail1@email,12345')})
template_name = 'main/error.html'
context = {'err_at':'email@email','err_msg':'The csv file does not have required number of columns'}
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['err_msg'], context['err_msg'])
self.assertEqual(response.context['err_at'], context['err_at'])
self.assertTemplateUsed(response, template_name)
def test_addStudents_empty_fields(self):
response = self.client.post('/addStudents/',{'CSVFile':SimpleUploadedFile(b'test.csv', b'email@email,\nemail1@email,12345')})
template_name = 'main/error.html'
context = {'err_at':'email@email, ','err_msg':'One of the fields seems to be empty'}
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['err_msg'], context['err_msg'])
self.assertEqual(response.context['err_at'], context['err_at'])
self.assertTemplateUsed(response, template_name)
def test_addStudents_spclcharacter_fields(self):
response = self.client.post('/addStudents/',{'CSVFile':SimpleUploadedFile(b'test.csv', b'email@email,12345\nemail1@email,12!#$#$')})
template_name = 'main/error.html'
context = {'err_at':'email1@email, 12!#$#$','err_msg':'One of the fields seems to have special characters'}
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['err_msg'], context['err_msg'])
self.assertEqual(response.context['err_at'], context['err_at'])
self.assertTemplateUsed(response, template_name)
def test_addStudents_valid_example(self):
response = self.client.post('/addStudents/',{'CSVFile':SimpleUploadedFile(b'test.csv', b'stu1@iiits.in,is12\nstu2@iiits.in,is22\n')})
template_name = 'main/tables.html'
#context = {'model_name':'Students','err_msg':'One of the fields seems to have special characters'}
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Student')
### assert other context items as well
self.assertTemplateUsed(response, template_name)
class AddAdminTestCase(TestCase):
def setUp(self):
self.client = Client()
User.objects.create_user('admin', 'admin@gmail.com', 'admin.password')
Admin.objects.create(user= User.objects.get(username = 'admin'))
self.client.login(username = 'admin', password = 'admin.password')
def test_addAdmin_empty_fields(self):
response = self.client.post('/addAdmin/',{'CSVFile':SimpleUploadedFile(b'test.csv', b'\nemail@email\n')})
template_name = 'main/error.html'
context = {'err_at':'','err_msg':'The csv file does not have required number of columns'}
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['err_msg'], context['err_msg'])
self.assertEqual(response.context['err_at'], context['err_at'])
self.assertTemplateUsed(response, template_name)
def test_addAdmin_spclcharacter_fields(self):
response = self.client.post('/addAdmin/',{'CSVFile':SimpleUploadedFile(b'test.csv', b'email@email\ner@#@#!@#@email')})
template_name = 'main/error.html'
context = {'err_at':'er@#@#!@#@email','err_msg':'One of the fields seems to have special characters'}
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['err_msg'], context['err_msg'])
self.assertEqual(response.context['err_at'], context['err_at'])
self.assertTemplateUsed(response, template_name)
def test_addAdmin_valid_example(self):
response = self.client.post('/addAdmin/',{'CSVFile':SimpleUploadedFile(b'test.csv', b'adm1@iiits.in\nadm2@iiits.in\n')})
template_name = 'main/tables.html'
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Admin')
### assert other context items as well
self.assertTemplateUsed(response, template_name)
class AddProfessorTestCase(TestCase):
def setUp(self):
self.client = Client()
User.objects.create_user('admin', 'admin@gmail.com', 'admin.password')
Admin.objects.create(user= User.objects.get(username = 'admin'))
self.client.login(username = 'admin', password = 'admin.password')
def test_addProfessor_empty_fields(self):
response = self.client.post('/addProfessor/',{'CSVFile':SimpleUploadedFile(b'test.csv', b'email@email,\nemail1@email1,name')})
template_name = 'main/error.html'
context = {'err_at':'email@email, ','err_msg':'One of the fields seems to be empty'}
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['err_msg'], context['err_msg'])
self.assertEqual(response.context['err_at'], context['err_at'])
self.assertTemplateUsed(response, template_name)
def test_addProfessor_spclcharacter_fields(self):
response = self.client.post('/addProfessor/',{'CSVFile':SimpleUploadedFile(b'test.csv', b'email@email,name\ner@#@#!@#@email,name1')})
template_name = 'main/error.html'
context = {'err_at':'er@#@#!@#@email, name1','err_msg':'One of the fields seems to have special characters'}
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context['err_msg'], context['err_msg'])
self.assertEqual(response.context['err_at'], context['err_at'])
self.assertTemplateUsed(response, template_name)
def test_addProfessor_valid_example(self):
response = self.client.post('/addProfessor/',{'CSVFile':SimpleUploadedFile(b'test.csv', b'prof1@iiits.in,prof1\nprof2@iiits.in,prof2\n')})
template_name = 'main/tables.html'
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Professor')
### assert other context items as well
self.assertTemplateUsed(response, template_name)
|
chrizandr/ITS_feedback
|
feedback_portal/main/tests.py
|
Python
|
gpl-3.0
| 7,319
|
import hashlib
import urllib
from django.contrib import messages
from django.contrib.auth import authenticate
from django.contrib.auth import login
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.db.models import Q
from django.forms.models import model_to_dict
from django.views.generic import FormView
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, Http404
from django.shortcuts import render
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from orb.models import UserProfile, Tag, Category, Resource, ResourceRating, Collection, ResourceTracker, TagTracker, SearchTracker, CollectionUser, ResourceURL, ResourceFile
from orb.profiles.forms import LoginForm, RegisterForm, ResetForm, ProfileForm, DeleteProfileForm
from orb.emailer import password_reset
from orb.signals import user_registered
from tastypie.models import ApiKey
def login_view(request):
username = password = ''
# if already logged in
if request.user.is_authenticated():
return HttpResponseRedirect(reverse('orb_home'))
if request.POST:
form = LoginForm(request.POST)
username = request.POST.get('username')
password = request.POST.get('password')
next = request.POST.get('next')
user = authenticate(username=username, password=password)
if user is not None and user.is_active:
login(request, user)
return redirect(next) if next else redirect(reverse('orb_home'))
else:
form = LoginForm(initial={'next': request.GET.get('next'), })
return render(request, 'orb/form.html', {'username': username, 'form': form, 'title': _(u'Login')})
class RegistrationView(FormView):
template_name = 'orb/form.html'
form_class = RegisterForm
initial = {'mailing': True}
def get_initial(self):
initial = self.initial.copy()
initial.update({'next': self.request.GET.get('next', '')})
return initial
def get_form_kwargs(self):
kwargs = super(RegistrationView, self).get_form_kwargs()
return kwargs
def get_success_url(self, form):
return form.cleaned_data['next'] if form.cleaned_data.get('next') else reverse('profile_register_thanks')
def get_context_data(self, **kwargs):
context = super(RegistrationView, self).get_context_data(**kwargs)
context['title'] = _(u'Register')
return context
def form_valid(self, form):
user_profile = form.save_profile()
user_registered.send(sender=user_profile.user, user=user_profile.user, request=self.request)
        authd_user = form.authenticate_user()
if authd_user and authd_user.is_active:
login(self.request, authd_user)
return redirect(self.get_success_url(form))
def reset(request):
if request.method == 'POST': # if form submitted...
form = ResetForm(request.POST)
if form.is_valid():
username = form.cleaned_data.get("username")
try:
user = User.objects.get(username__exact=username)
except User.DoesNotExist:
user = User.objects.get(email__exact=username)
newpass = User.objects.make_random_password(length=8)
user.set_password(newpass)
user.save()
password_reset(user, newpass)
return HttpResponseRedirect('sent')
else:
form = ResetForm() # An unbound form
return render(request, 'orb/form.html', {'form': form, 'title': _(u'Reset password')})
@login_required
def edit(request):
key = ApiKey.objects.get(user__id=request.user.id)
if request.method == 'POST':
form = ProfileForm(request.POST, request.FILES)
build_form_options(form, blank_options=False)
if form.is_valid():
# update basic data
email = form.cleaned_data.get("email")
first_name = form.cleaned_data.get("first_name")
last_name = form.cleaned_data.get("last_name")
request.user.email = email
request.user.first_name = first_name
request.user.last_name = last_name
request.user.save()
try:
user_profile = UserProfile.objects.get(user=request.user)
except UserProfile.DoesNotExist:
user_profile = UserProfile()
user_profile.user = request.user
if request.FILES.has_key('photo'):
user_profile.photo = request.FILES["photo"]
if form.cleaned_data.get("organisation").strip() != '':
category = Category.objects.get(slug='organisation')
try:
organisation = Tag.objects.get(
name=form.cleaned_data.get("organisation"), category=category)
except Tag.DoesNotExist:
organisation = Tag()
organisation.name = form.cleaned_data.get("organisation")
organisation.category = category
organisation.create_user = request.user
organisation.update_user = request.user
organisation.save()
user_profile.organisation = organisation
if form.cleaned_data.get("role") != '0':
role = Tag.objects.get(pk=form.cleaned_data.get("role"))
user_profile.role = role
else:
user_profile.role = None
user_profile.role_other = form.cleaned_data.get("role_other")
user_profile.gender = form.cleaned_data.get("gender")
user_profile.age_range = form.cleaned_data.get("age_range")
user_profile.mailing = form.cleaned_data.get("mailing")
user_profile.website = form.cleaned_data.get("website")
user_profile.twitter = form.cleaned_data.get("twitter")
user_profile.about = form.cleaned_data.get("about")
user_profile.save()
messages.success(request, _(u"Profile updated"))
# if password should be changed
password = form.cleaned_data.get("password")
if password:
request.user.set_password(password)
request.user.save()
messages.success(request, _(u"Password updated"))
else:
try:
user_profile = UserProfile.objects.get(user=request.user)
except UserProfile.DoesNotExist:
user_profile = UserProfile()
if user_profile.role is not None:
role = user_profile.role.id
else:
role = 0
form = ProfileForm(initial={'username': request.user.username,
'email': request.user.email,
'first_name': request.user.first_name,
'last_name': request.user.last_name,
'api_key': key.key,
'organisation': user_profile.organisation,
'role': role,
'role_other': user_profile.role_other,
'age_range': user_profile.age_range,
'gender': user_profile.gender,
'mailing': user_profile.mailing,
'about': user_profile.about,
'website': user_profile.website,
'twitter': user_profile.twitter,
'photo': user_profile.photo})
build_form_options(form, blank_options=False)
return render(request, 'orb/profile/edit.html', {'form': form, })
@login_required
def view_profile(request, id):
try:
user = User.objects.get(pk=id)
except User.DoesNotExist:
raise Http404()
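    # Gravatar identifies avatars by the MD5 hex digest of the e-mail address
    # (canonically trimmed and lower-cased first; this view hashes the raw value).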
gravatar_url = "https://www.gravatar.com/avatar.php?"
gravatar_url += urllib.urlencode({
'gravatar_id': hashlib.md5(user.email).hexdigest(),
'size': 64
})
return render(request, 'orb/profile/view.html', {'viewuser': user, 'gravatar_url': gravatar_url})
@login_required
def view_my_profile(request):
try:
user = User.objects.get(pk=request.user.id)
return view_profile(request, user.id)
except User.DoesNotExist:
raise Http404()
@login_required
def view_my_ratings(request):
try:
user = User.objects.get(pk=request.user.id)
except User.DoesNotExist:
raise Http404()
ratings = ResourceRating.objects.filter(
resource__status=Resource.APPROVED, user=user).order_by('resource__title')
return render(request, 'orb/profile/rated.html', {'ratings': ratings})
@login_required
def view_my_bookmarks(request):
try:
user = User.objects.get(pk=request.user.id)
except User.DoesNotExist:
raise Http404()
bookmarks = Resource.objects.filter(status=Resource.APPROVED, collectionresource__collection__visibility=Collection.PRIVATE,
collectionresource__collection__collectionuser__user=user).order_by('title')
return render(request, 'orb/profile/bookmarks.html', {'bookmarks': bookmarks})
@login_required
def export_data(request):
    '''
    Render an export of everything associated with the requesting user:
    profile, resources, collections, trackers and ratings.
    '''
resources = Resource.objects.filter(Q(create_user=request.user) | Q(update_user=request.user)).order_by('-create_date')
collections = Collection.objects.filter(collectionuser__user=request.user).order_by('-create_date')
resource_trackers = ResourceTracker.objects.filter(user=request.user).order_by('-access_date')
tag_trackers = TagTracker.objects.filter(user=request.user).order_by('-access_date')
search_trackers = SearchTracker.objects.filter(user=request.user).order_by('-access_date')
ratings = ResourceRating.objects.filter(user=request.user).order_by('-create_date')
return render(request, 'orb/profile/export.html',
{'userrecord': model_to_dict(request.user, fields=[field.name for field in request.user._meta.fields]),
'userprofile': model_to_dict(request.user.userprofile, fields=[field.name for field in request.user.userprofile._meta.fields]),
'organisation': request.user.userprofile.organisation.name,
'resources': resources,
'collections': collections,
'resource_trackers': resource_trackers,
'tag_trackers': tag_trackers,
'search_trackers': search_trackers,
'ratings': ratings})
@login_required
def delete_account(request):
resources_count = Resource.objects.filter(create_user=request.user).count()
if request.method == 'POST':
form = DeleteProfileForm(resources_count, request.POST)
if form.is_valid():
# ratings
ResourceRating.objects.filter(user=request.user).delete()
# search trackers
SearchTracker.objects.filter(user=request.user).delete()
# tag trackers
TagTracker.objects.filter(user=request.user).delete()
# resource trackers
ResourceTracker.objects.filter(user=request.user).delete()
# collections
CollectionUser.objects.filter(user=request.user).delete()
if form.cleaned_data.get("delete_resources"):
# resources
Resource.objects.filter(create_user=request.user).delete()
# resource_urls
ResourceURL.objects.filter(create_user=request.user).delete()
# resource_files
ResourceFile.objects.filter(create_user=request.user).delete()
# user
u = User.objects.get(pk=request.user.id)
u.delete()
return HttpResponseRedirect(reverse('profile_delete_account_complete'))
else:
form = DeleteProfileForm(resources_count, initial={'username':request.user.username},)
return render(request, 'orb/profile/delete.html',
{'form': form })
def delete_account_complete(request):
return render(request, 'orb/profile/delete_complete.html')
# Helper Methods
def build_form_options(form, blank_options=True):
# roles
form.fields['role'].choices = [('0', '--')]
for t in Tag.objects.filter(category__slug='audience').order_by('order_by', 'name'):
form.fields['role'].choices.append((t.id, t.name))
    if blank_options:
form.fields['age_range'].choices = [('0', '--')]
form.fields['gender'].choices = [('0', '--')]
else:
form.fields['age_range'].choices = []
form.fields['gender'].choices = []
# age range
for x, y in UserProfile.AGE_RANGE:
form.fields['age_range'].choices.append((x, y))
# gender
for x, y in UserProfile.GENDER:
form.fields['gender'].choices.append((x, y))
return
|
mPowering/django-orb
|
orb/profiles/views.py
|
Python
|
gpl-3.0
| 13,227
|
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from unittest import TestCase
from traceml.processors.units_processors import (
format_sizeof,
number_percentage_format,
to_percentage,
)
@pytest.mark.processors_mark
class ToPercentageTest(TestCase):
"""A test case for the `to_percentage`."""
def test_number_format_works_as_expected(self):
float_nums = [
(123.123, "123.12"),
(123.1243453, "123.12"),
(213213213.123, "213,213,213.12"),
]
int_nums = [(213214, "213,214"), (123213.00, "123,213")]
for num, expected in float_nums:
self.assertEqual(
number_percentage_format(num, precision=2, use_comma=True), expected
)
for num, expected in int_nums:
self.assertEqual(
number_percentage_format(num, precision=2, use_comma=True), expected
)
def test_get_percentage_works_as_expected(self):
float_nums = [
(0.123, "12.30%"),
(3.1243453, "312.43%"),
(213.12312, "21,312.31%"),
]
int_nums = [(0.14, "14%"), (1.300, "130%")]
for num, expected in float_nums:
self.assertEqual(
to_percentage(num, rounding=2, precision=2, use_comma=True), expected
)
for num, expected in int_nums:
self.assertEqual(
to_percentage(num, rounding=2, precision=2, use_comma=True), expected
)
def test_works_as_expected_for_valid_values(self):
test_data = [
(0, "0%"),
(0.25, "25%"),
(-0.25, "-25%"),
(12, "1200%"),
(0.123, "12.3%"),
(0.12345, "12.35%"),
(0.12001, "12%"),
(0.12101, "12.1%"),
("0", "0%"),
("0.25", "25%"),
(3.1243453, "312.43%"),
(213.12312, "21312.31%"),
(0.14, "14%"),
(1.300, "130%"),
]
for value, expected in test_data:
result = to_percentage(value)
self.assertEqual(result, expected)
def test_works_as_expected_for_precision(self):
test_data = [
(0, "0%"),
(0.25, "25%"),
(-0.25, "-25%"),
(12, "1200%"),
(0.123, "12.300%"),
(0.12345, "12.345%"),
(0.12001, "12.001%"),
(0.12101, "12.101%"),
("0", "0%"),
("0.25", "25%"),
(3.1243453, "312.435%"),
(213.12312, "21,312.312%"),
(0.14, "14%"),
(1.300, "130%"),
]
for value, expected in test_data:
result = to_percentage(value, rounding=3, precision=3, use_comma=True)
self.assertEqual(result, expected)
def test_raises_value_error_for_invalid_types(self):
with self.assertRaises(ValueError):
to_percentage("foo")
def test_format_sizeof(self):
assert format_sizeof(10) == "10.0B"
assert format_sizeof(10000) == "9.8KiB"
assert format_sizeof(100000) == "97.7KiB"
assert format_sizeof(10000000) == "9.5MiB"
assert format_sizeof(10000000000) == "9.3GiB"
|
polyaxon/polyaxon
|
traceml/tests/test_events_processing/test_units_processors.py
|
Python
|
apache-2.0
| 3,814
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-30 12:42
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('web', '0011_auto_20170130_1241'),
]
operations = [
migrations.AlterField(
model_name='listing',
name='region',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='listings', to='web.Locality'),
),
migrations.AlterField(
model_name='listing',
name='suburb',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='listings', to='web.Suburb'),
),
]
|
leesdolphin/rentme
|
rentme/web/migrations/0012_auto_20170130_1242.py
|
Python
|
agpl-3.0
| 783
|
#!/usr/bin/env python
"""
@package mi.dataset.parser
@file mi-dataset/mi/dataset/parser/ctdmo_ghqr_imodem_telemetered_driver.py
@author Maria Lutz, Mark Worden
@brief Parser for the ctdmo_ghqr_imodem dataset driver
Release notes:
Initial release
"""
__author__ = 'Mark Worden'
__license__ = 'Apache 2.0'
import array
import binascii
import struct
import ntplib
import re
from mi.core.common import BaseEnum
from mi.core.log import get_logger
log = get_logger()
from mi.core.instrument.dataset_data_particle import DataParticle
from mi.core.exceptions import ConfigurationException, UnexpectedDataException
from mi.dataset.parser.utilities import time_2000_to_ntp, \
formatted_timestamp_utc_time
from mi.dataset.parser.common_regexes import END_OF_LINE_REGEX, \
FLOAT_REGEX, ASCII_HEX_CHAR_REGEX
from mi.dataset.dataset_parser import SimpleParser, DataSetDriverConfigKeys
class CtdmoGhqrImodemParticleClassKey(BaseEnum):
METADATA_PARTICLE_CLASS = 'metadata_particle_class'
INSTRUMENT_PARTICLE_CLASS = 'instrument_particle_class'
class CtdmoGhqrImodemDataParticleKey(BaseEnum):
# For metadata data particle
DATE_TIME_STRING = 'date_time_string'
SERIAL_NUMBER = 'serial_number'
BATTERY_VOLTAGE_MAIN = 'battery_voltage_main'
BATTERY_VOLTAGE_LITHIUM = 'battery_voltage_lithium'
SAMPLE_NUMBER = 'sample_number'
MEM_FREE = 'mem_free'
SAMPLE_INTERVAL = 'sample_interval'
PRESSURE_RANGE = 'pressure_range'
NUM_SAMPLES = 'num_samples'
# For instrument data particle
TEMPERATURE = 'temperature'
CONDUCTIVITY = 'conductivity'
PRESSURE = 'pressure'
CTD_TIME = 'ctd_time'
HEADER_BEGIN_REGEX = r'#MCAT Status' + END_OF_LINE_REGEX
FILE_DATETIME_REGEX = r'#7370_DateTime:\s+(?P<' + \
CtdmoGhqrImodemDataParticleKey.DATE_TIME_STRING + \
'>\d{8}\s+\d{6})' + END_OF_LINE_REGEX
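# e.g. a hypothetical header line '#7370_DateTime: 20141231  235959'
# would capture date_time_string = '20141231  235959'.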
INSTRUMENT_SERIAL_NUM_REGEX = \
r'#SBE37-IM.*SERIAL NO.\s+(?P<' + \
CtdmoGhqrImodemDataParticleKey.SERIAL_NUMBER + \
'>\d+).*' + END_OF_LINE_REGEX
BATTERY_VOLTAGE_REGEX = \
r'#vMain\s+=\s+(?P<' + \
CtdmoGhqrImodemDataParticleKey.BATTERY_VOLTAGE_MAIN + \
'>' + FLOAT_REGEX + '),\s+vLith\s+=\s+(?P<' + \
CtdmoGhqrImodemDataParticleKey.BATTERY_VOLTAGE_LITHIUM + \
'>' + FLOAT_REGEX + ')' + END_OF_LINE_REGEX
SAMPLE_NUM_MEM_REGEX = \
r'#samplenumber\s+=\s+(?P<' + \
CtdmoGhqrImodemDataParticleKey.SAMPLE_NUMBER + \
'>\d+),\s+free\s+=\s+(?P<' + \
CtdmoGhqrImodemDataParticleKey.MEM_FREE + \
'>\d+)' + END_OF_LINE_REGEX
SAMPLE_INTERVAL_REGEX = \
r'#sample interval\s+=\s+(?P<' + \
CtdmoGhqrImodemDataParticleKey.SAMPLE_INTERVAL + \
'>\d+) seconds' + END_OF_LINE_REGEX
PRESSURE_RANGE_REGEX = \
r'#PressureRange\s+=\s+(?P<' + \
CtdmoGhqrImodemDataParticleKey.PRESSURE_RANGE + \
'>\d+)' + END_OF_LINE_REGEX
SAMPLES_RECORDED_REGEX = \
r'#SamplesRecorded\s+=\s+(?P<' + \
CtdmoGhqrImodemDataParticleKey.NUM_SAMPLES + \
'>\d+)' + END_OF_LINE_REGEX
class MetadataMatchKey(BaseEnum):
FILE_DATETIME_MATCH = 'file_datetime_match'
INSTRUMENT_SERIAL_NO_MATCH = 'instrument_serial_no_match'
BATTERY_VOLTAGE_MATCH = 'battery_voltage_match'
SAMPLE_NUM_MEM_MATCH = 'sample_num_mem_match'
SAMPLE_INTERVAL_MATCH = 'sample_interval_match'
PRESSURE_RANGE_MATCH = 'pressure_range_match'
SAMPLES_RECORDED_MATCH = 'samples_recorded_match'
LOGGING_REGEX = r'#logging.*' + END_OF_LINE_REGEX
DATA_BEGIN_REGEX = r'#Begin Data' + END_OF_LINE_REGEX
DATA_FORMAT_REGEX = r'#Data Format:.+' + END_OF_LINE_REGEX
DATA_END_REGEX = r'#End Data.*'
# each line of format tttttcccccppppTTTTTTTT.
# ttttt: temp
# ccccc: conductivity
# pppp: pressure
# TTTTTTTT: time
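# e.g. a hypothetical record '3F81D' + '5A2C1' + '0102' + '1DA8F3B2' would
# capture temperature='3F81D', conductivity='5A2C1', pressure='0102' and
# ctd_time='1DA8F3B2' (pressure/ctd_time are byte-swapped when decoded below).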
INSTRUMENT_DATA_REGEX = \
b'(?P<' + CtdmoGhqrImodemDataParticleKey.TEMPERATURE + \
'>' + ASCII_HEX_CHAR_REGEX + '{5})(?P<' + \
CtdmoGhqrImodemDataParticleKey.CONDUCTIVITY + \
'>' + ASCII_HEX_CHAR_REGEX + '{5})(?P<' + \
CtdmoGhqrImodemDataParticleKey.PRESSURE + \
'>' + ASCII_HEX_CHAR_REGEX + '{4})(?P<' + \
CtdmoGhqrImodemDataParticleKey.CTD_TIME + \
'>' + ASCII_HEX_CHAR_REGEX + '{8})' + END_OF_LINE_REGEX
INSTRUMENT_DATA_MATCHER = re.compile(INSTRUMENT_DATA_REGEX)
# This table is used in the generation of the metadata data particle.
METADATA_ENCODING_RULES = [
(CtdmoGhqrImodemDataParticleKey.DATE_TIME_STRING, str),
(CtdmoGhqrImodemDataParticleKey.SERIAL_NUMBER, str),
(CtdmoGhqrImodemDataParticleKey.BATTERY_VOLTAGE_MAIN, float),
(CtdmoGhqrImodemDataParticleKey.BATTERY_VOLTAGE_LITHIUM, float),
(CtdmoGhqrImodemDataParticleKey.SAMPLE_NUMBER, int),
(CtdmoGhqrImodemDataParticleKey.MEM_FREE, int),
(CtdmoGhqrImodemDataParticleKey.SAMPLE_INTERVAL, int),
(CtdmoGhqrImodemDataParticleKey.PRESSURE_RANGE, int),
(CtdmoGhqrImodemDataParticleKey.NUM_SAMPLES, int)
]
class DataParticleType(BaseEnum):
CTDMO_GHQR_IMODEM_INSTRUMENT_RECOVERED = \
'ctdmo_ghqr_imodem_instrument_recovered'
CTDMO_GHQR_IMODEM_METADATA_RECOVERED = \
'ctdmo_ghqr_imodem_metadata_recovered'
CTDMO_GHQR_IMODEM_INSTRUMENT = \
'ctdmo_ghqr_imodem_instrument'
CTDMO_GHQR_IMODEM_METADATA = \
'ctdmo_ghqr_imodem_metadata'
class CtdmoGhqrImodemInstrumentDataParticle(DataParticle):
"""
Class for generating the CTDMO instrument particle.
"""
def _build_parsed_values(self):
"""
Generate a particle by iterating through the raw_data dictionary
items. Convert the data in the manner necessary and return the
encoded particles.
"""
result = []
for key in self.raw_data.keys():
if key == CtdmoGhqrImodemDataParticleKey.CONDUCTIVITY or \
key == CtdmoGhqrImodemDataParticleKey.TEMPERATURE:
encoded = self._encode_value(key, self.raw_data[key],
lambda x: int('0x' + x, 16))
result.append(encoded)
elif key == CtdmoGhqrImodemDataParticleKey.PRESSURE or \
key == CtdmoGhqrImodemDataParticleKey.CTD_TIME:
if key == CtdmoGhqrImodemDataParticleKey.PRESSURE:
type_code = 'H'
else:
type_code = 'I'
# First unhexlify the ASCII hex into binary and put it into an
# array of unsigned shorts ('H') or unsigned ints ('I')
byte_array = array.array(type_code, binascii.unhexlify(
self.raw_data[key]))
# Then swap bytes to get the bytes in the right order.
# This is called out as necessary in the IDD
byte_array.byteswap()
# Then unpack the binary data to get the final value
(val,) = struct.unpack('>'+type_code, byte_array)
# Now we will encode it and append it to the result to
# return
encoded = self._encode_value(key, val, int)
result.append(encoded)
if key == CtdmoGhqrImodemDataParticleKey.CTD_TIME:
# Need to use the CTD time for the internal timestamp
ctd_time = time_2000_to_ntp(val)
self.set_internal_timestamp(timestamp=ctd_time)
return result
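# A minimal standalone sketch of the hex decoding above, using a
# hypothetical 4-char pressure value ('3F1A'). The byteswap reproduces the
# byte reordering called out in the IDD before the big-endian unpack.
def _demo_decode_swapped_hex(hex_str='3F1A', type_code='H'):
    import array
    import binascii
    import struct
    byte_array = array.array(type_code, binascii.unhexlify(hex_str))
    byte_array.byteswap()  # bytes 3F 1A become 1A 3F
    (val,) = struct.unpack('>' + type_code, byte_array)
    return val  # 0x1A3F == 6719 for the default arguments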
class CtdmoGhqrImodemInstrumentTelemeteredDataParticle(
CtdmoGhqrImodemInstrumentDataParticle):
_data_particle_type = \
DataParticleType.CTDMO_GHQR_IMODEM_INSTRUMENT
class CtdmoGhqrImodemInstrumentRecoveredDataParticle(
CtdmoGhqrImodemInstrumentDataParticle):
_data_particle_type = \
DataParticleType.CTDMO_GHQR_IMODEM_INSTRUMENT_RECOVERED
class CtdmoGhqrImodemMetadataDataParticle(DataParticle):
"""
Class for generating the Metadata particle.
"""
def _build_parsed_values(self):
"""
Generate a particle by calling encode_value for each entry
in the METADATA_ENCODING_RULES list, where each entry is a
tuple containing the particle field name and a function to
use for data conversion.
"""
return [self._encode_value(name,
self.raw_data[name],
encoding_function)
for name,
encoding_function in METADATA_ENCODING_RULES]
class CtdmoGhqrImodemMetadataTelemeteredDataParticle(
CtdmoGhqrImodemMetadataDataParticle):
_data_particle_type = \
DataParticleType.CTDMO_GHQR_IMODEM_METADATA
class CtdmoGhqrImodemMetadataRecoveredDataParticle(
CtdmoGhqrImodemMetadataDataParticle):
_data_particle_type = \
DataParticleType.CTDMO_GHQR_IMODEM_METADATA_RECOVERED
class CtdmoGhqrImodemParser(SimpleParser):
def __init__(self,
config,
stream_handle,
exception_callback):
super(CtdmoGhqrImodemParser, self).__init__(config,
stream_handle,
exception_callback)
try:
self.instrument_particle_class = config[
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT][
CtdmoGhqrImodemParticleClassKey.INSTRUMENT_PARTICLE_CLASS]
self.metadata_particle_class = config[
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT][
CtdmoGhqrImodemParticleClassKey.METADATA_PARTICLE_CLASS]
except KeyError:
raise ConfigurationException(
'Parser configuration missing particle classes dict or particle class key')
# Construct the dictionary to save off the metadata record matches
self._metadata_matches_dict = {
MetadataMatchKey.FILE_DATETIME_MATCH: None,
MetadataMatchKey.INSTRUMENT_SERIAL_NO_MATCH: None,
MetadataMatchKey.BATTERY_VOLTAGE_MATCH: None,
MetadataMatchKey.SAMPLE_NUM_MEM_MATCH: None,
MetadataMatchKey.SAMPLE_INTERVAL_MATCH: None,
MetadataMatchKey.PRESSURE_RANGE_MATCH: None,
MetadataMatchKey.SAMPLES_RECORDED_MATCH: None,
}
self._metadata_sample_generated = False
def _process_metadata_match_dict(self, key, particle_data):
group_dict = self._metadata_matches_dict[key].groupdict()
if key == MetadataMatchKey.FILE_DATETIME_MATCH:
particle_data[CtdmoGhqrImodemDataParticleKey.DATE_TIME_STRING] = \
group_dict[CtdmoGhqrImodemDataParticleKey.DATE_TIME_STRING]
elif key == MetadataMatchKey.INSTRUMENT_SERIAL_NO_MATCH:
particle_data[CtdmoGhqrImodemDataParticleKey.SERIAL_NUMBER] = \
group_dict[CtdmoGhqrImodemDataParticleKey.SERIAL_NUMBER]
elif key == MetadataMatchKey.BATTERY_VOLTAGE_MATCH:
particle_data[CtdmoGhqrImodemDataParticleKey.BATTERY_VOLTAGE_MAIN] = \
group_dict[CtdmoGhqrImodemDataParticleKey.BATTERY_VOLTAGE_MAIN]
particle_data[CtdmoGhqrImodemDataParticleKey.BATTERY_VOLTAGE_LITHIUM] = \
group_dict[CtdmoGhqrImodemDataParticleKey.BATTERY_VOLTAGE_LITHIUM]
elif key == MetadataMatchKey.SAMPLE_NUM_MEM_MATCH:
particle_data[CtdmoGhqrImodemDataParticleKey.SAMPLE_NUMBER] = \
group_dict[CtdmoGhqrImodemDataParticleKey.SAMPLE_NUMBER]
particle_data[CtdmoGhqrImodemDataParticleKey.MEM_FREE] = \
group_dict[CtdmoGhqrImodemDataParticleKey.MEM_FREE]
elif key == MetadataMatchKey.SAMPLE_INTERVAL_MATCH:
particle_data[CtdmoGhqrImodemDataParticleKey.SAMPLE_INTERVAL] = \
group_dict[CtdmoGhqrImodemDataParticleKey.SAMPLE_INTERVAL]
elif key == MetadataMatchKey.PRESSURE_RANGE_MATCH:
particle_data[CtdmoGhqrImodemDataParticleKey.PRESSURE_RANGE] = \
group_dict[CtdmoGhqrImodemDataParticleKey.PRESSURE_RANGE]
elif key == MetadataMatchKey.SAMPLES_RECORDED_MATCH:
particle_data[CtdmoGhqrImodemDataParticleKey.NUM_SAMPLES] = \
group_dict[CtdmoGhqrImodemDataParticleKey.NUM_SAMPLES]
def _generate_metadata_particle(self):
"""
This function generates a metadata particle.
"""
particle_data = dict()
for key in self._metadata_matches_dict.keys():
self._process_metadata_match_dict(key, particle_data)
utc_time = formatted_timestamp_utc_time(
particle_data[CtdmoGhqrImodemDataParticleKey.DATE_TIME_STRING],
"%Y%m%d %H%M%S")
ntp_time = ntplib.system_to_ntp_time(utc_time)
# Generate the metadata particle class and add the
# result to the list of particles to be returned.
particle = self._extract_sample(self.metadata_particle_class,
None,
particle_data,
internal_timestamp=ntp_time)
if particle is not None:
log.debug("Appending metadata particle to record buffer")
self._record_buffer.append(particle)
def _generate_instrument_particle(self, inst_match):
"""
This method will create an instrument particle given
instrument match data found from parsing an input file.
"""
# Extract the instrument particle sample providing the instrument data
# tuple and ntp timestamp
particle = self._extract_sample(self.instrument_particle_class,
None,
inst_match.groupdict())
if particle is not None:
log.debug("Appending instrument particle to record buffer")
self._record_buffer.append(particle)
def _handle_non_match(self, line):
# Check for other lines that can be ignored
if (re.match(LOGGING_REGEX, line) or
re.match(HEADER_BEGIN_REGEX, line) or
re.match(DATA_FORMAT_REGEX, line) or
re.match(DATA_BEGIN_REGEX, line) or
re.match(DATA_END_REGEX, line)):
log.debug("Ignoring line: %s", line)
else:
# Exception callback
message = "Unexpected data found. Line: " + line
log.warn(message)
self._exception_callback(UnexpectedDataException(message))
def _process_line(self, line):
file_datetime_match = re.match(FILE_DATETIME_REGEX, line)
instrument_serial_num_match = \
re.match(INSTRUMENT_SERIAL_NUM_REGEX, line)
battery_voltage_match = re.match(BATTERY_VOLTAGE_REGEX, line)
sample_num_mem_match = re.match(SAMPLE_NUM_MEM_REGEX, line)
sample_interval_match = re.match(SAMPLE_INTERVAL_REGEX, line)
pressure_range_match = re.match(PRESSURE_RANGE_REGEX, line)
samples_recorded_match = re.match(SAMPLES_RECORDED_REGEX, line)
instrument_data_match = re.match(INSTRUMENT_DATA_REGEX, line)
# Does the line contain data needed for the metadata particle?
if file_datetime_match:
self._metadata_matches_dict[MetadataMatchKey.FILE_DATETIME_MATCH] = \
file_datetime_match
elif instrument_serial_num_match:
self._metadata_matches_dict[MetadataMatchKey.INSTRUMENT_SERIAL_NO_MATCH] = \
instrument_serial_num_match
elif battery_voltage_match:
self._metadata_matches_dict[MetadataMatchKey.BATTERY_VOLTAGE_MATCH] = \
battery_voltage_match
elif sample_num_mem_match:
self._metadata_matches_dict[MetadataMatchKey.SAMPLE_NUM_MEM_MATCH] = \
sample_num_mem_match
elif sample_interval_match:
self._metadata_matches_dict[MetadataMatchKey.SAMPLE_INTERVAL_MATCH] = \
sample_interval_match
elif pressure_range_match:
self._metadata_matches_dict[MetadataMatchKey.PRESSURE_RANGE_MATCH] = \
pressure_range_match
elif samples_recorded_match:
self._metadata_matches_dict[MetadataMatchKey.SAMPLES_RECORDED_MATCH] = \
samples_recorded_match
# Does the line contain instrument data?
elif instrument_data_match:
# create instrument particle
self._generate_instrument_particle(instrument_data_match)
else:
self._handle_non_match(line)
def parse_file(self):
# read the first line in the file
line = self._stream_handle.readline()
while line:
self._process_line(line)
if (None not in self._metadata_matches_dict.values() and
not self._metadata_sample_generated):
# Attempt to generate metadata particles
self._generate_metadata_particle()
self._metadata_sample_generated = True
# read the next line in the file
line = self._stream_handle.readline()
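# Hedged usage sketch (the .DAT file name is hypothetical; the config keys
# mirror the ones read in __init__ above):
#
# config = {
#     DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
#         CtdmoGhqrImodemParticleClassKey.INSTRUMENT_PARTICLE_CLASS:
#             CtdmoGhqrImodemInstrumentRecoveredDataParticle,
#         CtdmoGhqrImodemParticleClassKey.METADATA_PARTICLE_CLASS:
#             CtdmoGhqrImodemMetadataRecoveredDataParticle,
#     }
# }
# exceptions = []
# with open('ctdmo_example.DAT') as stream_handle:
#     parser = CtdmoGhqrImodemParser(config, stream_handle, exceptions.append)
#     parser.parse_file()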
|
danmergens/mi-instrument
|
mi/dataset/parser/ctdmo_ghqr_imodem.py
|
Python
|
bsd-2-clause
| 16,927
|
# Copyright (c) 2006 Eric P. Mangold
# See LICENSE for details.
import socket, struct
MAX_KEY_LENGTH = 0xff
MAX_VALUE_LENGTH = 0xffff
ASK = '_ask'
ANSWER = '_answer'
COMMAND = '_command'
ERROR = '_error'
ERROR_CODE = '_error_code'
ERROR_DESCRIPTION = '_error_description'
UNKNOWN_ERROR_CODE = 'UNKNOWN'
UNHANDLED_ERROR_CODE = 'UNHANDLED'
class AMPError(Exception):
"""AMP returned an error response"""
def __init__(self, errorCode, errorDescription):
Exception.__init__(self, errorDescription)
self.errorCode = errorCode
self.errorDescription = errorDescription
class Argument:
"""Base class of AMP arguments"""
def __init__(self, optional=False):
self.optional = optional
def fromString(self, bytes):
raise NotImplementedError
def toString(self, obj):
raise NotImplementedError
class Integer(Argument):
fromString = int
toString = str
class String(Argument):
def fromString(self, bytes):
return bytes
def toString(self, obj):
return obj
class Float(Argument):
fromString = float
toString = repr
class Boolean(Argument):
def fromString(self, bytes):
if bytes == 'True':
return True
elif bytes == 'False':
return False
else:
raise ValueError("Bad boolean value: %r" % (bytes,))
def toString(self, obj):
if obj:
return 'True'
else:
return 'False'
class Unicode(Argument):
def fromString(self, bytes):
return bytes.decode('utf-8')
def toString(self, obj):
return obj.encode('utf-8')
class ListOf(Argument):
def __init__(self, elementType):
self.elementType = elementType
def fromString(self, inString):
"""
This code was derived from `twisted.protocols.basic.Int16StringReceiver`.
"""
strings = list()
structFormat = '!H'
prefixLength = struct.calcsize(structFormat)
alldata = inString
currentOffset = 0
fmt = structFormat
while len(alldata) >= (currentOffset + prefixLength):
messageStart = currentOffset + prefixLength
length, = struct.unpack(fmt, alldata[currentOffset:messageStart])
if length > MAX_VALUE_LENGTH:
raise ValueError("Value too long: %d bytes" % (length,))
messageEnd = messageStart + length
if len(alldata) < messageEnd:
break
packet = alldata[messageStart:messageEnd]
currentOffset = messageEnd
strings.append(packet)
return map(self.elementType.fromString, strings)
def toString(self, inObject):
strings = []
for obj in inObject:
serialized = self.elementType.toString(obj)
strings.append(struct.pack('!H', len(serialized)))
strings.append(serialized)
return ''.join(strings)
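# Wire-format sketch: ListOf(Integer()).toString([1, 22]) emits each element
# as a 2-byte big-endian length prefix followed by its serialized form:
# '\x00\x01' + '1' + '\x00\x02' + '22'  ->  '\x00\x011\x00\x0222'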
class Command:
arguments = []
response = []
errors = {}
#fatalErrors = {} # NOT IMPLEMENTED YET
requiresAnswer = True
class __metaclass__(type):
def __new__(cls, name, bases, attrs):
if 'commandName' not in attrs:
attrs['commandName'] = name
return type.__new__(cls, name, bases, attrs)
def serializeRequest(cls, dataList, kw):
"""Serialize and append data to the given list"""
kw = kw.copy()
for argName, argType in cls.arguments:
if argName not in kw and argType.optional:
continue
argValue = argType.toString(kw[argName])
del kw[argName]
if len(argName) > MAX_KEY_LENGTH:
raise ValueError("Key too long")
if len(argValue) > MAX_VALUE_LENGTH:
raise ValueError("Value too long")
dataList.extend( [argName, argValue] )
#for kv in argName, argValue:
# dataList.append(struct.pack('!H', len(kv)))
# dataList.append(kv)
if kw:
raise ValueError, "arguments contained unneeded values: %r" % (kw,)
return dataList
serializeRequest = classmethod(serializeRequest)
def deserializeRequest(cls, wireResponse):
result = {}
for fieldName, fieldType in cls.arguments:
result[fieldName] = fieldType.fromString(wireResponse[fieldName])
return result
deserializeRequest = classmethod(deserializeRequest)
def serializeResponse(cls, dataList, kw):
"""Serialize and append data to the given list"""
for fieldName, fieldType in cls.response:
fieldValue = fieldType.toString(kw[fieldName])
if len(fieldName) > MAX_KEY_LENGTH:
raise ValueError("Key too long")
if len(fieldValue) > MAX_VALUE_LENGTH:
raise ValueError("Value too long")
dataList.extend( [fieldName, fieldValue] )
#for kv in fieldName, fieldValue:
# dataList.append(struct.pack('!H', len(kv)))
# dataList.append(kv)
return dataList
serializeResponse = classmethod(serializeResponse)
def deserializeResponse(cls, wireResponse):
response = {}
for respName, respType in cls.response:
response[respName] = respType.fromString(wireResponse[respName])
return response
deserializeResponse = classmethod(deserializeResponse)
class Proxy:
counter = 0
socketTimeout = None
ssl = None
def __init__(self, host, port, useSSL=False, socketTimeout=60.0):
self.host = host
self.port = port
self.useSSL = useSSL
self.socketTimeout = socketTimeout # set to None to enable fully blocking sockets
def connect(self):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(self.socketTimeout)
sock.connect( (self.host, self.port) )
#ssl = socket.ssl(s, '/home/teratorn/code/host.key', '/home/teratorn/code/host.cert')
if self.useSSL:
self.ssl = socket.ssl(sock)
#print 'Connected to SSL server:', self.ssl.server()
self.sock = sock
return self
def callRemote(self, command, **kw):
"""
Synchronously call a remote AMP command.
This method may raise socket.error or AMPError during "normal" operations.
Any other exception should indicate a bug.
"""
return self._callRemote(command, True, **kw)
def callRemoteNoAnswer(self, command, **kw):
"""
Synchronously call a remote AMP command. No response is expected, and this
method will return as soon as the request has been sent.
This method may raise socket.error or AMPError during "normal" operations.
Any other exception should indicate a bug.
"""
return self._callRemote(command, False, **kw)
def _callRemote(self, command, answerExpected, **kw):
askKey = str(self.counter)
self.counter += 1
# compose packet
dataList = [COMMAND, command.commandName, ASK, askKey]
command.serializeRequest(dataList, kw)
insertPrefixes(dataList)
data = ''.join(dataList)
# write packet
self._write(data)
if not answerExpected:
return
# read the response
wireResponse = {}
while 1:
keylen = struct.unpack('!H', self._read(2))[0]
if keylen == 0:
break
key = self._read(keylen)
valuelen = struct.unpack('!H', self._read(2))[0]
value = self._read(valuelen)
wireResponse[key] = value
#print 'wireResponse:', repr(wireResponse)
if ERROR in wireResponse:
assert wireResponse[ERROR] == askKey
raise AMPError(wireResponse[ERROR_CODE], wireResponse[ERROR_DESCRIPTION])
assert wireResponse[ANSWER] == askKey
del wireResponse[ANSWER]
# return the de-serialized response
return command.deserializeResponse(wireResponse)
def _read(self, bufsize):
if self.useSSL:
return self.ssl.read(bufsize)
else:
data = self.sock.recv(bufsize)
if not data:
raise socket.error("connection closed by peer")
while len(data) < bufsize:
data += self.sock.recv(bufsize-len(data))
return data
def _write(self, bytes):
if self.useSSL:
self.ssl.write(bytes)
else:
self.sock.sendall(bytes)
def close(self):
self.sock.close()
def insertPrefixes(dataList):
for i, value in enumerate(dataList[:]):
dataList.insert(i*2, struct.pack('!H', len(value)))
dataList.append('\x00\x00')
return dataList
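# Hedged usage sketch (the command, host, and port are hypothetical; a real
# AMP server must implement the command):
#
# class Sum(Command):
#     arguments = [('a', Integer()), ('b', Integer())]
#     response = [('total', Integer())]
#
# proxy = Proxy('localhost', 1234).connect()
# print proxy.callRemote(Sum, a=1, b=2)['total']  # -> 3
# proxy.close()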
|
spikeekips/ampy
|
ampy/ampy.py
|
Python
|
mit
| 8,805
|
from django.apps import AppConfig
import rest_registration.checks # noqa
class RestRegistrationConfig(AppConfig):
name = 'rest_registration'
|
szopu/django-rest-registration
|
rest_registration/apps.py
|
Python
|
mit
| 149
|
PRIORITIES_1 = PRIORITIES_2 = PRIORITIES_3 = PRIORITIES_4 = PRIORITIES_4B \
= PRIORITIES_4C = 'Second Variable File'
|
userzimmermann/robotframework-python3
|
atest/testdata/variables/resvarfiles/variables_2.py
|
Python
|
apache-2.0
| 132
|
# Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import scipy.io
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
# Returns mu, sigma for 20 hidden states from a feature-vector matrix for the Smooth, Moderate, and Rough surface models
def feature_to_mu_sigma(fvec):
index = 0
m,n = np.shape(fvec)
#print m,n
mu = np.matrix(np.zeros((20,1)))
sigma = np.matrix(np.zeros((20,1)))
DIVS = m/20
while (index < 20):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),0:]
#if index == 1:
#print temp_fvec
mu[index] = scp.mean(temp_fvec)
sigma[index] = scp.std(temp_fvec)
index = index+1
return mu,sigma
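# A hedged, self-contained sanity check for feature_to_mu_sigma (synthetic
# data; this helper is illustrative only and is never called below).
def _demo_feature_to_mu_sigma():
    fvec = np.matrix(np.arange(400.0)).T  # (400, 1): 20 segments of 20 rows
    mu, sigma = feature_to_mu_sigma(fvec)
    assert abs(mu[0, 0] - 9.5) < 1e-9  # mean of rows 0..19 is 9.5
    return mu, sigma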
# Returns sequence given raw data
def create_seq(fvec):
m,n = np.shape(fvec)
#print m,n
seq = np.matrix(np.zeros((20,n)))
DIVS = m/20
for i in range(n):
index = 0
while (index < 20):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),i]
#if index == 1:
#print temp_fvec
seq[index,i] = scp.mean(temp_fvec)
index = index+1
return seq
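# A hedged shape check for create_seq (synthetic data; illustrative only,
# never called below): each column collapses to 20 segment means.
def _demo_create_seq():
    seq = create_seq(np.matrix(np.ones((400, 3))))
    assert np.shape(seq) == (20, 3)
    return seq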
if __name__ == '__main__':
### Simulation Data
tSamples = 400
datasmooth = scipy.io.loadmat('smooth.mat')
datamoderate = scipy.io.loadmat('medium.mat')
datarough = scipy.io.loadmat('rough.mat')
simulforce = np.zeros((tSamples,150))
datatime = np.arange(0,4,0.01)
dataforceSmooth = np.transpose(datasmooth['force'])
dataforceModerate = np.transpose(datamoderate['force'])
dataforceRough = np.transpose(datarough['force'])
j = 0
for i in dataforceSmooth:
simulforce[:,j] = i
j = j+1
j = 50
for i in dataforceModerate:
simulforce[:,j] = i
j = j+1
j = 100
for i in dataforceRough:
simulforce[:,j] = i
j = j+1
Fmat = np.matrix(simulforce)
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
#print " "
#print 'Total_Matrix_Shape:',m_tot,n_tot
mu_smooth,sigma_smooth = feature_to_mu_sigma(Fmat[0:tSamples,0:50])
mu_moderate,sigma_moderate = feature_to_mu_sigma(Fmat[0:tSamples,50:100])
mu_rough,sigma_rough = feature_to_mu_sigma(Fmat[0:tSamples,100:150])
#print [mu_smooth, sigma_smooth]
# HMM - Implementation:
# 20 hidden states (see the 20x20 transition matrix A below)
# Force as continuous Gaussian observations from each hidden state
# Three HMM models for Smooth, Moderate, and Rough surfaces
# Transition probabilities initialized as an upper triangular matrix (to be trained using Baum-Welch)
# A new object is classified according to whichever model fits it most closely.
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_smooth = np.zeros((20,2))
B_moderate = np.zeros((20,2))
B_rough = np.zeros((20,2))
for num_states in range(20):
B_smooth[num_states,0] = mu_smooth[num_states]
B_smooth[num_states,1] = sigma_smooth[num_states]
B_moderate[num_states,0] = mu_moderate[num_states]
B_moderate[num_states,1] = sigma_moderate[num_states]
B_rough[num_states,0] = mu_rough[num_states]
B_rough[num_states,1] = sigma_rough[num_states]
B_smooth = B_smooth.tolist()
B_moderate = B_moderate.tolist()
B_rough = B_rough.tolist()
# pi - initial probabilities per state
pi = [0.05] * 20
# generate Smooth, Moderate, Rough Surface models from parameters
model_smooth = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_smooth, pi) # Will be Trained
model_moderate = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_moderate, pi) # Will be Trained
model_rough = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rough, pi) # Will be Trained
trial_number = 1
smooth_final = np.matrix(np.zeros((30,1)))
moderate_final = np.matrix(np.zeros((30,1)))
rough_final = np.matrix(np.zeros((30,1)))
while (trial_number < 6):
# For Training
total_seq = Fmat[0:tSamples,:]
m_total, n_total = np.shape(total_seq)
#print 'Total_Sequence_Shape:', m_total, n_total
if (trial_number == 1):
j = 5
total_seq_smooth = total_seq[0:tSamples,1:5]
total_seq_moderate = total_seq[0:tSamples,51:55]
total_seq_rough = total_seq[0:tSamples,101:105]
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+1:j+5]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+51:j+55]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+101:j+105]))
j = j+5
if (trial_number == 2):
j = 5
total_seq_smooth = np.column_stack((total_seq[0:tSamples,0],total_seq[0:tSamples,2:5]))
total_seq_moderate = np.column_stack((total_seq[0:tSamples,50],total_seq[0:tSamples,52:55]))
total_seq_rough = np.column_stack((total_seq[0:tSamples,100],total_seq[0:tSamples,102:105]))
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+0],total_seq[0:tSamples,j+2:j+5]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50],total_seq[0:tSamples,j+52:j+55]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100],total_seq[0:tSamples,j+102:j+105]))
j = j+5
if (trial_number == 3):
j = 5
total_seq_smooth = np.column_stack((total_seq[0:tSamples,0:2],total_seq[0:tSamples,3:5]))
total_seq_moderate = np.column_stack((total_seq[0:tSamples,50:52],total_seq[0:tSamples,53:55]))
total_seq_rough = np.column_stack((total_seq[0:tSamples,100:102],total_seq[0:tSamples,103:105]))
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+0:j+2],total_seq[0:tSamples,j+3:j+5]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50:j+52],total_seq[0:tSamples,j+53:j+55]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100:j+102],total_seq[0:tSamples,j+103:j+105]))
j = j+5
if (trial_number == 4):
j = 5
total_seq_smooth = np.column_stack((total_seq[0:tSamples,0:3],total_seq[0:tSamples,4:5]))
total_seq_moderate = np.column_stack((total_seq[0:tSamples,50:53],total_seq[0:tSamples,54:55]))
total_seq_rough = np.column_stack((total_seq[0:tSamples,100:103],total_seq[0:tSamples,104:105]))
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+0:j+3],total_seq[0:tSamples,j+4:j+5]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50:j+53],total_seq[0:tSamples,j+54:j+55]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100:j+103],total_seq[0:tSamples,j+104:j+105]))
j = j+5
if (trial_number == 5):
j = 5
total_seq_smooth = total_seq[0:tSamples,0:4]
total_seq_moderate = total_seq[0:tSamples,50:54]
total_seq_rough = total_seq[0:tSamples,100:104]
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+0:j+4]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50:j+54]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100:j+104]))
j = j+5
train_seq_smooth = (np.array(total_seq_smooth).T).tolist()
train_seq_moderate = (np.array(total_seq_moderate).T).tolist()
train_seq_rough = (np.array(total_seq_rough).T).tolist()
#m,n = np.shape(train_seq_smooth)
#print m,n
#print train_seq_smooth
final_ts_smooth = ghmm.SequenceSet(F,train_seq_smooth)
final_ts_moderate = ghmm.SequenceSet(F,train_seq_moderate)
final_ts_rough = ghmm.SequenceSet(F,train_seq_rough)
model_smooth.baumWelch(final_ts_smooth)
model_moderate.baumWelch(final_ts_moderate)
model_rough.baumWelch(final_ts_rough)
# For Testing
if (trial_number == 1):
j = 5
total_seq_smooth = total_seq[0:tSamples,0]
total_seq_moderate = total_seq[0:tSamples,50]
total_seq_rough = total_seq[0:tSamples,100]
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+50]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+100]))
j = j+5
if (trial_number == 2):
j = 5
total_seq_smooth = total_seq[0:tSamples,1]
total_seq_moderate = total_seq[0:tSamples,51]
total_seq_rough = total_seq[0:tSamples,101]
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+1]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+51]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+101]))
j = j+5
if (trial_number == 3):
j = 5
total_seq_smooth = total_seq[0:tSamples,2]
total_seq_moderate = total_seq[0:tSamples,52]
total_seq_rough = total_seq[0:tSamples,102]
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+2]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+52]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+102]))
j = j+5
if (trial_number == 4):
j = 5
total_seq_smooth = total_seq[0:tSamples,3]
total_seq_moderate = total_seq[0:tSamples,53]
total_seq_rough = total_seq[0:tSamples,103]
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+3]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+53]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+103]))
j = j+5
if (trial_number == 5):
j = 5
total_seq_smooth = total_seq[0:tSamples,4]
total_seq_moderate = total_seq[0:tSamples,54]
total_seq_rough = total_seq[0:tSamples,104]
while (j < 50):
total_seq_smooth = np.column_stack((total_seq_smooth,total_seq[0:tSamples,j+4]))
total_seq_moderate = np.column_stack((total_seq_moderate,total_seq[0:tSamples,j+54]))
total_seq_rough = np.column_stack((total_seq_rough,total_seq[0:tSamples,j+104]))
j = j+5
total_seq_obj = np.matrix(np.column_stack((total_seq_smooth,total_seq_moderate,total_seq_rough)))
smooth = np.matrix(np.zeros(np.size(total_seq_obj,1)))
moderate = np.matrix(np.zeros(np.size(total_seq_obj,1)))
rough = np.matrix(np.zeros(np.size(total_seq_obj,1)))
m,n = np.shape(smooth)
print m,n
k = 0
while (k < np.size(total_seq_obj,1)):
test_seq_obj = (np.array(total_seq_obj[0:tSamples,k]).T).tolist()
new_test_seq_obj = np.array(sum(test_seq_obj,[]))
ts_obj = new_test_seq_obj
final_ts_obj = ghmm.EmissionSequence(F,ts_obj.tolist())
# Find Viterbi Path
path_smooth_obj = model_smooth.viterbi(final_ts_obj)
path_moderate_obj = model_moderate.viterbi(final_ts_obj)
path_rough_obj = model_rough.viterbi(final_ts_obj)
obj = max(path_smooth_obj[1],path_moderate_obj[1],path_rough_obj[1])
if obj == path_smooth_obj[1]:
smooth[0,k] = 1
elif obj == path_moderate_obj[1]:
moderate[0,k] = 1
else:
rough[0,k] = 1
k = k+1
#print smooth.T
smooth_final = smooth_final + smooth.T
moderate_final = moderate_final + moderate.T
rough_final = rough_final + rough.T
trial_number = trial_number + 1
#print smooth_final
#print moderate_final
#print rough_final
# Confusion Matrix
cmat = np.zeros((3,3))
arrsum_smooth = np.zeros((3,1))
arrsum_moderate = np.zeros((3,1))
arrsum_rough= np.zeros((3,1))
k = 10
i = 0
while (k < 31):
arrsum_smooth[i] = np.sum(smooth_final[k-10:k,0])
arrsum_moderate[i] = np.sum(moderate_final[k-10:k,0])
arrsum_rough[i] = np.sum(rough_final[k-10:k,0])
i = i+1
k = k+10
i=0
while (i < 3):
j=0
while (j < 3):
if (i == 0):
cmat[i][j] = arrsum_smooth[j]
elif (i == 1):
cmat[i][j] = arrsum_moderate[j]
else:
cmat[i][j] = arrsum_rough[j]
j = j+1
i = i+1
#print cmat
# Plot Confusion Matrix
Nlabels = 3
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
ax.set_xticks([0.5,1.5,2.5])
ax.set_xticklabels(['Smooth', 'Moderate', 'Rough'])
ax.set_yticks([2.5,1.5,0.5])
ax.set_yticklabels(['Smooth', 'Moderate', 'Rough'])
figbar = fig.colorbar(figplot)
i = 0
while (i < 3):
j = 0
while (j < 3):
pp.text(j+0.5,2.5-i,cmat[i][j])
j = j+1
i = i+1
pp.show()
|
tapomayukh/projects_in_python
|
sandbox_tapo/src/AI/Code for Project-3/HMM Code/hmm_crossvalidation_force_20_states.py
|
Python
|
mit
| 17,322
|
#!/usr/bin/env python
#coding: utf-8
"""
This module simply sends requests to the Digital Ocean API
and returns the responses as dicts.
"""
import requests
API_ENDPOINT = 'https://api.digitalocean.com'
class DoError(RuntimeError):
pass
class DoManager(object):
def __init__(self, client_id, api_key):
self.client_id = client_id
self.api_key = api_key
def all_active_droplets(self):
json = self.request('/droplets/')
return json['droplets']
def new_droplet(self, name, size_id, image_id, region_id,
ssh_key_ids=None, virtio=False, private_networking=False,
backups_enabled=False):
params = {
'name': name,
'size_id': size_id,
'image_id': image_id,
'region_id': region_id,
'virtio': virtio,
'private_networking': private_networking,
'backups_enabled': backups_enabled,
}
if ssh_key_ids:
params['ssh_key_ids'] = ssh_key_ids
json = self.request('/droplets/new', params=params)
return json['droplet']
def show_droplet(self, id):
json = self.request('/droplets/%s' % id)
return json['droplet']
def reboot_droplet(self, id):
json = self.request('/droplets/%s/reboot/' % id)
json.pop('status', None)
return json
def power_cycle_droplet(self, id):
json = self.request('/droplets/%s/power_cycle/' % id)
json.pop('status', None)
return json
def shutdown_droplet(self, id):
json = self.request('/droplets/%s/shutdown/' % id)
json.pop('status', None)
return json
def power_off_droplet(self, id):
json = self.request('/droplets/%s/power_off/' % id)
json.pop('status', None)
return json
def power_on_droplet(self, id):
json = self.request('/droplets/%s/power_on/' % id)
json.pop('status', None)
return json
def password_reset_droplet(self, id):
json = self.request('/droplets/%s/password_reset/' % id)
json.pop('status', None)
return json
def resize_droplet(self, id, size_id):
params = {'size_id': size_id}
json = self.request('/droplets/%s/resize/' % id, params)
json.pop('status', None)
return json
def snapshot_droplet(self, id, name):
params = {'name': name}
json = self.request('/droplets/%s/snapshot/' % id, params)
json.pop('status', None)
return json
def restore_droplet(self, id, image_id):
params = {'image_id': image_id}
json = self.request('/droplets/%s/restore/' % id, params)
json.pop('status', None)
return json
def rebuild_droplet(self, id, image_id):
params = {'image_id': image_id}
json = self.request('/droplets/%s/rebuild/' % id, params)
json.pop('status', None)
return json
def enable_backups_droplet(self, id):
json = self.request('/droplets/%s/enable_backups/' % id)
json.pop('status', None)
return json
def disable_backups_droplet(self, id):
json = self.request('/droplets/%s/disable_backups/' % id)
json.pop('status', None)
return json
def rename_droplet(self, id, name):
params = {'name': name}
json = self.request('/droplets/%s/rename/' % id, params)
json.pop('status', None)
return json
def destroy_droplet(self, id, scrub_data=True):
params = {'scrub_data': '1' if scrub_data else '0'}
json = self.request('/droplets/%s/destroy/' % id, params)
json.pop('status', None)
return json
#regions==========================================
def all_regions(self):
json = self.request('/regions/')
return json['regions']
#images==========================================
def all_images(self, filter='global'):
params = {'filter': filter}
json = self.request('/images/', params)
return json['images']
def show_image(self, image_id):
params= {'image_id': image_id}
json = self.request('/images/%s/' % image_id, params)
return json['image']
def destroy_image(self, image_id):
self.request('/images/%s/destroy' % image_id)
return True
def transfer_image(self, image_id, region_id):
params = {'region_id': region_id}
json = self.request('/images/%s/transfer/' % image_id, params)
json.pop('status', None)
return json
#ssh_keys=========================================
def all_ssh_keys(self):
json = self.request('/ssh_keys/')
return json['ssh_keys']
def new_ssh_key(self, name, pub_key):
params = {'name': name, 'ssh_pub_key': pub_key}
json = self.request('/ssh_keys/new/', params)
return json['ssh_key']
def show_ssh_key(self, key_id):
json = self.request('/ssh_keys/%s/' % key_id)
return json['ssh_key']
def edit_ssh_key(self, key_id, name, pub_key):
params = {'name': name, 'ssh_pub_key': pub_key} # the doc needs to be improved
json = self.request('/ssh_keys/%s/edit/' % key_id, params)
return json['ssh_key']
def destroy_ssh_key(self, key_id):
self.request('/ssh_keys/%s/destroy/' % key_id)
return True
#sizes============================================
def sizes(self):
json = self.request('/sizes/')
return json['sizes']
#domains==========================================
def all_domains(self):
json = self.request('/domains/')
return json['domains']
def new_domain(self, name, ip):
params = {
'name': name,
'ip_address': ip
}
json = self.request('/domains/new/', params)
return json['domain']
def show_domain(self, domain_id):
json = self.request('/domains/%s/' % domain_id)
return json['domain']
def destroy_domain(self, domain_id):
self.request('/domains/%s/destroy/' % domain_id)
return True
def all_domain_records(self, domain_id):
json = self.request('/domains/%s/records/' % domain_id)
return json['records']
def new_domain_record(self, domain_id, record_type, data, name=None, priority=None, port=None, weight=None):
params = {
'record_type': record_type,
'data': data,
}
if name: params['name'] = name
if priority: params['priority'] = priority
if port: params['port'] = port
if weight: params['weight'] = weight
json = self.request('/domains/%s/records/new/' % domain_id, params)
return json['domain_record'] if 'domain_record' in json else json['record'] # DO API docs say 'domain_record', but the API actually returns 'record'
def show_domain_record(self, domain_id, record_id):
json = self.request('/domains/%s/records/%s' % (domain_id, record_id))
return json['record']
def edit_domain_record(self, domain_id, record_id, record_type, data, name=None, priority=None, port=None, weight=None):
params = {
'record_type': record_type,
'data': data,
}
if name: params['name'] = name
if priority: params['priority'] = priority
if port: params['port'] = port
if weight: params['weight'] = weight
json = self.request('/domains/%s/records/%s/edit/' % (domain_id, record_id), params)
return json['domain_record'] if 'domain_record' in json else json['record'] # DO API docs say 'domain_record' for /new/ but 'record' for /edit/.
def destroy_domain_record(self, domain_id, record_id):
self.request('/domains/%s/records/%s/destroy/' % (domain_id, record_id))
return True
#events===========================================
def show_event(self, event_id):
json = self.request('/events/%s' % event_id)
return json['event']
#low_level========================================
def request(self, path, params=None, method='GET'):
# copy to avoid the mutable-default pitfall, so credentials never
# accumulate in a dict shared between calls
params = dict(params) if params else {}
params['client_id'] = self.client_id
params['api_key'] = self.api_key
if not path.startswith('/'):
path = '/'+path
url = API_ENDPOINT+path
try:
resp = requests.get(url, params=params, timeout=60)
json = resp.json()
except ValueError: # requests.models.json.JSONDecodeError
raise ValueError("The API server doesn't respond with a valid json")
except requests.RequestException as e: # errors from requests
raise RuntimeError(e)
if resp.status_code != requests.codes.ok:
if json:
if 'error_message' in json:
raise DoError(json['error_message'])
elif 'message' in json:
raise DoError(json['message'])
# The JSON response is bad, so raise an exception with the HTTP status
resp.raise_for_status()
if json.get('status') != 'OK':
raise DoError(json['error_message'])
return json
if __name__=='__main__':
import os
client_id = os.environ['DO_CLIENT_ID']
api_key = os.environ['DO_API_KEY']
do = DoManager(client_id, api_key)
import sys
fname = sys.argv[1]
import pprint
# size_id: 66, image_id: 1601, region_id: 1
pprint.pprint(getattr(do, fname)(*sys.argv[2:]))
|
chuwy/dopy
|
dopy/manager.py
|
Python
|
mit
| 9,514
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
import werkzeug
import datetime
import time
from openerp.tools.translate import _
class sale_quote(http.Controller):
@http.route([
"/quote/<int:order_id>",
"/quote/<int:order_id>/<token>"
], type='http', auth="public", website=True)
def view(self, order_id, pdf=None, token=None, message=False, **post):
# use SUPERUSER_ID to let a public user access/view the order,
# but only if they know the private token
order = request.registry.get('sale.order').browse(request.cr, token and SUPERUSER_ID or request.uid, order_id)
now = time.strftime('%Y-%m-%d')
dummy, action = request.registry.get('ir.model.data').get_object_reference(request.cr, request.uid, 'sale', 'action_quotations')
if token:
if token != order.access_token:
return request.website.render('website.404')
# Log only once a day
if request.session.get('view_quote', False) != now:
request.session['view_quote'] = now
body = _('Quotation viewed by customer')
self.__message_post(body, order_id, type='comment')
days = 0
if order.validity_date:
days = (datetime.datetime.strptime(order.validity_date, '%Y-%m-%d') - datetime.datetime.now()).days + 1
if pdf:
report_obj = request.registry['report']
pdf = report_obj.get_pdf(request.cr, SUPERUSER_ID, [order_id], 'website_quote.report_quote', data=None, context=request.context)
pdfhttpheaders = [('Content-Type', 'application/pdf'), ('Content-Length', len(pdf))]
return request.make_response(pdf, headers=pdfhttpheaders)
values = {
'quotation': order,
'message': message and int(message) or False,
'option': bool(filter(lambda x: not x.line_id, order.options)),
'order_valid': (not order.validity_date) or (now <= order.validity_date),
'days_valid': max(days, 0),
'action': action
}
return request.website.render('website_quote.so_quotation', values)
@http.route(['/quote/accept'], type='json', auth="public", website=True)
def accept(self, order_id, token=None, signer=None, sign=None, **post):
order_obj = request.registry.get('sale.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
if token != order.access_token:
return request.website.render('website.404')
attachments = sign and [('signature.png', sign.decode('base64'))] or []
order_obj.signal_workflow(request.cr, SUPERUSER_ID, [order_id], 'order_confirm', context=request.context)
message = _('Order signed by %s') % (signer,)
self.__message_post(message, order_id, type='comment', subtype='mt_comment', attachments=attachments)
return True
@http.route(['/quote/<int:order_id>/<token>/decline'], type='http', auth="public", website=True)
def decline(self, order_id, token, **post):
order_obj = request.registry.get('sale.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
if token != order.access_token:
return request.website.render('website.404')
request.registry.get('sale.order').action_cancel(request.cr, SUPERUSER_ID, [order_id])
message = post.get('decline_message')
if message:
self.__message_post(message, order_id, type='comment', subtype='mt_comment')
return werkzeug.utils.redirect("/quote/%s/%s?message=2" % (order_id, token))
@http.route(['/quote/<int:order_id>/<token>/post'], type='http', auth="public", website=True)
def post(self, order_id, token, **post):
# use SUPERUSER_ID to let a public user access/view the order
order_obj = request.registry.get('sale.order')
order = order_obj.browse(request.cr, SUPERUSER_ID, order_id)
message = post.get('comment')
if token != order.access_token:
return request.website.render('website.404')
if message:
self.__message_post(message, order_id, type='comment', subtype='mt_comment')
return werkzeug.utils.redirect("/quote/%s/%s?message=1" % (order_id, token))
def __message_post(self, message, order_id, type='comment', subtype=False, attachments=[]):
request.session.body = message
cr, uid, context = request.cr, request.uid, request.context
user = request.registry['res.users'].browse(cr, SUPERUSER_ID, uid, context=context)
if 'body' in request.session and request.session.body:
request.registry.get('sale.order').message_post(cr, SUPERUSER_ID, order_id,
body=request.session.body,
type=type,
subtype=subtype,
author_id=user.partner_id.id,
context=context,
attachments=attachments
)
request.session.body = False
return True
@http.route(['/quote/update_line'], type='json', auth="public", website=True)
def update(self, line_id, remove=False, unlink=False, order_id=None, token=None, **post):
order = request.registry.get('sale.order').browse(request.cr, SUPERUSER_ID, int(order_id))
if token != order.access_token:
return request.website.render('website.404')
if order.state not in ('draft','sent'):
return False
line_id = int(line_id)
if unlink:
request.registry.get('sale.order.line').unlink(request.cr, SUPERUSER_ID, [line_id], context=request.context)
return False
number = -1 if remove else 1
order_line_obj = request.registry.get('sale.order.line')
order_line_val = order_line_obj.read(request.cr, SUPERUSER_ID, [line_id], [], context=request.context)[0]
quantity = order_line_val['product_uom_qty'] + number
order_line_obj.write(request.cr, SUPERUSER_ID, [line_id], {'product_uom_qty': (quantity)}, context=request.context)
return [str(quantity), str(order.amount_total)]
@http.route(["/quote/template/<model('sale.quote.template'):quote>"], type='http', auth="user", website=True)
def template_view(self, quote, **post):
values = { 'template': quote }
return request.website.render('website_quote.so_template', values)
@http.route(["/quote/add_line/<int:option_id>/<int:order_id>/<token>"], type='http', auth="public", website=True)
def add(self, option_id, order_id, token, **post):
vals = {}
order = request.registry.get('sale.order').browse(request.cr, SUPERUSER_ID, order_id)
if token != order.access_token:
return request.website.render('website.404')
if order.state not in ['draft', 'sent']:
return request.website.render('website.http_error', {'status_code': 'Forbidden', 'status_message': _('You cannot add options to a confirmed order.')})
option_obj = request.registry.get('sale.order.option')
option = option_obj.browse(request.cr, SUPERUSER_ID, option_id)
res = request.registry.get('sale.order.line').product_id_change(request.cr, SUPERUSER_ID, order_id,
False, option.product_id.id, option.quantity, option.uom_id.id, option.quantity, option.uom_id.id,
option.name, order.partner_id.id, False, True, time.strftime('%Y-%m-%d'),
False, order.fiscal_position.id, True, request.context)
vals = res.get('value', {})
if 'tax_id' in vals:
vals['tax_id'] = [(6, 0, vals['tax_id'])]
vals.update({
'price_unit': option.price_unit,
'website_description': option.website_description,
'name': option.name,
'order_id': order.id,
'product_id' : option.product_id.id,
'product_uos_qty': option.quantity,
'product_uos': option.uom_id.id,
'product_uom_qty': option.quantity,
'product_uom': option.uom_id.id,
'discount': option.discount,
})
line = request.registry.get('sale.order.line').create(request.cr, SUPERUSER_ID, vals, context=request.context)
option_obj.write(request.cr, SUPERUSER_ID, [option.id], {'line_id': line}, context=request.context)
return werkzeug.utils.redirect("/quote/%s/%s#pricing" % (order.id, token))
|
Grirrane/odoo
|
addons/website_quote/controllers/main.py
|
Python
|
agpl-3.0
| 9,493
|
#!/usr/bin/env python2
#This script keeps the bot running, restarts it, and writes error logs when it bugs out.
import logging
try:
open('errors.log', 'r').close()
except IOError:
open('errors.log', 'w').close()
logging.basicConfig(level=logging.WARNING, filename='errors.log', filemode='a', format='[%(asctime)s] %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logging.debug('Starting bot')
while True:
try:
execfile('main.py') #wrapper around code
except Exception as ex:
logging.exception("Fatal error occured")
print "{0}! BOT RESTARTING NOW!".format(ex)
logging.warning('Restarting bot')
|
feblehober123/GETWatchBot2
|
run.py
|
Python
|
gpl-3.0
| 620
|
# Ask Keystone for an auth token
from keystoneclient.v3 import client
auth_args = {
'auth_url': 'http://192.168.56.102:5000/v3',
'project_name': 'admin',
'user_domain_name': 'default',
'project_domain_name': 'default',
'username': 'admin',
'password': 'jinghui108',
}
def get_token():
keystone = client.Client(**auth_args)
return keystone.auth_ref.get('auth_token')
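# Hedged usage sketch: pass the token as the X-Auth-Token header on later
# OpenStack API calls. The compute endpoint below is an assumption; in
# practice it should come from the service catalog in keystone.auth_ref.
#
# import requests
# headers = {'X-Auth-Token': get_token()}
# servers = requests.get('http://192.168.56.102:8774/v2.1/servers',
#                        headers=headers).json()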
|
DTUbigdata/Thesis_Source_Code
|
thesis_package/Retrieve_Data/token_authenticate.py
|
Python
|
apache-2.0
| 395
|
from lib.analysis.author.edge_list import *
from lib.util.file_util import load_from_disk
import networkx as nx
def test_generate_edge_list():
author_nodes = './.tmp/integration_test/lib/analysis/author/edge_list/author_nodes.csv'
author_edges = './.tmp/integration_test/lib/analysis/author/edge_list/author_edges.csv'
graph_nodes = './test/integration_test/data/graph_nodes.csv'
graph_edges = './test/integration_test/data/graph_edges.csv'
clean_data = './test/integration_test/data/clean_data.json'
author_uid_map = './test/integration_test/data/author_uid_map.json'
req_data1 = './test/integration_test/data/req_data/test_edge_list1.csv'
generate_edge_list(author_nodes, author_edges, graph_nodes, graph_edges, clean_data, author_uid_map)
assert nx.is_isomorphic(nx.read_edgelist(author_nodes, delimiter='\t'), nx.read_edgelist(req_data1, delimiter='\t'))
generate_edge_list(author_nodes, author_edges, graph_nodes, graph_edges, clean_data, author_uid_map, ignore_lat=False)
assert nx.is_isomorphic(nx.read_edgelist(author_nodes, delimiter='\t'), nx.read_edgelist(req_data1, delimiter='\t'))
|
prasadtalasila/MailingListParser
|
test/integration_test/lib/analysis/author/test_edge_list.py
|
Python
|
gpl-3.0
| 1,122
|
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the moderator page."""
from core.controllers import base
from core.domain import acl_decorators
from core.domain import activity_domain
from core.domain import activity_services
from core.domain import email_manager
from core.domain import summary_services
import feconf
class ModeratorPage(base.BaseHandler):
"""The moderator page."""
@acl_decorators.can_access_moderator_page
def get(self):
"""Handles GET requests."""
self.render_template('pages/moderator/moderator.html')
class FeaturedActivitiesHandler(base.BaseHandler):
"""The moderator page handler for featured activities."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_access_moderator_page
def get(self):
"""Handles GET requests."""
self.render_json({
'featured_activity_references': [
activity_reference.to_dict() for activity_reference in
activity_services.get_featured_activity_references()
],
})
@acl_decorators.can_access_moderator_page
def post(self):
"""Handles POST requests."""
featured_activity_reference_dicts = self.payload.get(
'featured_activity_reference_dicts')
featured_activity_references = [
activity_domain.ActivityReference(
reference_dict['type'], reference_dict['id'])
for reference_dict in featured_activity_reference_dicts]
try:
summary_services.require_activities_to_be_public(
featured_activity_references)
except Exception as e:
raise self.InvalidInputException(e)
activity_services.update_featured_activity_references(
featured_activity_references)
self.render_json({})
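# Example POST payload for the handler above (field names come from the
# reference_dict lookups; the 'exploration' value is an assumption):
# {"featured_activity_reference_dicts": [{"type": "exploration", "id": "0"}]}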
class EmailDraftHandler(base.BaseHandler):
"""Provide default email templates for moderator emails."""
GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
@acl_decorators.can_send_moderator_emails
def get(self, action):
"""Handles GET requests."""
self.render_json({
'draft_email_body': (
email_manager.get_draft_moderator_action_email(action)),
})
|
himanshu-dixit/oppia
|
core/controllers/moderator.py
|
Python
|
apache-2.0
| 2,847
|
from __future__ import print_function
import json
import os
import sys
def print_file_tree(startpath):
""" Prints a directory and its contents
"""
for root, dirs, files in os.walk(startpath):
# skip hidden dirs
dirs[:] = [d for d in dirs if not d.startswith(".")]
level = root.replace(startpath, '').count(os.sep)
indent = ' ' * 4 * (level)
print('{}{}/'.format(indent, os.path.basename(root)))
subindent = ' ' * 4 * (level + 1)
for f in files:
if not f.startswith("."):
print('{}{}'.format(subindent, f))
def generate_lab_from_solution(ipynb_path):
""" Strictly removes input ant output of code cells prefixed
with #SOLUTION from a jupyter notebook.
This is a little hacky, but keeps the nbs in sync easily
"""
lab_file = ipynb_path.replace('-solution', '-lab')
with open(ipynb_path) as nb_in:
raw_nb = json.load(nb_in)
def _is_solution_cell(c):
# source is a list of strings representing each line
return (c['cell_type'] == 'code' and
len(c['source']) > 0 and
c['source'][0].startswith("#SOLUTION"))
no_sol_cells = [c for c in raw_nb['cells'] if not _is_solution_cell(c)]
raw_nb["cells"] = no_sol_cells
with open(lab_file, "w") as lab_out:
json.dump(raw_nb, lab_out)
if __name__ == "__main__":
generate_lab_from_solution(sys.argv[1])
|
drivendata/data-science-is-software
|
src/utils.py
|
Python
|
mit
| 1,436
|
# -*- test-case-name: txdav -*-
##
# Copyright (c) 2010-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Calendar & contacts data store.
"""
#
# FIXME: 'txdav' should be renamed to 'datastore' and should not be
# WebDAV-specific.
#
# (That means txdav.xml should be pulled out, for example, as that is
# WebDAV-specific.)
#
|
trevor/calendarserver
|
txdav/__init__.py
|
Python
|
apache-2.0
| 862
|
# Copyright (c) 2013-2016 Christian Geier et al.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""contains a re-usable CalendarWidget for urwid"""
import calendar
from datetime import date
from locale import getlocale, setlocale, LC_ALL
import urwid
setlocale(LC_ALL, '')
def getweeknumber(day):
"""return iso week number for datetime.date object
:param day: date
:type day: datetime.date()
:return: weeknumber
:rtype: int
"""
return date.isocalendar(day)[1]
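# Worked example: 4 January always falls in ISO week 1, so
# getweeknumber(date(2016, 1, 4)) == 1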
class DatePart(urwid.Text):
"""used in the Date widget (single digit)"""
def __init__(self, digit):
super(DatePart, self).__init__(digit)
@classmethod
def selectable(cls):
return True
def keypress(self, _, key):
return key
class Date(urwid.WidgetWrap):
"""used in the main calendar for dates (a number)"""
def __init__(self, date, get_styles=None):
dstr = str(date.day).rjust(2)
self.halves = [urwid.AttrMap(DatePart(dstr[:1]), None, None),
urwid.AttrMap(DatePart(dstr[1:]), None, None)]
self.date = date
self._get_styles = get_styles
super(Date, self).__init__(urwid.Columns(self.halves))
def set_styles(self, styles):
"""If single string, sets the same style for both halves, if two
strings, sets different style for each half.
"""
if type(styles) is tuple:
self.halves[0].set_attr_map({None: styles[0]})
self.halves[1].set_attr_map({None: styles[1]})
self.halves[0].set_focus_map({None: styles[0]})
self.halves[1].set_focus_map({None: styles[1]})
else:
self.halves[0].set_attr_map({None: styles})
self.halves[1].set_attr_map({None: styles})
self.halves[1].set_focus_map({None: styles})
self.halves[0].set_focus_map({None: styles})
def reset_styles(self):
self.set_styles(self._get_styles(self.date, False))
@property
def marked(self):
return 'mark' in (self.halves[0].attr_map[None],
self.halves[1].attr_map[None])
@classmethod
def selectable(cls):
return True
def keypress(self, _, key):
return key
class DateCColumns(urwid.Columns):
"""container for one week worth of dates
which are horizontally aligned
TODO: rename, awful name
focus can only move away by pressing 'TAB',
calls 'on_date_change' on every focus change (see below for details)
"""
def __init__(self, widget_list, on_date_change, on_press, keybindings,
get_styles=None, **kwargs):
self.on_date_change = on_date_change
self.on_press = on_press
self.keybindings = keybindings
self.get_styles = get_styles
# we need the next two attributes for attribute resetting when a
# cell regains focus after having lost it
self._old_attr_map = False
self._old_pos = 0
self._init = True
super(DateCColumns, self).__init__(widget_list, **kwargs)
def __repr__(self):
return '<DateCColumns from {} to {}>'.format(self[1].date, self[7].date)
def _set_focus_position(self, position):
"""calls on_date_change before calling super()._set_focus_position"""
# do not call when building up the interface, lots of potentially
# expensive calls made here
if self._init:
self._init = False
else:
self.contents[position][0].set_styles(
self.get_styles(self.contents[position][0].date, True))
self.on_date_change(self.contents[position][0].date)
super(DateCColumns, self)._set_focus_position(position)
def set_focus_date(self, a_date):
for num, day in enumerate(self.contents[1:8], 1):
if day[0].date == a_date:
self._set_focus_position(num)
return None
raise ValueError('%s not found in this week' % a_date)
def get_date_column(self, a_date):
"""return the column `a_date` is in, raises ValueError if `a_date`
cannot be found
"""
for num, day in enumerate(self.contents[1:8], 1):
if day[0].date == a_date:
return num
raise ValueError('%s not found in this week' % a_date)
focus_position = property(
urwid.Columns._get_focus_position,
_set_focus_position,
doc=('Index of child widget in focus. Raises IndexError if read when '
'CColumns is empty, or when set to an invalid index.')
)
def keypress(self, size, key):
"""only leave calendar area on pressing 'tab' or 'enter'"""
if key in self.keybindings['left']:
key = 'left'
elif key in self.keybindings['up']:
key = 'up'
elif key in self.keybindings['right']:
key = 'right'
elif key in self.keybindings['down']:
key = 'down'
old_pos = self.focus_position
key = super(DateCColumns, self).keypress(size, key)
# make sure we don't leave the calendar
if old_pos == 7 and key == 'right':
self.contents[old_pos][0].set_styles(
self.get_styles(self.contents[old_pos][0].date, False))
self.focus_position = 1
self.contents[self.focus_position][0].set_styles(
self.get_styles(self.contents[self.focus_position][0].date, False))
return 'down'
elif old_pos == 1 and key == 'left':
self.contents[old_pos][0].set_styles(
self.get_styles(self.contents[old_pos][0].date, False))
self.focus_position = 7
self.contents[self.focus_position][0].set_styles(
self.get_styles(self.contents[self.focus_position][0].date, False))
return 'up'
if key in self.keybindings['view']: # XXX make this more generic
self._old_pos = old_pos
self.contents[old_pos][0].set_styles(
self.get_styles(self.contents[old_pos][0].date, True))
return 'right'
if old_pos != self.focus_position:
self.contents[old_pos][0].set_styles(
self.get_styles(self.contents[old_pos][0].date, False))
self.contents[self.focus_position][0].set_styles(
self.get_styles(self.contents[self.focus_position][0].date, True))
if key in ['up', 'down']:
self.contents[old_pos][0].set_styles(
self.get_styles(self.contents[old_pos][0].date, False))
return key
class CListBox(urwid.ListBox):
"""our custom version of ListBox containing a CalendarWalker instance
it should contain a `CalendarWalker` instance which it autoextends on
rendering, if needed """
def __init__(self, walker):
self._init = True
self.keybindings = walker.keybindings
self.on_press = walker.on_press
self._marked = False
self._pos_old = False
super(CListBox, self).__init__(walker)
def render(self, size, focus=False):
if self._init:
while 'bottom' in self.ends_visible(size):
self.body._autoextend()
self.set_focus_valign('middle')
self._init = False
return super(CListBox, self).render(size, focus)
def _date(self, row, column):
"""return the date at row `row` and column `column`"""
return self.body[row].contents[column][0].date
def _unmark_one(self, row, column):
"""remove attribute *mark* from the date at row `row` and column `column`
returning it to the attributes defined by self._get_color()
"""
self.body[row].contents[column][0].reset_styles()
def _mark_one(self, row, column):
"""set attribute *mark* on the date at row `row` and column `column`"""
self.body[row].contents[column][0].set_styles('mark')
def _mark(self, a_date=None):
"""make sure everything between the marked entry and `a_date`
is visually marked, and nothing else"""
if a_date is None:
a_date = self.body.focus_date
def toggle(row, column):
if self.body[row].contents[column][0].marked:
self._mark_one(row, column)
else:
self._unmark_one(row, column)
start = min(self._marked['pos'][0], self.focus_position) - 2
stop = max(self._marked['pos'][0], self.focus_position) + 2
for row in range(start, stop):
for col in range(1, 8):
if a_date > self._marked['date']:
if self._marked['date'] <= self._date(row, col) <= a_date:
self._mark_one(row, col)
else:
self._unmark_one(row, col)
else:
if self._marked['date'] >= self._date(row, col) >= a_date:
self._mark_one(row, col)
else:
self._unmark_one(row, col)
toggle(self.focus_position, self.focus.focus_col)
self._pos_old = self.focus_position, self.focus.focus_col
def _unmark_all(self):
start = min(self._marked['pos'][0], self.focus_position, self._pos_old[0])
end = max(self._marked['pos'][0], self.focus_position, self._pos_old[0]) + 1
for row in range(start, end):
for col in range(1, 8):
self._unmark_one(row, col)
def set_focus_date(self, a_day):
if self._marked:
self._unmark_all()
self._mark(a_day)
self.body.set_focus_date(a_day)
def keypress(self, size, key):
if key in self.keybindings['mark'] + ['esc'] and self._marked:
self._unmark_all()
self._marked = False
return
if key in self.keybindings['mark']:
self._marked = {'date': self.body.focus_date,
'pos': (self.focus_position, self.focus.focus_col)}
if self._marked and key in self.keybindings['other']:
row, col = self._marked['pos']
self._marked = {'date': self.body.focus_date,
'pos': (self.focus_position, self.focus.focus_col)}
self.focus.focus_col = col
self.focus_position = row
if key in self.on_press:
if self._marked:
start = min(self.body.focus_date, self._marked['date'])
end = max(self.body.focus_date, self._marked['date'])
else:
start = self.body.focus_date
end = None
return self.on_press[key](start, end)
if key in self.keybindings['today']:
# reset colors of currently focused Date widget
self.focus.focus.set_styles(
self.focus.get_styles(self.body.focus_date, False))
self.set_focus_date(date.today())
self.set_focus_valign(('relative', 10))
key = super(CListBox, self).keypress(size, key)
if self._marked:
self._mark()
return key
class CalendarWalker(urwid.SimpleFocusListWalker):
def __init__(self, on_date_change, on_press, keybindings, firstweekday=0,
weeknumbers=False, get_styles=None):
self.firstweekday = firstweekday
self.weeknumbers = weeknumbers
self.on_date_change = on_date_change
self.on_press = on_press
self.keybindings = keybindings
self.get_styles = get_styles
weeks = self._construct_month()
urwid.SimpleFocusListWalker.__init__(self, weeks)
def set_focus(self, position):
"""set focus by item number"""
while position >= len(self) - 1:
self._autoextend()
while position <= 0:
no_additional_weeks = self._autoprepend()
position += no_additional_weeks
return urwid.SimpleFocusListWalker.set_focus(self, position)
@property
def focus_date(self):
"""return the date the focus is currently set to
:rtype: datetime.date
"""
return self[self.focus].focus.date
def set_focus_date(self, a_day):
"""set the focus to `a_day`
:type: a_day: datetime.date
"""
row, column = self.get_date_pos(a_day)
self.set_focus(row)
self[self.focus]._set_focus_position(column)
return None
def get_date_pos(self, a_day):
"""get row and column where `a_day` is located
:type: a_day: datetime.date
:rtype: tuple(int, int)
"""
# rough estimate of difference in lines, i.e. full weeks, we might be
# off by as much as one week though
week_diff = int((self.focus_date - a_day).days / 7)
new_focus = self.focus - week_diff
        # in case new_focus is 1 we will later try to set the focus to 0,
        # which will lead to an autoprepend that would throw off our
        # estimate; therefore we autoprepend here anyway, even if it might
        # not be necessary
if new_focus <= 1:
self.set_focus(new_focus - 1)
week_diff = int((self.focus_date - a_day).days / 7)
new_focus = self.focus - week_diff
for offset in [0, -1, 1]: # we might be off by a week
row = new_focus + offset
self.set_focus(row)
try:
column = self[self.focus].get_date_column(a_day)
return row, column
except ValueError:
pass
# we didn't find the date we were looking for...
raise ValueError('something is wrong')
def _autoextend(self):
"""appends the next month"""
date_last_month = self[-1][1].date # a date from the last month
last_month = date_last_month.month
last_year = date_last_month.year
month = last_month % 12 + 1
year = last_year if not last_month == 12 else last_year + 1
weeks = self._construct_month(year, month, clean_first_row=True)
self.extend(weeks)
def _autoprepend(self):
"""prepends the previous month
:returns: number of weeks prepended
:rtype: int
"""
try:
date_first_month = self[0][-1].date # a date from the first month
except AttributeError:
# rightmost column is weeknumber
date_first_month = self[0][-2].date
first_month = date_first_month.month
first_year = date_first_month.year
if first_month == 1:
month = 12
year = first_year - 1
else:
month = first_month - 1
year = first_year
weeks = self._construct_month(year, month, clean_last_row=True)
weeks.reverse()
for one in weeks:
self.insert(0, one)
return len(weeks)
def _construct_week(self, week):
"""
constructs a CColumns week from a week of datetime.date objects. Also
prepends the month name if the first day of the month is included in
that week.
:param week: list of datetime.date objects
:returns: the week as an CColumns object and True or False depending on
if today is in this week
:rtype: tuple(urwid.CColumns, bool)
"""
if 1 in [day.day for day in week]:
month_name = calendar.month_abbr[week[-1].month].ljust(4)
attr = 'monthname'
elif self.weeknumbers == 'left':
month_name = ' {:2} '.format(getweeknumber(week[0]))
attr = 'weeknumber_left'
else:
month_name = ' '
attr = None
this_week = [(4, urwid.AttrMap(urwid.Text(month_name), attr))]
for number, day in enumerate(week):
new_date = Date(day, self.get_styles)
this_week.append((2, new_date))
new_date.set_styles(self.get_styles(new_date.date, False))
if self.weeknumbers == 'right':
this_week.append((2, urwid.AttrMap(
urwid.Text('{:2}'.format(getweeknumber(week[0]))), 'weeknumber_right')))
week = DateCColumns(this_week,
on_date_change=self.on_date_change,
on_press=self.on_press,
keybindings=self.keybindings,
dividechars=1,
get_styles=self.get_styles)
return week
def _construct_month(self,
year=date.today().year,
month=date.today().month,
clean_first_row=False,
clean_last_row=False):
"""construct one month of DateCColumns
:param year: the year this month is set in
:type year: int
:param month: the number of the month to be constructed
:type month: int (1-12)
:param clean_first_row: makes sure that the first element returned is
completely in `month` and not partly in the one
before (which might lead to that line occurring
twice
:type clean_first_row: bool
:param clean_last_row: makes sure that the last element returned is
completely in `month` and not partly in the one
after (which might lead to that line occurring
twice
:type clean_last_row: bool
:returns: list of DateCColumns and the number of the list element which
contains today (or None if it isn't in there)
:rtype: tuple(list(dateCColumns, int or None))
"""
plain_weeks = calendar.Calendar(
self.firstweekday).monthdatescalendar(year, month)
weeks = list()
for number, week in enumerate(plain_weeks):
week = self._construct_week(week)
weeks.append(week)
if clean_first_row and weeks[0][1].date.month != weeks[0][7].date.month:
return weeks[1:]
elif clean_last_row and \
weeks[-1][1].date.month != weeks[-1][7].date.month:
return weeks[:-1]
else:
return weeks
class CalendarWidget(urwid.WidgetWrap):
def __init__(self, on_date_change, keybindings, on_press, firstweekday=0,
weeknumbers=False, get_styles=None, initial=date.today()):
"""
        :param on_date_change: a function that is called every time the
            selected date is changed, with the newly selected date as its
            first (and only) argument
        :type on_date_change: function
        :param keybindings: bind keys to specific commands: keys are
            command names (e.g. movement commands), values are lists of
            keys that should be bound to those commands. See below for
            the defaults.
            Available commands:
                'left', 'right', 'up', 'down': move cursor in direction
                'today': refocus on today
                'mark': toggles selection mode
        :type keybindings: dict
        :param on_press: dict of functions that are called when the key is
            pressed. These functions must accept at least two arguments. In
            the normal case the first argument is the currently selected
            date (datetime.date) and the second is *None*. When a date range
            is selected, the first argument is the earlier and the second
            argument is the later date. The function's return values are
            interpreted as pressed keys.
        :type on_press: dict
"""
default_keybindings = {
'left': ['left'], 'down': ['down'], 'right': ['right'], 'up': ['up'],
'today': ['t'],
'view': [],
'mark': ['v'],
}
from collections import defaultdict
on_press = defaultdict(lambda: lambda x: x, on_press)
default_keybindings.update(keybindings)
calendar.setfirstweekday(firstweekday)
try:
mylocale = '.'.join(getlocale())
except TypeError: # language code and encoding may be None
mylocale = 'C'
_calendar = calendar.LocaleTextCalendar(firstweekday, mylocale)
weekheader = _calendar.formatweekheader(2)
dnames = weekheader.split(' ')
def _get_styles(date, focus):
if focus:
if date == date.today():
return 'today focus'
else:
return 'reveal focus'
else:
if date == date.today():
return 'today'
else:
return None
if get_styles is None:
get_styles = _get_styles
if weeknumbers == 'right':
dnames.append('#w')
dnames = urwid.Columns(
[(4, urwid.Text(' '))] +
[(2, urwid.AttrMap(urwid.Text(name), 'dayname')) for name in dnames],
dividechars=1)
self.walker = CalendarWalker(
on_date_change, on_press, default_keybindings, firstweekday, weeknumbers,
get_styles)
self.box = CListBox(self.walker)
frame = urwid.Frame(self.box, header=dnames)
urwid.WidgetWrap.__init__(self, frame)
self.set_focus_date(initial)
def focus_today(self):
self.set_focus_date(date.today())
@property
def focus_date(self):
return self.walker.focus_date
def set_focus_date(self, a_day):
"""set the focus to `a_day`
:type a_day: datetime.date
"""
self.box.set_focus_date(a_day)
|
dzoep/khal
|
khal/ui/calendarwidget.py
|
Python
|
mit
| 22,891
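A minimal usage sketch for the CalendarWidget above, wired into urwid's standard main loop. The palette entries mirror the attribute names used by the default _get_styles ('today', 'today focus', 'reveal focus', ...); the quit handler and the extra 'other' keybinding are illustrative assumptions, not khal API.

# Hedged usage sketch (not from khal itself): wire CalendarWidget into urwid.
import urwid

def on_date_change(new_date):
    pass  # called with the newly focused datetime.date

def quit_handler(start, end=None):
    # on_press handlers receive the focused date and, for a marked range,
    # the end date (otherwise None); raising ExitMainLoop quits the UI
    raise urwid.ExitMainLoop()

palette = [  # attribute names assumed from _get_styles above
    ('today', 'black', 'light gray'),
    ('today focus', 'black', 'white'),
    ('reveal focus', 'black', 'dark cyan'),
    ('dayname', 'light blue', ''),
    ('monthname', 'light blue', ''),
    ('mark', 'white', 'dark magenta'),
]

widget = CalendarWidget(
    on_date_change=on_date_change,
    # 'other' is consulted while a range is marked; supplying it (even
    # empty) avoids a KeyError, since it is not among the defaults
    keybindings={'today': ['t'], 'other': []},
    on_press={'q': quit_handler},
)
urwid.MainLoop(widget, palette).run()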
|
from .SingleThreadGenerator import SingleThreadGenerator as SingleThreadProjectionGenerator
from .OpenMPGenerator import OpenMPGenerator as OpenMPProjectionGenerator
from .CUDAGenerator import CUDAGenerator as CUDAProjectionGenerator
|
vitay/ANNarchy
|
ANNarchy/generator/Projection/__init__.py
|
Python
|
gpl-2.0
| 233
|
import sqlalchemy as sa
from pytest import mark
from sqlalchemy_utils import Country, CountryType, i18n # noqa
from tests import TestCase
@mark.skipif('i18n.babel is None')
class TestCountryType(TestCase):
def create_models(self):
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
country = sa.Column(CountryType)
def __repr__(self):
return 'User(%r)' % self.id
self.User = User
def test_parameter_processing(self):
user = self.User(
country=Country(u'FI')
)
self.session.add(user)
self.session.commit()
user = self.session.query(self.User).first()
assert user.country.name == u'Finland'
def test_scalar_attributes_get_coerced_to_objects(self):
user = self.User(country='FI')
assert isinstance(user.country, Country)
|
spoqa/sqlalchemy-utils
|
tests/types/test_country.py
|
Python
|
bsd-3-clause
| 936
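A self-contained sketch of the CountryType behaviour the tests above exercise, outside the TestCase harness; the in-memory SQLite URL is an assumption, and babel must be installed for Country.name to resolve.

# Standalone sketch (assumes babel is installed; engine URL is illustrative).
import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy_utils import Country, CountryType

Base = declarative_base()

class User(Base):
    __tablename__ = 'user'
    id = sa.Column(sa.Integer, primary_key=True)
    country = sa.Column(CountryType)  # stored as a 2-letter ISO code

engine = sa.create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

session.add(User(country=Country(u'FI')))
session.commit()
assert session.query(User).first().country.name == u'Finland'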
|
# -*- coding: utf-8 -*-
#
# This file is part of the bliss project
#
# Copyright (c) 2016 Beamline Control Unit, ESRF
# Distributed under the GNU LGPLv3. See LICENSE for more info.
"""
pythonic RPC implementation using zerorpc.
Server example::
from bliss.comm.rpc import Server
class Car(object):
'''A silly car. This doc should show up in the client'''
wheels = 4
def __init__(self, color, horsepower):
self.horsepower = horsepower
self.__position = 0
@property
def position(self):
'''this doc should show up in the client too'''
return self.__position
@staticmethod
def horsepower_to_watts(horsepower):
'''so should this'''
return horsepower * 735.499
@property
def watts(self):
'''also this one'''
return self.horsepower_to_watts(self.horsepower)
def move(self, value, relative=False):
'''needless to say this one as well'''
if relative:
self.__position += value
else:
self.__position = value
car = Car('yellow', 120)
server = Server(car)
server.bind('tcp://0:8989')
server.run()
Client::
from bliss.comm.rpc import Client
car = Client('tcp://localhost:8989')
assert car.__doc__
assert type(car).__name__ == 'Car'
assert car.position == 0
assert car.horsepower == 120.0
assert car.horsepower_to_watts(1) == 735.499
assert car.watts == 120 * 735.499
car.move(12)
assert car.position == 12
car.move(10, relative=True)
assert car.position == 22
"""
import inspect
import logging
import weakref
import louie
import zerorpc
import gevent.queue
import msgpack_numpy
from bliss.common.utils import StripIt
# Patching
msgpack_numpy.patch()
SPECIAL_METHODS = set((
'new', 'init', 'del', 'hash', 'class', 'dict', 'sizeof', 'weakref',
'metaclass', 'subclasshook',
'getattr', 'setattr', 'delattr', 'getattribute',
'instancecheck', 'subclasscheck',
'reduce', 'reduce_ex', 'getstate', 'setstate'))
class ServerError(Exception):
pass
def _discover_object(obj):
members = {}
otype = type(obj)
for name, member in inspect.getmembers(otype):
info = dict(name=name, doc=inspect.getdoc(member))
if callable(member):
if inspect.ismethod(member) and member.__self__ == otype:
member_type = 'classmethod'
elif inspect.isfunction(member):
member_type = 'staticmethod'
else:
member_type = 'method'
elif inspect.isdatadescriptor(member):
member_type = 'attribute'
else:
member_type = 'attribute'
info['doc'] = None
info['type'] = member_type
members[name] = info
for name in dir(obj):
if name.startswith('__') or name in members:
continue
member = getattr(obj, name)
info = dict(name=name, doc=inspect.getdoc(member))
if callable(member):
member_type = 'method'
else:
member_type = 'attribute'
info['doc'] = None
info['type'] = member_type
members[name] = info
return dict(name=otype.__name__,
module=inspect.getmodule(obj).__name__,
doc=inspect.getdoc(obj),
members=members)
class _ServerObject(object):
def __init__(self, obj):
self._object = obj
self._log = logging.getLogger('zerorpc.' + type(obj).__name__)
self._metadata = _discover_object(obj)
def __dir__(self):
result = ['zerorpc_call__']
for name, info in self._metadata['members'].items():
if 'method' in info['type']:
result.append(name)
return result
def __getattr__(self, name):
return getattr(self._object, name)
def zerorpc_call__(self, code, args, kwargs):
if code == 'introspect':
self._log.debug("zerorpc 'introspect'")
return self._metadata
else:
name = args[0]
if code == 'call':
value = getattr(self._object, name)(*args[1:], **kwargs)
self._log.debug("zerorpc call %s() = %r", name, StripIt(value))
return value
elif code == 'getattr':
value = getattr(self._object, name)
self._log.debug("zerorpc get %s = %r", name, StripIt(value))
return value
elif code == 'setattr':
value = args[1]
self._log.debug("zerorpc set %s = %r", name, StripIt(value))
return setattr(self._object, name, value)
elif code == 'delattr':
self._log.debug("zerorpc del %s", name)
return delattr(self._object, name)
else:
raise ServerError('Unknown call type {0!r}'.format(code))
class _StreamServerObject(_ServerObject):
def __init__(self, obj):
super(_StreamServerObject, self).__init__(obj)
self._metadata['stream'] = True
self._streams = weakref.WeakSet()
@zerorpc.stream
def zerorpc_stream__(self):
stream = gevent.queue.Queue()
self._streams.add(stream)
def dispatcher(value, signal):
stream.put((signal, value))
louie.connect(dispatcher, sender=self._object)
debug = self._log.debug
for message in stream:
if message is None:
break
signal, value = message
debug('streaming signal=%r value=%s', signal, StripIt(value))
yield message
def __dir__(self):
return super(_StreamServerObject, self).__dir__() + ['zerorpc_stream__']
def __del__(self):
for stream in self._streams:
stream.put(None)
def Server(obj, stream=False, **kwargs):
"""
Create a zerorpc server for the given object with a pythonic API
Args:
obj: any python object
Keyword Args:
stream (bool): supply a stream listening to events coming from obj
Return:
a zerorpc server
It accepts the same keyword arguments as :class:`zerorpc.Server`.
"""
klass = _StreamServerObject if stream else _ServerObject
return zerorpc.Server(klass(obj), **kwargs)
# Client code
def _property(name, doc):
def fget(self):
return self._client.zerorpc_call__('getattr', (name,), {})
def fset(self, value):
self._client.zerorpc_call__('setattr', (name, value), {})
def fdel(self):
return self._client.zerorpc_call__('delattr', (name,), {})
return property(fget=fget, fset=fset, fdel=fdel, doc=doc)
def _method(name, doc):
if name == '__dir__':
# need to handle __dir__ to make sure it returns a list, not a tuple
def method(self):
return list(self._client.zerorpc_call__('call', [name], {}))
else:
def method(self, *args, **kwargs):
args = [name] + list(args)
return self._client.zerorpc_call__('call', args, kwargs)
method.__name__ = name
method.__doc__ = doc
return method
def _static_method(client, name, doc):
def method(*args, **kwargs):
args = [name] + list(args)
return client.zerorpc_call__('call', args, kwargs)
method.__name__ = name
method.__doc__ = doc
return staticmethod(method)
def _class_method(client, name, doc):
def method(cls, *args, **kwargs):
args = [name] + list(args)
return client.zerorpc_call__('call', args, kwargs)
method.__name__ = name
method.__doc__ = doc
return classmethod(method)
def _member(client, info):
    """build the proxy member described by `info` (a dict with 'name',
    'type' and 'doc' keys) and return it"""
    name, mtype, doc = info['name'], info['type'], info['doc']
    if mtype == 'attribute':
        return _property(name, doc)
    elif mtype == 'method':
        return _method(name, doc)
    elif mtype == 'staticmethod':
        return _static_method(client, name, doc)
    elif mtype == 'classmethod':
        return _class_method(client, name, doc)
def Client(address, **kwargs):
"""
Create a zerorpc client with a pythonic API
Args:
address: connection address (ex: 'tcp://lid00c:8989')
Return:
a zerorpc client
It accepts the same keyword arguments as :class:`zerorpc.Client`.
"""
kwargs['connect_to'] = address
client = zerorpc.Client(**kwargs)
metadata = client.zerorpc_call__('introspect', (), {})
client._log = logging.getLogger('zerorpc.' + metadata['name'])
stream = metadata.get('stream', False)
members = dict(_client=client)
for name, info in metadata['members'].items():
if name.startswith('__') and name[2:-2] in SPECIAL_METHODS:
continue
name, mtype, doc = info['name'], info['type'], info['doc']
if mtype == 'attribute':
members[name] = _property(name, doc)
elif mtype == 'method':
members[name] = _method(name, doc)
elif mtype == 'staticmethod':
members[name] = _static_method(client, name, doc)
elif mtype == 'classmethod':
members[name] = _class_method(client, name, doc)
def close(self):
self._client.close()
if hasattr(self._client, '_stream_task'):
self._client._stream_task.kill()
members['close'] = close
klass = type(metadata['name'], (object,), members)
proxy = klass()
if stream:
def stream_task_ended(task):
if task.exception:
client._log.warning('stream task terminated in error: %s',
task.exception)
else:
client._log.debug('stream task terminated')
def dispatch(proxy):
while True:
for signal, value in client.zerorpc_stream__(timeout=None):
client._log.debug('dispatching stream event signal=%r value=%r',
signal, StripIt(value))
louie.send(signal, proxy, value)
client._stream_task = gevent.spawn(dispatch, proxy)
client._stream_task.link(stream_task_ended)
return proxy
|
tiagocoutinho/bliss
|
bliss/comm/rpc.py
|
Python
|
lgpl-3.0
| 10,329
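For reference, the Client() factory above boils down to driving the single zerorpc_call__ entry point exposed by _ServerObject, so a raw zerorpc client can exercise it directly. The address and the Car members come from the module docstring; treat the snippet as a sketch, not part of the bliss API.

# Sketch: what the generated proxy does under the hood, with a raw client.
import zerorpc

raw = zerorpc.Client(connect_to='tcp://localhost:8989')

meta = raw.zerorpc_call__('introspect', (), {})  # class name, doc, members
print(meta['name'], sorted(meta['members']))

print(raw.zerorpc_call__('getattr', ('position',), {}))       # property read
raw.zerorpc_call__('call', ('move', 10), {'relative': True})  # method call
raw.zerorpc_call__('setattr', ('color', 'red'), {})           # attribute write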
|
# -*- coding: UTF-8 -*-
# thermo.py
# Created by Francesco Porcari on 2010-09-03.
# Copyright (c) 2010 Softwell. All rights reserved.
#
#
import os
from gnr.core.gnrbag import Bag
import random
import time
cli_max = 12
invoice_max = 20
row_max = 100
sleep_time = 0.05
class GnrCustomWebPage(object):
dojo_version = '11'
py_requires = "gnrcomponents/testhandler:TestHandlerFull,gnrcomponents/thermopane:ThermoPane"
def windowTitle(self):
return 'Thermo'
def test_1_batch(self, pane):
"Batch"
box = pane.div(datapath='test1')
box.button('Start', fire='.start_test')
box.dataRpc('dummy', 'test_1_batch', _fired='^.start_test')
def test_2_batch(self, pane):
"Batch 2"
box = pane.div(datapath='test2')
box.button('Start', fire='.start_test')
box.dataRpc('dummy', 'test_2_batch', _fired='^.start_test')
def test_3_batch(self, pane):
"Batch 3"
box = pane.div(datapath='test3')
box.button('Start', fire='.start_test')
box.dataRpc('dummy', 'test_3_batch', _fired='^.start_test')
    def rpc_test_1_batch(self):
t = time.time()
# thermo_lines = [{'title':'Clients',_class=}]
thermo_lines = 'clients,invoices,rows'
thermo_lines = None
self.btc.batch_create(title='testbatch',
thermo_lines=thermo_lines, note='This is a test batch_1 %i' % int(random.random() * 100))
clients = int(random.random() * cli_max)
self.btc.thermo_line_add(line='clients', maximum=clients)
try:
for client in range(1, clients + 1):
stopped = self.btc.thermo_line_update(line='clients',
maximum=clients, message='client %i/%i' % (client, clients),
progress=client)
invoices = int(random.random() * invoice_max)
self.btc.thermo_line_add(line='invoices', maximum=invoices)
for invoice in range(1, invoices + 1):
stopped = self.btc.thermo_line_update(line='invoices',
maximum=invoices,
message='invoice %i/%i' % (invoice, invoices),
progress=invoice)
rows = int(random.random() * row_max)
self.btc.thermo_line_add(line='rows', maximum=rows)
for row in range(1, rows + 1):
stopped = self.btc.thermo_line_update(line='rows',
maximum=rows, message='row %i/%i' % (row, rows),
progress=row)
time.sleep(sleep_time)
self.btc.thermo_line_del(line='rows')
self.btc.thermo_line_del(line='invoices')
self.btc.thermo_line_del(line='clients')
except self.btc.exception_stopped:
self.btc.batch_aborted()
except Exception, e:
self.btc.batch_error(error=str(e))
self.btc.batch_complete(result='Execution completed', result_attr=dict(url='http://www.apple.com'))
def rpc_test_2_batch(self):
t = time.time()
thermo_lines = 'clients,invoices,rows'
thermo_lines = None
self.btc.batch_create(title='testbatch',
thermo_lines=thermo_lines, note='This is a test batch_2 %i' % int(random.random() * 100))
try:
clients = int(random.random() * cli_max)
for client in self.client_provider(clients):
invoices = int(random.random() * invoice_max)
for invoice in self.invoice_provider(invoices):
rows = int(random.random() * row_max)
for row in self.row_provider(rows):
time.sleep(sleep_time)
except self.btc.exception_stopped:
self.btc.batch_aborted()
except Exception, e:
self.btc.batch_error(error=str(e))
self.btc.batch_complete(result='Execution completed', result_attr=dict(url='http://www.apple.com'))
def client_provider(self, clients):
self.btc.thermo_line_add(line='clients', maximum=clients)
for client in range(1, clients + 1):
self.btc.thermo_line_update(line='clients',
maximum=clients, message='client %i/%i' % (client, clients), progress=client)
yield client
        self.btc.thermo_line_del(line='clients')
def invoice_provider(self, invoices):
self.btc.thermo_line_add(line='invoices', maximum=invoices)
for invoice in range(1, invoices + 1):
self.btc.thermo_line_update(line='invoices',
maximum=invoices, message='invoice %i/%i' % (invoice, invoices),
progress=invoice)
yield invoice
self.btc.thermo_line_del(line='invoices')
def row_provider(self, rows):
self.btc.thermo_line_add(line='rows', maximum=rows)
for row in range(1, rows + 1):
self.btc.thermo_line_update(line='rows',
maximum=rows, message='row %i/%i' % (row, rows), progress=row)
yield row
self.btc.thermo_line_del(line='rows')
def rpc_test_3_batch(self):
t = time.time()
btc = self.btc
self.btc.batch_create(title='testbatch', note='This is a test batch_3 %i' % int(random.random() * 100))
def clients_cb():
return range(int(random.random() * cli_max))
def invoices_cb(client=None):
return range(int(random.random() * invoice_max))
def rows_cb(invoice=None):
return range(int(random.random() * row_max))
try:
for client in btc.thermo_wrapper(clients_cb, 'clients'):
for invoice in btc.thermo_wrapper(invoices_cb, 'invoices', client=client):
for row in btc.thermo_wrapper(rows_cb, 'rows', invoice=invoice):
time.sleep(sleep_time)
except self.btc.exception_stopped:
self.btc.batch_aborted()
except Exception, e:
self.btc.batch_error(error=str(e))
self.btc.batch_complete(result='Execution completed', result_attr=dict(url='http://www.apple.com'))
|
poppogbr/genropy
|
packages/test15/webpages/components/thermo.py
|
Python
|
lgpl-2.1
| 6,614
|
# -*- coding: utf-8 -*-
"""
===============================================================================
module __Physics__: Base class for managing pore-scale Physics properties
===============================================================================
"""
from OpenPNM.Base import logging
from OpenPNM.Network import GenericNetwork
from OpenPNM.Phases import GenericPhase
import OpenPNM.Physics.models
import scipy as sp
logger = logging.getLogger(__name__)
class GenericPhysics(OpenPNM.Base.Core):
r"""
Generic class to generate Physics objects
Parameters
----------
network : OpenPNM Network object
The network to which this Physics should be attached
phase : OpenPNM Phase object
The Phase object to which this Physics applies
geometry : OpenPNM Geometry object
The Geometry object that defines the pores/throats where this Physics
should be applied. If this argument is supplied, then pores and
throats cannot be specified.
    pores and/or throats : array_like
        The list of pores and throats where this physics applies. If either
        are left blank this will apply the physics nowhere. The locations
        can be changed after instantiation using ``set_locations()``. If
        pores and throats are supplied, then a geometry cannot be specified.
    name : str, optional
        A unique string name to identify the Physics object, typically same
        as instance name but can be anything. If left blank, a name will be
        generated that includes the class name and a random string.
"""
def __init__(self, network=None, phase=None, geometry=None,
pores=[], throats=[], **kwargs):
super().__init__(**kwargs)
logger.name = self.name
# Associate with Network
if network is None:
self._net = GenericNetwork()
else:
self._net = network # Attach network to self
self._net._physics.append(self) # Register self with network
# Associate with Phase
if phase is None:
self._phases.append(GenericPhase())
else:
phase._physics.append(self) # Register self with phase
self._phases.append(phase) # Register phase with self
if geometry is not None:
if (sp.size(pores) > 0) or (sp.size(throats) > 0):
raise Exception('Cannot specify a Geometry AND pores or throats')
pores = self._net.toindices(self._net['pore.' + geometry.name])
throats = self._net.toindices(self._net['throat.' + geometry.name])
# Initialize a label dictionary in the associated phase and network
self._phases[0]['pore.'+self.name] = False
self._phases[0]['throat.'+self.name] = False
self._net['pore.'+self.name] = False
self._net['throat.'+self.name] = False
try:
self.set_locations(pores=pores, throats=throats)
except:
self.controller.purge_object(self)
raise Exception('Provided locations are in use, instantiation cancelled')
def __getitem__(self, key):
element = key.split('.')[0]
# Convert self.name into 'all'
if key.split('.')[-1] == self.name:
key = element + '.all'
if key in self.keys(): # Look for data on self...
return super(GenericPhysics, self).__getitem__(key)
else: # ...Then check Network
return self._phases[0][key][self._phases[0][element + '.' + self.name]]
def set_locations(self, pores=[], throats=[], mode='add'):
r"""
Set the pore and throat locations of the Physics object
Parameters
----------
pores and throats : array_like
The list of pores and/or throats where the object should be applied.
mode : string
Indicates whether list of pores or throats is to be added or removed
from the object. Options are 'add' (default) or 'remove'.
"""
if sp.size(pores) > 0:
pores = sp.array(pores, ndmin=1)
self._set_locations(element='pore', locations=pores, mode=mode)
if sp.size(throats) > 0:
throats = sp.array(throats, ndmin=1)
self._set_locations(element='throat', locations=throats, mode=mode)
|
amdouglas/OpenPNM
|
OpenPNM/Physics/__GenericPhysics__.py
|
Python
|
mit
| 4,361
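A minimal instantiation sketch for GenericPhysics, assuming OpenPNM 1.x's Cubic network class; the network shape is arbitrary. Because geometry and pores/throats are mutually exclusive (see the constructor above), the sketch passes explicit pore and throat lists.

# Hedged usage sketch (OpenPNM 1.x style; network shape is illustrative).
import OpenPNM

pn = OpenPNM.Network.Cubic(shape=[5, 5, 5])
water = OpenPNM.Phases.GenericPhase(network=pn)

# geometry= and pores=/throats= cannot be combined, so apply the physics
# to every pore and throat explicitly
phys = OpenPNM.Physics.GenericPhysics(network=pn, phase=water,
                                      pores=pn.pores(), throats=pn.throats())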
|
#encoding:utf-8
import codecs
import numpy as np
def build_topic_index_hasmap(topic_keys_file, has_head=False, index=False):
tk_read = codecs.open(topic_keys_file, 'r', 'utf-8')
line = tk_read.readline()
tk_list = line.strip().split('\t')
t_map = {}
ind = 0
for tk in tk_list:
t_map[tk] = ind
ind += int(index)
print('load %s finished' % topic_keys_file)
return t_map
|
Guhaifudeng/zhihukankan
|
topic_util.py
|
Python
|
apache-2.0
| 419
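A small driver for build_topic_index_hasmap above; the file name is made up. Note that positions only advance by int(index), so with the default index=False every key maps to 0.

# Illustrative call (file name is a placeholder).
import codecs

with codecs.open('topic_keys.txt', 'w', 'utf-8') as f:
    f.write(u'music\tmovies\tsports\n')

t_map = build_topic_index_hasmap('topic_keys.txt', index=True)
print(t_map)  # {u'music': 0, u'movies': 1, u'sports': 2}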
|
#!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import amulet
import re
import unittest
class TestDeploy(unittest.TestCase):
"""
Smoke test for Apache Bigtop Spark.
"""
@classmethod
def setUpClass(cls):
cls.d = amulet.Deployment(series='xenial')
cls.d.add('spark')
cls.d.setup(timeout=1800)
cls.d.sentry.wait_for_messages({'spark': re.compile('ready')},
timeout=1800)
cls.spark = cls.d.sentry['spark'][0]
def test_spark(self):
"""
Validate Spark by running the smoke-test action.
"""
uuid = self.spark.run_action('smoke-test')
result = self.d.action_fetch(uuid, full_output=True)
# action status=completed on success
if (result['status'] != "completed"):
self.fail('Spark smoke-test failed: %s' % result)
if __name__ == '__main__':
unittest.main()
|
apache/bigtop
|
bigtop-packages/src/charm/spark/layer-spark/tests/02-smoke-test.py
|
Python
|
apache-2.0
| 1,686
|
'''
Created by auto_sdk on 2012.10.16
'''
from top.api.base import RestApi
class FenxiaoProductImageUploadRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.image = None
self.pic_path = None
self.position = None
self.product_id = None
self.properties = None
def getapiname(self):
return 'taobao.fenxiao.product.image.upload'
def getMultipartParas(self):
return ['image']
|
colaftc/webtool
|
top/api/rest/FenxiaoProductImageUploadRequest.py
|
Python
|
mit
| 473
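A hypothetical invocation following the top SDK's usual RestApi pattern: the app key, secret, session key and image path are placeholders, and the helper names (top.appinfo, set_app_info, getResponse) are assumptions about the SDK's base class rather than something shown in this file.

# Hypothetical invocation sketch (credentials and paths are placeholders).
import top
import top.api

req = top.api.FenxiaoProductImageUploadRequest()
req.set_app_info(top.appinfo('your-appkey', 'your-secret'))
req.product_id = 123456
req.position = 1
req.image = open('cover.jpg', 'rb').read()  # multipart field, see getMultipartParas()
resp = req.getResponse('your-session-key')
print(resp)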
|
#!/usr/bin/python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to cover Logger."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
import logging
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..'))
import unittest
from adspygoogle.common import Utils
from tests.adspygoogle.adwords import HTTP_PROXY
from tests.adspygoogle.adwords import SERVER_V201109
from tests.adspygoogle.adwords import TEST_VERSION_V201109
from tests.adspygoogle.adwords import VERSION_V201109
from tests.adspygoogle.adwords import client
class LoggerTestV201109(unittest.TestCase):
"""Unittest suite for Logger using v201109."""
SERVER = SERVER_V201109
VERSION = VERSION_V201109
TMP_LOG = os.path.join('..', '..', '..', 'logs', 'logger_unittest.log')
DEBUG_MSG1 = 'Message before call to an API method.'
DEBUG_MSG2 = 'Message after call to an API method.'
client.debug = False
def setUp(self):
"""Prepare unittest."""
print self.id()
def testUpperStackLogging(self):
"""Tests whether we can define logger at client level and log before and
after the API request is made."""
logger = logging.getLogger('LoggerTest')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler(self.__class__.TMP_LOG)
fh.setLevel(logging.DEBUG)
logger.addHandler(fh)
# Clean up temporary log file.
Utils.PurgeLog(self.__class__.TMP_LOG)
logger.debug(self.__class__.DEBUG_MSG1)
info_service = client.GetInfoService(
self.__class__.SERVER, self.__class__.VERSION, HTTP_PROXY)
selector = {
'apiUsageType': 'TOTAL_USAGE_API_UNITS_PER_MONTH'
}
info_service.Get(selector)
logger.debug(self.__class__.DEBUG_MSG2)
data = Utils.ReadFile(self.__class__.TMP_LOG)
self.assertEqual(data.find(self.__class__.DEBUG_MSG1), 0)
self.assertEqual(data.find(self.__class__.DEBUG_MSG2),
len(self.__class__.DEBUG_MSG1) + 1)
# Clean up and remove temporary log file.
Utils.PurgeLog(self.__class__.TMP_LOG)
os.remove(self.__class__.TMP_LOG)
def makeTestSuiteV201109():
"""Set up test suite using v201109.
Returns:
TestSuite test suite using v201109.
"""
suite = unittest.TestSuite()
suite.addTests(unittest.makeSuite(LoggerTestV201109))
return suite
if __name__ == '__main__':
suites = []
if TEST_VERSION_V201109:
suites.append(makeTestSuiteV201109())
if suites:
alltests = unittest.TestSuite(suites)
unittest.main(defaultTest='alltests')
|
nearlyfreeapps/python-googleadwords
|
tests/adspygoogle/adwords/adwords_logger_unittest.py
|
Python
|
apache-2.0
| 3,059
|
def extractNanoDesuLightNovelTranslations(item):
"""
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if 'Ore to Kawazu-san no Isekai Hourouki' in item['tags']:
return buildReleaseMessageWithType(item, 'Ore to Kawazu-san no Isekai Hourouki', vol, chp, frag=frag, postfix=postfix)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractNanoDesuLightNovelTranslations.py
|
Python
|
bsd-3-clause
| 412
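For context, the extractor above consumes a feed item dict with 'title' and 'tags' keys; the helpers it calls (extractVolChapterFragmentPostfix, buildReleaseMessageWithType) come from the surrounding ReadableWebProxy codebase. A sketch of the expected input:

# Sketch of the item shape this extractor expects (values are made up).
item = {
    'title': 'Ore to Kawazu-san Volume 2 Chapter 5',
    'tags': ['Ore to Kawazu-san no Isekai Hourouki'],
}
release = extractNanoDesuLightNovelTranslations(item)
# -> a release message for the matched series, None for previews or
#    titles with no vol/chp/frag information, and False for unknown tags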
|
#
# Copyright 2013 Red Hat, Inc.
# Copyright(c) FUJITSU Limited 2007.
#
# Cloning a virtual machine module.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
# MA 02110-1301 USA.
import logging
import re
import os
import urlgrabber.progress as progress
import libvirt
from virtinst import Guest
from virtinst import VirtualNetworkInterface
from virtinst import VirtualDisk
from virtinst import StorageVolume
from virtinst import util
class Cloner(object):
# Reasons why we don't default to cloning.
CLONE_POLICY_NO_READONLY = 1
CLONE_POLICY_NO_SHAREABLE = 2
CLONE_POLICY_NO_EMPTYMEDIA = 3
def __init__(self, conn):
self.conn = conn
# original guest name or uuid
self._original_guest = None
self.original_dom = None
self._original_disks = []
self._original_xml = None
self._guest = None
# clone guest
self._clone_name = None
self._clone_disks = []
self._clone_macs = []
self._clone_uuid = None
self._clone_sparse = True
self._clone_xml = None
self._force_target = []
self._skip_target = []
self._preserve = True
self._clone_running = False
self._replace = False
# Default clone policy for back compat: don't clone readonly,
# shareable, or empty disks
self._clone_policy = [self.CLONE_POLICY_NO_READONLY,
self.CLONE_POLICY_NO_SHAREABLE,
self.CLONE_POLICY_NO_EMPTYMEDIA]
# Generate a random UUID at the start
self.clone_uuid = util.generate_uuid(conn)
# Getter/Setter methods
def get_original_guest(self):
return self._original_guest
def set_original_guest(self, original_guest):
if self._lookup_vm(original_guest):
self._original_guest = original_guest
original_guest = property(get_original_guest, set_original_guest,
doc="Original guest name.")
def set_original_xml(self, val):
if type(val) is not str:
raise ValueError(_("Original xml must be a string."))
self._original_xml = val
self._original_guest = Guest(self.conn,
parsexml=self._original_xml).name
def get_original_xml(self):
return self._original_xml
original_xml = property(get_original_xml, set_original_xml,
doc="XML of the original guest.")
def get_clone_name(self):
return self._clone_name
def set_clone_name(self, name):
try:
Guest.validate_name(self.conn, name,
check_collision=not self.replace,
validate=False)
except ValueError, e:
raise ValueError(_("Invalid name for new guest: %s") % e)
self._clone_name = name
clone_name = property(get_clone_name, set_clone_name,
doc="Name to use for the new guest clone.")
def set_clone_uuid(self, uuid):
try:
util.validate_uuid(uuid)
except ValueError, e:
raise ValueError(_("Invalid uuid for new guest: %s") % e)
if util.vm_uuid_collision(self.conn, uuid):
raise ValueError(_("UUID '%s' is in use by another guest.") %
uuid)
self._clone_uuid = uuid
def get_clone_uuid(self):
return self._clone_uuid
clone_uuid = property(get_clone_uuid, set_clone_uuid,
doc="UUID to use for the new guest clone")
def set_clone_paths(self, paths):
disklist = []
for path in util.listify(paths):
try:
device = VirtualDisk.DEVICE_DISK
if not path:
device = VirtualDisk.DEVICE_CDROM
disk = VirtualDisk(self.conn)
disk.path = path
disk.device = device
if path and not self.preserve_dest_disks:
# We fake storage creation params for now, but we will
# update it later. Just use any clone_path to make sure
# validation doesn't trip up
clone_path = "/foo/bar"
disk.set_create_storage(fake=True, clone_path=clone_path)
disk.validate()
disklist.append(disk)
except Exception, e:
logging.debug("Error setting clone path.", exc_info=True)
raise ValueError(_("Could not use path '%s' for cloning: %s") %
(path, str(e)))
self._clone_disks = disklist
def get_clone_paths(self):
return [d.path for d in self.clone_disks]
clone_paths = property(get_clone_paths, set_clone_paths,
doc="Paths to use for the new disk locations.")
def get_clone_disks(self):
return self._clone_disks
clone_disks = property(get_clone_disks,
doc="VirtualDisk instances for the new"
" disk paths")
def set_clone_macs(self, mac):
maclist = util.listify(mac)
for m in maclist:
msg = VirtualNetworkInterface.is_conflict_net(self.conn, m)[1]
if msg:
raise RuntimeError(msg)
self._clone_macs = maclist
def get_clone_macs(self):
return self._clone_macs
clone_macs = property(get_clone_macs, set_clone_macs,
doc="MAC address for the new guest clone.")
def get_original_disks(self):
return self._original_disks
original_disks = property(get_original_disks,
doc="VirtualDisk instances of the "
"original disks being cloned.")
def get_clone_xml(self):
return self._clone_xml
def set_clone_xml(self, clone_xml):
self._clone_xml = clone_xml
clone_xml = property(get_clone_xml, set_clone_xml,
doc="Generated XML for the guest clone.")
def get_clone_sparse(self):
return self._clone_sparse
def set_clone_sparse(self, flg):
self._clone_sparse = flg
clone_sparse = property(get_clone_sparse, set_clone_sparse,
doc="Whether to attempt sparse allocation during "
"cloning.")
def get_preserve(self):
return self._preserve
def set_preserve(self, flg):
self._preserve = flg
preserve = property(get_preserve, set_preserve,
doc="If true, preserve ALL original disk devices.")
def get_preserve_dest_disks(self):
return not self.preserve
preserve_dest_disks = property(get_preserve_dest_disks,
doc="It true, preserve ALL disk devices for the "
"NEW guest. This means no storage cloning. "
"This is a convenience access for "
"(not Cloner.preserve)")
def set_force_target(self, dev):
if type(dev) is list:
self._force_target = dev[:]
else:
self._force_target.append(dev)
def get_force_target(self):
return self._force_target
force_target = property(get_force_target, set_force_target,
doc="List of disk targets that we force cloning "
"despite Cloner's recommendation.")
def set_skip_target(self, dev):
if type(dev) is list:
self._skip_target = dev[:]
else:
self._skip_target.append(dev)
def get_skip_target(self):
return self._skip_target
skip_target = property(get_skip_target, set_skip_target,
doc="List of disk targets that we skip cloning "
"despite Cloner's recommendation. This "
"takes precedence over force_target.")
def set_clone_policy(self, policy_list):
if type(policy_list) != list:
raise ValueError(_("Cloning policy must be a list of rules."))
self._clone_policy = policy_list
def get_clone_policy(self):
return self._clone_policy
clone_policy = property(get_clone_policy, set_clone_policy,
doc="List of policy rules for determining which "
"vm disks to clone. See CLONE_POLICY_*")
def get_clone_running(self):
return self._clone_running
def set_clone_running(self, val):
self._clone_running = bool(val)
clone_running = property(get_clone_running, set_clone_running,
doc="Allow cloning a running VM. If enabled, "
"domain state is not checked before "
"cloning.")
def _get_replace(self):
return self._replace
def _set_replace(self, val):
self._replace = bool(val)
replace = property(_get_replace, _set_replace,
doc="If enabled, don't check for clone name collision, "
"simply undefine any conflicting guest.")
# Functional methods
def setup_original(self):
"""
Validate and setup all parameters needed for the original (cloned) VM
"""
logging.debug("Validating original guest parameters")
if self.original_guest is None and self.original_xml is None:
raise RuntimeError(_("Original guest name or xml is required."))
if self.original_guest is not None and not self.original_xml:
self.original_dom = self._lookup_vm(self.original_guest)
self.original_xml = self.original_dom.XMLDesc(0)
logging.debug("Original XML:\n%s", self.original_xml)
self._guest = Guest(self.conn, parsexml=self.original_xml)
self._guest.id = None
self._guest.replace = self.replace
# Pull clonable storage info from the original xml
self._original_disks = self._get_original_disks_info()
logging.debug("Original paths: %s",
[d.path for d in self.original_disks])
logging.debug("Original sizes: %s",
[d.get_size() for d in self.original_disks])
# If domain has devices to clone, it must be 'off' or 'paused'
if (not self.clone_running and
(self.original_dom and len(self.original_disks) != 0)):
status = self.original_dom.info()[0]
if status not in [libvirt.VIR_DOMAIN_SHUTOFF,
libvirt.VIR_DOMAIN_PAUSED]:
raise RuntimeError(_("Domain with devices to clone must be "
"paused or shutoff."))
def _setup_disk_clone_destination(self, orig_disk, clone_disk):
"""
Helper that validates the new path location
"""
if self.preserve_dest_disks:
return
if clone_disk.get_vol_object():
# XXX We could always do this with vol upload?
# Special case: non remote cloning of a guest using
# managed block devices: fall back to local cloning if
# we have permissions to do so. This validation check
# caused a few bug reports in a short period of time,
# so must be a common case.
if (self.conn.is_remote() or
clone_disk.type != clone_disk.TYPE_BLOCK or
not orig_disk.path or
not os.access(orig_disk.path, os.R_OK) or
not clone_disk.path or
not os.access(clone_disk.path, os.W_OK)):
raise RuntimeError(
_("Clone onto existing storage volume is not "
"currently supported: '%s'") % clone_disk.path)
# Sync 'size' between the two
size = orig_disk.get_size()
vol_install = None
clone_path = None
# Setup proper cloning inputs for the new virtual disks
if (orig_disk.get_vol_object() and
clone_disk.get_vol_install()):
clone_vol_install = clone_disk.get_vol_install()
# Source and dest are managed. If they share the same pool,
# replace vol_install with a CloneVolume instance, otherwise
# simply set input_vol on the dest vol_install
if (clone_vol_install.pool.name() ==
orig_disk.get_vol_object().storagePoolLookupByVolume().name()):
vol_install = StorageVolume(self.conn)
vol_install.input_vol = orig_disk.get_vol_object()
vol_install.sync_input_vol()
vol_install.name = clone_vol_install.name
else:
# Cross pool cloning
# Deliberately don't sync input_vol params here
clone_vol_install.input_vol = orig_disk.get_vol_object()
vol_install = clone_vol_install
else:
clone_path = orig_disk.path
clone_disk.set_create_storage(
size=size, vol_install=vol_install, clone_path=clone_path)
clone_disk.validate()
def setup_clone(self):
"""
Validate and set up all parameters needed for the new (clone) VM
"""
logging.debug("Validating clone parameters.")
self._clone_xml = self.original_xml
if len(self.clone_disks) < len(self.original_disks):
raise ValueError(_("More disks to clone than new paths specified. "
"(%(passed)d specified, %(need)d needed") %
{"passed" : len(self.clone_disks),
"need" : len(self.original_disks)})
logging.debug("Clone paths: %s", [d.path for d in self.clone_disks])
self._guest.name = self._clone_name
self._guest.uuid = self._clone_uuid
self._clone_macs.reverse()
for dev in self._guest.get_devices("graphics"):
if dev.port and dev.port != -1:
logging.warn(_("Setting the graphics device port to autoport, "
"in order to avoid conflicting."))
dev.port = -1
for iface in self._guest.get_devices("interface"):
iface.target_dev = None
if self._clone_macs:
mac = self._clone_macs.pop()
else:
mac = VirtualNetworkInterface.generate_mac(self.conn)
iface.macaddr = mac
# Changing storage XML
for i in range(len(self._original_disks)):
orig_disk = self._original_disks[i]
clone_disk = self._clone_disks[i]
for disk in self._guest.get_devices("disk"):
if disk.target == orig_disk.target:
xmldisk = disk
self._setup_disk_clone_destination(orig_disk, clone_disk)
# Change the XML
xmldisk.path = None
xmldisk.type = clone_disk.type
xmldisk.driver_name = orig_disk.driver_name
xmldisk.driver_type = orig_disk.driver_type
xmldisk.path = clone_disk.path
# Save altered clone xml
self._clone_xml = self._guest.get_xml_config()
logging.debug("Clone guest xml is\n%s", self._clone_xml)
def setup(self):
"""
Helper function that wraps setup_original and setup_clone, with
additional debug logging.
"""
self.setup_original()
self.setup_clone()
def start_duplicate(self, meter=None):
"""
Actually perform the duplication: cloning disks if needed and defining
the new clone xml.
"""
logging.debug("Starting duplicate.")
if not meter:
meter = progress.BaseMeter()
dom = None
try:
# Replace orig VM if required
Guest.check_vm_collision(self.conn, self.clone_name,
do_remove=self.replace)
# Define domain early to catch any xml errors before duping storage
dom = self.conn.defineXML(self.clone_xml)
if self.preserve:
for dst_dev in self.clone_disks:
dst_dev.setup(meter=meter)
except Exception, e:
logging.debug("Duplicate failed: %s", str(e))
if dom:
dom.undefine()
raise
logging.debug("Duplicating finished.")
def generate_clone_disk_path(self, origpath, newname=None):
origname = self.original_guest
newname = newname or self.clone_name
path = origpath
suffix = ""
# Try to split the suffix off the existing disk name. Ex.
# foobar.img -> foobar-clone.img
#
# If the suffix is greater than 7 characters, assume it isn't
# a file extension and is part of the disk name, at which point
# just stick '-clone' on the end.
if origpath.count(".") and len(origpath.rsplit(".", 1)[1]) <= 7:
path, suffix = origpath.rsplit(".", 1)
suffix = "." + suffix
dirname = os.path.dirname(path)
basename = os.path.basename(path)
clonebase = basename + "-clone"
if origname and basename == origname:
clonebase = newname
clonebase = os.path.join(dirname, clonebase)
return util.generate_name(
clonebase,
lambda p: VirtualDisk.path_exists(self.conn, p),
suffix,
lib_collision=False)
def generate_clone_name(self):
# If the orig name is "foo-clone", we don't want the clone to be
# "foo-clone-clone", we want "foo-clone1"
basename = self.original_guest
match = re.search("-clone[1-9]*$", basename)
start_num = 1
if match:
num_match = re.search("[1-9]+$", match.group())
if num_match:
start_num = int(str(num_match.group()))
basename = basename.replace(match.group(), "")
basename = basename + "-clone"
return util.generate_name(basename,
self.conn.lookupByName,
sep="", start_num=start_num)
############################
# Private helper functions #
############################
# Parse disk paths that need to be cloned from the original guest's xml
# Return a list of VirtualDisk instances pointing to the original
# storage
def _get_original_disks_info(self):
clonelist = []
retdisks = []
for disk in self._guest.get_devices("disk"):
if self._do_we_clone_device(disk):
clonelist.append(disk)
continue
# Set up virtual disk to encapsulate all relevant path info
for disk in clonelist:
validate = not self.preserve_dest_disks
try:
device = VirtualDisk.DEVICE_DISK
if not disk.path:
# Tell VirtualDisk we are a cdrom to allow empty media
device = VirtualDisk.DEVICE_CDROM
newd = VirtualDisk(self.conn)
newd.path = disk.path
newd.device = device
newd.driver_name = disk.driver_name
newd.driver_type = disk.driver_type
newd.target = disk.target
if validate:
newd.set_create_storage(fake=True)
if newd.creating_storage() and disk.path is not None:
raise ValueError("Disk path '%s' does not exist." %
newd.path)
except Exception, e:
logging.debug("Exception creating clone disk objects",
exc_info=True)
raise ValueError(_("Could not determine original disk "
"information: %s" % str(e)))
retdisks.append(newd)
return retdisks
    # Pull disk #i from the original guest xml, return its source path
    # if it should be cloned
# Cloning policy based on 'clone_policy', 'force_target' and 'skip_target'
def _do_we_clone_device(self, disk):
if not disk.target:
raise ValueError("XML has no 'dev' attribute in disk target")
if disk.target in self.skip_target:
return False
if disk.target in self.force_target:
return True
# No media path
if (not disk.path and
self.CLONE_POLICY_NO_EMPTYMEDIA in self.clone_policy):
return False
# Readonly disks
if (disk.read_only and
self.CLONE_POLICY_NO_READONLY in self.clone_policy):
return False
# Shareable disks
if (disk.shareable and
self.CLONE_POLICY_NO_SHAREABLE in self.clone_policy):
return False
return True
# Simple wrapper for checking a vm exists and returning the domain
def _lookup_vm(self, name):
try:
return self.conn.lookupByName(name)
except libvirt.libvirtError:
raise ValueError(_("Domain '%s' was not found.") % str(name))
|
aurex-linux/virt-manager
|
virtinst/cloner.py
|
Python
|
gpl-2.0
| 21,998
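To make the path-suffix rule in generate_clone_disk_path() concrete, here is a standalone re-implementation of just the string handling (no libvirt connection, no collision checking); the paths and names are made up.

# Standalone illustration of generate_clone_disk_path()'s suffix rule.
import os

def split_clone_path(origpath, origname, newname):
    path, suffix = origpath, ""
    # a trailing ".xxx" of at most 7 characters counts as a file extension
    if origpath.count(".") and len(origpath.rsplit(".", 1)[1]) <= 7:
        path, suffix = origpath.rsplit(".", 1)
        suffix = "." + suffix
    dirname, basename = os.path.dirname(path), os.path.basename(path)
    clonebase = newname if basename == origname else basename + "-clone"
    return os.path.join(dirname, clonebase) + suffix

print(split_clone_path('/vms/foobar.img', 'myvm', 'myvm-clone'))
# -> /vms/foobar-clone.img
print(split_clone_path('/vms/myvm.qcow2', 'myvm', 'web2'))
# -> /vms/web2.qcow2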
|
"""Class for printing reports on profiled python code."""
# Written by James Roskind
# Based on prior profile module by Sjoerd Mullender...
# which was hacked somewhat by: Guido van Rossum
# Copyright Disney Enterprises, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
import sys
import os
import time
import marshal
import re
from functools import cmp_to_key
__all__ = ["Stats"]
class Stats:
"""This class is used for creating reports from data generated by the
Profile class. It is a "friend" of that class, and imports data either
by direct access to members of Profile class, or by reading in a dictionary
that was emitted (via marshal) from the Profile class.
The big change from the previous Profiler (in terms of raw functionality)
is that an "add()" method has been provided to combine Stats from
several distinct profile runs. Both the constructor and the add()
method now take arbitrarily many file names as arguments.
All the print methods now take an argument that indicates how many lines
to print. If the arg is a floating point number between 0 and 1.0, then
it is taken as a decimal percentage of the available lines to be printed
(e.g., .1 means print 10% of all available lines). If it is an integer,
it is taken to mean the number of lines of data that you wish to have
printed.
The sort_stats() method now processes some additional options (i.e., in
addition to the old -1, 0, 1, or 2). It takes an arbitrary number of
quoted strings to select the sort order. For example sort_stats('time',
'name') sorts on the major key of 'internal function time', and on the
minor key of 'the name of the function'. Look at the two tables in
sort_stats() and get_sort_arg_defs(self) for more examples.
All methods return self, so you can string together commands like:
Stats('foo', 'goo').strip_dirs().sort_stats('calls').\
print_stats(5).print_callers(5)
"""
def __init__(self, *args, **kwds):
        # I can't figure out how to explicitly specify a stream keyword arg
        # with *args:
        # def __init__(self, *args, stream=sys.stdout): ...
        # so I use **kwds and squawk if something unexpected is passed in.
self.stream = sys.stdout
if "stream" in kwds:
self.stream = kwds["stream"]
del kwds["stream"]
if kwds:
keys = kwds.keys()
keys.sort()
extras = ", ".join(["%s=%s" % (k, kwds[k]) for k in keys])
raise ValueError, "unrecognized keyword args: %s" % extras
if not len(args):
arg = None
else:
arg = args[0]
args = args[1:]
self.init(arg)
self.add(*args)
def init(self, arg):
self.all_callees = None # calc only if needed
self.files = []
self.fcn_list = None
self.total_tt = 0
self.total_calls = 0
self.prim_calls = 0
self.max_name_len = 0
self.top_level = {}
self.stats = {}
self.sort_arg_dict = {}
self.load_stats(arg)
trouble = 1
try:
self.get_top_level_stats()
trouble = 0
finally:
if trouble:
print >> self.stream, "Invalid timing data",
if self.files: print >> self.stream, self.files[-1],
print >> self.stream
def load_stats(self, arg):
if not arg: self.stats = {}
elif isinstance(arg, basestring):
f = open(arg, 'rb')
self.stats = marshal.load(f)
f.close()
try:
file_stats = os.stat(arg)
arg = time.ctime(file_stats.st_mtime) + " " + arg
            except OSError:   # in case this is not unix
pass
self.files = [ arg ]
elif hasattr(arg, 'create_stats'):
arg.create_stats()
self.stats = arg.stats
arg.stats = {}
if not self.stats:
            raise TypeError, "Cannot create or construct a %r object from %r" % (
self.__class__, arg)
return
def get_top_level_stats(self):
for func, (cc, nc, tt, ct, callers) in self.stats.items():
self.total_calls += nc
self.prim_calls += cc
self.total_tt += tt
if ("jprofile", 0, "profiler") in callers:
self.top_level[func] = None
if len(func_std_string(func)) > self.max_name_len:
self.max_name_len = len(func_std_string(func))
def add(self, *arg_list):
if not arg_list: return self
if len(arg_list) > 1: self.add(*arg_list[1:])
other = arg_list[0]
if type(self) != type(other) or self.__class__ != other.__class__:
other = Stats(other)
self.files += other.files
self.total_calls += other.total_calls
self.prim_calls += other.prim_calls
self.total_tt += other.total_tt
for func in other.top_level:
self.top_level[func] = None
if self.max_name_len < other.max_name_len:
self.max_name_len = other.max_name_len
self.fcn_list = None
for func, stat in other.stats.iteritems():
if func in self.stats:
old_func_stat = self.stats[func]
else:
old_func_stat = (0, 0, 0, 0, {},)
self.stats[func] = add_func_stats(old_func_stat, stat)
return self
def dump_stats(self, filename):
"""Write the profile data to a file we know how to load back."""
        f = open(filename, 'wb')
try:
marshal.dump(self.stats, f)
finally:
f.close()
# list the tuple indices and directions for sorting,
# along with some printable description
sort_arg_dict_default = {
"calls" : (((1,-1), ), "call count"),
"cumulative": (((3,-1), ), "cumulative time"),
"file" : (((4, 1), ), "file name"),
"line" : (((5, 1), ), "line number"),
"module" : (((4, 1), ), "file name"),
"name" : (((6, 1), ), "function name"),
"nfl" : (((6, 1),(4, 1),(5, 1),), "name/file/line"),
"pcalls" : (((0,-1), ), "call count"),
"stdname" : (((7, 1), ), "standard name"),
"time" : (((2,-1), ), "internal time"),
}
def get_sort_arg_defs(self):
"""Expand all abbreviations that are unique."""
if not self.sort_arg_dict:
self.sort_arg_dict = dict = {}
bad_list = {}
for word, tup in self.sort_arg_dict_default.iteritems():
fragment = word
while fragment:
if fragment in dict:
bad_list[fragment] = 0
break
dict[fragment] = tup
fragment = fragment[:-1]
for word in bad_list:
del dict[word]
return self.sort_arg_dict
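    # Example: since "cumulative" is the only key starting with "cu",
    # sort_stats("cu") works; a bare "c" is ambiguous between "calls"
    # and "cumulative", so it is dropped from the expansion table.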
def sort_stats(self, *field):
if not field:
self.fcn_list = 0
return self
if len(field) == 1 and isinstance(field[0], (int, long)):
# Be compatible with old profiler
field = [ {-1: "stdname",
0: "calls",
1: "time",
2: "cumulative"}[field[0]] ]
sort_arg_defs = self.get_sort_arg_defs()
sort_tuple = ()
self.sort_type = ""
connector = ""
for word in field:
sort_tuple = sort_tuple + sort_arg_defs[word][0]
self.sort_type += connector + sort_arg_defs[word][1]
connector = ", "
stats_list = []
for func, (cc, nc, tt, ct, callers) in self.stats.iteritems():
stats_list.append((cc, nc, tt, ct) + func +
(func_std_string(func), func))
stats_list.sort(key=cmp_to_key(TupleComp(sort_tuple).compare))
self.fcn_list = fcn_list = []
for tuple in stats_list:
fcn_list.append(tuple[-1])
return self
def reverse_order(self):
if self.fcn_list:
self.fcn_list.reverse()
return self
def strip_dirs(self):
oldstats = self.stats
self.stats = newstats = {}
max_name_len = 0
for func, (cc, nc, tt, ct, callers) in oldstats.iteritems():
newfunc = func_strip_path(func)
if len(func_std_string(newfunc)) > max_name_len:
max_name_len = len(func_std_string(newfunc))
newcallers = {}
for func2, caller in callers.iteritems():
newcallers[func_strip_path(func2)] = caller
if newfunc in newstats:
newstats[newfunc] = add_func_stats(
newstats[newfunc],
(cc, nc, tt, ct, newcallers))
else:
newstats[newfunc] = (cc, nc, tt, ct, newcallers)
old_top = self.top_level
self.top_level = new_top = {}
for func in old_top:
new_top[func_strip_path(func)] = None
self.max_name_len = max_name_len
self.fcn_list = None
self.all_callees = None
return self
def calc_callees(self):
if self.all_callees: return
self.all_callees = all_callees = {}
for func, (cc, nc, tt, ct, callers) in self.stats.iteritems():
if not func in all_callees:
all_callees[func] = {}
for func2, caller in callers.iteritems():
if not func2 in all_callees:
all_callees[func2] = {}
all_callees[func2][func] = caller
return
#******************************************************************
# The following functions support actual printing of reports
#******************************************************************
# Optional "amount" is either a line count, or a percentage of lines.
def eval_print_amount(self, sel, list, msg):
new_list = list
if isinstance(sel, basestring):
try:
rex = re.compile(sel)
except re.error:
msg += " <Invalid regular expression %r>\n" % sel
return new_list, msg
new_list = []
for func in list:
if rex.search(func_std_string(func)):
new_list.append(func)
else:
count = len(list)
if isinstance(sel, float) and 0.0 <= sel < 1.0:
count = int(count * sel + .5)
new_list = list[:count]
elif isinstance(sel, (int, long)) and 0 <= sel < count:
count = sel
new_list = list[:count]
if len(list) != len(new_list):
msg += " List reduced from %r to %r due to restriction <%r>\n" % (
len(list), len(new_list), sel)
return new_list, msg
def get_print_list(self, sel_list):
width = self.max_name_len
if self.fcn_list:
stat_list = self.fcn_list[:]
msg = " Ordered by: " + self.sort_type + '\n'
else:
stat_list = self.stats.keys()
msg = " Random listing order was used\n"
for selection in sel_list:
stat_list, msg = self.eval_print_amount(selection, stat_list, msg)
count = len(stat_list)
if not stat_list:
return 0, stat_list
print >> self.stream, msg
if count < len(self.stats):
width = 0
for func in stat_list:
if len(func_std_string(func)) > width:
width = len(func_std_string(func))
return width+2, stat_list
def print_stats(self, *amount):
for filename in self.files:
print >> self.stream, filename
if self.files: print >> self.stream
indent = ' ' * 8
for func in self.top_level:
print >> self.stream, indent, func_get_function_name(func)
print >> self.stream, indent, self.total_calls, "function calls",
if self.total_calls != self.prim_calls:
print >> self.stream, "(%d primitive calls)" % self.prim_calls,
print >> self.stream, "in %.3f seconds" % self.total_tt
print >> self.stream
width, list = self.get_print_list(amount)
if list:
self.print_title()
for func in list:
self.print_line(func)
print >> self.stream
print >> self.stream
return self
def print_callees(self, *amount):
width, list = self.get_print_list(amount)
if list:
self.calc_callees()
self.print_call_heading(width, "called...")
for func in list:
if func in self.all_callees:
self.print_call_line(width, func, self.all_callees[func])
else:
self.print_call_line(width, func, {})
print >> self.stream
print >> self.stream
return self
def print_callers(self, *amount):
width, list = self.get_print_list(amount)
if list:
self.print_call_heading(width, "was called by...")
for func in list:
cc, nc, tt, ct, callers = self.stats[func]
self.print_call_line(width, func, callers, "<-")
print >> self.stream
print >> self.stream
return self
def print_call_heading(self, name_size, column_title):
print >> self.stream, "Function ".ljust(name_size) + column_title
# print sub-header only if we have new-style callers
subheader = False
for cc, nc, tt, ct, callers in self.stats.itervalues():
if callers:
value = callers.itervalues().next()
subheader = isinstance(value, tuple)
break
if subheader:
print >> self.stream, " "*name_size + " ncalls tottime cumtime"
def print_call_line(self, name_size, source, call_dict, arrow="->"):
print >> self.stream, func_std_string(source).ljust(name_size) + arrow,
if not call_dict:
print >> self.stream
return
clist = call_dict.keys()
clist.sort()
indent = ""
for func in clist:
name = func_std_string(func)
value = call_dict[func]
if isinstance(value, tuple):
nc, cc, tt, ct = value
if nc != cc:
substats = '%d/%d' % (nc, cc)
else:
substats = '%d' % (nc,)
substats = '%s %s %s %s' % (substats.rjust(7+2*len(indent)),
f8(tt), f8(ct), name)
left_width = name_size + 1
else:
substats = '%s(%r) %s' % (name, value, f8(self.stats[func][3]))
left_width = name_size + 3
print >> self.stream, indent*left_width + substats
indent = " "
def print_title(self):
print >> self.stream, ' ncalls tottime percall cumtime percall',
print >> self.stream, 'filename:lineno(function)'
def print_line(self, func): # hack : should print percentages
cc, nc, tt, ct, callers = self.stats[func]
c = str(nc)
if nc != cc:
c = c + '/' + str(cc)
print >> self.stream, c.rjust(9),
print >> self.stream, f8(tt),
if nc == 0:
print >> self.stream, ' '*8,
else:
print >> self.stream, f8(float(tt)/nc),
print >> self.stream, f8(ct),
if cc == 0:
print >> self.stream, ' '*8,
else:
print >> self.stream, f8(float(ct)/cc),
print >> self.stream, func_std_string(func)
class TupleComp:
"""This class provides a generic function for comparing any two tuples.
Each instance records a list of tuple-indices (from most significant
    to least significant), and sort direction (ascending or descending) for
    each tuple-index. The compare functions can then be used as the function
    argument to the system sort() function when a list of tuples needs to be
    sorted in the instance's order."""
def __init__(self, comp_select_list):
self.comp_select_list = comp_select_list
def compare (self, left, right):
for index, direction in self.comp_select_list:
l = left[index]
r = right[index]
if l < r:
return -direction
if l > r:
return direction
return 0
#**************************************************************************
# func_name is a triple (file:string, line:int, name:string)
def func_strip_path(func_name):
filename, line, name = func_name
return os.path.basename(filename), line, name
def func_get_function_name(func):
return func[2]
def func_std_string(func_name): # match what old profile produced
if func_name[:2] == ('~', 0):
# special case for built-in functions
name = func_name[2]
if name.startswith('<') and name.endswith('>'):
return '{%s}' % name[1:-1]
else:
return name
else:
return "%s:%d(%s)" % func_name
#**************************************************************************
# The following functions combine statistics for pairs of functions.
# The bulk of the processing involves correctly handling "call" lists,
# such as callers and callees.
#**************************************************************************
def add_func_stats(target, source):
"""Add together all the stats for two profile entries."""
cc, nc, tt, ct, callers = source
t_cc, t_nc, t_tt, t_ct, t_callers = target
return (cc+t_cc, nc+t_nc, tt+t_tt, ct+t_ct,
add_callers(t_callers, callers))
def add_callers(target, source):
    """Combine two caller dicts into a single dict."""
new_callers = {}
for func, caller in target.iteritems():
new_callers[func] = caller
for func, caller in source.iteritems():
if func in new_callers:
if isinstance(caller, tuple):
# format used by cProfile
new_callers[func] = tuple([i[0] + i[1] for i in
zip(caller, new_callers[func])])
else:
# format used by profile
new_callers[func] += caller
else:
new_callers[func] = caller
return new_callers
def count_calls(callers):
"""Sum the caller statistics to get total number of calls received."""
nc = 0
for calls in callers.itervalues():
nc += calls
return nc
#**************************************************************************
# The following functions support printing of reports
#**************************************************************************
def f8(x):
return "%8.3f" % x
#**************************************************************************
# Statistics browser added by ESR, April 2001
#**************************************************************************
if __name__ == '__main__':
import cmd
try:
import readline
except ImportError:
pass
class ProfileBrowser(cmd.Cmd):
def __init__(self, profile=None):
cmd.Cmd.__init__(self)
self.prompt = "% "
self.stats = None
self.stream = sys.stdout
if profile is not None:
self.do_read(profile)
def generic(self, fn, line):
args = line.split()
processed = []
for term in args:
try:
processed.append(int(term))
continue
except ValueError:
pass
try:
frac = float(term)
if frac > 1 or frac < 0:
print >> self.stream, "Fraction argument must be in [0, 1]"
continue
processed.append(frac)
continue
except ValueError:
pass
processed.append(term)
if self.stats:
getattr(self.stats, fn)(*processed)
else:
print >> self.stream, "No statistics object is loaded."
return 0
def generic_help(self):
print >> self.stream, "Arguments may be:"
print >> self.stream, "* An integer maximum number of entries to print."
print >> self.stream, "* A decimal fractional number between 0 and 1, controlling"
print >> self.stream, " what fraction of selected entries to print."
print >> self.stream, "* A regular expression; only entries with function names"
print >> self.stream, " that match it are printed."
def do_add(self, line):
if self.stats:
self.stats.add(line)
else:
print >> self.stream, "No statistics object is loaded."
return 0
def help_add(self):
print >> self.stream, "Add profile info from given file to current statistics object."
def do_callees(self, line):
return self.generic('print_callees', line)
def help_callees(self):
print >> self.stream, "Print callees statistics from the current stat object."
self.generic_help()
def do_callers(self, line):
return self.generic('print_callers', line)
def help_callers(self):
print >> self.stream, "Print callers statistics from the current stat object."
self.generic_help()
def do_EOF(self, line):
print >> self.stream, ""
return 1
def help_EOF(self):
            print >> self.stream, "Leave the profile browser."
def do_quit(self, line):
return 1
def help_quit(self):
            print >> self.stream, "Leave the profile browser."
def do_read(self, line):
if line:
try:
self.stats = Stats(line)
except IOError, args:
print >> self.stream, args[1]
return
except Exception as err:
print >> self.stream, err.__class__.__name__ + ':', err
return
self.prompt = line + "% "
elif len(self.prompt) > 2:
line = self.prompt[:-2]
self.do_read(line)
else:
print >> self.stream, "No statistics object is current -- cannot reload."
return 0
def help_read(self):
print >> self.stream, "Read in profile data from a specified file."
print >> self.stream, "Without argument, reload the current file."
def do_reverse(self, line):
if self.stats:
self.stats.reverse_order()
else:
print >> self.stream, "No statistics object is loaded."
return 0
def help_reverse(self):
print >> self.stream, "Reverse the sort order of the profiling report."
def do_sort(self, line):
if not self.stats:
print >> self.stream, "No statistics object is loaded."
return
abbrevs = self.stats.get_sort_arg_defs()
if line and all((x in abbrevs) for x in line.split()):
self.stats.sort_stats(*line.split())
else:
print >> self.stream, "Valid sort keys (unique prefixes are accepted):"
for (key, value) in Stats.sort_arg_dict_default.iteritems():
print >> self.stream, "%s -- %s" % (key, value[1])
return 0
def help_sort(self):
print >> self.stream, "Sort profile data according to specified keys."
print >> self.stream, "(Typing `sort' without arguments lists valid keys.)"
def complete_sort(self, text, *args):
return [a for a in Stats.sort_arg_dict_default if a.startswith(text)]
def do_stats(self, line):
return self.generic('print_stats', line)
def help_stats(self):
print >> self.stream, "Print statistics from the current stat object."
self.generic_help()
def do_strip(self, line):
if self.stats:
self.stats.strip_dirs()
else:
print >> self.stream, "No statistics object is loaded."
def help_strip(self):
print >> self.stream, "Strip leading path information from filenames in the report."
def help_help(self):
print >> self.stream, "Show help for a given command."
def postcmd(self, stop, line):
if stop:
return stop
return None
import sys
if len(sys.argv) > 1:
initprofile = sys.argv[1]
else:
initprofile = None
try:
browser = ProfileBrowser(initprofile)
print >> browser.stream, "Welcome to the profile statistics browser."
browser.cmdloop()
print >> browser.stream, "Goodbye."
except KeyboardInterrupt:
pass
# That's all, folks.
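
# --- Illustrative usage sketch (not part of this module) ---
# A typical workflow pairs this class with cProfile: profile a command,
# dump the raw stats to a file, then load and format them here. The
# command string and file name below are made up.
#
#     import cProfile
#     import pstats
#     cProfile.run("sum(xrange(1000000))", "demo.prof")
#     p = pstats.Stats("demo.prof")
#     p.strip_dirs().sort_stats("cumulative").print_stats(10)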
|
alanjw/GreenOpenERP-Win-X86
|
python/Lib/pstats.py
|
Python
|
agpl-3.0
| 27,126
|
from django.views.generic import CreateView
from django.core.urlresolvers import reverse_lazy
from django import forms
from django.contrib.auth import forms as auth
from django.utils.translation import ugettext_lazy as _
from .models import User
from project.settings import LANGUAGE_CODE
from bootstrap3_datetime.widgets import DateTimePicker
# Create your views here.
class UserRegistrationForm(auth.UserCreationForm):
birth_date = forms.DateField(
widget=DateTimePicker(options={'format': 'DD.MM.YYYY',
'showTodayButton': True,
'locale': LANGUAGE_CODE}),
        label=_('Дата рождения')  # "Date of birth"
)
def __init__(self, *args, **kargs):
super(UserRegistrationForm, self).__init__(*args, **kargs)
class Meta:
model = User
fields = ('first_name', 'last_name', 'patronymic',
'email', 'passport_id', 'birth_date', 'address')
class RegisterView(CreateView):
form_class = UserRegistrationForm
template_name = 'users/register.html'
success_url = reverse_lazy('index')
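
# --- Illustrative wiring (an assumption, not part of this app) ---
# RegisterView is a plain CreateView, so exposing it only takes a URL
# pattern; the regex and pattern name below are hypothetical.
#
#     from django.conf.urls import url
#     from .views import RegisterView
#
#     urlpatterns = [
#         url(r'^register/$', RegisterView.as_view(), name='register'),
#     ]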
|
Flowneee/django-e-referendum
|
project/users/views.py
|
Python
|
mit
| 1,131
|
import io
def main():
list_songName = []
for line in f:
songName = line.split('|')[1]
        # if a song with this name already exists, do not add it again
if songName not in list_songName:
list_songName.append(songName)
f2.write(u'' + unicode(songName, encoding = 'utf-8') + '\n')
else:
f3.write(u'' + unicode(songName, encoding = 'utf-8') + '\n')
if __name__ == '__main__':
f = open('kara_list1_1.txt','r')
f2 = io.open('kara_list1_1_songName_Moi.txt','w', encoding = 'utf-8')
f3 = io.open('kara_list1_1_songName_trungTen_Moi.txt','w', encoding = 'utf-8')
main()
f.close()
f2.close()
f3.close()
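
# --- Illustrative variant (a sketch, not part of the original script) ---
# Membership tests on a list cost O(n) per lookup; a set makes each check
# O(1). Same input/output files and Python 2 idioms as above; 'io' is
# already imported at the top of this script.
def split_song_names(infile='kara_list1_1.txt',
                     new_file='kara_list1_1_songName_Moi.txt',
                     dup_file='kara_list1_1_songName_trungTen_Moi.txt'):
    seen = set()
    with open(infile, 'r') as src, \
         io.open(new_file, 'w', encoding='utf-8') as new_out, \
         io.open(dup_file, 'w', encoding='utf-8') as dup_out:
        for line in src:
            song_name = line.split('|')[1]
            # First occurrence goes to the "new" file, repeats to "dup".
            target = dup_out if song_name in seen else new_out
            seen.add(song_name)
            target.write(unicode(song_name, encoding='utf-8') + u'\n')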
|
hoaibang07/Webscrap
|
karaokeyoutube/tachtenfilekara.py
|
Python
|
gpl-2.0
| 688
|
# -*- coding: utf-8 -*-
'''
This file is part of the Python Mapper package, an open source tool
for exploration, analysis and visualization of data.
Copyright 2011–2014 by the authors:
Daniel Müllner, http://danifold.net
Aravindakshan Babu, anounceofpractice@hotmail.com
Python Mapper is distributed under the GPLv3 license. See the project home page
http://danifold.net/mapper
for more information.
'''
'''
Find an optimal path through a fence of vertical intervals. "Optimal" means the
shortest path from left to right with minimal vertical movement.
Run this as a standalone Python script to see the algorithm in action on random
data. This needs matplotlib for the display.
Otherwise, import the module and use 'shortest_path' on your own data.
'''
from collections import deque
from operator import lt, gt
import numpy as np
__all__ = ['shortest_path']
def shortest_path(LB, UB, verbose=False):
'''
Find an optimal path through a fence of vertical intervals. "Optimal" means
the shortest path from left to right with minimal vertical movement.
Intervals are equally spaced for simplicity, but that's not necessary
for the algorithm to work.
Arguments:
LB, UB : lower and upper bounds for the path. Must be sequences of numbers
with equal length.
Time and space complexity: \Theta(len(LB))
'''
pathlength = len(LB)
assert len(UB) == pathlength, \
"Upper and lower bounds must have the same length."
x0 = 0
y0 = _startpos(LB, UB)
if isinstance(y0, tuple):
if verbose:
print('Shortest path is not unique.\nAny horizontal line '
'between {} and {} is optimal.'.format(*y0))
return np.tile(np.mean(y0), pathlength)
Y = np.empty(pathlength) # result
# Convex hulls of the lower and upper bounds. The elements are triples
# (x, y, slope from the last point to the current point).
convex_hull_lower = deque([(x0, y0, None)])
convex_hull_upper = deque([(x0, y0, None)])
# In each step, introduce one more vertical interval [lb, ub] at the next
# position x and extend the path as far as its nodes can be unambiguously
# determined at this stage.
for x in range(1, pathlength):
lb = LB[x]
ub = UB[x]
assert lb <= ub, \
"Lower bounds must be less than or equal to the upper bounds."
# Update the convex hulls with the lower and upper bounds at x.
_update_hull(convex_hull_lower, x, lb, lt)
_update_hull(convex_hull_upper, x, ub, gt)
        # When the current upper bound forces the path to touch
        # convex_hull_lower, extend the path.
#
# Complexity argument: for every iteration of the while loop, the path
# Y makes progress, so there are globally at most "pathlength"
# iterations in the four while loops below.
while len(convex_hull_lower) > 1 and \
ub - y0 <= (x - x0) * convex_hull_lower[1][2]:
x0, y0 = _anchor(x0, y0, convex_hull_lower, convex_hull_upper,
x, Y, ub)
        # When the current lower bound forces the path to touch
        # convex_hull_upper, extend the path.
while len(convex_hull_upper) > 1 and \
lb - y0 >= (x - x0) * convex_hull_upper[1][2]:
x0, y0 = _anchor(x0, y0, convex_hull_upper, convex_hull_lower,
x, Y, lb)
# Add the last path segments when it touches convex_hull_lower.
while len(convex_hull_lower) > 1 and convex_hull_lower[1][2] >= 0:
x0, y0 = _anchor(x0, y0, convex_hull_lower, convex_hull_upper,
x, Y, ub)
# Add the last path segments when it touches convex_hull_upper.
while len(convex_hull_upper) > 1 and convex_hull_upper[1][2] <= 0:
x0, y0 = _anchor(x0, y0, convex_hull_upper, convex_hull_lower,
x, Y, lb)
# The final path segments are horizontal.
Y[x0:] = y0
return Y
def _update_hull(hull, x, b, cmp):
'''
Update the convex hull with the new bound b
The parameter cmp is either the "less than" or "greater than" operator.
The amortized time complexity is constant since for every extra iteration
of the "while" loop, the array "hull" is shortened by one.
'''
while True:
xc, yc, sl = hull[-1]
if len(hull) == 1 or cmp(b - yc, (x - xc) * sl): break
hull.pop()
hull.append((x, b, (b - yc) / float(x - xc)))
def _anchor(x0, y0, hullA, hullB, x, Y, b):
'''
Extend the path in Y so that it touches hullA at the second point hullA[1].
    Update hullA and hullB so that both start at (x1, y1).
'''
x1, y1, slope = hullA[1]
Y[x0:x1] = y0 + slope * np.arange(x1-x0)
hullA.popleft()
hullB.clear()
hullB.append((x1, y1, None))
if x != x1:
hullB.append((x, b, (b - y1) / float(x - x1)))
return x1, y1
def _startpos(LB, UB):
'''
Find the vertical start position of the optimal path. If the path is not
unique, then the optimal paths are straight horizontal lines. In this
case, return a tuple with the minimal and maximal y-coordinate for these
straight lines.
'''
lb = -float('inf')
ub = float('inf')
for lbb, ubb in zip(LB, UB):
if ubb <= lb:
return lb
if lbb >= ub:
return ub
lb = max(lb, lbb)
ub = min(ub, ubb)
if ub == lb:
return ub
return (lb, ub)
if __name__ == '__main__':
'''
Generate random data points and demonstrate the shortest path algorithm
visually. The 'matplotlib' package is needed for displaying the function
graphs.
'''
import random
import math
    seed = random.randint(0, 10**10)
print('Random seed: {}'.format(seed))
random.seed(seed)
# Random data: superposition of K sine waves, sampled at N points.
N = 50
K = 5
fmin = .5
fmax = 10
slope = random.uniform(-.3, .3)
freq = [random.uniform(fmin, fmax) for k in range(K)]
phase = [random.uniform(0, 2 * math.pi) for k in range(K)]
amplitude = [random.uniform(0, 1 / freq[k]) for k in range(K)]
error = [random.uniform(0, .4 / freq[k]) for k in range(K)]
errorphase = [random.uniform(0, 2 * math.pi) for k in range(K)]
LB = []
UB = []
for x in range(N):
v = 0
dv = 0
for k in range(K):
v += math.sin(x * freq[k] * 2 * math.pi / N + phase[k]) \
* amplitude[k] + slope * x / N
dv += abs(math.sin(x * freq[k] * 2 * math.pi / N + errorphase[k]) \
* error[k])
LB.append(v - dv)
UB.append(v + dv)
Y = shortest_path(LB, UB)
import matplotlib.pyplot as plt
plt.plot(range(len(LB)), LB, 'b-+')
plt.plot(range(len(UB)), UB, 'r-+')
plt.plot(range(len(Y)), Y, 'k-+')
plt.show()
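
def _example():
    '''
    Illustrative only (not part of the original module): a taut path that
    has to climb from the early intervals into the last one.
    '''
    lb = [0.0, 0.5, 0.0, 3.0]
    ub = [2.0, 1.5, 2.0, 4.0]
    return shortest_path(lb, ub)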
|
timothy-mcroy/mapper
|
tools/shortest_path.py
|
Python
|
gpl-2.0
| 6,939
|
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from UM.Math.Vector import Vector
from UM.Math.Float import Float
class Plane:
"""Plane representation using normal and distance."""
def __init__(self, normal = Vector(), distance = 0.0):
super().__init__()
self._normal = normal
self._distance = distance
@property
def normal(self):
return self._normal
@property
def distance(self):
return self._distance
    def intersectsRay(self, ray):
        # Vector from the plane's reference point to the ray's origin.
        w = ray.origin - (self._normal * self._distance)
        nDotR = self._normal.dot(ray.direction)
        nDotW = -self._normal.dot(w)
        # A ray (nearly) parallel to the plane never intersects it.
        if Float.fuzzyCompare(nDotR, 0.0):
            return False
        t = nDotW / nDotR
        # A negative t would place the intersection behind the ray's origin.
        if t < 0:
            return False
        # Distance along the ray at which it crosses the plane.
        return t
def __repr__(self):
return "Plane(normal = {0}, distance = {1})".format(self._normal, self._distance)
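
# --- Illustrative usage sketch (not part of Uranium) ---
# intersectsRay() accepts any object exposing 'origin' and 'direction'
# Vector attributes (UM.Math.Ray does); a namedtuple stands in here.
def _example_intersection():
    from collections import namedtuple
    Ray = namedtuple("Ray", ["origin", "direction"])
    floor = Plane(normal=Vector(0, 1, 0), distance=0.0)
    ray = Ray(origin=Vector(0, 5, 0), direction=Vector(0, -1, 0))
    # Shooting straight down from y=5 should give t == 5.0. Note a hit at
    # t == 0.0 is falsy, so callers should compare against False explicitly.
    return floor.intersectsRay(ray)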
|
Ultimaker/Uranium
|
UM/Math/Plane.py
|
Python
|
lgpl-3.0
| 975
|
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'Module for testing the update machine command.'
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
from eventstest import EventsTestMixin
class TestUpdateMachine(EventsTestMixin, TestBrokerCommand):
def test_1000_update_ut3c1n3(self):
self.noouttest(["update", "machine", "--machine", "ut3c1n3",
"--slot", "10", "--serial", "USN99C5553",
"--uuid", "097a2277-840d-4bd5-8327-cf133aa3c9d3"])
def test_1005_show_ut3c1n3(self):
command = "show machine --machine ut3c1n3"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Machine: ut3c1n3", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchoutput(out, "Chassis: ut3c1.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 10", command)
self.matchoutput(out, "Vendor: ibm Model: hs21-8853", command)
self.matchoutput(out, "Cpu: e5-2660 x 2", command)
self.matchoutput(out, "Memory: 8192 MB", command)
self.matchoutput(out, "Serial: USN99C5553", command)
self.matchoutput(out, "UUID: 097a2277-840d-4bd5-8327-cf133aa3c9d3",
command)
def test_1005_cat_ut3c1n3(self):
command = "cat --machine ut3c1n3"
out = self.commandtest(command.split(" "))
self.matchoutput(out, '"location" = "ut.ny.na";', command)
self.matchoutput(out, '"serialnumber" = "USN99C5553";', command)
self.matchoutput(out,
'include "hardware/machine/ibm/hs21-8853";',
command)
self.searchoutput(out,
r'"ram" = list\(\s*'
r'create\("hardware/ram/generic",\s*'
r'"size", 8192\*MB\s*\)\s*\);',
command)
self.searchoutput(out,
r'"cpu" = list\(\s*'
r'create\("hardware/cpu/intel/e5-2660"\),\s*'
r'create\("hardware/cpu/intel/e5-2660"\s*\)\s*\);',
command)
self.matchoutput(out, '"chassis" = "ut3c1.aqd-unittest.ms.com";', command)
self.matchoutput(out, '"slot" = 10;', command)
self.matchoutput(out,
'"uuid" = "097a2277-840d-4bd5-8327-cf133aa3c9d3";',
command)
def test_1006_clear_uuid(self):
command = ["update_machine", "--machine", "ut3c1n3", "--clear_uuid"]
self.noouttest(command)
def test_1007_verify_no_uuid(self):
command = ["show_machine", "--machine", "ut3c1n3"]
out = self.commandtest(command)
self.matchclean(out, "UUID", command)
command = ["cat", "--machine", "ut3c1n3"]
out = self.commandtest(command)
self.matchclean(out, "uuid", command)
def test_1010_update_ut3c5n10(self):
self.noouttest(["update", "machine",
"--hostname", "unittest02.one-nyp.ms.com",
"--chassis", "ut3c5.aqd-unittest.ms.com", "--slot", "20",
"--comments", "New machine comments"])
def test_1015_search_slot(self):
command = "search machine --slot 20 --fullinfo"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Machine: ut3c5n10", command)
self.matchoutput(out, "Model Type: blade", command)
def test_1015_search_chassis_slot(self):
command = "search machine --chassis ut3c5.aqd-unittest.ms.com --slot 20 --fullinfo"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Machine: ut3c5n10", command)
self.matchoutput(out, "Model Type: blade", command)
def test_1015_show_ut3c5n10(self):
command = "show machine --machine ut3c5n10"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Machine: ut3c5n10", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchoutput(out, "Chassis: ut3c5.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 20", command)
self.matchoutput(out, "Vendor: ibm Model: hs21-8853", command)
self.matchoutput(out, "Cpu: e5-2660 x 2", command)
self.matchoutput(out, "Memory: 8192 MB", command)
self.matchoutput(out, "Serial: 99C5553", command)
self.searchoutput(out, "^ Comments: New machine comments", command)
def test_1015_cat_ut3c5n10(self):
command = "cat --machine ut3c5n10"
out = self.commandtest(command.split(" "))
self.matchoutput(out, '"location" = "ut.ny.na";', command)
self.matchoutput(out, '"serialnumber" = "99C5553";', command)
self.matchoutput(out,
'include "hardware/machine/ibm/hs21-8853";',
command)
self.searchoutput(out,
r'"ram" = list\(\s*'
r'create\("hardware/ram/generic",\s*'
r'"size", 8192\*MB\s*\)\s*\);',
command)
self.searchoutput(out,
r'"cpu" = list\(\s*'
r'create\("hardware/cpu/intel/e5-2660"\),\s*'
r'create\("hardware/cpu/intel/e5-2660"\s*\)\s*\);',
command)
self.matchoutput(out, '"chassis" = "ut3c5.aqd-unittest.ms.com";', command)
self.matchoutput(out, '"slot" = 20;', command)
def test_1016_clear_comments(self):
self.noouttest(["update_machine", "--machine", "ut3c5n10", "--comments", ""])
def test_1017_verify_comments(self):
command = ["show_machine", "--machine", "ut3c5n10"]
out = self.commandtest(command)
self.searchclean(out, "^ Comments", command)
def test_1020_update_ut3c1n4_serial(self):
self.noouttest(["update", "machine", "--machine", "ut3c1n4",
"--serial", "USNKPDZ407"])
def test_1021_update_ut3c1n4_cpu(self):
self.noouttest(["update", "machine", "--machine", "ut3c1n4",
"--cpuname", "e5-2697-v3"])
def test_1022_update_ut3c1n4_rack(self):
# Changing the rack will change the location of the plenary, so we
# can test if the host profile gets written
self.noouttest(["update", "machine", "--machine", "ut3c1n4",
"--rack", "ut4"])
def test_1025_show_ut3c1n4(self):
command = "show machine --machine ut3c1n4"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Machine: ut3c1n4", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchoutput(out, "Rack: ut4", command)
self.matchoutput(out, "Vendor: ibm Model: hs21-8853", command)
self.matchoutput(out, "Cpu: e5-2697-v3 x 2", command)
self.matchoutput(out, "Memory: 8192 MB", command)
self.matchoutput(out, "Serial: USNKPDZ407", command)
def test_1025_cat_ut3c1n4(self):
command = "cat --machine ut3c1n4"
out = self.commandtest(command.split(" "))
self.matchoutput(out, '"location" = "ut.ny.na";', command)
self.matchoutput(out, '"serialnumber" = "USNKPDZ407";', command)
self.matchoutput(out,
'include "hardware/machine/ibm/hs21-8853";',
command)
self.searchoutput(out,
r'"ram" = list\(\s*'
r'create\("hardware/ram/generic",\s*'
r'"size", 8192\*MB\s*\)\s*\);',
command)
self.searchoutput(out,
r'"cpu" = list\(\s*'
r'create\("hardware/cpu/intel/e5-2697-v3"\),\s*'
r'create\("hardware/cpu/intel/e5-2697-v3"\s*\)\s*\);',
command)
def test_1025_cat_unittest01(self):
# There should be no host template present after the update_machine
# command
command = ["cat", "--hostname", "unittest01.one-nyp.ms.com"]
out = self.notfoundtest(command)
self.matchoutput(out, "not found", command)
def test_1030_clearchassis(self):
command = ["update", "machine", "--machine", "ut9s03p1",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "1"] + self.valid_just_tcm
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p1",
"--clearchassis"] + self.valid_just_tcm
self.noouttest(command)
def test_1031_verify_clearchassis(self):
command = ["show", "machine", "--machine", "ut9s03p1"]
out = self.commandtest(command)
self.matchoutput(out, "Machine: ut9s03p1", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchclean(out, "Chassis: ", command)
def test_1032_clearchassis_plus_new(self):
command = ["update", "machine", "--machine", "ut9s03p2",
"--chassis", "ut9c5.aqd-unittest.ms.com", "--slot", "1"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p2",
"--clearchassis",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "2"]
self.noouttest(command)
def test_1033_verify_clearchassis_plus_new(self):
command = ["show", "machine", "--machine", "ut9s03p2"]
out = self.commandtest(command)
self.matchoutput(out, "Machine: ut9s03p2", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchoutput(out, "Chassis: ut9c1.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 2", command)
def test_1034_true_chassis_update(self):
command = ["update", "machine", "--machine", "ut9s03p3",
"--chassis", "ut9c5.aqd-unittest.ms.com", "--slot", "2"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p3",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "3"]
self.noouttest(command)
def test_1035_verify_true_chassis_update(self):
command = ["show", "machine", "--machine", "ut9s03p3"]
out = self.commandtest(command)
self.matchoutput(out, "Machine: ut9s03p3", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchoutput(out, "Chassis: ut9c1.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 3", command)
def test_1040_simple_chassis_update(self):
command = ["update", "machine", "--machine", "ut9s03p4",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "4"]
self.noouttest(command)
def test_1041_verify_simple_chassis_update(self):
command = ["show", "machine", "--machine", "ut9s03p4"]
out = self.commandtest(command)
self.matchoutput(out, "Machine: ut9s03p4", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchoutput(out, "Chassis: ut9c1.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 4", command)
def test_1042_simple_chassis_update2(self):
command = ["update", "machine", "--machine", "ut9s03p5",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "5"]
self.noouttest(command)
def test_1043_verify_simple_chassis_update2(self):
command = ["show", "machine", "--machine", "ut9s03p5"]
out = self.commandtest(command)
self.matchoutput(out, "Machine: ut9s03p5", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchoutput(out, "Chassis: ut9c1.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 5", command)
def test_1044_simple_chassis_update3(self):
command = ["update", "machine", "--machine", "ut9s03p6",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "6"]
self.noouttest(command)
def test_1045_verify_simple_chassis_update3(self):
command = ["show", "machine", "--machine", "ut9s03p6"]
out = self.commandtest(command)
self.matchoutput(out, "Machine: ut9s03p6", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchoutput(out, "Chassis: ut9c1.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 6", command)
def test_1050_different_rack(self):
command = ["update", "machine", "--machine", "ut9s03p9",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "9"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p9",
"--rack", "ut8"]
self.noouttest(command)
def test_1055_verify_different_rack(self):
command = ["show", "machine", "--machine", "ut9s03p9"]
out = self.commandtest(command)
self.matchoutput(out, "Machine: ut9s03p9", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchclean(out, "Chassis: ", command)
self.matchclean(out, "Slot: ", command)
def test_1060_reuse_slot(self):
command = ["update", "machine", "--machine", "ut9s03p10",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "10"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p10",
"--clearchassis"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p10",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "10"]
self.noouttest(command)
def test_1065_verify_reuse_slot(self):
command = ["show", "machine", "--machine", "ut9s03p10"]
out = self.commandtest(command)
self.matchoutput(out, "Machine: ut9s03p10", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchoutput(out, "Chassis: ut9c1.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 10", command)
def test_1070_taken_slot(self):
command = ["update", "machine", "--machine", "ut9s03p11",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "11"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p12",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "11"]
out = self.badrequesttest(command)
self.matchoutput(out, "Chassis ut9c1.aqd-unittest.ms.com slot 11 "
"already has machine ut9s03p11", command)
def test_1075_verify_taken_slot(self):
command = ["show", "machine", "--machine", "ut9s03p11"]
out = self.commandtest(command)
self.matchoutput(out, "Machine: ut9s03p11", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchoutput(out, "Chassis: ut9c1.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 11", command)
command = ["show", "machine", "--machine", "ut9s03p12"]
out = self.commandtest(command)
self.matchoutput(out, "Machine: ut9s03p12", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchclean(out, "Chassis: ", command)
self.matchclean(out, "Slot: ", command)
def test_1080_multislot_clear(self):
command = ["update", "machine", "--machine", "ut9s03p13",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "13"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p13",
"--multislot",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "14"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p13",
"--clearchassis"]
self.noouttest(command)
def test_1085_verify_multislot_clear(self):
command = ["show", "machine", "--machine", "ut9s03p13"]
out = self.commandtest(command)
self.matchoutput(out, "Machine: ut9s03p13", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchclean(out, "Chassis: ", command)
self.matchclean(out, "Slot: ", command)
def test_1090_multislot_add(self):
command = ["update", "machine", "--machine", "ut9s03p15",
"--multislot",
"--chassis", "ut9c2.aqd-unittest.ms.com", "--slot", "1"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p15",
"--multislot",
"--chassis", "ut9c2.aqd-unittest.ms.com", "--slot", "2"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p15",
"--multislot",
"--chassis", "ut9c2.aqd-unittest.ms.com", "--slot", "3"]
self.noouttest(command)
def test_1095_verify_multislot_add(self):
command = ["show", "machine", "--machine", "ut9s03p15"]
out = self.commandtest(command)
self.matchoutput(out, "Machine: ut9s03p15", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchoutput(out, "Chassis: ut9c2.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 1", command)
self.matchoutput(out, "Slot: 2", command)
self.matchoutput(out, "Slot: 3", command)
def test_1100_multislot_update_fail(self):
command = ["update", "machine", "--machine", "ut9s03p19",
"--chassis", "ut9c2.aqd-unittest.ms.com", "--slot", "4"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p19",
"--multislot",
"--chassis", "ut9c2.aqd-unittest.ms.com", "--slot", "5"]
self.noouttest(command)
command = ["update", "machine", "--machine", "ut9s03p19",
"--chassis", "ut9c2.aqd-unittest.ms.com", "--slot", "6"]
out = self.badrequesttest(command)
self.matchoutput(out, "Use --multislot to support a machine in more "
"than one slot", command)
def test_1101_multislot_update_fail_multichassis(self):
command = ["update", "machine", "--machine", "ut9s03p19", "--multislot",
"--chassis", "ut9c1.aqd-unittest.ms.com", "--slot", "7"]
out = self.badrequesttest(command)
self.matchoutput(out, "Machine cannot be in multiple chassis. "
"Use --clearchassis to remove "
"current chassis slot information.", command)
def test_1105_verify_multislot_update_fail(self):
command = ["show", "machine", "--machine", "ut9s03p19"]
out = self.commandtest(command)
self.matchoutput(out, "Machine: ut9s03p19", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchoutput(out, "Chassis: ut9c2.aqd-unittest.ms.com", command)
self.matchoutput(out, "Slot: 4", command)
self.matchoutput(out, "Slot: 5", command)
self.matchclean(out, "Slot: 6", command)
def test_1110_move_machine_with_vms(self):
old_path = ["machine", "americas", "ut", "ut3", "ut14s1p2"]
new_path = ["machine", "americas", "ut", "ut14", "ut14s1p2"]
self.check_plenary_exists(*old_path)
self.check_plenary_gone(*new_path)
self.noouttest(["update", "machine", "--machine", "ut14s1p2",
"--rack", "ut14"])
self.check_plenary_gone(*old_path)
self.check_plenary_exists(*new_path)
def test_1115_show_ut14s1p2(self):
command = ["show", "machine", "--machine", "ut14s1p2"]
out = self.commandtest(command)
self.matchoutput(out, "Rack: ut14", command)
def test_1115_check_vm_location(self):
for i in range(0, 3):
machine = "evm%d" % (i + 50)
command = ["show", "machine", "--machine", machine]
out = self.commandtest(command)
self.matchoutput(out, "Rack: ut14", command)
def test_1120_update_ut3s01p2(self):
self.noouttest(["update", "machine", "--machine", "ut3s01p2",
"--model", "hs21-8853", "--vendor", "ibm"])
def test_1125_show_ut3s01p2(self):
command = "show machine --machine ut3s01p2"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Machine: ut3s01p2", command)
self.matchoutput(out, "Model Type: blade", command)
def test_1130_verify_initial_state(self):
command = "cat --machine evm1"
out = self.commandtest(command.split(" "))
self.searchoutput(out,
r'"cards/nic/eth0" = '
r'create\("hardware/nic/utvirt/default",\s*'
r'"boot", true,\s*'
r'"hwaddr", "00:50:56:01:20:00"\s*\);',
command)
def test_1131_update_default_nic_model(self):
command = ["update_machine", "--machine=evm1", "--model=utlarge",
"--cpucount=2", "--memory=12288"]
self.noouttest(command)
def test_1132_cat_evm1(self):
command = "cat --machine evm1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, '"location" = "ut.ny.na";', command)
self.matchoutput(out,
'include "hardware/machine/utvendor/utlarge";',
command)
self.searchoutput(out,
r'"ram" = list\(\s*'
r'create\("hardware/ram/generic",\s*'
r'"size", 12288\*MB\s*\)\s*\);',
command)
self.searchoutput(out,
r'"cpu" = list\(\s*'
r'create\("hardware/cpu/intel/l5520"\),\s*'
r'create\("hardware/cpu/intel/l5520"\)\s*\);',
command)
# Updating the model of the machine changes the NIC model from
# utvirt/default to generic/generic_nic
self.searchoutput(out,
r'"cards/nic/eth0" = '
r'create\("hardware/nic/generic/generic_nic",\s*'
r'"boot", true,\s*'
r'"hwaddr", "00:50:56:01:20:00"\s*\);',
command)
def test_1132_show_evm1(self):
command = "show machine --machine evm1"
out = self.commandtest(command.split(" "))
self.matchoutput(out, "Machine: evm1", command)
self.matchoutput(out, "Model Type: virtual_machine", command)
self.matchoutput(out, "Hosted by: ESX Cluster utecl1", command)
self.matchoutput(out, "Building: ut", command)
self.matchoutput(out, "Vendor: utvendor Model: utlarge", command)
self.matchoutput(out, "Cpu: l5520 x 2", command)
self.matchoutput(out, "Memory: 12288 MB", command)
self.searchoutput(out,
r"Interface: eth0 00:50:56:01:20:00 \[boot, default_route\]\s*"
r"Type: public\s*"
r"Vendor: generic Model: generic_nic$",
command)
def test_1135_restore_status_quo(self):
command = ["update_machine", "--machine=evm1", "--model=utmedium",
"--cpucount=1", "--memory=8192"]
self.noouttest(command)
def test_2000_bad_cpu_vendor(self):
self.notfoundtest(["update", "machine", "--machine", "ut3c1n4",
"--cpuvendor", "no-such-vendor"])
def test_2000_bad_cpu_name(self):
self.notfoundtest(["update", "machine", "--machine", "ut3c1n4",
"--cpuname", "no-such-cpu"])
def test_2000_phys_to_cluster(self):
command = ["update_machine", "--machine=ut9s03p19", "--cluster=utecl1"]
out = self.badrequesttest(command)
self.matchoutput(out, "Cannot convert a physical machine to virtual.",
command)
def test_2000_steal_ip(self):
ip = self.net["unknown0"].usable[2]
command = ["update_machine", "--machine", "ut3c1n3", "--ip", ip]
out = self.badrequesttest(command)
self.matchoutput(out,
"IP address %s is already in use by public interface "
"eth0 of machine unittest00.one-nyp.ms.com" % ip,
command)
def test_2010_missing_slot(self):
command = ["update", "machine", "--machine", "ut9s03p7",
"--chassis", "ut9c1.aqd-unittest.ms.com"]
out = self.badrequesttest(command)
self.matchoutput(out, "Option --chassis requires --slot information",
command)
def test_2015_verify_missing_slot(self):
command = ["show", "machine", "--machine", "ut9s03p7"]
out = self.commandtest(command)
self.matchoutput(out, "Machine: ut9s03p7", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchclean(out, "Chassis: ", command)
self.matchclean(out, "Slot: ", command)
def test_2020_missing_chassis(self):
command = ["update", "machine", "--machine", "ut9s03p8",
"--slot", "8"]
out = self.badrequesttest(command)
self.matchoutput(out, "Option --slot requires --chassis information",
command)
def test_2025_verify_missing_chassis(self):
command = ["show", "machine", "--machine", "ut9s03p8"]
out = self.commandtest(command)
self.matchoutput(out, "Machine: ut9s03p8", command)
self.matchoutput(out, "Model Type: blade", command)
self.matchclean(out, "Chassis: ", command)
self.matchclean(out, "Slot: ", command)
def test_2030_reject_machine_uri(self):
command = ["update", "machine", "--machine", "ut3c1n9",
"--uri", "file:///somepath/to/ovf"]
out = self.badrequesttest(command)
self.matchoutput(out, "URI can be specified only for virtual "
"machines and the model's type is blade",
command)
def test_2035_verify_reject_machine_uri(self):
command = ["show", "machine", "--machine", "ut3c1n9"]
out = self.commandtest(command)
self.matchclean(out, "URI:", command)
def test_2040_check_aurora_host_machine(self):
ip = self.net["tor_net_0"].usable[4]
command = ["show", "host", "--hostname", "test-aurora-default-os.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Primary Name: test-aurora-default-os.ms.com [{}]".format(ip), command)
def test_2045_update_aurora_host_machine(self):
new_ip = self.net["tor_net_0"].usable[7]
command = ["update", "machine", "--hostname", "test-aurora-default-os.ms.com", "--ip", new_ip] + self.valid_just_sn
self.noouttest(command)
def test_2050_verify_aurora_host_machine(self):
ip = self.net["tor_net_0"].usable[7]
command = ["show", "host", "--hostname", "test-aurora-default-os.ms.com"]
out = self.commandtest(command)
self.matchoutput(out, "Primary Name: test-aurora-default-os.ms.com [{}]".format(ip), command)
def test_2055_update_aurora_host_machine_back(self):
ip = self.net["tor_net_0"].usable[4]
command = ["update", "machine", "--hostname", "test-aurora-default-os.ms.com", "--ip", ip] + self.valid_just_sn
self.noouttest(command)
# These tests would be nice, but twisted just ignores the permission
# on the files since we're still the owner. Which is good, but means
# the recovery routines can't be easily tested.
# def testfailbrokenplenary(self):
# template = self.plenary_name("machine", "americas", "ut", "ut9",
# "ut9s03p20")
# os.chmod(template, 0000)
# command = ["update_machine", "--machine=ut9s03p20", "--serial=20"]
# out = self.badrequesttest(command)
# self.matchoutput(out, "FIXME", command)
# def testverifyfailbrokenplenary(self):
# # Fixing the previous breakage... not actually necessary for this test.
# template = self.plenary_name("machine", "americas", "ut", "ut9",
# "ut9s03p20")
# os.chmod(template, 0644)
# command = ["show_machine", "--machine=ut9s03p20"]
# out = self.commandtest(command)
# self.matchclean(out, "Serial", command)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestUpdateMachine)
unittest.TextTestRunner(verbosity=2).run(suite)
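
# --- Illustrative note (an assumption, not part of the test suite) ---
# Each argument list such as ["update", "machine", "--machine", "ut3c1n3",
# "--slot", "10"] mirrors an invocation of the aq command-line client:
#
#     aq update_machine --machine ut3c1n3 --slot 10 --serial USN99C5553
#
# The TestBrokerCommand helpers then assert on the broker's response:
# noouttest() expects silent success, commandtest() returns the output
# for matching, and badrequesttest() expects the command to be rejected.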
|
quattor/aquilon
|
tests/broker/test_update_machine.py
|
Python
|
apache-2.0
| 29,626
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.com/license.html.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/.
from __future__ import with_statement
from datetime import datetime
from trac.admin import *
from trac.core import *
from trac.perm import PermissionSystem
from trac.resource import ResourceNotFound
from trac.ticket import model
from trac.util import getuser
from trac.util.datefmt import utc, parse_date, format_date, format_datetime, \
get_datetime_format_hint, user_time
from trac.util.text import print_table, printout, exception_to_unicode
from trac.util.translation import _, N_, gettext
from trac.web.chrome import Chrome, add_notice, add_warning
class TicketAdminPanel(Component):
implements(IAdminPanelProvider, IAdminCommandProvider)
abstract = True
_label = (N_('(Undefined)'), N_('(Undefined)'))
    # i18n note: use gettext() whenever referring to the above as text
    #            labels, and don't use it when they serve as field names
    #            (after a call to `.lower()`)
# IAdminPanelProvider methods
def get_admin_panels(self, req):
if 'TICKET_ADMIN' in req.perm:
# in global scope show only products
# in local scope everything but products
parent = getattr(self.env, 'parent', None)
if (parent is None and self._type == 'products') or \
(parent and self._type != 'products'):
yield ('ticket', _('Ticket System'), self._type,
gettext(self._label[1]))
def render_admin_panel(self, req, cat, page, version):
req.perm.require('TICKET_ADMIN')
# Trap AssertionErrors and convert them to TracErrors
try:
return self._render_admin_panel(req, cat, page, version)
except AssertionError, e:
raise TracError(e)
def _save_config(config, req, log):
"""Try to save the config, and display either a success notice or a
failure warning.
"""
try:
config.save()
add_notice(req, _('Your changes have been saved.'))
except Exception, e:
log.error('Error writing to trac.ini: %s', exception_to_unicode(e))
add_warning(req, _('Error writing to trac.ini, make sure it is '
'writable by the web server. Your changes have not '
'been saved.'))
class ComponentAdminPanel(TicketAdminPanel):
_type = 'components'
_label = (N_('Component'), N_('Components'))
# TicketAdminPanel methods
def _render_admin_panel(self, req, cat, page, component):
# Detail view?
if component:
comp = model.Component(self.env, component)
if req.method == 'POST':
if req.args.get('save'):
comp.name = name = req.args.get('name')
comp.owner = req.args.get('owner')
comp.description = req.args.get('description')
try:
comp.update()
except self.env.db_exc.IntegrityError:
raise TracError(_('The component "%(name)s" already '
'exists.', name=name))
add_notice(req, _('Your changes have been saved.'))
req.redirect(req.href.admin(cat, page))
elif req.args.get('cancel'):
req.redirect(req.href.admin(cat, page))
Chrome(self.env).add_wiki_toolbars(req)
data = {'view': 'detail', 'component': comp}
else:
default = self.config.get('ticket', 'default_component')
if req.method == 'POST':
# Add Component
if req.args.get('add') and req.args.get('name'):
name = req.args.get('name')
try:
comp = model.Component(self.env, name=name)
except ResourceNotFound:
comp = model.Component(self.env)
comp.name = name
if req.args.get('owner'):
comp.owner = req.args.get('owner')
comp.insert()
add_notice(req, _('The component "%(name)s" has been '
'added.', name=name))
req.redirect(req.href.admin(cat, page))
else:
if comp.name is None:
raise TracError(_("Invalid component name."))
raise TracError(_("Component %(name)s already exists.",
name=name))
# Remove components
elif req.args.get('remove'):
sel = req.args.get('sel')
if not sel:
raise TracError(_('No component selected'))
if not isinstance(sel, list):
sel = [sel]
with self.env.db_transaction:
for name in sel:
model.Component(self.env, name).delete()
add_notice(req, _("The selected components have been "
"removed."))
req.redirect(req.href.admin(cat, page))
# Set default component
elif req.args.get('apply'):
name = req.args.get('default')
if name and name != default:
self.log.info("Setting default component to %s", name)
self.config.set('ticket', 'default_component', name)
_save_config(self.config, req, self.log)
req.redirect(req.href.admin(cat, page))
data = {'view': 'list',
'components': model.Component.select(self.env),
'default': default}
if self.config.getbool('ticket', 'restrict_owner'):
perm = PermissionSystem(self.env)
def valid_owner(username):
return perm.get_user_permissions(username).get('TICKET_MODIFY')
data['owners'] = [username for username, name, email
in self.env.get_known_users()
if valid_owner(username)]
data['owners'].insert(0, '')
data['owners'].sort()
else:
data['owners'] = None
return 'admin_components.html', data
# IAdminCommandProvider methods
def get_admin_commands(self):
yield ('component list', '',
'Show available components',
None, self._do_list)
yield ('component add', '<name> <owner>',
'Add a new component',
self._complete_add, self._do_add)
yield ('component rename', '<name> <newname>',
'Rename a component',
self._complete_remove_rename, self._do_rename)
yield ('component remove', '<name>',
'Remove/uninstall a component',
self._complete_remove_rename, self._do_remove)
yield ('component chown', '<name> <owner>',
'Change component ownership',
self._complete_chown, self._do_chown)
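    # Illustrative trac-admin session using the commands yielded above
    # (environment path and names are hypothetical):
    #
    #     $ trac-admin /path/to/env component add webfrontend alice
    #     $ trac-admin /path/to/env component chown webfrontend bob
    #     $ trac-admin /path/to/env component list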
def get_component_list(self):
return [c.name for c in model.Component.select(self.env)]
def get_user_list(self):
return [username for username, in
self.env.db_query("SELECT DISTINCT username FROM permission")]
def _complete_add(self, args):
if len(args) == 2:
return self.get_user_list()
def _complete_remove_rename(self, args):
if len(args) == 1:
return self.get_component_list()
def _complete_chown(self, args):
if len(args) == 1:
return self.get_component_list()
elif len(args) == 2:
return self.get_user_list()
def _do_list(self):
print_table([(c.name, c.owner)
for c in model.Component.select(self.env)],
[_('Name'), _('Owner')])
def _do_add(self, name, owner):
component = model.Component(self.env)
component.name = name
component.owner = owner
component.insert()
def _do_rename(self, name, newname):
component = model.Component(self.env, name)
component.name = newname
component.update()
def _do_remove(self, name):
model.Component(self.env, name).delete()
def _do_chown(self, name, owner):
component = model.Component(self.env, name)
component.owner = owner
component.update()
class MilestoneAdminPanel(TicketAdminPanel):
_type = 'milestones'
_label = (N_('Milestone'), N_('Milestones'))
# IAdminPanelProvider methods
def get_admin_panels(self, req):
if 'MILESTONE_VIEW' in req.perm:
return TicketAdminPanel.get_admin_panels(self, req)
return iter([])
# TicketAdminPanel methods
def _render_admin_panel(self, req, cat, page, milestone):
req.perm.require('MILESTONE_VIEW')
# Detail view?
if milestone:
mil = model.Milestone(self.env, milestone)
if req.method == 'POST':
if req.args.get('save'):
req.perm.require('MILESTONE_MODIFY')
mil.name = name = req.args.get('name')
mil.due = mil.completed = None
due = req.args.get('duedate', '')
if due:
mil.due = user_time(req, parse_date, due,
hint='datetime')
if req.args.get('completed', False):
completed = req.args.get('completeddate', '')
mil.completed = user_time(req, parse_date, completed,
hint='datetime')
if mil.completed > datetime.now(utc):
raise TracError(_('Completion date may not be in '
'the future'),
_('Invalid Completion Date'))
mil.description = req.args.get('description', '')
try:
mil.update()
except self.env.db_exc.IntegrityError:
raise TracError(_('The milestone "%(name)s" already '
'exists.', name=name))
add_notice(req, _('Your changes have been saved.'))
req.redirect(req.href.admin(cat, page))
elif req.args.get('cancel'):
req.redirect(req.href.admin(cat, page))
Chrome(self.env).add_wiki_toolbars(req)
data = {'view': 'detail', 'milestone': mil}
else:
default = self.config.get('ticket', 'default_milestone')
if req.method == 'POST':
# Add Milestone
if req.args.get('add') and req.args.get('name'):
req.perm.require('MILESTONE_CREATE')
name = req.args.get('name')
try:
mil = model.Milestone(self.env, name=name)
except ResourceNotFound:
mil = model.Milestone(self.env)
mil.name = name
if req.args.get('duedate'):
mil.due = user_time(req, parse_date,
req.args.get('duedate'),
hint='datetime')
mil.insert()
add_notice(req, _('The milestone "%(name)s" has been '
'added.', name=name))
req.redirect(req.href.admin(cat, page))
else:
if mil.name is None:
raise TracError(_('Invalid milestone name.'))
raise TracError(_("Milestone %(name)s already exists.",
name=name))
# Remove milestone
elif req.args.get('remove'):
req.perm.require('MILESTONE_DELETE')
sel = req.args.get('sel')
if not sel:
raise TracError(_('No milestone selected'))
if not isinstance(sel, list):
sel = [sel]
with self.env.db_transaction:
for name in sel:
mil = model.Milestone(self.env, name)
mil.delete(author=req.authname)
add_notice(req, _("The selected milestones have been "
"removed."))
req.redirect(req.href.admin(cat, page))
# Set default milestone
elif req.args.get('apply'):
name = req.args.get('default')
if name and name != default:
self.log.info("Setting default milestone to %s", name)
self.config.set('ticket', 'default_milestone', name)
_save_config(self.config, req, self.log)
req.redirect(req.href.admin(cat, page))
# Get ticket count
milestones = [
(milestone, self.env.db_query("""
SELECT COUNT(*) FROM ticket WHERE milestone=%s
""", (milestone.name,))[0][0])
for milestone in model.Milestone.select(self.env)]
data = {'view': 'list',
'milestones': milestones,
'default': default}
Chrome(self.env).add_jquery_ui(req)
data.update({
'datetime_hint': get_datetime_format_hint(req.lc_time),
})
return 'admin_milestones.html', data
# IAdminCommandProvider methods
def get_admin_commands(self):
yield ('milestone list', '',
"Show milestones",
None, self._do_list)
yield ('milestone add', '<name> [due]',
"Add milestone",
None, self._do_add)
yield ('milestone rename', '<name> <newname>',
"Rename milestone",
self._complete_name, self._do_rename)
yield ('milestone due', '<name> <due>',
"""Set milestone due date
The <due> date must be specified in the "%s" format.
Alternatively, "now" can be used to set the due date to the
current time. To remove the due date from a milestone, specify
an empty string ("").
""" % console_date_format_hint,
self._complete_name, self._do_due)
yield ('milestone completed', '<name> <completed>',
"""Set milestone complete date
The <completed> date must be specified in the "%s" format.
Alternatively, "now" can be used to set the completion date to
the current time. To remove the completion date from a
milestone, specify an empty string ("").
""" % console_date_format_hint,
self._complete_name, self._do_completed)
yield ('milestone remove', '<name>',
"Remove milestone",
self._complete_name, self._do_remove)
def get_milestone_list(self):
return [m.name for m in model.Milestone.select(self.env)]
def _complete_name(self, args):
if len(args) == 1:
return self.get_milestone_list()
def _do_list(self):
print_table([(m.name, m.due and
format_date(m.due, console_date_format),
m.completed and
format_datetime(m.completed, console_datetime_format))
for m in model.Milestone.select(self.env)],
[_("Name"), _("Due"), _("Completed")])
def _do_add(self, name, due=None):
milestone = model.Milestone(self.env)
milestone.name = name
if due is not None:
milestone.due = parse_date(due, hint='datetime')
milestone.insert()
def _do_rename(self, name, newname):
milestone = model.Milestone(self.env, name)
milestone.name = newname
milestone.update()
def _do_due(self, name, due):
milestone = model.Milestone(self.env, name)
milestone.due = due and parse_date(due, hint='datetime')
milestone.update()
def _do_completed(self, name, completed):
milestone = model.Milestone(self.env, name)
milestone.completed = completed and parse_date(completed,
hint='datetime')
milestone.update()
def _do_remove(self, name):
model.Milestone(self.env, name).delete(author=getuser())
class VersionAdminPanel(TicketAdminPanel):
_type = 'versions'
_label = (N_('Version'), N_('Versions'))
# TicketAdminPanel methods
def _render_admin_panel(self, req, cat, page, version):
# Detail view?
if version:
ver = model.Version(self.env, version)
if req.method == 'POST':
if req.args.get('save'):
ver.name = name = req.args.get('name')
if req.args.get('time'):
ver.time = user_time(req, parse_date,
req.args.get('time'),
hint='datetime')
else:
ver.time = None # unset
ver.description = req.args.get('description')
try:
ver.update()
except self.env.db_exc.IntegrityError:
raise TracError(_('The version "%(name)s" already '
'exists.', name=name))
add_notice(req, _('Your changes have been saved.'))
req.redirect(req.href.admin(cat, page))
elif req.args.get('cancel'):
req.redirect(req.href.admin(cat, page))
Chrome(self.env).add_wiki_toolbars(req)
data = {'view': 'detail', 'version': ver}
else:
default = self.config.get('ticket', 'default_version')
if req.method == 'POST':
# Add Version
if req.args.get('add') and req.args.get('name'):
name = req.args.get('name')
try:
ver = model.Version(self.env, name=name)
except ResourceNotFound:
ver = model.Version(self.env)
ver.name = name
if req.args.get('time'):
ver.time = user_time(req, parse_date,
req.args.get('time'),
hint='datetime')
ver.insert()
add_notice(req, _('The version "%(name)s" has been '
'added.', name=name))
req.redirect(req.href.admin(cat, page))
else:
if ver.name is None:
raise TracError(_("Invalid version name."))
raise TracError(_("Version %(name)s already exists.",
name=name))
# Remove versions
elif req.args.get('remove'):
sel = req.args.get('sel')
if not sel:
raise TracError(_("No version selected"))
if not isinstance(sel, list):
sel = [sel]
with self.env.db_transaction:
for name in sel:
ver = model.Version(self.env, name)
ver.delete()
add_notice(req, _("The selected versions have been "
"removed."))
req.redirect(req.href.admin(cat, page))
# Set default version
elif req.args.get('apply'):
name = req.args.get('default')
if name and name != default:
self.log.info("Setting default version to %s", name)
self.config.set('ticket', 'default_version', name)
_save_config(self.config, req, self.log)
req.redirect(req.href.admin(cat, page))
data = {'view': 'list',
'versions': model.Version.select(self.env),
'default': default}
Chrome(self.env).add_jquery_ui(req)
data.update({
'datetime_hint': get_datetime_format_hint(req.lc_time),
})
return 'admin_versions.html', data
# IAdminCommandProvider methods
def get_admin_commands(self):
yield ('version list', '',
"Show versions",
None, self._do_list)
yield ('version add', '<name> [time]',
"Add version",
None, self._do_add)
yield ('version rename', '<name> <newname>',
"Rename version",
self._complete_name, self._do_rename)
yield ('version time', '<name> <time>',
"""Set version date
The <time> must be specified in the "%s" format. Alternatively,
"now" can be used to set the version date to the current time.
To remove the date from a version, specify an empty string
("").
""" % console_date_format_hint,
self._complete_name, self._do_time)
yield ('version remove', '<name>',
"Remove version",
self._complete_name, self._do_remove)
def get_version_list(self):
return [v.name for v in model.Version.select(self.env)]
def _complete_name(self, args):
if len(args) == 1:
return self.get_version_list()
def _do_list(self):
print_table([(v.name,
v.time and format_date(v.time, console_date_format))
for v in model.Version.select(self.env)],
[_("Name"), _("Time")])
def _do_add(self, name, time=None):
version = model.Version(self.env)
version.name = name
if time is not None:
version.time = time and parse_date(time, hint='datetime')
version.insert()
def _do_rename(self, name, newname):
version = model.Version(self.env, name)
version.name = newname
version.update()
def _do_time(self, name, time):
version = model.Version(self.env, name)
version.time = time and parse_date(time, hint='datetime')
version.update()
def _do_remove(self, name):
model.Version(self.env, name).delete()
class AbstractEnumAdminPanel(TicketAdminPanel):
abstract = True
_type = 'unknown'
_enum_cls = None
# TicketAdminPanel methods
def _render_admin_panel(self, req, cat, page, path_info):
label = [gettext(each) for each in self._label]
data = {'label_singular': label[0], 'label_plural': label[1],
'type': self._type}
# Detail view?
if path_info:
enum = self._enum_cls(self.env, path_info)
if req.method == 'POST':
if req.args.get('save'):
enum.name = name = req.args.get('name')
try:
enum.update()
except self.env.db_exc.IntegrityError:
raise TracError(_('%(type)s value "%(name)s" already '
'exists', type=label[0], name=name))
add_notice(req, _("Your changes have been saved."))
req.redirect(req.href.admin(cat, page))
elif req.args.get('cancel'):
req.redirect(req.href.admin(cat, page))
data.update({'view': 'detail', 'enum': enum})
else:
default = self.config.get('ticket', 'default_%s' % self._type)
if req.method == 'POST':
# Add enum
if req.args.get('add') and req.args.get('name'):
name = req.args.get('name')
try:
enum = self._enum_cls(self.env, name=name)
except ResourceNotFound:
enum = self._enum_cls(self.env)
enum.name = name
enum.insert()
add_notice(req, _('The %(field)s value "%(name)s" has '
'been added.',
field=label[0], name=name))
req.redirect(req.href.admin(cat, page))
else:
if enum.name is None:
raise TracError(_("Invalid %(type)s value.",
type=label[0]))
raise TracError(_('%(type)s value "%(name)s" already '
'exists', type=label[0], name=name))
# Remove enums
elif req.args.get('remove'):
sel = req.args.get('sel')
if not sel:
raise TracError(_("No %s selected") % self._type)
if not isinstance(sel, list):
sel = [sel]
with self.env.db_transaction:
for name in sel:
self._enum_cls(self.env, name).delete()
add_notice(req, _("The selected %(field)s values have "
"been removed.", field=label[0]))
req.redirect(req.href.admin(cat, page))
# Apply changes
elif req.args.get('apply'):
changed = False
# Set default value
name = req.args.get('default')
if name and name != default:
self.log.info("Setting default %s to %s",
self._type, name)
self.config.set('ticket', 'default_%s' % self._type,
name)
try:
self.config.save()
changed = True
except Exception, e:
self.log.error("Error writing to trac.ini: %s",
exception_to_unicode(e))
add_warning(req,
_("Error writing to trac.ini, make "
"sure it is writable by the web "
"server. The default value has not "
"been saved."))
# Change enum values
order = dict([(str(int(key[6:])),
str(int(req.args.get(key)))) for key
in req.args.keys()
if key.startswith('value_')])
values = dict([(val, True) for val in order.values()])
if len(order) != len(values):
raise TracError(_("Order numbers must be unique"))
with self.env.db_transaction:
for enum in self._enum_cls.select(self.env):
new_value = order[enum.value]
if new_value != enum.value:
enum.value = new_value
enum.update()
changed = True
if changed:
add_notice(req, _("Your changes have been saved."))
req.redirect(req.href.admin(cat, page))
data.update(dict(enums=list(self._enum_cls.select(self.env)),
default=default, view='list'))
return 'admin_enums.html', data
# IAdminCommandProvider methods
_command_help = {
'list': 'Show possible ticket %s',
'add': 'Add a %s value option',
'change': 'Change a %s value',
'remove': 'Remove a %s value',
'order': 'Move a %s value up or down in the list',
}
def get_admin_commands(self):
enum_type = getattr(self, '_command_type', self._type)
label = tuple(each.lower() for each in self._label)
yield ('%s list' % enum_type, '',
self._command_help['list'] % label[1],
None, self._do_list)
yield ('%s add' % enum_type, '<value>',
self._command_help['add'] % label[0],
None, self._do_add)
yield ('%s change' % enum_type, '<value> <newvalue>',
self._command_help['change'] % label[0],
self._complete_change_remove, self._do_change)
yield ('%s remove' % enum_type, '<value>',
self._command_help['remove'] % label[0],
self._complete_change_remove, self._do_remove)
yield ('%s order' % enum_type, '<value> up|down',
self._command_help['order'] % label[0],
self._complete_order, self._do_order)
def get_enum_list(self):
return [e.name for e in self._enum_cls.select(self.env)]
def _complete_change_remove(self, args):
if len(args) == 1:
return self.get_enum_list()
def _complete_order(self, args):
if len(args) == 1:
return self.get_enum_list()
elif len(args) == 2:
return ['up', 'down']
def _do_list(self):
print_table([(e.name,) for e in self._enum_cls.select(self.env)],
[_('Possible Values')])
def _do_add(self, name):
enum = self._enum_cls(self.env)
enum.name = name
enum.insert()
def _do_change(self, name, newname):
enum = self._enum_cls(self.env, name)
enum.name = newname
enum.update()
def _do_remove(self, value):
self._enum_cls(self.env, value).delete()
def _do_order(self, name, up_down):
if up_down not in ('up', 'down'):
raise AdminCommandError(_("Invalid up/down value: %(value)s",
value=up_down))
direction = -1 if up_down == 'up' else 1
enum1 = self._enum_cls(self.env, name)
enum1.value = int(float(enum1.value) + direction)
for enum2 in self._enum_cls.select(self.env):
if int(float(enum2.value)) == enum1.value:
enum2.value = int(float(enum2.value) - direction)
break
else:
return
with self.env.db_transaction:
enum1.update()
enum2.update()
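# To make the reorder semantics above concrete, here is a hypothetical
# example (names and values are illustrative only): with priorities stored
# as blocker=1, critical=2, major=3, the command "priority order major up"
# sets major's value to 2, finds the enum currently holding value 2
# (critical), bumps it to 3, and commits both updates in one transaction.
# If no enum holds the target value, the for/else falls through and
# nothing is written.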
class PriorityAdminPanel(AbstractEnumAdminPanel):
_type = 'priority'
_enum_cls = model.Priority
_label = (N_('Priority'), N_('Priorities'))
class ResolutionAdminPanel(AbstractEnumAdminPanel):
_type = 'resolution'
_enum_cls = model.Resolution
_label = (N_('Resolution'), N_('Resolutions'))
class SeverityAdminPanel(AbstractEnumAdminPanel):
_type = 'severity'
_enum_cls = model.Severity
_label = (N_('Severity'), N_('Severities'))
class TicketTypeAdminPanel(AbstractEnumAdminPanel):
_type = 'type'
_enum_cls = model.Type
_label = (N_('Ticket Type'), N_('Ticket Types'))
_command_type = 'ticket_type'
_command_help = {
'list': 'Show possible %s',
'add': 'Add a %s',
'change': 'Change a %s',
'remove': 'Remove a %s',
'order': 'Move a %s up or down in the list',
}
class TicketAdmin(Component):
"""trac-admin command provider for ticket administration."""
implements(IAdminCommandProvider)
# IAdminCommandProvider methods
def get_admin_commands(self):
yield ('ticket remove', '<number>',
'Remove ticket',
None, self._do_remove)
def _do_remove(self, number):
try:
number = int(number)
except ValueError:
raise AdminCommandError(_('<number> must be a number'))
with self.env.db_transaction:
model.Ticket(self.env, number).delete()
printout(_('Ticket #%(num)s and all associated data removed.',
num=number))
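# Each tuple yielded by the get_admin_commands() implementations above
# follows trac-admin's IAdminCommandProvider contract:
# (command, argument hint, help text, completion callback, execute callback).
# A hypothetical console session wired through ComponentAdminPanel:
#
#     $ trac-admin /path/to/env component add webui alice
#     # dispatches to ComponentAdminPanel._do_add('webui', 'alice')
#     $ trac-admin /path/to/env component chown webui bob
#     # dispatches to ComponentAdminPanel._do_chown('webui', 'bob')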
|
apache/bloodhound
|
trac/trac/ticket/admin.py
|
Python
|
apache-2.0
| 33,531
|
#!/usr/bin/env python
# Authors:
# Trevor Perrin
# Marcelo Fernandez - bugfix and NPN support
# Martin von Loewis - python 3 port
#
# See the LICENSE file for legal information regarding use of this file.
from __future__ import print_function
import sys
import os
import os.path
import socket
import time
import getopt
try:
import httplib
from SocketServer import *
from BaseHTTPServer import *
from SimpleHTTPServer import *
except ImportError:
# Python 3.x
from http import client as httplib
from socketserver import *
from http.server import *
from http.server import SimpleHTTPRequestHandler
if __name__ != "__main__":
raise "This must be run as a command, not used as a module!"
from tlslite.api import *
from tlslite.constants import CipherSuite, HashAlgorithm, SignatureAlgorithm, \
GroupName
from tlslite import __version__
try:
from tack.structures.Tack import Tack
except ImportError:
pass
def printUsage(s=None):
if s:
print("ERROR: %s" % s)
print("")
print("Version: %s" % __version__)
print("")
print("RNG: %s" % prngName)
print("")
print("Modules:")
if tackpyLoaded:
print(" tackpy : Loaded")
else:
print(" tackpy : Not Loaded")
if m2cryptoLoaded:
print(" M2Crypto : Loaded")
else:
print(" M2Crypto : Not Loaded")
if pycryptoLoaded:
print(" pycrypto : Loaded")
else:
print(" pycrypto : Not Loaded")
if gmpyLoaded:
print(" GMPY : Loaded")
else:
print(" GMPY : Not Loaded")
print("")
print("""Commands:
server
[-k KEY] [-c CERT] [-t TACK] [-v VERIFIERDB] [-d DIR]
[--reqcert] HOST:PORT
client
[-k KEY] [-c CERT] [-u USER] [-p PASS]
HOST:PORT
""")
sys.exit(-1)
def printError(s):
"""Print error message and exit"""
sys.stderr.write("ERROR: %s\n" % s)
sys.exit(-1)
def handleArgs(argv, argString, flagsList=[]):
# Convert to getopt argstring format:
# Add ":" after each arg, ie "abc" -> "a:b:c:"
getOptArgString = ":".join(argString) + ":"
try:
opts, argv = getopt.getopt(argv, getOptArgString, flagsList)
except getopt.GetoptError as e:
printError(e)
# Default values if arg not present
privateKey = None
certChain = None
username = None
password = None
tacks = None
verifierDB = None
reqCert = False
directory = None
for opt, arg in opts:
if opt == "-k":
s = open(arg, "rb").read()
if sys.version_info[0] >= 3:
s = str(s, 'utf-8')
privateKey = parsePEMKey(s, private=True)
elif opt == "-c":
s = open(arg, "rb").read()
if sys.version_info[0] >= 3:
s = str(s, 'utf-8')
x509 = X509()
x509.parse(s)
certChain = X509CertChain([x509])
elif opt == "-u":
username = arg
elif opt == "-p":
password = arg
elif opt == "-t":
if tackpyLoaded:
s = open(arg, "rU").read()
tacks = Tack.createFromPemList(s)
elif opt == "-v":
verifierDB = VerifierDB(arg)
verifierDB.open()
elif opt == "-d":
directory = arg
elif opt == "--reqcert":
reqCert = True
else:
assert(False)
if not argv:
printError("Missing address")
if len(argv)>1:
printError("Too many arguments")
#Split address into hostname/port tuple
address = argv[0]
address = address.split(":")
if len(address) != 2:
raise SyntaxError("Must specify <host>:<port>")
address = ( address[0], int(address[1]) )
# Populate the return list
retList = [address]
if "k" in argString:
retList.append(privateKey)
if "c" in argString:
retList.append(certChain)
if "u" in argString:
retList.append(username)
if "p" in argString:
retList.append(password)
if "t" in argString:
retList.append(tacks)
if "v" in argString:
retList.append(verifierDB)
if "d" in argString:
retList.append(directory)
if "reqcert" in flagsList:
retList.append(reqCert)
return retList
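# A minimal sketch of how handleArgs() is driven (hypothetical values).
# The argString "kcup" expands to the getopt spec "k:c:u:p:", and the
# return list is ordered by the letters of argString, so callers must
# unpack in that same order:
#
#     address, privateKey, certChain, username, password = \
#         handleArgs(["-u", "alice", "-p", "secret", "host:4443"], "kcup")
#     # address == ("host", 4443), username == "alice", password == "secret"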
def printGoodConnection(connection, seconds):
print(" Handshake time: %.3f seconds" % seconds)
print(" Version: %s" % connection.getVersionName())
print(" Cipher: %s %s" % (connection.getCipherName(),
connection.getCipherImplementation()))
print(" Ciphersuite: {0}".\
format(CipherSuite.ietfNames[connection.session.cipherSuite]))
if connection.session.srpUsername:
print(" Client SRP username: %s" % connection.session.srpUsername)
if connection.session.clientCertChain:
print(" Client X.509 SHA1 fingerprint: %s" %
connection.session.clientCertChain.getFingerprint())
else:
print(" No client certificate provided by peer")
if connection.session.serverCertChain:
print(" Server X.509 SHA1 fingerprint: %s" %
connection.session.serverCertChain.getFingerprint())
if connection.version >= (3, 3) and connection.serverSigAlg is not None:
print(" Key exchange signature: {1}+{0}".format(\
HashAlgorithm.toStr(connection.serverSigAlg[0]),
SignatureAlgorithm.toStr(connection.serverSigAlg[1])))
if connection.ecdhCurve is not None:
print(" Group used for key exchange: {0}".format(\
GroupName.toStr(connection.ecdhCurve)))
if connection.dhGroupSize is not None:
print(" DH group size: {0} bits".format(connection.dhGroupSize))
if connection.session.serverName:
print(" SNI: %s" % connection.session.serverName)
if connection.session.tackExt:
if connection.session.tackInHelloExt:
emptyStr = "\n (via TLS Extension)"
else:
emptyStr = "\n (via TACK Certificate)"
print(" TACK: %s" % emptyStr)
print(str(connection.session.tackExt))
print(" Next-Protocol Negotiated: %s" % connection.next_proto)
print(" Encrypt-then-MAC: {0}".format(connection.encryptThenMAC))
def clientCmd(argv):
(address, privateKey, certChain, username, password) = \
handleArgs(argv, "kcup")
if (certChain and not privateKey) or (not certChain and privateKey):
raise SyntaxError("Must specify CERT and KEY together")
if (username and not password) or (not username and password):
raise SyntaxError("Must specify USER with PASS")
if certChain and username:
raise SyntaxError("Can use SRP or client cert for auth, not both")
#Connect to server
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(5)
sock.connect(address)
connection = TLSConnection(sock)
settings = HandshakeSettings()
settings.useExperimentalTackExtension = True
try:
start = time.clock()
if username and password:
connection.handshakeClientSRP(username, password,
settings=settings, serverName=address[0])
else:
connection.handshakeClientCert(certChain, privateKey,
settings=settings, serverName=address[0])
stop = time.clock()
print("Handshake success")
except TLSLocalAlert as a:
if a.description == AlertDescription.user_canceled:
print(str(a))
else:
raise
sys.exit(-1)
except TLSRemoteAlert as a:
if a.description == AlertDescription.unknown_psk_identity:
if username:
print("Unknown username")
else:
raise
elif a.description == AlertDescription.bad_record_mac:
if username:
print("Bad username or password")
else:
raise
elif a.description == AlertDescription.handshake_failure:
print("Unable to negotiate mutually acceptable parameters")
else:
raise
sys.exit(-1)
printGoodConnection(connection, stop-start)
connection.close()
def serverCmd(argv):
(address, privateKey, certChain, tacks,
verifierDB, directory, reqCert) = handleArgs(argv, "kctbvd", ["reqcert"])
if (certChain and not privateKey) or (not certChain and privateKey):
raise SyntaxError("Must specify CERT and KEY together")
if tacks and not certChain:
raise SyntaxError("Must specify CERT with Tacks")
print("I am an HTTPS test server, I will listen on %s:%d" %
(address[0], address[1]))
if directory:
os.chdir(directory)
print("Serving files from %s" % os.getcwd())
if certChain and privateKey:
print("Using certificate and private key...")
if verifierDB:
print("Using verifier DB...")
if tacks:
print("Using Tacks...")
if reqCert:
print("Asking for client certificates...")
#############
sessionCache = SessionCache()
username = None
class MyHTTPServer(ThreadingMixIn, TLSSocketServerMixIn, HTTPServer):
def handshake(self, connection):
print(" ")
print("About to handshake...")
activationFlags = 0
if tacks:
if len(tacks) == 1:
activationFlags = 1
elif len(tacks) == 2:
activationFlags = 3
try:
start = time.clock()
settings = HandshakeSettings()
settings.useExperimentalTackExtension=True
connection.handshakeServer(certChain=certChain,
privateKey=privateKey,
verifierDB=verifierDB,
tacks=tacks,
activationFlags=activationFlags,
sessionCache=sessionCache,
settings=settings,
nextProtos=[b"http/1.1"],
reqCert=reqCert)
# As an example (does not work here):
#nextProtos=[b"spdy/3", b"spdy/2", b"http/1.1"])
stop = time.clock()
except TLSRemoteAlert as a:
if a.description == AlertDescription.user_canceled:
print(str(a))
return False
else:
raise
except TLSLocalAlert as a:
if a.description == AlertDescription.unknown_psk_identity:
if username:
print("Unknown username")
return False
else:
raise
elif a.description == AlertDescription.bad_record_mac:
if username:
print("Bad username or password")
return False
else:
raise
elif a.description == AlertDescription.handshake_failure:
print("Unable to negotiate mutually acceptable parameters")
return False
else:
raise
connection.ignoreAbruptClose = True
printGoodConnection(connection, stop-start)
return True
httpd = MyHTTPServer(address, SimpleHTTPRequestHandler)
httpd.serve_forever()
if __name__ == '__main__':
if len(sys.argv) < 2:
printUsage("Missing command")
elif sys.argv[1] == "client"[:len(sys.argv[1])]:
clientCmd(sys.argv[2:])
elif sys.argv[1] == "server"[:len(sys.argv[1])]:
serverCmd(sys.argv[2:])
else:
printUsage("Unknown command: %s" % sys.argv[1])
|
ioef/tlslite-ng
|
scripts/tls.py
|
Python
|
lgpl-2.1
| 12,217
|
"""Higher-level processing of moves and positions from SGF games."""
from gomill import boards
from gomill import sgf_properties
def get_setup_and_moves(sgf_game, board=None):
"""Return the initial setup and the following moves from an Sgf_game.
Returns a pair (board, plays)
board -- boards.Board
plays -- list of pairs (colour, move)
moves are (row, col), or None for a pass.
The board represents the position described by AB and/or AW properties
in the root node.
The moves are from the game's 'leftmost' variation.
Raises ValueError if this position isn't legal.
Raises ValueError if there are any AB/AW/AE properties after the root
node.
Doesn't check whether the moves are legal.
If the optional 'board' parameter is provided, it must be an empty board of
the right size; the same object will be returned.
"""
size = sgf_game.get_size()
if board is None:
board = boards.Board(size)
else:
if board.side != size:
raise ValueError("wrong board size, must be %d" % size)
if not board.is_empty():
raise ValueError("board not empty")
root = sgf_game.get_root()
nodes = sgf_game.main_sequence_iter()
ab, aw, ae = root.get_setup_stones()
if ab or aw:
is_legal = board.apply_setup(ab, aw, ae)
if not is_legal:
raise ValueError("setup position not legal")
colour, raw = root.get_raw_move()
if colour is not None:
raise ValueError("mixed setup and moves in root node")
nodes.next()
moves = []
for node in nodes:
if node.has_setup_stones():
raise ValueError("setup properties after the root node")
colour, raw = node.get_raw_move()
if colour is not None:
moves.append((colour, sgf_properties.interpret_go_point(raw, size)))
return board, moves
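# A minimal usage sketch, assuming gomill.sgf.Sgf_game.from_string() from
# the same package (the SGF text here is illustrative only):
#
#     from gomill import sgf
#     game = sgf.Sgf_game.from_string("(;SZ[9];B[ee];W[cc])")
#     board, plays = get_setup_and_moves(game)
#     for colour, move in plays:
#         if move is not None:
#             row, col = move
#             board.play(row, col, colour)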
def set_initial_position(sgf_game, board):
"""Add setup stones to an Sgf_game reflecting a board position.
sgf_game -- Sgf_game
board -- boards.Board
Replaces any existing setup stones in the Sgf_game's root node.
"""
stones = {'b' : set(), 'w' : set()}
for (colour, point) in board.list_occupied_points():
stones[colour].add(point)
sgf_game.get_root().set_setup_stones(stones['b'], stones['w'])
def indicate_first_player(sgf_game):
"""Add a PL property to the root node if appropriate.
Looks at the first child of the root to see who the first player is, and
sets PL if it isn't the expected player (ie, black normally, but white if
there is a handicap), or if there are non-handicap setup stones.
"""
root = sgf_game.get_root()
first_player, move = root[0].get_move()
if first_player is None:
return
has_handicap = root.has_property("HA")
if root.has_property("AW"):
specify_pl = True
elif root.has_property("AB") and not has_handicap:
specify_pl = True
elif not has_handicap and first_player == 'w':
specify_pl = True
elif has_handicap and first_player == 'b':
specify_pl = True
else:
specify_pl = False
if specify_pl:
root.set('PL', first_player)
|
inclement/noGo
|
noGo/ext/gomill/sgf_moves.py
|
Python
|
gpl-3.0
| 3,240
|
# coding: utf-8
#
# Copyright 2020 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for service functions related to Oppia improvement tasks."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import datetime
from core.domain import improvements_domain
from core.domain import improvements_services
from core.platform import models
from core.tests import test_utils
import feconf
import python_utils
(improvements_models,) = (
models.Registry.import_models([models.NAMES.improvements]))
class ImprovementsServicesTestBase(test_utils.GenericTestBase):
"""Base class with helper methods for the improvements_services tests."""
EXP_ID = 'eid'
MOCK_DATE = datetime.datetime(2020, 6, 15)
def setUp(self):
super(ImprovementsServicesTestBase, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.exp = self.save_new_valid_exploration(self.EXP_ID, self.owner_id)
# Necessary to provide sufficient debug information when failures occur.
self.maxDiff = None
def _new_obsolete_task(
self, state_name=feconf.DEFAULT_INIT_STATE_NAME,
task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
exploration_version=1):
"""Constructs a new default obsolete task with the provided values.
Args:
state_name: str. The name of the state the task should target.
task_type: str. The type of the task.
exploration_version: int. The version of the exploration the task
should target.
Returns:
improvements_domain.TaskEntry. A new obsolete task entry.
"""
return improvements_domain.TaskEntry(
entity_type=improvements_models.TASK_ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_ID,
entity_version=exploration_version,
task_type=task_type,
target_type=improvements_models.TASK_TARGET_TYPE_STATE,
target_id=state_name,
issue_description='issue description',
status=improvements_models.TASK_STATUS_OBSOLETE,
resolver_id=None,
resolved_on=None)
def _new_open_task(
self, state_name=feconf.DEFAULT_INIT_STATE_NAME,
task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
exploration_version=1):
"""Constructs a new default open task with the provided values.
Args:
state_name: str. The name of the state the task should target.
task_type: str. The type of the task.
exploration_version: int. The version of the exploration the task
should target.
Returns:
improvements_domain.TaskEntry. A new open task entry.
"""
return improvements_domain.TaskEntry(
entity_type=improvements_models.TASK_ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_ID,
entity_version=exploration_version,
task_type=task_type,
target_type=improvements_models.TASK_TARGET_TYPE_STATE,
target_id=state_name,
issue_description='issue description',
status=improvements_models.TASK_STATUS_OPEN,
resolver_id=None,
resolved_on=None)
def _new_resolved_task(
self, state_name=feconf.DEFAULT_INIT_STATE_NAME,
task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
exploration_version=1):
"""Constructs a new default resolved task with the provided values.
Args:
state_name: str. The name of the state the task should target.
task_type: str. The type of the task.
exploration_version: int. The version of the exploration the task
should target.
Returns:
improvements_domain.TaskEntry. A new resolved task entry.
"""
return improvements_domain.TaskEntry(
entity_type=improvements_models.TASK_ENTITY_TYPE_EXPLORATION,
entity_id=self.EXP_ID,
entity_version=exploration_version,
task_type=task_type,
target_type=improvements_models.TASK_TARGET_TYPE_STATE,
target_id=state_name,
issue_description='issue description',
status=improvements_models.TASK_STATUS_RESOLVED,
resolver_id=self.owner_id,
resolved_on=self.MOCK_DATE)
class GetTaskEntryFromModelTests(ImprovementsServicesTestBase):
"""Unit tests for the get_task_entry_from_model function."""
def test_returns_same_fields_as_model(self):
task_id = improvements_models.TaskEntryModel.create(
improvements_models.TASK_ENTITY_TYPE_EXPLORATION, self.EXP_ID, 1,
improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
improvements_models.TASK_TARGET_TYPE_STATE,
feconf.DEFAULT_INIT_STATE_NAME, 'issue description',
improvements_models.TASK_STATUS_RESOLVED, self.owner_id,
self.MOCK_DATE)
task_entry_model = improvements_models.TaskEntryModel.get_by_id(task_id)
task_entry = (
improvements_services.get_task_entry_from_model(task_entry_model))
self.assertEqual(task_entry.task_id, task_entry_model.id)
self.assertEqual(
task_entry.composite_entity_id,
task_entry_model.composite_entity_id)
self.assertEqual(task_entry.entity_type, task_entry_model.entity_type)
self.assertEqual(
task_entry.entity_version, task_entry_model.entity_version)
self.assertEqual(task_entry.task_type, task_entry_model.task_type)
self.assertEqual(task_entry.target_type, task_entry_model.target_type)
self.assertEqual(task_entry.target_id, task_entry_model.target_id)
self.assertEqual(
task_entry.issue_description, task_entry_model.issue_description)
self.assertEqual(task_entry.status, task_entry_model.status)
self.assertEqual(task_entry.resolver_id, task_entry_model.resolver_id)
self.assertEqual(task_entry.resolved_on, task_entry_model.resolved_on)
class FetchExplorationTasksTests(ImprovementsServicesTestBase):
"""Unit tests for the fetch_exploration_tasks function."""
def test_fetch_when_no_models_exist(self):
open_tasks, resolved_task_types_by_state_name = (
improvements_services.fetch_exploration_tasks(self.exp))
self.assertEqual(open_tasks, [])
self.assertEqual(resolved_task_types_by_state_name, {})
def test_fetch_when_number_of_open_tasks_exceed_single_fetch_limit(self):
tasks = [
self._new_open_task(state_name='State %d' % (i,))
for i in python_utils.RANGE(
int(feconf.MAX_TASK_MODELS_PER_FETCH * 2.5))
]
improvements_services.put_tasks(tasks)
open_tasks, resolved_task_types_by_state_name = (
improvements_services.fetch_exploration_tasks(self.exp))
self.assertEqual(resolved_task_types_by_state_name, {})
self.assertItemsEqual(
[t.to_dict() for t in tasks], [t.to_dict() for t in open_tasks])
def test_fetch_identifies_the_resolved_tasks_of_each_state(self):
tasks = [
self._new_resolved_task(
state_name='A',
task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE),
self._new_resolved_task(
state_name='B',
task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE),
self._new_resolved_task(
state_name='B',
task_type=(
improvements_models.TASK_TYPE_NEEDS_GUIDING_RESPONSES)),
self._new_resolved_task(
state_name='C',
task_type=(
improvements_models.TASK_TYPE_INEFFECTIVE_FEEDBACK_LOOP)),
self._new_resolved_task(
state_name='D',
task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE),
self._new_resolved_task(
state_name='D',
task_type=(
improvements_models.TASK_TYPE_NEEDS_GUIDING_RESPONSES)),
self._new_resolved_task(
state_name='D',
task_type=(
improvements_models.TASK_TYPE_INEFFECTIVE_FEEDBACK_LOOP)),
self._new_resolved_task(
state_name='D',
task_type=(
improvements_models.TASK_TYPE_SUCCESSIVE_INCORRECT_ANSWERS))
]
improvements_services.put_tasks(tasks)
open_tasks, resolved_task_types_by_state_name = (
improvements_services.fetch_exploration_tasks(self.exp))
self.assertEqual(open_tasks, [])
self.assertItemsEqual(list(resolved_task_types_by_state_name.keys()), [
'A',
'B',
'C',
'D',
])
self.assertItemsEqual(
resolved_task_types_by_state_name['A'], ['high_bounce_rate'])
self.assertItemsEqual(
resolved_task_types_by_state_name['B'], [
'high_bounce_rate',
'needs_guiding_responses',
])
self.assertItemsEqual(
resolved_task_types_by_state_name['C'], [
'ineffective_feedback_loop',
])
self.assertItemsEqual(
resolved_task_types_by_state_name['D'], [
'high_bounce_rate',
'needs_guiding_responses',
'ineffective_feedback_loop',
'successive_incorrect_answers',
])
def test_fetch_ignores_obsolete_tasks(self):
tasks = [
self._new_obsolete_task(state_name='State %d' % (i,))
for i in python_utils.RANGE(50)
]
improvements_services.put_tasks(tasks)
open_tasks, resolved_task_types_by_state_name = (
improvements_services.fetch_exploration_tasks(self.exp))
self.assertEqual(open_tasks, [])
self.assertEqual(resolved_task_types_by_state_name, {})
def test_fetch_only_returns_tasks_for_the_given_exploration_version(self):
tasks = [
# Version 1 tasks.
self._new_open_task(
state_name='A',
task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
exploration_version=1),
self._new_open_task(
state_name='B',
task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
exploration_version=1),
self._new_open_task(
state_name='C',
task_type=improvements_models.TASK_TYPE_NEEDS_GUIDING_RESPONSES,
exploration_version=1),
# Version 2 tasks.
self._new_open_task(
state_name='A',
task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
exploration_version=2),
self._new_resolved_task(
state_name='B',
task_type=improvements_models.TASK_TYPE_HIGH_BOUNCE_RATE,
exploration_version=2),
self._new_resolved_task(
state_name='C',
task_type=improvements_models.TASK_TYPE_NEEDS_GUIDING_RESPONSES,
exploration_version=2),
]
improvements_services.put_tasks(tasks)
self.exp.version = 2
open_tasks, resolved_task_types_by_state_name = (
improvements_services.fetch_exploration_tasks(self.exp))
self.assertItemsEqual(
[t.to_dict() for t in open_tasks], [tasks[3].to_dict()])
self.assertEqual(
resolved_task_types_by_state_name, {
'B': ['high_bounce_rate'],
'C': ['needs_guiding_responses'],
})
class FetchExplorationTaskHistoryPageTests(ImprovementsServicesTestBase):
"""Unit tests for the fetch_exploration_task_history_page function."""
def setUp(self):
super(FetchExplorationTaskHistoryPageTests, self).setUp()
task_entries = []
for i in python_utils.RANGE(1, 26):
task_entry = self._new_resolved_task(
state_name='State %d' % (i,), exploration_version=i)
task_entry.resolved_on = (
self.MOCK_DATE + datetime.timedelta(minutes=5 * i))
# The last_updated values of the tasks descend so that the tasks
# returned are ordered by resolved_on instead.
task_entry.last_updated = (
self.MOCK_DATE - datetime.timedelta(minutes=5 * i))
task_entries.append(task_entry)
improvements_services.put_tasks(
task_entries, update_last_updated_time=False)
def test_fetch_returns_first_page_of_history(self):
results, cursor, more = (
improvements_services.fetch_exploration_task_history_page(self.exp))
self.assertEqual([t.target_id for t in results], [
'State 25', 'State 24', 'State 23', 'State 22', 'State 21',
'State 20', 'State 19', 'State 18', 'State 17', 'State 16',
])
self.assertTrue(more)
self.assertIsNotNone(cursor)
def test_fetch_until_no_more_pages_returns_every_resolved_task(self):
aggregated_tasks, cursor, more = [], None, True
while more:
results, cursor, more = (
improvements_services.fetch_exploration_task_history_page(
self.exp, urlsafe_start_cursor=cursor))
aggregated_tasks.extend(results)
self.assertEqual([t.target_id for t in aggregated_tasks], [
'State 25', 'State 24', 'State 23', 'State 22', 'State 21',
'State 20', 'State 19', 'State 18', 'State 17', 'State 16',
'State 15', 'State 14', 'State 13', 'State 12', 'State 11',
'State 10', 'State 9', 'State 8', 'State 7', 'State 6',
'State 5', 'State 4', 'State 3', 'State 2', 'State 1',
])
self.assertFalse(more)
def test_fetch_first_page_after_fetching_next_page_returns_same_results(
self):
initial_results, initial_cursor, initial_more = (
improvements_services.fetch_exploration_task_history_page(self.exp))
self.assertIsNotNone(initial_cursor)
self.assertTrue(initial_more)
# Make a call for the second page.
improvements_services.fetch_exploration_task_history_page(
self.exp, urlsafe_start_cursor=initial_cursor)
# Make another call for the first page.
subsequent_results, subsequent_cursor, subsequent_more = (
improvements_services.fetch_exploration_task_history_page(self.exp))
self.assertEqual(
[t.to_dict() for t in initial_results],
[t.to_dict() for t in subsequent_results])
self.assertEqual(initial_cursor, subsequent_cursor)
self.assertEqual(initial_more, subsequent_more)
class PutTasksTests(ImprovementsServicesTestBase):
"""Unit tests for the put_tasks function."""
def test_put_for_task_entries_which_do_not_exist_creates_new_models(self):
open_task = self._new_open_task(state_name='Start')
obsolete_task = self._new_obsolete_task(state_name='Middle')
resolved_task = self._new_resolved_task(state_name='End')
improvements_services.put_tasks(
[open_task, obsolete_task, resolved_task])
open_task_model = (
improvements_models.TaskEntryModel.get_by_id(open_task.task_id))
obsolete_task_model = (
improvements_models.TaskEntryModel.get_by_id(obsolete_task.task_id))
resolved_task_model = (
improvements_models.TaskEntryModel.get_by_id(resolved_task.task_id))
self.assertEqual(
open_task.to_dict(),
improvements_services.get_task_entry_from_model(
open_task_model).to_dict())
self.assertEqual(
obsolete_task.to_dict(),
improvements_services.get_task_entry_from_model(
obsolete_task_model).to_dict())
self.assertEqual(
resolved_task.to_dict(),
improvements_services.get_task_entry_from_model(
resolved_task_model).to_dict())
def test_put_for_tasks_entries_which_exist_updates_the_models(self):
task_entry = self._new_open_task()
created_on = datetime.datetime(2020, 6, 15, 5)
updated_on = created_on + datetime.timedelta(minutes=5)
with self.mock_datetime_utcnow(created_on):
improvements_services.put_tasks([task_entry])
model = improvements_models.TaskEntryModel.get_by_id(task_entry.task_id)
self.assertEqual(model.resolver_id, None)
self.assertEqual(model.created_on, created_on)
self.assertEqual(model.last_updated, created_on)
task_entry = self._new_resolved_task()
with self.mock_datetime_utcnow(updated_on):
improvements_services.put_tasks([task_entry])
model = improvements_models.TaskEntryModel.get_by_id(task_entry.task_id)
self.assertEqual(model.resolver_id, self.owner_id)
self.assertEqual(model.created_on, created_on)
self.assertEqual(model.last_updated, updated_on)
def test_put_for_task_entries_that_are_not_changing_does_nothing(self):
task_entry = self._new_resolved_task()
created_on = datetime.datetime(2020, 6, 15, 5)
updated_on = created_on + datetime.timedelta(minutes=5)
with self.mock_datetime_utcnow(created_on):
improvements_services.put_tasks([task_entry])
model = improvements_models.TaskEntryModel.get_by_id(task_entry.task_id)
self.assertEqual(model.resolver_id, self.owner_id)
self.assertEqual(model.created_on, created_on)
self.assertEqual(model.last_updated, created_on)
with self.mock_datetime_utcnow(updated_on):
improvements_services.put_tasks([task_entry])
model = improvements_models.TaskEntryModel.get_by_id(task_entry.task_id)
self.assertEqual(model.resolver_id, self.owner_id)
self.assertEqual(model.created_on, created_on)
self.assertEqual(model.last_updated, created_on)
def test_put_for_updated_task_entries_without_changing_last_updated(self):
task_entry = self._new_open_task()
created_on = datetime.datetime(2020, 6, 15, 5)
updated_on = created_on + datetime.timedelta(minutes=5)
with self.mock_datetime_utcnow(created_on):
improvements_services.put_tasks([task_entry])
model = improvements_models.TaskEntryModel.get_by_id(task_entry.task_id)
self.assertEqual(model.resolver_id, None)
self.assertEqual(model.created_on, created_on)
self.assertEqual(model.last_updated, created_on)
task_entry = self._new_resolved_task()
with self.mock_datetime_utcnow(updated_on):
improvements_services.put_tasks(
[task_entry], update_last_updated_time=False)
model = improvements_models.TaskEntryModel.get_by_id(task_entry.task_id)
self.assertEqual(model.resolver_id, self.owner_id)
self.assertEqual(model.created_on, created_on)
self.assertEqual(model.last_updated, created_on)
class ApplyChangesToModelTests(ImprovementsServicesTestBase):
"""Unit tests for the apply_changes_to_model function."""
def test_passing_mismatching_task_entries_raises_an_exception(self):
task_entry = self._new_open_task()
improvements_services.put_tasks([task_entry])
task_entry_model = (
improvements_models.TaskEntryModel.get_by_id(task_entry.task_id))
task_entry.target_id = 'Different State'
with self.assertRaisesRegexp(Exception, 'Wrong model provided'):
improvements_services.apply_changes_to_model(
task_entry, task_entry_model)
def test_returns_false_when_task_is_equivalent_to_model(self):
task_entry = self._new_open_task()
improvements_services.put_tasks([task_entry])
task_entry_model = (
improvements_models.TaskEntryModel.get_by_id(task_entry.task_id))
self.assertFalse(
improvements_services.apply_changes_to_model(
task_entry, task_entry_model))
def test_makes_changes_when_issue_description_is_different(self):
task_entry = self._new_open_task()
improvements_services.put_tasks([task_entry])
task_entry_model = (
improvements_models.TaskEntryModel.get_by_id(task_entry.task_id))
task_entry.issue_description = 'new issue description'
self.assertTrue(
improvements_services.apply_changes_to_model(
task_entry, task_entry_model))
self.assertEqual(
task_entry_model.issue_description, 'new issue description')
def test_makes_changes_to_status_related_fields_if_status_is_different(
self):
task_entry = self._new_open_task()
improvements_services.put_tasks([task_entry])
task_entry_model = (
improvements_models.TaskEntryModel.get_by_id(task_entry.task_id))
task_entry = self._new_resolved_task()
self.assertTrue(
improvements_services.apply_changes_to_model(
task_entry, task_entry_model))
self.assertEqual(
task_entry_model.status, improvements_models.TASK_STATUS_RESOLVED)
self.assertEqual(task_entry_model.resolver_id, self.owner_id)
self.assertEqual(task_entry_model.resolved_on, self.MOCK_DATE)
def test_no_changes_made_if_only_resolver_id_is_different(self):
task_entry = self._new_open_task()
improvements_services.put_tasks([task_entry])
task_entry_model = (
improvements_models.TaskEntryModel.get_by_id(task_entry.task_id))
task_entry.resolver_id = self.owner_id
self.assertFalse(
improvements_services.apply_changes_to_model(
task_entry, task_entry_model))
self.assertEqual(
task_entry_model.status, improvements_models.TASK_STATUS_OPEN)
self.assertIsNone(task_entry_model.resolver_id)
self.assertIsNone(task_entry_model.resolved_on)
def test_no_changes_made_if_only_resolved_on_is_different(self):
task_entry = self._new_open_task()
improvements_services.put_tasks([task_entry])
task_entry_model = (
improvements_models.TaskEntryModel.get_by_id(task_entry.task_id))
task_entry.resolved_on = self.MOCK_DATE + datetime.timedelta(days=1)
self.assertFalse(
improvements_services.apply_changes_to_model(
task_entry, task_entry_model))
self.assertEqual(
task_entry_model.status, improvements_models.TASK_STATUS_OPEN)
self.assertIsNone(task_entry_model.resolver_id)
self.assertIsNone(task_entry_model.resolved_on)
|
prasanna08/oppia
|
core/domain/improvements_services_test.py
|
Python
|
apache-2.0
| 23,748
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 22 09:23:19 2014
@author: B Poon (demure)
"""
import numpy as np
from matplotlib import pyplot as plt
from math import log10
#definitions
first = r"C:\pyf\ast\iso100Mfixed.txt"
second = r"C:\pyf\ast\iso1G.txt"
third = r"C:\pyf\ast\iso10G.txt"
def iso_theo(filepath, color, dist):
"""
Plots theoretical isochrones using file and color
"""
data = np.loadtxt(filepath)
i = data[:,2]
v = data[:,3]
mbol = data[:,1]
#optdist = 1 #replaces dist because of worksheet
optdist = log10(dist)
i_mag = mbol - i + 5 * optdist - 5
v_mag = mbol - v + 5 * optdist - 5
v_minus_i = v_mag - i_mag
plt.plot(v_minus_i, i_mag, color, markersize=5)
plt.title("Theoretical Isochrones, 100Myr, 1Gyr, 10Gyr")
plt.xlabel("V-i (mag)")
plt.ylabel("i (mag)")
#y axis bottom to top, positive to negative value switch
plt.ylim(26, 10)
def main():
#lmc_dist sets the distance modulus via iso_theo()
lmc_dist = 39810 #parsecs to LMC
iso_theo(first, 'b.', lmc_dist)
iso_theo(second, 'y.', lmc_dist)
iso_theo(third, 'g.', lmc_dist)
if __name__ == '__main__':
main()
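# The magnitude shift applied in iso_theo() is the standard distance
# modulus, m - M = 5*log10(d) - 5 with d in parsecs. Sanity check for the
# worksheet distance used above:
#
#     >>> 5 * log10(39810) - 5
#     17.99996...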
|
brupoon/nextTwilight
|
iso_theoretical.py
|
Python
|
mit
| 1,193
|
#! /usr/bin/env python
# encoding: UTF-8
# Thomas Nagy 2008-2010 (ita)
"""
Doxygen support
Variables passed to bld():
* doxyfile -- the Doxyfile to use
* install_path -- where to install the documentation
* pars -- dictionary overriding doxygen configuration settings
When using this tool, the wscript will look like:
def options(opt):
opt.load('doxygen')
def configure(conf):
conf.load('doxygen')
# check conf.env.DOXYGEN, if it is mandatory
def build(bld):
if bld.env.DOXYGEN:
bld(features="doxygen", doxyfile='Doxyfile', ...)
"""
import os, os.path, sys, re
from pprint import pprint
from waflib import Utils
from waflib import Task, Utils, Node
from waflib.TaskGen import feature
DOXY_STR = '"${DOXYGEN}" - '
DOXY_FMTS = 'html latex man rtf xml'.split()
DOXY_FILE_PATTERNS = '''
*.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx
*.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp *.h++ *.idl
*.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
*.f90 *.f *.for *.vhd *.vhdl *.tcl *.md *.markdown
*.C *.CC *.C++ *.II *.I++ *.H *.HH *.H++ *.CS
*.PHP *.PHP3 *.M *.MM *.PY *.F90 *.F *.VHD *.VHDL
*.TCL *.MD *.MARKDOWN
'''.split()
def parse_doxy_text(txt):
re_rl = re.compile('\\\\\r*\n', re.MULTILINE) # regular expression to address
# the following tag structure.
#
# TAG = value [value, ...] \
# value [value, ...] \
# value [value, ...]
re_nl = re.compile('\r*\n', re.MULTILINE)
table = {}
txt = re_rl.sub('', txt)
lines = re_nl.split(txt)
for line in lines:
line = line.strip()
if not line or line.startswith('#') or line.find('=') < 0:
continue
if line.find('+=') > 0:
tmp = line.split('+=', 1) # split the line on the first occurrence of '+='
key = tmp[0].strip()
value = tmp[1].strip()
if key in table:
table[key] = (table[key] + ' ' + value).strip()
else:
table[key] = value
else:
tmp = line.split('=', 1) # split the line on the first occurrence of '='
key = tmp[0].strip()
value = tmp[1].strip()
table[key] = value
return table
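# A sketch of the parser's behaviour on a hypothetical Doxyfile fragment:
#
#     >>> parse_doxy_text('PROJECT_NAME = demo\nINPUT = src\nINPUT += include\n')
#     {'PROJECT_NAME': 'demo', 'INPUT': 'src include'}
#
# Backslash line continuations are folded away first, comment lines and
# lines without '=' are skipped, and '+=' appends to any earlier value.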
class doxygen(Task.Task):
"""
Task to process doxygen targets
"""
def __init__(self, *k, **kw):
Task.Task.__init__(self, *k, **kw)
self.hasrun = 0
vars = ['DOXYGEN', 'DOXYFLAGS']
color = 'PINK'
def runnable_status(self):
# wait for dependent tasks to be complete
for task in self.run_after:
if not task.hasrun:
return Task.ASK_LATER
if not getattr(self, 'pars', None):
doxyfile_node = self.inputs[0]
doxyfile_dir_node = doxyfile_node.parent
# initialize self.pars with parameters read from the doxyfile
txt = doxyfile_node.read()
self.pars = parse_doxy_text(txt)
# override self.pars with any parameters passed to the task generator
if getattr(self.generator, 'pars', None):
for key, value in self.generator.pars.items():
self.pars[key] = value
if self.pars.get('OUTPUT_DIRECTORY'):
output_directory_node = doxyfile_dir_node.get_bld().make_node(self.pars['OUTPUT_DIRECTORY'])
else:
# if no OUTPUT_DIRECTORY was specified in the doxyfile or task
# generator, build the output directory name from the doxyfile name
output_directory_node = doxyfile_dir_node.get_bld().make_node(doxyfile_node.name)
output_directory_node.mkdir()
self.pars['OUTPUT_DIRECTORY'] = output_directory_node.abspath()
if self.pars.get('GENERATE_TAGFILE'):
generate_tagfile = self.pars['GENERATE_TAGFILE']
generate_tagfile_node = output_directory_node.make_node(generate_tagfile)
generate_tagfile_node.parent.mkdir()
self.pars['GENERATE_TAGFILE'] = generate_tagfile_node.abspath()
for task in self.run_after:
dependent_task_install_path = Utils.subst_vars(getattr(task.generator, 'install_path'), task.env)
main_task_install_path = Utils.subst_vars(getattr(self.generator, 'install_path'), self.env)
self.pars['TAGFILES'] = self.pars['TAGFILES'] + ' ' + (task.pars['GENERATE_TAGFILE'] + '=' + os.path.relpath(os.path.join(dependent_task_install_path, task.pars['HTML_OUTPUT']), os.path.join(main_task_install_path, self.pars['HTML_OUTPUT'])))
self.doxy_inputs = getattr(self, 'doxy_inputs', [])
if self.pars.get('INPUT'):
re_path_split = re.compile(r'"[^"]+"|\'[^\']+\'|[^"\'\s]+') # to allow parsing paths that contain spaces, e.g. INPUT = ./first/path "./second path" ...
inputs = re.findall(re_path_split, self.pars['INPUT'])
for input in inputs:
if os.path.isabs(input):
node = self.generator.bld.root.find_node(input)
else:
node = doxyfile_dir_node.find_node(input)
if not node:
self.generator.bld.fatal('Could not find the doxygen INPUT %r' % input)
self.doxy_inputs.append(node)
else:
self.doxy_inputs.append(doxyfile_dir_node)
if not getattr(self, 'output_dir', None):
self.output_dir = self.generator.bld.root.find_dir(self.pars.get('OUTPUT_DIRECTORY'))
else:
if not isinstance(self.output_dir, Node.Node):
self.output_dir = self.generator.bld.root.find_dir(self.output_dir)
if not self.output_dir:
raise ValueError('Could not determine the doxygen output directory')
self.signature()
ret = Task.Task.runnable_status(self)
if ret == Task.SKIP_ME:
# in case the files were removed
self.add_install()
return ret
def scan(self):
exclude_patterns = self.pars.get('EXCLUDE_PATTERNS', '').split()
file_patterns = self.pars.get('FILE_PATTERNS', '').split()
if not file_patterns:
file_patterns = DOXY_FILE_PATTERNS
if self.pars.get('RECURSIVE', '').upper() == 'YES':
file_patterns = ["**/%s" % pattern for pattern in file_patterns]
nodes = []
names = []
for doxy_input in self.doxy_inputs:
if os.path.isdir(doxy_input.abspath()):
for node in doxy_input.ant_glob(incl=file_patterns, excl=exclude_patterns):
nodes.append(node)
else:
nodes.append(doxy_input)
return (nodes, names)
def run(self):
doxyfile_node = self.inputs[0]
doxyfile_dir_node = doxyfile_node.parent
pars = self.pars.copy()
input_data = '\n'.join(['%s = %s' % (key, pars[key]) for key in pars])
input_data = input_data.encode() # for python 3
cmd = Utils.subst_vars(DOXY_STR, self.env)
proc = Utils.subprocess.Popen(cmd, shell=True, stdin=Utils.subprocess.PIPE, env=self.env.env or None, cwd=doxyfile_dir_node.abspath())
proc.communicate(input_data)
return proc.returncode
def post_run(self):
nodes = self.output_dir.ant_glob('**/*', quiet=True)
for node in nodes:
node.sig = Utils.h_file(node.abspath())
self.add_install()
return Task.Task.post_run(self)
def add_install(self):
nodes = self.output_dir.ant_glob('**/*', quiet=True)
self.outputs += nodes
if getattr(self.generator, 'install_path', None):
if not getattr(self.generator, 'doxy_tar', None):
self.generator.bld.install_files(self.generator.install_path,
self.outputs,
postpone=False,
cwd=self.output_dir,
relative_trick=True)
@feature('doxygen')
def create_doxygen(self):
"""
Creates a doxygen task (feature 'doxygen')
"""
if not getattr(self, 'doxyfile', None):
self.bld.fatal('The doxygen configuration file was not specified!')
doxyfile_node = self.doxyfile
if not isinstance(doxyfile_node, Node.Node):
doxyfile_node = self.path.find_node(doxyfile_node)
if not doxyfile_node:
self.bld.fatal('The doxygen configuration file (' + self.doxyfile + ') was not found!')
self.doxygen_task = self.create_task('doxygen', doxyfile_node)
ref_docs = self.to_list(getattr(self, 'ref_docs', []))
for ref_doc in ref_docs:
try:
tg = self.bld.get_tgen_by_name(ref_doc)
tg.post()
if isinstance(getattr(tg, 'doxygen_task', None), doxygen):
tg.pars = getattr(tg, 'pars', {})
tg.pars["GENERATE_TAGFILE"] = tg.get_name() + '.tag'
else:
self.bld.fatal('The following reference documentation is not a valid doxygen target : ' + ref_doc)
self.doxygen_task.set_run_after(tg.doxygen_task)
except Exception:
pass
def configure(conf):
'''
Check if doxygen and tar commands are present in the system
If the commands are present, then conf.env.DOXYGEN and conf.env.TAR
variables will be set. Detection can be controlled by setting DOXYGEN and
TAR environmental variables.
'''
conf.find_program('doxygen', var='DOXYGEN', mandatory=False)
|
XNerv/Mermaid
|
tools/my/modules/waftools/doxygen.py
|
Python
|
gpl-3.0
| 8,580
|